/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <[email protected]>
 *		Florian La Roche, <[email protected]>
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/bvec.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
#include <linux/refcount.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <linux/llist.h>
#include <net/flow.h>
#include <net/page_pool.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
#endif
#include <net/net_debug.h>

/**
 * DOC: skb checksums
 *
 * The interface for checksum offload between the stack and networking drivers
 * is as follows...
 *
 * IP checksum related features
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Drivers advertise checksum offload capabilities in the features of a device.
 * From the stack's point of view these are capabilities offered by the driver.
 * A driver typically only advertises features that it is capable of offloading
 * to its device.
 *
 * .. flat-table:: Checksum related device features
 *   :widths: 1 10
 *
 *   * - %NETIF_F_HW_CSUM
 *     - The driver (or its device) is able to compute one
 *       IP (one's complement) checksum for any combination
 *       of protocols or protocol layering. The checksum is
 *       computed and set in a packet per the CHECKSUM_PARTIAL
 *       interface (see below).
 *
 *   * - %NETIF_F_IP_CSUM
 *     - Driver (device) is only able to checksum plain
 *       TCP or UDP packets over IPv4. These are specifically
 *       unencapsulated packets of the form IPv4|TCP or
 *       IPv4|UDP where the Protocol field in the IPv4 header
 *       is TCP or UDP. The IPv4 header may contain IP options.
 *       This feature cannot be set in features for a device
 *       with NETIF_F_HW_CSUM also set. This feature is being
 *       DEPRECATED (see below).
 *
 *   * - %NETIF_F_IPV6_CSUM
 *     - Driver (device) is only able to checksum plain
 *       TCP or UDP packets over IPv6. These are specifically
 *       unencapsulated packets of the form IPv6|TCP or
 *       IPv6|UDP where the Next Header field in the IPv6
 *       header is either TCP or UDP. IPv6 extension headers
 *       are not supported with this feature. This feature
 *       cannot be set in features for a device with
 *       NETIF_F_HW_CSUM also set. This feature is being
 *       DEPRECATED (see below).
 *
 *   * - %NETIF_F_RXCSUM
 *     - Driver (device) performs receive checksum offload.
 *       This flag is only used to disable the RX checksum
 *       feature for a device. The stack will accept receive
 *       checksum indication in packets received on a device
 *       regardless of whether NETIF_F_RXCSUM is set.
 *
 * Checksumming of received packets by device
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Indication of checksum verification is set in &sk_buff.ip_summed.
 * Possible values are:
 *
 * - %CHECKSUM_NONE
 *
 *   Device did not checksum this packet, e.g. due to lack of capabilities.
 *   The packet contains the full (though not verified) checksum in the packet
 *   itself but not in skb->csum. Thus, skb->csum is undefined in this case.
 *
 * - %CHECKSUM_UNNECESSARY
 *
 *   The hardware you're dealing with doesn't calculate the full checksum
 *   (as in %CHECKSUM_COMPLETE), but it does parse headers and verify checksums
 *   for specific protocols. For such packets it will set %CHECKSUM_UNNECESSARY
 *   if their checksums are okay. &sk_buff.csum is still undefined in this case
 *   though. A driver or device must never modify the checksum field in the
 *   packet even if the checksum is verified.
 *
 *   %CHECKSUM_UNNECESSARY is applicable to the following protocols:
 *
 *     - TCP: IPv6 and IPv4.
 *     - UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
 *       zero UDP checksum for either IPv4 or IPv6; the networking stack
 *       may perform further validation in this case.
 *     - GRE: only if the checksum is present in the header.
 *     - SCTP: indicates the CRC in the SCTP header has been validated.
 *     - FCOE: indicates the CRC in the FC frame has been validated.
 *
 *   &sk_buff.csum_level indicates the number of consecutive checksums found in
 *   the packet minus one that have been verified as %CHECKSUM_UNNECESSARY.
 *   For instance, if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
 *   and is able to verify the checksums for UDP (possibly zero),
 *   GRE (checksum flag is set) and TCP, &sk_buff.csum_level would be set to
 *   two. If the device were only able to verify the UDP checksum and not
 *   GRE, either because it doesn't support GRE checksum or because the GRE
 *   checksum is bad, skb->csum_level would be set to zero (TCP checksum is
 *   not considered in this case).
 *
 * - %CHECKSUM_COMPLETE
 *
 *   This is the most generic way. The device supplies the checksum of the
 *   _whole_ packet as seen by netif_rx() and fills in &sk_buff.csum. This
 *   means the hardware doesn't need to parse L3/L4 headers to implement this.
 *
 *   Notes:
 *
 *   - Even if a device supports only some protocols but is able to produce
 *     skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
 *   - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols.
 *
 * - %CHECKSUM_PARTIAL
 *
 *   A checksum is set up to be offloaded to a device as described in the
 *   output description for CHECKSUM_PARTIAL. This may occur on a packet
 *   received directly from another Linux OS, e.g., a virtualized Linux kernel
 *   on the same host, or it may be set in the input path in GRO or remote
 *   checksum offload. For the purposes of checksum verification, the checksum
 *   referred to by skb->csum_start + skb->csum_offset and any preceding
 *   checksums in the packet are considered verified. Any checksums in the
 *   packet that are after the checksum being offloaded are not considered to
 *   be verified.
 *
 * Checksumming on transmit for non-GSO
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The stack requests checksum offload in &sk_buff.ip_summed for a packet.
 * Values are:
 *
 * - %CHECKSUM_PARTIAL
 *
 *   The driver is required to checksum the packet as seen by hard_start_xmit()
 *   from &sk_buff.csum_start up to the end, and to record/write the checksum at
 *   offset &sk_buff.csum_start + &sk_buff.csum_offset.
 *   A driver may verify that the
 *   csum_start and csum_offset values are valid values given the length and
 *   offset of the packet, but it should not attempt to validate that the
 *   checksum refers to a legitimate transport layer checksum -- it is the
 *   purview of the stack to validate that csum_start and csum_offset are set
 *   correctly.
 *
 *   When the stack requests checksum offload for a packet, the driver MUST
 *   ensure that the checksum is set correctly. A driver can either offload the
 *   checksum calculation to the device, or call skb_checksum_help() (in the
 *   case that the device does not support offload for a particular checksum).
 *
 *   %NETIF_F_IP_CSUM and %NETIF_F_IPV6_CSUM are being deprecated in favor of
 *   %NETIF_F_HW_CSUM. New devices should use %NETIF_F_HW_CSUM to indicate
 *   checksum offload capability.
 *   skb_csum_hwoffload_help() can be called to resolve %CHECKSUM_PARTIAL based
 *   on network device checksumming capabilities: if a packet does not match
 *   them, skb_checksum_help() or skb_crc32c_help() (depending on the value of
 *   &sk_buff.csum_not_inet, see :ref:`crc`)
 *   is called to resolve the checksum.
 *
 * - %CHECKSUM_NONE
 *
 *   The skb was already checksummed by the protocol, or a checksum is not
 *   required.
 *
 * - %CHECKSUM_UNNECESSARY
 *
 *   This has the same meaning as CHECKSUM_NONE for checksum offload on
 *   output.
 *
 * - %CHECKSUM_COMPLETE
 *
 *   Not used in checksum output. If a driver observes a packet with this value
 *   set in the skbuff, it should treat the packet as if %CHECKSUM_NONE were
 *   set.
 *
 * .. _crc:
 *
 * Non-IP checksum (CRC) offloads
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * .. flat-table::
 *   :widths: 1 10
 *
 *   * - %NETIF_F_SCTP_CRC
 *     - This feature indicates that a device is capable of
 *       offloading the SCTP CRC in a packet. To perform this offload the stack
 *       will set csum_start and csum_offset accordingly, set ip_summed to
 *       %CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication
 *       in the skbuff that the %CHECKSUM_PARTIAL refers to CRC32c.
 *       A driver that supports both IP checksum offload and SCTP CRC32c offload
 *       must verify which offload is configured for a packet by testing the
 *       value of &sk_buff.csum_not_inet; skb_crc32c_csum_help() is provided to
 *       resolve %CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1.
 *
 *   * - %NETIF_F_FCOE_CRC
 *     - This feature indicates that a device is capable of offloading the FCOE
 *       CRC in a packet. To perform this offload the stack will set ip_summed
 *       to %CHECKSUM_PARTIAL and set csum_start and csum_offset
 *       accordingly. Note that there is no indication in the skbuff that the
 *       %CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports
 *       both IP checksum offload and FCOE CRC offload must verify which offload
 *       is configured for a packet, presumably by inspecting packet headers.
 *
 * Checksumming on output with GSO
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * In the case of a GSO packet (skb_is_gso() is true), checksum offload
 * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
 * gso_type is %SKB_GSO_TCPV4 or %SKB_GSO_TCPV6, TCP checksum offload as
 * part of the GSO operation is implied. If a checksum is being offloaded
 * with GSO then ip_summed is %CHECKSUM_PARTIAL, and both csum_start and
 * csum_offset are set to refer to the outermost checksum being offloaded
 * (two offloaded checksums are possible with UDP encapsulation).
 */
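
/*
 * Example (illustrative only, not part of this header): per the DOC above,
 * a hypothetical driver whose hardware verified both the outer UDP and the
 * inner TCP checksums of a tunneled packet on receive would report::
 *
 *	skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	skb->csum_level = 1;	// two consecutive verified checksums, minus one
 *
 * A device that instead supplies the one's complement sum of the whole
 * packet (as seen by netif_rx()) would report, with "rx_desc_csum" being a
 * hypothetical descriptor field::
 *
 *	skb->ip_summed = CHECKSUM_COMPLETE;
 *	skb->csum = rx_desc_csum;
 */
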
/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3
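
/*
 * Example (illustrative only): resolving %CHECKSUM_PARTIAL in software when
 * a device cannot offload a particular checksum, as the transmit rules in
 * the DOC above require; skb_checksum_help() is declared later in this
 * header and the "drop" label is hypothetical::
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */
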
#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

struct ahash_request;
struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	/* always valid & non-NULL from FORWARD on, for physdev match */
	struct net_device	*physoutdev;
	union {
		/* prerouting: detect dnat in orig/reply direction */
		__be32		ipv4_daddr;
		struct in6_addr	ipv6_daddr;

		/* after prerouting + nat detected: store original source
		 * mac since neigh resolution overwrites it, only used while
		 * skb is out in neigh layer.
		 */
		char		neigh_header[8];
	};
};
#endif

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* Chain in tc_skb_ext will be used to share the tc chain with
 * ovs recirc_id. It will be set to the current chain by tc
 * and read by ovs to recirc_id.
 */
struct tc_skb_ext {
	__u32 chain;
	__u16 mru;
	__u16 zone;
	u8 post_ct:1;
	u8 post_ct_snat:1;
	u8 post_ct_dnat:1;
};
#endif

struct sk_buff_head {
	/* These two members must be first to match sk_buff. */
	struct_group_tagged(sk_buff_list, list,
		struct sk_buff	*next;
		struct sk_buff	*prev;
	);

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* The reason for an skb drop, as used by kfree_skb_reason().
 * Perhaps these should be split into groups?
 *
 * Each item here should also be in 'TRACE_SKB_DROP_REASON', which is
 * used to translate the reason to a string.
 */
enum skb_drop_reason {
	SKB_NOT_DROPPED_YET = 0,
	SKB_DROP_REASON_NOT_SPECIFIED,	/* drop reason is not specified */
	SKB_DROP_REASON_NO_SOCKET,	/* socket not found */
	SKB_DROP_REASON_PKT_TOO_SMALL,	/* packet size is too small */
	SKB_DROP_REASON_TCP_CSUM,	/* TCP checksum error */
	SKB_DROP_REASON_SOCKET_FILTER,	/* dropped by socket filter */
	SKB_DROP_REASON_UDP_CSUM,	/* UDP checksum error */
	SKB_DROP_REASON_NETFILTER_DROP,	/* dropped by netfilter */
	SKB_DROP_REASON_OTHERHOST,	/* packet doesn't belong to the
					 * current host (interface is in
					 * promisc mode)
					 */
	SKB_DROP_REASON_IP_CSUM,	/* IP checksum error */
	SKB_DROP_REASON_IP_INHDR,	/* there is something wrong with
					 * the IP header (see
					 * IPSTATS_MIB_INHDRERRORS)
					 */
	SKB_DROP_REASON_IP_RPFILTER,	/* IP rpfilter validation failed;
					 * see the documentation for
					 * rp_filter in ip-sysctl.rst for
					 * more information
					 */
	SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST, /* destination address of L2
						  * is multicast, but L3 is
						  * unicast.
						  */
	SKB_DROP_REASON_XFRM_POLICY,	/* xfrm policy check failed */
	SKB_DROP_REASON_IP_NOPROTO,	/* no support for IP protocol */
	SKB_DROP_REASON_SOCKET_RCVBUFF,	/* socket receive buffer is full */
	SKB_DROP_REASON_PROTO_MEM,	/* protocol memory limitation, such
					 * as a UDP packet dropped because
					 * it exceeds udp_memory_allocated
					 */
	SKB_DROP_REASON_TCP_MD5NOTFOUND,	/* no MD5 hash and one
						 * expected, corresponding
						 * to LINUX_MIB_TCPMD5NOTFOUND
						 */
	SKB_DROP_REASON_TCP_MD5UNEXPECTED,	/* MD5 hash and we're not
						 * expecting one, corresponding
						 * to LINUX_MIB_TCPMD5UNEXPECTED
						 */
	SKB_DROP_REASON_TCP_MD5FAILURE,	/* MD5 hash and it's wrong,
					 * corresponding to
					 * LINUX_MIB_TCPMD5FAILURE
					 */
	SKB_DROP_REASON_SOCKET_BACKLOG,	/* failed to add skb to socket
					 * backlog (see
					 * LINUX_MIB_TCPBACKLOGDROP)
					 */
	SKB_DROP_REASON_TCP_FLAGS,	/* TCP flags invalid */
	SKB_DROP_REASON_TCP_ZEROWINDOW,	/* TCP receive window size is zero,
					 * see LINUX_MIB_TCPZEROWINDOWDROP
					 */
	SKB_DROP_REASON_TCP_OLD_DATA,	/* the TCP data received was already
					 * received before (a spurious
					 * retransmission may have happened),
					 * see LINUX_MIB_DELAYEDACKLOST
					 */
	SKB_DROP_REASON_TCP_OVERWINDOW,	/* the TCP data is out of window,
					 * the sequence number of the first
					 * byte exceeds the right edge of
					 * the receive window
					 */
	SKB_DROP_REASON_TCP_OFOMERGE,	/* the data of the skb is already in
					 * the ofo queue, corresponding to
					 * LINUX_MIB_TCPOFOMERGE
					 */
	SKB_DROP_REASON_TCP_RFC7323_PAWS, /* PAWS check, corresponding to
					   * LINUX_MIB_PAWSESTABREJECTED
					   */
	SKB_DROP_REASON_TCP_INVALID_SEQUENCE, /* Not acceptable SEQ field */
	SKB_DROP_REASON_TCP_RESET,	/* Invalid RST packet */
	SKB_DROP_REASON_TCP_INVALID_SYN, /* Incoming packet has unexpected SYN flag */
	SKB_DROP_REASON_TCP_CLOSE,	/* TCP socket in CLOSE state */
	SKB_DROP_REASON_TCP_FASTOPEN,	/* dropped by FASTOPEN request socket */
	SKB_DROP_REASON_TCP_OLD_ACK,	/* TCP ACK is old, but in window */
	SKB_DROP_REASON_TCP_TOO_OLD_ACK, /* TCP ACK is too old */
	SKB_DROP_REASON_TCP_ACK_UNSENT_DATA, /* TCP ACK for data we haven't sent yet */
	SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE, /* pruned from TCP OFO queue */
	SKB_DROP_REASON_TCP_OFO_DROP,	/* data already in receive queue */
	SKB_DROP_REASON_IP_OUTNOROUTES,	/* route lookup failed */
	SKB_DROP_REASON_BPF_CGROUP_EGRESS,	/* dropped by
						 * BPF_PROG_TYPE_CGROUP_SKB
						 * eBPF program
						 */
	SKB_DROP_REASON_IPV6DISABLED,	/* IPv6 is disabled on the device */
	SKB_DROP_REASON_NEIGH_CREATEFAIL,	/* failed to create neigh
						 * entry
						 */
	SKB_DROP_REASON_NEIGH_FAILED,	/* neigh entry in failed state */
	SKB_DROP_REASON_NEIGH_QUEUEFULL,	/* arp_queue for neigh
						 * entry is full
						 */
	SKB_DROP_REASON_NEIGH_DEAD,	/* neigh entry is dead */
	SKB_DROP_REASON_TC_EGRESS,	/* dropped in TC egress HOOK */
	SKB_DROP_REASON_QDISC_DROP,	/* dropped by the qdisc on the
					 * output path (failed to enqueue
					 * to the current qdisc)
					 */
	SKB_DROP_REASON_CPU_BACKLOG,	/* failed to enqueue the skb to
					 * the per-CPU backlog queue. This
					 * can be caused by the backlog
					 * queue being full (see
					 * netdev_max_backlog in net.rst)
					 * or by the RPS flow limit
					 */
	SKB_DROP_REASON_XDP,		/* dropped by XDP in input path */
	SKB_DROP_REASON_TC_INGRESS,	/* dropped in TC ingress HOOK */
	SKB_DROP_REASON_UNHANDLED_PROTO,	/* protocol not implemented
						 * or not supported
						 */
	SKB_DROP_REASON_SKB_CSUM,	/* sk_buff checksum computation
					 * error
					 */
	SKB_DROP_REASON_SKB_GSO_SEG,	/* gso segmentation error */
	SKB_DROP_REASON_SKB_UCOPY_FAULT,	/* failed to copy data from
						 * user space, e.g., via
						 * zerocopy_sg_from_iter()
						 * or skb_orphan_frags_rx()
						 */
	SKB_DROP_REASON_DEV_HDR,	/* device driver specific
					 * header/metadata is invalid
					 */
	/* the device is not ready to xmit/recv due to any of its data
	 * structures that are not up/ready/initialized, e.g., IFF_UP is
	 * not set, or the driver-specific tun->tfiles[txq] is not
	 * initialized
	 */
	SKB_DROP_REASON_DEV_READY,
	SKB_DROP_REASON_FULL_RING,	/* ring buffer is full */
	SKB_DROP_REASON_NOMEM,		/* error due to OOM */
	SKB_DROP_REASON_HDR_TRUNC,	/* failed to trunc/extract the header
					 * from networking data, e.g., failed
					 * to pull the protocol header from
					 * frags via pskb_may_pull()
					 */
	SKB_DROP_REASON_TAP_FILTER,	/* dropped by (ebpf) filter directly
					 * attached to tun/tap, e.g., via
					 * TUNSETFILTEREBPF
					 */
	SKB_DROP_REASON_TAP_TXFILTER,	/* dropped by tx filter implemented
					 * at tun/tap, e.g., check_filter()
					 */
	SKB_DROP_REASON_ICMP_CSUM,	/* ICMP checksum error */
	SKB_DROP_REASON_INVALID_PROTO,	/* the packet doesn't follow RFC
					 * 1122, such as a broadcast
					 * ICMP_TIMESTAMP
					 */
	SKB_DROP_REASON_IP_INADDRERRORS,	/* host unreachable, corresponding
						 * to IPSTATS_MIB_INADDRERRORS
						 */
	SKB_DROP_REASON_IP_INNOROUTES,	/* network unreachable, corresponding
					 * to IPSTATS_MIB_INNOROUTES
					 */
	SKB_DROP_REASON_PKT_TOO_BIG,	/* packet size is too big (may exceed
					 * the MTU)
					 */
	SKB_DROP_REASON_MAX,
};

#define SKB_DR_INIT(name, reason)				\
	enum skb_drop_reason name = SKB_DROP_REASON_##reason
#define SKB_DR(name)						\
	SKB_DR_INIT(name, NOT_SPECIFIED)
#define SKB_DR_SET(name, reason)				\
	(name = SKB_DROP_REASON_##reason)
#define SKB_DR_OR(name, reason)					\
	do {							\
		if (name == SKB_DROP_REASON_NOT_SPECIFIED ||	\
		    name == SKB_NOT_DROPPED_YET)		\
			SKB_DR_SET(name, reason);		\
	} while (0)
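
/*
 * Example (illustrative only): typical use of the helpers above in a
 * receive path; validate_pkt() is hypothetical and kfree_skb_reason() is
 * declared later in this header::
 *
 *	SKB_DR(reason);		// starts as SKB_DROP_REASON_NOT_SPECIFIED
 *
 *	if (!validate_pkt(skb)) {
 *		SKB_DR_SET(reason, TCP_CSUM);
 *		kfree_skb_reason(skb, reason);
 *	}
 */
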
/* To allow a 64K frame to be packed as a single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;

/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
 * segment using its current segmentation instead.
 */
#define GSO_BY_FRAGS	0xFFFF

typedef struct bio_vec skb_frag_t;

/**
 * skb_frag_size() - Returns the size of a skb fragment
 * @frag: skb fragment
 */
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->bv_len;
}

/**
 * skb_frag_size_set() - Sets the size of a skb fragment
 * @frag: skb fragment
 * @size: size of fragment
 */
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->bv_len = size;
}

/**
 * skb_frag_size_add() - Increments the size of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to add
 */
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->bv_len += delta;
}

/**
 * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to subtract
 */
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->bv_len -= delta;
}

/**
 * skb_frag_must_loop - Test if @p is a high memory page
 * @p: fragment's page
 */
static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
	if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
		return true;
#endif
	return false;
}

/**
 * skb_frag_foreach_page - loop over pages in a fragment
 *
 * @f:		skb frag to operate on
 * @f_off:	offset from start of f->bv_page
 * @f_len:	length from f_off to loop over
 * @p:		(temp var) current page
 * @p_off:	(temp var) offset from start of current page,
 *		non-zero only on first page.
 * @p_len:	(temp var) length in current page,
 *		< PAGE_SIZE only on first and last page.
 * @copied:	(temp var) length so far, excluding current p_len.
 *
 * A fragment can hold a compound page, in which case per-page
 * operations, notably kmap_atomic, must be called for each
 * regular page.
 */
#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied)	\
	for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),		\
	     p_off = (f_off) & (PAGE_SIZE - 1),				\
	     p_len = skb_frag_must_loop(p) ?				\
	     min_t(u32, f_len, PAGE_SIZE - p_off) : f_len,		\
	     copied = 0;						\
	     copied < f_len;						\
	     copied += p_len, p++, p_off = 0,				\
	     p_len = min_t(u32, f_len - copied, PAGE_SIZE))
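
/*
 * Example (illustrative only): copying a fragment that may sit in a
 * compound or highmem page, mapping one regular page at a time; "dst" is
 * a hypothetical destination buffer and skb_frag_off() is declared later
 * in this header::
 *
 *	struct page *p;
 *	u32 p_off, p_len, copied;
 *
 *	skb_frag_foreach_page(frag, skb_frag_off(frag), skb_frag_size(frag),
 *			      p, p_off, p_len, copied) {
 *		void *vaddr = kmap_atomic(p);
 *
 *		memcpy(dst + copied, vaddr + p_off, p_len);
 *		kunmap_atomic(vaddr);
 *	}
 */
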
#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:		hardware time stamp transformed into duration
 *			since arbitrary point in time
 * @netdev_data:	address/cookie of network device driver used as
 *			reference to actual hardware time stamp
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	union {
		ktime_t	hwtstamp;
		void	*netdev_data;
	};
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp when queueing packet to NIC */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* generate hardware time stamp based on cycles if supported */
	SKBTX_HW_TSTAMP_USE_CYCLES = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* determine hardware time stamp based on time or cycles */
	SKBTX_HW_TSTAMP_NETDEV = 1 << 5,

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,
};

#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
				 SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | \
				 SKBTX_HW_TSTAMP_USE_CYCLES | \
				 SKBTX_ANY_SW_TSTAMP)
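
/*
 * Example (illustrative only): a driver's TX completion handler feeding a
 * hardware timestamp back to the stack; skb_tstamp_tx() is declared later
 * in this header and read_hw_tstamp() is hypothetical::
 *
 *	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
 *		struct skb_shared_hwtstamps hwts = {};
 *
 *		hwts.hwtstamp = read_hw_tstamp(dev);
 *		skb_tstamp_tx(skb, &hwts);
 *	}
 */
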
/* Definitions for flags in struct skb_shared_info */
enum {
	/* use zcopy routines */
	SKBFL_ZEROCOPY_ENABLE = BIT(0),

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBFL_SHARED_FRAG = BIT(1),

	/* segment contains only zerocopy data and should not be
	 * charged to the kernel memory.
	 */
	SKBFL_PURE_ZEROCOPY = BIT(2),

	SKBFL_DONT_ORPHAN = BIT(3),

	/* page references are managed by the ubuf_info, so it's safe to
	 * use frags only up until ubuf_info is released
	 */
	SKBFL_MANAGED_FRAG_REFS = BIT(4),
};

#define SKBFL_ZEROCOPY_FRAG	(SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
#define SKBFL_ALL_ZEROCOPY	(SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY | \
				 SKBFL_DONT_ORPHAN | SKBFL_MANAGED_FRAG_REFS)

/*
 * The callback notifies userspace to release buffers when skb DMA is done
 * in the lower device; the skb's last reference should be 0 when calling
 * this. The zerocopy_success argument is true if zero copy transmit
 * occurred, false on data copy or out of memory error caused by a data
 * copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct sk_buff *, struct ubuf_info *,
			 bool zerocopy_success);
	union {
		struct {
			unsigned long desc;
			void *ctx;
		};
		struct {
			u32 id;
			u16 len;
			u16 zerocopy:1;
			u32 bytelen;
		};
	};
	refcount_t refcnt;
	u8 flags;

	struct mmpin {
		struct user_struct *user;
		unsigned int num_pg;
	} mmp;
};

#define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	__u8		flags;
	__u8		meta_len;
	__u8		nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	unsigned int	gso_type;
	u32		tskey;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;
	unsigned int	xdp_frags_size;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/**
 * DOC: dataref and headerless skbs
 *
 * Transport layers send out clones of payload skbs they hold for
 * retransmissions. To allow lower layers of the stack to prepend their headers
 * we split &skb_shared_info.dataref into two halves.
 * The lower 16 bits count the overall number of references.
 * The higher 16 bits indicate how many of the references are payload-only.
 * skb_header_cloned() checks if the skb is allowed to add / write the headers.
 *
 * The creator of the skb (e.g. TCP) marks its skb as &sk_buff.nohdr
 * (via __skb_header_release()). Any clone created from a marked skb will get
 * &sk_buff.hdr_len populated with the available headroom.
 * If there is only one clone in existence, it can modify the headroom
 * at will. The sequence of calls inside the transport layer is::
 *
 *  <alloc skb>
 *  skb_reserve()
 *  __skb_header_release()
 *  skb_clone()
 *  // send the clone down the stack
 *
 * This is not a very generic construct and it depends on the transport layers
 * doing the right thing. In practice there's usually only one payload-only skb.
 * Having multiple payload-only skbs with different lengths of hdr_len is not
 * possible. The payload-only skbs should never leave their owner.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
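
/*
 * Example (illustrative only): extracting the two halves described in the
 * DOC above, mirroring what skb_header_cloned() (declared later in this
 * header) computes::
 *
 *	int dataref = atomic_read(&skb_shinfo(skb)->dataref);
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *	int total_refs = dataref & SKB_DATAREF_MASK;
 *
 * Headers may be written only when every reference other than our own is
 * payload-only, i.e. total_refs - payload_refs == 1.
 */
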

enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 1,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 2,

	SKB_GSO_TCP_FIXEDID = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_GRE_CSUM = 1 << 7,

	SKB_GSO_IPXIP4 = 1 << 8,

	SKB_GSO_IPXIP6 = 1 << 9,

	SKB_GSO_UDP_TUNNEL = 1 << 10,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,

	SKB_GSO_PARTIAL = 1 << 12,

	SKB_GSO_TUNNEL_REMCSUM = 1 << 13,

	SKB_GSO_SCTP = 1 << 14,

	SKB_GSO_ESP = 1 << 15,

	SKB_GSO_UDP = 1 << 16,

	SKB_GSO_UDP_L4 = 1 << 17,

	SKB_GSO_FRAGLIST = 1 << 18,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 * DOC: Basic sk_buff geometry
 *
 * struct sk_buff itself is a metadata structure and does not hold any packet
 * data. All the data is held in associated buffers.
 *
 * &sk_buff.head points to the main "head" buffer. The head buffer is divided
 * into two parts:
 *
 *  - data buffer, containing headers and sometimes payload;
 *    this is the part of the skb operated on by the common helpers
 *    such as skb_put() or skb_pull();
 *  - shared info (struct skb_shared_info) which holds an array of pointers
 *    to read-only data in the (page, offset, length) format.
 *
 * Optionally &skb_shared_info.frag_list may point to another skb.
 *
 * A basic diagram may look like this::
 *
 *                                  ---------------
 *                                 | sk_buff       |
 *                                  ---------------
 *     ,---------------------------  + head
 *    /          ,-----------------  + data
 *   /          /      ,-----------  + tail
 *  |          |      |            , + end
 *  |          |      |           |
 *  v          v      v           v
 *   -----------------------------------------------
 *  | headroom | data |  tailroom | skb_shared_info |
 *   -----------------------------------------------
 *                                 + [page frag]
 *                                 + [page frag]
 *                                 + [page frag]
 *                                 + [page frag]       ---------
 *                                 + frag_list    --> | sk_buff |
 *                                                     ---------
 *
 */
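
/*
 * Example (illustrative only): the geometry above expressed as pointer
 * arithmetic for a linear, non-cloned skb, matching what the
 * skb_headroom()/skb_tailroom() helpers declared later in this header
 * compute::
 *
 *	headroom = skb->data - skb->head;
 *	linear   = skb_tail_pointer(skb) - skb->data;	// == skb_headlen(skb)
 *	tailroom = skb_end_pointer(skb) - skb_tail_pointer(skb);
 */
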
/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived/left
 *	@skb_mstamp_ns: (aka @tstamp) earliest departure time; start point
 *		for retransmit timer
 *	@rbnode: RB tree node, alternative to next/prev for netem/tcp
 *	@list: queue head
 *	@ll_node: anchor in an llist (eg socket defer_list)
 *	@sk: Socket we are owned by
 *	@ip_defrag_offset: (aka @sk) alternate use of @sk, used in
 *		fragmentation management
 *	@dev: Device we arrived on/are leaving by
 *	@dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@ignore_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@inner_protocol_type: whether the inner protocol is
 *		ENCAP_TYPE_ETHER or ENCAP_TYPE_IPPROTO
 *	@remcsum_offload: remote checksum offload is enabled
 *	@offload_fwd_mark: Packet was L2-forwarded in hardware
 *	@offload_l3_fwd_mark: Packet was L3-forwarded in hardware
 *	@tc_skip_classify: do not classify the packet; set by the IFB device
 *	@tc_at_ingress: used within tc_classify to distinguish in/egress
 *	@redirected: packet was redirected by packet classifier
 *	@from_ingress: packet was redirected from the ingress path
 *	@nf_skip_egress: packet shall skip nf egress - see netfilter_netdev.h
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
 *	@_sk_redir: socket redirection information for skmsg
 *	@_nfct: Associated connection, if any (with nfctinfo bits)
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@hash: the packet hash
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@head_frag: skb was allocated from page fragments,
 *		not allocated by kmalloc() or vmalloc().
 *	@pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
 *	@pp_recycle: mark the packet for recycling instead of freeing (implies
 *		page_pool support on driver)
 *	@active_extensions: active extensions (skb_ext_id types)
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_hash: indicate hash is a canonical 4-tuple hash over transport
 *		ports.
 *	@sw_hash: indicates hash was computed in software stack
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
 *	@encapsulation: indicates the inner headers in the skbuff are valid
 *	@encap_hdr_csum: software checksum is needed
 *	@csum_valid: checksum is already valid
 *	@csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
 *	@csum_complete_sw: checksum was completed by software
 *	@csum_level: indicates the number of consecutive checksums found in
 *		the packet minus one that have been verified as
 *		CHECKSUM_UNNECESSARY (max 3)
 *	@dst_pending_confirm: need to confirm neighbour
 *	@decrypted: Decrypted SKB
 *	@slow_gro: state present at GRO time, slower prepare step required
 *	@mono_delivery_time: When set, skb->tstamp has the
 *		delivery_time in mono clock base (i.e. EDT). Otherwise, the
 *		skb->tstamp has the (rcv) timestamp at ingress and
 *		delivery_time at egress.
 *	@napi_id: id of the NAPI struct this skb came from
 *	@sender_cpu: (aka @napi_id) source CPU in XPS
 *	@alloc_cpu: CPU which did the skb allocation.
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@reserved_tailroom: (aka @mark) number of bytes of free space available
 *		at the tail of an sk_buff
 *	@vlan_present: VLAN tag is present
 *	@vlan_proto: vlan encapsulation protocol
 *	@vlan_tci: vlan tag control information
 *	@inner_protocol: Protocol (encapsulation)
 *	@inner_ipproto: (aka @inner_protocol) stores ipproto when
 *		skb->inner_protocol_type == ENCAP_TYPE_IPPROTO;
 *	@inner_transport_header: Inner transport layer header (encapsulation)
 *	@inner_network_header: Network layer header (encapsulation)
 *	@inner_mac_header: Link layer header (encapsulation)
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@kcov_handle: KCOV remote handle for remote coverage collection
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 *	@extensions: allocated extensions, valid if active_extensions is nonzero
 */

struct sk_buff {
	union {
		struct {
			/* These two members must be first to match sk_buff_head. */
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				struct net_device	*dev;
				/* Some protocols might use this space to store
				 * information, while the device pointer would
				 * be NULL. UDP receive path is one user.
				 */
				unsigned long		dev_scratch;
			};
		};
		struct rb_node		rbnode; /* used in netem, ip4 defrag, and tcp stack */
		struct list_head	list;
		struct llist_node	ll_node;
	};

	union {
		struct sock		*sk;
		int			ip_defrag_offset;
	};

	union {
		ktime_t		tstamp;
		u64		skb_mstamp_ns; /* earliest departure time */
	};
	/*
	 * This is the control buffer. It is free to use by every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	union {
		struct {
			unsigned long	_skb_refdst;
			void		(*destructor)(struct sk_buff *skb);
		};
		struct list_head	tcp_tsorted_anchor;
#ifdef CONFIG_NET_SOCK_MSG
		unsigned long		_sk_redir;
#endif
	};

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	unsigned long		 _nfct;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header()
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	__u16			queue_mapping;

/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET		offsetof(struct sk_buff, __cloned_offset)

	/* private: */
	__u8			__cloned_offset[0];
	/* public: */
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				pfmemalloc:1,
				pp_recycle:1; /* page_pool recycle indicator */
#ifdef CONFIG_SKB_EXTENSIONS
	__u8			active_extensions;
#endif

	/* Fields enclosed in headers group are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	struct_group(headers,

	/* private: */
	__u8			__pkt_type_offset[0];
	/* public: */
	__u8			pkt_type:3; /* see PKT_TYPE_MAX */
	__u8			ignore_df:1;
	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;

	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;

	/* private: */
	__u8			__pkt_vlan_present_offset[0];
	/* public: */
	__u8			vlan_present:1;	/* See PKT_VLAN_PRESENT_BIT */
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			dst_pending_confirm:1;
	__u8			mono_delivery_time:1;	/* See SKB_MONO_DELIVERY_TIME_MASK */
#ifdef CONFIG_NET_CLS_ACT
	__u8			tc_skip_classify:1;
	__u8			tc_at_ingress:1;	/* See TC_AT_INGRESS_MASK */
#endif
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif

	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
	__u8			offload_l3_fwd_mark:1;
#endif
	__u8			redirected:1;
#ifdef CONFIG_NET_REDIRECT
	__u8			from_ingress:1;
#endif
#ifdef CONFIG_NETFILTER_SKIP_EGRESS
	__u8			nf_skip_egress:1;
#endif
#ifdef CONFIG_TLS_DEVICE
	__u8			decrypted:1;
#endif
	__u8			slow_gro:1;
	__u8			csum_not_inet:1;

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
	u16			alloc_cpu;
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

#ifdef CONFIG_KCOV
	u64			kcov_handle;
#endif

	); /* end headers group */

	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	refcount_t		users;

#ifdef CONFIG_SKB_EXTENSIONS
	/* only usable after checking ->active_extensions != 0 */
	struct skb_ext		*extensions;
#endif
};

/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET		offsetof(struct sk_buff, __pkt_type_offset)

/* if you move pkt_vlan_present, tc_at_ingress, or mono_delivery_time
 * around, you also must adapt these constants.
 */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_VLAN_PRESENT_BIT		7
#define TC_AT_INGRESS_MASK		(1 << 0)
#define SKB_MONO_DELIVERY_TIME_MASK	(1 << 2)
#else
#define PKT_VLAN_PRESENT_BIT		0
#define TC_AT_INGRESS_MASK		(1 << 7)
#define SKB_MONO_DELIVERY_TIME_MASK	(1 << 5)
#endif
#define PKT_VLAN_PRESENT_OFFSET	offsetof(struct sk_buff, __pkt_vlan_present_offset)
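
/*
 * Example (illustrative only): these constants let code that cannot name a
 * C bitfield (e.g. BPF or assembly helpers) reach pkt_type through a byte
 * load; shown here for the little-endian layout::
 *
 *	u8 byte = *((u8 *)skb + PKT_TYPE_OFFSET);
 *	u8 pkt_type = byte & PKT_TYPE_MAX;
 */
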
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */

#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02
#define SKB_ALLOC_NAPI		0x04

/**
 * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
 * @skb: buffer
 */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of whether a reference was taken.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in an
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->slow_gro |= !!dst;
	skb->_skb_refdst = (unsigned long)dst;
}

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->slow_gro |= !!dst;
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

/**
 * skb_rtable - Returns the skb &rtable
 * @skb: buffer
 */
static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

/* For mangling skb->pkt_type from user space, by applications such as
 * nft, tc, etc., we only allow a conservative subset of possible
 * pkt_types to be set.
 */
static inline bool skb_pkt_type_ok(u32 ptype)
{
	return ptype <= PACKET_OTHERHOST;
}

/**
 * skb_napi_id - Returns the skb's NAPI id
 * @skb: buffer
 */
static inline unsigned int skb_napi_id(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return skb->napi_id;
#else
	return 0;
#endif
}

/**
 * skb_unref - decrement the skb's reference count
 * @skb: buffer
 *
 * Returns true if we can free the skb.
 */
static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
	if (likely(refcount_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!refcount_dec_and_test(&skb->users)))
		return false;

	return true;
}

void kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason);

/**
 * kfree_skb - free an sk_buff with 'NOT_SPECIFIED' reason
 * @skb: buffer to free
 */
static inline void kfree_skb(struct sk_buff *skb)
{
	kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}

void skb_release_head_state(struct sk_buff *skb);
void kfree_skb_list_reason(struct sk_buff *segs,
			   enum skb_drop_reason reason);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);

static inline void kfree_skb_list(struct sk_buff *segs)
{
	kfree_skb_list_reason(segs, SKB_DROP_REASON_NOT_SPECIFIED);
}

#ifdef CONFIG_TRACEPOINTS
void consume_skb(struct sk_buff *skb);
#else
static inline void consume_skb(struct sk_buff *skb)
{
	return kfree_skb(skb);
}
#endif

void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size);
void skb_attempt_defer_free(struct sk_buff *skb);

struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);

/**
 * alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @priority: allocation mask
 *
 * This function is a convenient wrapper around __alloc_skb().
 */
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}
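
/*
 * Example (illustrative only): the classic receive-buffer pattern for a
 * hypothetical frame length "len"; skb_reserve() and skb_put() are
 * declared later in this header::
 *
 *	skb = alloc_skb(len + NET_IP_ALIGN, GFP_ATOMIC);
 *	if (!skb)
 *		return NULL;
 *	skb_reserve(skb, NET_IP_ALIGN);	// align the IP header
 *	skb_put(skb, len);		// make room for the frame data
 */
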
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);

/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

	refcount_t	fclone_ref;
};

/**
 *	skb_fclone_busy - check if fclone is busy
 *	@sk: socket
 *	@skb: buffer
 *
 * Returns true if skb is a fast clone, and its clone is not freed.
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didn't happen.
 */
static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       refcount_read(&fclones->fclone_ref) > 1 &&
	       READ_ONCE(fclones->skb2.sk) == sk;
}

/**
 * alloc_skb_fclone - allocate a network buffer from fclone cache
 * @size: size to allocate
 * @priority: allocation mask
 *
 * This function is a convenient wrapper around __alloc_skb().
 */
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
void skb_headers_offset_update(struct sk_buff *skb, int off);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
				     int offset, int len);
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
			      int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
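
/*
 * Example (illustrative only): guaranteeing "hlen" bytes of headroom
 * before pushing a new header; skb_headroom() and skb_push() are declared
 * later in this header and the "drop" label is hypothetical::
 *
 *	if (skb_headroom(skb) < hlen &&
 *	    pskb_expand_head(skb, hlen - skb_headroom(skb), 0, GFP_ATOMIC))
 *		goto drop;
 *	skb_push(skb, hlen);
 */
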
/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */
static inline int skb_pad(struct sk_buff *skb, int pad)
{
	return __skb_pad(skb, pad, true);
}
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
	__u32		frag_off;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config);
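
/*
 * Example (illustrative only): walking a possibly fragmented skb with the
 * sequential reader declared above, one linear chunk at a time::
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		// process "len" bytes at "data"
 *		consumed += len;
 *	}
 *
 * skb_abort_seq_read() must be called instead if the walk stops before
 * skb_seq_read() has returned 0.
 */
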
/*
 * Packet hash types specify the type of hash in skb_set_hash.
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash. The hashes are used to differentiate or identify
 * flows of the protocol layer for the hash type. Hash types are either
 * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
 *
 * Properties of hashes:
 *
 * 1) Two packets in different flows have different hash values
 * 2) Two packets in the same flow should have the same hash value
 *
 * A hash at a higher layer is considered to be more specific. A driver should
 * set the most specific hash possible.
 *
 * A driver cannot indicate a more specific hash than the layer at which a hash
 * was computed. For instance an L3 hash cannot be set as an L4 hash.
 *
 * A driver may indicate a hash level which is less specific than the
 * actual layer the hash was computed on. For instance, a hash computed
 * at L4 may be considered an L3 hash. This should only be done if the
 * driver can't unambiguously determine that the HW computed the hash at
 * the higher layer. Note that the "should" in the second property above
 * permits this.
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
	skb->l4_hash = is_l4;
	skb->sw_hash = is_sw;
	skb->hash = hash;
}

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	/* Used by drivers to set hash from HW */
	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}
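
/*
 * Example (illustrative only): a driver reporting a hardware-computed
 * 4-tuple hash from a receive descriptor; "rx_desc" and its "rss_hash"
 * field are hypothetical::
 *
 *	skb_set_hash(skb, le32_to_cpu(rx_desc->rss_hash), PKT_HASH_TYPE_L4);
 */
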
1684 * to user states. 1685 */ 1686 void 1687 skb_flow_dissect_ct(const struct sk_buff *skb, 1688 struct flow_dissector *flow_dissector, 1689 void *target_container, 1690 u16 *ctinfo_map, size_t mapsize, 1691 bool post_ct, u16 zone); 1692 void 1693 skb_flow_dissect_tunnel_info(const struct sk_buff *skb, 1694 struct flow_dissector *flow_dissector, 1695 void *target_container); 1696 1697 void skb_flow_dissect_hash(const struct sk_buff *skb, 1698 struct flow_dissector *flow_dissector, 1699 void *target_container); 1700 1701 static inline __u32 skb_get_hash(struct sk_buff *skb) 1702 { 1703 if (!skb->l4_hash && !skb->sw_hash) 1704 __skb_get_hash(skb); 1705 1706 return skb->hash; 1707 } 1708 1709 static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6) 1710 { 1711 if (!skb->l4_hash && !skb->sw_hash) { 1712 struct flow_keys keys; 1713 __u32 hash = __get_hash_from_flowi6(fl6, &keys); 1714 1715 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys)); 1716 } 1717 1718 return skb->hash; 1719 } 1720 1721 __u32 skb_get_hash_perturb(const struct sk_buff *skb, 1722 const siphash_key_t *perturb); 1723 1724 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) 1725 { 1726 return skb->hash; 1727 } 1728 1729 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) 1730 { 1731 to->hash = from->hash; 1732 to->sw_hash = from->sw_hash; 1733 to->l4_hash = from->l4_hash; 1734 }; 1735 1736 static inline void skb_copy_decrypted(struct sk_buff *to, 1737 const struct sk_buff *from) 1738 { 1739 #ifdef CONFIG_TLS_DEVICE 1740 to->decrypted = from->decrypted; 1741 #endif 1742 } 1743 1744 #ifdef NET_SKBUFF_DATA_USES_OFFSET 1745 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 1746 { 1747 return skb->head + skb->end; 1748 } 1749 1750 static inline unsigned int skb_end_offset(const struct sk_buff *skb) 1751 { 1752 return skb->end; 1753 } 1754 1755 static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) 1756 { 1757 skb->end = offset; 1758 } 1759 #else 1760 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 1761 { 1762 return skb->end; 1763 } 1764 1765 static inline unsigned int skb_end_offset(const struct sk_buff *skb) 1766 { 1767 return skb->end - skb->head; 1768 } 1769 1770 static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) 1771 { 1772 skb->end = skb->head + offset; 1773 } 1774 #endif 1775 1776 struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, 1777 struct ubuf_info *uarg); 1778 1779 void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref); 1780 1781 void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, 1782 bool success); 1783 1784 int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk, 1785 struct sk_buff *skb, struct iov_iter *from, 1786 size_t length); 1787 1788 static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb, 1789 struct msghdr *msg, int len) 1790 { 1791 return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len); 1792 } 1793 1794 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, 1795 struct msghdr *msg, int len, 1796 struct ubuf_info *uarg); 1797 1798 /* Internal */ 1799 #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) 1800 1801 static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) 1802 { 1803 return &skb_shinfo(skb)->hwtstamps; 1804 } 1805 1806 static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb) 1807 { 
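	/* Zerocopy user pages are present only when SKBFL_ZEROCOPY_ENABLE
	 * is set in the shared info flags; destructor_arg then carries the
	 * ubuf_info describing the user buffer.
	 */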
1808 bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE; 1809 1810 return is_zcopy ? skb_uarg(skb) : NULL; 1811 } 1812 1813 static inline bool skb_zcopy_pure(const struct sk_buff *skb) 1814 { 1815 return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY; 1816 } 1817 1818 static inline bool skb_zcopy_managed(const struct sk_buff *skb) 1819 { 1820 return skb_shinfo(skb)->flags & SKBFL_MANAGED_FRAG_REFS; 1821 } 1822 1823 static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1, 1824 const struct sk_buff *skb2) 1825 { 1826 return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2); 1827 } 1828 1829 static inline void net_zcopy_get(struct ubuf_info *uarg) 1830 { 1831 refcount_inc(&uarg->refcnt); 1832 } 1833 1834 static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg) 1835 { 1836 skb_shinfo(skb)->destructor_arg = uarg; 1837 skb_shinfo(skb)->flags |= uarg->flags; 1838 } 1839 1840 static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg, 1841 bool *have_ref) 1842 { 1843 if (skb && uarg && !skb_zcopy(skb)) { 1844 if (unlikely(have_ref && *have_ref)) 1845 *have_ref = false; 1846 else 1847 net_zcopy_get(uarg); 1848 skb_zcopy_init(skb, uarg); 1849 } 1850 } 1851 1852 static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val) 1853 { 1854 skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL); 1855 skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG; 1856 } 1857 1858 static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb) 1859 { 1860 return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL; 1861 } 1862 1863 static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb) 1864 { 1865 return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL); 1866 } 1867 1868 static inline void net_zcopy_put(struct ubuf_info *uarg) 1869 { 1870 if (uarg) 1871 uarg->callback(NULL, uarg, true); 1872 } 1873 1874 static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref) 1875 { 1876 if (uarg) { 1877 if (uarg->callback == msg_zerocopy_callback) 1878 msg_zerocopy_put_abort(uarg, have_uref); 1879 else if (have_uref) 1880 net_zcopy_put(uarg); 1881 } 1882 } 1883 1884 /* Release a reference on a zerocopy structure */ 1885 static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success) 1886 { 1887 struct ubuf_info *uarg = skb_zcopy(skb); 1888 1889 if (uarg) { 1890 if (!skb_zcopy_is_nouarg(skb)) 1891 uarg->callback(skb, uarg, zerocopy_success); 1892 1893 skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY; 1894 } 1895 } 1896 1897 void __skb_zcopy_downgrade_managed(struct sk_buff *skb); 1898 1899 static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb) 1900 { 1901 if (unlikely(skb_zcopy_managed(skb))) 1902 __skb_zcopy_downgrade_managed(skb); 1903 } 1904 1905 static inline void skb_mark_not_on_list(struct sk_buff *skb) 1906 { 1907 skb->next = NULL; 1908 } 1909 1910 /* Iterate through singly-linked GSO fragments of an skb. */ 1911 #define skb_list_walk_safe(first, skb, next_skb) \ 1912 for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \ 1913 (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL) 1914 1915 static inline void skb_list_del_init(struct sk_buff *skb) 1916 { 1917 __list_del_entry(&skb->list); 1918 skb_mark_not_on_list(skb); 1919 } 1920 1921 /** 1922 * skb_queue_empty - check if a queue is empty 1923 * @list: queue head 1924 * 1925 * Returns true if the queue is empty, false otherwise. 
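 *
 *	The result is only stable while the caller holds the queue lock or
 *	otherwise has exclusive access to @list; see
 *	skb_queue_empty_lockless() for lockless contexts.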
1926 */ 1927 static inline int skb_queue_empty(const struct sk_buff_head *list) 1928 { 1929 return list->next == (const struct sk_buff *) list; 1930 } 1931 1932 /** 1933 * skb_queue_empty_lockless - check if a queue is empty 1934 * @list: queue head 1935 * 1936 * Returns true if the queue is empty, false otherwise. 1937 * This variant can be used in lockless contexts. 1938 */ 1939 static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list) 1940 { 1941 return READ_ONCE(list->next) == (const struct sk_buff *) list; 1942 } 1943 1944 1945 /** 1946 * skb_queue_is_last - check if skb is the last entry in the queue 1947 * @list: queue head 1948 * @skb: buffer 1949 * 1950 * Returns true if @skb is the last buffer on the list. 1951 */ 1952 static inline bool skb_queue_is_last(const struct sk_buff_head *list, 1953 const struct sk_buff *skb) 1954 { 1955 return skb->next == (const struct sk_buff *) list; 1956 } 1957 1958 /** 1959 * skb_queue_is_first - check if skb is the first entry in the queue 1960 * @list: queue head 1961 * @skb: buffer 1962 * 1963 * Returns true if @skb is the first buffer on the list. 1964 */ 1965 static inline bool skb_queue_is_first(const struct sk_buff_head *list, 1966 const struct sk_buff *skb) 1967 { 1968 return skb->prev == (const struct sk_buff *) list; 1969 } 1970 1971 /** 1972 * skb_queue_next - return the next packet in the queue 1973 * @list: queue head 1974 * @skb: current buffer 1975 * 1976 * Return the next packet in @list after @skb. It is only valid to 1977 * call this if skb_queue_is_last() evaluates to false. 1978 */ 1979 static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list, 1980 const struct sk_buff *skb) 1981 { 1982 /* This BUG_ON may seem severe, but if we just return then we 1983 * are going to dereference garbage. 1984 */ 1985 BUG_ON(skb_queue_is_last(list, skb)); 1986 return skb->next; 1987 } 1988 1989 /** 1990 * skb_queue_prev - return the prev packet in the queue 1991 * @list: queue head 1992 * @skb: current buffer 1993 * 1994 * Return the prev packet in @list before @skb. It is only valid to 1995 * call this if skb_queue_is_first() evaluates to false. 1996 */ 1997 static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list, 1998 const struct sk_buff *skb) 1999 { 2000 /* This BUG_ON may seem severe, but if we just return then we 2001 * are going to dereference garbage. 2002 */ 2003 BUG_ON(skb_queue_is_first(list, skb)); 2004 return skb->prev; 2005 } 2006 2007 /** 2008 * skb_get - reference buffer 2009 * @skb: buffer to reference 2010 * 2011 * Makes another reference to a socket buffer and returns a pointer 2012 * to the buffer. 2013 */ 2014 static inline struct sk_buff *skb_get(struct sk_buff *skb) 2015 { 2016 refcount_inc(&skb->users); 2017 return skb; 2018 } 2019 2020 /* 2021 * If users == 1, we are the only owner and can avoid redundant atomic changes. 2022 */ 2023 2024 /** 2025 * skb_cloned - is the buffer a clone 2026 * @skb: buffer to check 2027 * 2028 * Returns true if the buffer was generated with skb_clone() and is 2029 * one of multiple shared copies of the buffer. Cloned buffers are 2030 * shared data so must not be written to under normal circumstances. 
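 *
 *	Call skb_unclone() (or pskb_expand_head() directly) to obtain a
 *	private copy of the data before modifying it.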
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/* This variant of skb_unclone() makes sure skb->truesize
 * and skb_end_offset() are not changed, whenever a new skb->head is needed.
 *
 * Indeed there is no guarantee that ksize(kmalloc(X)) == ksize(kmalloc(X))
 * when various debugging features are in place.
 */
int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri);
static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return __skb_unclone_keeptruesize(skb, pri);
	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_header_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 * __skb_header_release() - allow clones to use the headroom
 * @skb: buffer to operate on
 *
 * See "DOC: dataref and headerless skbs".
 */
static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return refcount_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt context or with spinlocks held, @pri
 *	must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt context, @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		/* Free our shared copy */
		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	__skb_peek - peek at the head of a non-empty &sk_buff_head
 *	@list_: list to peek at
 *
 *	Like skb_peek(), but the caller knows that the list is not empty.
 */
static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
{
	return list_->next;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
					    const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
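 *
 *	An illustrative use under the queue lock (variable declarations
 *	elided):
 *
 *		spin_lock_irqsave(&list->lock, flags);
 *		skb = skb_peek_tail(list);
 *		if (skb)
 *			len = skb->len;
 *		spin_unlock_irqrestore(&list->lock, flags);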
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = READ_ONCE(list_->prev);

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_queue_len - get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	skb_queue_len_lockless - get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 *	This variant can be used in lockless contexts.
 */
static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
{
	return READ_ONCE(list_->qlen);
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object, which makes it possible to initialize
 *	the list aspects of an sk_buff_head without reinitializing things
 *	like the spinlock. It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known not to be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers will need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
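 *
 *	A common pattern is to drain a shared queue into an on-stack list
 *	under the lock and then free or process the buffers locklessly
 *	(illustrative sketch; declarations elided):
 *
 *		struct sk_buff_head tmp;
 *
 *		__skb_queue_head_init(&tmp);
 *		spin_lock_irqsave(&list->lock, flags);
 *		skb_queue_splice_init(list, &tmp);
 *		spin_unlock_irqrestore(&list->lock, flags);
 *		while ((skb = __skb_dequeue(&tmp)) != NULL)
 *			consume_skb(skb);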
 */
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	/* See skb_queue_empty_lockless() and skb_peek_tail()
	 * for the opposite READ_ONCE()
	 */
	WRITE_ONCE(newsk->next, next);
	WRITE_ONCE(newsk->prev, prev);
	WRITE_ONCE(((struct sk_buff_list *)next)->prev, newsk);
	WRITE_ONCE(((struct sk_buff_list *)prev)->next, newsk);
	WRITE_ONCE(list->qlen, list->qlen + 1);
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	WRITE_ONCE(first->prev, prev);
	WRITE_ONCE(prev->next, first);

	WRITE_ONCE(last->next, next);
	WRITE_ONCE(next->prev, last);
}

/**
 *	skb_queue_splice - join two skb lists; this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer after a given buffer in a list
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, ((struct sk_buff_list *)prev)->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, ((struct sk_buff_list *)next)->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	WRITE_ONCE(list->qlen, list->qlen - 1);
	next = skb->next;
	prev = skb->prev;
	skb->next = skb->prev = NULL;
	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue(struct sk_buff_head *list);

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);

static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
	unsigned int i, len = 0;

	for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len;
}

static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
	return skb_headlen(skb) + __skb_pagelen(skb);
}

static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,
					      int i, struct page *page,
					      int off, int size)
{
	skb_frag_t *frag = &shinfo->frags[i];

	/*
	 * Propagate page pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page but rely
	 * on page_is_pfmemalloc doing the right thing(tm).
	 */
	frag->bv_page = page;
	frag->bv_offset = off;
	skb_frag_size_set(frag, size);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	__skb_fill_page_desc_noacc(skb_shinfo(skb), i, page, off, size);
	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
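 *
 * A hedged example of attaching a freshly allocated page as the only
 * fragment (the caller still updates skb->len, skb->data_len and
 * skb->truesize itself, which is what the skb_add_rx_frag() helper
 * below does on its behalf):
 *
 *	struct page *page = dev_alloc_page();
 *
 *	if (page)
 *		skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);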
2604 */ 2605 static inline void skb_fill_page_desc(struct sk_buff *skb, int i, 2606 struct page *page, int off, int size) 2607 { 2608 __skb_fill_page_desc(skb, i, page, off, size); 2609 skb_shinfo(skb)->nr_frags = i + 1; 2610 } 2611 2612 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, 2613 int size, unsigned int truesize); 2614 2615 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, 2616 unsigned int truesize); 2617 2618 #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) 2619 2620 #ifdef NET_SKBUFF_DATA_USES_OFFSET 2621 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) 2622 { 2623 return skb->head + skb->tail; 2624 } 2625 2626 static inline void skb_reset_tail_pointer(struct sk_buff *skb) 2627 { 2628 skb->tail = skb->data - skb->head; 2629 } 2630 2631 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) 2632 { 2633 skb_reset_tail_pointer(skb); 2634 skb->tail += offset; 2635 } 2636 2637 #else /* NET_SKBUFF_DATA_USES_OFFSET */ 2638 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) 2639 { 2640 return skb->tail; 2641 } 2642 2643 static inline void skb_reset_tail_pointer(struct sk_buff *skb) 2644 { 2645 skb->tail = skb->data; 2646 } 2647 2648 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) 2649 { 2650 skb->tail = skb->data + offset; 2651 } 2652 2653 #endif /* NET_SKBUFF_DATA_USES_OFFSET */ 2654 2655 /* 2656 * Add data to an sk_buff 2657 */ 2658 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); 2659 void *skb_put(struct sk_buff *skb, unsigned int len); 2660 static inline void *__skb_put(struct sk_buff *skb, unsigned int len) 2661 { 2662 void *tmp = skb_tail_pointer(skb); 2663 SKB_LINEAR_ASSERT(skb); 2664 skb->tail += len; 2665 skb->len += len; 2666 return tmp; 2667 } 2668 2669 static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len) 2670 { 2671 void *tmp = __skb_put(skb, len); 2672 2673 memset(tmp, 0, len); 2674 return tmp; 2675 } 2676 2677 static inline void *__skb_put_data(struct sk_buff *skb, const void *data, 2678 unsigned int len) 2679 { 2680 void *tmp = __skb_put(skb, len); 2681 2682 memcpy(tmp, data, len); 2683 return tmp; 2684 } 2685 2686 static inline void __skb_put_u8(struct sk_buff *skb, u8 val) 2687 { 2688 *(u8 *)__skb_put(skb, 1) = val; 2689 } 2690 2691 static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len) 2692 { 2693 void *tmp = skb_put(skb, len); 2694 2695 memset(tmp, 0, len); 2696 2697 return tmp; 2698 } 2699 2700 static inline void *skb_put_data(struct sk_buff *skb, const void *data, 2701 unsigned int len) 2702 { 2703 void *tmp = skb_put(skb, len); 2704 2705 memcpy(tmp, data, len); 2706 2707 return tmp; 2708 } 2709 2710 static inline void skb_put_u8(struct sk_buff *skb, u8 val) 2711 { 2712 *(u8 *)skb_put(skb, 1) = val; 2713 } 2714 2715 void *skb_push(struct sk_buff *skb, unsigned int len); 2716 static inline void *__skb_push(struct sk_buff *skb, unsigned int len) 2717 { 2718 skb->data -= len; 2719 skb->len += len; 2720 return skb->data; 2721 } 2722 2723 void *skb_pull(struct sk_buff *skb, unsigned int len); 2724 static inline void *__skb_pull(struct sk_buff *skb, unsigned int len) 2725 { 2726 skb->len -= len; 2727 if (unlikely(skb->len < skb->data_len)) { 2728 #if defined(CONFIG_DEBUG_NET) 2729 skb->len += len; 2730 pr_err("__skb_pull(len=%u)\n", len); 2731 skb_dump(KERN_ERR, skb, false); 2732 #endif 2733 BUG(); 2734 } 2735 return skb->data += len; 2736 } 2737 2738 static 
inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len) 2739 { 2740 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); 2741 } 2742 2743 void *skb_pull_data(struct sk_buff *skb, size_t len); 2744 2745 void *__pskb_pull_tail(struct sk_buff *skb, int delta); 2746 2747 static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len) 2748 { 2749 if (len > skb_headlen(skb) && 2750 !__pskb_pull_tail(skb, len - skb_headlen(skb))) 2751 return NULL; 2752 skb->len -= len; 2753 return skb->data += len; 2754 } 2755 2756 static inline void *pskb_pull(struct sk_buff *skb, unsigned int len) 2757 { 2758 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len); 2759 } 2760 2761 static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len) 2762 { 2763 if (likely(len <= skb_headlen(skb))) 2764 return true; 2765 if (unlikely(len > skb->len)) 2766 return false; 2767 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; 2768 } 2769 2770 void skb_condense(struct sk_buff *skb); 2771 2772 /** 2773 * skb_headroom - bytes at buffer head 2774 * @skb: buffer to check 2775 * 2776 * Return the number of bytes of free space at the head of an &sk_buff. 2777 */ 2778 static inline unsigned int skb_headroom(const struct sk_buff *skb) 2779 { 2780 return skb->data - skb->head; 2781 } 2782 2783 /** 2784 * skb_tailroom - bytes at buffer end 2785 * @skb: buffer to check 2786 * 2787 * Return the number of bytes of free space at the tail of an sk_buff 2788 */ 2789 static inline int skb_tailroom(const struct sk_buff *skb) 2790 { 2791 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; 2792 } 2793 2794 /** 2795 * skb_availroom - bytes at buffer end 2796 * @skb: buffer to check 2797 * 2798 * Return the number of bytes of free space at the tail of an sk_buff 2799 * allocated by sk_stream_alloc() 2800 */ 2801 static inline int skb_availroom(const struct sk_buff *skb) 2802 { 2803 if (skb_is_nonlinear(skb)) 2804 return 0; 2805 2806 return skb->end - skb->tail - skb->reserved_tailroom; 2807 } 2808 2809 /** 2810 * skb_reserve - adjust headroom 2811 * @skb: buffer to alter 2812 * @len: bytes to move 2813 * 2814 * Increase the headroom of an empty &sk_buff by reducing the tail 2815 * room. This is only allowed for an empty buffer. 2816 */ 2817 static inline void skb_reserve(struct sk_buff *skb, int len) 2818 { 2819 skb->data += len; 2820 skb->tail += len; 2821 } 2822 2823 /** 2824 * skb_tailroom_reserve - adjust reserved_tailroom 2825 * @skb: buffer to alter 2826 * @mtu: maximum amount of headlen permitted 2827 * @needed_tailroom: minimum amount of reserved_tailroom 2828 * 2829 * Set reserved_tailroom so that headlen can be as large as possible but 2830 * not larger than mtu and tailroom cannot be smaller than 2831 * needed_tailroom. 2832 * The required headroom should already have been reserved before using 2833 * this function. 
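 *
 * For example, with @mtu = 1500 and @needed_tailroom = 16 on a fresh
 * linear skb that has 2000 bytes of tailroom, reserved_tailroom becomes
 * 500, leaving exactly 1500 bytes - the MTU - available for headlen
 * while at least 16 bytes stay reserved at the tail.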
2834 */ 2835 static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu, 2836 unsigned int needed_tailroom) 2837 { 2838 SKB_LINEAR_ASSERT(skb); 2839 if (mtu < skb_tailroom(skb) - needed_tailroom) 2840 /* use at most mtu */ 2841 skb->reserved_tailroom = skb_tailroom(skb) - mtu; 2842 else 2843 /* use up to all available space */ 2844 skb->reserved_tailroom = needed_tailroom; 2845 } 2846 2847 #define ENCAP_TYPE_ETHER 0 2848 #define ENCAP_TYPE_IPPROTO 1 2849 2850 static inline void skb_set_inner_protocol(struct sk_buff *skb, 2851 __be16 protocol) 2852 { 2853 skb->inner_protocol = protocol; 2854 skb->inner_protocol_type = ENCAP_TYPE_ETHER; 2855 } 2856 2857 static inline void skb_set_inner_ipproto(struct sk_buff *skb, 2858 __u8 ipproto) 2859 { 2860 skb->inner_ipproto = ipproto; 2861 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO; 2862 } 2863 2864 static inline void skb_reset_inner_headers(struct sk_buff *skb) 2865 { 2866 skb->inner_mac_header = skb->mac_header; 2867 skb->inner_network_header = skb->network_header; 2868 skb->inner_transport_header = skb->transport_header; 2869 } 2870 2871 static inline void skb_reset_mac_len(struct sk_buff *skb) 2872 { 2873 skb->mac_len = skb->network_header - skb->mac_header; 2874 } 2875 2876 static inline unsigned char *skb_inner_transport_header(const struct sk_buff 2877 *skb) 2878 { 2879 return skb->head + skb->inner_transport_header; 2880 } 2881 2882 static inline int skb_inner_transport_offset(const struct sk_buff *skb) 2883 { 2884 return skb_inner_transport_header(skb) - skb->data; 2885 } 2886 2887 static inline void skb_reset_inner_transport_header(struct sk_buff *skb) 2888 { 2889 skb->inner_transport_header = skb->data - skb->head; 2890 } 2891 2892 static inline void skb_set_inner_transport_header(struct sk_buff *skb, 2893 const int offset) 2894 { 2895 skb_reset_inner_transport_header(skb); 2896 skb->inner_transport_header += offset; 2897 } 2898 2899 static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb) 2900 { 2901 return skb->head + skb->inner_network_header; 2902 } 2903 2904 static inline void skb_reset_inner_network_header(struct sk_buff *skb) 2905 { 2906 skb->inner_network_header = skb->data - skb->head; 2907 } 2908 2909 static inline void skb_set_inner_network_header(struct sk_buff *skb, 2910 const int offset) 2911 { 2912 skb_reset_inner_network_header(skb); 2913 skb->inner_network_header += offset; 2914 } 2915 2916 static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) 2917 { 2918 return skb->head + skb->inner_mac_header; 2919 } 2920 2921 static inline void skb_reset_inner_mac_header(struct sk_buff *skb) 2922 { 2923 skb->inner_mac_header = skb->data - skb->head; 2924 } 2925 2926 static inline void skb_set_inner_mac_header(struct sk_buff *skb, 2927 const int offset) 2928 { 2929 skb_reset_inner_mac_header(skb); 2930 skb->inner_mac_header += offset; 2931 } 2932 static inline bool skb_transport_header_was_set(const struct sk_buff *skb) 2933 { 2934 return skb->transport_header != (typeof(skb->transport_header))~0U; 2935 } 2936 2937 static inline unsigned char *skb_transport_header(const struct sk_buff *skb) 2938 { 2939 DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb)); 2940 return skb->head + skb->transport_header; 2941 } 2942 2943 static inline void skb_reset_transport_header(struct sk_buff *skb) 2944 { 2945 skb->transport_header = skb->data - skb->head; 2946 } 2947 2948 static inline void skb_set_transport_header(struct sk_buff *skb, 2949 const int offset) 2950 { 2951 
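	/* Header offsets are stored relative to skb->head: the reset
	 * helper records the current skb->data position, then @offset
	 * (relative to skb->data) is added on top.
	 */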
skb_reset_transport_header(skb); 2952 skb->transport_header += offset; 2953 } 2954 2955 static inline unsigned char *skb_network_header(const struct sk_buff *skb) 2956 { 2957 return skb->head + skb->network_header; 2958 } 2959 2960 static inline void skb_reset_network_header(struct sk_buff *skb) 2961 { 2962 skb->network_header = skb->data - skb->head; 2963 } 2964 2965 static inline void skb_set_network_header(struct sk_buff *skb, const int offset) 2966 { 2967 skb_reset_network_header(skb); 2968 skb->network_header += offset; 2969 } 2970 2971 static inline unsigned char *skb_mac_header(const struct sk_buff *skb) 2972 { 2973 return skb->head + skb->mac_header; 2974 } 2975 2976 static inline int skb_mac_offset(const struct sk_buff *skb) 2977 { 2978 return skb_mac_header(skb) - skb->data; 2979 } 2980 2981 static inline u32 skb_mac_header_len(const struct sk_buff *skb) 2982 { 2983 return skb->network_header - skb->mac_header; 2984 } 2985 2986 static inline int skb_mac_header_was_set(const struct sk_buff *skb) 2987 { 2988 return skb->mac_header != (typeof(skb->mac_header))~0U; 2989 } 2990 2991 static inline void skb_unset_mac_header(struct sk_buff *skb) 2992 { 2993 skb->mac_header = (typeof(skb->mac_header))~0U; 2994 } 2995 2996 static inline void skb_reset_mac_header(struct sk_buff *skb) 2997 { 2998 skb->mac_header = skb->data - skb->head; 2999 } 3000 3001 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) 3002 { 3003 skb_reset_mac_header(skb); 3004 skb->mac_header += offset; 3005 } 3006 3007 static inline void skb_pop_mac_header(struct sk_buff *skb) 3008 { 3009 skb->mac_header = skb->network_header; 3010 } 3011 3012 static inline void skb_probe_transport_header(struct sk_buff *skb) 3013 { 3014 struct flow_keys_basic keys; 3015 3016 if (skb_transport_header_was_set(skb)) 3017 return; 3018 3019 if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, 3020 NULL, 0, 0, 0, 0)) 3021 skb_set_transport_header(skb, keys.control.thoff); 3022 } 3023 3024 static inline void skb_mac_header_rebuild(struct sk_buff *skb) 3025 { 3026 if (skb_mac_header_was_set(skb)) { 3027 const unsigned char *old_mac = skb_mac_header(skb); 3028 3029 skb_set_mac_header(skb, -skb->mac_len); 3030 memmove(skb_mac_header(skb), old_mac, skb->mac_len); 3031 } 3032 } 3033 3034 static inline int skb_checksum_start_offset(const struct sk_buff *skb) 3035 { 3036 return skb->csum_start - skb_headroom(skb); 3037 } 3038 3039 static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) 3040 { 3041 return skb->head + skb->csum_start; 3042 } 3043 3044 static inline int skb_transport_offset(const struct sk_buff *skb) 3045 { 3046 return skb_transport_header(skb) - skb->data; 3047 } 3048 3049 static inline u32 skb_network_header_len(const struct sk_buff *skb) 3050 { 3051 return skb->transport_header - skb->network_header; 3052 } 3053 3054 static inline u32 skb_inner_network_header_len(const struct sk_buff *skb) 3055 { 3056 return skb->inner_transport_header - skb->inner_network_header; 3057 } 3058 3059 static inline int skb_network_offset(const struct sk_buff *skb) 3060 { 3061 return skb_network_header(skb) - skb->data; 3062 } 3063 3064 static inline int skb_inner_network_offset(const struct sk_buff *skb) 3065 { 3066 return skb_inner_network_header(skb) - skb->data; 3067 } 3068 3069 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) 3070 { 3071 return pskb_may_pull(skb, skb_network_offset(skb) + len); 3072 } 3073 3074 /* 3075 * CPUs often take a performance hit when accessing 
unaligned memory
 * locations. The actual performance hit varies: it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes, network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade-off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom; you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce the average number of cache lines per packet.
 * get_rps_cpu() for example only accesses one 64-byte aligned block:
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
	if (WARN_ON(skb_is_nonlinear(skb)))
		return;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to
 *	out-of-memory.
3160 */ 3161 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) 3162 { 3163 int err = pskb_trim(skb, len); 3164 BUG_ON(err); 3165 } 3166 3167 static inline int __skb_grow(struct sk_buff *skb, unsigned int len) 3168 { 3169 unsigned int diff = len - skb->len; 3170 3171 if (skb_tailroom(skb) < diff) { 3172 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb), 3173 GFP_ATOMIC); 3174 if (ret) 3175 return ret; 3176 } 3177 __skb_set_length(skb, len); 3178 return 0; 3179 } 3180 3181 /** 3182 * skb_orphan - orphan a buffer 3183 * @skb: buffer to orphan 3184 * 3185 * If a buffer currently has an owner then we call the owner's 3186 * destructor function and make the @skb unowned. The buffer continues 3187 * to exist but is no longer charged to its former owner. 3188 */ 3189 static inline void skb_orphan(struct sk_buff *skb) 3190 { 3191 if (skb->destructor) { 3192 skb->destructor(skb); 3193 skb->destructor = NULL; 3194 skb->sk = NULL; 3195 } else { 3196 BUG_ON(skb->sk); 3197 } 3198 } 3199 3200 /** 3201 * skb_orphan_frags - orphan the frags contained in a buffer 3202 * @skb: buffer to orphan frags from 3203 * @gfp_mask: allocation mask for replacement pages 3204 * 3205 * For each frag in the SKB which needs a destructor (i.e. has an 3206 * owner) create a copy of that frag and release the original 3207 * page by calling the destructor. 3208 */ 3209 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) 3210 { 3211 if (likely(!skb_zcopy(skb))) 3212 return 0; 3213 if (skb_shinfo(skb)->flags & SKBFL_DONT_ORPHAN) 3214 return 0; 3215 return skb_copy_ubufs(skb, gfp_mask); 3216 } 3217 3218 /* Frags must be orphaned, even if refcounted, if skb might loop to rx path */ 3219 static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask) 3220 { 3221 if (likely(!skb_zcopy(skb))) 3222 return 0; 3223 return skb_copy_ubufs(skb, gfp_mask); 3224 } 3225 3226 /** 3227 * __skb_queue_purge - empty a list 3228 * @list: list to empty 3229 * 3230 * Delete all buffers on an &sk_buff list. Each buffer is removed from 3231 * the list and one reference dropped. This function does not take the 3232 * list lock and the caller must hold the relevant locks to use it. 3233 */ 3234 static inline void __skb_queue_purge(struct sk_buff_head *list) 3235 { 3236 struct sk_buff *skb; 3237 while ((skb = __skb_dequeue(list)) != NULL) 3238 kfree_skb(skb); 3239 } 3240 void skb_queue_purge(struct sk_buff_head *list); 3241 3242 unsigned int skb_rbtree_purge(struct rb_root *root); 3243 3244 void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask); 3245 3246 /** 3247 * netdev_alloc_frag - allocate a page fragment 3248 * @fragsz: fragment size 3249 * 3250 * Allocates a frag from a page for receive buffer. 3251 * Uses GFP_ATOMIC allocations. 3252 */ 3253 static inline void *netdev_alloc_frag(unsigned int fragsz) 3254 { 3255 return __netdev_alloc_frag_align(fragsz, ~0u); 3256 } 3257 3258 static inline void *netdev_alloc_frag_align(unsigned int fragsz, 3259 unsigned int align) 3260 { 3261 WARN_ON_ONCE(!is_power_of_2(align)); 3262 return __netdev_alloc_frag_align(fragsz, -align); 3263 } 3264 3265 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, 3266 gfp_t gfp_mask); 3267 3268 /** 3269 * netdev_alloc_skb - allocate an skbuff for rx on a specific device 3270 * @dev: network device to receive on 3271 * @length: length to allocate 3272 * 3273 * Allocate a new &sk_buff and assign it a usage count of one. 
The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}

static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
							   unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
							unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

static inline void skb_free_frag(void *addr)
{
	page_frag_free(addr);
}

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);

static inline void *napi_alloc_frag(unsigned int fragsz)
{
	return __napi_alloc_frag_align(fragsz, ~0u);
}

static inline void *napi_alloc_frag_align(unsigned int fragsz,
					  unsigned int align)
{
	WARN_ON_ONCE(!is_power_of_2(align));
	return __napi_alloc_frag_align(fragsz, -align);
}

struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void napi_skb_free_stolen_head(struct sk_buff *skb);
void __kfree_skb_defer(struct sk_buff *skb);

/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1.  This is for device Rx, therefore a cold page is preferred.
	 * 2.  The expectation is the user wants a compound page.
	 * 3.  If requesting an order 0 page it will not be compound
	 *     due to the check to see if order has a value in prep_new_page
	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *     code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}

/**
 * __dev_alloc_page - allocate a page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return dev_alloc_pages(0);
}

/**
 * dev_page_is_reusable - check whether a page can be reused for network Rx
 * @page: the page to test
 *
 * A page shouldn't be considered for reusing/recycling if it was allocated
 * under memory pressure or at a distant memory node.
 *
 * Returns false if this page should be returned to page allocator, true
 * otherwise.
 */
static inline bool dev_page_is_reusable(const struct page *page)
{
	return likely(page_to_nid(page) == numa_mem_id() &&
		      !page_is_pfmemalloc(page));
}

/**
 * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 * @page: The page that was allocated from skb_alloc_page
 * @skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(const struct page *page,
					    struct sk_buff *skb)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}

/**
 * skb_frag_off() - Returns the offset of a skb fragment
 * @frag: the paged fragment
 */
static inline unsigned int skb_frag_off(const skb_frag_t *frag)
{
	return frag->bv_offset;
}

/**
 * skb_frag_off_add() - Increments the offset of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to add
 */
static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
{
	frag->bv_offset += delta;
}

/**
 * skb_frag_off_set() - Sets the offset of a skb fragment
 * @frag: skb fragment
 * @offset: offset of fragment
 */
static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
{
	frag->bv_offset = offset;
}

/**
 * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment
 * @fragto: skb fragment where offset is set
 * @fragfrom: skb fragment offset is copied from
 */
static inline void skb_frag_off_copy(skb_frag_t *fragto,
				     const skb_frag_t *fragfrom)
{
	fragto->bv_offset = fragfrom->bv_offset;
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->bv_page;
}

/**
 * __skb_frag_ref - take an additional reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
3488 * @skb: the buffer 3489 * @f: the fragment offset. 3490 * 3491 * Takes an additional reference on the @f'th paged fragment of @skb. 3492 */ 3493 static inline void skb_frag_ref(struct sk_buff *skb, int f) 3494 { 3495 __skb_frag_ref(&skb_shinfo(skb)->frags[f]); 3496 } 3497 3498 /** 3499 * __skb_frag_unref - release a reference on a paged fragment. 3500 * @frag: the paged fragment 3501 * @recycle: recycle the page if allocated via page_pool 3502 * 3503 * Releases a reference on the paged fragment @frag 3504 * or recycles the page via the page_pool API. 3505 */ 3506 static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) 3507 { 3508 struct page *page = skb_frag_page(frag); 3509 3510 #ifdef CONFIG_PAGE_POOL 3511 if (recycle && page_pool_return_skb_page(page)) 3512 return; 3513 #endif 3514 put_page(page); 3515 } 3516 3517 /** 3518 * skb_frag_unref - release a reference on a paged fragment of an skb. 3519 * @skb: the buffer 3520 * @f: the fragment offset 3521 * 3522 * Releases a reference on the @f'th paged fragment of @skb. 3523 */ 3524 static inline void skb_frag_unref(struct sk_buff *skb, int f) 3525 { 3526 struct skb_shared_info *shinfo = skb_shinfo(skb); 3527 3528 if (!skb_zcopy_managed(skb)) 3529 __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle); 3530 } 3531 3532 /** 3533 * skb_frag_address - gets the address of the data contained in a paged fragment 3534 * @frag: the paged fragment buffer 3535 * 3536 * Returns the address of the data within @frag. The page must already 3537 * be mapped. 3538 */ 3539 static inline void *skb_frag_address(const skb_frag_t *frag) 3540 { 3541 return page_address(skb_frag_page(frag)) + skb_frag_off(frag); 3542 } 3543 3544 /** 3545 * skb_frag_address_safe - gets the address of the data contained in a paged fragment 3546 * @frag: the paged fragment buffer 3547 * 3548 * Returns the address of the data within @frag. Checks that the page 3549 * is mapped and returns %NULL otherwise. 3550 */ 3551 static inline void *skb_frag_address_safe(const skb_frag_t *frag) 3552 { 3553 void *ptr = page_address(skb_frag_page(frag)); 3554 if (unlikely(!ptr)) 3555 return NULL; 3556 3557 return ptr + skb_frag_off(frag); 3558 } 3559 3560 /** 3561 * skb_frag_page_copy() - sets the page in a fragment from another fragment 3562 * @fragto: skb fragment where page is set 3563 * @fragfrom: skb fragment page is copied from 3564 */ 3565 static inline void skb_frag_page_copy(skb_frag_t *fragto, 3566 const skb_frag_t *fragfrom) 3567 { 3568 fragto->bv_page = fragfrom->bv_page; 3569 } 3570 3571 /** 3572 * __skb_frag_set_page - sets the page contained in a paged fragment 3573 * @frag: the paged fragment 3574 * @page: the page to set 3575 * 3576 * Sets the fragment @frag to contain @page. 3577 */ 3578 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) 3579 { 3580 frag->bv_page = page; 3581 } 3582 3583 /** 3584 * skb_frag_set_page - sets the page contained in a paged fragment of an skb 3585 * @skb: the buffer 3586 * @f: the fragment offset 3587 * @page: the page to set 3588 * 3589 * Sets the @f'th fragment of @skb to contain @page. 
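 *
 * Does not take a reference on @page; the caller must ensure that one
 * is held on its behalf.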
3590 */ 3591 static inline void skb_frag_set_page(struct sk_buff *skb, int f, 3592 struct page *page) 3593 { 3594 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page); 3595 } 3596 3597 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio); 3598 3599 /** 3600 * skb_frag_dma_map - maps a paged fragment via the DMA API 3601 * @dev: the device to map the fragment to 3602 * @frag: the paged fragment to map 3603 * @offset: the offset within the fragment (starting at the 3604 * fragment's own offset) 3605 * @size: the number of bytes to map 3606 * @dir: the direction of the mapping (``DMA_*``) 3607 * 3608 * Maps the page associated with @frag to @dev. 3609 */ 3610 static inline dma_addr_t skb_frag_dma_map(struct device *dev, 3611 const skb_frag_t *frag, 3612 size_t offset, size_t size, 3613 enum dma_data_direction dir) 3614 { 3615 return dma_map_page(dev, skb_frag_page(frag), 3616 skb_frag_off(frag) + offset, size, dir); 3617 } 3618 3619 static inline struct sk_buff *pskb_copy(struct sk_buff *skb, 3620 gfp_t gfp_mask) 3621 { 3622 return __pskb_copy(skb, skb_headroom(skb), gfp_mask); 3623 } 3624 3625 3626 static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb, 3627 gfp_t gfp_mask) 3628 { 3629 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true); 3630 } 3631 3632 3633 /** 3634 * skb_clone_writable - is the header of a clone writable 3635 * @skb: buffer to check 3636 * @len: length up to which to write 3637 * 3638 * Returns true if modifying the header part of the cloned buffer 3639 * does not require the data to be copied. 3640 */ 3641 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len) 3642 { 3643 return !skb_header_cloned(skb) && 3644 skb_headroom(skb) + len <= skb->hdr_len; 3645 } 3646 3647 static inline int skb_try_make_writable(struct sk_buff *skb, 3648 unsigned int write_len) 3649 { 3650 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) && 3651 pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 3652 } 3653 3654 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, 3655 int cloned) 3656 { 3657 int delta = 0; 3658 3659 if (headroom > skb_headroom(skb)) 3660 delta = headroom - skb_headroom(skb); 3661 3662 if (delta || cloned) 3663 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, 3664 GFP_ATOMIC); 3665 return 0; 3666 } 3667 3668 /** 3669 * skb_cow - copy header of skb when it is required 3670 * @skb: buffer to cow 3671 * @headroom: needed headroom 3672 * 3673 * If the skb passed lacks sufficient headroom or its data part 3674 * is shared, data is reallocated. If reallocation fails, an error 3675 * is returned and the original skb is not changed. 3676 * 3677 * The result is skb with writable area skb->head...skb->tail 3678 * and at least @headroom of space at head. 3679 */ 3680 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) 3681 { 3682 return __skb_cow(skb, headroom, skb_cloned(skb)); 3683 } 3684 3685 /** 3686 * skb_cow_head - skb_cow but only making the head writable 3687 * @skb: buffer to cow 3688 * @headroom: needed headroom 3689 * 3690 * This function is identical to skb_cow except that we replace the 3691 * skb_cloned check by skb_header_cloned. It should be used when 3692 * you only need to push on some header and do not need to modify 3693 * the data.
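 *
 * Illustrative sketch (not part of the original header): a caller about
 * to push a VLAN tag would typically do
 *
 *	if (skb_cow_head(skb, VLAN_HLEN))
 *		goto drop;
 *	__skb_push(skb, VLAN_HLEN);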
3694 */ 3695 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) 3696 { 3697 return __skb_cow(skb, headroom, skb_header_cloned(skb)); 3698 } 3699 3700 /** 3701 * skb_padto - pad an skbuff up to a minimal size 3702 * @skb: buffer to pad 3703 * @len: minimal length 3704 * 3705 * Pads up a buffer to ensure the trailing bytes exist and are 3706 * blanked. If the buffer already contains sufficient data it 3707 * is untouched. Otherwise it is extended. Returns zero on 3708 * success. The skb is freed on error. 3709 */ 3710 static inline int skb_padto(struct sk_buff *skb, unsigned int len) 3711 { 3712 unsigned int size = skb->len; 3713 if (likely(size >= len)) 3714 return 0; 3715 return skb_pad(skb, len - size); 3716 } 3717 3718 /** 3719 * __skb_put_padto - increase size and pad an skbuff up to a minimal size 3720 * @skb: buffer to pad 3721 * @len: minimal length 3722 * @free_on_error: free buffer on error 3723 * 3724 * Pads up a buffer to ensure the trailing bytes exist and are 3725 * blanked. If the buffer already contains sufficient data it 3726 * is untouched. Otherwise it is extended. Returns zero on 3727 * success. The skb is freed on error if @free_on_error is true. 3728 */ 3729 static inline int __must_check __skb_put_padto(struct sk_buff *skb, 3730 unsigned int len, 3731 bool free_on_error) 3732 { 3733 unsigned int size = skb->len; 3734 3735 if (unlikely(size < len)) { 3736 len -= size; 3737 if (__skb_pad(skb, len, free_on_error)) 3738 return -ENOMEM; 3739 __skb_put(skb, len); 3740 } 3741 return 0; 3742 } 3743 3744 /** 3745 * skb_put_padto - increase size and pad an skbuff up to a minimal size 3746 * @skb: buffer to pad 3747 * @len: minimal length 3748 * 3749 * Pads up a buffer to ensure the trailing bytes exist and are 3750 * blanked. If the buffer already contains sufficient data it 3751 * is untouched. Otherwise it is extended. Returns zero on 3752 * success. The skb is freed on error. 3753 */ 3754 static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len) 3755 { 3756 return __skb_put_padto(skb, len, true); 3757 } 3758 3759 static inline int skb_add_data(struct sk_buff *skb, 3760 struct iov_iter *from, int copy) 3761 { 3762 const int off = skb->len; 3763 3764 if (skb->ip_summed == CHECKSUM_NONE) { 3765 __wsum csum = 0; 3766 if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy, 3767 &csum, from)) { 3768 skb->csum = csum_block_add(skb->csum, csum, off); 3769 return 0; 3770 } 3771 } else if (copy_from_iter_full(skb_put(skb, copy), copy, from)) 3772 return 0; 3773 3774 __skb_trim(skb, off); 3775 return -EFAULT; 3776 } 3777 3778 static inline bool skb_can_coalesce(struct sk_buff *skb, int i, 3779 const struct page *page, int off) 3780 { 3781 if (skb_zcopy(skb)) 3782 return false; 3783 if (i) { 3784 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 3785 3786 return page == skb_frag_page(frag) && 3787 off == skb_frag_off(frag) + skb_frag_size(frag); 3788 } 3789 return false; 3790 } 3791 3792 static inline int __skb_linearize(struct sk_buff *skb) 3793 { 3794 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM; 3795 } 3796 3797 /** 3798 * skb_linearize - convert paged skb to linear one 3799 * @skb: buffer to linearize 3800 * 3801 * If there is no free memory -ENOMEM is returned, otherwise zero 3802 * is returned and the old skb data released. 3803 */ 3804 static inline int skb_linearize(struct sk_buff *skb) 3805 { 3806 return skb_is_nonlinear(skb) ?
__skb_linearize(skb) : 0; 3807 } 3808 3809 /** 3810 * skb_has_shared_frag - can any frag be overwritten 3811 * @skb: buffer to test 3812 * 3813 * Return true if the skb has at least one frag that might be modified 3814 * by an external entity (as in vmsplice()/sendfile()) 3815 */ 3816 static inline bool skb_has_shared_frag(const struct sk_buff *skb) 3817 { 3818 return skb_is_nonlinear(skb) && 3819 skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG; 3820 } 3821 3822 /** 3823 * skb_linearize_cow - make sure skb is linear and writable 3824 * @skb: buffer to process 3825 * 3826 * If there is no free memory -ENOMEM is returned, otherwise zero 3827 * is returned and the old skb data released. 3828 */ 3829 static inline int skb_linearize_cow(struct sk_buff *skb) 3830 { 3831 return skb_is_nonlinear(skb) || skb_cloned(skb) ? 3832 __skb_linearize(skb) : 0; 3833 } 3834 3835 static __always_inline void 3836 __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len, 3837 unsigned int off) 3838 { 3839 if (skb->ip_summed == CHECKSUM_COMPLETE) 3840 skb->csum = csum_block_sub(skb->csum, 3841 csum_partial(start, len, 0), off); 3842 else if (skb->ip_summed == CHECKSUM_PARTIAL && 3843 skb_checksum_start_offset(skb) < 0) 3844 skb->ip_summed = CHECKSUM_NONE; 3845 } 3846 3847 /** 3848 * skb_postpull_rcsum - update checksum for received skb after pull 3849 * @skb: buffer to update 3850 * @start: start of data before pull 3851 * @len: length of data pulled 3852 * 3853 * After doing a pull on a received packet, you need to call this to 3854 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to 3855 * CHECKSUM_NONE so that it can be recomputed from scratch. 3856 */ 3857 static inline void skb_postpull_rcsum(struct sk_buff *skb, 3858 const void *start, unsigned int len) 3859 { 3860 if (skb->ip_summed == CHECKSUM_COMPLETE) 3861 skb->csum = wsum_negate(csum_partial(start, len, 3862 wsum_negate(skb->csum))); 3863 else if (skb->ip_summed == CHECKSUM_PARTIAL && 3864 skb_checksum_start_offset(skb) < 0) 3865 skb->ip_summed = CHECKSUM_NONE; 3866 } 3867 3868 static __always_inline void 3869 __skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len, 3870 unsigned int off) 3871 { 3872 if (skb->ip_summed == CHECKSUM_COMPLETE) 3873 skb->csum = csum_block_add(skb->csum, 3874 csum_partial(start, len, 0), off); 3875 } 3876 3877 /** 3878 * skb_postpush_rcsum - update checksum for received skb after push 3879 * @skb: buffer to update 3880 * @start: start of data after push 3881 * @len: length of data pushed 3882 * 3883 * After doing a push on a received packet, you need to call this to 3884 * update the CHECKSUM_COMPLETE checksum. 3885 */ 3886 static inline void skb_postpush_rcsum(struct sk_buff *skb, 3887 const void *start, unsigned int len) 3888 { 3889 __skb_postpush_rcsum(skb, start, len, 0); 3890 } 3891 3892 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); 3893 3894 /** 3895 * skb_push_rcsum - push skb and update receive checksum 3896 * @skb: buffer to update 3897 * @len: length of data pushed 3898 * 3899 * This function performs an skb_push on the packet and updates 3900 * the CHECKSUM_COMPLETE checksum. It should be used on 3901 * receive path processing instead of skb_push unless you know 3902 * that the checksum difference is zero (e.g., a valid IP header) 3903 * or you are setting ip_summed to CHECKSUM_NONE.
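 *
 * Illustrative sketch (not part of the original header): code that pulled
 * an outer header with skb_pull_rcsum() can restore it without breaking
 * the CHECKSUM_COMPLETE bookkeeping:
 *
 *	skb_pull_rcsum(skb, hlen);
 *	... inspect the inner headers ...
 *	skb_push_rcsum(skb, hlen);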
3904 */ 3905 static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len) 3906 { 3907 skb_push(skb, len); 3908 skb_postpush_rcsum(skb, skb->data, len); 3909 return skb->data; 3910 } 3911 3912 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); 3913 /** 3914 * pskb_trim_rcsum - trim received skb and update checksum 3915 * @skb: buffer to trim 3916 * @len: new length 3917 * 3918 * This is exactly the same as pskb_trim except that it ensures the 3919 * checksums of received packets are still valid after the operation. 3920 * It can change skb pointers. 3921 */ 3922 3923 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) 3924 { 3925 if (likely(len >= skb->len)) 3926 return 0; 3927 return pskb_trim_rcsum_slow(skb, len); 3928 } 3929 3930 static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len) 3931 { 3932 if (skb->ip_summed == CHECKSUM_COMPLETE) 3933 skb->ip_summed = CHECKSUM_NONE; 3934 __skb_trim(skb, len); 3935 return 0; 3936 } 3937 3938 static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) 3939 { 3940 if (skb->ip_summed == CHECKSUM_COMPLETE) 3941 skb->ip_summed = CHECKSUM_NONE; 3942 return __skb_grow(skb, len); 3943 } 3944 3945 #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode) 3946 #define skb_rb_first(root) rb_to_skb(rb_first(root)) 3947 #define skb_rb_last(root) rb_to_skb(rb_last(root)) 3948 #define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode)) 3949 #define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode)) 3950 3951 #define skb_queue_walk(queue, skb) \ 3952 for (skb = (queue)->next; \ 3953 skb != (struct sk_buff *)(queue); \ 3954 skb = skb->next) 3955 3956 #define skb_queue_walk_safe(queue, skb, tmp) \ 3957 for (skb = (queue)->next, tmp = skb->next; \ 3958 skb != (struct sk_buff *)(queue); \ 3959 skb = tmp, tmp = skb->next) 3960 3961 #define skb_queue_walk_from(queue, skb) \ 3962 for (; skb != (struct sk_buff *)(queue); \ 3963 skb = skb->next) 3964 3965 #define skb_rbtree_walk(skb, root) \ 3966 for (skb = skb_rb_first(root); skb != NULL; \ 3967 skb = skb_rb_next(skb)) 3968 3969 #define skb_rbtree_walk_from(skb) \ 3970 for (; skb != NULL; \ 3971 skb = skb_rb_next(skb)) 3972 3973 #define skb_rbtree_walk_from_safe(skb, tmp) \ 3974 for (; tmp = skb ?
skb_rb_next(skb) : NULL, (skb != NULL); \ 3975 skb = tmp) 3976 3977 #define skb_queue_walk_from_safe(queue, skb, tmp) \ 3978 for (tmp = skb->next; \ 3979 skb != (struct sk_buff *)(queue); \ 3980 skb = tmp, tmp = skb->next) 3981 3982 #define skb_queue_reverse_walk(queue, skb) \ 3983 for (skb = (queue)->prev; \ 3984 skb != (struct sk_buff *)(queue); \ 3985 skb = skb->prev) 3986 3987 #define skb_queue_reverse_walk_safe(queue, skb, tmp) \ 3988 for (skb = (queue)->prev, tmp = skb->prev; \ 3989 skb != (struct sk_buff *)(queue); \ 3990 skb = tmp, tmp = skb->prev) 3991 3992 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \ 3993 for (tmp = skb->prev; \ 3994 skb != (struct sk_buff *)(queue); \ 3995 skb = tmp, tmp = skb->prev) 3996 3997 static inline bool skb_has_frag_list(const struct sk_buff *skb) 3998 { 3999 return skb_shinfo(skb)->frag_list != NULL; 4000 } 4001 4002 static inline void skb_frag_list_init(struct sk_buff *skb) 4003 { 4004 skb_shinfo(skb)->frag_list = NULL; 4005 } 4006 4007 #define skb_walk_frags(skb, iter) \ 4008 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) 4009 4010 4011 int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue, 4012 int *err, long *timeo_p, 4013 const struct sk_buff *skb); 4014 struct sk_buff *__skb_try_recv_from_queue(struct sock *sk, 4015 struct sk_buff_head *queue, 4016 unsigned int flags, 4017 int *off, int *err, 4018 struct sk_buff **last); 4019 struct sk_buff *__skb_try_recv_datagram(struct sock *sk, 4020 struct sk_buff_head *queue, 4021 unsigned int flags, int *off, int *err, 4022 struct sk_buff **last); 4023 struct sk_buff *__skb_recv_datagram(struct sock *sk, 4024 struct sk_buff_head *sk_queue, 4025 unsigned int flags, int *off, int *err); 4026 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err); 4027 __poll_t datagram_poll(struct file *file, struct socket *sock, 4028 struct poll_table_struct *wait); 4029 int skb_copy_datagram_iter(const struct sk_buff *from, int offset, 4030 struct iov_iter *to, int size); 4031 static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, 4032 struct msghdr *msg, int size) 4033 { 4034 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size); 4035 } 4036 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, 4037 struct msghdr *msg); 4038 int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset, 4039 struct iov_iter *to, int len, 4040 struct ahash_request *hash); 4041 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, 4042 struct iov_iter *from, int len); 4043 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); 4044 void skb_free_datagram(struct sock *sk, struct sk_buff *skb); 4045 void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len); 4046 static inline void skb_free_datagram_locked(struct sock *sk, 4047 struct sk_buff *skb) 4048 { 4049 __skb_free_datagram_locked(sk, skb, 0); 4050 } 4051 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); 4052 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); 4053 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); 4054 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, 4055 int len); 4056 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 4057 struct pipe_inode_info *pipe, unsigned int len, 4058 unsigned int flags); 4059 int skb_send_sock_locked(struct sock *sk, 
struct sk_buff *skb, int offset, 4060 int len); 4061 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len); 4062 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); 4063 unsigned int skb_zerocopy_headlen(const struct sk_buff *from); 4064 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, 4065 int len, int hlen); 4066 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); 4067 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); 4068 void skb_scrub_packet(struct sk_buff *skb, bool xnet); 4069 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu); 4070 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); 4071 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); 4072 struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features, 4073 unsigned int offset); 4074 struct sk_buff *skb_vlan_untag(struct sk_buff *skb); 4075 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len); 4076 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci); 4077 int skb_vlan_pop(struct sk_buff *skb); 4078 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); 4079 int skb_eth_pop(struct sk_buff *skb); 4080 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, 4081 const unsigned char *src); 4082 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, 4083 int mac_len, bool ethernet); 4084 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, 4085 bool ethernet); 4086 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse); 4087 int skb_mpls_dec_ttl(struct sk_buff *skb); 4088 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, 4089 gfp_t gfp); 4090 4091 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) 4092 { 4093 return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT; 4094 } 4095 4096 static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len) 4097 { 4098 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT; 4099 } 4100 4101 struct skb_checksum_ops { 4102 __wsum (*update)(const void *mem, int len, __wsum wsum); 4103 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len); 4104 }; 4105 4106 extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly; 4107 4108 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 4109 __wsum csum, const struct skb_checksum_ops *ops); 4110 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, 4111 __wsum csum); 4112 4113 static inline void * __must_check 4114 __skb_header_pointer(const struct sk_buff *skb, int offset, int len, 4115 const void *data, int hlen, void *buffer) 4116 { 4117 if (likely(hlen - offset >= len)) 4118 return (void *)data + offset; 4119 4120 if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0)) 4121 return NULL; 4122 4123 return buffer; 4124 } 4125 4126 static inline void * __must_check 4127 skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) 4128 { 4129 return __skb_header_pointer(skb, offset, len, skb->data, 4130 skb_headlen(skb), buffer); 4131 } 4132 4133 /** 4134 * skb_needs_linearize - check if we need to linearize a given skb 4135 * depending on the given device features. 4136 * @skb: socket buffer to check 4137 * @features: net device features 4138 * 4139 * Returns true if either: 4140 * 1. 
skb has frag_list and the device doesn't support FRAGLIST, or 4141 * 2. skb is fragmented and the device does not support SG. 4142 */ 4143 static inline bool skb_needs_linearize(struct sk_buff *skb, 4144 netdev_features_t features) 4145 { 4146 return skb_is_nonlinear(skb) && 4147 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) || 4148 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG))); 4149 } 4150 4151 static inline void skb_copy_from_linear_data(const struct sk_buff *skb, 4152 void *to, 4153 const unsigned int len) 4154 { 4155 memcpy(to, skb->data, len); 4156 } 4157 4158 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, 4159 const int offset, void *to, 4160 const unsigned int len) 4161 { 4162 memcpy(to, skb->data + offset, len); 4163 } 4164 4165 static inline void skb_copy_to_linear_data(struct sk_buff *skb, 4166 const void *from, 4167 const unsigned int len) 4168 { 4169 memcpy(skb->data, from, len); 4170 } 4171 4172 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, 4173 const int offset, 4174 const void *from, 4175 const unsigned int len) 4176 { 4177 memcpy(skb->data + offset, from, len); 4178 } 4179 4180 void skb_init(void); 4181 4182 static inline ktime_t skb_get_ktime(const struct sk_buff *skb) 4183 { 4184 return skb->tstamp; 4185 } 4186 4187 /** 4188 * skb_get_timestamp - get timestamp from a skb 4189 * @skb: skb to get stamp from 4190 * @stamp: pointer to struct __kernel_old_timeval to store stamp in 4191 * 4192 * The timestamp is stored in the skb as a ktime_t. This function 4193 * converts it to a struct __kernel_old_timeval and stores it 4194 * in @stamp. 4195 */ 4196 static inline void skb_get_timestamp(const struct sk_buff *skb, 4197 struct __kernel_old_timeval *stamp) 4198 { 4199 *stamp = ns_to_kernel_old_timeval(skb->tstamp); 4200 } 4201 4202 static inline void skb_get_new_timestamp(const struct sk_buff *skb, 4203 struct __kernel_sock_timeval *stamp) 4204 { 4205 struct timespec64 ts = ktime_to_timespec64(skb->tstamp); 4206 4207 stamp->tv_sec = ts.tv_sec; 4208 stamp->tv_usec = ts.tv_nsec / 1000; 4209 } 4210 4211 static inline void skb_get_timestampns(const struct sk_buff *skb, 4212 struct __kernel_old_timespec *stamp) 4213 { 4214 struct timespec64 ts = ktime_to_timespec64(skb->tstamp); 4215 4216 stamp->tv_sec = ts.tv_sec; 4217 stamp->tv_nsec = ts.tv_nsec; 4218 } 4219 4220 static inline void skb_get_new_timestampns(const struct sk_buff *skb, 4221 struct __kernel_timespec *stamp) 4222 { 4223 struct timespec64 ts = ktime_to_timespec64(skb->tstamp); 4224 4225 stamp->tv_sec = ts.tv_sec; 4226 stamp->tv_nsec = ts.tv_nsec; 4227 } 4228 4229 static inline void __net_timestamp(struct sk_buff *skb) 4230 { 4231 skb->tstamp = ktime_get_real(); 4232 skb->mono_delivery_time = 0; 4233 } 4234 4235 static inline ktime_t net_timedelta(ktime_t t) 4236 { 4237 return ktime_sub(ktime_get_real(), t); 4238 } 4239 4240 static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt, 4241 bool mono) 4242 { 4243 skb->tstamp = kt; 4244 skb->mono_delivery_time = kt && mono; 4245 } 4246 4247 DECLARE_STATIC_KEY_FALSE(netstamp_needed_key); 4248 4249 /* Used in the ingress path to clear the delivery_time. 4250 * If needed, set the skb->tstamp to the (rcv) timestamp.
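 *
 * Illustrative sketch (not part of the original header): the egress side
 * stores a delivery time in the CLOCK_MONOTONIC base, and the ingress
 * side of a forwarded skb clears it again before any receive-timestamp
 * use:
 *
 *	skb_set_delivery_time(skb, ktime_get(), true);	(egress)
 *	...
 *	skb_clear_delivery_time(skb);			(ingress)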
4251 */ 4252 static inline void skb_clear_delivery_time(struct sk_buff *skb) 4253 { 4254 if (skb->mono_delivery_time) { 4255 skb->mono_delivery_time = 0; 4256 if (static_branch_unlikely(&netstamp_needed_key)) 4257 skb->tstamp = ktime_get_real(); 4258 else 4259 skb->tstamp = 0; 4260 } 4261 } 4262 4263 static inline void skb_clear_tstamp(struct sk_buff *skb) 4264 { 4265 if (skb->mono_delivery_time) 4266 return; 4267 4268 skb->tstamp = 0; 4269 } 4270 4271 static inline ktime_t skb_tstamp(const struct sk_buff *skb) 4272 { 4273 if (skb->mono_delivery_time) 4274 return 0; 4275 4276 return skb->tstamp; 4277 } 4278 4279 static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond) 4280 { 4281 if (!skb->mono_delivery_time && skb->tstamp) 4282 return skb->tstamp; 4283 4284 if (static_branch_unlikely(&netstamp_needed_key) || cond) 4285 return ktime_get_real(); 4286 4287 return 0; 4288 } 4289 4290 static inline u8 skb_metadata_len(const struct sk_buff *skb) 4291 { 4292 return skb_shinfo(skb)->meta_len; 4293 } 4294 4295 static inline void *skb_metadata_end(const struct sk_buff *skb) 4296 { 4297 return skb_mac_header(skb); 4298 } 4299 4300 static inline bool __skb_metadata_differs(const struct sk_buff *skb_a, 4301 const struct sk_buff *skb_b, 4302 u8 meta_len) 4303 { 4304 const void *a = skb_metadata_end(skb_a); 4305 const void *b = skb_metadata_end(skb_b); 4306 /* Use a more efficient variant than a plain call to memcmp(). */ 4307 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 4308 u64 diffs = 0; 4309 4310 switch (meta_len) { 4311 #define __it(x, op) (x -= sizeof(u##op)) 4312 #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op)) 4313 case 32: diffs |= __it_diff(a, b, 64); 4314 fallthrough; 4315 case 24: diffs |= __it_diff(a, b, 64); 4316 fallthrough; 4317 case 16: diffs |= __it_diff(a, b, 64); 4318 fallthrough; 4319 case 8: diffs |= __it_diff(a, b, 64); 4320 break; 4321 case 28: diffs |= __it_diff(a, b, 64); 4322 fallthrough; 4323 case 20: diffs |= __it_diff(a, b, 64); 4324 fallthrough; 4325 case 12: diffs |= __it_diff(a, b, 64); 4326 fallthrough; 4327 case 4: diffs |= __it_diff(a, b, 32); 4328 break; 4329 } 4330 return diffs; 4331 #else 4332 return memcmp(a - meta_len, b - meta_len, meta_len); 4333 #endif 4334 } 4335 4336 static inline bool skb_metadata_differs(const struct sk_buff *skb_a, 4337 const struct sk_buff *skb_b) 4338 { 4339 u8 len_a = skb_metadata_len(skb_a); 4340 u8 len_b = skb_metadata_len(skb_b); 4341 4342 if (!(len_a | len_b)) 4343 return false; 4344 4345 return len_a != len_b ?
4346 true : __skb_metadata_differs(skb_a, skb_b, len_a); 4347 } 4348 4349 static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len) 4350 { 4351 skb_shinfo(skb)->meta_len = meta_len; 4352 } 4353 4354 static inline void skb_metadata_clear(struct sk_buff *skb) 4355 { 4356 skb_metadata_set(skb, 0); 4357 } 4358 4359 struct sk_buff *skb_clone_sk(struct sk_buff *skb); 4360 4361 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING 4362 4363 void skb_clone_tx_timestamp(struct sk_buff *skb); 4364 bool skb_defer_rx_timestamp(struct sk_buff *skb); 4365 4366 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */ 4367 4368 static inline void skb_clone_tx_timestamp(struct sk_buff *skb) 4369 { 4370 } 4371 4372 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) 4373 { 4374 return false; 4375 } 4376 4377 #endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */ 4378 4379 /** 4380 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps 4381 * 4382 * PHY drivers may accept clones of transmitted packets for 4383 * timestamping via their phy_driver.txtstamp method. These drivers 4384 * must call this function to return the skb back to the stack with a 4385 * timestamp. 4386 * 4387 * @skb: clone of the original outgoing packet 4388 * @hwtstamps: hardware time stamps 4389 * 4390 */ 4391 void skb_complete_tx_timestamp(struct sk_buff *skb, 4392 struct skb_shared_hwtstamps *hwtstamps); 4393 4394 void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb, 4395 struct skb_shared_hwtstamps *hwtstamps, 4396 struct sock *sk, int tstype); 4397 4398 /** 4399 * skb_tstamp_tx - queue clone of skb with send time stamps 4400 * @orig_skb: the original outgoing packet 4401 * @hwtstamps: hardware time stamps, may be NULL if not available 4402 * 4403 * If the skb has a socket associated, then this function clones the 4404 * skb (thus sharing the actual data and optional structures), stores 4405 * the optional hardware time stamping information (if non NULL) or 4406 * generates a software time stamp (otherwise), then queues the clone 4407 * to the error queue of the socket. Errors are silently ignored. 4408 */ 4409 void skb_tstamp_tx(struct sk_buff *orig_skb, 4410 struct skb_shared_hwtstamps *hwtstamps); 4411 4412 /** 4413 * skb_tx_timestamp() - Driver hook for transmit timestamping 4414 * 4415 * Ethernet MAC drivers should call this function in their hard_start_xmit() 4416 * function immediately before giving the sk_buff to the MAC hardware. 4417 * 4418 * Specifically, one should make absolutely sure that this function is 4419 * called before TX completion of this packet can trigger. Otherwise 4420 * the packet could potentially already be freed. 4421 * 4422 * @skb: A socket buffer.
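 *
 * Illustrative sketch (not part of the original header; foo_hw_queue_tx()
 * is a hypothetical device helper):
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		...
 *		skb_tx_timestamp(skb);
 *		foo_hw_queue_tx(dev, skb);
 *		return NETDEV_TX_OK;
 *	}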
4423 */ 4424 static inline void skb_tx_timestamp(struct sk_buff *skb) 4425 { 4426 skb_clone_tx_timestamp(skb); 4427 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP) 4428 skb_tstamp_tx(skb, NULL); 4429 } 4430 4431 /** 4432 * skb_complete_wifi_ack - deliver skb with wifi status 4433 * 4434 * @skb: the original outgoing packet 4435 * @acked: ack status 4436 * 4437 */ 4438 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked); 4439 4440 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); 4441 __sum16 __skb_checksum_complete(struct sk_buff *skb); 4442 4443 static inline int skb_csum_unnecessary(const struct sk_buff *skb) 4444 { 4445 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) || 4446 skb->csum_valid || 4447 (skb->ip_summed == CHECKSUM_PARTIAL && 4448 skb_checksum_start_offset(skb) >= 0)); 4449 } 4450 4451 /** 4452 * skb_checksum_complete - Calculate checksum of an entire packet 4453 * @skb: packet to process 4454 * 4455 * This function calculates the checksum over the entire packet plus 4456 * the value of skb->csum. The latter can be used to supply the 4457 * checksum of a pseudo header as used by TCP/UDP. It returns the 4458 * checksum. 4459 * 4460 * For protocols that contain complete checksums such as ICMP/TCP/UDP, 4461 * this function can be used to verify the checksum on received 4462 * packets. In that case the function should return zero if the 4463 * checksum is correct. In particular, this function will return zero 4464 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the 4465 * hardware has already verified the correctness of the checksum. 4466 */ 4467 static inline __sum16 skb_checksum_complete(struct sk_buff *skb) 4468 { 4469 return skb_csum_unnecessary(skb) ? 4470 0 : __skb_checksum_complete(skb); 4471 } 4472 4473 static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb) 4474 { 4475 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 4476 if (skb->csum_level == 0) 4477 skb->ip_summed = CHECKSUM_NONE; 4478 else 4479 skb->csum_level--; 4480 } 4481 } 4482 4483 static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb) 4484 { 4485 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 4486 if (skb->csum_level < SKB_MAX_CSUM_LEVEL) 4487 skb->csum_level++; 4488 } else if (skb->ip_summed == CHECKSUM_NONE) { 4489 skb->ip_summed = CHECKSUM_UNNECESSARY; 4490 skb->csum_level = 0; 4491 } 4492 } 4493 4494 static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb) 4495 { 4496 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 4497 skb->ip_summed = CHECKSUM_NONE; 4498 skb->csum_level = 0; 4499 } 4500 } 4501 4502 /* Check if we need to perform checksum complete validation. 4503 * 4504 * Returns true if checksum complete is needed, false otherwise 4505 * (either checksum is unnecessary or zero checksum is allowed). 4506 */ 4507 static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, 4508 bool zero_okay, 4509 __sum16 check) 4510 { 4511 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) { 4512 skb->csum_valid = 1; 4513 __skb_decr_checksum_unnecessary(skb); 4514 return false; 4515 } 4516 4517 return true; 4518 } 4519 4520 /* For small packets <= CHECKSUM_BREAK perform checksum complete directly 4521 * in checksum_init. 4522 */ 4523 #define CHECKSUM_BREAK 76 4524 4525 /* Unset checksum-complete 4526 * 4527 * Unsetting checksum complete is appropriate when a packet is being 4528 * modified (for instance, decompressed), which invalidates the 4529 * checksum-complete value.
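 *
 * For example (illustrative, not part of the original header), code that
 * decompresses or otherwise rewrites the payload in place would do
 *
 *	skb_checksum_complete_unset(skb);
 *
 * before modifying the data, since the stale skb->csum would no longer
 * match the packet contents.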
4530 */ 4531 static inline void skb_checksum_complete_unset(struct sk_buff *skb) 4532 { 4533 if (skb->ip_summed == CHECKSUM_COMPLETE) 4534 skb->ip_summed = CHECKSUM_NONE; 4535 } 4536 4537 /* Validate (init) checksum based on checksum complete. 4538 * 4539 * Return values: 4540 * 0: checksum is validated, or an attempt to validate it will be made in 4541 * skb_checksum_complete. In the latter case ip_summed will not be CHECKSUM_UNNECESSARY 4542 * and the pseudo checksum is stored in skb->csum for use in __skb_checksum_complete 4543 * non-zero: value of invalid checksum 4544 * 4545 */ 4546 static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, 4547 bool complete, 4548 __wsum psum) 4549 { 4550 if (skb->ip_summed == CHECKSUM_COMPLETE) { 4551 if (!csum_fold(csum_add(psum, skb->csum))) { 4552 skb->csum_valid = 1; 4553 return 0; 4554 } 4555 } 4556 4557 skb->csum = psum; 4558 4559 if (complete || skb->len <= CHECKSUM_BREAK) { 4560 __sum16 csum; 4561 4562 csum = __skb_checksum_complete(skb); 4563 skb->csum_valid = !csum; 4564 return csum; 4565 } 4566 4567 return 0; 4568 } 4569 4570 static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) 4571 { 4572 return 0; 4573 } 4574 4575 /* Perform checksum validate (init). Note that this is a macro so that 4576 * compute_pseudo, which supplies the pseudo header checksum, is only evaluated when necessary. 4577 * First we try to validate without any computation (checksum unnecessary) and 4578 * then validate based on checksum complete, calling the given function to compute 4579 * the pseudo header. 4580 * 4581 * Return values: 4582 * 0: checksum is validated, or an attempt to validate it will be made in skb_checksum_complete 4583 * non-zero: value of invalid checksum 4584 */ 4585 #define __skb_checksum_validate(skb, proto, complete, \ 4586 zero_okay, check, compute_pseudo) \ 4587 ({ \ 4588 __sum16 __ret = 0; \ 4589 skb->csum_valid = 0; \ 4590 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \ 4591 __ret = __skb_checksum_validate_complete(skb, \ 4592 complete, compute_pseudo(skb, proto)); \ 4593 __ret; \ 4594 }) 4595 4596 #define skb_checksum_init(skb, proto, compute_pseudo) \ 4597 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo) 4598 4599 #define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \ 4600 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo) 4601 4602 #define skb_checksum_validate(skb, proto, compute_pseudo) \ 4603 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo) 4604 4605 #define skb_checksum_validate_zero_check(skb, proto, check, \ 4606 compute_pseudo) \ 4607 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo) 4608 4609 #define skb_checksum_simple_validate(skb) \ 4610 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo) 4611 4612 static inline bool __skb_checksum_convert_check(struct sk_buff *skb) 4613 { 4614 return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid); 4615 } 4616 4617 static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo) 4618 { 4619 skb->csum = ~pseudo; 4620 skb->ip_summed = CHECKSUM_COMPLETE; 4621 } 4622 4623 #define skb_checksum_try_convert(skb, proto, compute_pseudo) \ 4624 do { \ 4625 if (__skb_checksum_convert_check(skb)) \ 4626 __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \ 4627 } while (0) 4628 4629 static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr, 4630 u16 start, u16 offset) 4631 { 4632 skb->ip_summed = CHECKSUM_PARTIAL; 4633 skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
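	/* csum_start above is measured from skb->head; csum_offset below is
	 * relative to csum_start and tells the device where to write the
	 * folded checksum.
	 */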
4634 skb->csum_offset = offset - start; 4635 } 4636 4637 /* Update the skb and packet to reflect the remote checksum offload operation. 4638 * When called, ptr indicates the starting point for skb->csum when 4639 * ip_summed is CHECKSUM_COMPLETE. If we need to create the checksum complete 4640 * value here, skb_postpull_rcsum is done so that skb->csum starts at ptr. 4641 */ 4642 static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, 4643 int start, int offset, bool nopartial) 4644 { 4645 __wsum delta; 4646 4647 if (!nopartial) { 4648 skb_remcsum_adjust_partial(skb, ptr, start, offset); 4649 return; 4650 } 4651 4652 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) { 4653 __skb_checksum_complete(skb); 4654 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data); 4655 } 4656 4657 delta = remcsum_adjust(ptr, skb->csum, start, offset); 4658 4659 /* Adjust skb->csum since we changed the packet */ 4660 skb->csum = csum_add(skb->csum, delta); 4661 } 4662 4663 static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb) 4664 { 4665 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 4666 return (void *)(skb->_nfct & NFCT_PTRMASK); 4667 #else 4668 return NULL; 4669 #endif 4670 } 4671 4672 static inline unsigned long skb_get_nfct(const struct sk_buff *skb) 4673 { 4674 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 4675 return skb->_nfct; 4676 #else 4677 return 0UL; 4678 #endif 4679 } 4680 4681 static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct) 4682 { 4683 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 4684 skb->slow_gro |= !!nfct; 4685 skb->_nfct = nfct; 4686 #endif 4687 } 4688 4689 #ifdef CONFIG_SKB_EXTENSIONS 4690 enum skb_ext_id { 4691 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4692 SKB_EXT_BRIDGE_NF, 4693 #endif 4694 #ifdef CONFIG_XFRM 4695 SKB_EXT_SEC_PATH, 4696 #endif 4697 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 4698 TC_SKB_EXT, 4699 #endif 4700 #if IS_ENABLED(CONFIG_MPTCP) 4701 SKB_EXT_MPTCP, 4702 #endif 4703 #if IS_ENABLED(CONFIG_MCTP_FLOWS) 4704 SKB_EXT_MCTP, 4705 #endif 4706 SKB_EXT_NUM, /* must be last */ 4707 }; 4708 4709 /** 4710 * struct skb_ext - sk_buff extensions 4711 * @refcnt: 1 on allocation, deallocated on 0 4712 * @offset: offset to add to @data to obtain extension address 4713 * @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units 4714 * @data: start of extension data, variable sized 4715 * 4716 * Note: offsets/lengths are stored in chunks of 8 bytes; this makes it 4717 * possible to use 'u8' types while allowing up to 2kb worth of extension data.
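 *
 * Illustrative sketch (not part of the original header): given those
 * 8-byte chunks, the address of extension @id is recovered as
 *
 *	(void *)ext + (ext->offset[id] << 3)
 *
 * which is exactly what skb_ext_find() below computes.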
4718 */ 4719 struct skb_ext { 4720 refcount_t refcnt; 4721 u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */ 4722 u8 chunks; /* same */ 4723 char data[] __aligned(8); 4724 }; 4725 4726 struct skb_ext *__skb_ext_alloc(gfp_t flags); 4727 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, 4728 struct skb_ext *ext); 4729 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id); 4730 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id); 4731 void __skb_ext_put(struct skb_ext *ext); 4732 4733 static inline void skb_ext_put(struct sk_buff *skb) 4734 { 4735 if (skb->active_extensions) 4736 __skb_ext_put(skb->extensions); 4737 } 4738 4739 static inline void __skb_ext_copy(struct sk_buff *dst, 4740 const struct sk_buff *src) 4741 { 4742 dst->active_extensions = src->active_extensions; 4743 4744 if (src->active_extensions) { 4745 struct skb_ext *ext = src->extensions; 4746 4747 refcount_inc(&ext->refcnt); 4748 dst->extensions = ext; 4749 } 4750 } 4751 4752 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src) 4753 { 4754 skb_ext_put(dst); 4755 __skb_ext_copy(dst, src); 4756 } 4757 4758 static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i) 4759 { 4760 return !!ext->offset[i]; 4761 } 4762 4763 static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id) 4764 { 4765 return skb->active_extensions & (1 << id); 4766 } 4767 4768 static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) 4769 { 4770 if (skb_ext_exist(skb, id)) 4771 __skb_ext_del(skb, id); 4772 } 4773 4774 static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id) 4775 { 4776 if (skb_ext_exist(skb, id)) { 4777 struct skb_ext *ext = skb->extensions; 4778 4779 return (void *)ext + (ext->offset[id] << 3); 4780 } 4781 4782 return NULL; 4783 } 4784 4785 static inline void skb_ext_reset(struct sk_buff *skb) 4786 { 4787 if (unlikely(skb->active_extensions)) { 4788 __skb_ext_put(skb->extensions); 4789 skb->active_extensions = 0; 4790 } 4791 } 4792 4793 static inline bool skb_has_extensions(struct sk_buff *skb) 4794 { 4795 return unlikely(skb->active_extensions); 4796 } 4797 #else 4798 static inline void skb_ext_put(struct sk_buff *skb) {} 4799 static inline void skb_ext_reset(struct sk_buff *skb) {} 4800 static inline void skb_ext_del(struct sk_buff *skb, int unused) {} 4801 static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {} 4802 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {} 4803 static inline bool skb_has_extensions(struct sk_buff *skb) { return false; } 4804 #endif /* CONFIG_SKB_EXTENSIONS */ 4805 4806 static inline void nf_reset_ct(struct sk_buff *skb) 4807 { 4808 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 4809 nf_conntrack_put(skb_nfct(skb)); 4810 skb->_nfct = 0; 4811 #endif 4812 } 4813 4814 static inline void nf_reset_trace(struct sk_buff *skb) 4815 { 4816 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) 4817 skb->nf_trace = 0; 4818 #endif 4819 } 4820 4821 static inline void ipvs_reset(struct sk_buff *skb) 4822 { 4823 #if IS_ENABLED(CONFIG_IP_VS) 4824 skb->ipvs_property = 0; 4825 #endif 4826 } 4827 4828 /* Note: This doesn't put any conntrack info in dst. 
*/ 4829 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, 4830 bool copy) 4831 { 4832 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 4833 dst->_nfct = src->_nfct; 4834 nf_conntrack_get(skb_nfct(src)); 4835 #endif 4836 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) 4837 if (copy) 4838 dst->nf_trace = src->nf_trace; 4839 #endif 4840 } 4841 4842 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) 4843 { 4844 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 4845 nf_conntrack_put(skb_nfct(dst)); 4846 #endif 4847 dst->slow_gro = src->slow_gro; 4848 __nf_copy(dst, src, true); 4849 } 4850 4851 #ifdef CONFIG_NETWORK_SECMARK 4852 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) 4853 { 4854 to->secmark = from->secmark; 4855 } 4856 4857 static inline void skb_init_secmark(struct sk_buff *skb) 4858 { 4859 skb->secmark = 0; 4860 } 4861 #else 4862 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) 4863 { } 4864 4865 static inline void skb_init_secmark(struct sk_buff *skb) 4866 { } 4867 #endif 4868 4869 static inline int secpath_exists(const struct sk_buff *skb) 4870 { 4871 #ifdef CONFIG_XFRM 4872 return skb_ext_exist(skb, SKB_EXT_SEC_PATH); 4873 #else 4874 return 0; 4875 #endif 4876 } 4877 4878 static inline bool skb_irq_freeable(const struct sk_buff *skb) 4879 { 4880 return !skb->destructor && 4881 !secpath_exists(skb) && 4882 !skb_nfct(skb) && 4883 !skb->_skb_refdst && 4884 !skb_has_frag_list(skb); 4885 } 4886 4887 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping) 4888 { 4889 skb->queue_mapping = queue_mapping; 4890 } 4891 4892 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb) 4893 { 4894 return skb->queue_mapping; 4895 } 4896 4897 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) 4898 { 4899 to->queue_mapping = from->queue_mapping; 4900 } 4901 4902 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue) 4903 { 4904 skb->queue_mapping = rx_queue + 1; 4905 } 4906 4907 static inline u16 skb_get_rx_queue(const struct sk_buff *skb) 4908 { 4909 return skb->queue_mapping - 1; 4910 } 4911 4912 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) 4913 { 4914 return skb->queue_mapping != 0; 4915 } 4916 4917 static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val) 4918 { 4919 skb->dst_pending_confirm = val; 4920 } 4921 4922 static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb) 4923 { 4924 return skb->dst_pending_confirm != 0; 4925 } 4926 4927 static inline struct sec_path *skb_sec_path(const struct sk_buff *skb) 4928 { 4929 #ifdef CONFIG_XFRM 4930 return skb_ext_find(skb, SKB_EXT_SEC_PATH); 4931 #else 4932 return NULL; 4933 #endif 4934 } 4935 4936 /* Keeps track of mac header offset relative to skb->head. 4937 * It is useful for TSO of Tunneling protocol. e.g. GRE. 4938 * For non-tunnel skb it points to skb_mac_header() and for 4939 * tunnel skb it points to outer mac header. 4940 * Keeps track of level of encapsulation of network headers. 
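 *
 * Illustrative note (not part of the original header): for a tunnel skb
 * the outer header length that each segment must replicate is
 *
 *	(skb_mac_header(skb) - skb->head) - SKB_GSO_CB(skb)->mac_offset
 *
 * which is what skb_tnl_header_len() below computes.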
4941 */ 4942 struct skb_gso_cb { 4943 union { 4944 int mac_offset; 4945 int data_offset; 4946 }; 4947 int encap_level; 4948 __wsum csum; 4949 __u16 csum_start; 4950 }; 4951 #define SKB_GSO_CB_OFFSET 32 4952 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET)) 4953 4954 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb) 4955 { 4956 return (skb_mac_header(inner_skb) - inner_skb->head) - 4957 SKB_GSO_CB(inner_skb)->mac_offset; 4958 } 4959 4960 static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) 4961 { 4962 int new_headroom, headroom; 4963 int ret; 4964 4965 headroom = skb_headroom(skb); 4966 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC); 4967 if (ret) 4968 return ret; 4969 4970 new_headroom = skb_headroom(skb); 4971 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom); 4972 return 0; 4973 } 4974 4975 static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res) 4976 { 4977 /* Do not update partial checksums if remote checksum is enabled. */ 4978 if (skb->remcsum_offload) 4979 return; 4980 4981 SKB_GSO_CB(skb)->csum = res; 4982 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head; 4983 } 4984 4985 /* Compute the checksum for a gso segment. First compute the checksum value 4986 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and 4987 * then add in skb->csum (checksum from csum_start to end of packet). 4988 * skb->csum and csum_start are then updated to reflect the checksum of the 4989 * resultant packet starting from the transport header -- the resultant 4990 * checksum is in the res argument (i.e. normally zero or the complement of 4991 * the checksum of a pseudo header). 4992 */ 4993 static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) 4994 { 4995 unsigned char *csum_start = skb_transport_header(skb); 4996 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start; 4997 __wsum partial = SKB_GSO_CB(skb)->csum; 4998 4999 SKB_GSO_CB(skb)->csum = res; 5000 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head; 5001 5002 return csum_fold(csum_partial(csum_start, plen, partial)); 5003 } 5004 5005 static inline bool skb_is_gso(const struct sk_buff *skb) 5006 { 5007 return skb_shinfo(skb)->gso_size; 5008 } 5009 5010 /* Note: Should be called only if skb_is_gso(skb) is true */ 5011 static inline bool skb_is_gso_v6(const struct sk_buff *skb) 5012 { 5013 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; 5014 } 5015 5016 /* Note: Should be called only if skb_is_gso(skb) is true */ 5017 static inline bool skb_is_gso_sctp(const struct sk_buff *skb) 5018 { 5019 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; 5020 } 5021 5022 /* Note: Should be called only if skb_is_gso(skb) is true */ 5023 static inline bool skb_is_gso_tcp(const struct sk_buff *skb) 5024 { 5025 return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6); 5026 } 5027 5028 static inline void skb_gso_reset(struct sk_buff *skb) 5029 { 5030 skb_shinfo(skb)->gso_size = 0; 5031 skb_shinfo(skb)->gso_segs = 0; 5032 skb_shinfo(skb)->gso_type = 0; 5033 } 5034 5035 static inline void skb_increase_gso_size(struct skb_shared_info *shinfo, 5036 u16 increment) 5037 { 5038 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) 5039 return; 5040 shinfo->gso_size += increment; 5041 } 5042 5043 static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo, 5044 u16 decrement) 5045 { 5046 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) 5047 return; 5048 shinfo->gso_size -= decrement; 5049 } 5050 5051 void
__skb_warn_lro_forwarding(const struct sk_buff *skb); 5052 5053 static inline bool skb_warn_if_lro(const struct sk_buff *skb) 5054 { 5055 /* LRO sets gso_size but not gso_type, whereas if GSO is really 5056 * wanted then gso_type will be set. */ 5057 const struct skb_shared_info *shinfo = skb_shinfo(skb); 5058 5059 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && 5060 unlikely(shinfo->gso_type == 0)) { 5061 __skb_warn_lro_forwarding(skb); 5062 return true; 5063 } 5064 return false; 5065 } 5066 5067 static inline void skb_forward_csum(struct sk_buff *skb) 5068 { 5069 /* Unfortunately we don't support this one. Any brave souls? */ 5070 if (skb->ip_summed == CHECKSUM_COMPLETE) 5071 skb->ip_summed = CHECKSUM_NONE; 5072 } 5073 5074 /** 5075 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE 5076 * @skb: skb to check 5077 * 5078 * Fresh skbs have their ip_summed set to CHECKSUM_NONE. 5079 * Instead of forcing ip_summed to CHECKSUM_NONE, we can 5080 * use this helper, to document places where we make this assertion. 5081 */ 5082 static inline void skb_checksum_none_assert(const struct sk_buff *skb) 5083 { 5084 DEBUG_NET_WARN_ON_ONCE(skb->ip_summed != CHECKSUM_NONE); 5085 } 5086 5087 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); 5088 5089 int skb_checksum_setup(struct sk_buff *skb, bool recalculate); 5090 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 5091 unsigned int transport_len, 5092 __sum16(*skb_chkf)(struct sk_buff *skb)); 5093 5094 /** 5095 * skb_head_is_locked - Determine if the skb->head is locked down 5096 * @skb: skb to check 5097 * 5098 * The head of skbs built around a head frag can be removed if they are 5099 * not cloned. This function returns true if the skb head is locked down 5100 * due to either being allocated via kmalloc, or by being a clone with 5101 * multiple references to the head. 5102 */ 5103 static inline bool skb_head_is_locked(const struct sk_buff *skb) 5104 { 5105 return !skb->head_frag || skb_cloned(skb); 5106 } 5107 5108 /* Local Checksum Offload. 5109 * Compute outer checksum based on the assumption that the 5110 * inner checksum will be offloaded later. 5111 * See Documentation/networking/checksum-offloads.rst for 5112 * explanation of how this works. 5113 * Fill in outer checksum adjustment (e.g. with sum of outer 5114 * pseudo-header) before calling. 5115 * Also ensure that inner checksum is in linear data area. 5116 */ 5117 static inline __wsum lco_csum(struct sk_buff *skb) 5118 { 5119 unsigned char *csum_start = skb_checksum_start(skb); 5120 unsigned char *l4_hdr = skb_transport_header(skb); 5121 __wsum partial; 5122 5123 /* Start with complement of inner checksum adjustment */ 5124 partial = ~csum_unfold(*(__force __sum16 *)(csum_start + 5125 skb->csum_offset)); 5126 5127 /* Add in checksum of our headers (incl. outer checksum 5128 * adjustment filled in by caller) and return result.
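 *
 * Illustrative sketch (not part of the original header): a UDP tunnel
 * computing the outer UDP checksum via LCO, as in udp_set_csum(),
 * zeroes the outer checksum field, folds in the outer pseudo-header
 * and stores the result:
 *
 *	uh->check = 0;
 *	uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;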
5129 */ 5130 return csum_partial(l4_hdr, csum_start - l4_hdr, partial); 5131 } 5132 5133 static inline bool skb_is_redirected(const struct sk_buff *skb) 5134 { 5135 return skb->redirected; 5136 } 5137 5138 static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) 5139 { 5140 skb->redirected = 1; 5141 #ifdef CONFIG_NET_REDIRECT 5142 skb->from_ingress = from_ingress; 5143 if (skb->from_ingress) 5144 skb_clear_tstamp(skb); 5145 #endif 5146 } 5147 5148 static inline void skb_reset_redirect(struct sk_buff *skb) 5149 { 5150 skb->redirected = 0; 5151 } 5152 5153 static inline bool skb_csum_is_sctp(struct sk_buff *skb) 5154 { 5155 return skb->csum_not_inet; 5156 } 5157 5158 static inline void skb_set_kcov_handle(struct sk_buff *skb, 5159 const u64 kcov_handle) 5160 { 5161 #ifdef CONFIG_KCOV 5162 skb->kcov_handle = kcov_handle; 5163 #endif 5164 } 5165 5166 static inline u64 skb_get_kcov_handle(struct sk_buff *skb) 5167 { 5168 #ifdef CONFIG_KCOV 5169 return skb->kcov_handle; 5170 #else 5171 return 0; 5172 #endif 5173 } 5174 5175 #ifdef CONFIG_PAGE_POOL 5176 static inline void skb_mark_for_recycle(struct sk_buff *skb) 5177 { 5178 skb->pp_recycle = 1; 5179 } 5180 #endif 5181 5182 static inline bool skb_pp_recycle(struct sk_buff *skb, void *data) 5183 { 5184 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) 5185 return false; 5186 return page_pool_return_skb_page(virt_to_page(data)); 5187 } 5188 5189 #endif /* __KERNEL__ */ 5190 #endif /* _LINUX_SKBUFF_H */ 5191