1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * Definitions for the Interfaces handler. 8 * 9 * Version: @(#)dev.h 1.0.10 08/12/93 10 * 11 * Authors: Ross Biro 12 * Fred N. van Kempen, <[email protected]> 13 * Corey Minyard <[email protected]> 14 * Donald J. Becker, <[email protected]> 15 * Alan Cox, <[email protected]> 16 * Bjorn Ekwall. <[email protected]> 17 * Pekka Riikonen <[email protected]> 18 * 19 * Moved to /usr/include/linux for NET3 20 */ 21 #ifndef _LINUX_NETDEVICE_H 22 #define _LINUX_NETDEVICE_H 23 24 #include <linux/timer.h> 25 #include <linux/bug.h> 26 #include <linux/delay.h> 27 #include <linux/atomic.h> 28 #include <linux/prefetch.h> 29 #include <asm/cache.h> 30 #include <asm/byteorder.h> 31 #include <asm/local.h> 32 33 #include <linux/percpu.h> 34 #include <linux/rculist.h> 35 #include <linux/workqueue.h> 36 #include <linux/dynamic_queue_limits.h> 37 38 #include <net/net_namespace.h> 39 #ifdef CONFIG_DCB 40 #include <net/dcbnl.h> 41 #endif 42 #include <net/netprio_cgroup.h> 43 #include <linux/netdev_features.h> 44 #include <linux/neighbour.h> 45 #include <linux/netdevice_xmit.h> 46 #include <uapi/linux/netdevice.h> 47 #include <uapi/linux/if_bonding.h> 48 #include <uapi/linux/pkt_cls.h> 49 #include <uapi/linux/netdev.h> 50 #include <linux/hashtable.h> 51 #include <linux/rbtree.h> 52 #include <net/net_trackers.h> 53 #include <net/net_debug.h> 54 #include <net/dropreason-core.h> 55 #include <net/neighbour_tables.h> 56 57 struct netpoll_info; 58 struct device; 59 struct ethtool_ops; 60 struct kernel_hwtstamp_config; 61 struct phy_device; 62 struct dsa_port; 63 struct ip_tunnel_parm_kern; 64 struct macsec_context; 65 struct macsec_ops; 66 struct netdev_config; 67 struct netdev_name_node; 68 struct sd_flow_limit; 69 struct sfp_bus; 70 /* 802.11 specific */ 71 struct wireless_dev; 72 /* 802.15.4 specific */ 73 struct wpan_dev; 74 struct mpls_dev; 75 /* UDP Tunnel offloads */ 76 struct udp_tunnel_info; 77 struct udp_tunnel_nic_info; 78 struct udp_tunnel_nic; 79 struct bpf_prog; 80 struct xdp_buff; 81 struct xdp_frame; 82 struct xdp_metadata_ops; 83 struct xdp_md; 84 struct ethtool_netdev_state; 85 struct phy_link_topology; 86 struct hwtstamp_provider; 87 88 typedef u32 xdp_features_t; 89 90 void synchronize_net(void); 91 void netdev_set_default_ethtool_ops(struct net_device *dev, 92 const struct ethtool_ops *ops); 93 void netdev_sw_irq_coalesce_default_on(struct net_device *dev); 94 95 /* Backlog congestion levels */ 96 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ 97 #define NET_RX_DROP 1 /* packet dropped */ 98 99 #define MAX_NEST_DEV 8 100 101 /* 102 * Transmit return codes: transmit return codes originate from three different 103 * namespaces: 104 * 105 * - qdisc return codes 106 * - driver transmit return codes 107 * - errno values 108 * 109 * Drivers are allowed to return any one of those in their hard_start_xmit() 110 * function. Real network devices commonly used with qdiscs should only return 111 * the driver transmit return codes though - when qdiscs are used, the actual 112 * transmission happens asynchronously, so the value is not propagated to 113 * higher layers. Virtual network devices transmit synchronously; in this case 114 * the driver transmit return codes are consumed by dev_queue_xmit(), and all 115 * others are propagated to higher layers. 116 */ 117 118 /* qdisc ->enqueue() return codes. */ 119 #define NET_XMIT_SUCCESS 0x00 120 #define NET_XMIT_DROP 0x01 /* skb dropped */ 121 #define NET_XMIT_CN 0x02 /* congestion notification */ 122 #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ 123 124 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It 125 * indicates that the device will soon be dropping packets, or already drops 126 * some packets of the same priority; prompting us to send less aggressively. */ 127 #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) 128 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) 129 130 /* Driver transmit return codes */ 131 #define NETDEV_TX_MASK 0xf0 132 133 enum netdev_tx { 134 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ 135 NETDEV_TX_OK = 0x00, /* driver took care of packet */ 136 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ 137 }; 138 typedef enum netdev_tx netdev_tx_t; 139 140 /* 141 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; 142 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. 143 */ 144 static inline bool dev_xmit_complete(int rc) 145 { 146 /* 147 * Positive cases with an skb consumed by a driver: 148 * - successful transmission (rc == NETDEV_TX_OK) 149 * - error while transmitting (rc < 0) 150 * - error while queueing to a different device (rc & NET_XMIT_MASK) 151 */ 152 if (likely(rc < NET_XMIT_MASK)) 153 return true; 154 155 return false; 156 } 157 158 /* 159 * Compute the worst-case header length according to the protocols 160 * used. 161 */ 162 163 #if defined(CONFIG_HYPERV_NET) 164 # define LL_MAX_HEADER 128 165 #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) 166 # if defined(CONFIG_MAC80211_MESH) 167 # define LL_MAX_HEADER 128 168 # else 169 # define LL_MAX_HEADER 96 170 # endif 171 #else 172 # define LL_MAX_HEADER 32 173 #endif 174 175 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ 176 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) 177 #define MAX_HEADER LL_MAX_HEADER 178 #else 179 #define MAX_HEADER (LL_MAX_HEADER + 48) 180 #endif 181 182 /* 183 * Old network device statistics. Fields are native words 184 * (unsigned long) so they can be read and written atomically. 185 */ 186 187 #define NET_DEV_STAT(FIELD) \ 188 union { \ 189 unsigned long FIELD; \ 190 atomic_long_t __##FIELD; \ 191 } 192 193 struct net_device_stats { 194 NET_DEV_STAT(rx_packets); 195 NET_DEV_STAT(tx_packets); 196 NET_DEV_STAT(rx_bytes); 197 NET_DEV_STAT(tx_bytes); 198 NET_DEV_STAT(rx_errors); 199 NET_DEV_STAT(tx_errors); 200 NET_DEV_STAT(rx_dropped); 201 NET_DEV_STAT(tx_dropped); 202 NET_DEV_STAT(multicast); 203 NET_DEV_STAT(collisions); 204 NET_DEV_STAT(rx_length_errors); 205 NET_DEV_STAT(rx_over_errors); 206 NET_DEV_STAT(rx_crc_errors); 207 NET_DEV_STAT(rx_frame_errors); 208 NET_DEV_STAT(rx_fifo_errors); 209 NET_DEV_STAT(rx_missed_errors); 210 NET_DEV_STAT(tx_aborted_errors); 211 NET_DEV_STAT(tx_carrier_errors); 212 NET_DEV_STAT(tx_fifo_errors); 213 NET_DEV_STAT(tx_heartbeat_errors); 214 NET_DEV_STAT(tx_window_errors); 215 NET_DEV_STAT(rx_compressed); 216 NET_DEV_STAT(tx_compressed); 217 }; 218 #undef NET_DEV_STAT 219 220 /* per-cpu stats, allocated on demand. 221 * Try to fit them in a single cache line, for dev_get_stats() sake. 222 */ 223 struct net_device_core_stats { 224 unsigned long rx_dropped; 225 unsigned long tx_dropped; 226 unsigned long rx_nohandler; 227 unsigned long rx_otherhost_dropped; 228 } __aligned(4 * sizeof(unsigned long)); 229 230 #include <linux/cache.h> 231 #include <linux/skbuff.h> 232 233 struct neighbour; 234 struct neigh_parms; 235 struct sk_buff; 236 237 struct netdev_hw_addr { 238 struct list_head list; 239 struct rb_node node; 240 unsigned char addr[MAX_ADDR_LEN]; 241 unsigned char type; 242 #define NETDEV_HW_ADDR_T_LAN 1 243 #define NETDEV_HW_ADDR_T_SAN 2 244 #define NETDEV_HW_ADDR_T_UNICAST 3 245 #define NETDEV_HW_ADDR_T_MULTICAST 4 246 bool global_use; 247 int sync_cnt; 248 int refcount; 249 int synced; 250 struct rcu_head rcu_head; 251 }; 252 253 struct netdev_hw_addr_list { 254 struct list_head list; 255 int count; 256 257 /* Auxiliary tree for faster lookup on addition and deletion */ 258 struct rb_root tree; 259 }; 260 261 #define netdev_hw_addr_list_count(l) ((l)->count) 262 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) 263 #define netdev_hw_addr_list_for_each(ha, l) \ 264 list_for_each_entry(ha, &(l)->list, list) 265 266 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) 267 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) 268 #define netdev_for_each_uc_addr(ha, dev) \ 269 netdev_hw_addr_list_for_each(ha, &(dev)->uc) 270 #define netdev_for_each_synced_uc_addr(_ha, _dev) \ 271 netdev_for_each_uc_addr((_ha), (_dev)) \ 272 if ((_ha)->sync_cnt) 273 274 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) 275 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) 276 #define netdev_for_each_mc_addr(ha, dev) \ 277 netdev_hw_addr_list_for_each(ha, &(dev)->mc) 278 #define netdev_for_each_synced_mc_addr(_ha, _dev) \ 279 netdev_for_each_mc_addr((_ha), (_dev)) \ 280 if ((_ha)->sync_cnt) 281 282 struct hh_cache { 283 unsigned int hh_len; 284 seqlock_t hh_lock; 285 286 /* cached hardware header; allow for machine alignment needs. */ 287 #define HH_DATA_MOD 16 288 #define HH_DATA_OFF(__len) \ 289 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) 290 #define HH_DATA_ALIGN(__len) \ 291 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) 292 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; 293 }; 294 295 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much. 296 * Alternative is: 297 * dev->hard_header_len ? (dev->hard_header_len + 298 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 299 * 300 * We could use other alignment values, but we must maintain the 301 * relationship HH alignment <= LL alignment. 302 */ 303 #define LL_RESERVED_SPACE(dev) \ 304 ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \ 305 & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 306 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ 307 ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \ 308 & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 309 310 struct header_ops { 311 int (*create) (struct sk_buff *skb, struct net_device *dev, 312 unsigned short type, const void *daddr, 313 const void *saddr, unsigned int len); 314 int (*parse)(const struct sk_buff *skb, unsigned char *haddr); 315 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); 316 void (*cache_update)(struct hh_cache *hh, 317 const struct net_device *dev, 318 const unsigned char *haddr); 319 bool (*validate)(const char *ll_header, unsigned int len); 320 __be16 (*parse_protocol)(const struct sk_buff *skb); 321 }; 322 323 /* These flag bits are private to the generic network queueing 324 * layer; they may not be explicitly referenced by any other 325 * code. 326 */ 327 328 enum netdev_state_t { 329 __LINK_STATE_START, 330 __LINK_STATE_PRESENT, 331 __LINK_STATE_NOCARRIER, 332 __LINK_STATE_LINKWATCH_PENDING, 333 __LINK_STATE_DORMANT, 334 __LINK_STATE_TESTING, 335 }; 336 337 struct gro_list { 338 struct list_head list; 339 int count; 340 }; 341 342 /* 343 * size of gro hash buckets, must be <= the number of bits in 344 * gro_node::bitmask 345 */ 346 #define GRO_HASH_BUCKETS 8 347 348 /** 349 * struct gro_node - structure to support Generic Receive Offload 350 * @bitmask: bitmask to indicate used buckets in @hash 351 * @hash: hashtable of pending aggregated skbs, separated by flows 352 * @rx_list: list of pending ``GRO_NORMAL`` skbs 353 * @rx_count: cached current length of @rx_list 354 * @cached_napi_id: napi_struct::napi_id cached for hotpath, 0 for standalone 355 */ 356 struct gro_node { 357 unsigned long bitmask; 358 struct gro_list hash[GRO_HASH_BUCKETS]; 359 struct list_head rx_list; 360 u32 rx_count; 361 u32 cached_napi_id; 362 }; 363 364 /* 365 * Structure for per-NAPI config 366 */ 367 struct napi_config { 368 u64 gro_flush_timeout; 369 u64 irq_suspend_timeout; 370 u32 defer_hard_irqs; 371 cpumask_t affinity_mask; 372 unsigned int napi_id; 373 }; 374 375 /* 376 * Structure for NAPI scheduling similar to tasklet but with weighting 377 */ 378 struct napi_struct { 379 /* The poll_list must only be managed by the entity which 380 * changes the state of the NAPI_STATE_SCHED bit. This means 381 * whoever atomically sets that bit can add this napi_struct 382 * to the per-CPU poll_list, and whoever clears that bit 383 * can remove from the list right before clearing the bit. 384 */ 385 struct list_head poll_list; 386 387 unsigned long state; 388 int weight; 389 u32 defer_hard_irqs_count; 390 int (*poll)(struct napi_struct *, int); 391 #ifdef CONFIG_NETPOLL 392 /* CPU actively polling if netpoll is configured */ 393 int poll_owner; 394 #endif 395 /* CPU on which NAPI has been scheduled for processing */ 396 int list_owner; 397 struct net_device *dev; 398 struct sk_buff *skb; 399 struct gro_node gro; 400 struct hrtimer timer; 401 /* all fields past this point are write-protected by netdev_lock */ 402 struct task_struct *thread; 403 unsigned long gro_flush_timeout; 404 unsigned long irq_suspend_timeout; 405 u32 defer_hard_irqs; 406 /* control-path-only fields follow */ 407 u32 napi_id; 408 struct list_head dev_list; 409 struct hlist_node napi_hash_node; 410 int irq; 411 struct irq_affinity_notify notify; 412 int napi_rmap_idx; 413 int index; 414 struct napi_config *config; 415 }; 416 417 enum { 418 NAPI_STATE_SCHED, /* Poll is scheduled */ 419 NAPI_STATE_MISSED, /* reschedule a napi */ 420 NAPI_STATE_DISABLE, /* Disable pending */ 421 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ 422 NAPI_STATE_LISTED, /* NAPI added to system lists */ 423 NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */ 424 NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */ 425 NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/ 426 NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/ 427 NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */ 428 NAPI_STATE_HAS_NOTIFIER, /* Napi has an IRQ notifier */ 429 }; 430 431 enum { 432 NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED), 433 NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), 434 NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), 435 NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), 436 NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED), 437 NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), 438 NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), 439 NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL), 440 NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), 441 NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED), 442 NAPIF_STATE_HAS_NOTIFIER = BIT(NAPI_STATE_HAS_NOTIFIER), 443 }; 444 445 enum gro_result { 446 GRO_MERGED, 447 GRO_MERGED_FREE, 448 GRO_HELD, 449 GRO_NORMAL, 450 GRO_CONSUMED, 451 }; 452 typedef enum gro_result gro_result_t; 453 454 /* 455 * enum rx_handler_result - Possible return values for rx_handlers. 456 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it 457 * further. 458 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in 459 * case skb->dev was changed by rx_handler. 460 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard. 461 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called. 462 * 463 * rx_handlers are functions called from inside __netif_receive_skb(), to do 464 * special processing of the skb, prior to delivery to protocol handlers. 465 * 466 * Currently, a net_device can only have a single rx_handler registered. Trying 467 * to register a second rx_handler will return -EBUSY. 468 * 469 * To register a rx_handler on a net_device, use netdev_rx_handler_register(). 470 * To unregister a rx_handler on a net_device, use 471 * netdev_rx_handler_unregister(). 472 * 473 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to 474 * do with the skb. 475 * 476 * If the rx_handler consumed the skb in some way, it should return 477 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for 478 * the skb to be delivered in some other way. 479 * 480 * If the rx_handler changed skb->dev, to divert the skb to another 481 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the 482 * new device will be called if it exists. 483 * 484 * If the rx_handler decides the skb should be ignored, it should return 485 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that 486 * are registered on exact device (ptype->dev == skb->dev). 487 * 488 * If the rx_handler didn't change skb->dev, but wants the skb to be normally 489 * delivered, it should return RX_HANDLER_PASS. 490 * 491 * A device without a registered rx_handler will behave as if rx_handler 492 * returned RX_HANDLER_PASS. 493 */ 494 495 enum rx_handler_result { 496 RX_HANDLER_CONSUMED, 497 RX_HANDLER_ANOTHER, 498 RX_HANDLER_EXACT, 499 RX_HANDLER_PASS, 500 }; 501 typedef enum rx_handler_result rx_handler_result_t; 502 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); 503 504 void __napi_schedule(struct napi_struct *n); 505 void __napi_schedule_irqoff(struct napi_struct *n); 506 507 static inline bool napi_disable_pending(struct napi_struct *n) 508 { 509 return test_bit(NAPI_STATE_DISABLE, &n->state); 510 } 511 512 static inline bool napi_prefer_busy_poll(struct napi_struct *n) 513 { 514 return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); 515 } 516 517 /** 518 * napi_is_scheduled - test if NAPI is scheduled 519 * @n: NAPI context 520 * 521 * This check is "best-effort". With no locking implemented, 522 * a NAPI can be scheduled or terminate right after this check 523 * and produce not precise results. 524 * 525 * NAPI_STATE_SCHED is an internal state, napi_is_scheduled 526 * should not be used normally and napi_schedule should be 527 * used instead. 528 * 529 * Use only if the driver really needs to check if a NAPI 530 * is scheduled for example in the context of delayed timer 531 * that can be skipped if a NAPI is already scheduled. 532 * 533 * Return: True if NAPI is scheduled, False otherwise. 534 */ 535 static inline bool napi_is_scheduled(struct napi_struct *n) 536 { 537 return test_bit(NAPI_STATE_SCHED, &n->state); 538 } 539 540 bool napi_schedule_prep(struct napi_struct *n); 541 542 /** 543 * napi_schedule - schedule NAPI poll 544 * @n: NAPI context 545 * 546 * Schedule NAPI poll routine to be called if it is not already 547 * running. 548 * Return: true if we schedule a NAPI or false if not. 549 * Refer to napi_schedule_prep() for additional reason on why 550 * a NAPI might not be scheduled. 551 */ 552 static inline bool napi_schedule(struct napi_struct *n) 553 { 554 if (napi_schedule_prep(n)) { 555 __napi_schedule(n); 556 return true; 557 } 558 559 return false; 560 } 561 562 /** 563 * napi_schedule_irqoff - schedule NAPI poll 564 * @n: NAPI context 565 * 566 * Variant of napi_schedule(), assuming hard irqs are masked. 567 */ 568 static inline void napi_schedule_irqoff(struct napi_struct *n) 569 { 570 if (napi_schedule_prep(n)) 571 __napi_schedule_irqoff(n); 572 } 573 574 /** 575 * napi_complete_done - NAPI processing complete 576 * @n: NAPI context 577 * @work_done: number of packets processed 578 * 579 * Mark NAPI processing as complete. Should only be called if poll budget 580 * has not been completely consumed. 581 * Prefer over napi_complete(). 582 * Return: false if device should avoid rearming interrupts. 583 */ 584 bool napi_complete_done(struct napi_struct *n, int work_done); 585 586 static inline bool napi_complete(struct napi_struct *n) 587 { 588 return napi_complete_done(n, 0); 589 } 590 591 int dev_set_threaded(struct net_device *dev, bool threaded); 592 593 void napi_disable(struct napi_struct *n); 594 void napi_disable_locked(struct napi_struct *n); 595 596 void napi_enable(struct napi_struct *n); 597 void napi_enable_locked(struct napi_struct *n); 598 599 /** 600 * napi_synchronize - wait until NAPI is not running 601 * @n: NAPI context 602 * 603 * Wait until NAPI is done being scheduled on this context. 604 * Waits till any outstanding processing completes but 605 * does not disable future activations. 606 */ 607 static inline void napi_synchronize(const struct napi_struct *n) 608 { 609 if (IS_ENABLED(CONFIG_SMP)) 610 while (test_bit(NAPI_STATE_SCHED, &n->state)) 611 msleep(1); 612 else 613 barrier(); 614 } 615 616 /** 617 * napi_if_scheduled_mark_missed - if napi is running, set the 618 * NAPIF_STATE_MISSED 619 * @n: NAPI context 620 * 621 * If napi is running, set the NAPIF_STATE_MISSED, and return true if 622 * NAPI is scheduled. 623 **/ 624 static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n) 625 { 626 unsigned long val, new; 627 628 val = READ_ONCE(n->state); 629 do { 630 if (val & NAPIF_STATE_DISABLE) 631 return true; 632 633 if (!(val & NAPIF_STATE_SCHED)) 634 return false; 635 636 new = val | NAPIF_STATE_MISSED; 637 } while (!try_cmpxchg(&n->state, &val, new)); 638 639 return true; 640 } 641 642 enum netdev_queue_state_t { 643 __QUEUE_STATE_DRV_XOFF, 644 __QUEUE_STATE_STACK_XOFF, 645 __QUEUE_STATE_FROZEN, 646 }; 647 648 #define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF) 649 #define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF) 650 #define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN) 651 652 #define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF) 653 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ 654 QUEUE_STATE_FROZEN) 655 #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \ 656 QUEUE_STATE_FROZEN) 657 658 /* 659 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The 660 * netif_tx_* functions below are used to manipulate this flag. The 661 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit 662 * queue independently. The netif_xmit_*stopped functions below are called 663 * to check if the queue has been stopped by the driver or stack (either 664 * of the XOFF bits are set in the state). Drivers should not need to call 665 * netif_xmit*stopped functions, they should only be using netif_tx_*. 666 */ 667 668 struct netdev_queue { 669 /* 670 * read-mostly part 671 */ 672 struct net_device *dev; 673 netdevice_tracker dev_tracker; 674 675 struct Qdisc __rcu *qdisc; 676 struct Qdisc __rcu *qdisc_sleeping; 677 #ifdef CONFIG_SYSFS 678 struct kobject kobj; 679 const struct attribute_group **groups; 680 #endif 681 unsigned long tx_maxrate; 682 /* 683 * Number of TX timeouts for this queue 684 * (/sys/class/net/DEV/Q/trans_timeout) 685 */ 686 atomic_long_t trans_timeout; 687 688 /* Subordinate device that the queue has been assigned to */ 689 struct net_device *sb_dev; 690 #ifdef CONFIG_XDP_SOCKETS 691 struct xsk_buff_pool *pool; 692 #endif 693 694 /* 695 * write-mostly part 696 */ 697 #ifdef CONFIG_BQL 698 struct dql dql; 699 #endif 700 spinlock_t _xmit_lock ____cacheline_aligned_in_smp; 701 int xmit_lock_owner; 702 /* 703 * Time (in jiffies) of last Tx 704 */ 705 unsigned long trans_start; 706 707 unsigned long state; 708 709 /* 710 * slow- / control-path part 711 */ 712 /* NAPI instance for the queue 713 * Readers and writers must hold RTNL 714 */ 715 struct napi_struct *napi; 716 717 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 718 int numa_node; 719 #endif 720 } ____cacheline_aligned_in_smp; 721 722 extern int sysctl_fb_tunnels_only_for_init_net; 723 extern int sysctl_devconf_inherit_init_net; 724 725 /* 726 * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns 727 * == 1 : For initns only 728 * == 2 : For none. 729 */ 730 static inline bool net_has_fallback_tunnels(const struct net *net) 731 { 732 #if IS_ENABLED(CONFIG_SYSCTL) 733 int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net); 734 735 return !fb_tunnels_only_for_init_net || 736 (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1); 737 #else 738 return true; 739 #endif 740 } 741 742 static inline int net_inherit_devconf(void) 743 { 744 #if IS_ENABLED(CONFIG_SYSCTL) 745 return READ_ONCE(sysctl_devconf_inherit_init_net); 746 #else 747 return 0; 748 #endif 749 } 750 751 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) 752 { 753 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 754 return q->numa_node; 755 #else 756 return NUMA_NO_NODE; 757 #endif 758 } 759 760 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) 761 { 762 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 763 q->numa_node = node; 764 #endif 765 } 766 767 #ifdef CONFIG_RFS_ACCEL 768 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, 769 u16 filter_id); 770 #endif 771 772 /* XPS map type and offset of the xps map within net_device->xps_maps[]. */ 773 enum xps_map_type { 774 XPS_CPUS = 0, 775 XPS_RXQS, 776 XPS_MAPS_MAX, 777 }; 778 779 #ifdef CONFIG_XPS 780 /* 781 * This structure holds an XPS map which can be of variable length. The 782 * map is an array of queues. 783 */ 784 struct xps_map { 785 unsigned int len; 786 unsigned int alloc_len; 787 struct rcu_head rcu; 788 u16 queues[]; 789 }; 790 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) 791 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ 792 - sizeof(struct xps_map)) / sizeof(u16)) 793 794 /* 795 * This structure holds all XPS maps for device. Maps are indexed by CPU. 796 * 797 * We keep track of the number of cpus/rxqs used when the struct is allocated, 798 * in nr_ids. This will help not accessing out-of-bound memory. 799 * 800 * We keep track of the number of traffic classes used when the struct is 801 * allocated, in num_tc. This will be used to navigate the maps, to ensure we're 802 * not crossing its upper bound, as the original dev->num_tc can be updated in 803 * the meantime. 804 */ 805 struct xps_dev_maps { 806 struct rcu_head rcu; 807 unsigned int nr_ids; 808 s16 num_tc; 809 struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */ 810 }; 811 812 #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ 813 (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *))) 814 815 #define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\ 816 (_rxqs * (_tcs) * sizeof(struct xps_map *))) 817 818 #endif /* CONFIG_XPS */ 819 820 #define TC_MAX_QUEUE 16 821 #define TC_BITMASK 15 822 /* HW offloaded queuing disciplines txq count and offset maps */ 823 struct netdev_tc_txq { 824 u16 count; 825 u16 offset; 826 }; 827 828 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 829 /* 830 * This structure is to hold information about the device 831 * configured to run FCoE protocol stack. 832 */ 833 struct netdev_fcoe_hbainfo { 834 char manufacturer[64]; 835 char serial_number[64]; 836 char hardware_version[64]; 837 char driver_version[64]; 838 char optionrom_version[64]; 839 char firmware_version[64]; 840 char model[256]; 841 char model_description[256]; 842 }; 843 #endif 844 845 #define MAX_PHYS_ITEM_ID_LEN 32 846 847 /* This structure holds a unique identifier to identify some 848 * physical item (port for example) used by a netdevice. 849 */ 850 struct netdev_phys_item_id { 851 unsigned char id[MAX_PHYS_ITEM_ID_LEN]; 852 unsigned char id_len; 853 }; 854 855 static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, 856 struct netdev_phys_item_id *b) 857 { 858 return a->id_len == b->id_len && 859 memcmp(a->id, b->id, a->id_len) == 0; 860 } 861 862 typedef u16 (*select_queue_fallback_t)(struct net_device *dev, 863 struct sk_buff *skb, 864 struct net_device *sb_dev); 865 866 enum net_device_path_type { 867 DEV_PATH_ETHERNET = 0, 868 DEV_PATH_VLAN, 869 DEV_PATH_BRIDGE, 870 DEV_PATH_PPPOE, 871 DEV_PATH_DSA, 872 DEV_PATH_MTK_WDMA, 873 }; 874 875 struct net_device_path { 876 enum net_device_path_type type; 877 const struct net_device *dev; 878 union { 879 struct { 880 u16 id; 881 __be16 proto; 882 u8 h_dest[ETH_ALEN]; 883 } encap; 884 struct { 885 enum { 886 DEV_PATH_BR_VLAN_KEEP, 887 DEV_PATH_BR_VLAN_TAG, 888 DEV_PATH_BR_VLAN_UNTAG, 889 DEV_PATH_BR_VLAN_UNTAG_HW, 890 } vlan_mode; 891 u16 vlan_id; 892 __be16 vlan_proto; 893 } bridge; 894 struct { 895 int port; 896 u16 proto; 897 } dsa; 898 struct { 899 u8 wdma_idx; 900 u8 queue; 901 u16 wcid; 902 u8 bss; 903 u8 amsdu; 904 } mtk_wdma; 905 }; 906 }; 907 908 #define NET_DEVICE_PATH_STACK_MAX 5 909 #define NET_DEVICE_PATH_VLAN_MAX 2 910 911 struct net_device_path_stack { 912 int num_paths; 913 struct net_device_path path[NET_DEVICE_PATH_STACK_MAX]; 914 }; 915 916 struct net_device_path_ctx { 917 const struct net_device *dev; 918 u8 daddr[ETH_ALEN]; 919 920 int num_vlans; 921 struct { 922 u16 id; 923 __be16 proto; 924 } vlan[NET_DEVICE_PATH_VLAN_MAX]; 925 }; 926 927 enum tc_setup_type { 928 TC_QUERY_CAPS, 929 TC_SETUP_QDISC_MQPRIO, 930 TC_SETUP_CLSU32, 931 TC_SETUP_CLSFLOWER, 932 TC_SETUP_CLSMATCHALL, 933 TC_SETUP_CLSBPF, 934 TC_SETUP_BLOCK, 935 TC_SETUP_QDISC_CBS, 936 TC_SETUP_QDISC_RED, 937 TC_SETUP_QDISC_PRIO, 938 TC_SETUP_QDISC_MQ, 939 TC_SETUP_QDISC_ETF, 940 TC_SETUP_ROOT_QDISC, 941 TC_SETUP_QDISC_GRED, 942 TC_SETUP_QDISC_TAPRIO, 943 TC_SETUP_FT, 944 TC_SETUP_QDISC_ETS, 945 TC_SETUP_QDISC_TBF, 946 TC_SETUP_QDISC_FIFO, 947 TC_SETUP_QDISC_HTB, 948 TC_SETUP_ACT, 949 }; 950 951 /* These structures hold the attributes of bpf state that are being passed 952 * to the netdevice through the bpf op. 953 */ 954 enum bpf_netdev_command { 955 /* Set or clear a bpf program used in the earliest stages of packet 956 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee 957 * is responsible for calling bpf_prog_put on any old progs that are 958 * stored. In case of error, the callee need not release the new prog 959 * reference, but on success it takes ownership and must bpf_prog_put 960 * when it is no longer used. 961 */ 962 XDP_SETUP_PROG, 963 XDP_SETUP_PROG_HW, 964 /* BPF program for offload callbacks, invoked at program load time. */ 965 BPF_OFFLOAD_MAP_ALLOC, 966 BPF_OFFLOAD_MAP_FREE, 967 XDP_SETUP_XSK_POOL, 968 }; 969 970 struct bpf_prog_offload_ops; 971 struct netlink_ext_ack; 972 struct xdp_umem; 973 struct xdp_dev_bulk_queue; 974 struct bpf_xdp_link; 975 976 enum bpf_xdp_mode { 977 XDP_MODE_SKB = 0, 978 XDP_MODE_DRV = 1, 979 XDP_MODE_HW = 2, 980 __MAX_XDP_MODE 981 }; 982 983 struct bpf_xdp_entity { 984 struct bpf_prog *prog; 985 struct bpf_xdp_link *link; 986 }; 987 988 struct netdev_bpf { 989 enum bpf_netdev_command command; 990 union { 991 /* XDP_SETUP_PROG */ 992 struct { 993 u32 flags; 994 struct bpf_prog *prog; 995 struct netlink_ext_ack *extack; 996 }; 997 /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ 998 struct { 999 struct bpf_offloaded_map *offmap; 1000 }; 1001 /* XDP_SETUP_XSK_POOL */ 1002 struct { 1003 struct xsk_buff_pool *pool; 1004 u16 queue_id; 1005 } xsk; 1006 }; 1007 }; 1008 1009 /* Flags for ndo_xsk_wakeup. */ 1010 #define XDP_WAKEUP_RX (1 << 0) 1011 #define XDP_WAKEUP_TX (1 << 1) 1012 1013 #ifdef CONFIG_XFRM_OFFLOAD 1014 struct xfrmdev_ops { 1015 int (*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack); 1016 void (*xdo_dev_state_delete) (struct xfrm_state *x); 1017 void (*xdo_dev_state_free) (struct xfrm_state *x); 1018 bool (*xdo_dev_offload_ok) (struct sk_buff *skb, 1019 struct xfrm_state *x); 1020 void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); 1021 void (*xdo_dev_state_update_stats) (struct xfrm_state *x); 1022 int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack); 1023 void (*xdo_dev_policy_delete) (struct xfrm_policy *x); 1024 void (*xdo_dev_policy_free) (struct xfrm_policy *x); 1025 }; 1026 #endif 1027 1028 struct dev_ifalias { 1029 struct rcu_head rcuhead; 1030 char ifalias[]; 1031 }; 1032 1033 struct devlink; 1034 struct tlsdev_ops; 1035 1036 struct netdev_net_notifier { 1037 struct list_head list; 1038 struct notifier_block *nb; 1039 }; 1040 1041 /* 1042 * This structure defines the management hooks for network devices. 1043 * The following hooks can be defined; unless noted otherwise, they are 1044 * optional and can be filled with a null pointer. 1045 * 1046 * int (*ndo_init)(struct net_device *dev); 1047 * This function is called once when a network device is registered. 1048 * The network device can use this for any late stage initialization 1049 * or semantic validation. It can fail with an error code which will 1050 * be propagated back to register_netdev. 1051 * 1052 * void (*ndo_uninit)(struct net_device *dev); 1053 * This function is called when device is unregistered or when registration 1054 * fails. It is not called if init fails. 1055 * 1056 * int (*ndo_open)(struct net_device *dev); 1057 * This function is called when a network device transitions to the up 1058 * state. 1059 * 1060 * int (*ndo_stop)(struct net_device *dev); 1061 * This function is called when a network device transitions to the down 1062 * state. 1063 * 1064 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, 1065 * struct net_device *dev); 1066 * Called when a packet needs to be transmitted. 1067 * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop 1068 * the queue before that can happen; it's for obsolete devices and weird 1069 * corner cases, but the stack really does a non-trivial amount 1070 * of useless work if you return NETDEV_TX_BUSY. 1071 * Required; cannot be NULL. 1072 * 1073 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, 1074 * struct net_device *dev 1075 * netdev_features_t features); 1076 * Called by core transmit path to determine if device is capable of 1077 * performing offload operations on a given packet. This is to give 1078 * the device an opportunity to implement any restrictions that cannot 1079 * be otherwise expressed by feature flags. The check is called with 1080 * the set of features that the stack has calculated and it returns 1081 * those the driver believes to be appropriate. 1082 * 1083 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, 1084 * struct net_device *sb_dev); 1085 * Called to decide which queue to use when device supports multiple 1086 * transmit queues. 1087 * 1088 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); 1089 * This function is called to allow device receiver to make 1090 * changes to configuration when multicast or promiscuous is enabled. 1091 * 1092 * void (*ndo_set_rx_mode)(struct net_device *dev); 1093 * This function is called device changes address list filtering. 1094 * If driver handles unicast address filtering, it should set 1095 * IFF_UNICAST_FLT in its priv_flags. 1096 * 1097 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); 1098 * This function is called when the Media Access Control address 1099 * needs to be changed. If this interface is not defined, the 1100 * MAC address can not be changed. 1101 * 1102 * int (*ndo_validate_addr)(struct net_device *dev); 1103 * Test if Media Access Control address is valid for the device. 1104 * 1105 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); 1106 * Old-style ioctl entry point. This is used internally by the 1107 * ieee802154 subsystem but is no longer called by the device 1108 * ioctl handler. 1109 * 1110 * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd); 1111 * Used by the bonding driver for its device specific ioctls: 1112 * SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE, 1113 * SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY 1114 * 1115 * * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); 1116 * Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG, 1117 * SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP. 1118 * 1119 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); 1120 * Used to set network devices bus interface parameters. This interface 1121 * is retained for legacy reasons; new devices should use the bus 1122 * interface (PCI) for low level management. 1123 * 1124 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); 1125 * Called when a user wants to change the Maximum Transfer Unit 1126 * of a device. 1127 * 1128 * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue); 1129 * Callback used when the transmitter has not made any progress 1130 * for dev->watchdog ticks. 1131 * 1132 * void (*ndo_get_stats64)(struct net_device *dev, 1133 * struct rtnl_link_stats64 *storage); 1134 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 1135 * Called when a user wants to get the network device usage 1136 * statistics. Drivers must do one of the following: 1137 * 1. Define @ndo_get_stats64 to fill in a zero-initialised 1138 * rtnl_link_stats64 structure passed by the caller. 1139 * 2. Define @ndo_get_stats to update a net_device_stats structure 1140 * (which should normally be dev->stats) and return a pointer to 1141 * it. The structure may be changed asynchronously only if each 1142 * field is written atomically. 1143 * 3. Update dev->stats asynchronously and atomically, and define 1144 * neither operation. 1145 * 1146 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id) 1147 * Return true if this device supports offload stats of this attr_id. 1148 * 1149 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, 1150 * void *attr_data) 1151 * Get statistics for offload operations by attr_id. Write it into the 1152 * attr_data pointer. 1153 * 1154 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); 1155 * If device supports VLAN filtering this function is called when a 1156 * VLAN id is registered. 1157 * 1158 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); 1159 * If device supports VLAN filtering this function is called when a 1160 * VLAN id is unregistered. 1161 * 1162 * void (*ndo_poll_controller)(struct net_device *dev); 1163 * 1164 * SR-IOV management functions. 1165 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); 1166 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, 1167 * u8 qos, __be16 proto); 1168 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, 1169 * int max_tx_rate); 1170 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); 1171 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting); 1172 * int (*ndo_get_vf_config)(struct net_device *dev, 1173 * int vf, struct ifla_vf_info *ivf); 1174 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); 1175 * int (*ndo_set_vf_port)(struct net_device *dev, int vf, 1176 * struct nlattr *port[]); 1177 * 1178 * Enable or disable the VF ability to query its RSS Redirection Table and 1179 * Hash Key. This is needed since on some devices VF share this information 1180 * with PF and querying it may introduce a theoretical security risk. 1181 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); 1182 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); 1183 * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type, 1184 * void *type_data); 1185 * Called to setup any 'tc' scheduler, classifier or action on @dev. 1186 * This is always called from the stack with the rtnl lock held and netif 1187 * tx queues stopped. This allows the netdevice to perform queue 1188 * management safely. 1189 * 1190 * Fiber Channel over Ethernet (FCoE) offload functions. 1191 * int (*ndo_fcoe_enable)(struct net_device *dev); 1192 * Called when the FCoE protocol stack wants to start using LLD for FCoE 1193 * so the underlying device can perform whatever needed configuration or 1194 * initialization to support acceleration of FCoE traffic. 1195 * 1196 * int (*ndo_fcoe_disable)(struct net_device *dev); 1197 * Called when the FCoE protocol stack wants to stop using LLD for FCoE 1198 * so the underlying device can perform whatever needed clean-ups to 1199 * stop supporting acceleration of FCoE traffic. 1200 * 1201 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, 1202 * struct scatterlist *sgl, unsigned int sgc); 1203 * Called when the FCoE Initiator wants to initialize an I/O that 1204 * is a possible candidate for Direct Data Placement (DDP). The LLD can 1205 * perform necessary setup and returns 1 to indicate the device is set up 1206 * successfully to perform DDP on this I/O, otherwise this returns 0. 1207 * 1208 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); 1209 * Called when the FCoE Initiator/Target is done with the DDPed I/O as 1210 * indicated by the FC exchange id 'xid', so the underlying device can 1211 * clean up and reuse resources for later DDP requests. 1212 * 1213 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, 1214 * struct scatterlist *sgl, unsigned int sgc); 1215 * Called when the FCoE Target wants to initialize an I/O that 1216 * is a possible candidate for Direct Data Placement (DDP). The LLD can 1217 * perform necessary setup and returns 1 to indicate the device is set up 1218 * successfully to perform DDP on this I/O, otherwise this returns 0. 1219 * 1220 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, 1221 * struct netdev_fcoe_hbainfo *hbainfo); 1222 * Called when the FCoE Protocol stack wants information on the underlying 1223 * device. This information is utilized by the FCoE protocol stack to 1224 * register attributes with Fiber Channel management service as per the 1225 * FC-GS Fabric Device Management Information(FDMI) specification. 1226 * 1227 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); 1228 * Called when the underlying device wants to override default World Wide 1229 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own 1230 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE 1231 * protocol stack to use. 1232 * 1233 * RFS acceleration. 1234 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, 1235 * u16 rxq_index, u32 flow_id); 1236 * Set hardware filter for RFS. rxq_index is the target queue index; 1237 * flow_id is a flow ID to be passed to rps_may_expire_flow() later. 1238 * Return the filter ID on success, or a negative error code. 1239 * 1240 * Slave management functions (for bridge, bonding, etc). 1241 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev); 1242 * Called to make another netdev an underling. 1243 * 1244 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); 1245 * Called to release previously enslaved netdev. 1246 * 1247 * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev, 1248 * struct sk_buff *skb, 1249 * bool all_slaves); 1250 * Get the xmit slave of master device. If all_slaves is true, function 1251 * assume all the slaves can transmit. 1252 * 1253 * Feature/offload setting functions. 1254 * netdev_features_t (*ndo_fix_features)(struct net_device *dev, 1255 * netdev_features_t features); 1256 * Adjusts the requested feature flags according to device-specific 1257 * constraints, and returns the resulting flags. Must not modify 1258 * the device state. 1259 * 1260 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); 1261 * Called to update device configuration to new features. Passed 1262 * feature set might be less than what was returned by ndo_fix_features()). 1263 * Must return >0 or -errno if it changed dev->features itself. 1264 * 1265 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], 1266 * struct net_device *dev, 1267 * const unsigned char *addr, u16 vid, u16 flags, 1268 * bool *notified, struct netlink_ext_ack *extack); 1269 * Adds an FDB entry to dev for addr. 1270 * Callee shall set *notified to true if it sent any appropriate 1271 * notification(s). Otherwise core will send a generic one. 1272 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], 1273 * struct net_device *dev, 1274 * const unsigned char *addr, u16 vid 1275 * bool *notified, struct netlink_ext_ack *extack); 1276 * Deletes the FDB entry from dev corresponding to addr. 1277 * Callee shall set *notified to true if it sent any appropriate 1278 * notification(s). Otherwise core will send a generic one. 1279 * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev, 1280 * struct netlink_ext_ack *extack); 1281 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, 1282 * struct net_device *dev, struct net_device *filter_dev, 1283 * int *idx) 1284 * Used to add FDB entries to dump requests. Implementers should add 1285 * entries to skb and update idx with the number of entries. 1286 * 1287 * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[], 1288 * u16 nlmsg_flags, struct netlink_ext_ack *extack); 1289 * Adds an MDB entry to dev. 1290 * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[], 1291 * struct netlink_ext_ack *extack); 1292 * Deletes the MDB entry from dev. 1293 * int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[], 1294 * struct netlink_ext_ack *extack); 1295 * Bulk deletes MDB entries from dev. 1296 * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb, 1297 * struct netlink_callback *cb); 1298 * Dumps MDB entries from dev. The first argument (marker) in the netlink 1299 * callback is used by core rtnetlink code. 1300 * 1301 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, 1302 * u16 flags, struct netlink_ext_ack *extack) 1303 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, 1304 * struct net_device *dev, u32 filter_mask, 1305 * int nlflags) 1306 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, 1307 * u16 flags); 1308 * 1309 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); 1310 * Called to change device carrier. Soft-devices (like dummy, team, etc) 1311 * which do not represent real hardware may define this to allow their 1312 * userspace components to manage their virtual carrier state. Devices 1313 * that determine carrier state from physical hardware properties (eg 1314 * network cables) or protocol-dependent mechanisms (eg 1315 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. 1316 * 1317 * int (*ndo_get_phys_port_id)(struct net_device *dev, 1318 * struct netdev_phys_item_id *ppid); 1319 * Called to get ID of physical port of this device. If driver does 1320 * not implement this, it is assumed that the hw is not able to have 1321 * multiple net devices on single physical port. 1322 * 1323 * int (*ndo_get_port_parent_id)(struct net_device *dev, 1324 * struct netdev_phys_item_id *ppid) 1325 * Called to get the parent ID of the physical port of this device. 1326 * 1327 * void* (*ndo_dfwd_add_station)(struct net_device *pdev, 1328 * struct net_device *dev) 1329 * Called by upper layer devices to accelerate switching or other 1330 * station functionality into hardware. 'pdev is the lowerdev 1331 * to use for the offload and 'dev' is the net device that will 1332 * back the offload. Returns a pointer to the private structure 1333 * the upper layer will maintain. 1334 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv) 1335 * Called by upper layer device to delete the station created 1336 * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing 1337 * the station and priv is the structure returned by the add 1338 * operation. 1339 * int (*ndo_set_tx_maxrate)(struct net_device *dev, 1340 * int queue_index, u32 maxrate); 1341 * Called when a user wants to set a max-rate limitation of specific 1342 * TX queue. 1343 * int (*ndo_get_iflink)(const struct net_device *dev); 1344 * Called to get the iflink value of this device. 1345 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb); 1346 * This function is used to get egress tunnel information for given skb. 1347 * This is useful for retrieving outer tunnel header parameters while 1348 * sampling packet. 1349 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom); 1350 * This function is used to specify the headroom that the skb must 1351 * consider when allocation skb during packet reception. Setting 1352 * appropriate rx headroom value allows avoiding skb head copy on 1353 * forward. Setting a negative value resets the rx headroom to the 1354 * default value. 1355 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf); 1356 * This function is used to set or query state related to XDP on the 1357 * netdevice and manage BPF offload. See definition of 1358 * enum bpf_netdev_command for details. 1359 * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp, 1360 * u32 flags); 1361 * This function is used to submit @n XDP packets for transmit on a 1362 * netdevice. Returns number of frames successfully transmitted, frames 1363 * that got dropped are freed/returned via xdp_return_frame(). 1364 * Returns negative number, means general error invoking ndo, meaning 1365 * no frames were xmit'ed and core-caller will free all frames. 1366 * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev, 1367 * struct xdp_buff *xdp); 1368 * Get the xmit slave of master device based on the xdp_buff. 1369 * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); 1370 * This function is used to wake up the softirq, ksoftirqd or kthread 1371 * responsible for sending and/or receiving packets on a specific 1372 * queue id bound to an AF_XDP socket. The flags field specifies if 1373 * only RX, only Tx, or both should be woken up using the flags 1374 * XDP_WAKEUP_RX and XDP_WAKEUP_TX. 1375 * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p, 1376 * int cmd); 1377 * Add, change, delete or get information on an IPv4 tunnel. 1378 * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev); 1379 * If a device is paired with a peer device, return the peer instance. 1380 * The caller must be under RCU read context. 1381 * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path); 1382 * Get the forwarding path to reach the real device from the HW destination address 1383 * ktime_t (*ndo_get_tstamp)(struct net_device *dev, 1384 * const struct skb_shared_hwtstamps *hwtstamps, 1385 * bool cycles); 1386 * Get hardware timestamp based on normal/adjustable time or free running 1387 * cycle counter. This function is required if physical clock supports a 1388 * free running cycle counter. 1389 * 1390 * int (*ndo_hwtstamp_get)(struct net_device *dev, 1391 * struct kernel_hwtstamp_config *kernel_config); 1392 * Get the currently configured hardware timestamping parameters for the 1393 * NIC device. 1394 * 1395 * int (*ndo_hwtstamp_set)(struct net_device *dev, 1396 * struct kernel_hwtstamp_config *kernel_config, 1397 * struct netlink_ext_ack *extack); 1398 * Change the hardware timestamping parameters for NIC device. 1399 */ 1400 struct net_device_ops { 1401 int (*ndo_init)(struct net_device *dev); 1402 void (*ndo_uninit)(struct net_device *dev); 1403 int (*ndo_open)(struct net_device *dev); 1404 int (*ndo_stop)(struct net_device *dev); 1405 netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, 1406 struct net_device *dev); 1407 netdev_features_t (*ndo_features_check)(struct sk_buff *skb, 1408 struct net_device *dev, 1409 netdev_features_t features); 1410 u16 (*ndo_select_queue)(struct net_device *dev, 1411 struct sk_buff *skb, 1412 struct net_device *sb_dev); 1413 void (*ndo_change_rx_flags)(struct net_device *dev, 1414 int flags); 1415 void (*ndo_set_rx_mode)(struct net_device *dev); 1416 int (*ndo_set_mac_address)(struct net_device *dev, 1417 void *addr); 1418 int (*ndo_validate_addr)(struct net_device *dev); 1419 int (*ndo_do_ioctl)(struct net_device *dev, 1420 struct ifreq *ifr, int cmd); 1421 int (*ndo_eth_ioctl)(struct net_device *dev, 1422 struct ifreq *ifr, int cmd); 1423 int (*ndo_siocbond)(struct net_device *dev, 1424 struct ifreq *ifr, int cmd); 1425 int (*ndo_siocwandev)(struct net_device *dev, 1426 struct if_settings *ifs); 1427 int (*ndo_siocdevprivate)(struct net_device *dev, 1428 struct ifreq *ifr, 1429 void __user *data, int cmd); 1430 int (*ndo_set_config)(struct net_device *dev, 1431 struct ifmap *map); 1432 int (*ndo_change_mtu)(struct net_device *dev, 1433 int new_mtu); 1434 int (*ndo_neigh_setup)(struct net_device *dev, 1435 struct neigh_parms *); 1436 void (*ndo_tx_timeout) (struct net_device *dev, 1437 unsigned int txqueue); 1438 1439 void (*ndo_get_stats64)(struct net_device *dev, 1440 struct rtnl_link_stats64 *storage); 1441 bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); 1442 int (*ndo_get_offload_stats)(int attr_id, 1443 const struct net_device *dev, 1444 void *attr_data); 1445 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 1446 1447 int (*ndo_vlan_rx_add_vid)(struct net_device *dev, 1448 __be16 proto, u16 vid); 1449 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, 1450 __be16 proto, u16 vid); 1451 #ifdef CONFIG_NET_POLL_CONTROLLER 1452 void (*ndo_poll_controller)(struct net_device *dev); 1453 int (*ndo_netpoll_setup)(struct net_device *dev); 1454 void (*ndo_netpoll_cleanup)(struct net_device *dev); 1455 #endif 1456 int (*ndo_set_vf_mac)(struct net_device *dev, 1457 int queue, u8 *mac); 1458 int (*ndo_set_vf_vlan)(struct net_device *dev, 1459 int queue, u16 vlan, 1460 u8 qos, __be16 proto); 1461 int (*ndo_set_vf_rate)(struct net_device *dev, 1462 int vf, int min_tx_rate, 1463 int max_tx_rate); 1464 int (*ndo_set_vf_spoofchk)(struct net_device *dev, 1465 int vf, bool setting); 1466 int (*ndo_set_vf_trust)(struct net_device *dev, 1467 int vf, bool setting); 1468 int (*ndo_get_vf_config)(struct net_device *dev, 1469 int vf, 1470 struct ifla_vf_info *ivf); 1471 int (*ndo_set_vf_link_state)(struct net_device *dev, 1472 int vf, int link_state); 1473 int (*ndo_get_vf_stats)(struct net_device *dev, 1474 int vf, 1475 struct ifla_vf_stats 1476 *vf_stats); 1477 int (*ndo_set_vf_port)(struct net_device *dev, 1478 int vf, 1479 struct nlattr *port[]); 1480 int (*ndo_get_vf_port)(struct net_device *dev, 1481 int vf, struct sk_buff *skb); 1482 int (*ndo_get_vf_guid)(struct net_device *dev, 1483 int vf, 1484 struct ifla_vf_guid *node_guid, 1485 struct ifla_vf_guid *port_guid); 1486 int (*ndo_set_vf_guid)(struct net_device *dev, 1487 int vf, u64 guid, 1488 int guid_type); 1489 int (*ndo_set_vf_rss_query_en)( 1490 struct net_device *dev, 1491 int vf, bool setting); 1492 int (*ndo_setup_tc)(struct net_device *dev, 1493 enum tc_setup_type type, 1494 void *type_data); 1495 #if IS_ENABLED(CONFIG_FCOE) 1496 int (*ndo_fcoe_enable)(struct net_device *dev); 1497 int (*ndo_fcoe_disable)(struct net_device *dev); 1498 int (*ndo_fcoe_ddp_setup)(struct net_device *dev, 1499 u16 xid, 1500 struct scatterlist *sgl, 1501 unsigned int sgc); 1502 int (*ndo_fcoe_ddp_done)(struct net_device *dev, 1503 u16 xid); 1504 int (*ndo_fcoe_ddp_target)(struct net_device *dev, 1505 u16 xid, 1506 struct scatterlist *sgl, 1507 unsigned int sgc); 1508 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, 1509 struct netdev_fcoe_hbainfo *hbainfo); 1510 #endif 1511 1512 #if IS_ENABLED(CONFIG_LIBFCOE) 1513 #define NETDEV_FCOE_WWNN 0 1514 #define NETDEV_FCOE_WWPN 1 1515 int (*ndo_fcoe_get_wwn)(struct net_device *dev, 1516 u64 *wwn, int type); 1517 #endif 1518 1519 #ifdef CONFIG_RFS_ACCEL 1520 int (*ndo_rx_flow_steer)(struct net_device *dev, 1521 const struct sk_buff *skb, 1522 u16 rxq_index, 1523 u32 flow_id); 1524 #endif 1525 int (*ndo_add_slave)(struct net_device *dev, 1526 struct net_device *slave_dev, 1527 struct netlink_ext_ack *extack); 1528 int (*ndo_del_slave)(struct net_device *dev, 1529 struct net_device *slave_dev); 1530 struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev, 1531 struct sk_buff *skb, 1532 bool all_slaves); 1533 struct net_device* (*ndo_sk_get_lower_dev)(struct net_device *dev, 1534 struct sock *sk); 1535 netdev_features_t (*ndo_fix_features)(struct net_device *dev, 1536 netdev_features_t features); 1537 int (*ndo_set_features)(struct net_device *dev, 1538 netdev_features_t features); 1539 int (*ndo_neigh_construct)(struct net_device *dev, 1540 struct neighbour *n); 1541 void (*ndo_neigh_destroy)(struct net_device *dev, 1542 struct neighbour *n); 1543 1544 int (*ndo_fdb_add)(struct ndmsg *ndm, 1545 struct nlattr *tb[], 1546 struct net_device *dev, 1547 const unsigned char *addr, 1548 u16 vid, 1549 u16 flags, 1550 bool *notified, 1551 struct netlink_ext_ack *extack); 1552 int (*ndo_fdb_del)(struct ndmsg *ndm, 1553 struct nlattr *tb[], 1554 struct net_device *dev, 1555 const unsigned char *addr, 1556 u16 vid, 1557 bool *notified, 1558 struct netlink_ext_ack *extack); 1559 int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, 1560 struct net_device *dev, 1561 struct netlink_ext_ack *extack); 1562 int (*ndo_fdb_dump)(struct sk_buff *skb, 1563 struct netlink_callback *cb, 1564 struct net_device *dev, 1565 struct net_device *filter_dev, 1566 int *idx); 1567 int (*ndo_fdb_get)(struct sk_buff *skb, 1568 struct nlattr *tb[], 1569 struct net_device *dev, 1570 const unsigned char *addr, 1571 u16 vid, u32 portid, u32 seq, 1572 struct netlink_ext_ack *extack); 1573 int (*ndo_mdb_add)(struct net_device *dev, 1574 struct nlattr *tb[], 1575 u16 nlmsg_flags, 1576 struct netlink_ext_ack *extack); 1577 int (*ndo_mdb_del)(struct net_device *dev, 1578 struct nlattr *tb[], 1579 struct netlink_ext_ack *extack); 1580 int (*ndo_mdb_del_bulk)(struct net_device *dev, 1581 struct nlattr *tb[], 1582 struct netlink_ext_ack *extack); 1583 int (*ndo_mdb_dump)(struct net_device *dev, 1584 struct sk_buff *skb, 1585 struct netlink_callback *cb); 1586 int (*ndo_mdb_get)(struct net_device *dev, 1587 struct nlattr *tb[], u32 portid, 1588 u32 seq, 1589 struct netlink_ext_ack *extack); 1590 int (*ndo_bridge_setlink)(struct net_device *dev, 1591 struct nlmsghdr *nlh, 1592 u16 flags, 1593 struct netlink_ext_ack *extack); 1594 int (*ndo_bridge_getlink)(struct sk_buff *skb, 1595 u32 pid, u32 seq, 1596 struct net_device *dev, 1597 u32 filter_mask, 1598 int nlflags); 1599 int (*ndo_bridge_dellink)(struct net_device *dev, 1600 struct nlmsghdr *nlh, 1601 u16 flags); 1602 int (*ndo_change_carrier)(struct net_device *dev, 1603 bool new_carrier); 1604 int (*ndo_get_phys_port_id)(struct net_device *dev, 1605 struct netdev_phys_item_id *ppid); 1606 int (*ndo_get_port_parent_id)(struct net_device *dev, 1607 struct netdev_phys_item_id *ppid); 1608 int (*ndo_get_phys_port_name)(struct net_device *dev, 1609 char *name, size_t len); 1610 void* (*ndo_dfwd_add_station)(struct net_device *pdev, 1611 struct net_device *dev); 1612 void (*ndo_dfwd_del_station)(struct net_device *pdev, 1613 void *priv); 1614 1615 int (*ndo_set_tx_maxrate)(struct net_device *dev, 1616 int queue_index, 1617 u32 maxrate); 1618 int (*ndo_get_iflink)(const struct net_device *dev); 1619 int (*ndo_fill_metadata_dst)(struct net_device *dev, 1620 struct sk_buff *skb); 1621 void (*ndo_set_rx_headroom)(struct net_device *dev, 1622 int needed_headroom); 1623 int (*ndo_bpf)(struct net_device *dev, 1624 struct netdev_bpf *bpf); 1625 int (*ndo_xdp_xmit)(struct net_device *dev, int n, 1626 struct xdp_frame **xdp, 1627 u32 flags); 1628 struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev, 1629 struct xdp_buff *xdp); 1630 int (*ndo_xsk_wakeup)(struct net_device *dev, 1631 u32 queue_id, u32 flags); 1632 int (*ndo_tunnel_ctl)(struct net_device *dev, 1633 struct ip_tunnel_parm_kern *p, 1634 int cmd); 1635 struct net_device * (*ndo_get_peer_dev)(struct net_device *dev); 1636 int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, 1637 struct net_device_path *path); 1638 ktime_t (*ndo_get_tstamp)(struct net_device *dev, 1639 const struct skb_shared_hwtstamps *hwtstamps, 1640 bool cycles); 1641 int (*ndo_hwtstamp_get)(struct net_device *dev, 1642 struct kernel_hwtstamp_config *kernel_config); 1643 int (*ndo_hwtstamp_set)(struct net_device *dev, 1644 struct kernel_hwtstamp_config *kernel_config, 1645 struct netlink_ext_ack *extack); 1646 1647 #if IS_ENABLED(CONFIG_NET_SHAPER) 1648 /** 1649 * @net_shaper_ops: Device shaping offload operations 1650 * see include/net/net_shapers.h 1651 */ 1652 const struct net_shaper_ops *net_shaper_ops; 1653 #endif 1654 }; 1655 1656 /** 1657 * enum netdev_priv_flags - &struct net_device priv_flags 1658 * 1659 * These are the &struct net_device, they are only set internally 1660 * by drivers and used in the kernel. These flags are invisible to 1661 * userspace; this means that the order of these flags can change 1662 * during any kernel release. 1663 * 1664 * You should add bitfield booleans after either net_device::priv_flags 1665 * (hotpath) or ::threaded (slowpath) instead of extending these flags. 1666 * 1667 * @IFF_802_1Q_VLAN: 802.1Q VLAN device 1668 * @IFF_EBRIDGE: Ethernet bridging device 1669 * @IFF_BONDING: bonding master or slave 1670 * @IFF_ISATAP: ISATAP interface (RFC4214) 1671 * @IFF_WAN_HDLC: WAN HDLC device 1672 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to 1673 * release skb->dst 1674 * @IFF_DONT_BRIDGE: disallow bridging this ether dev 1675 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time 1676 * @IFF_MACVLAN_PORT: device used as macvlan port 1677 * @IFF_BRIDGE_PORT: device used as bridge port 1678 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port 1679 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit 1680 * @IFF_UNICAST_FLT: Supports unicast filtering 1681 * @IFF_TEAM_PORT: device used as team port 1682 * @IFF_SUPP_NOFCS: device supports sending custom FCS 1683 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address 1684 * change when it's running 1685 * @IFF_MACVLAN: Macvlan device 1686 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account 1687 * underlying stacked devices 1688 * @IFF_L3MDEV_MASTER: device is an L3 master device 1689 * @IFF_NO_QUEUE: device can run without qdisc attached 1690 * @IFF_OPENVSWITCH: device is a Open vSwitch master 1691 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device 1692 * @IFF_TEAM: device is a team device 1693 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured 1694 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external 1695 * entity (i.e. the master device for bridged veth) 1696 * @IFF_MACSEC: device is a MACsec device 1697 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook 1698 * @IFF_FAILOVER: device is a failover master device 1699 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device 1700 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device 1701 * @IFF_NO_ADDRCONF: prevent ipv6 addrconf 1702 * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with 1703 * skb_headlen(skb) == 0 (data starts from frag0) 1704 */ 1705 enum netdev_priv_flags { 1706 IFF_802_1Q_VLAN = 1<<0, 1707 IFF_EBRIDGE = 1<<1, 1708 IFF_BONDING = 1<<2, 1709 IFF_ISATAP = 1<<3, 1710 IFF_WAN_HDLC = 1<<4, 1711 IFF_XMIT_DST_RELEASE = 1<<5, 1712 IFF_DONT_BRIDGE = 1<<6, 1713 IFF_DISABLE_NETPOLL = 1<<7, 1714 IFF_MACVLAN_PORT = 1<<8, 1715 IFF_BRIDGE_PORT = 1<<9, 1716 IFF_OVS_DATAPATH = 1<<10, 1717 IFF_TX_SKB_SHARING = 1<<11, 1718 IFF_UNICAST_FLT = 1<<12, 1719 IFF_TEAM_PORT = 1<<13, 1720 IFF_SUPP_NOFCS = 1<<14, 1721 IFF_LIVE_ADDR_CHANGE = 1<<15, 1722 IFF_MACVLAN = 1<<16, 1723 IFF_XMIT_DST_RELEASE_PERM = 1<<17, 1724 IFF_L3MDEV_MASTER = 1<<18, 1725 IFF_NO_QUEUE = 1<<19, 1726 IFF_OPENVSWITCH = 1<<20, 1727 IFF_L3MDEV_SLAVE = 1<<21, 1728 IFF_TEAM = 1<<22, 1729 IFF_RXFH_CONFIGURED = 1<<23, 1730 IFF_PHONY_HEADROOM = 1<<24, 1731 IFF_MACSEC = 1<<25, 1732 IFF_NO_RX_HANDLER = 1<<26, 1733 IFF_FAILOVER = 1<<27, 1734 IFF_FAILOVER_SLAVE = 1<<28, 1735 IFF_L3MDEV_RX_HANDLER = 1<<29, 1736 IFF_NO_ADDRCONF = BIT_ULL(30), 1737 IFF_TX_SKB_NO_LINEAR = BIT_ULL(31), 1738 }; 1739 1740 /* Specifies the type of the struct net_device::ml_priv pointer */ 1741 enum netdev_ml_priv_type { 1742 ML_PRIV_NONE, 1743 ML_PRIV_CAN, 1744 }; 1745 1746 enum netdev_stat_type { 1747 NETDEV_PCPU_STAT_NONE, 1748 NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */ 1749 NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */ 1750 NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */ 1751 }; 1752 1753 enum netdev_reg_state { 1754 NETREG_UNINITIALIZED = 0, 1755 NETREG_REGISTERED, /* completed register_netdevice */ 1756 NETREG_UNREGISTERING, /* called unregister_netdevice */ 1757 NETREG_UNREGISTERED, /* completed unregister todo */ 1758 NETREG_RELEASED, /* called free_netdev */ 1759 NETREG_DUMMY, /* dummy device for NAPI poll */ 1760 }; 1761 1762 /** 1763 * struct net_device - The DEVICE structure. 1764 * 1765 * Actually, this whole structure is a big mistake. It mixes I/O 1766 * data with strictly "high-level" data, and it has to know about 1767 * almost every data structure used in the INET module. 1768 * 1769 * @priv_flags: flags invisible to userspace defined as bits, see 1770 * enum netdev_priv_flags for the definitions 1771 * @lltx: device supports lockless Tx. Deprecated for real HW 1772 * drivers. Mainly used by logical interfaces, such as 1773 * bonding and tunnels 1774 * 1775 * @name: This is the first field of the "visible" part of this structure 1776 * (i.e. as seen by users in the "Space.c" file). It is the name 1777 * of the interface. 1778 * 1779 * @name_node: Name hashlist node 1780 * @ifalias: SNMP alias 1781 * @mem_end: Shared memory end 1782 * @mem_start: Shared memory start 1783 * @base_addr: Device I/O address 1784 * @irq: Device IRQ number 1785 * 1786 * @state: Generic network queuing layer state, see netdev_state_t 1787 * @dev_list: The global list of network devices 1788 * @napi_list: List entry used for polling NAPI devices 1789 * @unreg_list: List entry when we are unregistering the 1790 * device; see the function unregister_netdev 1791 * @close_list: List entry used when we are closing the device 1792 * @ptype_all: Device-specific packet handlers for all protocols 1793 * @ptype_specific: Device-specific, protocol-specific packet handlers 1794 * 1795 * @adj_list: Directly linked devices, like slaves for bonding 1796 * @features: Currently active device features 1797 * @hw_features: User-changeable features 1798 * 1799 * @wanted_features: User-requested features 1800 * @vlan_features: Mask of features inheritable by VLAN devices 1801 * 1802 * @hw_enc_features: Mask of features inherited by encapsulating devices 1803 * This field indicates what encapsulation 1804 * offloads the hardware is capable of doing, 1805 * and drivers will need to set them appropriately. 1806 * 1807 * @mpls_features: Mask of features inheritable by MPLS 1808 * @gso_partial_features: value(s) from NETIF_F_GSO\* 1809 * 1810 * @ifindex: interface index 1811 * @group: The group the device belongs to 1812 * 1813 * @stats: Statistics struct, which was left as a legacy, use 1814 * rtnl_link_stats64 instead 1815 * 1816 * @core_stats: core networking counters, 1817 * do not use this in drivers 1818 * @carrier_up_count: Number of times the carrier has been up 1819 * @carrier_down_count: Number of times the carrier has been down 1820 * 1821 * @wireless_handlers: List of functions to handle Wireless Extensions, 1822 * instead of ioctl, 1823 * see <net/iw_handler.h> for details. 1824 * 1825 * @netdev_ops: Includes several pointers to callbacks, 1826 * if one wants to override the ndo_*() functions 1827 * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks. 1828 * @xsk_tx_metadata_ops: Includes pointers to AF_XDP TX metadata callbacks. 1829 * @ethtool_ops: Management operations 1830 * @l3mdev_ops: Layer 3 master device operations 1831 * @ndisc_ops: Includes callbacks for different IPv6 neighbour 1832 * discovery handling. Necessary for e.g. 6LoWPAN. 1833 * @xfrmdev_ops: Transformation offload operations 1834 * @tlsdev_ops: Transport Layer Security offload operations 1835 * @header_ops: Includes callbacks for creating,parsing,caching,etc 1836 * of Layer 2 headers. 1837 * 1838 * @flags: Interface flags (a la BSD) 1839 * @xdp_features: XDP capability supported by the device 1840 * @gflags: Global flags ( kept as legacy ) 1841 * @priv_len: Size of the ->priv flexible array 1842 * @priv: Flexible array containing private data 1843 * @operstate: RFC2863 operstate 1844 * @link_mode: Mapping policy to operstate 1845 * @if_port: Selectable AUI, TP, ... 1846 * @dma: DMA channel 1847 * @mtu: Interface MTU value 1848 * @min_mtu: Interface Minimum MTU value 1849 * @max_mtu: Interface Maximum MTU value 1850 * @type: Interface hardware type 1851 * @hard_header_len: Maximum hardware header length. 1852 * @min_header_len: Minimum hardware header length 1853 * 1854 * @needed_headroom: Extra headroom the hardware may need, but not in all 1855 * cases can this be guaranteed 1856 * @needed_tailroom: Extra tailroom the hardware may need, but not in all 1857 * cases can this be guaranteed. Some cases also use 1858 * LL_MAX_HEADER instead to allocate the skb 1859 * 1860 * interface address info: 1861 * 1862 * @perm_addr: Permanent hw address 1863 * @addr_assign_type: Hw address assignment type 1864 * @addr_len: Hardware address length 1865 * @upper_level: Maximum depth level of upper devices. 1866 * @lower_level: Maximum depth level of lower devices. 1867 * @neigh_priv_len: Used in neigh_alloc() 1868 * @dev_id: Used to differentiate devices that share 1869 * the same link layer address 1870 * @dev_port: Used to differentiate devices that share 1871 * the same function 1872 * @addr_list_lock: XXX: need comments on this one 1873 * @name_assign_type: network interface name assignment type 1874 * @uc_promisc: Counter that indicates promiscuous mode 1875 * has been enabled due to the need to listen to 1876 * additional unicast addresses in a device that 1877 * does not implement ndo_set_rx_mode() 1878 * @uc: unicast mac addresses 1879 * @mc: multicast mac addresses 1880 * @dev_addrs: list of device hw addresses 1881 * @queues_kset: Group of all Kobjects in the Tx and RX queues 1882 * @promiscuity: Number of times the NIC is told to work in 1883 * promiscuous mode; if it becomes 0 the NIC will 1884 * exit promiscuous mode 1885 * @allmulti: Counter, enables or disables allmulticast mode 1886 * 1887 * @vlan_info: VLAN info 1888 * @dsa_ptr: dsa specific data 1889 * @tipc_ptr: TIPC specific data 1890 * @atalk_ptr: AppleTalk link 1891 * @ip_ptr: IPv4 specific data 1892 * @ip6_ptr: IPv6 specific data 1893 * @ax25_ptr: AX.25 specific data 1894 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering 1895 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network 1896 * device struct 1897 * @mpls_ptr: mpls_dev struct pointer 1898 * @mctp_ptr: MCTP specific data 1899 * 1900 * @dev_addr: Hw address (before bcast, 1901 * because most packets are unicast) 1902 * 1903 * @_rx: Array of RX queues 1904 * @num_rx_queues: Number of RX queues 1905 * allocated at register_netdev() time 1906 * @real_num_rx_queues: Number of RX queues currently active in device 1907 * @xdp_prog: XDP sockets filter program pointer 1908 * 1909 * @rx_handler: handler for received packets 1910 * @rx_handler_data: XXX: need comments on this one 1911 * @tcx_ingress: BPF & clsact qdisc specific data for ingress processing 1912 * @ingress_queue: XXX: need comments on this one 1913 * @nf_hooks_ingress: netfilter hooks executed for ingress packets 1914 * @broadcast: hw bcast address 1915 * 1916 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, 1917 * indexed by RX queue number. Assigned by driver. 1918 * This must only be set if the ndo_rx_flow_steer 1919 * operation is defined 1920 * @index_hlist: Device index hash chain 1921 * 1922 * @_tx: Array of TX queues 1923 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time 1924 * @real_num_tx_queues: Number of TX queues currently active in device 1925 * @qdisc: Root qdisc from userspace point of view 1926 * @tx_queue_len: Max frames per queue allowed 1927 * @tx_global_lock: XXX: need comments on this one 1928 * @xdp_bulkq: XDP device bulk queue 1929 * @xps_maps: all CPUs/RXQs maps for XPS device 1930 * 1931 * @xps_maps: XXX: need comments on this one 1932 * @tcx_egress: BPF & clsact qdisc specific data for egress processing 1933 * @nf_hooks_egress: netfilter hooks executed for egress packets 1934 * @qdisc_hash: qdisc hash table 1935 * @watchdog_timeo: Represents the timeout that is used by 1936 * the watchdog (see dev_watchdog()) 1937 * @watchdog_timer: List of timers 1938 * 1939 * @proto_down_reason: reason a netdev interface is held down 1940 * @pcpu_refcnt: Number of references to this device 1941 * @dev_refcnt: Number of references to this device 1942 * @refcnt_tracker: Tracker directory for tracked references to this device 1943 * @todo_list: Delayed register/unregister 1944 * @link_watch_list: XXX: need comments on this one 1945 * 1946 * @reg_state: Register/unregister state machine 1947 * @dismantle: Device is going to be freed 1948 * @rtnl_link_state: This enum represents the phases of creating 1949 * a new link 1950 * 1951 * @needs_free_netdev: Should unregister perform free_netdev? 1952 * @priv_destructor: Called from unregister 1953 * @npinfo: XXX: need comments on this one 1954 * @nd_net: Network namespace this network device is inside 1955 * 1956 * @ml_priv: Mid-layer private 1957 * @ml_priv_type: Mid-layer private type 1958 * 1959 * @pcpu_stat_type: Type of device statistics which the core should 1960 * allocate/free: none, lstats, tstats, dstats. none 1961 * means the driver is handling statistics allocation/ 1962 * freeing internally. 1963 * @lstats: Loopback statistics: packets, bytes 1964 * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes 1965 * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes 1966 * 1967 * @garp_port: GARP 1968 * @mrp_port: MRP 1969 * 1970 * @dm_private: Drop monitor private 1971 * 1972 * @dev: Class/net/name entry 1973 * @sysfs_groups: Space for optional device, statistics and wireless 1974 * sysfs groups 1975 * 1976 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes 1977 * @rtnl_link_ops: Rtnl_link_ops 1978 * @stat_ops: Optional ops for queue-aware statistics 1979 * @queue_mgmt_ops: Optional ops for queue management 1980 * 1981 * @gso_max_size: Maximum size of generic segmentation offload 1982 * @tso_max_size: Device (as in HW) limit on the max TSO request size 1983 * @gso_max_segs: Maximum number of segments that can be passed to the 1984 * NIC for GSO 1985 * @tso_max_segs: Device (as in HW) limit on the max TSO segment count 1986 * @gso_ipv4_max_size: Maximum size of generic segmentation offload, 1987 * for IPv4. 1988 * 1989 * @dcbnl_ops: Data Center Bridging netlink ops 1990 * @num_tc: Number of traffic classes in the net device 1991 * @tc_to_txq: XXX: need comments on this one 1992 * @prio_tc_map: XXX: need comments on this one 1993 * 1994 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp 1995 * 1996 * @priomap: XXX: need comments on this one 1997 * @link_topo: Physical link topology tracking attached PHYs 1998 * @phydev: Physical device may attach itself 1999 * for hardware timestamping 2000 * @sfp_bus: attached &struct sfp_bus structure. 2001 * 2002 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock 2003 * 2004 * @proto_down: protocol port state information can be sent to the 2005 * switch driver and used to set the phys state of the 2006 * switch port. 2007 * 2008 * @threaded: napi threaded mode is enabled 2009 * 2010 * @irq_affinity_auto: driver wants the core to store and re-assign the IRQ 2011 * affinity. Set by netif_enable_irq_affinity(), then 2012 * the driver must create a persistent napi by 2013 * netif_napi_add_config() and finally bind the napi to 2014 * IRQ (via netif_napi_set_irq()). 2015 * 2016 * @rx_cpu_rmap_auto: driver wants the core to manage the ARFS rmap. 2017 * Set by calling netif_enable_cpu_rmap(). 2018 * 2019 * @see_all_hwtstamp_requests: device wants to see calls to 2020 * ndo_hwtstamp_set() for all timestamp requests 2021 * regardless of source, even if those aren't 2022 * HWTSTAMP_SOURCE_NETDEV 2023 * @change_proto_down: device supports setting carrier via IFLA_PROTO_DOWN 2024 * @netns_immutable: interface can't change network namespaces 2025 * @fcoe_mtu: device supports maximum FCoE MTU, 2158 bytes 2026 * 2027 * @net_notifier_list: List of per-net netdev notifier block 2028 * that follow this device when it is moved 2029 * to another network namespace. 2030 * 2031 * @macsec_ops: MACsec offloading ops 2032 * 2033 * @udp_tunnel_nic_info: static structure describing the UDP tunnel 2034 * offload capabilities of the device 2035 * @udp_tunnel_nic: UDP tunnel offload state 2036 * @ethtool: ethtool related state 2037 * @xdp_state: stores info on attached XDP BPF programs 2038 * 2039 * @nested_level: Used as a parameter of spin_lock_nested() of 2040 * dev->addr_list_lock. 2041 * @unlink_list: As netif_addr_lock() can be called recursively, 2042 * keep a list of interfaces to be deleted. 2043 * @gro_max_size: Maximum size of aggregated packet in generic 2044 * receive offload (GRO) 2045 * @gro_ipv4_max_size: Maximum size of aggregated packet in generic 2046 * receive offload (GRO), for IPv4. 2047 * @xdp_zc_max_segs: Maximum number of segments supported by AF_XDP 2048 * zero copy driver 2049 * 2050 * @dev_addr_shadow: Copy of @dev_addr to catch direct writes. 2051 * @linkwatch_dev_tracker: refcount tracker used by linkwatch. 2052 * @watchdog_dev_tracker: refcount tracker used by watchdog. 2053 * @dev_registered_tracker: tracker for reference held while 2054 * registered 2055 * @offload_xstats_l3: L3 HW stats for this netdevice. 2056 * 2057 * @devlink_port: Pointer to related devlink port structure. 2058 * Assigned by a driver before netdev registration using 2059 * SET_NETDEV_DEVLINK_PORT macro. This pointer is static 2060 * during the time netdevice is registered. 2061 * 2062 * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem, 2063 * where the clock is recovered. 2064 * 2065 * @max_pacing_offload_horizon: max EDT offload horizon in nsec. 2066 * @napi_config: An array of napi_config structures containing per-NAPI 2067 * settings. 2068 * @gro_flush_timeout: timeout for GRO layer in NAPI 2069 * @napi_defer_hard_irqs: If not zero, provides a counter that would 2070 * allow to avoid NIC hard IRQ, on busy queues. 2071 * 2072 * @neighbours: List heads pointing to this device's neighbours' 2073 * dev_list, one per address-family. 2074 * @hwprov: Tracks which PTP performs hardware packet time stamping. 2075 * 2076 * FIXME: cleanup struct net_device such that network protocol info 2077 * moves out. 2078 */ 2079 2080 struct net_device { 2081 /* Cacheline organization can be found documented in 2082 * Documentation/networking/net_cachelines/net_device.rst. 2083 * Please update the document when adding new fields. 2084 */ 2085 2086 /* TX read-mostly hotpath */ 2087 __cacheline_group_begin(net_device_read_tx); 2088 struct_group(priv_flags_fast, 2089 unsigned long priv_flags:32; 2090 unsigned long lltx:1; 2091 ); 2092 const struct net_device_ops *netdev_ops; 2093 const struct header_ops *header_ops; 2094 struct netdev_queue *_tx; 2095 netdev_features_t gso_partial_features; 2096 unsigned int real_num_tx_queues; 2097 unsigned int gso_max_size; 2098 unsigned int gso_ipv4_max_size; 2099 u16 gso_max_segs; 2100 s16 num_tc; 2101 /* Note : dev->mtu is often read without holding a lock. 2102 * Writers usually hold RTNL. 2103 * It is recommended to use READ_ONCE() to annotate the reads, 2104 * and to use WRITE_ONCE() to annotate the writes. 2105 */ 2106 unsigned int mtu; 2107 unsigned short needed_headroom; 2108 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; 2109 #ifdef CONFIG_XPS 2110 struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX]; 2111 #endif 2112 #ifdef CONFIG_NETFILTER_EGRESS 2113 struct nf_hook_entries __rcu *nf_hooks_egress; 2114 #endif 2115 #ifdef CONFIG_NET_XGRESS 2116 struct bpf_mprog_entry __rcu *tcx_egress; 2117 #endif 2118 __cacheline_group_end(net_device_read_tx); 2119 2120 /* TXRX read-mostly hotpath */ 2121 __cacheline_group_begin(net_device_read_txrx); 2122 union { 2123 struct pcpu_lstats __percpu *lstats; 2124 struct pcpu_sw_netstats __percpu *tstats; 2125 struct pcpu_dstats __percpu *dstats; 2126 }; 2127 unsigned long state; 2128 unsigned int flags; 2129 unsigned short hard_header_len; 2130 netdev_features_t features; 2131 struct inet6_dev __rcu *ip6_ptr; 2132 __cacheline_group_end(net_device_read_txrx); 2133 2134 /* RX read-mostly hotpath */ 2135 __cacheline_group_begin(net_device_read_rx); 2136 struct bpf_prog __rcu *xdp_prog; 2137 struct list_head ptype_specific; 2138 int ifindex; 2139 unsigned int real_num_rx_queues; 2140 struct netdev_rx_queue *_rx; 2141 unsigned int gro_max_size; 2142 unsigned int gro_ipv4_max_size; 2143 rx_handler_func_t __rcu *rx_handler; 2144 void __rcu *rx_handler_data; 2145 possible_net_t nd_net; 2146 #ifdef CONFIG_NETPOLL 2147 struct netpoll_info __rcu *npinfo; 2148 #endif 2149 #ifdef CONFIG_NET_XGRESS 2150 struct bpf_mprog_entry __rcu *tcx_ingress; 2151 #endif 2152 __cacheline_group_end(net_device_read_rx); 2153 2154 char name[IFNAMSIZ]; 2155 struct netdev_name_node *name_node; 2156 struct dev_ifalias __rcu *ifalias; 2157 /* 2158 * I/O specific fields 2159 * FIXME: Merge these and struct ifmap into one 2160 */ 2161 unsigned long mem_end; 2162 unsigned long mem_start; 2163 unsigned long base_addr; 2164 2165 /* 2166 * Some hardware also needs these fields (state,dev_list, 2167 * napi_list,unreg_list,close_list) but they are not 2168 * part of the usual set specified in Space.c. 2169 */ 2170 2171 2172 struct list_head dev_list; 2173 struct list_head napi_list; 2174 struct list_head unreg_list; 2175 struct list_head close_list; 2176 struct list_head ptype_all; 2177 2178 struct { 2179 struct list_head upper; 2180 struct list_head lower; 2181 } adj_list; 2182 2183 /* Read-mostly cache-line for fast-path access */ 2184 xdp_features_t xdp_features; 2185 const struct xdp_metadata_ops *xdp_metadata_ops; 2186 const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops; 2187 unsigned short gflags; 2188 2189 unsigned short needed_tailroom; 2190 2191 netdev_features_t hw_features; 2192 netdev_features_t wanted_features; 2193 netdev_features_t vlan_features; 2194 netdev_features_t hw_enc_features; 2195 netdev_features_t mpls_features; 2196 2197 unsigned int min_mtu; 2198 unsigned int max_mtu; 2199 unsigned short type; 2200 unsigned char min_header_len; 2201 unsigned char name_assign_type; 2202 2203 int group; 2204 2205 struct net_device_stats stats; /* not used by modern drivers */ 2206 2207 struct net_device_core_stats __percpu *core_stats; 2208 2209 /* Stats to monitor link on/off, flapping */ 2210 atomic_t carrier_up_count; 2211 atomic_t carrier_down_count; 2212 2213 #ifdef CONFIG_WIRELESS_EXT 2214 const struct iw_handler_def *wireless_handlers; 2215 #endif 2216 const struct ethtool_ops *ethtool_ops; 2217 #ifdef CONFIG_NET_L3_MASTER_DEV 2218 const struct l3mdev_ops *l3mdev_ops; 2219 #endif 2220 #if IS_ENABLED(CONFIG_IPV6) 2221 const struct ndisc_ops *ndisc_ops; 2222 #endif 2223 2224 #ifdef CONFIG_XFRM_OFFLOAD 2225 const struct xfrmdev_ops *xfrmdev_ops; 2226 #endif 2227 2228 #if IS_ENABLED(CONFIG_TLS_DEVICE) 2229 const struct tlsdev_ops *tlsdev_ops; 2230 #endif 2231 2232 unsigned int operstate; 2233 unsigned char link_mode; 2234 2235 unsigned char if_port; 2236 unsigned char dma; 2237 2238 /* Interface address info. */ 2239 unsigned char perm_addr[MAX_ADDR_LEN]; 2240 unsigned char addr_assign_type; 2241 unsigned char addr_len; 2242 unsigned char upper_level; 2243 unsigned char lower_level; 2244 2245 unsigned short neigh_priv_len; 2246 unsigned short dev_id; 2247 unsigned short dev_port; 2248 int irq; 2249 u32 priv_len; 2250 2251 spinlock_t addr_list_lock; 2252 2253 struct netdev_hw_addr_list uc; 2254 struct netdev_hw_addr_list mc; 2255 struct netdev_hw_addr_list dev_addrs; 2256 2257 #ifdef CONFIG_SYSFS 2258 struct kset *queues_kset; 2259 #endif 2260 #ifdef CONFIG_LOCKDEP 2261 struct list_head unlink_list; 2262 #endif 2263 unsigned int promiscuity; 2264 unsigned int allmulti; 2265 bool uc_promisc; 2266 #ifdef CONFIG_LOCKDEP 2267 unsigned char nested_level; 2268 #endif 2269 2270 2271 /* Protocol-specific pointers */ 2272 struct in_device __rcu *ip_ptr; 2273 /** @fib_nh_head: nexthops associated with this netdev */ 2274 struct hlist_head fib_nh_head; 2275 2276 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2277 struct vlan_info __rcu *vlan_info; 2278 #endif 2279 #if IS_ENABLED(CONFIG_NET_DSA) 2280 struct dsa_port *dsa_ptr; 2281 #endif 2282 #if IS_ENABLED(CONFIG_TIPC) 2283 struct tipc_bearer __rcu *tipc_ptr; 2284 #endif 2285 #if IS_ENABLED(CONFIG_ATALK) 2286 void *atalk_ptr; 2287 #endif 2288 #if IS_ENABLED(CONFIG_AX25) 2289 struct ax25_dev __rcu *ax25_ptr; 2290 #endif 2291 #if IS_ENABLED(CONFIG_CFG80211) 2292 struct wireless_dev *ieee80211_ptr; 2293 #endif 2294 #if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN) 2295 struct wpan_dev *ieee802154_ptr; 2296 #endif 2297 #if IS_ENABLED(CONFIG_MPLS_ROUTING) 2298 struct mpls_dev __rcu *mpls_ptr; 2299 #endif 2300 #if IS_ENABLED(CONFIG_MCTP) 2301 struct mctp_dev __rcu *mctp_ptr; 2302 #endif 2303 2304 /* 2305 * Cache lines mostly used on receive path (including eth_type_trans()) 2306 */ 2307 /* Interface address info used in eth_type_trans() */ 2308 const unsigned char *dev_addr; 2309 2310 unsigned int num_rx_queues; 2311 #define GRO_LEGACY_MAX_SIZE 65536u 2312 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), 2313 * and shinfo->gso_segs is a 16bit field. 2314 */ 2315 #define GRO_MAX_SIZE (8 * 65535u) 2316 unsigned int xdp_zc_max_segs; 2317 struct netdev_queue __rcu *ingress_queue; 2318 #ifdef CONFIG_NETFILTER_INGRESS 2319 struct nf_hook_entries __rcu *nf_hooks_ingress; 2320 #endif 2321 2322 unsigned char broadcast[MAX_ADDR_LEN]; 2323 #ifdef CONFIG_RFS_ACCEL 2324 struct cpu_rmap *rx_cpu_rmap; 2325 #endif 2326 struct hlist_node index_hlist; 2327 2328 /* 2329 * Cache lines mostly used on transmit path 2330 */ 2331 unsigned int num_tx_queues; 2332 struct Qdisc __rcu *qdisc; 2333 unsigned int tx_queue_len; 2334 spinlock_t tx_global_lock; 2335 2336 struct xdp_dev_bulk_queue __percpu *xdp_bulkq; 2337 2338 #ifdef CONFIG_NET_SCHED 2339 DECLARE_HASHTABLE (qdisc_hash, 4); 2340 #endif 2341 /* These may be needed for future network-power-down code. */ 2342 struct timer_list watchdog_timer; 2343 int watchdog_timeo; 2344 2345 u32 proto_down_reason; 2346 2347 struct list_head todo_list; 2348 2349 #ifdef CONFIG_PCPU_DEV_REFCNT 2350 int __percpu *pcpu_refcnt; 2351 #else 2352 refcount_t dev_refcnt; 2353 #endif 2354 struct ref_tracker_dir refcnt_tracker; 2355 2356 struct list_head link_watch_list; 2357 2358 u8 reg_state; 2359 2360 bool dismantle; 2361 2362 enum { 2363 RTNL_LINK_INITIALIZED, 2364 RTNL_LINK_INITIALIZING, 2365 } rtnl_link_state:16; 2366 2367 bool needs_free_netdev; 2368 void (*priv_destructor)(struct net_device *dev); 2369 2370 /* mid-layer private */ 2371 void *ml_priv; 2372 enum netdev_ml_priv_type ml_priv_type; 2373 2374 enum netdev_stat_type pcpu_stat_type:8; 2375 2376 #if IS_ENABLED(CONFIG_GARP) 2377 struct garp_port __rcu *garp_port; 2378 #endif 2379 #if IS_ENABLED(CONFIG_MRP) 2380 struct mrp_port __rcu *mrp_port; 2381 #endif 2382 #if IS_ENABLED(CONFIG_NET_DROP_MONITOR) 2383 struct dm_hw_stat_delta __rcu *dm_private; 2384 #endif 2385 struct device dev; 2386 const struct attribute_group *sysfs_groups[4]; 2387 const struct attribute_group *sysfs_rx_queue_group; 2388 2389 const struct rtnl_link_ops *rtnl_link_ops; 2390 2391 const struct netdev_stat_ops *stat_ops; 2392 2393 const struct netdev_queue_mgmt_ops *queue_mgmt_ops; 2394 2395 /* for setting kernel sock attribute on TCP connection setup */ 2396 #define GSO_MAX_SEGS 65535u 2397 #define GSO_LEGACY_MAX_SIZE 65536u 2398 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), 2399 * and shinfo->gso_segs is a 16bit field. 2400 */ 2401 #define GSO_MAX_SIZE (8 * GSO_MAX_SEGS) 2402 2403 #define TSO_LEGACY_MAX_SIZE 65536 2404 #define TSO_MAX_SIZE UINT_MAX 2405 unsigned int tso_max_size; 2406 #define TSO_MAX_SEGS U16_MAX 2407 u16 tso_max_segs; 2408 2409 #ifdef CONFIG_DCB 2410 const struct dcbnl_rtnl_ops *dcbnl_ops; 2411 #endif 2412 u8 prio_tc_map[TC_BITMASK + 1]; 2413 2414 #if IS_ENABLED(CONFIG_FCOE) 2415 unsigned int fcoe_ddp_xid; 2416 #endif 2417 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 2418 struct netprio_map __rcu *priomap; 2419 #endif 2420 struct phy_link_topology *link_topo; 2421 struct phy_device *phydev; 2422 struct sfp_bus *sfp_bus; 2423 struct lock_class_key *qdisc_tx_busylock; 2424 bool proto_down; 2425 bool threaded; 2426 bool irq_affinity_auto; 2427 bool rx_cpu_rmap_auto; 2428 2429 /* priv_flags_slow, ungrouped to save space */ 2430 unsigned long see_all_hwtstamp_requests:1; 2431 unsigned long change_proto_down:1; 2432 unsigned long netns_immutable:1; 2433 unsigned long fcoe_mtu:1; 2434 2435 struct list_head net_notifier_list; 2436 2437 #if IS_ENABLED(CONFIG_MACSEC) 2438 /* MACsec management functions */ 2439 const struct macsec_ops *macsec_ops; 2440 #endif 2441 const struct udp_tunnel_nic_info *udp_tunnel_nic_info; 2442 struct udp_tunnel_nic *udp_tunnel_nic; 2443 2444 /** @cfg: net_device queue-related configuration */ 2445 struct netdev_config *cfg; 2446 /** 2447 * @cfg_pending: same as @cfg but when device is being actively 2448 * reconfigured includes any changes to the configuration 2449 * requested by the user, but which may or may not be rejected. 2450 */ 2451 struct netdev_config *cfg_pending; 2452 struct ethtool_netdev_state *ethtool; 2453 2454 /* protected by rtnl_lock */ 2455 struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; 2456 2457 u8 dev_addr_shadow[MAX_ADDR_LEN]; 2458 netdevice_tracker linkwatch_dev_tracker; 2459 netdevice_tracker watchdog_dev_tracker; 2460 netdevice_tracker dev_registered_tracker; 2461 struct rtnl_hw_stats64 *offload_xstats_l3; 2462 2463 struct devlink_port *devlink_port; 2464 2465 #if IS_ENABLED(CONFIG_DPLL) 2466 struct dpll_pin __rcu *dpll_pin; 2467 #endif 2468 #if IS_ENABLED(CONFIG_PAGE_POOL) 2469 /** @page_pools: page pools created for this netdevice */ 2470 struct hlist_head page_pools; 2471 #endif 2472 2473 /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */ 2474 struct dim_irq_moder *irq_moder; 2475 2476 u64 max_pacing_offload_horizon; 2477 struct napi_config *napi_config; 2478 unsigned long gro_flush_timeout; 2479 u32 napi_defer_hard_irqs; 2480 2481 /** 2482 * @up: copy of @state's IFF_UP, but safe to read with just @lock. 2483 * May report false negatives while the device is being opened 2484 * or closed (@lock does not protect .ndo_open, or .ndo_close). 2485 */ 2486 bool up; 2487 2488 /** 2489 * @request_ops_lock: request the core to run all @netdev_ops and 2490 * @ethtool_ops under the @lock. 2491 */ 2492 bool request_ops_lock; 2493 2494 /** 2495 * @lock: netdev-scope lock, protects a small selection of fields. 2496 * Should always be taken using netdev_lock() / netdev_unlock() helpers. 2497 * Drivers are free to use it for other protection. 2498 * 2499 * For the drivers that implement shaper or queue API, the scope 2500 * of this lock is expanded to cover most ndo/queue/ethtool/sysfs 2501 * operations. Drivers may opt-in to this behavior by setting 2502 * @request_ops_lock. 2503 * 2504 * @lock protection mixes with rtnl_lock in multiple ways, fields are 2505 * either: 2506 * 2507 * - simply protected by the instance @lock; 2508 * 2509 * - double protected - writers hold both locks, readers hold either; 2510 * 2511 * - ops protected - protected by the lock held around the NDOs 2512 * and other callbacks, that is the instance lock on devices for 2513 * which netdev_need_ops_lock() returns true, otherwise by rtnl_lock; 2514 * 2515 * - double ops protected - always protected by rtnl_lock but for 2516 * devices for which netdev_need_ops_lock() returns true - also 2517 * the instance lock. 2518 * 2519 * Simply protects: 2520 * @gro_flush_timeout, @napi_defer_hard_irqs, @napi_list, 2521 * @net_shaper_hierarchy, @reg_state, @threaded 2522 * 2523 * Double protects: 2524 * @up 2525 * 2526 * Double ops protects: 2527 * @real_num_rx_queues, @real_num_tx_queues 2528 * 2529 * Also protects some fields in struct napi_struct. 2530 * 2531 * Ordering: take after rtnl_lock. 2532 */ 2533 struct mutex lock; 2534 2535 #if IS_ENABLED(CONFIG_NET_SHAPER) 2536 /** 2537 * @net_shaper_hierarchy: data tracking the current shaper status 2538 * see include/net/net_shapers.h 2539 */ 2540 struct net_shaper_hierarchy *net_shaper_hierarchy; 2541 #endif 2542 2543 struct hlist_head neighbours[NEIGH_NR_TABLES]; 2544 2545 struct hwtstamp_provider __rcu *hwprov; 2546 2547 u8 priv[] ____cacheline_aligned 2548 __counted_by(priv_len); 2549 } ____cacheline_aligned; 2550 #define to_net_dev(d) container_of(d, struct net_device, dev) 2551 2552 /* 2553 * Driver should use this to assign devlink port instance to a netdevice 2554 * before it registers the netdevice. Therefore devlink_port is static 2555 * during the netdev lifetime after it is registered. 2556 */ 2557 #define SET_NETDEV_DEVLINK_PORT(dev, port) \ 2558 ({ \ 2559 WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \ 2560 ((dev)->devlink_port = (port)); \ 2561 }) 2562 2563 static inline bool netif_elide_gro(const struct net_device *dev) 2564 { 2565 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) 2566 return true; 2567 return false; 2568 } 2569 2570 #define NETDEV_ALIGN 32 2571 2572 static inline 2573 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) 2574 { 2575 return dev->prio_tc_map[prio & TC_BITMASK]; 2576 } 2577 2578 static inline 2579 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) 2580 { 2581 if (tc >= dev->num_tc) 2582 return -EINVAL; 2583 2584 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; 2585 return 0; 2586 } 2587 2588 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); 2589 void netdev_reset_tc(struct net_device *dev); 2590 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); 2591 int netdev_set_num_tc(struct net_device *dev, u8 num_tc); 2592 2593 static inline 2594 int netdev_get_num_tc(struct net_device *dev) 2595 { 2596 return dev->num_tc; 2597 } 2598 2599 static inline void net_prefetch(void *p) 2600 { 2601 prefetch(p); 2602 #if L1_CACHE_BYTES < 128 2603 prefetch((u8 *)p + L1_CACHE_BYTES); 2604 #endif 2605 } 2606 2607 static inline void net_prefetchw(void *p) 2608 { 2609 prefetchw(p); 2610 #if L1_CACHE_BYTES < 128 2611 prefetchw((u8 *)p + L1_CACHE_BYTES); 2612 #endif 2613 } 2614 2615 void netdev_unbind_sb_channel(struct net_device *dev, 2616 struct net_device *sb_dev); 2617 int netdev_bind_sb_channel_queue(struct net_device *dev, 2618 struct net_device *sb_dev, 2619 u8 tc, u16 count, u16 offset); 2620 int netdev_set_sb_channel(struct net_device *dev, u16 channel); 2621 static inline int netdev_get_sb_channel(struct net_device *dev) 2622 { 2623 return max_t(int, -dev->num_tc, 0); 2624 } 2625 2626 static inline 2627 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, 2628 unsigned int index) 2629 { 2630 DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues); 2631 return &dev->_tx[index]; 2632 } 2633 2634 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, 2635 const struct sk_buff *skb) 2636 { 2637 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 2638 } 2639 2640 static inline void netdev_for_each_tx_queue(struct net_device *dev, 2641 void (*f)(struct net_device *, 2642 struct netdev_queue *, 2643 void *), 2644 void *arg) 2645 { 2646 unsigned int i; 2647 2648 for (i = 0; i < dev->num_tx_queues; i++) 2649 f(dev, &dev->_tx[i], arg); 2650 } 2651 2652 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 2653 struct net_device *sb_dev); 2654 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, 2655 struct sk_buff *skb, 2656 struct net_device *sb_dev); 2657 2658 /* returns the headroom that the master device needs to take in account 2659 * when forwarding to this dev 2660 */ 2661 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev) 2662 { 2663 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom; 2664 } 2665 2666 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr) 2667 { 2668 if (dev->netdev_ops->ndo_set_rx_headroom) 2669 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); 2670 } 2671 2672 /* set the device rx headroom to the dev's default */ 2673 static inline void netdev_reset_rx_headroom(struct net_device *dev) 2674 { 2675 netdev_set_rx_headroom(dev, -1); 2676 } 2677 2678 static inline void *netdev_get_ml_priv(struct net_device *dev, 2679 enum netdev_ml_priv_type type) 2680 { 2681 if (dev->ml_priv_type != type) 2682 return NULL; 2683 2684 return dev->ml_priv; 2685 } 2686 2687 static inline void netdev_set_ml_priv(struct net_device *dev, 2688 void *ml_priv, 2689 enum netdev_ml_priv_type type) 2690 { 2691 WARN(dev->ml_priv_type && dev->ml_priv_type != type, 2692 "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n", 2693 dev->ml_priv_type, type); 2694 WARN(!dev->ml_priv_type && dev->ml_priv, 2695 "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n"); 2696 2697 dev->ml_priv = ml_priv; 2698 dev->ml_priv_type = type; 2699 } 2700 2701 /* 2702 * Net namespace inlines 2703 */ 2704 static inline 2705 struct net *dev_net(const struct net_device *dev) 2706 { 2707 return read_pnet(&dev->nd_net); 2708 } 2709 2710 static inline 2711 struct net *dev_net_rcu(const struct net_device *dev) 2712 { 2713 return read_pnet_rcu(&dev->nd_net); 2714 } 2715 2716 static inline 2717 void dev_net_set(struct net_device *dev, struct net *net) 2718 { 2719 write_pnet(&dev->nd_net, net); 2720 } 2721 2722 /** 2723 * netdev_priv - access network device private data 2724 * @dev: network device 2725 * 2726 * Get network device private data 2727 */ 2728 static inline void *netdev_priv(const struct net_device *dev) 2729 { 2730 return (void *)dev->priv; 2731 } 2732 2733 /* Set the sysfs physical device reference for the network logical device 2734 * if set prior to registration will cause a symlink during initialization. 2735 */ 2736 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) 2737 2738 /* Set the sysfs device type for the network logical device to allow 2739 * fine-grained identification of different network device types. For 2740 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc. 2741 */ 2742 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) 2743 2744 void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index, 2745 enum netdev_queue_type type, 2746 struct napi_struct *napi); 2747 2748 static inline void netdev_lock(struct net_device *dev) 2749 { 2750 mutex_lock(&dev->lock); 2751 } 2752 2753 static inline void netdev_unlock(struct net_device *dev) 2754 { 2755 mutex_unlock(&dev->lock); 2756 } 2757 /* Additional netdev_lock()-related helpers are in net/netdev_lock.h */ 2758 2759 void netif_napi_set_irq_locked(struct napi_struct *napi, int irq); 2760 2761 static inline void netif_napi_set_irq(struct napi_struct *napi, int irq) 2762 { 2763 netdev_lock(napi->dev); 2764 netif_napi_set_irq_locked(napi, irq); 2765 netdev_unlock(napi->dev); 2766 } 2767 2768 /* Default NAPI poll() weight 2769 * Device drivers are strongly advised to not use bigger value 2770 */ 2771 #define NAPI_POLL_WEIGHT 64 2772 2773 void netif_napi_add_weight_locked(struct net_device *dev, 2774 struct napi_struct *napi, 2775 int (*poll)(struct napi_struct *, int), 2776 int weight); 2777 2778 static inline void 2779 netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, 2780 int (*poll)(struct napi_struct *, int), int weight) 2781 { 2782 netdev_lock(dev); 2783 netif_napi_add_weight_locked(dev, napi, poll, weight); 2784 netdev_unlock(dev); 2785 } 2786 2787 /** 2788 * netif_napi_add() - initialize a NAPI context 2789 * @dev: network device 2790 * @napi: NAPI context 2791 * @poll: polling function 2792 * 2793 * netif_napi_add() must be used to initialize a NAPI context prior to calling 2794 * *any* of the other NAPI-related functions. 2795 */ 2796 static inline void 2797 netif_napi_add(struct net_device *dev, struct napi_struct *napi, 2798 int (*poll)(struct napi_struct *, int)) 2799 { 2800 netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT); 2801 } 2802 2803 static inline void 2804 netif_napi_add_locked(struct net_device *dev, struct napi_struct *napi, 2805 int (*poll)(struct napi_struct *, int)) 2806 { 2807 netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT); 2808 } 2809 2810 static inline void 2811 netif_napi_add_tx_weight(struct net_device *dev, 2812 struct napi_struct *napi, 2813 int (*poll)(struct napi_struct *, int), 2814 int weight) 2815 { 2816 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); 2817 netif_napi_add_weight(dev, napi, poll, weight); 2818 } 2819 2820 static inline void 2821 netif_napi_add_config_locked(struct net_device *dev, struct napi_struct *napi, 2822 int (*poll)(struct napi_struct *, int), int index) 2823 { 2824 napi->index = index; 2825 napi->config = &dev->napi_config[index]; 2826 netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT); 2827 } 2828 2829 /** 2830 * netif_napi_add_config - initialize a NAPI context with persistent config 2831 * @dev: network device 2832 * @napi: NAPI context 2833 * @poll: polling function 2834 * @index: the NAPI index 2835 */ 2836 static inline void 2837 netif_napi_add_config(struct net_device *dev, struct napi_struct *napi, 2838 int (*poll)(struct napi_struct *, int), int index) 2839 { 2840 netdev_lock(dev); 2841 netif_napi_add_config_locked(dev, napi, poll, index); 2842 netdev_unlock(dev); 2843 } 2844 2845 /** 2846 * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only 2847 * @dev: network device 2848 * @napi: NAPI context 2849 * @poll: polling function 2850 * 2851 * This variant of netif_napi_add() should be used from drivers using NAPI 2852 * to exclusively poll a TX queue. 2853 * This will avoid we add it into napi_hash[], thus polluting this hash table. 2854 */ 2855 static inline void netif_napi_add_tx(struct net_device *dev, 2856 struct napi_struct *napi, 2857 int (*poll)(struct napi_struct *, int)) 2858 { 2859 netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT); 2860 } 2861 2862 void __netif_napi_del_locked(struct napi_struct *napi); 2863 2864 /** 2865 * __netif_napi_del - remove a NAPI context 2866 * @napi: NAPI context 2867 * 2868 * Warning: caller must observe RCU grace period before freeing memory 2869 * containing @napi. Drivers might want to call this helper to combine 2870 * all the needed RCU grace periods into a single one. 2871 */ 2872 static inline void __netif_napi_del(struct napi_struct *napi) 2873 { 2874 netdev_lock(napi->dev); 2875 __netif_napi_del_locked(napi); 2876 netdev_unlock(napi->dev); 2877 } 2878 2879 static inline void netif_napi_del_locked(struct napi_struct *napi) 2880 { 2881 __netif_napi_del_locked(napi); 2882 synchronize_net(); 2883 } 2884 2885 /** 2886 * netif_napi_del - remove a NAPI context 2887 * @napi: NAPI context 2888 * 2889 * netif_napi_del() removes a NAPI context from the network device NAPI list 2890 */ 2891 static inline void netif_napi_del(struct napi_struct *napi) 2892 { 2893 __netif_napi_del(napi); 2894 synchronize_net(); 2895 } 2896 2897 int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs); 2898 void netif_set_affinity_auto(struct net_device *dev); 2899 2900 struct packet_type { 2901 __be16 type; /* This is really htons(ether_type). */ 2902 bool ignore_outgoing; 2903 struct net_device *dev; /* NULL is wildcarded here */ 2904 netdevice_tracker dev_tracker; 2905 int (*func) (struct sk_buff *, 2906 struct net_device *, 2907 struct packet_type *, 2908 struct net_device *); 2909 void (*list_func) (struct list_head *, 2910 struct packet_type *, 2911 struct net_device *); 2912 bool (*id_match)(struct packet_type *ptype, 2913 struct sock *sk); 2914 struct net *af_packet_net; 2915 void *af_packet_priv; 2916 struct list_head list; 2917 }; 2918 2919 struct offload_callbacks { 2920 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 2921 netdev_features_t features); 2922 struct sk_buff *(*gro_receive)(struct list_head *head, 2923 struct sk_buff *skb); 2924 int (*gro_complete)(struct sk_buff *skb, int nhoff); 2925 }; 2926 2927 struct packet_offload { 2928 __be16 type; /* This is really htons(ether_type). */ 2929 u16 priority; 2930 struct offload_callbacks callbacks; 2931 struct list_head list; 2932 }; 2933 2934 /* often modified stats are per-CPU, other are shared (netdev->stats) */ 2935 struct pcpu_sw_netstats { 2936 u64_stats_t rx_packets; 2937 u64_stats_t rx_bytes; 2938 u64_stats_t tx_packets; 2939 u64_stats_t tx_bytes; 2940 struct u64_stats_sync syncp; 2941 } __aligned(4 * sizeof(u64)); 2942 2943 struct pcpu_dstats { 2944 u64_stats_t rx_packets; 2945 u64_stats_t rx_bytes; 2946 u64_stats_t tx_packets; 2947 u64_stats_t tx_bytes; 2948 u64_stats_t rx_drops; 2949 u64_stats_t tx_drops; 2950 struct u64_stats_sync syncp; 2951 } __aligned(8 * sizeof(u64)); 2952 2953 struct pcpu_lstats { 2954 u64_stats_t packets; 2955 u64_stats_t bytes; 2956 struct u64_stats_sync syncp; 2957 } __aligned(2 * sizeof(u64)); 2958 2959 void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes); 2960 2961 static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len) 2962 { 2963 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 2964 2965 u64_stats_update_begin(&tstats->syncp); 2966 u64_stats_add(&tstats->rx_bytes, len); 2967 u64_stats_inc(&tstats->rx_packets); 2968 u64_stats_update_end(&tstats->syncp); 2969 } 2970 2971 static inline void dev_sw_netstats_tx_add(struct net_device *dev, 2972 unsigned int packets, 2973 unsigned int len) 2974 { 2975 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 2976 2977 u64_stats_update_begin(&tstats->syncp); 2978 u64_stats_add(&tstats->tx_bytes, len); 2979 u64_stats_add(&tstats->tx_packets, packets); 2980 u64_stats_update_end(&tstats->syncp); 2981 } 2982 2983 static inline void dev_lstats_add(struct net_device *dev, unsigned int len) 2984 { 2985 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); 2986 2987 u64_stats_update_begin(&lstats->syncp); 2988 u64_stats_add(&lstats->bytes, len); 2989 u64_stats_inc(&lstats->packets); 2990 u64_stats_update_end(&lstats->syncp); 2991 } 2992 2993 static inline void dev_dstats_rx_add(struct net_device *dev, 2994 unsigned int len) 2995 { 2996 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); 2997 2998 u64_stats_update_begin(&dstats->syncp); 2999 u64_stats_inc(&dstats->rx_packets); 3000 u64_stats_add(&dstats->rx_bytes, len); 3001 u64_stats_update_end(&dstats->syncp); 3002 } 3003 3004 static inline void dev_dstats_rx_dropped(struct net_device *dev) 3005 { 3006 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); 3007 3008 u64_stats_update_begin(&dstats->syncp); 3009 u64_stats_inc(&dstats->rx_drops); 3010 u64_stats_update_end(&dstats->syncp); 3011 } 3012 3013 static inline void dev_dstats_tx_add(struct net_device *dev, 3014 unsigned int len) 3015 { 3016 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); 3017 3018 u64_stats_update_begin(&dstats->syncp); 3019 u64_stats_inc(&dstats->tx_packets); 3020 u64_stats_add(&dstats->tx_bytes, len); 3021 u64_stats_update_end(&dstats->syncp); 3022 } 3023 3024 static inline void dev_dstats_tx_dropped(struct net_device *dev) 3025 { 3026 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); 3027 3028 u64_stats_update_begin(&dstats->syncp); 3029 u64_stats_inc(&dstats->tx_drops); 3030 u64_stats_update_end(&dstats->syncp); 3031 } 3032 3033 #define __netdev_alloc_pcpu_stats(type, gfp) \ 3034 ({ \ 3035 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ 3036 if (pcpu_stats) { \ 3037 int __cpu; \ 3038 for_each_possible_cpu(__cpu) { \ 3039 typeof(type) *stat; \ 3040 stat = per_cpu_ptr(pcpu_stats, __cpu); \ 3041 u64_stats_init(&stat->syncp); \ 3042 } \ 3043 } \ 3044 pcpu_stats; \ 3045 }) 3046 3047 #define netdev_alloc_pcpu_stats(type) \ 3048 __netdev_alloc_pcpu_stats(type, GFP_KERNEL) 3049 3050 #define devm_netdev_alloc_pcpu_stats(dev, type) \ 3051 ({ \ 3052 typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\ 3053 if (pcpu_stats) { \ 3054 int __cpu; \ 3055 for_each_possible_cpu(__cpu) { \ 3056 typeof(type) *stat; \ 3057 stat = per_cpu_ptr(pcpu_stats, __cpu); \ 3058 u64_stats_init(&stat->syncp); \ 3059 } \ 3060 } \ 3061 pcpu_stats; \ 3062 }) 3063 3064 enum netdev_lag_tx_type { 3065 NETDEV_LAG_TX_TYPE_UNKNOWN, 3066 NETDEV_LAG_TX_TYPE_RANDOM, 3067 NETDEV_LAG_TX_TYPE_BROADCAST, 3068 NETDEV_LAG_TX_TYPE_ROUNDROBIN, 3069 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, 3070 NETDEV_LAG_TX_TYPE_HASH, 3071 }; 3072 3073 enum netdev_lag_hash { 3074 NETDEV_LAG_HASH_NONE, 3075 NETDEV_LAG_HASH_L2, 3076 NETDEV_LAG_HASH_L34, 3077 NETDEV_LAG_HASH_L23, 3078 NETDEV_LAG_HASH_E23, 3079 NETDEV_LAG_HASH_E34, 3080 NETDEV_LAG_HASH_VLAN_SRCMAC, 3081 NETDEV_LAG_HASH_UNKNOWN, 3082 }; 3083 3084 struct netdev_lag_upper_info { 3085 enum netdev_lag_tx_type tx_type; 3086 enum netdev_lag_hash hash_type; 3087 }; 3088 3089 struct netdev_lag_lower_state_info { 3090 u8 link_up : 1, 3091 tx_enabled : 1; 3092 }; 3093 3094 #include <linux/notifier.h> 3095 3096 /* netdevice notifier chain. Please remember to update netdev_cmd_to_name() 3097 * and the rtnetlink notification exclusion list in rtnetlink_event() when 3098 * adding new types. 3099 */ 3100 enum netdev_cmd { 3101 NETDEV_UP = 1, /* For now you can't veto a device up/down */ 3102 NETDEV_DOWN, 3103 NETDEV_REBOOT, /* Tell a protocol stack a network interface 3104 detected a hardware crash and restarted 3105 - we can use this eg to kick tcp sessions 3106 once done */ 3107 NETDEV_CHANGE, /* Notify device state change */ 3108 NETDEV_REGISTER, 3109 NETDEV_UNREGISTER, 3110 NETDEV_CHANGEMTU, /* notify after mtu change happened */ 3111 NETDEV_CHANGEADDR, /* notify after the address change */ 3112 NETDEV_PRE_CHANGEADDR, /* notify before the address change */ 3113 NETDEV_GOING_DOWN, 3114 NETDEV_CHANGENAME, 3115 NETDEV_FEAT_CHANGE, 3116 NETDEV_BONDING_FAILOVER, 3117 NETDEV_PRE_UP, 3118 NETDEV_PRE_TYPE_CHANGE, 3119 NETDEV_POST_TYPE_CHANGE, 3120 NETDEV_POST_INIT, 3121 NETDEV_PRE_UNINIT, 3122 NETDEV_RELEASE, 3123 NETDEV_NOTIFY_PEERS, 3124 NETDEV_JOIN, 3125 NETDEV_CHANGEUPPER, 3126 NETDEV_RESEND_IGMP, 3127 NETDEV_PRECHANGEMTU, /* notify before mtu change happened */ 3128 NETDEV_CHANGEINFODATA, 3129 NETDEV_BONDING_INFO, 3130 NETDEV_PRECHANGEUPPER, 3131 NETDEV_CHANGELOWERSTATE, 3132 NETDEV_UDP_TUNNEL_PUSH_INFO, 3133 NETDEV_UDP_TUNNEL_DROP_INFO, 3134 NETDEV_CHANGE_TX_QUEUE_LEN, 3135 NETDEV_CVLAN_FILTER_PUSH_INFO, 3136 NETDEV_CVLAN_FILTER_DROP_INFO, 3137 NETDEV_SVLAN_FILTER_PUSH_INFO, 3138 NETDEV_SVLAN_FILTER_DROP_INFO, 3139 NETDEV_OFFLOAD_XSTATS_ENABLE, 3140 NETDEV_OFFLOAD_XSTATS_DISABLE, 3141 NETDEV_OFFLOAD_XSTATS_REPORT_USED, 3142 NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, 3143 NETDEV_XDP_FEAT_CHANGE, 3144 }; 3145 const char *netdev_cmd_to_name(enum netdev_cmd cmd); 3146 3147 int register_netdevice_notifier(struct notifier_block *nb); 3148 int unregister_netdevice_notifier(struct notifier_block *nb); 3149 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb); 3150 int unregister_netdevice_notifier_net(struct net *net, 3151 struct notifier_block *nb); 3152 int register_netdevice_notifier_dev_net(struct net_device *dev, 3153 struct notifier_block *nb, 3154 struct netdev_net_notifier *nn); 3155 int unregister_netdevice_notifier_dev_net(struct net_device *dev, 3156 struct notifier_block *nb, 3157 struct netdev_net_notifier *nn); 3158 3159 struct netdev_notifier_info { 3160 struct net_device *dev; 3161 struct netlink_ext_ack *extack; 3162 }; 3163 3164 struct netdev_notifier_info_ext { 3165 struct netdev_notifier_info info; /* must be first */ 3166 union { 3167 u32 mtu; 3168 } ext; 3169 }; 3170 3171 struct netdev_notifier_change_info { 3172 struct netdev_notifier_info info; /* must be first */ 3173 unsigned int flags_changed; 3174 }; 3175 3176 struct netdev_notifier_changeupper_info { 3177 struct netdev_notifier_info info; /* must be first */ 3178 struct net_device *upper_dev; /* new upper dev */ 3179 bool master; /* is upper dev master */ 3180 bool linking; /* is the notification for link or unlink */ 3181 void *upper_info; /* upper dev info */ 3182 }; 3183 3184 struct netdev_notifier_changelowerstate_info { 3185 struct netdev_notifier_info info; /* must be first */ 3186 void *lower_state_info; /* is lower dev state */ 3187 }; 3188 3189 struct netdev_notifier_pre_changeaddr_info { 3190 struct netdev_notifier_info info; /* must be first */ 3191 const unsigned char *dev_addr; 3192 }; 3193 3194 enum netdev_offload_xstats_type { 3195 NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, 3196 }; 3197 3198 struct netdev_notifier_offload_xstats_info { 3199 struct netdev_notifier_info info; /* must be first */ 3200 enum netdev_offload_xstats_type type; 3201 3202 union { 3203 /* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */ 3204 struct netdev_notifier_offload_xstats_rd *report_delta; 3205 /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */ 3206 struct netdev_notifier_offload_xstats_ru *report_used; 3207 }; 3208 }; 3209 3210 int netdev_offload_xstats_enable(struct net_device *dev, 3211 enum netdev_offload_xstats_type type, 3212 struct netlink_ext_ack *extack); 3213 int netdev_offload_xstats_disable(struct net_device *dev, 3214 enum netdev_offload_xstats_type type); 3215 bool netdev_offload_xstats_enabled(const struct net_device *dev, 3216 enum netdev_offload_xstats_type type); 3217 int netdev_offload_xstats_get(struct net_device *dev, 3218 enum netdev_offload_xstats_type type, 3219 struct rtnl_hw_stats64 *stats, bool *used, 3220 struct netlink_ext_ack *extack); 3221 void 3222 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd, 3223 const struct rtnl_hw_stats64 *stats); 3224 void 3225 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru); 3226 void netdev_offload_xstats_push_delta(struct net_device *dev, 3227 enum netdev_offload_xstats_type type, 3228 const struct rtnl_hw_stats64 *stats); 3229 3230 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, 3231 struct net_device *dev) 3232 { 3233 info->dev = dev; 3234 info->extack = NULL; 3235 } 3236 3237 static inline struct net_device * 3238 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) 3239 { 3240 return info->dev; 3241 } 3242 3243 static inline struct netlink_ext_ack * 3244 netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) 3245 { 3246 return info->extack; 3247 } 3248 3249 int call_netdevice_notifiers(unsigned long val, struct net_device *dev); 3250 int call_netdevice_notifiers_info(unsigned long val, 3251 struct netdev_notifier_info *info); 3252 3253 #define for_each_netdev(net, d) \ 3254 list_for_each_entry(d, &(net)->dev_base_head, dev_list) 3255 #define for_each_netdev_reverse(net, d) \ 3256 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) 3257 #define for_each_netdev_rcu(net, d) \ 3258 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) 3259 #define for_each_netdev_safe(net, d, n) \ 3260 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) 3261 #define for_each_netdev_continue(net, d) \ 3262 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) 3263 #define for_each_netdev_continue_reverse(net, d) \ 3264 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \ 3265 dev_list) 3266 #define for_each_netdev_continue_rcu(net, d) \ 3267 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 3268 #define for_each_netdev_in_bond_rcu(bond, slave) \ 3269 for_each_netdev_rcu(&init_net, slave) \ 3270 if (netdev_master_upper_dev_get_rcu(slave) == (bond)) 3271 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 3272 3273 #define for_each_netdev_dump(net, d, ifindex) \ 3274 for (; (d = xa_find(&(net)->dev_by_index, &ifindex, \ 3275 ULONG_MAX, XA_PRESENT)); ifindex++) 3276 3277 static inline struct net_device *next_net_device(struct net_device *dev) 3278 { 3279 struct list_head *lh; 3280 struct net *net; 3281 3282 net = dev_net(dev); 3283 lh = dev->dev_list.next; 3284 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3285 } 3286 3287 static inline struct net_device *next_net_device_rcu(struct net_device *dev) 3288 { 3289 struct list_head *lh; 3290 struct net *net; 3291 3292 net = dev_net(dev); 3293 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); 3294 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3295 } 3296 3297 static inline struct net_device *first_net_device(struct net *net) 3298 { 3299 return list_empty(&net->dev_base_head) ? NULL : 3300 net_device_entry(net->dev_base_head.next); 3301 } 3302 3303 static inline struct net_device *first_net_device_rcu(struct net *net) 3304 { 3305 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); 3306 3307 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3308 } 3309 3310 int netdev_boot_setup_check(struct net_device *dev); 3311 struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, 3312 const char *hwaddr); 3313 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 3314 const char *hwaddr); 3315 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); 3316 void dev_add_pack(struct packet_type *pt); 3317 void dev_remove_pack(struct packet_type *pt); 3318 void __dev_remove_pack(struct packet_type *pt); 3319 void dev_add_offload(struct packet_offload *po); 3320 void dev_remove_offload(struct packet_offload *po); 3321 3322 int dev_get_iflink(const struct net_device *dev); 3323 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); 3324 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, 3325 struct net_device_path_stack *stack); 3326 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, 3327 unsigned short mask); 3328 struct net_device *dev_get_by_name(struct net *net, const char *name); 3329 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); 3330 struct net_device *__dev_get_by_name(struct net *net, const char *name); 3331 bool netdev_name_in_use(struct net *net, const char *name); 3332 int dev_alloc_name(struct net_device *dev, const char *name); 3333 int netif_open(struct net_device *dev, struct netlink_ext_ack *extack); 3334 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); 3335 void netif_close(struct net_device *dev); 3336 void dev_close(struct net_device *dev); 3337 void dev_close_many(struct list_head *head, bool unlink); 3338 void netif_disable_lro(struct net_device *dev); 3339 void dev_disable_lro(struct net_device *dev); 3340 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); 3341 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 3342 struct net_device *sb_dev); 3343 3344 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev); 3345 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id); 3346 3347 static inline int dev_queue_xmit(struct sk_buff *skb) 3348 { 3349 return __dev_queue_xmit(skb, NULL); 3350 } 3351 3352 static inline int dev_queue_xmit_accel(struct sk_buff *skb, 3353 struct net_device *sb_dev) 3354 { 3355 return __dev_queue_xmit(skb, sb_dev); 3356 } 3357 3358 static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 3359 { 3360 int ret; 3361 3362 ret = __dev_direct_xmit(skb, queue_id); 3363 if (!dev_xmit_complete(ret)) 3364 kfree_skb(skb); 3365 return ret; 3366 } 3367 3368 int register_netdevice(struct net_device *dev); 3369 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); 3370 void unregister_netdevice_many(struct list_head *head); 3371 static inline void unregister_netdevice(struct net_device *dev) 3372 { 3373 unregister_netdevice_queue(dev, NULL); 3374 } 3375 3376 int netdev_refcnt_read(const struct net_device *dev); 3377 void free_netdev(struct net_device *dev); 3378 3379 struct net_device *netdev_get_xmit_slave(struct net_device *dev, 3380 struct sk_buff *skb, 3381 bool all_slaves); 3382 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 3383 struct sock *sk); 3384 struct net_device *dev_get_by_index(struct net *net, int ifindex); 3385 struct net_device *__dev_get_by_index(struct net *net, int ifindex); 3386 struct net_device *netdev_get_by_index(struct net *net, int ifindex, 3387 netdevice_tracker *tracker, gfp_t gfp); 3388 struct net_device *netdev_get_by_name(struct net *net, const char *name, 3389 netdevice_tracker *tracker, gfp_t gfp); 3390 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); 3391 void netdev_copy_name(struct net_device *dev, char *name); 3392 3393 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 3394 unsigned short type, 3395 const void *daddr, const void *saddr, 3396 unsigned int len) 3397 { 3398 if (!dev->header_ops || !dev->header_ops->create) 3399 return 0; 3400 3401 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); 3402 } 3403 3404 static inline int dev_parse_header(const struct sk_buff *skb, 3405 unsigned char *haddr) 3406 { 3407 const struct net_device *dev = skb->dev; 3408 3409 if (!dev->header_ops || !dev->header_ops->parse) 3410 return 0; 3411 return dev->header_ops->parse(skb, haddr); 3412 } 3413 3414 static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) 3415 { 3416 const struct net_device *dev = skb->dev; 3417 3418 if (!dev->header_ops || !dev->header_ops->parse_protocol) 3419 return 0; 3420 return dev->header_ops->parse_protocol(skb); 3421 } 3422 3423 /* ll_header must have at least hard_header_len allocated */ 3424 static inline bool dev_validate_header(const struct net_device *dev, 3425 char *ll_header, int len) 3426 { 3427 if (likely(len >= dev->hard_header_len)) 3428 return true; 3429 if (len < dev->min_header_len) 3430 return false; 3431 3432 if (capable(CAP_SYS_RAWIO)) { 3433 memset(ll_header + len, 0, dev->hard_header_len - len); 3434 return true; 3435 } 3436 3437 if (dev->header_ops && dev->header_ops->validate) 3438 return dev->header_ops->validate(ll_header, len); 3439 3440 return false; 3441 } 3442 3443 static inline bool dev_has_header(const struct net_device *dev) 3444 { 3445 return dev->header_ops && dev->header_ops->create; 3446 } 3447 3448 /* 3449 * Incoming packets are placed on per-CPU queues 3450 */ 3451 struct softnet_data { 3452 struct list_head poll_list; 3453 struct sk_buff_head process_queue; 3454 local_lock_t process_queue_bh_lock; 3455 3456 /* stats */ 3457 unsigned int processed; 3458 unsigned int time_squeeze; 3459 #ifdef CONFIG_RPS 3460 struct softnet_data *rps_ipi_list; 3461 #endif 3462 3463 unsigned int received_rps; 3464 bool in_net_rx_action; 3465 bool in_napi_threaded_poll; 3466 3467 #ifdef CONFIG_NET_FLOW_LIMIT 3468 struct sd_flow_limit __rcu *flow_limit; 3469 #endif 3470 struct Qdisc *output_queue; 3471 struct Qdisc **output_queue_tailp; 3472 struct sk_buff *completion_queue; 3473 #ifdef CONFIG_XFRM_OFFLOAD 3474 struct sk_buff_head xfrm_backlog; 3475 #endif 3476 /* written and read only by owning cpu: */ 3477 struct netdev_xmit xmit; 3478 #ifdef CONFIG_RPS 3479 /* input_queue_head should be written by cpu owning this struct, 3480 * and only read by other cpus. Worth using a cache line. 3481 */ 3482 unsigned int input_queue_head ____cacheline_aligned_in_smp; 3483 3484 /* Elements below can be accessed between CPUs for RPS/RFS */ 3485 call_single_data_t csd ____cacheline_aligned_in_smp; 3486 struct softnet_data *rps_ipi_next; 3487 unsigned int cpu; 3488 unsigned int input_queue_tail; 3489 #endif 3490 struct sk_buff_head input_pkt_queue; 3491 struct napi_struct backlog; 3492 3493 atomic_t dropped ____cacheline_aligned_in_smp; 3494 3495 /* Another possibly contended cache line */ 3496 spinlock_t defer_lock ____cacheline_aligned_in_smp; 3497 int defer_count; 3498 int defer_ipi_scheduled; 3499 struct sk_buff *defer_list; 3500 call_single_data_t defer_csd; 3501 }; 3502 3503 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 3504 DECLARE_PER_CPU(struct page_pool *, system_page_pool); 3505 3506 #ifndef CONFIG_PREEMPT_RT 3507 static inline int dev_recursion_level(void) 3508 { 3509 return this_cpu_read(softnet_data.xmit.recursion); 3510 } 3511 #else 3512 static inline int dev_recursion_level(void) 3513 { 3514 return current->net_xmit.recursion; 3515 } 3516 3517 #endif 3518 3519 void __netif_schedule(struct Qdisc *q); 3520 void netif_schedule_queue(struct netdev_queue *txq); 3521 3522 static inline void netif_tx_schedule_all(struct net_device *dev) 3523 { 3524 unsigned int i; 3525 3526 for (i = 0; i < dev->num_tx_queues; i++) 3527 netif_schedule_queue(netdev_get_tx_queue(dev, i)); 3528 } 3529 3530 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue) 3531 { 3532 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 3533 } 3534 3535 /** 3536 * netif_start_queue - allow transmit 3537 * @dev: network device 3538 * 3539 * Allow upper layers to call the device hard_start_xmit routine. 3540 */ 3541 static inline void netif_start_queue(struct net_device *dev) 3542 { 3543 netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); 3544 } 3545 3546 static inline void netif_tx_start_all_queues(struct net_device *dev) 3547 { 3548 unsigned int i; 3549 3550 for (i = 0; i < dev->num_tx_queues; i++) { 3551 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 3552 netif_tx_start_queue(txq); 3553 } 3554 } 3555 3556 void netif_tx_wake_queue(struct netdev_queue *dev_queue); 3557 3558 /** 3559 * netif_wake_queue - restart transmit 3560 * @dev: network device 3561 * 3562 * Allow upper layers to call the device hard_start_xmit routine. 3563 * Used for flow control when transmit resources are available. 3564 */ 3565 static inline void netif_wake_queue(struct net_device *dev) 3566 { 3567 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); 3568 } 3569 3570 static inline void netif_tx_wake_all_queues(struct net_device *dev) 3571 { 3572 unsigned int i; 3573 3574 for (i = 0; i < dev->num_tx_queues; i++) { 3575 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 3576 netif_tx_wake_queue(txq); 3577 } 3578 } 3579 3580 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) 3581 { 3582 /* Paired with READ_ONCE() from dev_watchdog() */ 3583 WRITE_ONCE(dev_queue->trans_start, jiffies); 3584 3585 /* This barrier is paired with smp_mb() from dev_watchdog() */ 3586 smp_mb__before_atomic(); 3587 3588 /* Must be an atomic op see netif_txq_try_stop() */ 3589 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 3590 } 3591 3592 /** 3593 * netif_stop_queue - stop transmitted packets 3594 * @dev: network device 3595 * 3596 * Stop upper layers calling the device hard_start_xmit routine. 3597 * Used for flow control when transmit resources are unavailable. 3598 */ 3599 static inline void netif_stop_queue(struct net_device *dev) 3600 { 3601 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); 3602 } 3603 3604 void netif_tx_stop_all_queues(struct net_device *dev); 3605 3606 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) 3607 { 3608 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 3609 } 3610 3611 /** 3612 * netif_queue_stopped - test if transmit queue is flowblocked 3613 * @dev: network device 3614 * 3615 * Test if transmit queue on device is currently unable to send. 3616 */ 3617 static inline bool netif_queue_stopped(const struct net_device *dev) 3618 { 3619 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); 3620 } 3621 3622 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) 3623 { 3624 return dev_queue->state & QUEUE_STATE_ANY_XOFF; 3625 } 3626 3627 static inline bool 3628 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) 3629 { 3630 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; 3631 } 3632 3633 static inline bool 3634 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) 3635 { 3636 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; 3637 } 3638 3639 /** 3640 * netdev_queue_set_dql_min_limit - set dql minimum limit 3641 * @dev_queue: pointer to transmit queue 3642 * @min_limit: dql minimum limit 3643 * 3644 * Forces xmit_more() to return true until the minimum threshold 3645 * defined by @min_limit is reached (or until the tx queue is 3646 * empty). Warning: to be use with care, misuse will impact the 3647 * latency. 3648 */ 3649 static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue, 3650 unsigned int min_limit) 3651 { 3652 #ifdef CONFIG_BQL 3653 dev_queue->dql.min_limit = min_limit; 3654 #endif 3655 } 3656 3657 static inline int netdev_queue_dql_avail(const struct netdev_queue *txq) 3658 { 3659 #ifdef CONFIG_BQL 3660 /* Non-BQL migrated drivers will return 0, too. */ 3661 return dql_avail(&txq->dql); 3662 #else 3663 return 0; 3664 #endif 3665 } 3666 3667 /** 3668 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write 3669 * @dev_queue: pointer to transmit queue 3670 * 3671 * BQL enabled drivers might use this helper in their ndo_start_xmit(), 3672 * to give appropriate hint to the CPU. 3673 */ 3674 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) 3675 { 3676 #ifdef CONFIG_BQL 3677 prefetchw(&dev_queue->dql.num_queued); 3678 #endif 3679 } 3680 3681 /** 3682 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write 3683 * @dev_queue: pointer to transmit queue 3684 * 3685 * BQL enabled drivers might use this helper in their TX completion path, 3686 * to give appropriate hint to the CPU. 3687 */ 3688 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) 3689 { 3690 #ifdef CONFIG_BQL 3691 prefetchw(&dev_queue->dql.limit); 3692 #endif 3693 } 3694 3695 /** 3696 * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue 3697 * @dev_queue: network device queue 3698 * @bytes: number of bytes queued to the device queue 3699 * 3700 * Report the number of bytes queued for sending/completion to the network 3701 * device hardware queue. @bytes should be a good approximation and should 3702 * exactly match netdev_completed_queue() @bytes. 3703 * This is typically called once per packet, from ndo_start_xmit(). 3704 */ 3705 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, 3706 unsigned int bytes) 3707 { 3708 #ifdef CONFIG_BQL 3709 dql_queued(&dev_queue->dql, bytes); 3710 3711 if (likely(dql_avail(&dev_queue->dql) >= 0)) 3712 return; 3713 3714 /* Paired with READ_ONCE() from dev_watchdog() */ 3715 WRITE_ONCE(dev_queue->trans_start, jiffies); 3716 3717 /* This barrier is paired with smp_mb() from dev_watchdog() */ 3718 smp_mb__before_atomic(); 3719 3720 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 3721 3722 /* 3723 * The XOFF flag must be set before checking the dql_avail below, 3724 * because in netdev_tx_completed_queue we update the dql_completed 3725 * before checking the XOFF flag. 3726 */ 3727 smp_mb__after_atomic(); 3728 3729 /* check again in case another CPU has just made room avail */ 3730 if (unlikely(dql_avail(&dev_queue->dql) >= 0)) 3731 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 3732 #endif 3733 } 3734 3735 /* Variant of netdev_tx_sent_queue() for drivers that are aware 3736 * that they should not test BQL status themselves. 3737 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last 3738 * skb of a batch. 3739 * Returns true if the doorbell must be used to kick the NIC. 3740 */ 3741 static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, 3742 unsigned int bytes, 3743 bool xmit_more) 3744 { 3745 if (xmit_more) { 3746 #ifdef CONFIG_BQL 3747 dql_queued(&dev_queue->dql, bytes); 3748 #endif 3749 return netif_tx_queue_stopped(dev_queue); 3750 } 3751 netdev_tx_sent_queue(dev_queue, bytes); 3752 return true; 3753 } 3754 3755 /** 3756 * netdev_sent_queue - report the number of bytes queued to hardware 3757 * @dev: network device 3758 * @bytes: number of bytes queued to the hardware device queue 3759 * 3760 * Report the number of bytes queued for sending/completion to the network 3761 * device hardware queue#0. @bytes should be a good approximation and should 3762 * exactly match netdev_completed_queue() @bytes. 3763 * This is typically called once per packet, from ndo_start_xmit(). 3764 */ 3765 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) 3766 { 3767 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); 3768 } 3769 3770 static inline bool __netdev_sent_queue(struct net_device *dev, 3771 unsigned int bytes, 3772 bool xmit_more) 3773 { 3774 return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, 3775 xmit_more); 3776 } 3777 3778 /** 3779 * netdev_tx_completed_queue - report number of packets/bytes at TX completion. 3780 * @dev_queue: network device queue 3781 * @pkts: number of packets (currently ignored) 3782 * @bytes: number of bytes dequeued from the device queue 3783 * 3784 * Must be called at most once per TX completion round (and not per 3785 * individual packet), so that BQL can adjust its limits appropriately. 3786 */ 3787 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, 3788 unsigned int pkts, unsigned int bytes) 3789 { 3790 #ifdef CONFIG_BQL 3791 if (unlikely(!bytes)) 3792 return; 3793 3794 dql_completed(&dev_queue->dql, bytes); 3795 3796 /* 3797 * Without the memory barrier there is a small possibility that 3798 * netdev_tx_sent_queue will miss the update and cause the queue to 3799 * be stopped forever 3800 */ 3801 smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */ 3802 3803 if (unlikely(dql_avail(&dev_queue->dql) < 0)) 3804 return; 3805 3806 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) 3807 netif_schedule_queue(dev_queue); 3808 #endif 3809 } 3810 3811 /** 3812 * netdev_completed_queue - report bytes and packets completed by device 3813 * @dev: network device 3814 * @pkts: actual number of packets sent over the medium 3815 * @bytes: actual number of bytes sent over the medium 3816 * 3817 * Report the number of bytes and packets transmitted by the network device 3818 * hardware queue over the physical medium, @bytes must exactly match the 3819 * @bytes amount passed to netdev_sent_queue() 3820 */ 3821 static inline void netdev_completed_queue(struct net_device *dev, 3822 unsigned int pkts, unsigned int bytes) 3823 { 3824 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); 3825 } 3826 3827 static inline void netdev_tx_reset_queue(struct netdev_queue *q) 3828 { 3829 #ifdef CONFIG_BQL 3830 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); 3831 dql_reset(&q->dql); 3832 #endif 3833 } 3834 3835 /** 3836 * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue 3837 * @dev: network device 3838 * @qid: stack index of the queue to reset 3839 */ 3840 static inline void netdev_tx_reset_subqueue(const struct net_device *dev, 3841 u32 qid) 3842 { 3843 netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid)); 3844 } 3845 3846 /** 3847 * netdev_reset_queue - reset the packets and bytes count of a network device 3848 * @dev_queue: network device 3849 * 3850 * Reset the bytes and packet count of a network device and clear the 3851 * software flow control OFF bit for this network device 3852 */ 3853 static inline void netdev_reset_queue(struct net_device *dev_queue) 3854 { 3855 netdev_tx_reset_subqueue(dev_queue, 0); 3856 } 3857 3858 /** 3859 * netdev_cap_txqueue - check if selected tx queue exceeds device queues 3860 * @dev: network device 3861 * @queue_index: given tx queue index 3862 * 3863 * Returns 0 if given tx queue index >= number of device tx queues, 3864 * otherwise returns the originally passed tx queue index. 3865 */ 3866 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) 3867 { 3868 if (unlikely(queue_index >= dev->real_num_tx_queues)) { 3869 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", 3870 dev->name, queue_index, 3871 dev->real_num_tx_queues); 3872 return 0; 3873 } 3874 3875 return queue_index; 3876 } 3877 3878 /** 3879 * netif_running - test if up 3880 * @dev: network device 3881 * 3882 * Test if the device has been brought up. 3883 */ 3884 static inline bool netif_running(const struct net_device *dev) 3885 { 3886 return test_bit(__LINK_STATE_START, &dev->state); 3887 } 3888 3889 /* 3890 * Routines to manage the subqueues on a device. We only need start, 3891 * stop, and a check if it's stopped. All other device management is 3892 * done at the overall netdevice level. 3893 * Also test the device if we're multiqueue. 3894 */ 3895 3896 /** 3897 * netif_start_subqueue - allow sending packets on subqueue 3898 * @dev: network device 3899 * @queue_index: sub queue index 3900 * 3901 * Start individual transmit queue of a device with multiple transmit queues. 3902 */ 3903 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) 3904 { 3905 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3906 3907 netif_tx_start_queue(txq); 3908 } 3909 3910 /** 3911 * netif_stop_subqueue - stop sending packets on subqueue 3912 * @dev: network device 3913 * @queue_index: sub queue index 3914 * 3915 * Stop individual transmit queue of a device with multiple transmit queues. 3916 */ 3917 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) 3918 { 3919 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3920 netif_tx_stop_queue(txq); 3921 } 3922 3923 /** 3924 * __netif_subqueue_stopped - test status of subqueue 3925 * @dev: network device 3926 * @queue_index: sub queue index 3927 * 3928 * Check individual transmit queue of a device with multiple transmit queues. 3929 */ 3930 static inline bool __netif_subqueue_stopped(const struct net_device *dev, 3931 u16 queue_index) 3932 { 3933 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3934 3935 return netif_tx_queue_stopped(txq); 3936 } 3937 3938 /** 3939 * netif_subqueue_stopped - test status of subqueue 3940 * @dev: network device 3941 * @skb: sub queue buffer pointer 3942 * 3943 * Check individual transmit queue of a device with multiple transmit queues. 3944 */ 3945 static inline bool netif_subqueue_stopped(const struct net_device *dev, 3946 struct sk_buff *skb) 3947 { 3948 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); 3949 } 3950 3951 /** 3952 * netif_wake_subqueue - allow sending packets on subqueue 3953 * @dev: network device 3954 * @queue_index: sub queue index 3955 * 3956 * Resume individual transmit queue of a device with multiple transmit queues. 3957 */ 3958 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) 3959 { 3960 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3961 3962 netif_tx_wake_queue(txq); 3963 } 3964 3965 #ifdef CONFIG_XPS 3966 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 3967 u16 index); 3968 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 3969 u16 index, enum xps_map_type type); 3970 3971 /** 3972 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask 3973 * @j: CPU/Rx queue index 3974 * @mask: bitmask of all cpus/rx queues 3975 * @nr_bits: number of bits in the bitmask 3976 * 3977 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. 3978 */ 3979 static inline bool netif_attr_test_mask(unsigned long j, 3980 const unsigned long *mask, 3981 unsigned int nr_bits) 3982 { 3983 cpu_max_bits_warn(j, nr_bits); 3984 return test_bit(j, mask); 3985 } 3986 3987 /** 3988 * netif_attr_test_online - Test for online CPU/Rx queue 3989 * @j: CPU/Rx queue index 3990 * @online_mask: bitmask for CPUs/Rx queues that are online 3991 * @nr_bits: number of bits in the bitmask 3992 * 3993 * Returns: true if a CPU/Rx queue is online. 3994 */ 3995 static inline bool netif_attr_test_online(unsigned long j, 3996 const unsigned long *online_mask, 3997 unsigned int nr_bits) 3998 { 3999 cpu_max_bits_warn(j, nr_bits); 4000 4001 if (online_mask) 4002 return test_bit(j, online_mask); 4003 4004 return (j < nr_bits); 4005 } 4006 4007 /** 4008 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask 4009 * @n: CPU/Rx queue index 4010 * @srcp: the cpumask/Rx queue mask pointer 4011 * @nr_bits: number of bits in the bitmask 4012 * 4013 * Returns: next (after n) CPU/Rx queue index in the mask; 4014 * >= nr_bits if no further CPUs/Rx queues set. 4015 */ 4016 static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, 4017 unsigned int nr_bits) 4018 { 4019 /* -1 is a legal arg here. */ 4020 if (n != -1) 4021 cpu_max_bits_warn(n, nr_bits); 4022 4023 if (srcp) 4024 return find_next_bit(srcp, nr_bits, n + 1); 4025 4026 return n + 1; 4027 } 4028 4029 /** 4030 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p 4031 * @n: CPU/Rx queue index 4032 * @src1p: the first CPUs/Rx queues mask pointer 4033 * @src2p: the second CPUs/Rx queues mask pointer 4034 * @nr_bits: number of bits in the bitmask 4035 * 4036 * Returns: next (after n) CPU/Rx queue index set in both masks; 4037 * >= nr_bits if no further CPUs/Rx queues set in both. 4038 */ 4039 static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, 4040 const unsigned long *src2p, 4041 unsigned int nr_bits) 4042 { 4043 /* -1 is a legal arg here. */ 4044 if (n != -1) 4045 cpu_max_bits_warn(n, nr_bits); 4046 4047 if (src1p && src2p) 4048 return find_next_and_bit(src1p, src2p, nr_bits, n + 1); 4049 else if (src1p) 4050 return find_next_bit(src1p, nr_bits, n + 1); 4051 else if (src2p) 4052 return find_next_bit(src2p, nr_bits, n + 1); 4053 4054 return n + 1; 4055 } 4056 #else 4057 static inline int netif_set_xps_queue(struct net_device *dev, 4058 const struct cpumask *mask, 4059 u16 index) 4060 { 4061 return 0; 4062 } 4063 4064 static inline int __netif_set_xps_queue(struct net_device *dev, 4065 const unsigned long *mask, 4066 u16 index, enum xps_map_type type) 4067 { 4068 return 0; 4069 } 4070 #endif 4071 4072 /** 4073 * netif_is_multiqueue - test if device has multiple transmit queues 4074 * @dev: network device 4075 * 4076 * Check if device has multiple transmit queues 4077 */ 4078 static inline bool netif_is_multiqueue(const struct net_device *dev) 4079 { 4080 return dev->num_tx_queues > 1; 4081 } 4082 4083 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); 4084 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); 4085 int netif_set_real_num_queues(struct net_device *dev, 4086 unsigned int txq, unsigned int rxq); 4087 4088 int netif_get_num_default_rss_queues(void); 4089 4090 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason); 4091 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason); 4092 4093 /* 4094 * It is not allowed to call kfree_skb() or consume_skb() from hardware 4095 * interrupt context or with hardware interrupts being disabled. 4096 * (in_hardirq() || irqs_disabled()) 4097 * 4098 * We provide four helpers that can be used in following contexts : 4099 * 4100 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context, 4101 * replacing kfree_skb(skb) 4102 * 4103 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context. 4104 * Typically used in place of consume_skb(skb) in TX completion path 4105 * 4106 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context, 4107 * replacing kfree_skb(skb) 4108 * 4109 * dev_consume_skb_any(skb) when caller doesn't know its current irq context, 4110 * and consumed a packet. Used in place of consume_skb(skb) 4111 */ 4112 static inline void dev_kfree_skb_irq(struct sk_buff *skb) 4113 { 4114 dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); 4115 } 4116 4117 static inline void dev_consume_skb_irq(struct sk_buff *skb) 4118 { 4119 dev_kfree_skb_irq_reason(skb, SKB_CONSUMED); 4120 } 4121 4122 static inline void dev_kfree_skb_any(struct sk_buff *skb) 4123 { 4124 dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); 4125 } 4126 4127 static inline void dev_consume_skb_any(struct sk_buff *skb) 4128 { 4129 dev_kfree_skb_any_reason(skb, SKB_CONSUMED); 4130 } 4131 4132 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, 4133 const struct bpf_prog *xdp_prog); 4134 void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog); 4135 int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb); 4136 int netif_rx(struct sk_buff *skb); 4137 int __netif_rx(struct sk_buff *skb); 4138 4139 int netif_receive_skb(struct sk_buff *skb); 4140 int netif_receive_skb_core(struct sk_buff *skb); 4141 void netif_receive_skb_list_internal(struct list_head *head); 4142 void netif_receive_skb_list(struct list_head *head); 4143 gro_result_t gro_receive_skb(struct gro_node *gro, struct sk_buff *skb); 4144 4145 static inline gro_result_t napi_gro_receive(struct napi_struct *napi, 4146 struct sk_buff *skb) 4147 { 4148 return gro_receive_skb(&napi->gro, skb); 4149 } 4150 4151 struct sk_buff *napi_get_frags(struct napi_struct *napi); 4152 gro_result_t napi_gro_frags(struct napi_struct *napi); 4153 4154 static inline void napi_free_frags(struct napi_struct *napi) 4155 { 4156 kfree_skb(napi->skb); 4157 napi->skb = NULL; 4158 } 4159 4160 bool netdev_is_rx_handler_busy(struct net_device *dev); 4161 int netdev_rx_handler_register(struct net_device *dev, 4162 rx_handler_func_t *rx_handler, 4163 void *rx_handler_data); 4164 void netdev_rx_handler_unregister(struct net_device *dev); 4165 4166 bool dev_valid_name(const char *name); 4167 static inline bool is_socket_ioctl_cmd(unsigned int cmd) 4168 { 4169 return _IOC_TYPE(cmd) == SOCK_IOC_TYPE; 4170 } 4171 int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg); 4172 int put_user_ifreq(struct ifreq *ifr, void __user *arg); 4173 int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, 4174 void __user *data, bool *need_copyout); 4175 int dev_ifconf(struct net *net, struct ifconf __user *ifc); 4176 int dev_eth_ioctl(struct net_device *dev, 4177 struct ifreq *ifr, unsigned int cmd); 4178 int generic_hwtstamp_get_lower(struct net_device *dev, 4179 struct kernel_hwtstamp_config *kernel_cfg); 4180 int generic_hwtstamp_set_lower(struct net_device *dev, 4181 struct kernel_hwtstamp_config *kernel_cfg, 4182 struct netlink_ext_ack *extack); 4183 int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); 4184 unsigned int dev_get_flags(const struct net_device *); 4185 int __dev_change_flags(struct net_device *dev, unsigned int flags, 4186 struct netlink_ext_ack *extack); 4187 int netif_change_flags(struct net_device *dev, unsigned int flags, 4188 struct netlink_ext_ack *extack); 4189 int dev_change_flags(struct net_device *dev, unsigned int flags, 4190 struct netlink_ext_ack *extack); 4191 int netif_set_alias(struct net_device *dev, const char *alias, size_t len); 4192 int dev_set_alias(struct net_device *, const char *, size_t); 4193 int dev_get_alias(const struct net_device *, char *, size_t); 4194 int netif_change_net_namespace(struct net_device *dev, struct net *net, 4195 const char *pat, int new_ifindex, 4196 struct netlink_ext_ack *extack); 4197 int dev_change_net_namespace(struct net_device *dev, struct net *net, 4198 const char *pat); 4199 int __dev_set_mtu(struct net_device *, int); 4200 int netif_set_mtu(struct net_device *dev, int new_mtu); 4201 int dev_set_mtu(struct net_device *, int); 4202 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 4203 struct netlink_ext_ack *extack); 4204 int netif_set_mac_address(struct net_device *dev, struct sockaddr *sa, 4205 struct netlink_ext_ack *extack); 4206 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 4207 struct netlink_ext_ack *extack); 4208 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 4209 struct netlink_ext_ack *extack); 4210 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); 4211 int dev_get_port_parent_id(struct net_device *dev, 4212 struct netdev_phys_item_id *ppid, bool recurse); 4213 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); 4214 4215 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); 4216 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 4217 struct netdev_queue *txq, int *ret); 4218 4219 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); 4220 u8 dev_xdp_prog_count(struct net_device *dev); 4221 int netif_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf); 4222 int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf); 4223 u8 dev_xdp_sb_prog_count(struct net_device *dev); 4224 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); 4225 4226 u32 dev_get_min_mp_channel_count(const struct net_device *dev); 4227 4228 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 4229 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 4230 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb); 4231 bool is_skb_forwardable(const struct net_device *dev, 4232 const struct sk_buff *skb); 4233 4234 static __always_inline bool __is_skb_forwardable(const struct net_device *dev, 4235 const struct sk_buff *skb, 4236 const bool check_mtu) 4237 { 4238 const u32 vlan_hdr_len = 4; /* VLAN_HLEN */ 4239 unsigned int len; 4240 4241 if (!(dev->flags & IFF_UP)) 4242 return false; 4243 4244 if (!check_mtu) 4245 return true; 4246 4247 len = dev->mtu + dev->hard_header_len + vlan_hdr_len; 4248 if (skb->len <= len) 4249 return true; 4250 4251 /* if TSO is enabled, we don't care about the length as the packet 4252 * could be forwarded without being segmented before 4253 */ 4254 if (skb_is_gso(skb)) 4255 return true; 4256 4257 return false; 4258 } 4259 4260 void netdev_core_stats_inc(struct net_device *dev, u32 offset); 4261 4262 #define DEV_CORE_STATS_INC(FIELD) \ 4263 static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ 4264 { \ 4265 netdev_core_stats_inc(dev, \ 4266 offsetof(struct net_device_core_stats, FIELD)); \ 4267 } 4268 DEV_CORE_STATS_INC(rx_dropped) 4269 DEV_CORE_STATS_INC(tx_dropped) 4270 DEV_CORE_STATS_INC(rx_nohandler) 4271 DEV_CORE_STATS_INC(rx_otherhost_dropped) 4272 #undef DEV_CORE_STATS_INC 4273 4274 static __always_inline int ____dev_forward_skb(struct net_device *dev, 4275 struct sk_buff *skb, 4276 const bool check_mtu) 4277 { 4278 if (skb_orphan_frags(skb, GFP_ATOMIC) || 4279 unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) { 4280 dev_core_stats_rx_dropped_inc(dev); 4281 kfree_skb(skb); 4282 return NET_RX_DROP; 4283 } 4284 4285 skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev))); 4286 skb->priority = 0; 4287 return 0; 4288 } 4289 4290 bool dev_nit_active_rcu(const struct net_device *dev); 4291 static inline bool dev_nit_active(const struct net_device *dev) 4292 { 4293 bool ret; 4294 4295 rcu_read_lock(); 4296 ret = dev_nit_active_rcu(dev); 4297 rcu_read_unlock(); 4298 return ret; 4299 } 4300 4301 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); 4302 4303 static inline void __dev_put(struct net_device *dev) 4304 { 4305 if (dev) { 4306 #ifdef CONFIG_PCPU_DEV_REFCNT 4307 this_cpu_dec(*dev->pcpu_refcnt); 4308 #else 4309 refcount_dec(&dev->dev_refcnt); 4310 #endif 4311 } 4312 } 4313 4314 static inline void __dev_hold(struct net_device *dev) 4315 { 4316 if (dev) { 4317 #ifdef CONFIG_PCPU_DEV_REFCNT 4318 this_cpu_inc(*dev->pcpu_refcnt); 4319 #else 4320 refcount_inc(&dev->dev_refcnt); 4321 #endif 4322 } 4323 } 4324 4325 static inline void __netdev_tracker_alloc(struct net_device *dev, 4326 netdevice_tracker *tracker, 4327 gfp_t gfp) 4328 { 4329 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER 4330 ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp); 4331 #endif 4332 } 4333 4334 /* netdev_tracker_alloc() can upgrade a prior untracked reference 4335 * taken by dev_get_by_name()/dev_get_by_index() to a tracked one. 4336 */ 4337 static inline void netdev_tracker_alloc(struct net_device *dev, 4338 netdevice_tracker *tracker, gfp_t gfp) 4339 { 4340 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER 4341 refcount_dec(&dev->refcnt_tracker.no_tracker); 4342 __netdev_tracker_alloc(dev, tracker, gfp); 4343 #endif 4344 } 4345 4346 static inline void netdev_tracker_free(struct net_device *dev, 4347 netdevice_tracker *tracker) 4348 { 4349 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER 4350 ref_tracker_free(&dev->refcnt_tracker, tracker); 4351 #endif 4352 } 4353 4354 static inline void netdev_hold(struct net_device *dev, 4355 netdevice_tracker *tracker, gfp_t gfp) 4356 { 4357 if (dev) { 4358 __dev_hold(dev); 4359 __netdev_tracker_alloc(dev, tracker, gfp); 4360 } 4361 } 4362 4363 static inline void netdev_put(struct net_device *dev, 4364 netdevice_tracker *tracker) 4365 { 4366 if (dev) { 4367 netdev_tracker_free(dev, tracker); 4368 __dev_put(dev); 4369 } 4370 } 4371 4372 /** 4373 * dev_hold - get reference to device 4374 * @dev: network device 4375 * 4376 * Hold reference to device to keep it from being freed. 4377 * Try using netdev_hold() instead. 4378 */ 4379 static inline void dev_hold(struct net_device *dev) 4380 { 4381 netdev_hold(dev, NULL, GFP_ATOMIC); 4382 } 4383 4384 /** 4385 * dev_put - release reference to device 4386 * @dev: network device 4387 * 4388 * Release reference to device to allow it to be freed. 4389 * Try using netdev_put() instead. 4390 */ 4391 static inline void dev_put(struct net_device *dev) 4392 { 4393 netdev_put(dev, NULL); 4394 } 4395 4396 DEFINE_FREE(dev_put, struct net_device *, if (_T) dev_put(_T)) 4397 4398 static inline void netdev_ref_replace(struct net_device *odev, 4399 struct net_device *ndev, 4400 netdevice_tracker *tracker, 4401 gfp_t gfp) 4402 { 4403 if (odev) 4404 netdev_tracker_free(odev, tracker); 4405 4406 __dev_hold(ndev); 4407 __dev_put(odev); 4408 4409 if (ndev) 4410 __netdev_tracker_alloc(ndev, tracker, gfp); 4411 } 4412 4413 /* Carrier loss detection, dial on demand. The functions netif_carrier_on 4414 * and _off may be called from IRQ context, but it is caller 4415 * who is responsible for serialization of these calls. 4416 * 4417 * The name carrier is inappropriate, these functions should really be 4418 * called netif_lowerlayer_*() because they represent the state of any 4419 * kind of lower layer not just hardware media. 4420 */ 4421 void linkwatch_fire_event(struct net_device *dev); 4422 4423 /** 4424 * linkwatch_sync_dev - sync linkwatch for the given device 4425 * @dev: network device to sync linkwatch for 4426 * 4427 * Sync linkwatch for the given device, removing it from the 4428 * pending work list (if queued). 4429 */ 4430 void linkwatch_sync_dev(struct net_device *dev); 4431 4432 /** 4433 * netif_carrier_ok - test if carrier present 4434 * @dev: network device 4435 * 4436 * Check if carrier is present on device 4437 */ 4438 static inline bool netif_carrier_ok(const struct net_device *dev) 4439 { 4440 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); 4441 } 4442 4443 unsigned long dev_trans_start(struct net_device *dev); 4444 4445 void netdev_watchdog_up(struct net_device *dev); 4446 4447 void netif_carrier_on(struct net_device *dev); 4448 void netif_carrier_off(struct net_device *dev); 4449 void netif_carrier_event(struct net_device *dev); 4450 4451 /** 4452 * netif_dormant_on - mark device as dormant. 4453 * @dev: network device 4454 * 4455 * Mark device as dormant (as per RFC2863). 4456 * 4457 * The dormant state indicates that the relevant interface is not 4458 * actually in a condition to pass packets (i.e., it is not 'up') but is 4459 * in a "pending" state, waiting for some external event. For "on- 4460 * demand" interfaces, this new state identifies the situation where the 4461 * interface is waiting for events to place it in the up state. 4462 */ 4463 static inline void netif_dormant_on(struct net_device *dev) 4464 { 4465 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) 4466 linkwatch_fire_event(dev); 4467 } 4468 4469 /** 4470 * netif_dormant_off - set device as not dormant. 4471 * @dev: network device 4472 * 4473 * Device is not in dormant state. 4474 */ 4475 static inline void netif_dormant_off(struct net_device *dev) 4476 { 4477 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) 4478 linkwatch_fire_event(dev); 4479 } 4480 4481 /** 4482 * netif_dormant - test if device is dormant 4483 * @dev: network device 4484 * 4485 * Check if device is dormant. 4486 */ 4487 static inline bool netif_dormant(const struct net_device *dev) 4488 { 4489 return test_bit(__LINK_STATE_DORMANT, &dev->state); 4490 } 4491 4492 4493 /** 4494 * netif_testing_on - mark device as under test. 4495 * @dev: network device 4496 * 4497 * Mark device as under test (as per RFC2863). 4498 * 4499 * The testing state indicates that some test(s) must be performed on 4500 * the interface. After completion, of the test, the interface state 4501 * will change to up, dormant, or down, as appropriate. 4502 */ 4503 static inline void netif_testing_on(struct net_device *dev) 4504 { 4505 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state)) 4506 linkwatch_fire_event(dev); 4507 } 4508 4509 /** 4510 * netif_testing_off - set device as not under test. 4511 * @dev: network device 4512 * 4513 * Device is not in testing state. 4514 */ 4515 static inline void netif_testing_off(struct net_device *dev) 4516 { 4517 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) 4518 linkwatch_fire_event(dev); 4519 } 4520 4521 /** 4522 * netif_testing - test if device is under test 4523 * @dev: network device 4524 * 4525 * Check if device is under test 4526 */ 4527 static inline bool netif_testing(const struct net_device *dev) 4528 { 4529 return test_bit(__LINK_STATE_TESTING, &dev->state); 4530 } 4531 4532 4533 /** 4534 * netif_oper_up - test if device is operational 4535 * @dev: network device 4536 * 4537 * Check if carrier is operational 4538 */ 4539 static inline bool netif_oper_up(const struct net_device *dev) 4540 { 4541 unsigned int operstate = READ_ONCE(dev->operstate); 4542 4543 return operstate == IF_OPER_UP || 4544 operstate == IF_OPER_UNKNOWN /* backward compat */; 4545 } 4546 4547 /** 4548 * netif_device_present - is device available or removed 4549 * @dev: network device 4550 * 4551 * Check if device has not been removed from system. 4552 */ 4553 static inline bool netif_device_present(const struct net_device *dev) 4554 { 4555 return test_bit(__LINK_STATE_PRESENT, &dev->state); 4556 } 4557 4558 void netif_device_detach(struct net_device *dev); 4559 4560 void netif_device_attach(struct net_device *dev); 4561 4562 /* 4563 * Network interface message level settings 4564 */ 4565 4566 enum { 4567 NETIF_MSG_DRV_BIT, 4568 NETIF_MSG_PROBE_BIT, 4569 NETIF_MSG_LINK_BIT, 4570 NETIF_MSG_TIMER_BIT, 4571 NETIF_MSG_IFDOWN_BIT, 4572 NETIF_MSG_IFUP_BIT, 4573 NETIF_MSG_RX_ERR_BIT, 4574 NETIF_MSG_TX_ERR_BIT, 4575 NETIF_MSG_TX_QUEUED_BIT, 4576 NETIF_MSG_INTR_BIT, 4577 NETIF_MSG_TX_DONE_BIT, 4578 NETIF_MSG_RX_STATUS_BIT, 4579 NETIF_MSG_PKTDATA_BIT, 4580 NETIF_MSG_HW_BIT, 4581 NETIF_MSG_WOL_BIT, 4582 4583 /* When you add a new bit above, update netif_msg_class_names array 4584 * in net/ethtool/common.c 4585 */ 4586 NETIF_MSG_CLASS_COUNT, 4587 }; 4588 /* Both ethtool_ops interface and internal driver implementation use u32 */ 4589 static_assert(NETIF_MSG_CLASS_COUNT <= 32); 4590 4591 #define __NETIF_MSG_BIT(bit) ((u32)1 << (bit)) 4592 #define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT) 4593 4594 #define NETIF_MSG_DRV __NETIF_MSG(DRV) 4595 #define NETIF_MSG_PROBE __NETIF_MSG(PROBE) 4596 #define NETIF_MSG_LINK __NETIF_MSG(LINK) 4597 #define NETIF_MSG_TIMER __NETIF_MSG(TIMER) 4598 #define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN) 4599 #define NETIF_MSG_IFUP __NETIF_MSG(IFUP) 4600 #define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR) 4601 #define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR) 4602 #define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED) 4603 #define NETIF_MSG_INTR __NETIF_MSG(INTR) 4604 #define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE) 4605 #define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS) 4606 #define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA) 4607 #define NETIF_MSG_HW __NETIF_MSG(HW) 4608 #define NETIF_MSG_WOL __NETIF_MSG(WOL) 4609 4610 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) 4611 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) 4612 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) 4613 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) 4614 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) 4615 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) 4616 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) 4617 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) 4618 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) 4619 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) 4620 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) 4621 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) 4622 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) 4623 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) 4624 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) 4625 4626 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) 4627 { 4628 /* use default */ 4629 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) 4630 return default_msg_enable_bits; 4631 if (debug_value == 0) /* no output */ 4632 return 0; 4633 /* set low N bits */ 4634 return (1U << debug_value) - 1; 4635 } 4636 4637 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) 4638 { 4639 spin_lock(&txq->_xmit_lock); 4640 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4641 WRITE_ONCE(txq->xmit_lock_owner, cpu); 4642 } 4643 4644 static inline bool __netif_tx_acquire(struct netdev_queue *txq) 4645 { 4646 __acquire(&txq->_xmit_lock); 4647 return true; 4648 } 4649 4650 static inline void __netif_tx_release(struct netdev_queue *txq) 4651 { 4652 __release(&txq->_xmit_lock); 4653 } 4654 4655 static inline void __netif_tx_lock_bh(struct netdev_queue *txq) 4656 { 4657 spin_lock_bh(&txq->_xmit_lock); 4658 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4659 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4660 } 4661 4662 static inline bool __netif_tx_trylock(struct netdev_queue *txq) 4663 { 4664 bool ok = spin_trylock(&txq->_xmit_lock); 4665 4666 if (likely(ok)) { 4667 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4668 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4669 } 4670 return ok; 4671 } 4672 4673 static inline void __netif_tx_unlock(struct netdev_queue *txq) 4674 { 4675 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4676 WRITE_ONCE(txq->xmit_lock_owner, -1); 4677 spin_unlock(&txq->_xmit_lock); 4678 } 4679 4680 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) 4681 { 4682 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4683 WRITE_ONCE(txq->xmit_lock_owner, -1); 4684 spin_unlock_bh(&txq->_xmit_lock); 4685 } 4686 4687 /* 4688 * txq->trans_start can be read locklessly from dev_watchdog() 4689 */ 4690 static inline void txq_trans_update(struct netdev_queue *txq) 4691 { 4692 if (txq->xmit_lock_owner != -1) 4693 WRITE_ONCE(txq->trans_start, jiffies); 4694 } 4695 4696 static inline void txq_trans_cond_update(struct netdev_queue *txq) 4697 { 4698 unsigned long now = jiffies; 4699 4700 if (READ_ONCE(txq->trans_start) != now) 4701 WRITE_ONCE(txq->trans_start, now); 4702 } 4703 4704 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ 4705 static inline void netif_trans_update(struct net_device *dev) 4706 { 4707 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 4708 4709 txq_trans_cond_update(txq); 4710 } 4711 4712 /** 4713 * netif_tx_lock - grab network device transmit lock 4714 * @dev: network device 4715 * 4716 * Get network device transmit lock 4717 */ 4718 void netif_tx_lock(struct net_device *dev); 4719 4720 static inline void netif_tx_lock_bh(struct net_device *dev) 4721 { 4722 local_bh_disable(); 4723 netif_tx_lock(dev); 4724 } 4725 4726 void netif_tx_unlock(struct net_device *dev); 4727 4728 static inline void netif_tx_unlock_bh(struct net_device *dev) 4729 { 4730 netif_tx_unlock(dev); 4731 local_bh_enable(); 4732 } 4733 4734 #define HARD_TX_LOCK(dev, txq, cpu) { \ 4735 if (!(dev)->lltx) { \ 4736 __netif_tx_lock(txq, cpu); \ 4737 } else { \ 4738 __netif_tx_acquire(txq); \ 4739 } \ 4740 } 4741 4742 #define HARD_TX_TRYLOCK(dev, txq) \ 4743 (!(dev)->lltx ? \ 4744 __netif_tx_trylock(txq) : \ 4745 __netif_tx_acquire(txq)) 4746 4747 #define HARD_TX_UNLOCK(dev, txq) { \ 4748 if (!(dev)->lltx) { \ 4749 __netif_tx_unlock(txq); \ 4750 } else { \ 4751 __netif_tx_release(txq); \ 4752 } \ 4753 } 4754 4755 static inline void netif_tx_disable(struct net_device *dev) 4756 { 4757 unsigned int i; 4758 int cpu; 4759 4760 local_bh_disable(); 4761 cpu = smp_processor_id(); 4762 spin_lock(&dev->tx_global_lock); 4763 for (i = 0; i < dev->num_tx_queues; i++) { 4764 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 4765 4766 __netif_tx_lock(txq, cpu); 4767 netif_tx_stop_queue(txq); 4768 __netif_tx_unlock(txq); 4769 } 4770 spin_unlock(&dev->tx_global_lock); 4771 local_bh_enable(); 4772 } 4773 4774 static inline void netif_addr_lock(struct net_device *dev) 4775 { 4776 unsigned char nest_level = 0; 4777 4778 #ifdef CONFIG_LOCKDEP 4779 nest_level = dev->nested_level; 4780 #endif 4781 spin_lock_nested(&dev->addr_list_lock, nest_level); 4782 } 4783 4784 static inline void netif_addr_lock_bh(struct net_device *dev) 4785 { 4786 unsigned char nest_level = 0; 4787 4788 #ifdef CONFIG_LOCKDEP 4789 nest_level = dev->nested_level; 4790 #endif 4791 local_bh_disable(); 4792 spin_lock_nested(&dev->addr_list_lock, nest_level); 4793 } 4794 4795 static inline void netif_addr_unlock(struct net_device *dev) 4796 { 4797 spin_unlock(&dev->addr_list_lock); 4798 } 4799 4800 static inline void netif_addr_unlock_bh(struct net_device *dev) 4801 { 4802 spin_unlock_bh(&dev->addr_list_lock); 4803 } 4804 4805 /* 4806 * dev_addrs walker. Should be used only for read access. Call with 4807 * rcu_read_lock held. 4808 */ 4809 #define for_each_dev_addr(dev, ha) \ 4810 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) 4811 4812 /* These functions live elsewhere (drivers/net/net_init.c, but related) */ 4813 4814 void ether_setup(struct net_device *dev); 4815 4816 /* Allocate dummy net_device */ 4817 struct net_device *alloc_netdev_dummy(int sizeof_priv); 4818 4819 /* Support for loadable net-drivers */ 4820 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 4821 unsigned char name_assign_type, 4822 void (*setup)(struct net_device *), 4823 unsigned int txqs, unsigned int rxqs); 4824 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ 4825 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) 4826 4827 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \ 4828 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \ 4829 count) 4830 4831 int register_netdev(struct net_device *dev); 4832 void unregister_netdev(struct net_device *dev); 4833 4834 int devm_register_netdev(struct device *dev, struct net_device *ndev); 4835 4836 /* General hardware address lists handling functions */ 4837 int __hw_addr_sync(struct netdev_hw_addr_list *to_list, 4838 struct netdev_hw_addr_list *from_list, int addr_len); 4839 int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list, 4840 struct netdev_hw_addr_list *from_list, 4841 int addr_len); 4842 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, 4843 struct netdev_hw_addr_list *from_list, int addr_len); 4844 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, 4845 struct net_device *dev, 4846 int (*sync)(struct net_device *, const unsigned char *), 4847 int (*unsync)(struct net_device *, 4848 const unsigned char *)); 4849 int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, 4850 struct net_device *dev, 4851 int (*sync)(struct net_device *, 4852 const unsigned char *, int), 4853 int (*unsync)(struct net_device *, 4854 const unsigned char *, int)); 4855 void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, 4856 struct net_device *dev, 4857 int (*unsync)(struct net_device *, 4858 const unsigned char *, int)); 4859 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, 4860 struct net_device *dev, 4861 int (*unsync)(struct net_device *, 4862 const unsigned char *)); 4863 void __hw_addr_init(struct netdev_hw_addr_list *list); 4864 4865 /* Functions used for device addresses handling */ 4866 void dev_addr_mod(struct net_device *dev, unsigned int offset, 4867 const void *addr, size_t len); 4868 4869 static inline void 4870 __dev_addr_set(struct net_device *dev, const void *addr, size_t len) 4871 { 4872 dev_addr_mod(dev, 0, addr, len); 4873 } 4874 4875 static inline void dev_addr_set(struct net_device *dev, const u8 *addr) 4876 { 4877 __dev_addr_set(dev, addr, dev->addr_len); 4878 } 4879 4880 int dev_addr_add(struct net_device *dev, const unsigned char *addr, 4881 unsigned char addr_type); 4882 int dev_addr_del(struct net_device *dev, const unsigned char *addr, 4883 unsigned char addr_type); 4884 4885 /* Functions used for unicast addresses handling */ 4886 int dev_uc_add(struct net_device *dev, const unsigned char *addr); 4887 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); 4888 int dev_uc_del(struct net_device *dev, const unsigned char *addr); 4889 int dev_uc_sync(struct net_device *to, struct net_device *from); 4890 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); 4891 void dev_uc_unsync(struct net_device *to, struct net_device *from); 4892 void dev_uc_flush(struct net_device *dev); 4893 void dev_uc_init(struct net_device *dev); 4894 4895 /** 4896 * __dev_uc_sync - Synchronize device's unicast list 4897 * @dev: device to sync 4898 * @sync: function to call if address should be added 4899 * @unsync: function to call if address should be removed 4900 * 4901 * Add newly added addresses to the interface, and release 4902 * addresses that have been deleted. 4903 */ 4904 static inline int __dev_uc_sync(struct net_device *dev, 4905 int (*sync)(struct net_device *, 4906 const unsigned char *), 4907 int (*unsync)(struct net_device *, 4908 const unsigned char *)) 4909 { 4910 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); 4911 } 4912 4913 /** 4914 * __dev_uc_unsync - Remove synchronized addresses from device 4915 * @dev: device to sync 4916 * @unsync: function to call if address should be removed 4917 * 4918 * Remove all addresses that were added to the device by dev_uc_sync(). 4919 */ 4920 static inline void __dev_uc_unsync(struct net_device *dev, 4921 int (*unsync)(struct net_device *, 4922 const unsigned char *)) 4923 { 4924 __hw_addr_unsync_dev(&dev->uc, dev, unsync); 4925 } 4926 4927 /* Functions used for multicast addresses handling */ 4928 int dev_mc_add(struct net_device *dev, const unsigned char *addr); 4929 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); 4930 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); 4931 int dev_mc_del(struct net_device *dev, const unsigned char *addr); 4932 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); 4933 int dev_mc_sync(struct net_device *to, struct net_device *from); 4934 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); 4935 void dev_mc_unsync(struct net_device *to, struct net_device *from); 4936 void dev_mc_flush(struct net_device *dev); 4937 void dev_mc_init(struct net_device *dev); 4938 4939 /** 4940 * __dev_mc_sync - Synchronize device's multicast list 4941 * @dev: device to sync 4942 * @sync: function to call if address should be added 4943 * @unsync: function to call if address should be removed 4944 * 4945 * Add newly added addresses to the interface, and release 4946 * addresses that have been deleted. 4947 */ 4948 static inline int __dev_mc_sync(struct net_device *dev, 4949 int (*sync)(struct net_device *, 4950 const unsigned char *), 4951 int (*unsync)(struct net_device *, 4952 const unsigned char *)) 4953 { 4954 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); 4955 } 4956 4957 /** 4958 * __dev_mc_unsync - Remove synchronized addresses from device 4959 * @dev: device to sync 4960 * @unsync: function to call if address should be removed 4961 * 4962 * Remove all addresses that were added to the device by dev_mc_sync(). 4963 */ 4964 static inline void __dev_mc_unsync(struct net_device *dev, 4965 int (*unsync)(struct net_device *, 4966 const unsigned char *)) 4967 { 4968 __hw_addr_unsync_dev(&dev->mc, dev, unsync); 4969 } 4970 4971 /* Functions used for secondary unicast and multicast support */ 4972 void dev_set_rx_mode(struct net_device *dev); 4973 int dev_set_promiscuity(struct net_device *dev, int inc); 4974 int netif_set_allmulti(struct net_device *dev, int inc, bool notify); 4975 int dev_set_allmulti(struct net_device *dev, int inc); 4976 void netdev_state_change(struct net_device *dev); 4977 void __netdev_notify_peers(struct net_device *dev); 4978 void netdev_notify_peers(struct net_device *dev); 4979 void netdev_features_change(struct net_device *dev); 4980 /* Load a device via the kmod */ 4981 void dev_load(struct net *net, const char *name); 4982 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 4983 struct rtnl_link_stats64 *storage); 4984 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 4985 const struct net_device_stats *netdev_stats); 4986 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 4987 const struct pcpu_sw_netstats __percpu *netstats); 4988 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); 4989 4990 enum { 4991 NESTED_SYNC_IMM_BIT, 4992 NESTED_SYNC_TODO_BIT, 4993 }; 4994 4995 #define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit)) 4996 #define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT) 4997 4998 #define NESTED_SYNC_IMM __NESTED_SYNC(IMM) 4999 #define NESTED_SYNC_TODO __NESTED_SYNC(TODO) 5000 5001 struct netdev_nested_priv { 5002 unsigned char flags; 5003 void *data; 5004 }; 5005 5006 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); 5007 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 5008 struct list_head **iter); 5009 5010 /* iterate through upper list, must be called under RCU read lock */ 5011 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ 5012 for (iter = &(dev)->adj_list.upper, \ 5013 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ 5014 updev; \ 5015 updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) 5016 5017 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 5018 int (*fn)(struct net_device *upper_dev, 5019 struct netdev_nested_priv *priv), 5020 struct netdev_nested_priv *priv); 5021 5022 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 5023 struct net_device *upper_dev); 5024 5025 bool netdev_has_any_upper_dev(struct net_device *dev); 5026 5027 void *netdev_lower_get_next_private(struct net_device *dev, 5028 struct list_head **iter); 5029 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 5030 struct list_head **iter); 5031 5032 #define netdev_for_each_lower_private(dev, priv, iter) \ 5033 for (iter = (dev)->adj_list.lower.next, \ 5034 priv = netdev_lower_get_next_private(dev, &(iter)); \ 5035 priv; \ 5036 priv = netdev_lower_get_next_private(dev, &(iter))) 5037 5038 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \ 5039 for (iter = &(dev)->adj_list.lower, \ 5040 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ 5041 priv; \ 5042 priv = netdev_lower_get_next_private_rcu(dev, &(iter))) 5043 5044 void *netdev_lower_get_next(struct net_device *dev, 5045 struct list_head **iter); 5046 5047 #define netdev_for_each_lower_dev(dev, ldev, iter) \ 5048 for (iter = (dev)->adj_list.lower.next, \ 5049 ldev = netdev_lower_get_next(dev, &(iter)); \ 5050 ldev; \ 5051 ldev = netdev_lower_get_next(dev, &(iter))) 5052 5053 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 5054 struct list_head **iter); 5055 int netdev_walk_all_lower_dev(struct net_device *dev, 5056 int (*fn)(struct net_device *lower_dev, 5057 struct netdev_nested_priv *priv), 5058 struct netdev_nested_priv *priv); 5059 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 5060 int (*fn)(struct net_device *lower_dev, 5061 struct netdev_nested_priv *priv), 5062 struct netdev_nested_priv *priv); 5063 5064 void *netdev_adjacent_get_private(struct list_head *adj_list); 5065 void *netdev_lower_get_first_private_rcu(struct net_device *dev); 5066 struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 5067 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); 5068 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, 5069 struct netlink_ext_ack *extack); 5070 int netdev_master_upper_dev_link(struct net_device *dev, 5071 struct net_device *upper_dev, 5072 void *upper_priv, void *upper_info, 5073 struct netlink_ext_ack *extack); 5074 void netdev_upper_dev_unlink(struct net_device *dev, 5075 struct net_device *upper_dev); 5076 int netdev_adjacent_change_prepare(struct net_device *old_dev, 5077 struct net_device *new_dev, 5078 struct net_device *dev, 5079 struct netlink_ext_ack *extack); 5080 void netdev_adjacent_change_commit(struct net_device *old_dev, 5081 struct net_device *new_dev, 5082 struct net_device *dev); 5083 void netdev_adjacent_change_abort(struct net_device *old_dev, 5084 struct net_device *new_dev, 5085 struct net_device *dev); 5086 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); 5087 void *netdev_lower_dev_get_private(struct net_device *dev, 5088 struct net_device *lower_dev); 5089 void netdev_lower_state_changed(struct net_device *lower_dev, 5090 void *lower_state_info); 5091 5092 /* RSS keys are 40 or 52 bytes long */ 5093 #define NETDEV_RSS_KEY_LEN 52 5094 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; 5095 void netdev_rss_key_fill(void *buffer, size_t len); 5096 5097 int skb_checksum_help(struct sk_buff *skb); 5098 int skb_crc32c_csum_help(struct sk_buff *skb); 5099 int skb_csum_hwoffload_help(struct sk_buff *skb, 5100 const netdev_features_t features); 5101 5102 struct netdev_bonding_info { 5103 ifslave slave; 5104 ifbond master; 5105 }; 5106 5107 struct netdev_notifier_bonding_info { 5108 struct netdev_notifier_info info; /* must be first */ 5109 struct netdev_bonding_info bonding_info; 5110 }; 5111 5112 void netdev_bonding_info_change(struct net_device *dev, 5113 struct netdev_bonding_info *bonding_info); 5114 5115 #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) 5116 void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data); 5117 #else 5118 static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, 5119 const void *data) 5120 { 5121 } 5122 #endif 5123 5124 __be16 skb_network_protocol(struct sk_buff *skb, int *depth); 5125 5126 static inline bool can_checksum_protocol(netdev_features_t features, 5127 __be16 protocol) 5128 { 5129 if (protocol == htons(ETH_P_FCOE)) 5130 return !!(features & NETIF_F_FCOE_CRC); 5131 5132 /* Assume this is an IP checksum (not SCTP CRC) */ 5133 5134 if (features & NETIF_F_HW_CSUM) { 5135 /* Can checksum everything */ 5136 return true; 5137 } 5138 5139 switch (protocol) { 5140 case htons(ETH_P_IP): 5141 return !!(features & NETIF_F_IP_CSUM); 5142 case htons(ETH_P_IPV6): 5143 return !!(features & NETIF_F_IPV6_CSUM); 5144 default: 5145 return false; 5146 } 5147 } 5148 5149 #ifdef CONFIG_BUG 5150 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb); 5151 #else 5152 static inline void netdev_rx_csum_fault(struct net_device *dev, 5153 struct sk_buff *skb) 5154 { 5155 } 5156 #endif 5157 /* rx skb timestamps */ 5158 void net_enable_timestamp(void); 5159 void net_disable_timestamp(void); 5160 5161 static inline ktime_t netdev_get_tstamp(struct net_device *dev, 5162 const struct skb_shared_hwtstamps *hwtstamps, 5163 bool cycles) 5164 { 5165 const struct net_device_ops *ops = dev->netdev_ops; 5166 5167 if (ops->ndo_get_tstamp) 5168 return ops->ndo_get_tstamp(dev, hwtstamps, cycles); 5169 5170 return hwtstamps->hwtstamp; 5171 } 5172 5173 #ifndef CONFIG_PREEMPT_RT 5174 static inline void netdev_xmit_set_more(bool more) 5175 { 5176 __this_cpu_write(softnet_data.xmit.more, more); 5177 } 5178 5179 static inline bool netdev_xmit_more(void) 5180 { 5181 return __this_cpu_read(softnet_data.xmit.more); 5182 } 5183 #else 5184 static inline void netdev_xmit_set_more(bool more) 5185 { 5186 current->net_xmit.more = more; 5187 } 5188 5189 static inline bool netdev_xmit_more(void) 5190 { 5191 return current->net_xmit.more; 5192 } 5193 #endif 5194 5195 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, 5196 struct sk_buff *skb, struct net_device *dev, 5197 bool more) 5198 { 5199 netdev_xmit_set_more(more); 5200 return ops->ndo_start_xmit(skb, dev); 5201 } 5202 5203 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, 5204 struct netdev_queue *txq, bool more) 5205 { 5206 const struct net_device_ops *ops = dev->netdev_ops; 5207 netdev_tx_t rc; 5208 5209 rc = __netdev_start_xmit(ops, skb, dev, more); 5210 if (rc == NETDEV_TX_OK) 5211 txq_trans_update(txq); 5212 5213 return rc; 5214 } 5215 5216 int netdev_class_create_file_ns(const struct class_attribute *class_attr, 5217 const void *ns); 5218 void netdev_class_remove_file_ns(const struct class_attribute *class_attr, 5219 const void *ns); 5220 5221 extern const struct kobj_ns_type_operations net_ns_type_operations; 5222 5223 const char *netdev_drivername(const struct net_device *dev); 5224 5225 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, 5226 netdev_features_t f2) 5227 { 5228 if ((f1 ^ f2) & NETIF_F_HW_CSUM) { 5229 if (f1 & NETIF_F_HW_CSUM) 5230 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 5231 else 5232 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 5233 } 5234 5235 return f1 & f2; 5236 } 5237 5238 static inline netdev_features_t netdev_get_wanted_features( 5239 struct net_device *dev) 5240 { 5241 return (dev->features & ~dev->hw_features) | dev->wanted_features; 5242 } 5243 netdev_features_t netdev_increment_features(netdev_features_t all, 5244 netdev_features_t one, netdev_features_t mask); 5245 5246 /* Allow TSO being used on stacked device : 5247 * Performing the GSO segmentation before last device 5248 * is a performance improvement. 5249 */ 5250 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, 5251 netdev_features_t mask) 5252 { 5253 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); 5254 } 5255 5256 int __netdev_update_features(struct net_device *dev); 5257 void netdev_update_features(struct net_device *dev); 5258 void netdev_change_features(struct net_device *dev); 5259 5260 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 5261 struct net_device *dev); 5262 5263 netdev_features_t passthru_features_check(struct sk_buff *skb, 5264 struct net_device *dev, 5265 netdev_features_t features); 5266 netdev_features_t netif_skb_features(struct sk_buff *skb); 5267 void skb_warn_bad_offload(const struct sk_buff *skb); 5268 5269 static inline bool net_gso_ok(netdev_features_t features, int gso_type) 5270 { 5271 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; 5272 5273 /* check flags correspondence */ 5274 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); 5275 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); 5276 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); 5277 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); 5278 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); 5279 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); 5280 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); 5281 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); 5282 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)); 5283 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)); 5284 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); 5285 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); 5286 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); 5287 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); 5288 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); 5289 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); 5290 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); 5291 BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); 5292 BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT)); 5293 BUILD_BUG_ON(SKB_GSO_TCP_ACCECN != 5294 (NETIF_F_GSO_ACCECN >> NETIF_F_GSO_SHIFT)); 5295 5296 return (features & feature) == feature; 5297 } 5298 5299 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) 5300 { 5301 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && 5302 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 5303 } 5304 5305 static inline bool netif_needs_gso(struct sk_buff *skb, 5306 netdev_features_t features) 5307 { 5308 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || 5309 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && 5310 (skb->ip_summed != CHECKSUM_UNNECESSARY))); 5311 } 5312 5313 void netif_set_tso_max_size(struct net_device *dev, unsigned int size); 5314 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs); 5315 void netif_inherit_tso_max(struct net_device *to, 5316 const struct net_device *from); 5317 5318 static inline unsigned int 5319 netif_get_gro_max_size(const struct net_device *dev, const struct sk_buff *skb) 5320 { 5321 /* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */ 5322 return skb->protocol == htons(ETH_P_IPV6) ? 5323 READ_ONCE(dev->gro_max_size) : 5324 READ_ONCE(dev->gro_ipv4_max_size); 5325 } 5326 5327 static inline unsigned int 5328 netif_get_gso_max_size(const struct net_device *dev, const struct sk_buff *skb) 5329 { 5330 /* pairs with WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */ 5331 return skb->protocol == htons(ETH_P_IPV6) ? 5332 READ_ONCE(dev->gso_max_size) : 5333 READ_ONCE(dev->gso_ipv4_max_size); 5334 } 5335 5336 static inline bool netif_is_macsec(const struct net_device *dev) 5337 { 5338 return dev->priv_flags & IFF_MACSEC; 5339 } 5340 5341 static inline bool netif_is_macvlan(const struct net_device *dev) 5342 { 5343 return dev->priv_flags & IFF_MACVLAN; 5344 } 5345 5346 static inline bool netif_is_macvlan_port(const struct net_device *dev) 5347 { 5348 return dev->priv_flags & IFF_MACVLAN_PORT; 5349 } 5350 5351 static inline bool netif_is_bond_master(const struct net_device *dev) 5352 { 5353 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; 5354 } 5355 5356 static inline bool netif_is_bond_slave(const struct net_device *dev) 5357 { 5358 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; 5359 } 5360 5361 static inline bool netif_supports_nofcs(struct net_device *dev) 5362 { 5363 return dev->priv_flags & IFF_SUPP_NOFCS; 5364 } 5365 5366 static inline bool netif_has_l3_rx_handler(const struct net_device *dev) 5367 { 5368 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; 5369 } 5370 5371 static inline bool netif_is_l3_master(const struct net_device *dev) 5372 { 5373 return dev->priv_flags & IFF_L3MDEV_MASTER; 5374 } 5375 5376 static inline bool netif_is_l3_slave(const struct net_device *dev) 5377 { 5378 return dev->priv_flags & IFF_L3MDEV_SLAVE; 5379 } 5380 5381 static inline int dev_sdif(const struct net_device *dev) 5382 { 5383 #ifdef CONFIG_NET_L3_MASTER_DEV 5384 if (netif_is_l3_slave(dev)) 5385 return dev->ifindex; 5386 #endif 5387 return 0; 5388 } 5389 5390 static inline bool netif_is_bridge_master(const struct net_device *dev) 5391 { 5392 return dev->priv_flags & IFF_EBRIDGE; 5393 } 5394 5395 static inline bool netif_is_bridge_port(const struct net_device *dev) 5396 { 5397 return dev->priv_flags & IFF_BRIDGE_PORT; 5398 } 5399 5400 static inline bool netif_is_ovs_master(const struct net_device *dev) 5401 { 5402 return dev->priv_flags & IFF_OPENVSWITCH; 5403 } 5404 5405 static inline bool netif_is_ovs_port(const struct net_device *dev) 5406 { 5407 return dev->priv_flags & IFF_OVS_DATAPATH; 5408 } 5409 5410 static inline bool netif_is_any_bridge_master(const struct net_device *dev) 5411 { 5412 return netif_is_bridge_master(dev) || netif_is_ovs_master(dev); 5413 } 5414 5415 static inline bool netif_is_any_bridge_port(const struct net_device *dev) 5416 { 5417 return netif_is_bridge_port(dev) || netif_is_ovs_port(dev); 5418 } 5419 5420 static inline bool netif_is_team_master(const struct net_device *dev) 5421 { 5422 return dev->priv_flags & IFF_TEAM; 5423 } 5424 5425 static inline bool netif_is_team_port(const struct net_device *dev) 5426 { 5427 return dev->priv_flags & IFF_TEAM_PORT; 5428 } 5429 5430 static inline bool netif_is_lag_master(const struct net_device *dev) 5431 { 5432 return netif_is_bond_master(dev) || netif_is_team_master(dev); 5433 } 5434 5435 static inline bool netif_is_lag_port(const struct net_device *dev) 5436 { 5437 return netif_is_bond_slave(dev) || netif_is_team_port(dev); 5438 } 5439 5440 static inline bool netif_is_rxfh_configured(const struct net_device *dev) 5441 { 5442 return dev->priv_flags & IFF_RXFH_CONFIGURED; 5443 } 5444 5445 static inline bool netif_is_failover(const struct net_device *dev) 5446 { 5447 return dev->priv_flags & IFF_FAILOVER; 5448 } 5449 5450 static inline bool netif_is_failover_slave(const struct net_device *dev) 5451 { 5452 return dev->priv_flags & IFF_FAILOVER_SLAVE; 5453 } 5454 5455 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ 5456 static inline void netif_keep_dst(struct net_device *dev) 5457 { 5458 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); 5459 } 5460 5461 /* return true if dev can't cope with mtu frames that need vlan tag insertion */ 5462 static inline bool netif_reduces_vlan_mtu(struct net_device *dev) 5463 { 5464 /* TODO: reserve and use an additional IFF bit, if we get more users */ 5465 return netif_is_macsec(dev); 5466 } 5467 5468 extern struct pernet_operations __net_initdata loopback_net_ops; 5469 5470 /* Logging, debugging and troubleshooting/diagnostic helpers. */ 5471 5472 /* netdev_printk helpers, similar to dev_printk */ 5473 5474 static inline const char *netdev_name(const struct net_device *dev) 5475 { 5476 if (!dev->name[0] || strchr(dev->name, '%')) 5477 return "(unnamed net_device)"; 5478 return dev->name; 5479 } 5480 5481 static inline const char *netdev_reg_state(const struct net_device *dev) 5482 { 5483 u8 reg_state = READ_ONCE(dev->reg_state); 5484 5485 switch (reg_state) { 5486 case NETREG_UNINITIALIZED: return " (uninitialized)"; 5487 case NETREG_REGISTERED: return ""; 5488 case NETREG_UNREGISTERING: return " (unregistering)"; 5489 case NETREG_UNREGISTERED: return " (unregistered)"; 5490 case NETREG_RELEASED: return " (released)"; 5491 case NETREG_DUMMY: return " (dummy)"; 5492 } 5493 5494 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state); 5495 return " (unknown)"; 5496 } 5497 5498 #define MODULE_ALIAS_NETDEV(device) \ 5499 MODULE_ALIAS("netdev-" device) 5500 5501 /* 5502 * netdev_WARN() acts like dev_printk(), but with the key difference 5503 * of using a WARN/WARN_ON to get the message out, including the 5504 * file/line information and a backtrace. 5505 */ 5506 #define netdev_WARN(dev, format, args...) \ 5507 WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \ 5508 netdev_reg_state(dev), ##args) 5509 5510 #define netdev_WARN_ONCE(dev, format, args...) \ 5511 WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \ 5512 netdev_reg_state(dev), ##args) 5513 5514 /* 5515 * The list of packet types we will receive (as opposed to discard) 5516 * and the routines to invoke. 5517 * 5518 * Why 16. Because with 16 the only overlap we get on a hash of the 5519 * low nibble of the protocol value is RARP/SNAP/X.25. 5520 * 5521 * 0800 IP 5522 * 0001 802.3 5523 * 0002 AX.25 5524 * 0004 802.2 5525 * 8035 RARP 5526 * 0005 SNAP 5527 * 0805 X.25 5528 * 0806 ARP 5529 * 8137 IPX 5530 * 0009 Localtalk 5531 * 86DD IPv6 5532 */ 5533 #define PTYPE_HASH_SIZE (16) 5534 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) 5535 5536 extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; 5537 5538 extern struct net_device *blackhole_netdev; 5539 5540 /* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */ 5541 #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD) 5542 #define DEV_STATS_ADD(DEV, FIELD, VAL) \ 5543 atomic_long_add((VAL), &(DEV)->stats.__##FIELD) 5544 #define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD) 5545 5546 #endif /* _LINUX_NETDEVICE_H */ 5547