/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/local.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <linux/netdevice_xmit.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <uapi/linux/netdev.h>
#include <linux/hashtable.h>
#include <linux/rbtree.h>
#include <net/net_trackers.h>
#include <net/net_debug.h>
#include <net/dropreason-core.h>
#include <net/neighbour_tables.h>

struct netpoll_info;
struct device;
struct ethtool_ops;
struct kernel_hwtstamp_config;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm_kern;
struct macsec_context;
struct macsec_ops;
struct netdev_name_node;
struct sd_flow_limit;
struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;
struct xdp_frame;
struct xdp_metadata_ops;
struct xdp_md;
struct ethtool_netdev_state;
struct phy_link_topology;
struct hwtstamp_provider;

typedef u32 xdp_features_t;

void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);
void netdev_sw_irq_coalesce_default_on(struct net_device *dev);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

#define MAX_NEST_DEV 8

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */
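/* Illustrative sketch (not part of this header): a virtual device that
 * transmits synchronously consumes the skb itself and reports only a driver
 * transmit return code. All names prefixed demo_ are hypothetical.
 *
 *	static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		dev_sw_netstats_tx_add(dev, 1, skb->len);
 *		dev_kfree_skb_any(skb);
 *		return NETDEV_TX_OK;
 *	}
 */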
/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

/*
 * Compute the worst-case header length according to the protocols
 * used.
 */

#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

#define NET_DEV_STAT(FIELD)			\
	union {					\
		unsigned long FIELD;		\
		atomic_long_t __##FIELD;	\
	}

struct net_device_stats {
	NET_DEV_STAT(rx_packets);
	NET_DEV_STAT(tx_packets);
	NET_DEV_STAT(rx_bytes);
	NET_DEV_STAT(tx_bytes);
	NET_DEV_STAT(rx_errors);
	NET_DEV_STAT(tx_errors);
	NET_DEV_STAT(rx_dropped);
	NET_DEV_STAT(tx_dropped);
	NET_DEV_STAT(multicast);
	NET_DEV_STAT(collisions);
	NET_DEV_STAT(rx_length_errors);
	NET_DEV_STAT(rx_over_errors);
	NET_DEV_STAT(rx_crc_errors);
	NET_DEV_STAT(rx_frame_errors);
	NET_DEV_STAT(rx_fifo_errors);
	NET_DEV_STAT(rx_missed_errors);
	NET_DEV_STAT(tx_aborted_errors);
	NET_DEV_STAT(tx_carrier_errors);
	NET_DEV_STAT(tx_fifo_errors);
	NET_DEV_STAT(tx_heartbeat_errors);
	NET_DEV_STAT(tx_window_errors);
	NET_DEV_STAT(rx_compressed);
	NET_DEV_STAT(tx_compressed);
};
#undef NET_DEV_STAT
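/* The union above gives every counter an atomic alias (__FIELD), so lockless
 * paths can fold in errors concurrently while legacy drivers keep plain
 * increments on their own serialized paths. Sketch (the DEV_STATS_INC()
 * helpers later in this file wrap the same idea):
 *
 *	dev->stats.collisions++;			(driver-locked path)
 *	atomic_long_inc(&dev->stats.__rx_dropped);	(lockless path)
 */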
/* per-cpu stats, allocated on demand.
 * Try to fit them in a single cache line, for dev_get_stats() sake.
 */
struct net_device_core_stats {
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	rx_nohandler;
	unsigned long	rx_otherhost_dropped;
} __aligned(4 * sizeof(unsigned long));

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	struct rb_node		node;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_UNICAST	3
#define NETDEV_HW_ADDR_T_MULTICAST	4
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;

	/* Auxiliary tree for faster lookup on addition and deletion */
	struct rb_root		tree;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
#define netdev_for_each_synced_uc_addr(_ha, _dev) \
	netdev_for_each_uc_addr((_ha), (_dev)) \
		if ((_ha)->sync_cnt)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
#define netdev_for_each_synced_mc_addr(_ha, _dev) \
	netdev_for_each_mc_addr((_ha), (_dev)) \
		if ((_ha)->sync_cnt)

struct hh_cache {
	unsigned int	hh_len;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
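/* Worked example, assuming Ethernet (hard_header_len == 14, HH_DATA_MOD == 16):
 * HH_DATA_OFF(14) == 2 and HH_DATA_ALIGN(14) == 16, i.e. the cached header is
 * stored right-aligned within a 16-byte slot. A typical sender reserves the
 * worst-case link-layer space before building a packet (sketch; payload_len
 * is illustrative):
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */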
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
	__be16	(*parse_protocol)(const struct sk_buff *skb);
};
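/* Ethernet provides the canonical implementation of these hooks
 * (eth_header(), eth_header_parse(), eth_header_cache() and
 * eth_header_cache_update() in net/ethernet/eth.c). Assuming the
 * eth_header_ops table is visible to the driver, an Ethernet-like device can
 * simply reuse it (sketch):
 *
 *	dev->header_ops = &eth_header_ops;
 */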
/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_TESTING,
};

struct gro_list {
	struct list_head	list;
	int			count;
};

/*
 * size of gro hash buckets, must be less than the number of bits in
 * napi_struct::gro_bitmask
 */
#define GRO_HASH_BUCKETS	8

/*
 * Structure for per-NAPI config
 */
struct napi_config {
	u64 gro_flush_timeout;
	u64 irq_suspend_timeout;
	u32 defer_hard_irqs;
	unsigned int napi_id;
};

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	u32			defer_hard_irqs_count;
	unsigned long		gro_bitmask;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	/* CPU actively polling if netpoll is configured */
	int			poll_owner;
#endif
	/* CPU on which NAPI has been scheduled for processing */
	int			list_owner;
	struct net_device	*dev;
	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
	struct sk_buff		*skb;
	struct list_head	rx_list; /* Pending GRO_NORMAL skbs */
	int			rx_count; /* length of rx_list */
	unsigned int		napi_id;
	struct hrtimer		timer;
	struct task_struct	*thread;
	unsigned long		gro_flush_timeout;
	unsigned long		irq_suspend_timeout;
	u32			defer_hard_irqs;
	/* control-path-only fields follow */
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	int			irq;
	int			index;
	struct napi_config	*config;
};

enum {
	NAPI_STATE_SCHED,		/* Poll is scheduled */
	NAPI_STATE_MISSED,		/* reschedule a napi */
	NAPI_STATE_DISABLE,		/* Disable pending */
	NAPI_STATE_NPSVC,		/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_LISTED,		/* NAPI added to system lists */
	NAPI_STATE_NO_BUSY_POLL,	/* Do not add in napi_hash, no busy polling */
	NAPI_STATE_IN_BUSY_POLL,	/* sk_busy_loop() owns this NAPI */
	NAPI_STATE_PREFER_BUSY_POLL,	/* prefer busy-polling over softirq processing */
	NAPI_STATE_THREADED,		/* The poll is performed inside its own thread */
	NAPI_STATE_SCHED_THREADED,	/* Napi is currently scheduled in threaded mode */
};

enum {
	NAPIF_STATE_SCHED		= BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED		= BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE		= BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC		= BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_LISTED		= BIT(NAPI_STATE_LISTED),
	NAPIF_STATE_NO_BUSY_POLL	= BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL	= BIT(NAPI_STATE_IN_BUSY_POLL),
	NAPIF_STATE_PREFER_BUSY_POLL	= BIT(NAPI_STATE_PREFER_BUSY_POLL),
	NAPIF_STATE_THREADED		= BIT(NAPI_STATE_THREADED),
	NAPIF_STATE_SCHED_THREADED	= BIT(NAPI_STATE_SCHED_THREADED),
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

static inline bool napi_prefer_busy_poll(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
}

/**
 * napi_is_scheduled - test if NAPI is scheduled
 * @n: NAPI context
 *
 * This check is "best-effort". With no locking implemented,
 * a NAPI can be scheduled or terminate right after this check,
 * so the result may be imprecise.
 *
 * NAPI_STATE_SCHED is an internal state, napi_is_scheduled
 * should not be used normally and napi_schedule should be
 * used instead.
 *
 * Use only if the driver really needs to check if a NAPI
 * is scheduled, for example in the context of a delayed timer
 * that can be skipped if a NAPI is already scheduled.
 *
 * Return: True if NAPI is scheduled, False otherwise.
 */
static inline bool napi_is_scheduled(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_SCHED, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);

/**
 * napi_schedule - schedule NAPI poll
 * @n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 * Return: true if we scheduled a NAPI, false otherwise.
 * Refer to napi_schedule_prep() for the reasons why a NAPI might
 * not be scheduled.
 */
static inline bool napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n)) {
		__napi_schedule(n);
		return true;
	}

	return false;
}

/**
 * napi_schedule_irqoff - schedule NAPI poll
 * @n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}
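/* Typical usage (sketch; the mydev_* names are hypothetical): a device
 * interrupt handler masks further IRQs and defers the actual work to NAPI.
 *
 *	static irqreturn_t mydev_irq(int irq, void *data)
 *	{
 *		struct mydev *priv = data;
 *
 *		mydev_mask_irqs(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */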
/**
 * napi_complete_done - NAPI processing complete
 * @n: NAPI context
 * @work_done: number of packets processed
 *
 * Mark NAPI processing as complete. Should only be called if poll budget
 * has not been completely consumed.
 * Prefer over napi_complete().
 * Return: false if device should avoid rearming interrupts.
 */
bool napi_complete_done(struct napi_struct *n, int work_done);

static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
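/* The canonical poll() shape built on napi_complete_done() (sketch; the
 * mydev_* names are hypothetical):
 *
 *	static int mydev_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydev *priv = container_of(napi, struct mydev, napi);
 *		int work = mydev_clean_rx(priv, budget);
 *
 *		if (work < budget && napi_complete_done(napi, work))
 *			mydev_unmask_irqs(priv);
 *		return work;
 *	}
 */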
int dev_set_threaded(struct net_device *dev, bool threaded);

/**
 * napi_disable - prevent NAPI from scheduling
 * @n: NAPI context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
void napi_disable(struct napi_struct *n);

void napi_enable(struct napi_struct *n);

/**
 * napi_synchronize - wait until NAPI is not running
 * @n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}

/**
 * napi_if_scheduled_mark_missed - if napi is running, set the
 * NAPIF_STATE_MISSED
 * @n: NAPI context
 *
 * If napi is running, set the NAPIF_STATE_MISSED, and return true if
 * NAPI is scheduled.
 **/
static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
	unsigned long val, new;

	val = READ_ONCE(n->state);
	do {
		if (val & NAPIF_STATE_DISABLE)
			return true;

		if (!(val & NAPIF_STATE_SCHED))
			return false;

		new = val | NAPIF_STATE_MISSED;
	} while (!try_cmpxchg(&n->state, &val, new));

	return true;
}

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
 * netif_tx_* functions below are used to manipulate this flag. The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently. The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state). Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */
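/* Typical driver use of the DRV_XOFF bit in the xmit path (sketch; the
 * mydev_* names are hypothetical):
 *
 *	static netdev_tx_t mydev_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct mydev *priv = netdev_priv(dev);
 *		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 *
 *		if (unlikely(!mydev_tx_descs_avail(priv))) {
 *			netif_tx_stop_queue(txq);
 *			return NETDEV_TX_BUSY;
 *		}
 *		mydev_post_tx(priv, skb);
 *		return NETDEV_TX_OK;
 *	}
 */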
struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	netdevice_tracker	dev_tracker;

	struct Qdisc __rcu	*qdisc;
	struct Qdisc __rcu	*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	atomic_long_t		trans_timeout;

	/* Subordinate device that the queue has been assigned to */
	struct net_device	*sb_dev;
#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool	*pool;
#endif

/*
 * write-mostly part
 */
#ifdef CONFIG_BQL
	struct dql		dql;
#endif
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;

/*
 * slow- / control-path part
 */
	/* NAPI instance for the queue
	 * Readers and writers must hold RTNL
	 */
	struct napi_struct	*napi;

#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
} ____cacheline_aligned_in_smp;

extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;

/*
 * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
 *                                     == 1 : For initns only
 *                                     == 2 : For none.
 */
static inline bool net_has_fallback_tunnels(const struct net *net)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);

	return !fb_tunnels_only_for_init_net ||
		(net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
#else
	return true;
#endif
}

static inline int net_inherit_devconf(void)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	return READ_ONCE(sysctl_devconf_inherit_init_net);
#else
	return 0;
#endif
}

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
enum xps_map_type {
	XPS_CPUS = 0,
	XPS_RXQS,
	XPS_MAPS_MAX,
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length. The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for the device. Maps are indexed by CPU.
 *
 * We keep track of the number of cpus/rxqs used when the struct is allocated,
 * in nr_ids. This prevents accesses to out-of-bounds memory.
 *
 * We keep track of the number of traffic classes used when the struct is
 * allocated, in num_tc. This will be used to navigate the maps, to ensure we
 * do not cross their upper bound, as the original dev->num_tc can be updated
 * in the meantime.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	unsigned int nr_ids;
	s16 num_tc;
	struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))

#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
	(_rxqs * (_tcs) * sizeof(struct xps_map *)))

#endif /* CONFIG_XPS */
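/* Indexing sketch: attr_map[] is laid out id-major, so for XPS_CPUS the map
 * for CPU @cpu and traffic class @tc lives at index cpu * num_tc + tc, and a
 * reader under RCU does roughly:
 *
 *	int tci = cpu * dev_maps->num_tc + tc;
 *	struct xps_map *map = rcu_dereference(dev_maps->attr_map[tci]);
 */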
#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure holds information about the device configured to run the
 * FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev);

enum net_device_path_type {
	DEV_PATH_ETHERNET = 0,
	DEV_PATH_VLAN,
	DEV_PATH_BRIDGE,
	DEV_PATH_PPPOE,
	DEV_PATH_DSA,
	DEV_PATH_MTK_WDMA,
};

struct net_device_path {
	enum net_device_path_type	type;
	const struct net_device		*dev;
	union {
		struct {
			u16		id;
			__be16		proto;
			u8		h_dest[ETH_ALEN];
		} encap;
		struct {
			enum {
				DEV_PATH_BR_VLAN_KEEP,
				DEV_PATH_BR_VLAN_TAG,
				DEV_PATH_BR_VLAN_UNTAG,
				DEV_PATH_BR_VLAN_UNTAG_HW,
			}		vlan_mode;
			u16		vlan_id;
			__be16		vlan_proto;
		} bridge;
		struct {
			int port;
			u16 proto;
		} dsa;
		struct {
			u8 wdma_idx;
			u8 queue;
			u16 wcid;
			u8 bss;
			u8 amsdu;
		} mtk_wdma;
	};
};

#define NET_DEVICE_PATH_STACK_MAX	5
#define NET_DEVICE_PATH_VLAN_MAX	2

struct net_device_path_stack {
	int			num_paths;
	struct net_device_path	path[NET_DEVICE_PATH_STACK_MAX];
};

struct net_device_path_ctx {
	const struct net_device *dev;
	u8			daddr[ETH_ALEN];

	int			num_vlans;
	struct {
		u16		id;
		__be16		proto;
	} vlan[NET_DEVICE_PATH_VLAN_MAX];
};

enum tc_setup_type {
	TC_QUERY_CAPS,
	TC_SETUP_QDISC_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
	TC_SETUP_BLOCK,
	TC_SETUP_QDISC_CBS,
	TC_SETUP_QDISC_RED,
	TC_SETUP_QDISC_PRIO,
	TC_SETUP_QDISC_MQ,
	TC_SETUP_QDISC_ETF,
	TC_SETUP_ROOT_QDISC,
	TC_SETUP_QDISC_GRED,
	TC_SETUP_QDISC_TAPRIO,
	TC_SETUP_FT,
	TC_SETUP_QDISC_ETS,
	TC_SETUP_QDISC_TBF,
	TC_SETUP_QDISC_FIFO,
	TC_SETUP_QDISC_HTB,
	TC_SETUP_ACT,
};

/* These structures hold the attributes of bpf state that are being passed
 * to the netdevice through the bpf op.
 */
enum bpf_netdev_command {
	/* Set or clear a bpf program used in the earliest stages of packet
	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
	 * is responsible for calling bpf_prog_put on any old progs that are
	 * stored. In case of error, the callee need not release the new prog
	 * reference, but on success it takes ownership and must bpf_prog_put
	 * when it is no longer used.
	 */
	XDP_SETUP_PROG,
	XDP_SETUP_PROG_HW,
	/* BPF program for offload callbacks, invoked at program load time. */
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
	XDP_SETUP_XSK_POOL,
};

struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
struct bpf_xdp_link;

enum bpf_xdp_mode {
	XDP_MODE_SKB = 0,
	XDP_MODE_DRV = 1,
	XDP_MODE_HW = 2,
	__MAX_XDP_MODE
};

struct bpf_xdp_entity {
	struct bpf_prog *prog;
	struct bpf_xdp_link *link;
};

struct netdev_bpf {
	enum bpf_netdev_command command;
	union {
		/* XDP_SETUP_PROG */
		struct {
			u32 flags;
			struct bpf_prog *prog;
			struct netlink_ext_ack *extack;
		};
		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
		struct {
			struct bpf_offloaded_map *offmap;
		};
		/* XDP_SETUP_XSK_POOL */
		struct {
			struct xsk_buff_pool *pool;
			u16 queue_id;
		} xsk;
	};
};
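/* Shape of a typical driver hook dispatching on the command (sketch; the
 * mydev_* names are hypothetical):
 *
 *	static int mydev_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return mydev_xdp_setup(dev, bpf->prog, bpf->extack);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */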
/* Flags for ndo_xsk_wakeup. */
#define XDP_WAKEUP_RX (1 << 0)
#define XDP_WAKEUP_TX (1 << 1)

#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
	int	(*xdo_dev_state_add)(struct xfrm_state *x,
				     struct netlink_ext_ack *extack);
	void	(*xdo_dev_state_delete)(struct xfrm_state *x);
	void	(*xdo_dev_state_free)(struct xfrm_state *x);
	bool	(*xdo_dev_offload_ok)(struct sk_buff *skb,
				      struct xfrm_state *x);
	void	(*xdo_dev_state_advance_esn)(struct xfrm_state *x);
	void	(*xdo_dev_state_update_stats)(struct xfrm_state *x);
	int	(*xdo_dev_policy_add)(struct xfrm_policy *x,
				      struct netlink_ext_ack *extack);
	void	(*xdo_dev_policy_delete)(struct xfrm_policy *x);
	void	(*xdo_dev_policy_free)(struct xfrm_policy *x);
};
#endif

struct dev_ifalias {
	struct rcu_head rcuhead;
	char ifalias[];
};

struct devlink;
struct tlsdev_ops;

struct netdev_net_notifier {
	struct list_head list;
	struct notifier_block *nb;
};

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when device is unregistered or when registration
 *	fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
 *	the queue before that can happen; it's for obsolete devices and weird
 *	corner cases, but the stack really does a non-trivial amount
 *	of useless work if you return NETDEV_TX_BUSY.
 *	Required; cannot be NULL.
 *
 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
 *					   struct net_device *dev,
 *					   netdev_features_t features);
 *	Called by core transmit path to determine if device is capable of
 *	performing offload operations on a given packet. This is to give
 *	the device an opportunity to implement any restrictions that cannot
 *	be otherwise expressed by feature flags. The check is called with
 *	the set of features that the stack has calculated and it returns
 *	those the driver believes to be appropriate.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         struct net_device *sb_dev);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering. If the driver handles unicast address filtering, it
 *	should set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	MAC address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Old-style ioctl entry point. This is used internally by the
 *	appletalk and ieee802154 subsystems but is no longer called by
 *	the device ioctl handler.
 *
 * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Used by the bonding driver for its device specific ioctls:
 *	SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
 *	SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
 *
 * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
 *	SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network devices bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * void (*ndo_get_stats64)(struct net_device *dev,
 *                         struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
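 *
 *	For example, a driver keeping per-CPU tstats could implement option 1
 *	as (sketch; mydev_get_stats64 is hypothetical):
 *
 *		static void mydev_get_stats64(struct net_device *dev,
 *					      struct rtnl_link_stats64 *storage)
 *		{
 *			dev_fetch_sw_netstats(storage, dev->tstats);
 *		}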
 *
 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
 *	Return true if this device supports offload stats of this attr_id.
 *
 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
 *			        void *attr_data)
 *	Get statistics for offload operations by attr_id. Write it into the
 *	attr_data pointer.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
 *			  u8 qos, __be16 proto);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *			  int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 *
 *	Enable or disable the VF ability to query its RSS Redirection Table and
 *	Hash Key. This is needed since on some devices VFs share this information
 *	with the PF and querying it may introduce a theoretical security risk.
 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
 *		       void *type_data);
 *	Called to setup any 'tc' scheduler, classifier or action on @dev.
 *	This is always called from the stack with the rtnl lock held and netif
 *	tx queues stopped. This allows the netdevice to perform queue
 *	management safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with Fiber Channel management service as per the
 *	FC-GS Fabric Device Management Information(FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override default World Wide
 *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS.  rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release previously enslaved netdev.
 *
 * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
 *					    struct sk_buff *skb,
 *					    bool all_slaves);
 *	Get the xmit slave of master device. If all_slaves is true, the
 *	function assumes all the slaves can transmit.
 *
 *	Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *					 netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid, u16 flags,
 *		      bool *notified, struct netlink_ext_ack *extack);
 *	Adds an FDB entry to dev for addr.
 *	Callee shall set *notified to true if it sent any appropriate
 *	notification(s). Otherwise core will send a generic one.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid,
 *		      bool *notified, struct netlink_ext_ack *extack);
 *	Deletes the FDB entry from dev corresponding to addr.
 *	Callee shall set *notified to true if it sent any appropriate
 *	notification(s). Otherwise core will send a generic one.
 * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
 *			   struct netlink_ext_ack *extack);
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, struct net_device *filter_dev,
 *		       int *idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[],
 *		      u16 nlmsg_flags, struct netlink_ext_ack *extack);
 *	Adds an MDB entry to dev.
 * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
 *		      struct netlink_ext_ack *extack);
 *	Deletes the MDB entry from dev.
 * int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[],
 *			   struct netlink_ext_ack *extack);
 *	Bulk deletes MDB entries from dev.
 * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
 *		       struct netlink_callback *cb);
 *	Dumps MDB entries from dev. The first argument (marker) in the netlink
 *	callback is used by core rtnetlink code.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags, struct netlink_ext_ack *extack)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask,
 *			     int nlflags)
 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags);
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *			       struct netdev_phys_item_id *ppid);
 *	Called to get ID of physical port of this device. If driver does
 *	not implement this, it is assumed that the hw is not able to have
 *	multiple net devices on single physical port.
 *
 * int (*ndo_get_port_parent_id)(struct net_device *dev,
 *				 struct netdev_phys_item_id *ppid)
 *	Called to get the parent ID of the physical port of this device.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *				 struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware. 'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload. Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *	the station and priv is the structure returned by the add
 *	operation.
 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
 *			     int queue_index, u32 maxrate);
 *	Called when a user wants to set a max-rate limitation of specific
 *	TX queue.
 * int (*ndo_get_iflink)(const struct net_device *dev);
 *	Called to get the iflink value of this device.
 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 *	This function is used to get egress tunnel information for given skb.
 *	This is useful for retrieving outer tunnel header parameters while
 *	sampling packet.
 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
 *	This function is used to specify the headroom that the skb must
 *	consider when allocating an skb during packet reception. Setting an
 *	appropriate rx headroom value allows avoiding skb head copy on
 *	forward. Setting a negative value resets the rx headroom to the
 *	default value.
 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
 *	This function is used to set or query state related to XDP on the
 *	netdevice and manage BPF offload. See definition of
 *	enum bpf_netdev_command for details.
 * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
 *			u32 flags);
 *	This function is used to submit @n XDP packets for transmit on a
 *	netdevice. Returns number of frames successfully transmitted, frames
 *	that got dropped are freed/returned via xdp_return_frame().
 *	A negative return value means a general error invoking the ndo: no
 *	frames were transmitted and the core caller will free all frames.
 * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
 *					        struct xdp_buff *xdp);
 *	Get the xmit slave of master device based on the xdp_buff.
 * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
 *	This function is used to wake up the softirq, ksoftirqd or kthread
 *	responsible for sending and/or receiving packets on a specific
 *	queue id bound to an AF_XDP socket. The flags field specifies if
 *	only RX, only TX, or both should be woken up using the flags
 *	XDP_WAKEUP_RX and XDP_WAKEUP_TX.
 * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p,
 *			 int cmd);
 *	Add, change, delete or get information on an IPv4 tunnel.
 * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
 *	If a device is paired with a peer device, return the peer instance.
 *	The caller must be under RCU read context.
 * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
 *	Get the forwarding path to reach the real device from the HW
 *	destination address.
 * ktime_t (*ndo_get_tstamp)(struct net_device *dev,
 *			     const struct skb_shared_hwtstamps *hwtstamps,
 *			     bool cycles);
 *	Get hardware timestamp based on normal/adjustable time or free running
 *	cycle counter. This function is required if physical clock supports a
 *	free running cycle counter.
 *
 * int (*ndo_hwtstamp_get)(struct net_device *dev,
 *			   struct kernel_hwtstamp_config *kernel_config);
 *	Get the currently configured hardware timestamping parameters for the
 *	NIC device.
 *
 * int (*ndo_hwtstamp_set)(struct net_device *dev,
 *			   struct kernel_hwtstamp_config *kernel_config,
 *			   struct netlink_ext_ack *extack);
 *	Change the hardware timestamping parameters for NIC device.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
						      struct net_device *dev,
						      netdev_features_t features);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    struct net_device *sb_dev);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_eth_ioctl)(struct net_device *dev,
						 struct ifreq *ifr, int cmd);
	int			(*ndo_siocbond)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_siocwandev)(struct net_device *dev,
						  struct if_settings *ifs);
	int			(*ndo_siocdevprivate)(struct net_device *dev,
						      struct ifreq *ifr,
						      void __user *data, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev,
						   unsigned int txqueue);

	void			(*ndo_get_stats64)(struct net_device *dev,
						   struct rtnl_link_stats64 *storage);
	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
	int			(*ndo_get_offload_stats)(int attr_id,
							 const struct net_device *dev,
							 void *attr_data);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan,
						   u8 qos, __be16 proto);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats
						    *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_get_vf_guid)(struct net_device *dev,
						   int vf,
						   struct ifla_vf_guid *node_guid,
						   struct ifla_vf_guid *port_guid);
	int			(*ndo_set_vf_guid)(struct net_device *dev,
						   int vf, u64 guid,
						   int guid_type);
	int			(*ndo_set_vf_rss_query_en)(
						   struct net_device *dev,
						   int vf, bool setting);
	int			(*ndo_setup_tc)(struct net_device *dev,
						enum tc_setup_type type,
						void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev,
						 struct netlink_ext_ack *extack);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	struct net_device*	(*ndo_get_xmit_slave)(struct net_device *dev,
						      struct sk_buff *skb,
						      bool all_slaves);
	struct net_device*	(*ndo_sk_get_lower_dev)(struct net_device *dev,
							struct sock *sk);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct net_device *dev,
						       struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct net_device *dev,
						     struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags,
					       bool *notified,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       bool *notified,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del_bulk)(struct nlmsghdr *nlh,
						    struct net_device *dev,
						    struct netlink_ext_ack *extack);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int *idx);
	int			(*ndo_fdb_get)(struct sk_buff *skb,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid, u32 portid, u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_add)(struct net_device *dev,
					       struct nlattr *tb[],
					       u16 nlmsg_flags,
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_del)(struct net_device *dev,
					       struct nlattr *tb[],
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_del_bulk)(struct net_device *dev,
						    struct nlattr *tb[],
						    struct netlink_ext_ack *extack);
	int			(*ndo_mdb_dump)(struct net_device *dev,
						struct sk_buff *skb,
						struct netlink_callback *cb);
	int			(*ndo_mdb_get)(struct net_device *dev,
					       struct nlattr *tb[], u32 portid,
					       u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags,
						      struct netlink_ext_ack *extack);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_port_parent_id)(struct net_device *dev,
							  struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
							 struct sk_buff *skb);
	void			(*ndo_set_rx_headroom)(struct net_device *dev,
						       int needed_headroom);
	int			(*ndo_bpf)(struct net_device *dev,
					   struct netdev_bpf *bpf);
	int			(*ndo_xdp_xmit)(struct net_device *dev, int n,
						struct xdp_frame **xdp,
						u32 flags);
	struct net_device *	(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
							  struct xdp_buff *xdp);
	int			(*ndo_xsk_wakeup)(struct net_device *dev,
						  u32 queue_id, u32 flags);
	int			(*ndo_tunnel_ctl)(struct net_device *dev,
						  struct ip_tunnel_parm_kern *p,
						  int cmd);
	struct net_device *	(*ndo_get_peer_dev)(struct net_device *dev);
	int			(*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
							 struct net_device_path *path);
	ktime_t			(*ndo_get_tstamp)(struct net_device *dev,
						  const struct skb_shared_hwtstamps *hwtstamps,
						  bool cycles);
	int			(*ndo_hwtstamp_get)(struct net_device *dev,
						    struct kernel_hwtstamp_config *kernel_config);
	int			(*ndo_hwtstamp_set)(struct net_device *dev,
						    struct kernel_hwtstamp_config *kernel_config,
						    struct netlink_ext_ack *extack);

#if IS_ENABLED(CONFIG_NET_SHAPER)
	/**
	 * @net_shaper_ops: Device shaping offload operations
	 * see include/net/net_shapers.h
	 */
	const struct net_shaper_ops *net_shaper_ops;
#endif
};
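/* A minimal ops table for a simple virtual device might look like this
 * (sketch; the mydev_* handlers are hypothetical, dev_get_tstats64() is a
 * stock helper for devices using per-CPU tstats):
 *
 *	static const struct net_device_ops mydev_netdev_ops = {
 *		.ndo_open	 = mydev_open,
 *		.ndo_stop	 = mydev_stop,
 *		.ndo_start_xmit	 = mydev_xmit,
 *		.ndo_get_stats64 = dev_get_tstats64,
 *	};
 */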
These flags are invisible to
1645 * userspace; this means that the order of these flags can change
1646 * during any kernel release.
1647 *
1648 * You should add bitfield booleans after either net_device::priv_flags
1649 * (hotpath) or ::threaded (slowpath) instead of extending these flags.
1650 *
1651 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1652 * @IFF_EBRIDGE: Ethernet bridging device
1653 * @IFF_BONDING: bonding master or slave
1654 * @IFF_ISATAP: ISATAP interface (RFC4214)
1655 * @IFF_WAN_HDLC: WAN HDLC device
1656 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1657 *	release skb->dst
1658 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
1659 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1660 * @IFF_MACVLAN_PORT: device used as macvlan port
1661 * @IFF_BRIDGE_PORT: device used as bridge port
1662 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1663 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
1664 * @IFF_UNICAST_FLT: Supports unicast filtering
1665 * @IFF_TEAM_PORT: device used as team port
1666 * @IFF_SUPP_NOFCS: device supports sending custom FCS
1667 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1668 *	change when it's running
1669 * @IFF_MACVLAN: Macvlan device
1670 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
1671 *	underlying stacked devices
1672 * @IFF_L3MDEV_MASTER: device is an L3 master device
1673 * @IFF_NO_QUEUE: device can run without qdisc attached
1674 * @IFF_OPENVSWITCH: device is an Open vSwitch master
1675 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
1676 * @IFF_TEAM: device is a team device
1677 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
1678 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
1679 *	entity (i.e.
the master device for bridged veth) 1680 * @IFF_MACSEC: device is a MACsec device 1681 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook 1682 * @IFF_FAILOVER: device is a failover master device 1683 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device 1684 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device 1685 * @IFF_NO_ADDRCONF: prevent ipv6 addrconf 1686 * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with 1687 * skb_headlen(skb) == 0 (data starts from frag0) 1688 */ 1689 enum netdev_priv_flags { 1690 IFF_802_1Q_VLAN = 1<<0, 1691 IFF_EBRIDGE = 1<<1, 1692 IFF_BONDING = 1<<2, 1693 IFF_ISATAP = 1<<3, 1694 IFF_WAN_HDLC = 1<<4, 1695 IFF_XMIT_DST_RELEASE = 1<<5, 1696 IFF_DONT_BRIDGE = 1<<6, 1697 IFF_DISABLE_NETPOLL = 1<<7, 1698 IFF_MACVLAN_PORT = 1<<8, 1699 IFF_BRIDGE_PORT = 1<<9, 1700 IFF_OVS_DATAPATH = 1<<10, 1701 IFF_TX_SKB_SHARING = 1<<11, 1702 IFF_UNICAST_FLT = 1<<12, 1703 IFF_TEAM_PORT = 1<<13, 1704 IFF_SUPP_NOFCS = 1<<14, 1705 IFF_LIVE_ADDR_CHANGE = 1<<15, 1706 IFF_MACVLAN = 1<<16, 1707 IFF_XMIT_DST_RELEASE_PERM = 1<<17, 1708 IFF_L3MDEV_MASTER = 1<<18, 1709 IFF_NO_QUEUE = 1<<19, 1710 IFF_OPENVSWITCH = 1<<20, 1711 IFF_L3MDEV_SLAVE = 1<<21, 1712 IFF_TEAM = 1<<22, 1713 IFF_RXFH_CONFIGURED = 1<<23, 1714 IFF_PHONY_HEADROOM = 1<<24, 1715 IFF_MACSEC = 1<<25, 1716 IFF_NO_RX_HANDLER = 1<<26, 1717 IFF_FAILOVER = 1<<27, 1718 IFF_FAILOVER_SLAVE = 1<<28, 1719 IFF_L3MDEV_RX_HANDLER = 1<<29, 1720 IFF_NO_ADDRCONF = BIT_ULL(30), 1721 IFF_TX_SKB_NO_LINEAR = BIT_ULL(31), 1722 }; 1723 1724 /* Specifies the type of the struct net_device::ml_priv pointer */ 1725 enum netdev_ml_priv_type { 1726 ML_PRIV_NONE, 1727 ML_PRIV_CAN, 1728 }; 1729 1730 enum netdev_stat_type { 1731 NETDEV_PCPU_STAT_NONE, 1732 NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */ 1733 NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */ 1734 NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */ 1735 }; 1736 1737 enum netdev_reg_state { 1738 NETREG_UNINITIALIZED = 0, 1739 NETREG_REGISTERED, /* completed register_netdevice */ 1740 NETREG_UNREGISTERING, /* called unregister_netdevice */ 1741 NETREG_UNREGISTERED, /* completed unregister todo */ 1742 NETREG_RELEASED, /* called free_netdev */ 1743 NETREG_DUMMY, /* dummy device for NAPI poll */ 1744 }; 1745 1746 /** 1747 * struct net_device - The DEVICE structure. 1748 * 1749 * Actually, this whole structure is a big mistake. It mixes I/O 1750 * data with strictly "high-level" data, and it has to know about 1751 * almost every data structure used in the INET module. 1752 * 1753 * @priv_flags: flags invisible to userspace defined as bits, see 1754 * enum netdev_priv_flags for the definitions 1755 * @lltx: device supports lockless Tx. Deprecated for real HW 1756 * drivers. Mainly used by logical interfaces, such as 1757 * bonding and tunnels 1758 * 1759 * @name: This is the first field of the "visible" part of this structure 1760 * (i.e. as seen by users in the "Space.c" file). It is the name 1761 * of the interface. 
1762 * 1763 * @name_node: Name hashlist node 1764 * @ifalias: SNMP alias 1765 * @mem_end: Shared memory end 1766 * @mem_start: Shared memory start 1767 * @base_addr: Device I/O address 1768 * @irq: Device IRQ number 1769 * 1770 * @state: Generic network queuing layer state, see netdev_state_t 1771 * @dev_list: The global list of network devices 1772 * @napi_list: List entry used for polling NAPI devices 1773 * @unreg_list: List entry when we are unregistering the 1774 * device; see the function unregister_netdev 1775 * @close_list: List entry used when we are closing the device 1776 * @ptype_all: Device-specific packet handlers for all protocols 1777 * @ptype_specific: Device-specific, protocol-specific packet handlers 1778 * 1779 * @adj_list: Directly linked devices, like slaves for bonding 1780 * @features: Currently active device features 1781 * @hw_features: User-changeable features 1782 * 1783 * @wanted_features: User-requested features 1784 * @vlan_features: Mask of features inheritable by VLAN devices 1785 * 1786 * @hw_enc_features: Mask of features inherited by encapsulating devices 1787 * This field indicates what encapsulation 1788 * offloads the hardware is capable of doing, 1789 * and drivers will need to set them appropriately. 1790 * 1791 * @mpls_features: Mask of features inheritable by MPLS 1792 * @gso_partial_features: value(s) from NETIF_F_GSO\* 1793 * 1794 * @ifindex: interface index 1795 * @group: The group the device belongs to 1796 * 1797 * @stats: Statistics struct, which was left as a legacy, use 1798 * rtnl_link_stats64 instead 1799 * 1800 * @core_stats: core networking counters, 1801 * do not use this in drivers 1802 * @carrier_up_count: Number of times the carrier has been up 1803 * @carrier_down_count: Number of times the carrier has been down 1804 * 1805 * @wireless_handlers: List of functions to handle Wireless Extensions, 1806 * instead of ioctl, 1807 * see <net/iw_handler.h> for details. 1808 * 1809 * @netdev_ops: Includes several pointers to callbacks, 1810 * if one wants to override the ndo_*() functions 1811 * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks. 1812 * @xsk_tx_metadata_ops: Includes pointers to AF_XDP TX metadata callbacks. 1813 * @ethtool_ops: Management operations 1814 * @l3mdev_ops: Layer 3 master device operations 1815 * @ndisc_ops: Includes callbacks for different IPv6 neighbour 1816 * discovery handling. Necessary for e.g. 6LoWPAN. 1817 * @xfrmdev_ops: Transformation offload operations 1818 * @tlsdev_ops: Transport Layer Security offload operations 1819 * @header_ops: Includes callbacks for creating,parsing,caching,etc 1820 * of Layer 2 headers. 1821 * 1822 * @flags: Interface flags (a la BSD) 1823 * @xdp_features: XDP capability supported by the device 1824 * @gflags: Global flags ( kept as legacy ) 1825 * @priv_len: Size of the ->priv flexible array 1826 * @priv: Flexible array containing private data 1827 * @operstate: RFC2863 operstate 1828 * @link_mode: Mapping policy to operstate 1829 * @if_port: Selectable AUI, TP, ... 1830 * @dma: DMA channel 1831 * @mtu: Interface MTU value 1832 * @min_mtu: Interface Minimum MTU value 1833 * @max_mtu: Interface Maximum MTU value 1834 * @type: Interface hardware type 1835 * @hard_header_len: Maximum hardware header length. 
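 *	(for Ethernet devices, ether_setup() initializes this to ETH_HLEN)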
1836 * @min_header_len: Minimum hardware header length 1837 * 1838 * @needed_headroom: Extra headroom the hardware may need, but not in all 1839 * cases can this be guaranteed 1840 * @needed_tailroom: Extra tailroom the hardware may need, but not in all 1841 * cases can this be guaranteed. Some cases also use 1842 * LL_MAX_HEADER instead to allocate the skb 1843 * 1844 * interface address info: 1845 * 1846 * @perm_addr: Permanent hw address 1847 * @addr_assign_type: Hw address assignment type 1848 * @addr_len: Hardware address length 1849 * @upper_level: Maximum depth level of upper devices. 1850 * @lower_level: Maximum depth level of lower devices. 1851 * @neigh_priv_len: Used in neigh_alloc() 1852 * @dev_id: Used to differentiate devices that share 1853 * the same link layer address 1854 * @dev_port: Used to differentiate devices that share 1855 * the same function 1856 * @addr_list_lock: XXX: need comments on this one 1857 * @name_assign_type: network interface name assignment type 1858 * @uc_promisc: Counter that indicates promiscuous mode 1859 * has been enabled due to the need to listen to 1860 * additional unicast addresses in a device that 1861 * does not implement ndo_set_rx_mode() 1862 * @uc: unicast mac addresses 1863 * @mc: multicast mac addresses 1864 * @dev_addrs: list of device hw addresses 1865 * @queues_kset: Group of all Kobjects in the Tx and RX queues 1866 * @promiscuity: Number of times the NIC is told to work in 1867 * promiscuous mode; if it becomes 0 the NIC will 1868 * exit promiscuous mode 1869 * @allmulti: Counter, enables or disables allmulticast mode 1870 * 1871 * @vlan_info: VLAN info 1872 * @dsa_ptr: dsa specific data 1873 * @tipc_ptr: TIPC specific data 1874 * @atalk_ptr: AppleTalk link 1875 * @ip_ptr: IPv4 specific data 1876 * @ip6_ptr: IPv6 specific data 1877 * @ax25_ptr: AX.25 specific data 1878 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering 1879 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network 1880 * device struct 1881 * @mpls_ptr: mpls_dev struct pointer 1882 * @mctp_ptr: MCTP specific data 1883 * 1884 * @dev_addr: Hw address (before bcast, 1885 * because most packets are unicast) 1886 * 1887 * @_rx: Array of RX queues 1888 * @num_rx_queues: Number of RX queues 1889 * allocated at register_netdev() time 1890 * @real_num_rx_queues: Number of RX queues currently active in device 1891 * @xdp_prog: XDP sockets filter program pointer 1892 * 1893 * @rx_handler: handler for received packets 1894 * @rx_handler_data: XXX: need comments on this one 1895 * @tcx_ingress: BPF & clsact qdisc specific data for ingress processing 1896 * @ingress_queue: XXX: need comments on this one 1897 * @nf_hooks_ingress: netfilter hooks executed for ingress packets 1898 * @broadcast: hw bcast address 1899 * 1900 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, 1901 * indexed by RX queue number. Assigned by driver. 
1902 * This must only be set if the ndo_rx_flow_steer 1903 * operation is defined 1904 * @index_hlist: Device index hash chain 1905 * 1906 * @_tx: Array of TX queues 1907 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time 1908 * @real_num_tx_queues: Number of TX queues currently active in device 1909 * @qdisc: Root qdisc from userspace point of view 1910 * @tx_queue_len: Max frames per queue allowed 1911 * @tx_global_lock: XXX: need comments on this one 1912 * @xdp_bulkq: XDP device bulk queue 1913 * @xps_maps: all CPUs/RXQs maps for XPS device 1914 * 1915 * @xps_maps: XXX: need comments on this one 1916 * @tcx_egress: BPF & clsact qdisc specific data for egress processing 1917 * @nf_hooks_egress: netfilter hooks executed for egress packets 1918 * @qdisc_hash: qdisc hash table 1919 * @watchdog_timeo: Represents the timeout that is used by 1920 * the watchdog (see dev_watchdog()) 1921 * @watchdog_timer: List of timers 1922 * 1923 * @proto_down_reason: reason a netdev interface is held down 1924 * @pcpu_refcnt: Number of references to this device 1925 * @dev_refcnt: Number of references to this device 1926 * @refcnt_tracker: Tracker directory for tracked references to this device 1927 * @todo_list: Delayed register/unregister 1928 * @link_watch_list: XXX: need comments on this one 1929 * 1930 * @reg_state: Register/unregister state machine 1931 * @dismantle: Device is going to be freed 1932 * @rtnl_link_state: This enum represents the phases of creating 1933 * a new link 1934 * 1935 * @needs_free_netdev: Should unregister perform free_netdev? 1936 * @priv_destructor: Called from unregister 1937 * @npinfo: XXX: need comments on this one 1938 * @nd_net: Network namespace this network device is inside 1939 * 1940 * @ml_priv: Mid-layer private 1941 * @ml_priv_type: Mid-layer private type 1942 * 1943 * @pcpu_stat_type: Type of device statistics which the core should 1944 * allocate/free: none, lstats, tstats, dstats. none 1945 * means the driver is handling statistics allocation/ 1946 * freeing internally. 1947 * @lstats: Loopback statistics: packets, bytes 1948 * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes 1949 * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes 1950 * 1951 * @garp_port: GARP 1952 * @mrp_port: MRP 1953 * 1954 * @dm_private: Drop monitor private 1955 * 1956 * @dev: Class/net/name entry 1957 * @sysfs_groups: Space for optional device, statistics and wireless 1958 * sysfs groups 1959 * 1960 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes 1961 * @rtnl_link_ops: Rtnl_link_ops 1962 * @stat_ops: Optional ops for queue-aware statistics 1963 * @queue_mgmt_ops: Optional ops for queue management 1964 * 1965 * @gso_max_size: Maximum size of generic segmentation offload 1966 * @tso_max_size: Device (as in HW) limit on the max TSO request size 1967 * @gso_max_segs: Maximum number of segments that can be passed to the 1968 * NIC for GSO 1969 * @tso_max_segs: Device (as in HW) limit on the max TSO segment count 1970 * @gso_ipv4_max_size: Maximum size of generic segmentation offload, 1971 * for IPv4. 
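 *	(broadly, the gso_* fields are stack-level caps that may be lowered
 *	at runtime, while the tso_* fields describe fixed device limits)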
1972 * 1973 * @dcbnl_ops: Data Center Bridging netlink ops 1974 * @num_tc: Number of traffic classes in the net device 1975 * @tc_to_txq: XXX: need comments on this one 1976 * @prio_tc_map: XXX: need comments on this one 1977 * 1978 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp 1979 * 1980 * @priomap: XXX: need comments on this one 1981 * @link_topo: Physical link topology tracking attached PHYs 1982 * @phydev: Physical device may attach itself 1983 * for hardware timestamping 1984 * @sfp_bus: attached &struct sfp_bus structure. 1985 * 1986 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock 1987 * 1988 * @proto_down: protocol port state information can be sent to the 1989 * switch driver and used to set the phys state of the 1990 * switch port. 1991 * 1992 * @threaded: napi threaded mode is enabled 1993 * 1994 * @see_all_hwtstamp_requests: device wants to see calls to 1995 * ndo_hwtstamp_set() for all timestamp requests 1996 * regardless of source, even if those aren't 1997 * HWTSTAMP_SOURCE_NETDEV 1998 * @change_proto_down: device supports setting carrier via IFLA_PROTO_DOWN 1999 * @netns_local: interface can't change network namespaces 2000 * @fcoe_mtu: device supports maximum FCoE MTU, 2158 bytes 2001 * 2002 * @net_notifier_list: List of per-net netdev notifier block 2003 * that follow this device when it is moved 2004 * to another network namespace. 2005 * 2006 * @macsec_ops: MACsec offloading ops 2007 * 2008 * @udp_tunnel_nic_info: static structure describing the UDP tunnel 2009 * offload capabilities of the device 2010 * @udp_tunnel_nic: UDP tunnel offload state 2011 * @ethtool: ethtool related state 2012 * @xdp_state: stores info on attached XDP BPF programs 2013 * 2014 * @nested_level: Used as a parameter of spin_lock_nested() of 2015 * dev->addr_list_lock. 2016 * @unlink_list: As netif_addr_lock() can be called recursively, 2017 * keep a list of interfaces to be deleted. 2018 * @gro_max_size: Maximum size of aggregated packet in generic 2019 * receive offload (GRO) 2020 * @gro_ipv4_max_size: Maximum size of aggregated packet in generic 2021 * receive offload (GRO), for IPv4. 2022 * @xdp_zc_max_segs: Maximum number of segments supported by AF_XDP 2023 * zero copy driver 2024 * 2025 * @dev_addr_shadow: Copy of @dev_addr to catch direct writes. 2026 * @linkwatch_dev_tracker: refcount tracker used by linkwatch. 2027 * @watchdog_dev_tracker: refcount tracker used by watchdog. 2028 * @dev_registered_tracker: tracker for reference held while 2029 * registered 2030 * @offload_xstats_l3: L3 HW stats for this netdevice. 2031 * 2032 * @devlink_port: Pointer to related devlink port structure. 2033 * Assigned by a driver before netdev registration using 2034 * SET_NETDEV_DEVLINK_PORT macro. This pointer is static 2035 * during the time netdevice is registered. 2036 * 2037 * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem, 2038 * where the clock is recovered. 2039 * 2040 * @max_pacing_offload_horizon: max EDT offload horizon in nsec. 2041 * @napi_config: An array of napi_config structures containing per-NAPI 2042 * settings. 2043 * @gro_flush_timeout: timeout for GRO layer in NAPI 2044 * @napi_defer_hard_irqs: If not zero, provides a counter that would 2045 * allow to avoid NIC hard IRQ, on busy queues. 2046 * 2047 * @neighbours: List heads pointing to this device's neighbours' 2048 * dev_list, one per address-family. 2049 * @hwprov: Tracks which PTP performs hardware packet time stamping. 
2050 * 2051 * FIXME: cleanup struct net_device such that network protocol info 2052 * moves out. 2053 */ 2054 2055 struct net_device { 2056 /* Cacheline organization can be found documented in 2057 * Documentation/networking/net_cachelines/net_device.rst. 2058 * Please update the document when adding new fields. 2059 */ 2060 2061 /* TX read-mostly hotpath */ 2062 __cacheline_group_begin(net_device_read_tx); 2063 struct_group(priv_flags_fast, 2064 unsigned long priv_flags:32; 2065 unsigned long lltx:1; 2066 ); 2067 const struct net_device_ops *netdev_ops; 2068 const struct header_ops *header_ops; 2069 struct netdev_queue *_tx; 2070 netdev_features_t gso_partial_features; 2071 unsigned int real_num_tx_queues; 2072 unsigned int gso_max_size; 2073 unsigned int gso_ipv4_max_size; 2074 u16 gso_max_segs; 2075 s16 num_tc; 2076 /* Note : dev->mtu is often read without holding a lock. 2077 * Writers usually hold RTNL. 2078 * It is recommended to use READ_ONCE() to annotate the reads, 2079 * and to use WRITE_ONCE() to annotate the writes. 2080 */ 2081 unsigned int mtu; 2082 unsigned short needed_headroom; 2083 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; 2084 #ifdef CONFIG_XPS 2085 struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX]; 2086 #endif 2087 #ifdef CONFIG_NETFILTER_EGRESS 2088 struct nf_hook_entries __rcu *nf_hooks_egress; 2089 #endif 2090 #ifdef CONFIG_NET_XGRESS 2091 struct bpf_mprog_entry __rcu *tcx_egress; 2092 #endif 2093 __cacheline_group_end(net_device_read_tx); 2094 2095 /* TXRX read-mostly hotpath */ 2096 __cacheline_group_begin(net_device_read_txrx); 2097 union { 2098 struct pcpu_lstats __percpu *lstats; 2099 struct pcpu_sw_netstats __percpu *tstats; 2100 struct pcpu_dstats __percpu *dstats; 2101 }; 2102 unsigned long state; 2103 unsigned int flags; 2104 unsigned short hard_header_len; 2105 netdev_features_t features; 2106 struct inet6_dev __rcu *ip6_ptr; 2107 __cacheline_group_end(net_device_read_txrx); 2108 2109 /* RX read-mostly hotpath */ 2110 __cacheline_group_begin(net_device_read_rx); 2111 struct bpf_prog __rcu *xdp_prog; 2112 struct list_head ptype_specific; 2113 int ifindex; 2114 unsigned int real_num_rx_queues; 2115 struct netdev_rx_queue *_rx; 2116 unsigned int gro_max_size; 2117 unsigned int gro_ipv4_max_size; 2118 rx_handler_func_t __rcu *rx_handler; 2119 void __rcu *rx_handler_data; 2120 possible_net_t nd_net; 2121 #ifdef CONFIG_NETPOLL 2122 struct netpoll_info __rcu *npinfo; 2123 #endif 2124 #ifdef CONFIG_NET_XGRESS 2125 struct bpf_mprog_entry __rcu *tcx_ingress; 2126 #endif 2127 __cacheline_group_end(net_device_read_rx); 2128 2129 char name[IFNAMSIZ]; 2130 struct netdev_name_node *name_node; 2131 struct dev_ifalias __rcu *ifalias; 2132 /* 2133 * I/O specific fields 2134 * FIXME: Merge these and struct ifmap into one 2135 */ 2136 unsigned long mem_end; 2137 unsigned long mem_start; 2138 unsigned long base_addr; 2139 2140 /* 2141 * Some hardware also needs these fields (state,dev_list, 2142 * napi_list,unreg_list,close_list) but they are not 2143 * part of the usual set specified in Space.c. 
2144 */ 2145 2146 2147 struct list_head dev_list; 2148 struct list_head napi_list; 2149 struct list_head unreg_list; 2150 struct list_head close_list; 2151 struct list_head ptype_all; 2152 2153 struct { 2154 struct list_head upper; 2155 struct list_head lower; 2156 } adj_list; 2157 2158 /* Read-mostly cache-line for fast-path access */ 2159 xdp_features_t xdp_features; 2160 const struct xdp_metadata_ops *xdp_metadata_ops; 2161 const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops; 2162 unsigned short gflags; 2163 2164 unsigned short needed_tailroom; 2165 2166 netdev_features_t hw_features; 2167 netdev_features_t wanted_features; 2168 netdev_features_t vlan_features; 2169 netdev_features_t hw_enc_features; 2170 netdev_features_t mpls_features; 2171 2172 unsigned int min_mtu; 2173 unsigned int max_mtu; 2174 unsigned short type; 2175 unsigned char min_header_len; 2176 unsigned char name_assign_type; 2177 2178 int group; 2179 2180 struct net_device_stats stats; /* not used by modern drivers */ 2181 2182 struct net_device_core_stats __percpu *core_stats; 2183 2184 /* Stats to monitor link on/off, flapping */ 2185 atomic_t carrier_up_count; 2186 atomic_t carrier_down_count; 2187 2188 #ifdef CONFIG_WIRELESS_EXT 2189 const struct iw_handler_def *wireless_handlers; 2190 #endif 2191 const struct ethtool_ops *ethtool_ops; 2192 #ifdef CONFIG_NET_L3_MASTER_DEV 2193 const struct l3mdev_ops *l3mdev_ops; 2194 #endif 2195 #if IS_ENABLED(CONFIG_IPV6) 2196 const struct ndisc_ops *ndisc_ops; 2197 #endif 2198 2199 #ifdef CONFIG_XFRM_OFFLOAD 2200 const struct xfrmdev_ops *xfrmdev_ops; 2201 #endif 2202 2203 #if IS_ENABLED(CONFIG_TLS_DEVICE) 2204 const struct tlsdev_ops *tlsdev_ops; 2205 #endif 2206 2207 unsigned int operstate; 2208 unsigned char link_mode; 2209 2210 unsigned char if_port; 2211 unsigned char dma; 2212 2213 /* Interface address info. 
*/ 2214 unsigned char perm_addr[MAX_ADDR_LEN]; 2215 unsigned char addr_assign_type; 2216 unsigned char addr_len; 2217 unsigned char upper_level; 2218 unsigned char lower_level; 2219 2220 unsigned short neigh_priv_len; 2221 unsigned short dev_id; 2222 unsigned short dev_port; 2223 int irq; 2224 u32 priv_len; 2225 2226 spinlock_t addr_list_lock; 2227 2228 struct netdev_hw_addr_list uc; 2229 struct netdev_hw_addr_list mc; 2230 struct netdev_hw_addr_list dev_addrs; 2231 2232 #ifdef CONFIG_SYSFS 2233 struct kset *queues_kset; 2234 #endif 2235 #ifdef CONFIG_LOCKDEP 2236 struct list_head unlink_list; 2237 #endif 2238 unsigned int promiscuity; 2239 unsigned int allmulti; 2240 bool uc_promisc; 2241 #ifdef CONFIG_LOCKDEP 2242 unsigned char nested_level; 2243 #endif 2244 2245 2246 /* Protocol-specific pointers */ 2247 struct in_device __rcu *ip_ptr; 2248 /** @fib_nh_head: nexthops associated with this netdev */ 2249 struct hlist_head fib_nh_head; 2250 2251 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2252 struct vlan_info __rcu *vlan_info; 2253 #endif 2254 #if IS_ENABLED(CONFIG_NET_DSA) 2255 struct dsa_port *dsa_ptr; 2256 #endif 2257 #if IS_ENABLED(CONFIG_TIPC) 2258 struct tipc_bearer __rcu *tipc_ptr; 2259 #endif 2260 #if IS_ENABLED(CONFIG_ATALK) 2261 void *atalk_ptr; 2262 #endif 2263 #if IS_ENABLED(CONFIG_AX25) 2264 struct ax25_dev __rcu *ax25_ptr; 2265 #endif 2266 #if IS_ENABLED(CONFIG_CFG80211) 2267 struct wireless_dev *ieee80211_ptr; 2268 #endif 2269 #if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN) 2270 struct wpan_dev *ieee802154_ptr; 2271 #endif 2272 #if IS_ENABLED(CONFIG_MPLS_ROUTING) 2273 struct mpls_dev __rcu *mpls_ptr; 2274 #endif 2275 #if IS_ENABLED(CONFIG_MCTP) 2276 struct mctp_dev __rcu *mctp_ptr; 2277 #endif 2278 2279 /* 2280 * Cache lines mostly used on receive path (including eth_type_trans()) 2281 */ 2282 /* Interface address info used in eth_type_trans() */ 2283 const unsigned char *dev_addr; 2284 2285 unsigned int num_rx_queues; 2286 #define GRO_LEGACY_MAX_SIZE 65536u 2287 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), 2288 * and shinfo->gso_segs is a 16bit field. 2289 */ 2290 #define GRO_MAX_SIZE (8 * 65535u) 2291 unsigned int xdp_zc_max_segs; 2292 struct netdev_queue __rcu *ingress_queue; 2293 #ifdef CONFIG_NETFILTER_INGRESS 2294 struct nf_hook_entries __rcu *nf_hooks_ingress; 2295 #endif 2296 2297 unsigned char broadcast[MAX_ADDR_LEN]; 2298 #ifdef CONFIG_RFS_ACCEL 2299 struct cpu_rmap *rx_cpu_rmap; 2300 #endif 2301 struct hlist_node index_hlist; 2302 2303 /* 2304 * Cache lines mostly used on transmit path 2305 */ 2306 unsigned int num_tx_queues; 2307 struct Qdisc __rcu *qdisc; 2308 unsigned int tx_queue_len; 2309 spinlock_t tx_global_lock; 2310 2311 struct xdp_dev_bulk_queue __percpu *xdp_bulkq; 2312 2313 #ifdef CONFIG_NET_SCHED 2314 DECLARE_HASHTABLE (qdisc_hash, 4); 2315 #endif 2316 /* These may be needed for future network-power-down code. 
*/ 2317 struct timer_list watchdog_timer; 2318 int watchdog_timeo; 2319 2320 u32 proto_down_reason; 2321 2322 struct list_head todo_list; 2323 2324 #ifdef CONFIG_PCPU_DEV_REFCNT 2325 int __percpu *pcpu_refcnt; 2326 #else 2327 refcount_t dev_refcnt; 2328 #endif 2329 struct ref_tracker_dir refcnt_tracker; 2330 2331 struct list_head link_watch_list; 2332 2333 u8 reg_state; 2334 2335 bool dismantle; 2336 2337 enum { 2338 RTNL_LINK_INITIALIZED, 2339 RTNL_LINK_INITIALIZING, 2340 } rtnl_link_state:16; 2341 2342 bool needs_free_netdev; 2343 void (*priv_destructor)(struct net_device *dev); 2344 2345 /* mid-layer private */ 2346 void *ml_priv; 2347 enum netdev_ml_priv_type ml_priv_type; 2348 2349 enum netdev_stat_type pcpu_stat_type:8; 2350 2351 #if IS_ENABLED(CONFIG_GARP) 2352 struct garp_port __rcu *garp_port; 2353 #endif 2354 #if IS_ENABLED(CONFIG_MRP) 2355 struct mrp_port __rcu *mrp_port; 2356 #endif 2357 #if IS_ENABLED(CONFIG_NET_DROP_MONITOR) 2358 struct dm_hw_stat_delta __rcu *dm_private; 2359 #endif 2360 struct device dev; 2361 const struct attribute_group *sysfs_groups[4]; 2362 const struct attribute_group *sysfs_rx_queue_group; 2363 2364 const struct rtnl_link_ops *rtnl_link_ops; 2365 2366 const struct netdev_stat_ops *stat_ops; 2367 2368 const struct netdev_queue_mgmt_ops *queue_mgmt_ops; 2369 2370 /* for setting kernel sock attribute on TCP connection setup */ 2371 #define GSO_MAX_SEGS 65535u 2372 #define GSO_LEGACY_MAX_SIZE 65536u 2373 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), 2374 * and shinfo->gso_segs is a 16bit field. 2375 */ 2376 #define GSO_MAX_SIZE (8 * GSO_MAX_SEGS) 2377 2378 #define TSO_LEGACY_MAX_SIZE 65536 2379 #define TSO_MAX_SIZE UINT_MAX 2380 unsigned int tso_max_size; 2381 #define TSO_MAX_SEGS U16_MAX 2382 u16 tso_max_segs; 2383 2384 #ifdef CONFIG_DCB 2385 const struct dcbnl_rtnl_ops *dcbnl_ops; 2386 #endif 2387 u8 prio_tc_map[TC_BITMASK + 1]; 2388 2389 #if IS_ENABLED(CONFIG_FCOE) 2390 unsigned int fcoe_ddp_xid; 2391 #endif 2392 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 2393 struct netprio_map __rcu *priomap; 2394 #endif 2395 struct phy_link_topology *link_topo; 2396 struct phy_device *phydev; 2397 struct sfp_bus *sfp_bus; 2398 struct lock_class_key *qdisc_tx_busylock; 2399 bool proto_down; 2400 bool threaded; 2401 2402 /* priv_flags_slow, ungrouped to save space */ 2403 unsigned long see_all_hwtstamp_requests:1; 2404 unsigned long change_proto_down:1; 2405 unsigned long netns_local:1; 2406 unsigned long fcoe_mtu:1; 2407 2408 struct list_head net_notifier_list; 2409 2410 #if IS_ENABLED(CONFIG_MACSEC) 2411 /* MACsec management functions */ 2412 const struct macsec_ops *macsec_ops; 2413 #endif 2414 const struct udp_tunnel_nic_info *udp_tunnel_nic_info; 2415 struct udp_tunnel_nic *udp_tunnel_nic; 2416 2417 struct ethtool_netdev_state *ethtool; 2418 2419 /* protected by rtnl_lock */ 2420 struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; 2421 2422 u8 dev_addr_shadow[MAX_ADDR_LEN]; 2423 netdevice_tracker linkwatch_dev_tracker; 2424 netdevice_tracker watchdog_dev_tracker; 2425 netdevice_tracker dev_registered_tracker; 2426 struct rtnl_hw_stats64 *offload_xstats_l3; 2427 2428 struct devlink_port *devlink_port; 2429 2430 #if IS_ENABLED(CONFIG_DPLL) 2431 struct dpll_pin __rcu *dpll_pin; 2432 #endif 2433 #if IS_ENABLED(CONFIG_PAGE_POOL) 2434 /** @page_pools: page pools created for this netdevice */ 2435 struct hlist_head page_pools; 2436 #endif 2437 2438 /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). 
*/ 2439 struct dim_irq_moder *irq_moder; 2440 2441 u64 max_pacing_offload_horizon; 2442 struct napi_config *napi_config; 2443 unsigned long gro_flush_timeout; 2444 u32 napi_defer_hard_irqs; 2445 2446 /** 2447 * @lock: protects @net_shaper_hierarchy, feel free to use for other 2448 * netdev-scope protection. Ordering: take after rtnl_lock. 2449 */ 2450 struct mutex lock; 2451 2452 #if IS_ENABLED(CONFIG_NET_SHAPER) 2453 /** 2454 * @net_shaper_hierarchy: data tracking the current shaper status 2455 * see include/net/net_shapers.h 2456 */ 2457 struct net_shaper_hierarchy *net_shaper_hierarchy; 2458 #endif 2459 2460 struct hlist_head neighbours[NEIGH_NR_TABLES]; 2461 2462 struct hwtstamp_provider __rcu *hwprov; 2463 2464 u8 priv[] ____cacheline_aligned 2465 __counted_by(priv_len); 2466 } ____cacheline_aligned; 2467 #define to_net_dev(d) container_of(d, struct net_device, dev) 2468 2469 /* 2470 * Driver should use this to assign devlink port instance to a netdevice 2471 * before it registers the netdevice. Therefore devlink_port is static 2472 * during the netdev lifetime after it is registered. 2473 */ 2474 #define SET_NETDEV_DEVLINK_PORT(dev, port) \ 2475 ({ \ 2476 WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \ 2477 ((dev)->devlink_port = (port)); \ 2478 }) 2479 2480 static inline bool netif_elide_gro(const struct net_device *dev) 2481 { 2482 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) 2483 return true; 2484 return false; 2485 } 2486 2487 #define NETDEV_ALIGN 32 2488 2489 static inline 2490 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) 2491 { 2492 return dev->prio_tc_map[prio & TC_BITMASK]; 2493 } 2494 2495 static inline 2496 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) 2497 { 2498 if (tc >= dev->num_tc) 2499 return -EINVAL; 2500 2501 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; 2502 return 0; 2503 } 2504 2505 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); 2506 void netdev_reset_tc(struct net_device *dev); 2507 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); 2508 int netdev_set_num_tc(struct net_device *dev, u8 num_tc); 2509 2510 static inline 2511 int netdev_get_num_tc(struct net_device *dev) 2512 { 2513 return dev->num_tc; 2514 } 2515 2516 static inline void net_prefetch(void *p) 2517 { 2518 prefetch(p); 2519 #if L1_CACHE_BYTES < 128 2520 prefetch((u8 *)p + L1_CACHE_BYTES); 2521 #endif 2522 } 2523 2524 static inline void net_prefetchw(void *p) 2525 { 2526 prefetchw(p); 2527 #if L1_CACHE_BYTES < 128 2528 prefetchw((u8 *)p + L1_CACHE_BYTES); 2529 #endif 2530 } 2531 2532 void netdev_unbind_sb_channel(struct net_device *dev, 2533 struct net_device *sb_dev); 2534 int netdev_bind_sb_channel_queue(struct net_device *dev, 2535 struct net_device *sb_dev, 2536 u8 tc, u16 count, u16 offset); 2537 int netdev_set_sb_channel(struct net_device *dev, u16 channel); 2538 static inline int netdev_get_sb_channel(struct net_device *dev) 2539 { 2540 return max_t(int, -dev->num_tc, 0); 2541 } 2542 2543 static inline 2544 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, 2545 unsigned int index) 2546 { 2547 DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues); 2548 return &dev->_tx[index]; 2549 } 2550 2551 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, 2552 const struct sk_buff *skb) 2553 { 2554 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 2555 } 2556 2557 static inline void netdev_for_each_tx_queue(struct net_device *dev, 2558 
void (*f)(struct net_device *,
2559							     struct netdev_queue *,
2560							     void *),
2561					 void *arg)
2562 {
2563	unsigned int i;
2564
2565	for (i = 0; i < dev->num_tx_queues; i++)
2566		f(dev, &dev->_tx[i], arg);
2567 }
2568
2569 #define netdev_lockdep_set_classes(dev)				\
2570 {								\
2571	static struct lock_class_key qdisc_tx_busylock_key;	\
2572	static struct lock_class_key qdisc_xmit_lock_key;	\
2573	static struct lock_class_key dev_addr_list_lock_key;	\
2574	unsigned int i;						\
2575								\
2576	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
2577	lockdep_set_class(&(dev)->addr_list_lock,		\
2578			  &dev_addr_list_lock_key);		\
2579	for (i = 0; i < (dev)->num_tx_queues; i++)		\
2580		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
2581				  &qdisc_xmit_lock_key);	\
2582 }
2583
2584 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2585		     struct net_device *sb_dev);
2586 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2587					  struct sk_buff *skb,
2588					  struct net_device *sb_dev);
2589
2590 /* returns the headroom that the master device needs to take into account
2591  * when forwarding to this dev
2592  */
2593 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2594 {
2595	return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2596 }
2597
2598 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2599 {
2600	if (dev->netdev_ops->ndo_set_rx_headroom)
2601		dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2602 }
2603
2604 /* set the device rx headroom to the dev's default */
2605 static inline void netdev_reset_rx_headroom(struct net_device *dev)
2606 {
2607	netdev_set_rx_headroom(dev, -1);
2608 }
2609
2610 static inline void *netdev_get_ml_priv(struct net_device *dev,
2611					enum netdev_ml_priv_type type)
2612 {
2613	if (dev->ml_priv_type != type)
2614		return NULL;
2615
2616	return dev->ml_priv;
2617 }
2618
2619 static inline void netdev_set_ml_priv(struct net_device *dev,
2620				       void *ml_priv,
2621				       enum netdev_ml_priv_type type)
2622 {
2623	WARN(dev->ml_priv_type && dev->ml_priv_type != type,
2624	     "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
2625	     dev->ml_priv_type, type);
2626	WARN(!dev->ml_priv_type && dev->ml_priv,
2627	     "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
2628
2629	dev->ml_priv = ml_priv;
2630	dev->ml_priv_type = type;
2631 }
2632
2633 /*
2634  * Net namespace inlines
2635  */
2636 static inline
2637 struct net *dev_net(const struct net_device *dev)
2638 {
2639	return read_pnet(&dev->nd_net);
2640 }
2641
2642 static inline
2643 void dev_net_set(struct net_device *dev, struct net *net)
2644 {
2645	write_pnet(&dev->nd_net, net);
2646 }
2647
2648 /**
2649  * netdev_priv - access network device private data
2650  * @dev: network device
2651  *
2652  * Get network device private data
2653  */
2654 static inline void *netdev_priv(const struct net_device *dev)
2655 {
2656	return (void *)dev->priv;
2657 }
2658
2659 /* Set the sysfs physical device reference for the network logical device.
2660  * If set prior to registration, a symlink is created during initialization.
2661  */
2662 #define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
2663
2664 /* Set the sysfs device type for the network logical device to allow
2665  * fine-grained identification of different network device types. For
2666  * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
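 * The type's name, when set, is also exported to userspace in the DEVTYPE
 * uevent variable.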
2667 */
2668 #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
2669
2670 void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
2671			  enum netdev_queue_type type,
2672			  struct napi_struct *napi);
2673
2674 static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
2675 {
2676	napi->irq = irq;
2677 }
2678
2679 /* Default NAPI poll() weight
2680  * Device drivers are strongly advised not to use a bigger value
2681  */
2682 #define NAPI_POLL_WEIGHT 64
2683
2684 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
2685			   int (*poll)(struct napi_struct *, int), int weight);
2686
2687 /**
2688  * netif_napi_add() - initialize a NAPI context
2689  * @dev: network device
2690  * @napi: NAPI context
2691  * @poll: polling function
2692  *
2693  * netif_napi_add() must be used to initialize a NAPI context prior to calling
2694  * *any* of the other NAPI-related functions.
2695  */
2696 static inline void
2697 netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2698	       int (*poll)(struct napi_struct *, int))
2699 {
2700	netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
2701 }
2702
2703 static inline void
2704 netif_napi_add_tx_weight(struct net_device *dev,
2705			 struct napi_struct *napi,
2706			 int (*poll)(struct napi_struct *, int),
2707			 int weight)
2708 {
2709	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2710	netif_napi_add_weight(dev, napi, poll, weight);
2711 }
2712
2713 /**
2714  * netif_napi_add_config - initialize a NAPI context with persistent config
2715  * @dev: network device
2716  * @napi: NAPI context
2717  * @poll: polling function
2718  * @index: the NAPI index
2719  */
2720 static inline void
2721 netif_napi_add_config(struct net_device *dev, struct napi_struct *napi,
2722		      int (*poll)(struct napi_struct *, int), int index)
2723 {
2724	napi->index = index;
2725	napi->config = &dev->napi_config[index];
2726	netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
2727 }
2728
2729 /**
2730  * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
2731  * @dev: network device
2732  * @napi: NAPI context
2733  * @poll: polling function
2734  *
2735  * This variant of netif_napi_add() should be used from drivers using NAPI
2736  * to exclusively poll a TX queue.
2737  * This avoids adding it to napi_hash[] and thus polluting that hash table.
2738  */
2739 static inline void netif_napi_add_tx(struct net_device *dev,
2740				     struct napi_struct *napi,
2741				     int (*poll)(struct napi_struct *, int))
2742 {
2743	netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
2744 }
2745
2746 /**
2747  * __netif_napi_del - remove a NAPI context
2748  * @napi: NAPI context
2749  *
2750  * Warning: caller must observe RCU grace period before freeing memory
2751  * containing @napi. Drivers might want to call this helper to combine
2752  * all the needed RCU grace periods into a single one.
2753  */
2754 void __netif_napi_del(struct napi_struct *napi);
2755
2756 /**
2757  * netif_napi_del - remove a NAPI context
2758  * @napi: NAPI context
2759  *
2760  * netif_napi_del() removes a NAPI context from the network device NAPI list
2761  */
2762 static inline void netif_napi_del(struct napi_struct *napi)
2763 {
2764	__netif_napi_del(napi);
2765	synchronize_net();
2766 }
2767
2768 struct packet_type {
2769	__be16			type;	/* This is really htons(ether_type).
*/ 2770 bool ignore_outgoing; 2771 struct net_device *dev; /* NULL is wildcarded here */ 2772 netdevice_tracker dev_tracker; 2773 int (*func) (struct sk_buff *, 2774 struct net_device *, 2775 struct packet_type *, 2776 struct net_device *); 2777 void (*list_func) (struct list_head *, 2778 struct packet_type *, 2779 struct net_device *); 2780 bool (*id_match)(struct packet_type *ptype, 2781 struct sock *sk); 2782 struct net *af_packet_net; 2783 void *af_packet_priv; 2784 struct list_head list; 2785 }; 2786 2787 struct offload_callbacks { 2788 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 2789 netdev_features_t features); 2790 struct sk_buff *(*gro_receive)(struct list_head *head, 2791 struct sk_buff *skb); 2792 int (*gro_complete)(struct sk_buff *skb, int nhoff); 2793 }; 2794 2795 struct packet_offload { 2796 __be16 type; /* This is really htons(ether_type). */ 2797 u16 priority; 2798 struct offload_callbacks callbacks; 2799 struct list_head list; 2800 }; 2801 2802 /* often modified stats are per-CPU, other are shared (netdev->stats) */ 2803 struct pcpu_sw_netstats { 2804 u64_stats_t rx_packets; 2805 u64_stats_t rx_bytes; 2806 u64_stats_t tx_packets; 2807 u64_stats_t tx_bytes; 2808 struct u64_stats_sync syncp; 2809 } __aligned(4 * sizeof(u64)); 2810 2811 struct pcpu_dstats { 2812 u64_stats_t rx_packets; 2813 u64_stats_t rx_bytes; 2814 u64_stats_t rx_drops; 2815 u64_stats_t tx_packets; 2816 u64_stats_t tx_bytes; 2817 u64_stats_t tx_drops; 2818 struct u64_stats_sync syncp; 2819 } __aligned(8 * sizeof(u64)); 2820 2821 struct pcpu_lstats { 2822 u64_stats_t packets; 2823 u64_stats_t bytes; 2824 struct u64_stats_sync syncp; 2825 } __aligned(2 * sizeof(u64)); 2826 2827 void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes); 2828 2829 static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len) 2830 { 2831 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 2832 2833 u64_stats_update_begin(&tstats->syncp); 2834 u64_stats_add(&tstats->rx_bytes, len); 2835 u64_stats_inc(&tstats->rx_packets); 2836 u64_stats_update_end(&tstats->syncp); 2837 } 2838 2839 static inline void dev_sw_netstats_tx_add(struct net_device *dev, 2840 unsigned int packets, 2841 unsigned int len) 2842 { 2843 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 2844 2845 u64_stats_update_begin(&tstats->syncp); 2846 u64_stats_add(&tstats->tx_bytes, len); 2847 u64_stats_add(&tstats->tx_packets, packets); 2848 u64_stats_update_end(&tstats->syncp); 2849 } 2850 2851 static inline void dev_lstats_add(struct net_device *dev, unsigned int len) 2852 { 2853 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); 2854 2855 u64_stats_update_begin(&lstats->syncp); 2856 u64_stats_add(&lstats->bytes, len); 2857 u64_stats_inc(&lstats->packets); 2858 u64_stats_update_end(&lstats->syncp); 2859 } 2860 2861 static inline void dev_dstats_rx_add(struct net_device *dev, 2862 unsigned int len) 2863 { 2864 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); 2865 2866 u64_stats_update_begin(&dstats->syncp); 2867 u64_stats_inc(&dstats->rx_packets); 2868 u64_stats_add(&dstats->rx_bytes, len); 2869 u64_stats_update_end(&dstats->syncp); 2870 } 2871 2872 static inline void dev_dstats_rx_dropped(struct net_device *dev) 2873 { 2874 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); 2875 2876 u64_stats_update_begin(&dstats->syncp); 2877 u64_stats_inc(&dstats->rx_drops); 2878 u64_stats_update_end(&dstats->syncp); 2879 } 2880 2881 static inline void dev_dstats_tx_add(struct net_device *dev, 2882 
unsigned int len) 2883 { 2884 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); 2885 2886 u64_stats_update_begin(&dstats->syncp); 2887 u64_stats_inc(&dstats->tx_packets); 2888 u64_stats_add(&dstats->tx_bytes, len); 2889 u64_stats_update_end(&dstats->syncp); 2890 } 2891 2892 static inline void dev_dstats_tx_dropped(struct net_device *dev) 2893 { 2894 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); 2895 2896 u64_stats_update_begin(&dstats->syncp); 2897 u64_stats_inc(&dstats->tx_drops); 2898 u64_stats_update_end(&dstats->syncp); 2899 } 2900 2901 #define __netdev_alloc_pcpu_stats(type, gfp) \ 2902 ({ \ 2903 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ 2904 if (pcpu_stats) { \ 2905 int __cpu; \ 2906 for_each_possible_cpu(__cpu) { \ 2907 typeof(type) *stat; \ 2908 stat = per_cpu_ptr(pcpu_stats, __cpu); \ 2909 u64_stats_init(&stat->syncp); \ 2910 } \ 2911 } \ 2912 pcpu_stats; \ 2913 }) 2914 2915 #define netdev_alloc_pcpu_stats(type) \ 2916 __netdev_alloc_pcpu_stats(type, GFP_KERNEL) 2917 2918 #define devm_netdev_alloc_pcpu_stats(dev, type) \ 2919 ({ \ 2920 typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\ 2921 if (pcpu_stats) { \ 2922 int __cpu; \ 2923 for_each_possible_cpu(__cpu) { \ 2924 typeof(type) *stat; \ 2925 stat = per_cpu_ptr(pcpu_stats, __cpu); \ 2926 u64_stats_init(&stat->syncp); \ 2927 } \ 2928 } \ 2929 pcpu_stats; \ 2930 }) 2931 2932 enum netdev_lag_tx_type { 2933 NETDEV_LAG_TX_TYPE_UNKNOWN, 2934 NETDEV_LAG_TX_TYPE_RANDOM, 2935 NETDEV_LAG_TX_TYPE_BROADCAST, 2936 NETDEV_LAG_TX_TYPE_ROUNDROBIN, 2937 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, 2938 NETDEV_LAG_TX_TYPE_HASH, 2939 }; 2940 2941 enum netdev_lag_hash { 2942 NETDEV_LAG_HASH_NONE, 2943 NETDEV_LAG_HASH_L2, 2944 NETDEV_LAG_HASH_L34, 2945 NETDEV_LAG_HASH_L23, 2946 NETDEV_LAG_HASH_E23, 2947 NETDEV_LAG_HASH_E34, 2948 NETDEV_LAG_HASH_VLAN_SRCMAC, 2949 NETDEV_LAG_HASH_UNKNOWN, 2950 }; 2951 2952 struct netdev_lag_upper_info { 2953 enum netdev_lag_tx_type tx_type; 2954 enum netdev_lag_hash hash_type; 2955 }; 2956 2957 struct netdev_lag_lower_state_info { 2958 u8 link_up : 1, 2959 tx_enabled : 1; 2960 }; 2961 2962 #include <linux/notifier.h> 2963 2964 /* netdevice notifier chain. Please remember to update netdev_cmd_to_name() 2965 * and the rtnetlink notification exclusion list in rtnetlink_event() when 2966 * adding new types. 
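 *
 * Illustrative consumer sketch; foo_netdev_event() and foo_nb are
 * hypothetical names, not part of this API:
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			netdev_info(dev, "interface is up\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&foo_nb);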
2967 */ 2968 enum netdev_cmd { 2969 NETDEV_UP = 1, /* For now you can't veto a device up/down */ 2970 NETDEV_DOWN, 2971 NETDEV_REBOOT, /* Tell a protocol stack a network interface 2972 detected a hardware crash and restarted 2973 - we can use this eg to kick tcp sessions 2974 once done */ 2975 NETDEV_CHANGE, /* Notify device state change */ 2976 NETDEV_REGISTER, 2977 NETDEV_UNREGISTER, 2978 NETDEV_CHANGEMTU, /* notify after mtu change happened */ 2979 NETDEV_CHANGEADDR, /* notify after the address change */ 2980 NETDEV_PRE_CHANGEADDR, /* notify before the address change */ 2981 NETDEV_GOING_DOWN, 2982 NETDEV_CHANGENAME, 2983 NETDEV_FEAT_CHANGE, 2984 NETDEV_BONDING_FAILOVER, 2985 NETDEV_PRE_UP, 2986 NETDEV_PRE_TYPE_CHANGE, 2987 NETDEV_POST_TYPE_CHANGE, 2988 NETDEV_POST_INIT, 2989 NETDEV_PRE_UNINIT, 2990 NETDEV_RELEASE, 2991 NETDEV_NOTIFY_PEERS, 2992 NETDEV_JOIN, 2993 NETDEV_CHANGEUPPER, 2994 NETDEV_RESEND_IGMP, 2995 NETDEV_PRECHANGEMTU, /* notify before mtu change happened */ 2996 NETDEV_CHANGEINFODATA, 2997 NETDEV_BONDING_INFO, 2998 NETDEV_PRECHANGEUPPER, 2999 NETDEV_CHANGELOWERSTATE, 3000 NETDEV_UDP_TUNNEL_PUSH_INFO, 3001 NETDEV_UDP_TUNNEL_DROP_INFO, 3002 NETDEV_CHANGE_TX_QUEUE_LEN, 3003 NETDEV_CVLAN_FILTER_PUSH_INFO, 3004 NETDEV_CVLAN_FILTER_DROP_INFO, 3005 NETDEV_SVLAN_FILTER_PUSH_INFO, 3006 NETDEV_SVLAN_FILTER_DROP_INFO, 3007 NETDEV_OFFLOAD_XSTATS_ENABLE, 3008 NETDEV_OFFLOAD_XSTATS_DISABLE, 3009 NETDEV_OFFLOAD_XSTATS_REPORT_USED, 3010 NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, 3011 NETDEV_XDP_FEAT_CHANGE, 3012 }; 3013 const char *netdev_cmd_to_name(enum netdev_cmd cmd); 3014 3015 int register_netdevice_notifier(struct notifier_block *nb); 3016 int unregister_netdevice_notifier(struct notifier_block *nb); 3017 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb); 3018 int unregister_netdevice_notifier_net(struct net *net, 3019 struct notifier_block *nb); 3020 int register_netdevice_notifier_dev_net(struct net_device *dev, 3021 struct notifier_block *nb, 3022 struct netdev_net_notifier *nn); 3023 int unregister_netdevice_notifier_dev_net(struct net_device *dev, 3024 struct notifier_block *nb, 3025 struct netdev_net_notifier *nn); 3026 3027 struct netdev_notifier_info { 3028 struct net_device *dev; 3029 struct netlink_ext_ack *extack; 3030 }; 3031 3032 struct netdev_notifier_info_ext { 3033 struct netdev_notifier_info info; /* must be first */ 3034 union { 3035 u32 mtu; 3036 } ext; 3037 }; 3038 3039 struct netdev_notifier_change_info { 3040 struct netdev_notifier_info info; /* must be first */ 3041 unsigned int flags_changed; 3042 }; 3043 3044 struct netdev_notifier_changeupper_info { 3045 struct netdev_notifier_info info; /* must be first */ 3046 struct net_device *upper_dev; /* new upper dev */ 3047 bool master; /* is upper dev master */ 3048 bool linking; /* is the notification for link or unlink */ 3049 void *upper_info; /* upper dev info */ 3050 }; 3051 3052 struct netdev_notifier_changelowerstate_info { 3053 struct netdev_notifier_info info; /* must be first */ 3054 void *lower_state_info; /* is lower dev state */ 3055 }; 3056 3057 struct netdev_notifier_pre_changeaddr_info { 3058 struct netdev_notifier_info info; /* must be first */ 3059 const unsigned char *dev_addr; 3060 }; 3061 3062 enum netdev_offload_xstats_type { 3063 NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, 3064 }; 3065 3066 struct netdev_notifier_offload_xstats_info { 3067 struct netdev_notifier_info info; /* must be first */ 3068 enum netdev_offload_xstats_type type; 3069 3070 union { 3071 /* 
NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */ 3072 struct netdev_notifier_offload_xstats_rd *report_delta; 3073 /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */ 3074 struct netdev_notifier_offload_xstats_ru *report_used; 3075 }; 3076 }; 3077 3078 int netdev_offload_xstats_enable(struct net_device *dev, 3079 enum netdev_offload_xstats_type type, 3080 struct netlink_ext_ack *extack); 3081 int netdev_offload_xstats_disable(struct net_device *dev, 3082 enum netdev_offload_xstats_type type); 3083 bool netdev_offload_xstats_enabled(const struct net_device *dev, 3084 enum netdev_offload_xstats_type type); 3085 int netdev_offload_xstats_get(struct net_device *dev, 3086 enum netdev_offload_xstats_type type, 3087 struct rtnl_hw_stats64 *stats, bool *used, 3088 struct netlink_ext_ack *extack); 3089 void 3090 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd, 3091 const struct rtnl_hw_stats64 *stats); 3092 void 3093 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru); 3094 void netdev_offload_xstats_push_delta(struct net_device *dev, 3095 enum netdev_offload_xstats_type type, 3096 const struct rtnl_hw_stats64 *stats); 3097 3098 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, 3099 struct net_device *dev) 3100 { 3101 info->dev = dev; 3102 info->extack = NULL; 3103 } 3104 3105 static inline struct net_device * 3106 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) 3107 { 3108 return info->dev; 3109 } 3110 3111 static inline struct netlink_ext_ack * 3112 netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) 3113 { 3114 return info->extack; 3115 } 3116 3117 int call_netdevice_notifiers(unsigned long val, struct net_device *dev); 3118 int call_netdevice_notifiers_info(unsigned long val, 3119 struct netdev_notifier_info *info); 3120 3121 #define for_each_netdev(net, d) \ 3122 list_for_each_entry(d, &(net)->dev_base_head, dev_list) 3123 #define for_each_netdev_reverse(net, d) \ 3124 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) 3125 #define for_each_netdev_rcu(net, d) \ 3126 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) 3127 #define for_each_netdev_safe(net, d, n) \ 3128 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) 3129 #define for_each_netdev_continue(net, d) \ 3130 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) 3131 #define for_each_netdev_continue_reverse(net, d) \ 3132 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \ 3133 dev_list) 3134 #define for_each_netdev_continue_rcu(net, d) \ 3135 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 3136 #define for_each_netdev_in_bond_rcu(bond, slave) \ 3137 for_each_netdev_rcu(&init_net, slave) \ 3138 if (netdev_master_upper_dev_get_rcu(slave) == (bond)) 3139 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 3140 3141 #define for_each_netdev_dump(net, d, ifindex) \ 3142 for (; (d = xa_find(&(net)->dev_by_index, &ifindex, \ 3143 ULONG_MAX, XA_PRESENT)); ifindex++) 3144 3145 static inline struct net_device *next_net_device(struct net_device *dev) 3146 { 3147 struct list_head *lh; 3148 struct net *net; 3149 3150 net = dev_net(dev); 3151 lh = dev->dev_list.next; 3152 return lh == &net->dev_base_head ? 
NULL : net_device_entry(lh); 3153 } 3154 3155 static inline struct net_device *next_net_device_rcu(struct net_device *dev) 3156 { 3157 struct list_head *lh; 3158 struct net *net; 3159 3160 net = dev_net(dev); 3161 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); 3162 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3163 } 3164 3165 static inline struct net_device *first_net_device(struct net *net) 3166 { 3167 return list_empty(&net->dev_base_head) ? NULL : 3168 net_device_entry(net->dev_base_head.next); 3169 } 3170 3171 static inline struct net_device *first_net_device_rcu(struct net *net) 3172 { 3173 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); 3174 3175 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3176 } 3177 3178 int netdev_boot_setup_check(struct net_device *dev); 3179 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 3180 const char *hwaddr); 3181 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); 3182 void dev_add_pack(struct packet_type *pt); 3183 void dev_remove_pack(struct packet_type *pt); 3184 void __dev_remove_pack(struct packet_type *pt); 3185 void dev_add_offload(struct packet_offload *po); 3186 void dev_remove_offload(struct packet_offload *po); 3187 3188 int dev_get_iflink(const struct net_device *dev); 3189 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); 3190 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, 3191 struct net_device_path_stack *stack); 3192 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, 3193 unsigned short mask); 3194 struct net_device *dev_get_by_name(struct net *net, const char *name); 3195 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); 3196 struct net_device *__dev_get_by_name(struct net *net, const char *name); 3197 bool netdev_name_in_use(struct net *net, const char *name); 3198 int dev_alloc_name(struct net_device *dev, const char *name); 3199 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); 3200 void dev_close(struct net_device *dev); 3201 void dev_close_many(struct list_head *head, bool unlink); 3202 void dev_disable_lro(struct net_device *dev); 3203 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); 3204 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 3205 struct net_device *sb_dev); 3206 3207 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev); 3208 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id); 3209 3210 static inline int dev_queue_xmit(struct sk_buff *skb) 3211 { 3212 return __dev_queue_xmit(skb, NULL); 3213 } 3214 3215 static inline int dev_queue_xmit_accel(struct sk_buff *skb, 3216 struct net_device *sb_dev) 3217 { 3218 return __dev_queue_xmit(skb, sb_dev); 3219 } 3220 3221 static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 3222 { 3223 int ret; 3224 3225 ret = __dev_direct_xmit(skb, queue_id); 3226 if (!dev_xmit_complete(ret)) 3227 kfree_skb(skb); 3228 return ret; 3229 } 3230 3231 int register_netdevice(struct net_device *dev); 3232 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); 3233 void unregister_netdevice_many(struct list_head *head); 3234 static inline void unregister_netdevice(struct net_device *dev) 3235 { 3236 unregister_netdevice_queue(dev, NULL); 3237 } 3238 3239 int netdev_refcnt_read(const struct net_device *dev); 3240 void free_netdev(struct net_device *dev); 
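/* Illustrative lifecycle sketch for the registration helpers above; the
 * foo_* names and the priv layout are hypothetical, and error handling is
 * trimmed to the essentials:
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d",
 *			   NET_NAME_ENUM, foo_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	rtnl_lock();
 *	err = register_netdevice(dev);		(NETREG_REGISTERED on success)
 *	rtnl_unlock();
 *	if (err)
 *		free_netdev(dev);
 *	...
 *	unregister_netdev(dev);			(NETREG_UNREGISTERED)
 *	free_netdev(dev);			(NETREG_RELEASED)
 *
 * Drivers that set ->needs_free_netdev instead leave the final
 * free_netdev() to the unregister path.
 */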
3241 void init_dummy_netdev(struct net_device *dev); 3242 3243 struct net_device *netdev_get_xmit_slave(struct net_device *dev, 3244 struct sk_buff *skb, 3245 bool all_slaves); 3246 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 3247 struct sock *sk); 3248 struct net_device *dev_get_by_index(struct net *net, int ifindex); 3249 struct net_device *__dev_get_by_index(struct net *net, int ifindex); 3250 struct net_device *netdev_get_by_index(struct net *net, int ifindex, 3251 netdevice_tracker *tracker, gfp_t gfp); 3252 struct net_device *netdev_get_by_name(struct net *net, const char *name, 3253 netdevice_tracker *tracker, gfp_t gfp); 3254 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); 3255 void netdev_copy_name(struct net_device *dev, char *name); 3256 3257 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 3258 unsigned short type, 3259 const void *daddr, const void *saddr, 3260 unsigned int len) 3261 { 3262 if (!dev->header_ops || !dev->header_ops->create) 3263 return 0; 3264 3265 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); 3266 } 3267 3268 static inline int dev_parse_header(const struct sk_buff *skb, 3269 unsigned char *haddr) 3270 { 3271 const struct net_device *dev = skb->dev; 3272 3273 if (!dev->header_ops || !dev->header_ops->parse) 3274 return 0; 3275 return dev->header_ops->parse(skb, haddr); 3276 } 3277 3278 static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) 3279 { 3280 const struct net_device *dev = skb->dev; 3281 3282 if (!dev->header_ops || !dev->header_ops->parse_protocol) 3283 return 0; 3284 return dev->header_ops->parse_protocol(skb); 3285 } 3286 3287 /* ll_header must have at least hard_header_len allocated */ 3288 static inline bool dev_validate_header(const struct net_device *dev, 3289 char *ll_header, int len) 3290 { 3291 if (likely(len >= dev->hard_header_len)) 3292 return true; 3293 if (len < dev->min_header_len) 3294 return false; 3295 3296 if (capable(CAP_SYS_RAWIO)) { 3297 memset(ll_header + len, 0, dev->hard_header_len - len); 3298 return true; 3299 } 3300 3301 if (dev->header_ops && dev->header_ops->validate) 3302 return dev->header_ops->validate(ll_header, len); 3303 3304 return false; 3305 } 3306 3307 static inline bool dev_has_header(const struct net_device *dev) 3308 { 3309 return dev->header_ops && dev->header_ops->create; 3310 } 3311 3312 /* 3313 * Incoming packets are placed on per-CPU queues 3314 */ 3315 struct softnet_data { 3316 struct list_head poll_list; 3317 struct sk_buff_head process_queue; 3318 local_lock_t process_queue_bh_lock; 3319 3320 /* stats */ 3321 unsigned int processed; 3322 unsigned int time_squeeze; 3323 #ifdef CONFIG_RPS 3324 struct softnet_data *rps_ipi_list; 3325 #endif 3326 3327 unsigned int received_rps; 3328 bool in_net_rx_action; 3329 bool in_napi_threaded_poll; 3330 3331 #ifdef CONFIG_NET_FLOW_LIMIT 3332 struct sd_flow_limit __rcu *flow_limit; 3333 #endif 3334 struct Qdisc *output_queue; 3335 struct Qdisc **output_queue_tailp; 3336 struct sk_buff *completion_queue; 3337 #ifdef CONFIG_XFRM_OFFLOAD 3338 struct sk_buff_head xfrm_backlog; 3339 #endif 3340 /* written and read only by owning cpu: */ 3341 struct netdev_xmit xmit; 3342 #ifdef CONFIG_RPS 3343 /* input_queue_head should be written by cpu owning this struct, 3344 * and only read by other cpus. Worth using a cache line. 
 */
	unsigned int	input_queue_head ____cacheline_aligned_in_smp;

	/* Elements below can be accessed between CPUs for RPS/RFS */
	call_single_data_t	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_tail;
#endif
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;

	atomic_t		dropped ____cacheline_aligned_in_smp;

	/* Another possibly contended cache line */
	spinlock_t		defer_lock ____cacheline_aligned_in_smp;
	int			defer_count;
	int			defer_ipi_scheduled;
	struct sk_buff		*defer_list;
	call_single_data_t	defer_csd;
};

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
DECLARE_PER_CPU(struct page_pool *, system_page_pool);

#ifndef CONFIG_PREEMPT_RT
static inline int dev_recursion_level(void)
{
	return this_cpu_read(softnet_data.xmit.recursion);
}
#else
static inline int dev_recursion_level(void)
{
	return current->net_xmit.recursion;
}

#endif

void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_start_queue - allow transmit
 * @dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

void netif_tx_wake_queue(struct netdev_queue *dev_queue);

/**
 * netif_wake_queue - restart transmit
 * @dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 * Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	/* Paired with READ_ONCE() from dev_watchdog() */
	WRITE_ONCE(dev_queue->trans_start, jiffies);

	/* This barrier is paired with smp_mb() from dev_watchdog() */
	smp_mb__before_atomic();

	/* Must be an atomic op, see netif_txq_try_stop() */
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_stop_queue - stop the transmit queue
 * @dev: network device
 *
 * Stop upper layers from calling the device hard_start_xmit routine.
 * Used for flow control when transmit resources are unavailable.
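 *
 * A common single-queue driver pattern in ndo_start_xmit() (sketch only;
 * the ring-space helper is hypothetical):
 *
 *	if (my_tx_ring_space(priv) < MAX_SKB_FRAGS + 1) {
 *		netif_stop_queue(dev);
 *		return NETDEV_TX_BUSY;
 *	}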
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

void netif_tx_stop_all_queues(struct net_device *dev);

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_queue_stopped - test if transmit queue is flow blocked
 * @dev: network device
 *
 * Test if the transmit queue on the device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}

/**
 * netdev_queue_set_dql_min_limit - set dql minimum limit
 * @dev_queue: pointer to transmit queue
 * @min_limit: dql minimum limit
 *
 * Forces xmit_more() to return true until the minimum threshold
 * defined by @min_limit is reached (or until the tx queue is
 * empty). Warning: to be used with care; misuse will impact
 * latency.
 */
static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
						  unsigned int min_limit)
{
#ifdef CONFIG_BQL
	dev_queue->dql.min_limit = min_limit;
#endif
}

static inline int netdev_queue_dql_avail(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

/**
 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
 * @dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
 * to give an appropriate hint to the CPU.
 */
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.num_queued);
#endif
}

/**
 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
 * @dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their TX completion path,
 * to give an appropriate hint to the CPU.
 */
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.limit);
#endif
}

/**
 * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
 * @dev_queue: network device queue
 * @bytes: number of bytes queued to the device queue
 *
 * Report the number of bytes queued for sending/completion to the network
 * device hardware queue. @bytes should be a good approximation and should
 * exactly match netdev_completed_queue() @bytes.
 * This is typically called once per packet, from ndo_start_xmit().
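 *
 * Sketch of the enqueue side (illustrative only; the descriptor-posting
 * helper and priv pointer are hypothetical):
 *
 *	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 *	my_post_descriptor(priv, skb);
 *	netdev_tx_sent_queue(txq, skb->len);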
 */
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	/* Paired with READ_ONCE() from dev_watchdog() */
	WRITE_ONCE(dev_queue->trans_start, jiffies);

	/* This barrier is paired with smp_mb() from dev_watchdog() */
	smp_mb__before_atomic();

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
	smp_mb__after_atomic();

	/* check again in case another CPU has just made room available */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}

/* Variant of netdev_tx_sent_queue() for drivers that are aware
 * that they should not test BQL status themselves.
 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
 * skb of a batch.
 * Returns true if the doorbell must be used to kick the NIC.
 */
static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					  unsigned int bytes,
					  bool xmit_more)
{
	if (xmit_more) {
#ifdef CONFIG_BQL
		dql_queued(&dev_queue->dql, bytes);
#endif
		return netif_tx_queue_stopped(dev_queue);
	}
	netdev_tx_sent_queue(dev_queue, bytes);
	return true;
}

/**
 * netdev_sent_queue - report the number of bytes queued to hardware
 * @dev: network device
 * @bytes: number of bytes queued to the hardware device queue
 *
 * Report the number of bytes queued for sending/completion to the network
 * device hardware queue #0. @bytes should be a good approximation and should
 * exactly match netdev_completed_queue() @bytes.
 * This is typically called once per packet, from ndo_start_xmit().
 */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline bool __netdev_sent_queue(struct net_device *dev,
				       unsigned int bytes,
				       bool xmit_more)
{
	return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
				      xmit_more);
}

/**
 * netdev_tx_completed_queue - report number of packets/bytes at TX completion.
 * @dev_queue: network device queue
 * @pkts: number of packets (currently ignored)
 * @bytes: number of bytes dequeued from the device queue
 *
 * Must be called at most once per TX completion round (and not per
 * individual packet), so that BQL can adjust its limits appropriately.
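 *
 * A matching completion-side sketch (illustrative only; the reaping helper
 * and priv/txq pointers are hypothetical):
 *
 *	unsigned int pkts = 0, bytes = 0;
 *
 *	while ((skb = my_reap_tx_skb(priv)) != NULL) {
 *		bytes += skb->len;
 *		pkts++;
 *		dev_consume_skb_any(skb);
 *	}
 *	netdev_tx_completed_queue(txq, pkts, bytes);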
 */
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever
	 */
	smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */

	if (unlikely(dql_avail(&dev_queue->dql) < 0))
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}

/**
 * netdev_completed_queue - report bytes and packets completed by device
 * @dev: network device
 * @pkts: actual number of packets sent over the medium
 * @bytes: actual number of bytes sent over the medium
 *
 * Report the number of bytes and packets transmitted by the network device
 * hardware queue over the physical medium. @bytes must exactly match the
 * @bytes amount passed to netdev_sent_queue().
 */
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}

static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}

/**
 * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue
 * @dev: network device
 * @qid: stack index of the queue to reset
 */
static inline void netdev_tx_reset_subqueue(const struct net_device *dev,
					    u32 qid)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
}

/**
 * netdev_reset_queue - reset the packets and bytes count of a network device
 * @dev_queue: network device
 *
 * Reset the bytes and packet count of a network device and clear the
 * software flow control OFF bit for this network device.
 */
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_subqueue(dev_queue, 0);
}

/**
 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
 * @dev: network device
 * @queue_index: given tx queue index
 *
 * Returns 0 if the given tx queue index is >= the number of device tx queues,
 * otherwise returns the originally passed tx queue index.
 */
static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}

	return queue_index;
}

/**
 * netif_running - test if up
 * @dev: network device
 *
 * Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device. We only need start,
 * stop, and a check if it's stopped. All other device management is
 * done at the overall netdevice level; there is also a helper to test
 * whether the device is multiqueue.
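 *
 * A typical multiqueue flow-control pattern (sketch only; the ring
 * accounting helpers are hypothetical):
 *
 *	if (my_ring_full(ring))
 *		netif_stop_subqueue(dev, qid);
 *	...
 *	if (my_ring_has_room(ring) && __netif_subqueue_stopped(dev, qid))
 *		netif_wake_subqueue(dev, qid);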
3758 */ 3759 3760 /** 3761 * netif_start_subqueue - allow sending packets on subqueue 3762 * @dev: network device 3763 * @queue_index: sub queue index 3764 * 3765 * Start individual transmit queue of a device with multiple transmit queues. 3766 */ 3767 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) 3768 { 3769 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3770 3771 netif_tx_start_queue(txq); 3772 } 3773 3774 /** 3775 * netif_stop_subqueue - stop sending packets on subqueue 3776 * @dev: network device 3777 * @queue_index: sub queue index 3778 * 3779 * Stop individual transmit queue of a device with multiple transmit queues. 3780 */ 3781 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) 3782 { 3783 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3784 netif_tx_stop_queue(txq); 3785 } 3786 3787 /** 3788 * __netif_subqueue_stopped - test status of subqueue 3789 * @dev: network device 3790 * @queue_index: sub queue index 3791 * 3792 * Check individual transmit queue of a device with multiple transmit queues. 3793 */ 3794 static inline bool __netif_subqueue_stopped(const struct net_device *dev, 3795 u16 queue_index) 3796 { 3797 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3798 3799 return netif_tx_queue_stopped(txq); 3800 } 3801 3802 /** 3803 * netif_subqueue_stopped - test status of subqueue 3804 * @dev: network device 3805 * @skb: sub queue buffer pointer 3806 * 3807 * Check individual transmit queue of a device with multiple transmit queues. 3808 */ 3809 static inline bool netif_subqueue_stopped(const struct net_device *dev, 3810 struct sk_buff *skb) 3811 { 3812 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); 3813 } 3814 3815 /** 3816 * netif_wake_subqueue - allow sending packets on subqueue 3817 * @dev: network device 3818 * @queue_index: sub queue index 3819 * 3820 * Resume individual transmit queue of a device with multiple transmit queues. 3821 */ 3822 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) 3823 { 3824 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3825 3826 netif_tx_wake_queue(txq); 3827 } 3828 3829 #ifdef CONFIG_XPS 3830 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 3831 u16 index); 3832 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 3833 u16 index, enum xps_map_type type); 3834 3835 /** 3836 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask 3837 * @j: CPU/Rx queue index 3838 * @mask: bitmask of all cpus/rx queues 3839 * @nr_bits: number of bits in the bitmask 3840 * 3841 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. 3842 */ 3843 static inline bool netif_attr_test_mask(unsigned long j, 3844 const unsigned long *mask, 3845 unsigned int nr_bits) 3846 { 3847 cpu_max_bits_warn(j, nr_bits); 3848 return test_bit(j, mask); 3849 } 3850 3851 /** 3852 * netif_attr_test_online - Test for online CPU/Rx queue 3853 * @j: CPU/Rx queue index 3854 * @online_mask: bitmask for CPUs/Rx queues that are online 3855 * @nr_bits: number of bits in the bitmask 3856 * 3857 * Returns: true if a CPU/Rx queue is online. 
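 *
 * For example, with a NULL @online_mask every index below @nr_bits is
 * considered online: netif_attr_test_online(2, NULL, 4) returns true.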
3858 */ 3859 static inline bool netif_attr_test_online(unsigned long j, 3860 const unsigned long *online_mask, 3861 unsigned int nr_bits) 3862 { 3863 cpu_max_bits_warn(j, nr_bits); 3864 3865 if (online_mask) 3866 return test_bit(j, online_mask); 3867 3868 return (j < nr_bits); 3869 } 3870 3871 /** 3872 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask 3873 * @n: CPU/Rx queue index 3874 * @srcp: the cpumask/Rx queue mask pointer 3875 * @nr_bits: number of bits in the bitmask 3876 * 3877 * Returns: next (after n) CPU/Rx queue index in the mask; 3878 * >= nr_bits if no further CPUs/Rx queues set. 3879 */ 3880 static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, 3881 unsigned int nr_bits) 3882 { 3883 /* -1 is a legal arg here. */ 3884 if (n != -1) 3885 cpu_max_bits_warn(n, nr_bits); 3886 3887 if (srcp) 3888 return find_next_bit(srcp, nr_bits, n + 1); 3889 3890 return n + 1; 3891 } 3892 3893 /** 3894 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p 3895 * @n: CPU/Rx queue index 3896 * @src1p: the first CPUs/Rx queues mask pointer 3897 * @src2p: the second CPUs/Rx queues mask pointer 3898 * @nr_bits: number of bits in the bitmask 3899 * 3900 * Returns: next (after n) CPU/Rx queue index set in both masks; 3901 * >= nr_bits if no further CPUs/Rx queues set in both. 3902 */ 3903 static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, 3904 const unsigned long *src2p, 3905 unsigned int nr_bits) 3906 { 3907 /* -1 is a legal arg here. */ 3908 if (n != -1) 3909 cpu_max_bits_warn(n, nr_bits); 3910 3911 if (src1p && src2p) 3912 return find_next_and_bit(src1p, src2p, nr_bits, n + 1); 3913 else if (src1p) 3914 return find_next_bit(src1p, nr_bits, n + 1); 3915 else if (src2p) 3916 return find_next_bit(src2p, nr_bits, n + 1); 3917 3918 return n + 1; 3919 } 3920 #else 3921 static inline int netif_set_xps_queue(struct net_device *dev, 3922 const struct cpumask *mask, 3923 u16 index) 3924 { 3925 return 0; 3926 } 3927 3928 static inline int __netif_set_xps_queue(struct net_device *dev, 3929 const unsigned long *mask, 3930 u16 index, enum xps_map_type type) 3931 { 3932 return 0; 3933 } 3934 #endif 3935 3936 /** 3937 * netif_is_multiqueue - test if device has multiple transmit queues 3938 * @dev: network device 3939 * 3940 * Check if device has multiple transmit queues 3941 */ 3942 static inline bool netif_is_multiqueue(const struct net_device *dev) 3943 { 3944 return dev->num_tx_queues > 1; 3945 } 3946 3947 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); 3948 3949 #ifdef CONFIG_SYSFS 3950 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); 3951 #else 3952 static inline int netif_set_real_num_rx_queues(struct net_device *dev, 3953 unsigned int rxqs) 3954 { 3955 dev->real_num_rx_queues = rxqs; 3956 return 0; 3957 } 3958 #endif 3959 int netif_set_real_num_queues(struct net_device *dev, 3960 unsigned int txq, unsigned int rxq); 3961 3962 int netif_get_num_default_rss_queues(void); 3963 3964 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason); 3965 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason); 3966 3967 /* 3968 * It is not allowed to call kfree_skb() or consume_skb() from hardware 3969 * interrupt context or with hardware interrupts being disabled. 
 * (in_hardirq() || irqs_disabled())
 *
 * We provide four helpers that can be used in the following contexts:
 *
 * dev_kfree_skb_irq(skb) when the caller drops a packet from irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_irq(skb) when the caller consumes a packet from irq context.
 *  Typically used in place of consume_skb(skb) in the TX completion path
 *
 * dev_kfree_skb_any(skb) when the caller doesn't know its current irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_any(skb) when the caller doesn't know its current irq
 *  context and has consumed a packet. Used in place of consume_skb(skb)
 */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}

static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
	dev_kfree_skb_irq_reason(skb, SKB_CONSUMED);
}

static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}

static inline void dev_consume_skb_any(struct sk_buff *skb)
{
	dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
}

u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
			     const struct bpf_prog *xdp_prog);
void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog);
int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb);
int netif_rx(struct sk_buff *skb);
int __netif_rx(struct sk_buff *skb);

int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
void netif_receive_skb_list_internal(struct list_head *head);
void netif_receive_skb_list(struct list_head *head);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
void napi_get_frags_check(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}

bool netdev_is_rx_handler_busy(struct net_device *dev);
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data);
void netdev_rx_handler_unregister(struct net_device *dev);

bool dev_valid_name(const char *name);
static inline bool is_socket_ioctl_cmd(unsigned int cmd)
{
	return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
}
int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
int put_user_ifreq(struct ifreq *ifr, void __user *arg);
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
	      void __user *data, bool *need_copyout);
int dev_ifconf(struct net *net, struct ifconf __user *ifc);
int generic_hwtstamp_get_lower(struct net_device *dev,
			       struct kernel_hwtstamp_config *kernel_cfg);
int generic_hwtstamp_set_lower(struct net_device *dev,
			       struct kernel_hwtstamp_config *kernel_cfg,
			       struct netlink_ext_ack *extack);
int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
		       struct netlink_ext_ack
*extack); 4054 int dev_change_flags(struct net_device *dev, unsigned int flags, 4055 struct netlink_ext_ack *extack); 4056 int dev_set_alias(struct net_device *, const char *, size_t); 4057 int dev_get_alias(const struct net_device *, char *, size_t); 4058 int __dev_change_net_namespace(struct net_device *dev, struct net *net, 4059 const char *pat, int new_ifindex); 4060 static inline 4061 int dev_change_net_namespace(struct net_device *dev, struct net *net, 4062 const char *pat) 4063 { 4064 return __dev_change_net_namespace(dev, net, pat, 0); 4065 } 4066 int __dev_set_mtu(struct net_device *, int); 4067 int dev_set_mtu(struct net_device *, int); 4068 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 4069 struct netlink_ext_ack *extack); 4070 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 4071 struct netlink_ext_ack *extack); 4072 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 4073 struct netlink_ext_ack *extack); 4074 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); 4075 int dev_get_port_parent_id(struct net_device *dev, 4076 struct netdev_phys_item_id *ppid, bool recurse); 4077 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); 4078 4079 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); 4080 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 4081 struct netdev_queue *txq, int *ret); 4082 4083 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); 4084 u8 dev_xdp_prog_count(struct net_device *dev); 4085 int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf); 4086 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); 4087 4088 u32 dev_get_min_mp_channel_count(const struct net_device *dev); 4089 4090 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 4091 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 4092 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb); 4093 bool is_skb_forwardable(const struct net_device *dev, 4094 const struct sk_buff *skb); 4095 4096 static __always_inline bool __is_skb_forwardable(const struct net_device *dev, 4097 const struct sk_buff *skb, 4098 const bool check_mtu) 4099 { 4100 const u32 vlan_hdr_len = 4; /* VLAN_HLEN */ 4101 unsigned int len; 4102 4103 if (!(dev->flags & IFF_UP)) 4104 return false; 4105 4106 if (!check_mtu) 4107 return true; 4108 4109 len = dev->mtu + dev->hard_header_len + vlan_hdr_len; 4110 if (skb->len <= len) 4111 return true; 4112 4113 /* if TSO is enabled, we don't care about the length as the packet 4114 * could be forwarded without being segmented before 4115 */ 4116 if (skb_is_gso(skb)) 4117 return true; 4118 4119 return false; 4120 } 4121 4122 void netdev_core_stats_inc(struct net_device *dev, u32 offset); 4123 4124 #define DEV_CORE_STATS_INC(FIELD) \ 4125 static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ 4126 { \ 4127 netdev_core_stats_inc(dev, \ 4128 offsetof(struct net_device_core_stats, FIELD)); \ 4129 } 4130 DEV_CORE_STATS_INC(rx_dropped) 4131 DEV_CORE_STATS_INC(tx_dropped) 4132 DEV_CORE_STATS_INC(rx_nohandler) 4133 DEV_CORE_STATS_INC(rx_otherhost_dropped) 4134 #undef DEV_CORE_STATS_INC 4135 4136 static __always_inline int ____dev_forward_skb(struct net_device *dev, 4137 struct sk_buff *skb, 4138 const bool check_mtu) 4139 { 4140 if (skb_orphan_frags(skb, GFP_ATOMIC) || 4141 
	    unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
		dev_core_stats_rx_dropped_inc(dev);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
	skb->priority = 0;
	return 0;
}

bool dev_nit_active(struct net_device *dev);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);

static inline void __dev_put(struct net_device *dev)
{
	if (dev) {
#ifdef CONFIG_PCPU_DEV_REFCNT
		this_cpu_dec(*dev->pcpu_refcnt);
#else
		refcount_dec(&dev->dev_refcnt);
#endif
	}
}

static inline void __dev_hold(struct net_device *dev)
{
	if (dev) {
#ifdef CONFIG_PCPU_DEV_REFCNT
		this_cpu_inc(*dev->pcpu_refcnt);
#else
		refcount_inc(&dev->dev_refcnt);
#endif
	}
}

static inline void __netdev_tracker_alloc(struct net_device *dev,
					  netdevice_tracker *tracker,
					  gfp_t gfp)
{
#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
	ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp);
#endif
}

/* netdev_tracker_alloc() can upgrade a prior untracked reference
 * taken by dev_get_by_name()/dev_get_by_index() to a tracked one.
 */
static inline void netdev_tracker_alloc(struct net_device *dev,
					netdevice_tracker *tracker, gfp_t gfp)
{
#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
	refcount_dec(&dev->refcnt_tracker.no_tracker);
	__netdev_tracker_alloc(dev, tracker, gfp);
#endif
}

static inline void netdev_tracker_free(struct net_device *dev,
				       netdevice_tracker *tracker)
{
#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
	ref_tracker_free(&dev->refcnt_tracker, tracker);
#endif
}

static inline void netdev_hold(struct net_device *dev,
			       netdevice_tracker *tracker, gfp_t gfp)
{
	if (dev) {
		__dev_hold(dev);
		__netdev_tracker_alloc(dev, tracker, gfp);
	}
}

static inline void netdev_put(struct net_device *dev,
			      netdevice_tracker *tracker)
{
	if (dev) {
		netdev_tracker_free(dev, tracker);
		__dev_put(dev);
	}
}

/**
 * dev_hold - get reference to device
 * @dev: network device
 *
 * Hold a reference to the device to keep it from being freed.
 * Try using netdev_hold() instead.
 */
static inline void dev_hold(struct net_device *dev)
{
	netdev_hold(dev, NULL, GFP_ATOMIC);
}

/**
 * dev_put - release reference to device
 * @dev: network device
 *
 * Release a reference to the device to allow it to be freed.
 * Try using netdev_put() instead.
 */
static inline void dev_put(struct net_device *dev)
{
	netdev_put(dev, NULL);
}

DEFINE_FREE(dev_put, struct net_device *, if (_T) dev_put(_T))

static inline void netdev_ref_replace(struct net_device *odev,
				      struct net_device *ndev,
				      netdevice_tracker *tracker,
				      gfp_t gfp)
{
	if (odev)
		netdev_tracker_free(odev, tracker);

	__dev_hold(ndev);
	__dev_put(odev);

	if (ndev)
		__netdev_tracker_alloc(ndev, tracker, gfp);
}

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
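 *
 * Typical usage from a driver's link-change handler (sketch only; the
 * PHY status helper is hypothetical):
 *
 *	if (my_phy_link_up(priv) && !netif_carrier_ok(dev))
 *		netif_carrier_on(dev);
 *	else if (!my_phy_link_up(priv) && netif_carrier_ok(dev))
 *		netif_carrier_off(dev);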
 *
 * The name 'carrier' is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */
void linkwatch_fire_event(struct net_device *dev);

/**
 * linkwatch_sync_dev - sync linkwatch for the given device
 * @dev: network device to sync linkwatch for
 *
 * Sync linkwatch for the given device, removing it from the
 * pending work list (if queued).
 */
void linkwatch_sync_dev(struct net_device *dev);

/**
 * netif_carrier_ok - test if carrier present
 * @dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

unsigned long dev_trans_start(struct net_device *dev);

void netdev_watchdog_up(struct net_device *dev);

void netif_carrier_on(struct net_device *dev);
void netif_carrier_off(struct net_device *dev);
void netif_carrier_event(struct net_device *dev);

/**
 * netif_dormant_on - mark device as dormant.
 * @dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event. For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant_off - set device as not dormant.
 * @dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant - test if device is dormant
 * @dev: network device
 *
 * Check if device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 * netif_testing_on - mark device as under test.
 * @dev: network device
 *
 * Mark device as under test (as per RFC2863).
 *
 * The testing state indicates that some test(s) must be performed on
 * the interface. After completion of the test, the interface state
 * will change to up, dormant, or down, as appropriate.
 */
static inline void netif_testing_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_testing_off - set device as not under test.
 * @dev: network device
 *
 * Device is not in testing state.
4366 */ 4367 static inline void netif_testing_off(struct net_device *dev) 4368 { 4369 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) 4370 linkwatch_fire_event(dev); 4371 } 4372 4373 /** 4374 * netif_testing - test if device is under test 4375 * @dev: network device 4376 * 4377 * Check if device is under test 4378 */ 4379 static inline bool netif_testing(const struct net_device *dev) 4380 { 4381 return test_bit(__LINK_STATE_TESTING, &dev->state); 4382 } 4383 4384 4385 /** 4386 * netif_oper_up - test if device is operational 4387 * @dev: network device 4388 * 4389 * Check if carrier is operational 4390 */ 4391 static inline bool netif_oper_up(const struct net_device *dev) 4392 { 4393 unsigned int operstate = READ_ONCE(dev->operstate); 4394 4395 return operstate == IF_OPER_UP || 4396 operstate == IF_OPER_UNKNOWN /* backward compat */; 4397 } 4398 4399 /** 4400 * netif_device_present - is device available or removed 4401 * @dev: network device 4402 * 4403 * Check if device has not been removed from system. 4404 */ 4405 static inline bool netif_device_present(const struct net_device *dev) 4406 { 4407 return test_bit(__LINK_STATE_PRESENT, &dev->state); 4408 } 4409 4410 void netif_device_detach(struct net_device *dev); 4411 4412 void netif_device_attach(struct net_device *dev); 4413 4414 /* 4415 * Network interface message level settings 4416 */ 4417 4418 enum { 4419 NETIF_MSG_DRV_BIT, 4420 NETIF_MSG_PROBE_BIT, 4421 NETIF_MSG_LINK_BIT, 4422 NETIF_MSG_TIMER_BIT, 4423 NETIF_MSG_IFDOWN_BIT, 4424 NETIF_MSG_IFUP_BIT, 4425 NETIF_MSG_RX_ERR_BIT, 4426 NETIF_MSG_TX_ERR_BIT, 4427 NETIF_MSG_TX_QUEUED_BIT, 4428 NETIF_MSG_INTR_BIT, 4429 NETIF_MSG_TX_DONE_BIT, 4430 NETIF_MSG_RX_STATUS_BIT, 4431 NETIF_MSG_PKTDATA_BIT, 4432 NETIF_MSG_HW_BIT, 4433 NETIF_MSG_WOL_BIT, 4434 4435 /* When you add a new bit above, update netif_msg_class_names array 4436 * in net/ethtool/common.c 4437 */ 4438 NETIF_MSG_CLASS_COUNT, 4439 }; 4440 /* Both ethtool_ops interface and internal driver implementation use u32 */ 4441 static_assert(NETIF_MSG_CLASS_COUNT <= 32); 4442 4443 #define __NETIF_MSG_BIT(bit) ((u32)1 << (bit)) 4444 #define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT) 4445 4446 #define NETIF_MSG_DRV __NETIF_MSG(DRV) 4447 #define NETIF_MSG_PROBE __NETIF_MSG(PROBE) 4448 #define NETIF_MSG_LINK __NETIF_MSG(LINK) 4449 #define NETIF_MSG_TIMER __NETIF_MSG(TIMER) 4450 #define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN) 4451 #define NETIF_MSG_IFUP __NETIF_MSG(IFUP) 4452 #define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR) 4453 #define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR) 4454 #define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED) 4455 #define NETIF_MSG_INTR __NETIF_MSG(INTR) 4456 #define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE) 4457 #define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS) 4458 #define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA) 4459 #define NETIF_MSG_HW __NETIF_MSG(HW) 4460 #define NETIF_MSG_WOL __NETIF_MSG(WOL) 4461 4462 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) 4463 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) 4464 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) 4465 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) 4466 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) 4467 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) 4468 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) 4469 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) 4470 #define netif_msg_tx_queued(p) ((p)->msg_enable & 
NETIF_MSG_TX_QUEUED) 4471 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) 4472 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) 4473 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) 4474 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) 4475 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) 4476 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) 4477 4478 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) 4479 { 4480 /* use default */ 4481 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) 4482 return default_msg_enable_bits; 4483 if (debug_value == 0) /* no output */ 4484 return 0; 4485 /* set low N bits */ 4486 return (1U << debug_value) - 1; 4487 } 4488 4489 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) 4490 { 4491 spin_lock(&txq->_xmit_lock); 4492 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4493 WRITE_ONCE(txq->xmit_lock_owner, cpu); 4494 } 4495 4496 static inline bool __netif_tx_acquire(struct netdev_queue *txq) 4497 { 4498 __acquire(&txq->_xmit_lock); 4499 return true; 4500 } 4501 4502 static inline void __netif_tx_release(struct netdev_queue *txq) 4503 { 4504 __release(&txq->_xmit_lock); 4505 } 4506 4507 static inline void __netif_tx_lock_bh(struct netdev_queue *txq) 4508 { 4509 spin_lock_bh(&txq->_xmit_lock); 4510 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4511 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4512 } 4513 4514 static inline bool __netif_tx_trylock(struct netdev_queue *txq) 4515 { 4516 bool ok = spin_trylock(&txq->_xmit_lock); 4517 4518 if (likely(ok)) { 4519 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4520 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4521 } 4522 return ok; 4523 } 4524 4525 static inline void __netif_tx_unlock(struct netdev_queue *txq) 4526 { 4527 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4528 WRITE_ONCE(txq->xmit_lock_owner, -1); 4529 spin_unlock(&txq->_xmit_lock); 4530 } 4531 4532 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) 4533 { 4534 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4535 WRITE_ONCE(txq->xmit_lock_owner, -1); 4536 spin_unlock_bh(&txq->_xmit_lock); 4537 } 4538 4539 /* 4540 * txq->trans_start can be read locklessly from dev_watchdog() 4541 */ 4542 static inline void txq_trans_update(struct netdev_queue *txq) 4543 { 4544 if (txq->xmit_lock_owner != -1) 4545 WRITE_ONCE(txq->trans_start, jiffies); 4546 } 4547 4548 static inline void txq_trans_cond_update(struct netdev_queue *txq) 4549 { 4550 unsigned long now = jiffies; 4551 4552 if (READ_ONCE(txq->trans_start) != now) 4553 WRITE_ONCE(txq->trans_start, now); 4554 } 4555 4556 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ 4557 static inline void netif_trans_update(struct net_device *dev) 4558 { 4559 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 4560 4561 txq_trans_cond_update(txq); 4562 } 4563 4564 /** 4565 * netif_tx_lock - grab network device transmit lock 4566 * @dev: network device 4567 * 4568 * Get network device transmit lock 4569 */ 4570 void netif_tx_lock(struct net_device *dev); 4571 4572 static inline void netif_tx_lock_bh(struct net_device *dev) 4573 { 4574 local_bh_disable(); 4575 netif_tx_lock(dev); 4576 } 4577 4578 void netif_tx_unlock(struct net_device *dev); 4579 4580 static inline void netif_tx_unlock_bh(struct net_device *dev) 4581 { 4582 netif_tx_unlock(dev); 4583 local_bh_enable(); 4584 } 4585 4586 #define HARD_TX_LOCK(dev, txq, 
cpu) { \ 4587 if (!(dev)->lltx) { \ 4588 __netif_tx_lock(txq, cpu); \ 4589 } else { \ 4590 __netif_tx_acquire(txq); \ 4591 } \ 4592 } 4593 4594 #define HARD_TX_TRYLOCK(dev, txq) \ 4595 (!(dev)->lltx ? \ 4596 __netif_tx_trylock(txq) : \ 4597 __netif_tx_acquire(txq)) 4598 4599 #define HARD_TX_UNLOCK(dev, txq) { \ 4600 if (!(dev)->lltx) { \ 4601 __netif_tx_unlock(txq); \ 4602 } else { \ 4603 __netif_tx_release(txq); \ 4604 } \ 4605 } 4606 4607 static inline void netif_tx_disable(struct net_device *dev) 4608 { 4609 unsigned int i; 4610 int cpu; 4611 4612 local_bh_disable(); 4613 cpu = smp_processor_id(); 4614 spin_lock(&dev->tx_global_lock); 4615 for (i = 0; i < dev->num_tx_queues; i++) { 4616 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 4617 4618 __netif_tx_lock(txq, cpu); 4619 netif_tx_stop_queue(txq); 4620 __netif_tx_unlock(txq); 4621 } 4622 spin_unlock(&dev->tx_global_lock); 4623 local_bh_enable(); 4624 } 4625 4626 static inline void netif_addr_lock(struct net_device *dev) 4627 { 4628 unsigned char nest_level = 0; 4629 4630 #ifdef CONFIG_LOCKDEP 4631 nest_level = dev->nested_level; 4632 #endif 4633 spin_lock_nested(&dev->addr_list_lock, nest_level); 4634 } 4635 4636 static inline void netif_addr_lock_bh(struct net_device *dev) 4637 { 4638 unsigned char nest_level = 0; 4639 4640 #ifdef CONFIG_LOCKDEP 4641 nest_level = dev->nested_level; 4642 #endif 4643 local_bh_disable(); 4644 spin_lock_nested(&dev->addr_list_lock, nest_level); 4645 } 4646 4647 static inline void netif_addr_unlock(struct net_device *dev) 4648 { 4649 spin_unlock(&dev->addr_list_lock); 4650 } 4651 4652 static inline void netif_addr_unlock_bh(struct net_device *dev) 4653 { 4654 spin_unlock_bh(&dev->addr_list_lock); 4655 } 4656 4657 /* 4658 * dev_addrs walker. Should be used only for read access. Call with 4659 * rcu_read_lock held. 
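 *
 * Example (sketch):
 *
 *	struct netdev_hw_addr *ha;
 *
 *	rcu_read_lock();
 *	for_each_dev_addr(dev, ha)
 *		pr_info("%s: %pM\n", dev->name, ha->addr);
 *	rcu_read_unlock();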
4660 */ 4661 #define for_each_dev_addr(dev, ha) \ 4662 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) 4663 4664 /* These functions live elsewhere (drivers/net/net_init.c, but related) */ 4665 4666 void ether_setup(struct net_device *dev); 4667 4668 /* Allocate dummy net_device */ 4669 struct net_device *alloc_netdev_dummy(int sizeof_priv); 4670 4671 /* Support for loadable net-drivers */ 4672 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 4673 unsigned char name_assign_type, 4674 void (*setup)(struct net_device *), 4675 unsigned int txqs, unsigned int rxqs); 4676 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ 4677 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) 4678 4679 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \ 4680 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \ 4681 count) 4682 4683 int register_netdev(struct net_device *dev); 4684 void unregister_netdev(struct net_device *dev); 4685 4686 int devm_register_netdev(struct device *dev, struct net_device *ndev); 4687 4688 /* General hardware address lists handling functions */ 4689 int __hw_addr_sync(struct netdev_hw_addr_list *to_list, 4690 struct netdev_hw_addr_list *from_list, int addr_len); 4691 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, 4692 struct netdev_hw_addr_list *from_list, int addr_len); 4693 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, 4694 struct net_device *dev, 4695 int (*sync)(struct net_device *, const unsigned char *), 4696 int (*unsync)(struct net_device *, 4697 const unsigned char *)); 4698 int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, 4699 struct net_device *dev, 4700 int (*sync)(struct net_device *, 4701 const unsigned char *, int), 4702 int (*unsync)(struct net_device *, 4703 const unsigned char *, int)); 4704 void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, 4705 struct net_device *dev, 4706 int (*unsync)(struct net_device *, 4707 const unsigned char *, int)); 4708 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, 4709 struct net_device *dev, 4710 int (*unsync)(struct net_device *, 4711 const unsigned char *)); 4712 void __hw_addr_init(struct netdev_hw_addr_list *list); 4713 4714 /* Functions used for device addresses handling */ 4715 void dev_addr_mod(struct net_device *dev, unsigned int offset, 4716 const void *addr, size_t len); 4717 4718 static inline void 4719 __dev_addr_set(struct net_device *dev, const void *addr, size_t len) 4720 { 4721 dev_addr_mod(dev, 0, addr, len); 4722 } 4723 4724 static inline void dev_addr_set(struct net_device *dev, const u8 *addr) 4725 { 4726 __dev_addr_set(dev, addr, dev->addr_len); 4727 } 4728 4729 int dev_addr_add(struct net_device *dev, const unsigned char *addr, 4730 unsigned char addr_type); 4731 int dev_addr_del(struct net_device *dev, const unsigned char *addr, 4732 unsigned char addr_type); 4733 4734 /* Functions used for unicast addresses handling */ 4735 int dev_uc_add(struct net_device *dev, const unsigned char *addr); 4736 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); 4737 int dev_uc_del(struct net_device *dev, const unsigned char *addr); 4738 int dev_uc_sync(struct net_device *to, struct net_device *from); 4739 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); 4740 void dev_uc_unsync(struct net_device *to, struct net_device *from); 4741 void dev_uc_flush(struct net_device *dev); 4742 void dev_uc_init(struct net_device *dev); 
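/* Sketch of typical ndo_set_rx_mode() usage of the sync helpers below
 * (illustrative only; my_sync_addr/my_unsync_addr are hypothetical driver
 * callbacks that program the receive filter hardware):
 *
 *	static void my_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_uc_sync(dev, my_sync_addr, my_unsync_addr);
 *		__dev_mc_sync(dev, my_sync_addr, my_unsync_addr);
 *	}
 */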
4743 4744 /** 4745 * __dev_uc_sync - Synchronize device's unicast list 4746 * @dev: device to sync 4747 * @sync: function to call if address should be added 4748 * @unsync: function to call if address should be removed 4749 * 4750 * Add newly added addresses to the interface, and release 4751 * addresses that have been deleted. 4752 */ 4753 static inline int __dev_uc_sync(struct net_device *dev, 4754 int (*sync)(struct net_device *, 4755 const unsigned char *), 4756 int (*unsync)(struct net_device *, 4757 const unsigned char *)) 4758 { 4759 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); 4760 } 4761 4762 /** 4763 * __dev_uc_unsync - Remove synchronized addresses from device 4764 * @dev: device to sync 4765 * @unsync: function to call if address should be removed 4766 * 4767 * Remove all addresses that were added to the device by dev_uc_sync(). 4768 */ 4769 static inline void __dev_uc_unsync(struct net_device *dev, 4770 int (*unsync)(struct net_device *, 4771 const unsigned char *)) 4772 { 4773 __hw_addr_unsync_dev(&dev->uc, dev, unsync); 4774 } 4775 4776 /* Functions used for multicast addresses handling */ 4777 int dev_mc_add(struct net_device *dev, const unsigned char *addr); 4778 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); 4779 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); 4780 int dev_mc_del(struct net_device *dev, const unsigned char *addr); 4781 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); 4782 int dev_mc_sync(struct net_device *to, struct net_device *from); 4783 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); 4784 void dev_mc_unsync(struct net_device *to, struct net_device *from); 4785 void dev_mc_flush(struct net_device *dev); 4786 void dev_mc_init(struct net_device *dev); 4787 4788 /** 4789 * __dev_mc_sync - Synchronize device's multicast list 4790 * @dev: device to sync 4791 * @sync: function to call if address should be added 4792 * @unsync: function to call if address should be removed 4793 * 4794 * Add newly added addresses to the interface, and release 4795 * addresses that have been deleted. 4796 */ 4797 static inline int __dev_mc_sync(struct net_device *dev, 4798 int (*sync)(struct net_device *, 4799 const unsigned char *), 4800 int (*unsync)(struct net_device *, 4801 const unsigned char *)) 4802 { 4803 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); 4804 } 4805 4806 /** 4807 * __dev_mc_unsync - Remove synchronized addresses from device 4808 * @dev: device to sync 4809 * @unsync: function to call if address should be removed 4810 * 4811 * Remove all addresses that were added to the device by dev_mc_sync(). 
4812 */ 4813 static inline void __dev_mc_unsync(struct net_device *dev, 4814 int (*unsync)(struct net_device *, 4815 const unsigned char *)) 4816 { 4817 __hw_addr_unsync_dev(&dev->mc, dev, unsync); 4818 } 4819 4820 /* Functions used for secondary unicast and multicast support */ 4821 void dev_set_rx_mode(struct net_device *dev); 4822 int dev_set_promiscuity(struct net_device *dev, int inc); 4823 int dev_set_allmulti(struct net_device *dev, int inc); 4824 void netdev_state_change(struct net_device *dev); 4825 void __netdev_notify_peers(struct net_device *dev); 4826 void netdev_notify_peers(struct net_device *dev); 4827 void netdev_features_change(struct net_device *dev); 4828 /* Load a device via the kmod */ 4829 void dev_load(struct net *net, const char *name); 4830 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 4831 struct rtnl_link_stats64 *storage); 4832 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 4833 const struct net_device_stats *netdev_stats); 4834 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 4835 const struct pcpu_sw_netstats __percpu *netstats); 4836 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); 4837 4838 enum { 4839 NESTED_SYNC_IMM_BIT, 4840 NESTED_SYNC_TODO_BIT, 4841 }; 4842 4843 #define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit)) 4844 #define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT) 4845 4846 #define NESTED_SYNC_IMM __NESTED_SYNC(IMM) 4847 #define NESTED_SYNC_TODO __NESTED_SYNC(TODO) 4848 4849 struct netdev_nested_priv { 4850 unsigned char flags; 4851 void *data; 4852 }; 4853 4854 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); 4855 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 4856 struct list_head **iter); 4857 4858 /* iterate through upper list, must be called under RCU read lock */ 4859 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ 4860 for (iter = &(dev)->adj_list.upper, \ 4861 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ 4862 updev; \ 4863 updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) 4864 4865 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 4866 int (*fn)(struct net_device *upper_dev, 4867 struct netdev_nested_priv *priv), 4868 struct netdev_nested_priv *priv); 4869 4870 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 4871 struct net_device *upper_dev); 4872 4873 bool netdev_has_any_upper_dev(struct net_device *dev); 4874 4875 void *netdev_lower_get_next_private(struct net_device *dev, 4876 struct list_head **iter); 4877 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 4878 struct list_head **iter); 4879 4880 #define netdev_for_each_lower_private(dev, priv, iter) \ 4881 for (iter = (dev)->adj_list.lower.next, \ 4882 priv = netdev_lower_get_next_private(dev, &(iter)); \ 4883 priv; \ 4884 priv = netdev_lower_get_next_private(dev, &(iter))) 4885 4886 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \ 4887 for (iter = &(dev)->adj_list.lower, \ 4888 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ 4889 priv; \ 4890 priv = netdev_lower_get_next_private_rcu(dev, &(iter))) 4891 4892 void *netdev_lower_get_next(struct net_device *dev, 4893 struct list_head **iter); 4894 4895 #define netdev_for_each_lower_dev(dev, ldev, iter) \ 4896 for (iter = (dev)->adj_list.lower.next, \ 4897 ldev = netdev_lower_get_next(dev, &(iter)); \ 4898 ldev; \ 4899 ldev = netdev_lower_get_next(dev, &(iter))) 4900 4901 struct net_device 
*netdev_next_lower_dev_rcu(struct net_device *dev, 4902 struct list_head **iter); 4903 int netdev_walk_all_lower_dev(struct net_device *dev, 4904 int (*fn)(struct net_device *lower_dev, 4905 struct netdev_nested_priv *priv), 4906 struct netdev_nested_priv *priv); 4907 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 4908 int (*fn)(struct net_device *lower_dev, 4909 struct netdev_nested_priv *priv), 4910 struct netdev_nested_priv *priv); 4911 4912 void *netdev_adjacent_get_private(struct list_head *adj_list); 4913 void *netdev_lower_get_first_private_rcu(struct net_device *dev); 4914 struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 4915 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); 4916 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, 4917 struct netlink_ext_ack *extack); 4918 int netdev_master_upper_dev_link(struct net_device *dev, 4919 struct net_device *upper_dev, 4920 void *upper_priv, void *upper_info, 4921 struct netlink_ext_ack *extack); 4922 void netdev_upper_dev_unlink(struct net_device *dev, 4923 struct net_device *upper_dev); 4924 int netdev_adjacent_change_prepare(struct net_device *old_dev, 4925 struct net_device *new_dev, 4926 struct net_device *dev, 4927 struct netlink_ext_ack *extack); 4928 void netdev_adjacent_change_commit(struct net_device *old_dev, 4929 struct net_device *new_dev, 4930 struct net_device *dev); 4931 void netdev_adjacent_change_abort(struct net_device *old_dev, 4932 struct net_device *new_dev, 4933 struct net_device *dev); 4934 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); 4935 void *netdev_lower_dev_get_private(struct net_device *dev, 4936 struct net_device *lower_dev); 4937 void netdev_lower_state_changed(struct net_device *lower_dev, 4938 void *lower_state_info); 4939 4940 /* RSS keys are 40 or 52 bytes long */ 4941 #define NETDEV_RSS_KEY_LEN 52 4942 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; 4943 void netdev_rss_key_fill(void *buffer, size_t len); 4944 4945 int skb_checksum_help(struct sk_buff *skb); 4946 int skb_crc32c_csum_help(struct sk_buff *skb); 4947 int skb_csum_hwoffload_help(struct sk_buff *skb, 4948 const netdev_features_t features); 4949 4950 struct netdev_bonding_info { 4951 ifslave slave; 4952 ifbond master; 4953 }; 4954 4955 struct netdev_notifier_bonding_info { 4956 struct netdev_notifier_info info; /* must be first */ 4957 struct netdev_bonding_info bonding_info; 4958 }; 4959 4960 void netdev_bonding_info_change(struct net_device *dev, 4961 struct netdev_bonding_info *bonding_info); 4962 4963 #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) 4964 void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data); 4965 #else 4966 static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, 4967 const void *data) 4968 { 4969 } 4970 #endif 4971 4972 __be16 skb_network_protocol(struct sk_buff *skb, int *depth); 4973 4974 static inline bool can_checksum_protocol(netdev_features_t features, 4975 __be16 protocol) 4976 { 4977 if (protocol == htons(ETH_P_FCOE)) 4978 return !!(features & NETIF_F_FCOE_CRC); 4979 4980 /* Assume this is an IP checksum (not SCTP CRC) */ 4981 4982 if (features & NETIF_F_HW_CSUM) { 4983 /* Can checksum everything */ 4984 return true; 4985 } 4986 4987 switch (protocol) { 4988 case htons(ETH_P_IP): 4989 return !!(features & NETIF_F_IP_CSUM); 4990 case htons(ETH_P_IPV6): 4991 return !!(features & NETIF_F_IPV6_CSUM); 4992 default: 4993 return false; 4994 } 4995 } 4996 
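/* Example (sketch): fall back to software checksumming when the device
 * cannot offload the protocol at hand, mirroring what the transmit path
 * does internally:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !can_checksum_protocol(dev->features, skb->protocol))
 *		skb_checksum_help(skb);
 */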
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev,
					struct sk_buff *skb)
{
}
#endif
/* rx skb timestamps */
void net_enable_timestamp(void);
void net_disable_timestamp(void);

static inline ktime_t netdev_get_tstamp(struct net_device *dev,
					const struct skb_shared_hwtstamps *hwtstamps,
					bool cycles)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_tstamp)
		return ops->ndo_get_tstamp(dev, hwtstamps, cycles);

	return hwtstamps->hwtstamp;
}

#ifndef CONFIG_PREEMPT_RT
static inline void netdev_xmit_set_more(bool more)
{
	__this_cpu_write(softnet_data.xmit.more, more);
}

static inline bool netdev_xmit_more(void)
{
	return __this_cpu_read(softnet_data.xmit.more);
}
#else
static inline void netdev_xmit_set_more(bool more)
{
	current->net_xmit.more = more;
}

static inline bool netdev_xmit_more(void)
{
	return current->net_xmit.more;
}
#endif

static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
					      struct sk_buff *skb, struct net_device *dev,
					      bool more)
{
	netdev_xmit_set_more(more);
	return ops->ndo_start_xmit(skb, dev);
}

static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
					    struct netdev_queue *txq, bool more)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	netdev_tx_t rc;

	rc = __netdev_start_xmit(ops, skb, dev, more);
	if (rc == NETDEV_TX_OK)
		txq_trans_update(txq);

	return rc;
}

int netdev_class_create_file_ns(const struct class_attribute *class_attr,
				const void *ns);
void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
				 const void *ns);

extern const struct kobj_ns_type_operations net_ns_type_operations;

const char *netdev_drivername(const struct net_device *dev);

static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
							   netdev_features_t f2)
{
	if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
		if (f1 & NETIF_F_HW_CSUM)
			f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
		else
			f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	return f1 & f2;
}

static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);

/* Allow TSO to be used on stacked devices:
 * performing the GSO segmentation before the last device
 * is a performance improvement.
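 *
 * Bond/team-style drivers typically fold per-port features together with
 * netdev_increment_features(); a sketch (the port list and priv struct
 * are hypothetical):
 *
 *	features = netdev_add_tso_features(features, mask);
 *	list_for_each_entry(port, &priv->ports, list)
 *		features = netdev_increment_features(features,
 *						     port->dev->features,
 *						     mask);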
/* Allow TSO to be used on stacked devices: performing the GSO
 * segmentation before the last device is a performance improvement.
 */
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
							netdev_features_t mask)
{
	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}

int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);
void skb_warn_bad_offload(const struct sk_buff *skb);

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}

void netif_set_tso_max_size(struct net_device *dev, unsigned int size);
void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
void netif_inherit_tso_max(struct net_device *to,
			   const struct net_device *from);

static inline unsigned int
netif_get_gro_max_size(const struct net_device *dev, const struct sk_buff *skb)
{
	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
	return skb->protocol == htons(ETH_P_IPV6) ?
	       READ_ONCE(dev->gro_max_size) :
	       READ_ONCE(dev->gro_ipv4_max_size);
}
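
/* Example (illustrative sketch): the core transmit path uses
 * netif_needs_gso() to decide whether a GSO skb must be segmented in
 * software before it reaches the device, roughly as follows (error
 * handling trimmed; skb_gso_segment() is declared in <net/gso.h> in
 * recent kernels):
 *
 *	netdev_features_t features = netif_skb_features(skb);
 *
 *	if (netif_needs_gso(skb, features)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, features);
 *
 *		if (IS_ERR(segs))
 *			goto drop;
 *		if (segs)
 *			skb = segs;	// transmit the segment list instead
 *	}
 */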
static inline unsigned int
netif_get_gso_max_size(const struct net_device *dev, const struct sk_buff *skb)
{
	/* pairs with WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
	return skb->protocol == htons(ETH_P_IPV6) ?
	       READ_ONCE(dev->gso_max_size) :
	       READ_ONCE(dev->gso_ipv4_max_size);
}

static inline bool netif_is_macsec(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACSEC;
}

static inline bool netif_is_macvlan(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;
}

static inline bool netif_is_macvlan_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN_PORT;
}

static inline bool netif_is_bond_master(const struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(const struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
}

static inline bool netif_is_l3_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_MASTER;
}

static inline bool netif_is_l3_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_SLAVE;
}

static inline int dev_sdif(const struct net_device *dev)
{
#ifdef CONFIG_NET_L3_MASTER_DEV
	if (netif_is_l3_slave(dev))
		return dev->ifindex;
#endif
	return 0;
}

static inline bool netif_is_bridge_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_EBRIDGE;
}

static inline bool netif_is_bridge_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_BRIDGE_PORT;
}

static inline bool netif_is_ovs_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OPENVSWITCH;
}

static inline bool netif_is_ovs_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OVS_DATAPATH;
}

static inline bool netif_is_any_bridge_master(const struct net_device *dev)
{
	return netif_is_bridge_master(dev) || netif_is_ovs_master(dev);
}

static inline bool netif_is_any_bridge_port(const struct net_device *dev)
{
	return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
}

static inline bool netif_is_team_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM;
}

static inline bool netif_is_team_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM_PORT;
}

static inline bool netif_is_lag_master(const struct net_device *dev)
{
	return netif_is_bond_master(dev) || netif_is_team_master(dev);
}

static inline bool netif_is_lag_port(const struct net_device *dev)
{
	return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}

static inline bool netif_is_rxfh_configured(const struct net_device *dev)
{
	return dev->priv_flags & IFF_RXFH_CONFIGURED;
}
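
/* Example (illustrative sketch): netdev notifier callbacks commonly use
 * these predicates to ignore device types they do not care about. The
 * handler below is hypothetical:
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (!netif_is_lag_master(dev) && !netif_is_bridge_master(dev))
 *			return NOTIFY_DONE;
 *
 *		// react to LAG/bridge masters only
 *		return NOTIFY_OK;
 *	}
 */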
static inline bool netif_is_failover(const struct net_device *dev)
{
	return dev->priv_flags & IFF_FAILOVER;
}

static inline bool netif_is_failover_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_FAILOVER_SLAVE;
}

/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}

/* return true if dev can't cope with mtu frames that need vlan tag insertion */
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
	/* TODO: reserve and use an additional IFF bit, if we get more users */
	return netif_is_macsec(dev);
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
	u8 reg_state = READ_ONCE(dev->reg_state);

	switch (reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state);
	return " (unknown)";
}

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

#define netdev_WARN_ONCE(dev, format, args...)				\
	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
		  netdev_reg_state(dev), ##args)

/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16? Because with 16 the only overlap we get on a hash of the
 * low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *		0800	IP
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;

extern struct net_device *blackhole_netdev;

/* Note: avoid these macros in fast paths; prefer per-cpu or per-queue
 * counters.
 */
#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
#define DEV_STATS_ADD(DEV, FIELD, VAL) \
		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)

#endif /* _LINUX_NETDEVICE_H */
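
/* Example (illustrative sketch): slow-path error handling that still uses
 * the legacy net_device_stats fields bumps them through the atomic
 * aliases above; foo_ring_full() is a hypothetical driver check:
 *
 *	if (unlikely(foo_ring_full(priv->ring))) {
 *		DEV_STATS_INC(dev, tx_dropped);
 *		dev_kfree_skb_any(skb);
 *		return NETDEV_TX_OK;
 *	}
 */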