/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <[email protected]>
 *		Corey Minyard <[email protected]>
 *		Donald J. Becker, <[email protected]>
 *		Alan Cox, <[email protected]>
 *		Bjorn Ekwall. <[email protected]>
 *		Pekka Riikonen <[email protected]>
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/local.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <linux/netdevice_xmit.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <uapi/linux/netdev.h>
#include <linux/hashtable.h>
#include <linux/rbtree.h>
#include <net/net_trackers.h>
#include <net/net_debug.h>
#include <net/dropreason-core.h>
#include <net/neighbour_tables.h>

struct netpoll_info;
struct device;
struct ethtool_ops;
struct kernel_hwtstamp_config;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm_kern;
struct macsec_context;
struct macsec_ops;
struct netdev_config;
struct netdev_name_node;
struct sd_flow_limit;
struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;
struct xdp_frame;
struct xdp_metadata_ops;
struct xdp_md;
struct ethtool_netdev_state;
struct phy_link_topology;
struct hwtstamp_provider;

typedef u32 xdp_features_t;

void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);
void netdev_sw_irq_coalesce_default_on(struct net_device *dev);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

#define MAX_NEST_DEV 8

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */
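
/* Illustrative sketch (not part of this header's API) of how the namespaces
 * above meet in practice: a hypothetical synchronous virtual driver "foo"
 * may use a qdisc return code such as NET_XMIT_DROP (as veth does), while a
 * real NIC behind a qdisc should stick to NETDEV_TX_OK / NETDEV_TX_BUSY.
 * All "foo_" helpers are hypothetical.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		if (foo_backend_down(dev)) {		// hypothetical
 *			dev_kfree_skb_any(skb);
 *			return NET_XMIT_DROP;		// virtual device only
 *		}
 *		foo_deliver(dev, skb);			// hypothetical
 *		return NETDEV_TX_OK;
 *	}
 */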

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

/*
 * Compute the worst-case header length according to the protocols
 * used.
 */

#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

#define NET_DEV_STAT(FIELD)			\
	union {					\
		unsigned long FIELD;		\
		atomic_long_t __##FIELD;	\
	}

struct net_device_stats {
	NET_DEV_STAT(rx_packets);
	NET_DEV_STAT(tx_packets);
	NET_DEV_STAT(rx_bytes);
	NET_DEV_STAT(tx_bytes);
	NET_DEV_STAT(rx_errors);
	NET_DEV_STAT(tx_errors);
	NET_DEV_STAT(rx_dropped);
	NET_DEV_STAT(tx_dropped);
	NET_DEV_STAT(multicast);
	NET_DEV_STAT(collisions);
	NET_DEV_STAT(rx_length_errors);
	NET_DEV_STAT(rx_over_errors);
	NET_DEV_STAT(rx_crc_errors);
	NET_DEV_STAT(rx_frame_errors);
	NET_DEV_STAT(rx_fifo_errors);
	NET_DEV_STAT(rx_missed_errors);
	NET_DEV_STAT(tx_aborted_errors);
	NET_DEV_STAT(tx_carrier_errors);
	NET_DEV_STAT(tx_fifo_errors);
	NET_DEV_STAT(tx_heartbeat_errors);
	NET_DEV_STAT(tx_window_errors);
	NET_DEV_STAT(rx_compressed);
	NET_DEV_STAT(tx_compressed);
};
#undef NET_DEV_STAT
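
/* Illustrative note: the NET_DEV_STAT() union lets lockless writers update a
 * field via atomic_long_*() on the __-prefixed alias while legacy readers
 * keep accessing the plain unsigned long. In current kernels, helpers such
 * as DEV_STATS_INC() (defined further down in this header) wrap this idea:
 *
 *	atomic_long_inc(&dev->stats.__rx_dropped);	// writer side
 *	unsigned long dropped = dev->stats.rx_dropped;	// reader side
 */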

/* per-cpu stats, allocated on demand.
 * Try to fit them in a single cache line, for dev_get_stats() sake.
 */
struct net_device_core_stats {
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	rx_nohandler;
	unsigned long	rx_otherhost_dropped;
} __aligned(4 * sizeof(unsigned long));

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	struct rb_node		node;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_UNICAST	3
#define NETDEV_HW_ADDR_T_MULTICAST	4
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;

	/* Auxiliary tree for faster lookup on addition and deletion */
	struct rb_root		tree;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
#define netdev_for_each_synced_uc_addr(_ha, _dev) \
	netdev_for_each_uc_addr((_ha), (_dev)) \
		if ((_ha)->sync_cnt)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
#define netdev_for_each_synced_mc_addr(_ha, _dev) \
	netdev_for_each_mc_addr((_ha), (_dev)) \
		if ((_ha)->sync_cnt)

struct hh_cache {
	unsigned int	hh_len;
	seqlock_t	hh_lock;

/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
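
/* Illustrative sketch: the canonical way a sender reserves worst-case
 * link-layer headroom before building a packet. dev, payload and payload_len
 * are assumed to be in scope; error handling is trimmed for brevity.
 *
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	skb_put_data(skb, payload, payload_len);
 *
 * For Ethernet (hard_header_len == 14), HH_DATA_OFF(14) == 2 and
 * HH_DATA_ALIGN(14) == 16, so the reserved headroom stays HH_DATA_MOD
 * aligned.
 */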

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
	__be16	(*parse_protocol)(const struct sk_buff *skb);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_TESTING,
};

struct gro_list {
	struct list_head	list;
	int			count;
};

/*
 * size of gro hash buckets, must be less than the number of bits in
 * napi_struct::gro_bitmask
 */
#define GRO_HASH_BUCKETS	8

/*
 * Structure for per-NAPI config
 */
struct napi_config {
	u64 gro_flush_timeout;
	u64 irq_suspend_timeout;
	u32 defer_hard_irqs;
	unsigned int napi_id;
};
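
/* Illustrative note: each GRO hash bucket with pending skbs has its bit set
 * in napi_struct::gro_bitmask (an unsigned long, hence the size constraint
 * above), roughly (simplified from the core GRO code):
 *
 *	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
 *	napi->gro_bitmask |= BIT(bucket);
 *
 * so flushing can scan the bitmask instead of walking all the lists.
 */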

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	u32			defer_hard_irqs_count;
	unsigned long		gro_bitmask;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	/* CPU actively polling if netpoll is configured */
	int			poll_owner;
#endif
	/* CPU on which NAPI has been scheduled for processing */
	int			list_owner;
	struct net_device	*dev;
	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
	struct sk_buff		*skb;
	struct list_head	rx_list;	/* Pending GRO_NORMAL skbs */
	int			rx_count;	/* length of rx_list */
	unsigned int		napi_id;	/* protected by netdev_lock */
	struct hrtimer		timer;
	/* all fields past this point are write-protected by netdev_lock */
	struct task_struct	*thread;
	unsigned long		gro_flush_timeout;
	unsigned long		irq_suspend_timeout;
	u32			defer_hard_irqs;
	/* control-path-only fields follow */
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	int			irq;
	int			index;
	struct napi_config	*config;
};

enum {
	NAPI_STATE_SCHED,		/* Poll is scheduled */
	NAPI_STATE_MISSED,		/* reschedule a napi */
	NAPI_STATE_DISABLE,		/* Disable pending */
	NAPI_STATE_NPSVC,		/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_LISTED,		/* NAPI added to system lists */
	NAPI_STATE_NO_BUSY_POLL,	/* Do not add in napi_hash, no busy polling */
	NAPI_STATE_IN_BUSY_POLL,	/* sk_busy_loop() owns this NAPI */
	NAPI_STATE_PREFER_BUSY_POLL,	/* prefer busy-polling over softirq processing */
	NAPI_STATE_THREADED,		/* The poll is performed inside its own thread */
	NAPI_STATE_SCHED_THREADED,	/* Napi is currently scheduled in threaded mode */
};

enum {
	NAPIF_STATE_SCHED		= BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED		= BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE		= BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC		= BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_LISTED		= BIT(NAPI_STATE_LISTED),
	NAPIF_STATE_NO_BUSY_POLL	= BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL	= BIT(NAPI_STATE_IN_BUSY_POLL),
	NAPIF_STATE_PREFER_BUSY_POLL	= BIT(NAPI_STATE_PREFER_BUSY_POLL),
	NAPIF_STATE_THREADED		= BIT(NAPI_STATE_THREADED),
	NAPIF_STATE_SCHED_THREADED	= BIT(NAPI_STATE_SCHED_THREADED),
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
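
/* Illustrative sketch of an rx_handler, in the style of what bridge/bond/team
 * register via netdev_rx_handler_register(). Names prefixed "foo_" are
 * hypothetical.
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct foo_port *port =
 *			rcu_dereference(skb->dev->rx_handler_data);
 *
 *		if (foo_should_steal(port, skb)) {	// hypothetical
 *			foo_deliver(port, skb);		// hypothetical
 *			return RX_HANDLER_CONSUMED;
 *		}
 *		skb->dev = port->upper_dev;	// divert to the upper device
 *		*pskb = skb;
 *		return RX_HANDLER_ANOTHER;
 *	}
 */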

void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

static inline bool napi_prefer_busy_poll(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
}

/**
 * napi_is_scheduled - test if NAPI is scheduled
 * @n: NAPI context
 *
 * This check is "best-effort". With no locking implemented,
 * a NAPI can be scheduled or terminated right after this check,
 * so the result may be imprecise.
 *
 * NAPI_STATE_SCHED is an internal state; napi_is_scheduled
 * should not normally be used, napi_schedule() should be
 * used instead.
 *
 * Use only if the driver really needs to check if a NAPI
 * is scheduled, for example in the context of a delayed timer
 * that can be skipped if a NAPI is already scheduled.
 *
 * Return: True if NAPI is scheduled, False otherwise.
 */
static inline bool napi_is_scheduled(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_SCHED, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);

/**
 * napi_schedule - schedule NAPI poll
 * @n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 * Return: true if we scheduled a NAPI, false otherwise.
 * Refer to napi_schedule_prep() for the reasons why
 * a NAPI might not be scheduled.
 */
static inline bool napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n)) {
		__napi_schedule(n);
		return true;
	}

	return false;
}

/**
 * napi_schedule_irqoff - schedule NAPI poll
 * @n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}
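
/* Illustrative sketch: the usual interrupt-handler half of the NAPI contract.
 * Mask device interrupts, then hand the work to the poll routine via
 * napi_schedule(). "foo_" names are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_priv *priv = dev_id;
 *
 *		foo_mask_irqs(priv);		// hypothetical register write
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */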

/**
 * napi_complete_done - NAPI processing complete
 * @n: NAPI context
 * @work_done: number of packets processed
 *
 * Mark NAPI processing as complete. Should only be called if poll budget
 * has not been completely consumed.
 * Prefer over napi_complete().
 * Return: false if device should avoid rearming interrupts.
 */
bool napi_complete_done(struct napi_struct *n, int work_done);

static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}

int dev_set_threaded(struct net_device *dev, bool threaded);

void napi_disable(struct napi_struct *n);
void napi_disable_locked(struct napi_struct *n);

void napi_enable(struct napi_struct *n);
void napi_enable_locked(struct napi_struct *n);

/**
 * napi_synchronize - wait until NAPI is not running
 * @n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits until any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}

/**
 * napi_if_scheduled_mark_missed - if NAPI is running, set the
 * NAPIF_STATE_MISSED
 * @n: NAPI context
 *
 * If NAPI is running, set NAPIF_STATE_MISSED, and return true if
 * NAPI is scheduled.
 */
static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
	unsigned long val, new;

	val = READ_ONCE(n->state);
	do {
		if (val & NAPIF_STATE_DISABLE)
			return true;

		if (!(val & NAPIF_STATE_SCHED))
			return false;

		new = val | NAPIF_STATE_MISSED;
	} while (!try_cmpxchg(&n->state, &val, new));

	return true;
}
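
/* Illustrative sketch: the matching poll routine. Re-enable device interrupts
 * only when napi_complete_done() returns true (i.e. the core did not defer
 * or reschedule the NAPI). "foo_" names are hypothetical.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv =
 *			container_of(napi, struct foo_priv, napi);
 *		int work_done = foo_clean_rx(priv, budget);	// hypothetical
 *
 *		if (work_done < budget &&
 *		    napi_complete_done(napi, work_done))
 *			foo_unmask_irqs(priv);			// hypothetical
 *		return work_done;
 *	}
 */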

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
 * netif_tx_* functions below are used to manipulate this flag. The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently. The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state). Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */

struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	netdevice_tracker	dev_tracker;

	struct Qdisc __rcu	*qdisc;
	struct Qdisc __rcu	*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	atomic_long_t		trans_timeout;

	/* Subordinate device that the queue has been assigned to */
	struct net_device	*sb_dev;
#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool	*pool;
#endif

/*
 * write-mostly part
 */
#ifdef CONFIG_BQL
	struct dql		dql;
#endif
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;

/*
 * slow- / control-path part
 */
	/* NAPI instance for the queue
	 * Readers and writers must hold RTNL
	 */
	struct napi_struct	*napi;

#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
} ____cacheline_aligned_in_smp;

extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;

/*
 * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
 *                                     == 1 : For initns only
 *                                     == 2 : For none.
 */
static inline bool net_has_fallback_tunnels(const struct net *net)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);

	return !fb_tunnels_only_for_init_net ||
		(net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
#else
	return true;
#endif
}

static inline int net_inherit_devconf(void)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	return READ_ONCE(sysctl_devconf_inherit_init_net);
#else
	return 0;
#endif
}

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif

/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
enum xps_map_type {
	XPS_CPUS = 0,
	XPS_RXQS,
	XPS_MAPS_MAX,
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length. The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))
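
/* Illustrative arithmetic, assuming a 64-bit build with 64-byte cachelines:
 * sizeof(struct xps_map) is 24 (len 4 + alloc_len 4 + rcu_head 16), so
 * offsetof(struct xps_map, queues[1]) is 26, L1_CACHE_ALIGN(26) is 64, and
 * XPS_MIN_MAP_ALLOC works out to (64 - 24) / 2 = 20 queue entries: the
 * smallest allocation already fills the map's first cacheline.
 */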

/*
 * This structure holds all XPS maps for device. Maps are indexed by CPU.
 *
 * We keep track of the number of cpus/rxqs used when the struct is allocated,
 * in nr_ids, so that we never access memory out of bounds.
 *
 * We keep track of the number of traffic classes used when the struct is
 * allocated, in num_tc. This will be used to navigate the maps, to ensure
 * we do not cross their upper bound, since the original dev->num_tc can be
 * updated in the meantime.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	unsigned int nr_ids;
	s16 num_tc;
	struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))

#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
	(_rxqs * (_tcs) * sizeof(struct xps_map *)))

#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev);

enum net_device_path_type {
	DEV_PATH_ETHERNET = 0,
	DEV_PATH_VLAN,
	DEV_PATH_BRIDGE,
	DEV_PATH_PPPOE,
	DEV_PATH_DSA,
	DEV_PATH_MTK_WDMA,
};

struct net_device_path {
	enum net_device_path_type	type;
	const struct net_device		*dev;
	union {
		struct {
			u16		id;
			__be16		proto;
			u8		h_dest[ETH_ALEN];
		} encap;
		struct {
			enum {
				DEV_PATH_BR_VLAN_KEEP,
				DEV_PATH_BR_VLAN_TAG,
				DEV_PATH_BR_VLAN_UNTAG,
				DEV_PATH_BR_VLAN_UNTAG_HW,
			}		vlan_mode;
			u16		vlan_id;
			__be16		vlan_proto;
		} bridge;
		struct {
			int port;
			u16 proto;
		} dsa;
		struct {
			u8 wdma_idx;
			u8 queue;
			u16 wcid;
			u8 bss;
			u8 amsdu;
		} mtk_wdma;
	};
};

#define NET_DEVICE_PATH_STACK_MAX	5
#define NET_DEVICE_PATH_VLAN_MAX	2

struct net_device_path_stack {
	int			num_paths;
	struct net_device_path	path[NET_DEVICE_PATH_STACK_MAX];
};

struct net_device_path_ctx {
	const struct net_device *dev;
	u8			daddr[ETH_ALEN];

	int			num_vlans;
	struct {
		u16		id;
		__be16		proto;
	} vlan[NET_DEVICE_PATH_VLAN_MAX];
};
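
/* Illustrative sketch: how a stacked device might fill one hop of the path
 * in its ndo_fill_forward_path() (documented with struct net_device_ops
 * below). Loosely modelled on the 802.1q implementation; "foo_" helpers are
 * hypothetical, the fields are from the structs above.
 *
 *	static int foo_vlan_fill_forward_path(struct net_device_path_ctx *ctx,
 *					      struct net_device_path *path)
 *	{
 *		path->type = DEV_PATH_VLAN;
 *		path->encap.id = foo_vlan_id(ctx->dev);	// hypothetical
 *		path->encap.proto = htons(ETH_P_8021Q);
 *		path->dev = ctx->dev;
 *		ctx->dev = foo_real_dev(ctx->dev);	// hypothetical lower dev
 *		return 0;
 *	}
 */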

enum tc_setup_type {
	TC_QUERY_CAPS,
	TC_SETUP_QDISC_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
	TC_SETUP_BLOCK,
	TC_SETUP_QDISC_CBS,
	TC_SETUP_QDISC_RED,
	TC_SETUP_QDISC_PRIO,
	TC_SETUP_QDISC_MQ,
	TC_SETUP_QDISC_ETF,
	TC_SETUP_ROOT_QDISC,
	TC_SETUP_QDISC_GRED,
	TC_SETUP_QDISC_TAPRIO,
	TC_SETUP_FT,
	TC_SETUP_QDISC_ETS,
	TC_SETUP_QDISC_TBF,
	TC_SETUP_QDISC_FIFO,
	TC_SETUP_QDISC_HTB,
	TC_SETUP_ACT,
};

/* These structures hold the attributes of bpf state that are being passed
 * to the netdevice through the bpf op.
 */
enum bpf_netdev_command {
	/* Set or clear a bpf program used in the earliest stages of packet
	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
	 * is responsible for calling bpf_prog_put on any old progs that are
	 * stored. In case of error, the callee need not release the new prog
	 * reference, but on success it takes ownership and must bpf_prog_put
	 * when it is no longer used.
	 */
	XDP_SETUP_PROG,
	XDP_SETUP_PROG_HW,
	/* BPF program for offload callbacks, invoked at program load time. */
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
	XDP_SETUP_XSK_POOL,
};

struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
struct bpf_xdp_link;

enum bpf_xdp_mode {
	XDP_MODE_SKB = 0,
	XDP_MODE_DRV = 1,
	XDP_MODE_HW = 2,
	__MAX_XDP_MODE
};

struct bpf_xdp_entity {
	struct bpf_prog *prog;
	struct bpf_xdp_link *link;
};

struct netdev_bpf {
	enum bpf_netdev_command command;
	union {
		/* XDP_SETUP_PROG */
		struct {
			u32 flags;
			struct bpf_prog *prog;
			struct netlink_ext_ack *extack;
		};
		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
		struct {
			struct bpf_offloaded_map *offmap;
		};
		/* XDP_SETUP_XSK_POOL */
		struct {
			struct xsk_buff_pool *pool;
			u16 queue_id;
		} xsk;
	};
};

/* Flags for ndo_xsk_wakeup. */
#define XDP_WAKEUP_RX (1 << 0)
#define XDP_WAKEUP_TX (1 << 1)
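
/* Illustrative sketch: a driver's ndo_bpf() typically switches on the
 * command; most NICs handle only XDP_SETUP_PROG (plus XDP_SETUP_XSK_POOL
 * when AF_XDP zero-copy is supported). "foo_" names are hypothetical.
 *
 *	static int foo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return foo_xdp_setup(dev, bpf->prog, bpf->extack);
 *		case XDP_SETUP_XSK_POOL:
 *			return foo_xsk_setup(dev, bpf->xsk.pool,
 *					     bpf->xsk.queue_id);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */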

#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
	int	(*xdo_dev_state_add)(struct xfrm_state *x, struct netlink_ext_ack *extack);
	void	(*xdo_dev_state_delete)(struct xfrm_state *x);
	void	(*xdo_dev_state_free)(struct xfrm_state *x);
	bool	(*xdo_dev_offload_ok)(struct sk_buff *skb,
				      struct xfrm_state *x);
	void	(*xdo_dev_state_advance_esn)(struct xfrm_state *x);
	void	(*xdo_dev_state_update_stats)(struct xfrm_state *x);
	int	(*xdo_dev_policy_add)(struct xfrm_policy *x, struct netlink_ext_ack *extack);
	void	(*xdo_dev_policy_delete)(struct xfrm_policy *x);
	void	(*xdo_dev_policy_free)(struct xfrm_policy *x);
};
#endif

struct dev_ifalias {
	struct rcu_head rcuhead;
	char ifalias[];
};

struct devlink;
struct tlsdev_ops;

struct netdev_net_notifier {
	struct list_head list;
	struct notifier_block *nb;
};

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when device is unregistered or when registration
 *	fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
 *	the queue before that can happen; it's for obsolete devices and weird
 *	corner cases, but the stack really does a non-trivial amount
 *	of useless work if you return NETDEV_TX_BUSY.
 *	Required; cannot be NULL.
 *
 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
 *					   struct net_device *dev,
 *					   netdev_features_t features);
 *	Called by core transmit path to determine if device is capable of
 *	performing offload operations on a given packet. This is to give
 *	the device an opportunity to implement any restrictions that cannot
 *	be otherwise expressed by feature flags. The check is called with
 *	the set of features that the stack has calculated and it returns
 *	those the driver believes to be appropriate.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         struct net_device *sb_dev);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device's address list filtering
 *	changes. If driver handles unicast address filtering, it should set
 *	IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	MAC address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Old-style ioctl entry point. This is used internally by the
 *	ieee802154 subsystem but is no longer called by the device
 *	ioctl handler.
 *
 * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Used by the bonding driver for its device specific ioctls:
 *	SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
 *	SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
 *
 * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
 *	SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network devices bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transmission Unit
 *	of a device.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * void (*ndo_get_stats64)(struct net_device *dev,
 *                         struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
 *	Return true if this device supports offload stats of this attr_id.
 *
 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
 *	void *attr_data)
 *	Get statistics for offload operations by attr_id. Write it into the
 *	attr_data pointer.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
 *			  u8 qos, __be16 proto);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *			  int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 *
 *	Enable or disable the VF ability to query its RSS Redirection Table and
 *	Hash Key. This is needed since on some devices VFs share this
 *	information with the PF, and querying it may introduce a theoretical
 *	security risk.
 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
 *		       void *type_data);
 *	Called to setup any 'tc' scheduler, classifier or action on @dev.
 *	This is always called from the stack with the rtnl lock held and netif
 *	tx queues stopped. This allows the netdevice to perform queue
 *	management safely.
 *
 *	Fibre Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with Fibre Channel management service as per the
 *	FC-GS Fabric Device Management Information (FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override default World Wide
 *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS. rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release previously enslaved netdev.
 *
 * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
 *					    struct sk_buff *skb,
 *					    bool all_slaves);
 *	Get the xmit slave of master device. If all_slaves is true, the
 *	function assumes all slaves can transmit.
 *
 *	Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *					 netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid, u16 flags,
 *		      bool *notified, struct netlink_ext_ack *extack);
 *	Adds an FDB entry to dev for addr.
 *	Callee shall set *notified to true if it sent any appropriate
 *	notification(s). Otherwise core will send a generic one.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid,
 *		      bool *notified, struct netlink_ext_ack *extack);
 *	Deletes the FDB entry from dev corresponding to addr.
 *	Callee shall set *notified to true if it sent any appropriate
 *	notification(s). Otherwise core will send a generic one.
 * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
 *			   struct netlink_ext_ack *extack);
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, struct net_device *filter_dev,
 *		       int *idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[],
 *		      u16 nlmsg_flags, struct netlink_ext_ack *extack);
 *	Adds an MDB entry to dev.
 * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
 *		      struct netlink_ext_ack *extack);
 *	Deletes the MDB entry from dev.
 * int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[],
 *			   struct netlink_ext_ack *extack);
 *	Bulk deletes MDB entries from dev.
 * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
 *		       struct netlink_callback *cb);
 *	Dumps MDB entries from dev. The first argument (marker) in the netlink
 *	callback is used by core rtnetlink code.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags, struct netlink_ext_ack *extack)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask,
 *			     int nlflags)
 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags);
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *			       struct netdev_phys_item_id *ppid);
 *	Called to get ID of physical port of this device. If driver does
 *	not implement this, it is assumed that the hw is not able to have
 *	multiple net devices on single physical port.
 *
 * int (*ndo_get_port_parent_id)(struct net_device *dev,
 *			         struct netdev_phys_item_id *ppid)
 *	Called to get the parent ID of the physical port of this device.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *				 struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware. 'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload. Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *	the station and priv is the structure returned by the add
 *	operation.
 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
 *			     int queue_index, u32 maxrate);
 *	Called when a user wants to set a max-rate limitation on a specific
 *	TX queue.
 * int (*ndo_get_iflink)(const struct net_device *dev);
 *	Called to get the iflink value of this device.
 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 *	This function is used to get egress tunnel information for a given skb.
 *	This is useful for retrieving outer tunnel header parameters while
 *	sampling a packet.
 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
 *	This function is used to specify the headroom that the skb must
 *	consider when allocating an skb during packet reception. Setting
 *	appropriate rx headroom value allows avoiding skb head copy on
 *	forward. Setting a negative value resets the rx headroom to the
 *	default value.
 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
 *	This function is used to set or query state related to XDP on the
 *	netdevice and manage BPF offload. See definition of
 *	enum bpf_netdev_command for details.
 * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
 *			u32 flags);
 *	This function is used to submit @n XDP packets for transmit on a
 *	netdevice. Returns number of frames successfully transmitted, frames
 *	that got dropped are freed/returned via xdp_return_frame().
 *	A negative return value means a general error occurred invoking the
 *	ndo; no frames were transmitted and the caller will free all frames.
 * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
 *					        struct xdp_buff *xdp);
 *	Get the xmit slave of master device based on the xdp_buff.
 * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
 *	This function is used to wake up the softirq, ksoftirqd or kthread
 *	responsible for sending and/or receiving packets on a specific
 *	queue id bound to an AF_XDP socket. The flags field specifies if
 *	only Rx, only Tx, or both should be woken up using the flags
 *	XDP_WAKEUP_RX and XDP_WAKEUP_TX.
 * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p,
 *			 int cmd);
 *	Add, change, delete or get information on an IPv4 tunnel.
 * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
 *	If a device is paired with a peer device, return the peer instance.
 *	The caller must hold the RCU read lock.
 * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
 *	Get the forwarding path to reach the real device from the HW
 *	destination address.
 * ktime_t (*ndo_get_tstamp)(struct net_device *dev,
 *			     const struct skb_shared_hwtstamps *hwtstamps,
 *			     bool cycles);
 *	Get hardware timestamp based on normal/adjustable time or free running
 *	cycle counter. This function is required if physical clock supports a
 *	free running cycle counter.
 *
 * int (*ndo_hwtstamp_get)(struct net_device *dev,
 *			   struct kernel_hwtstamp_config *kernel_config);
 *	Get the currently configured hardware timestamping parameters for the
 *	NIC device.
 *
 * int (*ndo_hwtstamp_set)(struct net_device *dev,
 *			   struct kernel_hwtstamp_config *kernel_config,
 *			   struct netlink_ext_ack *extack);
 *	Change the hardware timestamping parameters for NIC device.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
						      struct net_device *dev,
						      netdev_features_t features);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    struct net_device *sb_dev);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_eth_ioctl)(struct net_device *dev,
						 struct ifreq *ifr, int cmd);
	int			(*ndo_siocbond)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_siocwandev)(struct net_device *dev,
						  struct if_settings *ifs);
	int			(*ndo_siocdevprivate)(struct net_device *dev,
						      struct ifreq *ifr,
						      void __user *data, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev,
						   unsigned int txqueue);

	void			(*ndo_get_stats64)(struct net_device *dev,
						   struct rtnl_link_stats64 *storage);
	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
	int			(*ndo_get_offload_stats)(int attr_id,
							 const struct net_device *dev,
							 void *attr_data);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan,
						   u8 qos, __be16 proto);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats
						    *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_get_vf_guid)(struct net_device *dev,
						   int vf,
						   struct ifla_vf_guid *node_guid,
						   struct ifla_vf_guid *port_guid);
	int			(*ndo_set_vf_guid)(struct net_device *dev,
						   int vf, u64 guid,
						   int guid_type);
	int			(*ndo_set_vf_rss_query_en)(
						   struct net_device *dev,
						   int vf, bool setting);
	int			(*ndo_setup_tc)(struct net_device *dev,
						enum tc_setup_type type,
						void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev,
						 struct netlink_ext_ack *extack);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	struct net_device*	(*ndo_get_xmit_slave)(struct net_device *dev,
						      struct sk_buff *skb,
						      bool all_slaves);
	struct net_device*	(*ndo_sk_get_lower_dev)(struct net_device *dev,
							struct sock *sk);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct net_device *dev,
						       struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct net_device *dev,
						     struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags,
					       bool *notified,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       bool *notified,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del_bulk)(struct nlmsghdr *nlh,
						    struct net_device *dev,
						    struct netlink_ext_ack *extack);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int *idx);
	int			(*ndo_fdb_get)(struct sk_buff *skb,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid, u32 portid, u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_add)(struct net_device *dev,
					       struct nlattr *tb[],
					       u16 nlmsg_flags,
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_del)(struct net_device *dev,
					       struct nlattr *tb[],
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_del_bulk)(struct net_device *dev,
						    struct nlattr *tb[],
						    struct netlink_ext_ack *extack);
	int			(*ndo_mdb_dump)(struct net_device *dev,
						struct sk_buff *skb,
						struct netlink_callback *cb);
	int			(*ndo_mdb_get)(struct net_device *dev,
					       struct nlattr *tb[], u32 portid,
					       u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags,
						      struct netlink_ext_ack *extack);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_port_parent_id)(struct net_device *dev,
							  struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
							 struct sk_buff *skb);
	void			(*ndo_set_rx_headroom)(struct net_device *dev,
						       int needed_headroom);
	int			(*ndo_bpf)(struct net_device *dev,
					   struct netdev_bpf *bpf);
	int			(*ndo_xdp_xmit)(struct net_device *dev, int n,
						struct xdp_frame **xdp,
						u32 flags);
	struct net_device *	(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
							  struct xdp_buff *xdp);
	int			(*ndo_xsk_wakeup)(struct net_device *dev,
						  u32 queue_id, u32 flags);
	int			(*ndo_tunnel_ctl)(struct net_device *dev,
						  struct ip_tunnel_parm_kern *p,
						  int cmd);
	struct net_device *	(*ndo_get_peer_dev)(struct net_device *dev);
	int			(*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
							 struct net_device_path *path);
	ktime_t			(*ndo_get_tstamp)(struct net_device *dev,
						  const struct skb_shared_hwtstamps *hwtstamps,
						  bool cycles);
	int			(*ndo_hwtstamp_get)(struct net_device *dev,
						    struct kernel_hwtstamp_config *kernel_config);
	int			(*ndo_hwtstamp_set)(struct net_device *dev,
						    struct kernel_hwtstamp_config *kernel_config,
						    struct netlink_ext_ack *extack);

#if IS_ENABLED(CONFIG_NET_SHAPER)
	/**
	 * @net_shaper_ops: Device shaping offload operations
	 * see include/net/net_shapers.h
	 */
	const struct net_shaper_ops *net_shaper_ops;
#endif
};
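
/* Illustrative sketch: a minimal ops table. Only ndo_start_xmit is mandatory;
 * everything else may be left NULL unless noted in the documentation above.
 * "foo_" names are hypothetical; the table is typically hooked up in the
 * probe path with "dev->netdev_ops = &foo_netdev_ops;".
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		foo_hw_xmit(netdev_priv(dev), skb);	// hypothetical
 *		return NETDEV_TX_OK;
 *	}
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,	// hypothetical
 *		.ndo_stop		= foo_stop,	// hypothetical
 *		.ndo_start_xmit		= foo_start_xmit,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 */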
These flags are invisible to 1642 * userspace; this means that the order of these flags can change 1643 * during any kernel release. 1644 * 1645 * You should add bitfield booleans after either net_device::priv_flags 1646 * (hotpath) or ::threaded (slowpath) instead of extending these flags. 1647 * 1648 * @IFF_802_1Q_VLAN: 802.1Q VLAN device 1649 * @IFF_EBRIDGE: Ethernet bridging device 1650 * @IFF_BONDING: bonding master or slave 1651 * @IFF_ISATAP: ISATAP interface (RFC4214) 1652 * @IFF_WAN_HDLC: WAN HDLC device 1653 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to 1654 * release skb->dst 1655 * @IFF_DONT_BRIDGE: disallow bridging this ether dev 1656 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time 1657 * @IFF_MACVLAN_PORT: device used as macvlan port 1658 * @IFF_BRIDGE_PORT: device used as bridge port 1659 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port 1660 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit 1661 * @IFF_UNICAST_FLT: Supports unicast filtering 1662 * @IFF_TEAM_PORT: device used as team port 1663 * @IFF_SUPP_NOFCS: device supports sending custom FCS 1664 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address 1665 * change when it's running 1666 * @IFF_MACVLAN: Macvlan device 1667 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account 1668 * underlying stacked devices 1669 * @IFF_L3MDEV_MASTER: device is an L3 master device 1670 * @IFF_NO_QUEUE: device can run without qdisc attached 1671 * @IFF_OPENVSWITCH: device is an Open vSwitch master 1672 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device 1673 * @IFF_TEAM: device is a team device 1674 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured 1675 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external 1676 * entity (i.e.
the master device for bridged veth) 1677 * @IFF_MACSEC: device is a MACsec device 1678 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook 1679 * @IFF_FAILOVER: device is a failover master device 1680 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device 1681 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device 1682 * @IFF_NO_ADDRCONF: prevent ipv6 addrconf 1683 * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with 1684 * skb_headlen(skb) == 0 (data starts from frag0) 1685 */ 1686 enum netdev_priv_flags { 1687 IFF_802_1Q_VLAN = 1<<0, 1688 IFF_EBRIDGE = 1<<1, 1689 IFF_BONDING = 1<<2, 1690 IFF_ISATAP = 1<<3, 1691 IFF_WAN_HDLC = 1<<4, 1692 IFF_XMIT_DST_RELEASE = 1<<5, 1693 IFF_DONT_BRIDGE = 1<<6, 1694 IFF_DISABLE_NETPOLL = 1<<7, 1695 IFF_MACVLAN_PORT = 1<<8, 1696 IFF_BRIDGE_PORT = 1<<9, 1697 IFF_OVS_DATAPATH = 1<<10, 1698 IFF_TX_SKB_SHARING = 1<<11, 1699 IFF_UNICAST_FLT = 1<<12, 1700 IFF_TEAM_PORT = 1<<13, 1701 IFF_SUPP_NOFCS = 1<<14, 1702 IFF_LIVE_ADDR_CHANGE = 1<<15, 1703 IFF_MACVLAN = 1<<16, 1704 IFF_XMIT_DST_RELEASE_PERM = 1<<17, 1705 IFF_L3MDEV_MASTER = 1<<18, 1706 IFF_NO_QUEUE = 1<<19, 1707 IFF_OPENVSWITCH = 1<<20, 1708 IFF_L3MDEV_SLAVE = 1<<21, 1709 IFF_TEAM = 1<<22, 1710 IFF_RXFH_CONFIGURED = 1<<23, 1711 IFF_PHONY_HEADROOM = 1<<24, 1712 IFF_MACSEC = 1<<25, 1713 IFF_NO_RX_HANDLER = 1<<26, 1714 IFF_FAILOVER = 1<<27, 1715 IFF_FAILOVER_SLAVE = 1<<28, 1716 IFF_L3MDEV_RX_HANDLER = 1<<29, 1717 IFF_NO_ADDRCONF = BIT_ULL(30), 1718 IFF_TX_SKB_NO_LINEAR = BIT_ULL(31), 1719 }; 1720 1721 /* Specifies the type of the struct net_device::ml_priv pointer */ 1722 enum netdev_ml_priv_type { 1723 ML_PRIV_NONE, 1724 ML_PRIV_CAN, 1725 }; 1726 1727 enum netdev_stat_type { 1728 NETDEV_PCPU_STAT_NONE, 1729 NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */ 1730 NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */ 1731 NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */ 1732 }; 1733 1734 enum netdev_reg_state { 1735 NETREG_UNINITIALIZED = 0, 1736 NETREG_REGISTERED, /* completed register_netdevice */ 1737 NETREG_UNREGISTERING, /* called unregister_netdevice */ 1738 NETREG_UNREGISTERED, /* completed unregister todo */ 1739 NETREG_RELEASED, /* called free_netdev */ 1740 NETREG_DUMMY, /* dummy device for NAPI poll */ 1741 }; 1742 1743 /** 1744 * struct net_device - The DEVICE structure. 1745 * 1746 * Actually, this whole structure is a big mistake. It mixes I/O 1747 * data with strictly "high-level" data, and it has to know about 1748 * almost every data structure used in the INET module. 1749 * 1750 * @priv_flags: flags invisible to userspace defined as bits, see 1751 * enum netdev_priv_flags for the definitions 1752 * @lltx: device supports lockless Tx. Deprecated for real HW 1753 * drivers. Mainly used by logical interfaces, such as 1754 * bonding and tunnels 1755 * 1756 * @name: This is the first field of the "visible" part of this structure 1757 * (i.e. as seen by users in the "Space.c" file). It is the name 1758 * of the interface. 
1759 * 1760 * @name_node: Name hashlist node 1761 * @ifalias: SNMP alias 1762 * @mem_end: Shared memory end 1763 * @mem_start: Shared memory start 1764 * @base_addr: Device I/O address 1765 * @irq: Device IRQ number 1766 * 1767 * @state: Generic network queuing layer state, see netdev_state_t 1768 * @dev_list: The global list of network devices 1769 * @napi_list: List entry used for polling NAPI devices 1770 * @unreg_list: List entry when we are unregistering the 1771 * device; see the function unregister_netdev 1772 * @close_list: List entry used when we are closing the device 1773 * @ptype_all: Device-specific packet handlers for all protocols 1774 * @ptype_specific: Device-specific, protocol-specific packet handlers 1775 * 1776 * @adj_list: Directly linked devices, like slaves for bonding 1777 * @features: Currently active device features 1778 * @hw_features: User-changeable features 1779 * 1780 * @wanted_features: User-requested features 1781 * @vlan_features: Mask of features inheritable by VLAN devices 1782 * 1783 * @hw_enc_features: Mask of features inherited by encapsulating devices 1784 * This field indicates what encapsulation 1785 * offloads the hardware is capable of doing, 1786 * and drivers will need to set them appropriately. 1787 * 1788 * @mpls_features: Mask of features inheritable by MPLS 1789 * @gso_partial_features: value(s) from NETIF_F_GSO\* 1790 * 1791 * @ifindex: interface index 1792 * @group: The group the device belongs to 1793 * 1794 * @stats: Statistics struct, which was left as a legacy, use 1795 * rtnl_link_stats64 instead 1796 * 1797 * @core_stats: core networking counters, 1798 * do not use this in drivers 1799 * @carrier_up_count: Number of times the carrier has been up 1800 * @carrier_down_count: Number of times the carrier has been down 1801 * 1802 * @wireless_handlers: List of functions to handle Wireless Extensions, 1803 * instead of ioctl, 1804 * see <net/iw_handler.h> for details. 1805 * 1806 * @netdev_ops: Includes several pointers to callbacks, 1807 * if one wants to override the ndo_*() functions 1808 * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks. 1809 * @xsk_tx_metadata_ops: Includes pointers to AF_XDP TX metadata callbacks. 1810 * @ethtool_ops: Management operations 1811 * @l3mdev_ops: Layer 3 master device operations 1812 * @ndisc_ops: Includes callbacks for different IPv6 neighbour 1813 * discovery handling. Necessary for e.g. 6LoWPAN. 1814 * @xfrmdev_ops: Transformation offload operations 1815 * @tlsdev_ops: Transport Layer Security offload operations 1816 * @header_ops: Includes callbacks for creating,parsing,caching,etc 1817 * of Layer 2 headers. 1818 * 1819 * @flags: Interface flags (a la BSD) 1820 * @xdp_features: XDP capability supported by the device 1821 * @gflags: Global flags ( kept as legacy ) 1822 * @priv_len: Size of the ->priv flexible array 1823 * @priv: Flexible array containing private data 1824 * @operstate: RFC2863 operstate 1825 * @link_mode: Mapping policy to operstate 1826 * @if_port: Selectable AUI, TP, ... 1827 * @dma: DMA channel 1828 * @mtu: Interface MTU value 1829 * @min_mtu: Interface Minimum MTU value 1830 * @max_mtu: Interface Maximum MTU value 1831 * @type: Interface hardware type 1832 * @hard_header_len: Maximum hardware header length. 
1833 * @min_header_len: Minimum hardware header length 1834 * 1835 * @needed_headroom: Extra headroom the hardware may need, but not in all 1836 * cases can this be guaranteed 1837 * @needed_tailroom: Extra tailroom the hardware may need, but not in all 1838 * cases can this be guaranteed. Some cases also use 1839 * LL_MAX_HEADER instead to allocate the skb 1840 * 1841 * interface address info: 1842 * 1843 * @perm_addr: Permanent hw address 1844 * @addr_assign_type: Hw address assignment type 1845 * @addr_len: Hardware address length 1846 * @upper_level: Maximum depth level of upper devices. 1847 * @lower_level: Maximum depth level of lower devices. 1848 * @neigh_priv_len: Used in neigh_alloc() 1849 * @dev_id: Used to differentiate devices that share 1850 * the same link layer address 1851 * @dev_port: Used to differentiate devices that share 1852 * the same function 1853 * @addr_list_lock: XXX: need comments on this one 1854 * @name_assign_type: network interface name assignment type 1855 * @uc_promisc: Counter that indicates promiscuous mode 1856 * has been enabled due to the need to listen to 1857 * additional unicast addresses in a device that 1858 * does not implement ndo_set_rx_mode() 1859 * @uc: unicast mac addresses 1860 * @mc: multicast mac addresses 1861 * @dev_addrs: list of device hw addresses 1862 * @queues_kset: Group of all Kobjects in the Tx and RX queues 1863 * @promiscuity: Number of times the NIC is told to work in 1864 * promiscuous mode; if it becomes 0 the NIC will 1865 * exit promiscuous mode 1866 * @allmulti: Counter, enables or disables allmulticast mode 1867 * 1868 * @vlan_info: VLAN info 1869 * @dsa_ptr: dsa specific data 1870 * @tipc_ptr: TIPC specific data 1871 * @atalk_ptr: AppleTalk link 1872 * @ip_ptr: IPv4 specific data 1873 * @ip6_ptr: IPv6 specific data 1874 * @ax25_ptr: AX.25 specific data 1875 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering 1876 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network 1877 * device struct 1878 * @mpls_ptr: mpls_dev struct pointer 1879 * @mctp_ptr: MCTP specific data 1880 * 1881 * @dev_addr: Hw address (before bcast, 1882 * because most packets are unicast) 1883 * 1884 * @_rx: Array of RX queues 1885 * @num_rx_queues: Number of RX queues 1886 * allocated at register_netdev() time 1887 * @real_num_rx_queues: Number of RX queues currently active in device 1888 * @xdp_prog: XDP sockets filter program pointer 1889 * 1890 * @rx_handler: handler for received packets 1891 * @rx_handler_data: XXX: need comments on this one 1892 * @tcx_ingress: BPF & clsact qdisc specific data for ingress processing 1893 * @ingress_queue: XXX: need comments on this one 1894 * @nf_hooks_ingress: netfilter hooks executed for ingress packets 1895 * @broadcast: hw bcast address 1896 * 1897 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, 1898 * indexed by RX queue number. Assigned by driver. 
1899 * This must only be set if the ndo_rx_flow_steer 1900 * operation is defined 1901 * @index_hlist: Device index hash chain 1902 * 1903 * @_tx: Array of TX queues 1904 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time 1905 * @real_num_tx_queues: Number of TX queues currently active in device 1906 * @qdisc: Root qdisc from userspace point of view 1907 * @tx_queue_len: Max frames per queue allowed 1908 * @tx_global_lock: XXX: need comments on this one 1909 * @xdp_bulkq: XDP device bulk queue 1910 * @xps_maps: all CPUs/RXQs maps for XPS device 1911 * 1912 * 1913 * @tcx_egress: BPF & clsact qdisc specific data for egress processing 1914 * @nf_hooks_egress: netfilter hooks executed for egress packets 1915 * @qdisc_hash: qdisc hash table 1916 * @watchdog_timeo: Represents the timeout that is used by 1917 * the watchdog (see dev_watchdog()) 1918 * @watchdog_timer: List of timers 1919 * 1920 * @proto_down_reason: reason a netdev interface is held down 1921 * @pcpu_refcnt: Number of references to this device 1922 * @dev_refcnt: Number of references to this device 1923 * @refcnt_tracker: Tracker directory for tracked references to this device 1924 * @todo_list: Delayed register/unregister 1925 * @link_watch_list: XXX: need comments on this one 1926 * 1927 * @reg_state: Register/unregister state machine 1928 * @dismantle: Device is going to be freed 1929 * @rtnl_link_state: This enum represents the phases of creating 1930 * a new link 1931 * 1932 * @needs_free_netdev: Should unregister perform free_netdev? 1933 * @priv_destructor: Called from unregister 1934 * @npinfo: XXX: need comments on this one 1935 * @nd_net: Network namespace this network device is inside 1936 * 1937 * @ml_priv: Mid-layer private 1938 * @ml_priv_type: Mid-layer private type 1939 * 1940 * @pcpu_stat_type: Type of device statistics which the core should 1941 * allocate/free: none, lstats, tstats, dstats. none 1942 * means the driver is handling statistics allocation/ 1943 * freeing internally. 1944 * @lstats: Loopback statistics: packets, bytes 1945 * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes 1946 * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes 1947 * 1948 * @garp_port: GARP 1949 * @mrp_port: MRP 1950 * 1951 * @dm_private: Drop monitor private 1952 * 1953 * @dev: Class/net/name entry 1954 * @sysfs_groups: Space for optional device, statistics and wireless 1955 * sysfs groups 1956 * 1957 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes 1958 * @rtnl_link_ops: Rtnl_link_ops 1959 * @stat_ops: Optional ops for queue-aware statistics 1960 * @queue_mgmt_ops: Optional ops for queue management 1961 * 1962 * @gso_max_size: Maximum size of generic segmentation offload 1963 * @tso_max_size: Device (as in HW) limit on the max TSO request size 1964 * @gso_max_segs: Maximum number of segments that can be passed to the 1965 * NIC for GSO 1966 * @tso_max_segs: Device (as in HW) limit on the max TSO segment count 1967 * @gso_ipv4_max_size: Maximum size of generic segmentation offload, 1968 * for IPv4.
1969 * 1970 * @dcbnl_ops: Data Center Bridging netlink ops 1971 * @num_tc: Number of traffic classes in the net device 1972 * @tc_to_txq: XXX: need comments on this one 1973 * @prio_tc_map: XXX: need comments on this one 1974 * 1975 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp 1976 * 1977 * @priomap: XXX: need comments on this one 1978 * @link_topo: Physical link topology tracking attached PHYs 1979 * @phydev: Physical device may attach itself 1980 * for hardware timestamping 1981 * @sfp_bus: attached &struct sfp_bus structure. 1982 * 1983 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock 1984 * 1985 * @proto_down: protocol port state information can be sent to the 1986 * switch driver and used to set the phys state of the 1987 * switch port. 1988 * 1989 * @threaded: napi threaded mode is enabled 1990 * 1991 * @see_all_hwtstamp_requests: device wants to see calls to 1992 * ndo_hwtstamp_set() for all timestamp requests 1993 * regardless of source, even if those aren't 1994 * HWTSTAMP_SOURCE_NETDEV 1995 * @change_proto_down: device supports setting carrier via IFLA_PROTO_DOWN 1996 * @netns_local: interface can't change network namespaces 1997 * @fcoe_mtu: device supports maximum FCoE MTU, 2158 bytes 1998 * 1999 * @net_notifier_list: List of per-net netdev notifier block 2000 * that follow this device when it is moved 2001 * to another network namespace. 2002 * 2003 * @macsec_ops: MACsec offloading ops 2004 * 2005 * @udp_tunnel_nic_info: static structure describing the UDP tunnel 2006 * offload capabilities of the device 2007 * @udp_tunnel_nic: UDP tunnel offload state 2008 * @ethtool: ethtool related state 2009 * @xdp_state: stores info on attached XDP BPF programs 2010 * 2011 * @nested_level: Used as a parameter of spin_lock_nested() of 2012 * dev->addr_list_lock. 2013 * @unlink_list: As netif_addr_lock() can be called recursively, 2014 * keep a list of interfaces to be deleted. 2015 * @gro_max_size: Maximum size of aggregated packet in generic 2016 * receive offload (GRO) 2017 * @gro_ipv4_max_size: Maximum size of aggregated packet in generic 2018 * receive offload (GRO), for IPv4. 2019 * @xdp_zc_max_segs: Maximum number of segments supported by AF_XDP 2020 * zero copy driver 2021 * 2022 * @dev_addr_shadow: Copy of @dev_addr to catch direct writes. 2023 * @linkwatch_dev_tracker: refcount tracker used by linkwatch. 2024 * @watchdog_dev_tracker: refcount tracker used by watchdog. 2025 * @dev_registered_tracker: tracker for reference held while 2026 * registered 2027 * @offload_xstats_l3: L3 HW stats for this netdevice. 2028 * 2029 * @devlink_port: Pointer to related devlink port structure. 2030 * Assigned by a driver before netdev registration using 2031 * SET_NETDEV_DEVLINK_PORT macro. This pointer is static 2032 * during the time netdevice is registered. 2033 * 2034 * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem, 2035 * where the clock is recovered. 2036 * 2037 * @max_pacing_offload_horizon: max EDT offload horizon in nsec. 2038 * @napi_config: An array of napi_config structures containing per-NAPI 2039 * settings. 2040 * @gro_flush_timeout: timeout for GRO layer in NAPI 2041 * @napi_defer_hard_irqs: If not zero, provides a counter that would 2042 * allow to avoid NIC hard IRQ, on busy queues. 2043 * 2044 * @neighbours: List heads pointing to this device's neighbours' 2045 * dev_list, one per address-family. 2046 * @hwprov: Tracks which PTP performs hardware packet time stamping. 
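 *
 * Most drivers keep their state in the trailing @priv area and retrieve it
 * with netdev_priv(). A minimal sketch ("struct my_priv" and "my_setup" are
 * hypothetical, not part of this header):
 *
 *	struct my_priv {
 *		struct napi_struct napi;
 *		void __iomem *regs;
 *	};
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "my%d",
 *			   NET_NAME_UNKNOWN, my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);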
2047 * 2048 * FIXME: cleanup struct net_device such that network protocol info 2049 * moves out. 2050 */ 2051 2052 struct net_device { 2053 /* Cacheline organization can be found documented in 2054 * Documentation/networking/net_cachelines/net_device.rst. 2055 * Please update the document when adding new fields. 2056 */ 2057 2058 /* TX read-mostly hotpath */ 2059 __cacheline_group_begin(net_device_read_tx); 2060 struct_group(priv_flags_fast, 2061 unsigned long priv_flags:32; 2062 unsigned long lltx:1; 2063 ); 2064 const struct net_device_ops *netdev_ops; 2065 const struct header_ops *header_ops; 2066 struct netdev_queue *_tx; 2067 netdev_features_t gso_partial_features; 2068 unsigned int real_num_tx_queues; 2069 unsigned int gso_max_size; 2070 unsigned int gso_ipv4_max_size; 2071 u16 gso_max_segs; 2072 s16 num_tc; 2073 /* Note : dev->mtu is often read without holding a lock. 2074 * Writers usually hold RTNL. 2075 * It is recommended to use READ_ONCE() to annotate the reads, 2076 * and to use WRITE_ONCE() to annotate the writes. 2077 */ 2078 unsigned int mtu; 2079 unsigned short needed_headroom; 2080 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; 2081 #ifdef CONFIG_XPS 2082 struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX]; 2083 #endif 2084 #ifdef CONFIG_NETFILTER_EGRESS 2085 struct nf_hook_entries __rcu *nf_hooks_egress; 2086 #endif 2087 #ifdef CONFIG_NET_XGRESS 2088 struct bpf_mprog_entry __rcu *tcx_egress; 2089 #endif 2090 __cacheline_group_end(net_device_read_tx); 2091 2092 /* TXRX read-mostly hotpath */ 2093 __cacheline_group_begin(net_device_read_txrx); 2094 union { 2095 struct pcpu_lstats __percpu *lstats; 2096 struct pcpu_sw_netstats __percpu *tstats; 2097 struct pcpu_dstats __percpu *dstats; 2098 }; 2099 unsigned long state; 2100 unsigned int flags; 2101 unsigned short hard_header_len; 2102 netdev_features_t features; 2103 struct inet6_dev __rcu *ip6_ptr; 2104 __cacheline_group_end(net_device_read_txrx); 2105 2106 /* RX read-mostly hotpath */ 2107 __cacheline_group_begin(net_device_read_rx); 2108 struct bpf_prog __rcu *xdp_prog; 2109 struct list_head ptype_specific; 2110 int ifindex; 2111 unsigned int real_num_rx_queues; 2112 struct netdev_rx_queue *_rx; 2113 unsigned int gro_max_size; 2114 unsigned int gro_ipv4_max_size; 2115 rx_handler_func_t __rcu *rx_handler; 2116 void __rcu *rx_handler_data; 2117 possible_net_t nd_net; 2118 #ifdef CONFIG_NETPOLL 2119 struct netpoll_info __rcu *npinfo; 2120 #endif 2121 #ifdef CONFIG_NET_XGRESS 2122 struct bpf_mprog_entry __rcu *tcx_ingress; 2123 #endif 2124 __cacheline_group_end(net_device_read_rx); 2125 2126 char name[IFNAMSIZ]; 2127 struct netdev_name_node *name_node; 2128 struct dev_ifalias __rcu *ifalias; 2129 /* 2130 * I/O specific fields 2131 * FIXME: Merge these and struct ifmap into one 2132 */ 2133 unsigned long mem_end; 2134 unsigned long mem_start; 2135 unsigned long base_addr; 2136 2137 /* 2138 * Some hardware also needs these fields (state,dev_list, 2139 * napi_list,unreg_list,close_list) but they are not 2140 * part of the usual set specified in Space.c. 
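 *
 * Per the locking note above for dev->mtu, lockless readers should annotate
 * their accesses; a sketch of such a read on a transmit path ("drop" is a
 * hypothetical label):
 *
 *	if (unlikely(skb->len > READ_ONCE(dev->mtu)))
 *		goto drop;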
2141 */ 2142 2143 2144 struct list_head dev_list; 2145 struct list_head napi_list; 2146 struct list_head unreg_list; 2147 struct list_head close_list; 2148 struct list_head ptype_all; 2149 2150 struct { 2151 struct list_head upper; 2152 struct list_head lower; 2153 } adj_list; 2154 2155 /* Read-mostly cache-line for fast-path access */ 2156 xdp_features_t xdp_features; 2157 const struct xdp_metadata_ops *xdp_metadata_ops; 2158 const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops; 2159 unsigned short gflags; 2160 2161 unsigned short needed_tailroom; 2162 2163 netdev_features_t hw_features; 2164 netdev_features_t wanted_features; 2165 netdev_features_t vlan_features; 2166 netdev_features_t hw_enc_features; 2167 netdev_features_t mpls_features; 2168 2169 unsigned int min_mtu; 2170 unsigned int max_mtu; 2171 unsigned short type; 2172 unsigned char min_header_len; 2173 unsigned char name_assign_type; 2174 2175 int group; 2176 2177 struct net_device_stats stats; /* not used by modern drivers */ 2178 2179 struct net_device_core_stats __percpu *core_stats; 2180 2181 /* Stats to monitor link on/off, flapping */ 2182 atomic_t carrier_up_count; 2183 atomic_t carrier_down_count; 2184 2185 #ifdef CONFIG_WIRELESS_EXT 2186 const struct iw_handler_def *wireless_handlers; 2187 #endif 2188 const struct ethtool_ops *ethtool_ops; 2189 #ifdef CONFIG_NET_L3_MASTER_DEV 2190 const struct l3mdev_ops *l3mdev_ops; 2191 #endif 2192 #if IS_ENABLED(CONFIG_IPV6) 2193 const struct ndisc_ops *ndisc_ops; 2194 #endif 2195 2196 #ifdef CONFIG_XFRM_OFFLOAD 2197 const struct xfrmdev_ops *xfrmdev_ops; 2198 #endif 2199 2200 #if IS_ENABLED(CONFIG_TLS_DEVICE) 2201 const struct tlsdev_ops *tlsdev_ops; 2202 #endif 2203 2204 unsigned int operstate; 2205 unsigned char link_mode; 2206 2207 unsigned char if_port; 2208 unsigned char dma; 2209 2210 /* Interface address info. 
*/ 2211 unsigned char perm_addr[MAX_ADDR_LEN]; 2212 unsigned char addr_assign_type; 2213 unsigned char addr_len; 2214 unsigned char upper_level; 2215 unsigned char lower_level; 2216 2217 unsigned short neigh_priv_len; 2218 unsigned short dev_id; 2219 unsigned short dev_port; 2220 int irq; 2221 u32 priv_len; 2222 2223 spinlock_t addr_list_lock; 2224 2225 struct netdev_hw_addr_list uc; 2226 struct netdev_hw_addr_list mc; 2227 struct netdev_hw_addr_list dev_addrs; 2228 2229 #ifdef CONFIG_SYSFS 2230 struct kset *queues_kset; 2231 #endif 2232 #ifdef CONFIG_LOCKDEP 2233 struct list_head unlink_list; 2234 #endif 2235 unsigned int promiscuity; 2236 unsigned int allmulti; 2237 bool uc_promisc; 2238 #ifdef CONFIG_LOCKDEP 2239 unsigned char nested_level; 2240 #endif 2241 2242 2243 /* Protocol-specific pointers */ 2244 struct in_device __rcu *ip_ptr; 2245 /** @fib_nh_head: nexthops associated with this netdev */ 2246 struct hlist_head fib_nh_head; 2247 2248 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2249 struct vlan_info __rcu *vlan_info; 2250 #endif 2251 #if IS_ENABLED(CONFIG_NET_DSA) 2252 struct dsa_port *dsa_ptr; 2253 #endif 2254 #if IS_ENABLED(CONFIG_TIPC) 2255 struct tipc_bearer __rcu *tipc_ptr; 2256 #endif 2257 #if IS_ENABLED(CONFIG_ATALK) 2258 void *atalk_ptr; 2259 #endif 2260 #if IS_ENABLED(CONFIG_AX25) 2261 struct ax25_dev __rcu *ax25_ptr; 2262 #endif 2263 #if IS_ENABLED(CONFIG_CFG80211) 2264 struct wireless_dev *ieee80211_ptr; 2265 #endif 2266 #if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN) 2267 struct wpan_dev *ieee802154_ptr; 2268 #endif 2269 #if IS_ENABLED(CONFIG_MPLS_ROUTING) 2270 struct mpls_dev __rcu *mpls_ptr; 2271 #endif 2272 #if IS_ENABLED(CONFIG_MCTP) 2273 struct mctp_dev __rcu *mctp_ptr; 2274 #endif 2275 2276 /* 2277 * Cache lines mostly used on receive path (including eth_type_trans()) 2278 */ 2279 /* Interface address info used in eth_type_trans() */ 2280 const unsigned char *dev_addr; 2281 2282 unsigned int num_rx_queues; 2283 #define GRO_LEGACY_MAX_SIZE 65536u 2284 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), 2285 * and shinfo->gso_segs is a 16bit field. 2286 */ 2287 #define GRO_MAX_SIZE (8 * 65535u) 2288 unsigned int xdp_zc_max_segs; 2289 struct netdev_queue __rcu *ingress_queue; 2290 #ifdef CONFIG_NETFILTER_INGRESS 2291 struct nf_hook_entries __rcu *nf_hooks_ingress; 2292 #endif 2293 2294 unsigned char broadcast[MAX_ADDR_LEN]; 2295 #ifdef CONFIG_RFS_ACCEL 2296 struct cpu_rmap *rx_cpu_rmap; 2297 #endif 2298 struct hlist_node index_hlist; 2299 2300 /* 2301 * Cache lines mostly used on transmit path 2302 */ 2303 unsigned int num_tx_queues; 2304 struct Qdisc __rcu *qdisc; 2305 unsigned int tx_queue_len; 2306 spinlock_t tx_global_lock; 2307 2308 struct xdp_dev_bulk_queue __percpu *xdp_bulkq; 2309 2310 #ifdef CONFIG_NET_SCHED 2311 DECLARE_HASHTABLE (qdisc_hash, 4); 2312 #endif 2313 /* These may be needed for future network-power-down code. 
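 *
 * Drivers arm the transmit watchdog by picking a timeout before
 * registration; dev_watchdog() then invokes the driver's ndo_tx_timeout()
 * when a stopped queue stalls longer than that. A sketch:
 *
 *	dev->watchdog_timeo = 5 * HZ;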
*/ 2314 struct timer_list watchdog_timer; 2315 int watchdog_timeo; 2316 2317 u32 proto_down_reason; 2318 2319 struct list_head todo_list; 2320 2321 #ifdef CONFIG_PCPU_DEV_REFCNT 2322 int __percpu *pcpu_refcnt; 2323 #else 2324 refcount_t dev_refcnt; 2325 #endif 2326 struct ref_tracker_dir refcnt_tracker; 2327 2328 struct list_head link_watch_list; 2329 2330 u8 reg_state; 2331 2332 bool dismantle; 2333 2334 enum { 2335 RTNL_LINK_INITIALIZED, 2336 RTNL_LINK_INITIALIZING, 2337 } rtnl_link_state:16; 2338 2339 bool needs_free_netdev; 2340 void (*priv_destructor)(struct net_device *dev); 2341 2342 /* mid-layer private */ 2343 void *ml_priv; 2344 enum netdev_ml_priv_type ml_priv_type; 2345 2346 enum netdev_stat_type pcpu_stat_type:8; 2347 2348 #if IS_ENABLED(CONFIG_GARP) 2349 struct garp_port __rcu *garp_port; 2350 #endif 2351 #if IS_ENABLED(CONFIG_MRP) 2352 struct mrp_port __rcu *mrp_port; 2353 #endif 2354 #if IS_ENABLED(CONFIG_NET_DROP_MONITOR) 2355 struct dm_hw_stat_delta __rcu *dm_private; 2356 #endif 2357 struct device dev; 2358 const struct attribute_group *sysfs_groups[4]; 2359 const struct attribute_group *sysfs_rx_queue_group; 2360 2361 const struct rtnl_link_ops *rtnl_link_ops; 2362 2363 const struct netdev_stat_ops *stat_ops; 2364 2365 const struct netdev_queue_mgmt_ops *queue_mgmt_ops; 2366 2367 /* for setting kernel sock attribute on TCP connection setup */ 2368 #define GSO_MAX_SEGS 65535u 2369 #define GSO_LEGACY_MAX_SIZE 65536u 2370 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), 2371 * and shinfo->gso_segs is a 16bit field. 2372 */ 2373 #define GSO_MAX_SIZE (8 * GSO_MAX_SEGS) 2374 2375 #define TSO_LEGACY_MAX_SIZE 65536 2376 #define TSO_MAX_SIZE UINT_MAX 2377 unsigned int tso_max_size; 2378 #define TSO_MAX_SEGS U16_MAX 2379 u16 tso_max_segs; 2380 2381 #ifdef CONFIG_DCB 2382 const struct dcbnl_rtnl_ops *dcbnl_ops; 2383 #endif 2384 u8 prio_tc_map[TC_BITMASK + 1]; 2385 2386 #if IS_ENABLED(CONFIG_FCOE) 2387 unsigned int fcoe_ddp_xid; 2388 #endif 2389 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 2390 struct netprio_map __rcu *priomap; 2391 #endif 2392 struct phy_link_topology *link_topo; 2393 struct phy_device *phydev; 2394 struct sfp_bus *sfp_bus; 2395 struct lock_class_key *qdisc_tx_busylock; 2396 bool proto_down; 2397 bool threaded; 2398 2399 /* priv_flags_slow, ungrouped to save space */ 2400 unsigned long see_all_hwtstamp_requests:1; 2401 unsigned long change_proto_down:1; 2402 unsigned long netns_local:1; 2403 unsigned long fcoe_mtu:1; 2404 2405 struct list_head net_notifier_list; 2406 2407 #if IS_ENABLED(CONFIG_MACSEC) 2408 /* MACsec management functions */ 2409 const struct macsec_ops *macsec_ops; 2410 #endif 2411 const struct udp_tunnel_nic_info *udp_tunnel_nic_info; 2412 struct udp_tunnel_nic *udp_tunnel_nic; 2413 2414 /** @cfg: net_device queue-related configuration */ 2415 struct netdev_config *cfg; 2416 /** 2417 * @cfg_pending: same as @cfg but when device is being actively 2418 * reconfigured includes any changes to the configuration 2419 * requested by the user, but which may or may not be rejected. 
2420 */ 2421 struct netdev_config *cfg_pending; 2422 struct ethtool_netdev_state *ethtool; 2423 2424 /* protected by rtnl_lock */ 2425 struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; 2426 2427 u8 dev_addr_shadow[MAX_ADDR_LEN]; 2428 netdevice_tracker linkwatch_dev_tracker; 2429 netdevice_tracker watchdog_dev_tracker; 2430 netdevice_tracker dev_registered_tracker; 2431 struct rtnl_hw_stats64 *offload_xstats_l3; 2432 2433 struct devlink_port *devlink_port; 2434 2435 #if IS_ENABLED(CONFIG_DPLL) 2436 struct dpll_pin __rcu *dpll_pin; 2437 #endif 2438 #if IS_ENABLED(CONFIG_PAGE_POOL) 2439 /** @page_pools: page pools created for this netdevice */ 2440 struct hlist_head page_pools; 2441 #endif 2442 2443 /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */ 2444 struct dim_irq_moder *irq_moder; 2445 2446 u64 max_pacing_offload_horizon; 2447 struct napi_config *napi_config; 2448 unsigned long gro_flush_timeout; 2449 u32 napi_defer_hard_irqs; 2450 2451 /** 2452 * @up: copy of @state's IFF_UP, but safe to read with just @lock. 2453 * May report false negatives while the device is being opened 2454 * or closed (@lock does not protect .ndo_open, or .ndo_close). 2455 */ 2456 bool up; 2457 2458 /** 2459 * @lock: netdev-scope lock, protects a small selection of fields. 2460 * Should always be taken using netdev_lock() / netdev_unlock() helpers. 2461 * Drivers are free to use it for other protection. 2462 * 2463 * Protects: 2464 * @gro_flush_timeout, @napi_defer_hard_irqs, @napi_list, 2465 * @net_shaper_hierarchy, @reg_state, @threaded 2466 * 2467 * Partially protects (writers must hold both @lock and rtnl_lock): 2468 * @up 2469 * 2470 * Also protects some fields in struct napi_struct. 2471 * 2472 * Ordering: take after rtnl_lock. 2473 */ 2474 struct mutex lock; 2475 2476 #if IS_ENABLED(CONFIG_NET_SHAPER) 2477 /** 2478 * @net_shaper_hierarchy: data tracking the current shaper status 2479 * see include/net/net_shapers.h 2480 */ 2481 struct net_shaper_hierarchy *net_shaper_hierarchy; 2482 #endif 2483 2484 struct hlist_head neighbours[NEIGH_NR_TABLES]; 2485 2486 struct hwtstamp_provider __rcu *hwprov; 2487 2488 u8 priv[] ____cacheline_aligned 2489 __counted_by(priv_len); 2490 } ____cacheline_aligned; 2491 #define to_net_dev(d) container_of(d, struct net_device, dev) 2492 2493 /* 2494 * Driver should use this to assign devlink port instance to a netdevice 2495 * before it registers the netdevice. Therefore devlink_port is static 2496 * during the netdev lifetime after it is registered. 
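 *
 * A probe-time sketch ("priv", a driver structure embedding a devlink_port,
 * is hypothetical):
 *
 *	SET_NETDEV_DEVLINK_PORT(netdev, &priv->devlink_port);
 *	err = register_netdev(netdev);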
2497 */ 2498 #define SET_NETDEV_DEVLINK_PORT(dev, port) \ 2499 ({ \ 2500 WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \ 2501 ((dev)->devlink_port = (port)); \ 2502 }) 2503 2504 static inline bool netif_elide_gro(const struct net_device *dev) 2505 { 2506 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) 2507 return true; 2508 return false; 2509 } 2510 2511 #define NETDEV_ALIGN 32 2512 2513 static inline 2514 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) 2515 { 2516 return dev->prio_tc_map[prio & TC_BITMASK]; 2517 } 2518 2519 static inline 2520 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) 2521 { 2522 if (tc >= dev->num_tc) 2523 return -EINVAL; 2524 2525 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; 2526 return 0; 2527 } 2528 2529 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); 2530 void netdev_reset_tc(struct net_device *dev); 2531 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); 2532 int netdev_set_num_tc(struct net_device *dev, u8 num_tc); 2533 2534 static inline 2535 int netdev_get_num_tc(struct net_device *dev) 2536 { 2537 return dev->num_tc; 2538 } 2539 2540 static inline void net_prefetch(void *p) 2541 { 2542 prefetch(p); 2543 #if L1_CACHE_BYTES < 128 2544 prefetch((u8 *)p + L1_CACHE_BYTES); 2545 #endif 2546 } 2547 2548 static inline void net_prefetchw(void *p) 2549 { 2550 prefetchw(p); 2551 #if L1_CACHE_BYTES < 128 2552 prefetchw((u8 *)p + L1_CACHE_BYTES); 2553 #endif 2554 } 2555 2556 void netdev_unbind_sb_channel(struct net_device *dev, 2557 struct net_device *sb_dev); 2558 int netdev_bind_sb_channel_queue(struct net_device *dev, 2559 struct net_device *sb_dev, 2560 u8 tc, u16 count, u16 offset); 2561 int netdev_set_sb_channel(struct net_device *dev, u16 channel); 2562 static inline int netdev_get_sb_channel(struct net_device *dev) 2563 { 2564 return max_t(int, -dev->num_tc, 0); 2565 } 2566 2567 static inline 2568 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, 2569 unsigned int index) 2570 { 2571 DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues); 2572 return &dev->_tx[index]; 2573 } 2574 2575 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, 2576 const struct sk_buff *skb) 2577 { 2578 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 2579 } 2580 2581 static inline void netdev_for_each_tx_queue(struct net_device *dev, 2582 void (*f)(struct net_device *, 2583 struct netdev_queue *, 2584 void *), 2585 void *arg) 2586 { 2587 unsigned int i; 2588 2589 for (i = 0; i < dev->num_tx_queues; i++) 2590 f(dev, &dev->_tx[i], arg); 2591 } 2592 2593 #define netdev_lockdep_set_classes(dev) \ 2594 { \ 2595 static struct lock_class_key qdisc_tx_busylock_key; \ 2596 static struct lock_class_key qdisc_xmit_lock_key; \ 2597 static struct lock_class_key dev_addr_list_lock_key; \ 2598 unsigned int i; \ 2599 \ 2600 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ 2601 lockdep_set_class(&(dev)->addr_list_lock, \ 2602 &dev_addr_list_lock_key); \ 2603 for (i = 0; i < (dev)->num_tx_queues; i++) \ 2604 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ 2605 &qdisc_xmit_lock_key); \ 2606 } 2607 2608 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 2609 struct net_device *sb_dev); 2610 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, 2611 struct sk_buff *skb, 2612 struct net_device *sb_dev); 2613 2614 /* returns the headroom that the master device needs to take in account 2615 * when forwarding to this dev 2616 
*/ 2617 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev) 2618 { 2619 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom; 2620 } 2621 2622 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr) 2623 { 2624 if (dev->netdev_ops->ndo_set_rx_headroom) 2625 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); 2626 } 2627 2628 /* set the device rx headroom to the dev's default */ 2629 static inline void netdev_reset_rx_headroom(struct net_device *dev) 2630 { 2631 netdev_set_rx_headroom(dev, -1); 2632 } 2633 2634 static inline void *netdev_get_ml_priv(struct net_device *dev, 2635 enum netdev_ml_priv_type type) 2636 { 2637 if (dev->ml_priv_type != type) 2638 return NULL; 2639 2640 return dev->ml_priv; 2641 } 2642 2643 static inline void netdev_set_ml_priv(struct net_device *dev, 2644 void *ml_priv, 2645 enum netdev_ml_priv_type type) 2646 { 2647 WARN(dev->ml_priv_type && dev->ml_priv_type != type, 2648 "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n", 2649 dev->ml_priv_type, type); 2650 WARN(!dev->ml_priv_type && dev->ml_priv, 2651 "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n"); 2652 2653 dev->ml_priv = ml_priv; 2654 dev->ml_priv_type = type; 2655 } 2656 2657 /* 2658 * Net namespace inlines 2659 */ 2660 static inline 2661 struct net *dev_net(const struct net_device *dev) 2662 { 2663 return read_pnet(&dev->nd_net); 2664 } 2665 2666 static inline 2667 struct net *dev_net_rcu(const struct net_device *dev) 2668 { 2669 return read_pnet_rcu(&dev->nd_net); 2670 } 2671 2672 static inline 2673 void dev_net_set(struct net_device *dev, struct net *net) 2674 { 2675 write_pnet(&dev->nd_net, net); 2676 } 2677 2678 /** 2679 * netdev_priv - access network device private data 2680 * @dev: network device 2681 * 2682 * Get network device private data 2683 */ 2684 static inline void *netdev_priv(const struct net_device *dev) 2685 { 2686 return (void *)dev->priv; 2687 } 2688 2689 /* Set the sysfs physical device reference for the network logical device 2690 * if set prior to registration will cause a symlink during initialization. 2691 */ 2692 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) 2693 2694 /* Set the sysfs device type for the network logical device to allow 2695 * fine-grained identification of different network device types. For 2696 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc. 
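 *
 * Typical use just before registration (a sketch; the "eth_type" object is
 * illustrative):
 *
 *	static const struct device_type eth_type = { .name = "ethernet" };
 *
 *	SET_NETDEV_DEV(netdev, &pdev->dev);
 *	SET_NETDEV_DEVTYPE(netdev, &eth_type);
 *	err = register_netdev(netdev);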
2697 */ 2698 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) 2699 2700 void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index, 2701 enum netdev_queue_type type, 2702 struct napi_struct *napi); 2703 2704 static inline void netdev_lock(struct net_device *dev) 2705 { 2706 mutex_lock(&dev->lock); 2707 } 2708 2709 static inline void netdev_unlock(struct net_device *dev) 2710 { 2711 mutex_unlock(&dev->lock); 2712 } 2713 2714 static inline void netdev_assert_locked(struct net_device *dev) 2715 { 2716 lockdep_assert_held(&dev->lock); 2717 } 2718 2719 static inline void netdev_assert_locked_or_invisible(struct net_device *dev) 2720 { 2721 if (dev->reg_state == NETREG_REGISTERED || 2722 dev->reg_state == NETREG_UNREGISTERING) 2723 netdev_assert_locked(dev); 2724 } 2725 2726 static inline void netif_napi_set_irq_locked(struct napi_struct *napi, int irq) 2727 { 2728 napi->irq = irq; 2729 } 2730 2731 static inline void netif_napi_set_irq(struct napi_struct *napi, int irq) 2732 { 2733 netdev_lock(napi->dev); 2734 netif_napi_set_irq_locked(napi, irq); 2735 netdev_unlock(napi->dev); 2736 } 2737 2738 /* Default NAPI poll() weight 2739 * Device drivers are strongly advised to not use bigger value 2740 */ 2741 #define NAPI_POLL_WEIGHT 64 2742 2743 void netif_napi_add_weight_locked(struct net_device *dev, 2744 struct napi_struct *napi, 2745 int (*poll)(struct napi_struct *, int), 2746 int weight); 2747 2748 static inline void 2749 netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, 2750 int (*poll)(struct napi_struct *, int), int weight) 2751 { 2752 netdev_lock(dev); 2753 netif_napi_add_weight_locked(dev, napi, poll, weight); 2754 netdev_unlock(dev); 2755 } 2756 2757 /** 2758 * netif_napi_add() - initialize a NAPI context 2759 * @dev: network device 2760 * @napi: NAPI context 2761 * @poll: polling function 2762 * 2763 * netif_napi_add() must be used to initialize a NAPI context prior to calling 2764 * *any* of the other NAPI-related functions. 
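 *
 * A sketch of the usual pairing with a driver poll function ("my_poll",
 * "my_clean_rx" and "priv" are hypothetical):
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = my_clean_rx(napi, budget);
 *
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 *
 *	netif_napi_add(dev, &priv->napi, my_poll);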
2765 */ 2766 static inline void 2767 netif_napi_add(struct net_device *dev, struct napi_struct *napi, 2768 int (*poll)(struct napi_struct *, int)) 2769 { 2770 netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT); 2771 } 2772 2773 static inline void 2774 netif_napi_add_locked(struct net_device *dev, struct napi_struct *napi, 2775 int (*poll)(struct napi_struct *, int)) 2776 { 2777 netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT); 2778 } 2779 2780 static inline void 2781 netif_napi_add_tx_weight(struct net_device *dev, 2782 struct napi_struct *napi, 2783 int (*poll)(struct napi_struct *, int), 2784 int weight) 2785 { 2786 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); 2787 netif_napi_add_weight(dev, napi, poll, weight); 2788 } 2789 2790 static inline void 2791 netif_napi_add_config_locked(struct net_device *dev, struct napi_struct *napi, 2792 int (*poll)(struct napi_struct *, int), int index) 2793 { 2794 napi->index = index; 2795 napi->config = &dev->napi_config[index]; 2796 netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT); 2797 } 2798 2799 /** 2800 * netif_napi_add_config - initialize a NAPI context with persistent config 2801 * @dev: network device 2802 * @napi: NAPI context 2803 * @poll: polling function 2804 * @index: the NAPI index 2805 */ 2806 static inline void 2807 netif_napi_add_config(struct net_device *dev, struct napi_struct *napi, 2808 int (*poll)(struct napi_struct *, int), int index) 2809 { 2810 netdev_lock(dev); 2811 netif_napi_add_config_locked(dev, napi, poll, index); 2812 netdev_unlock(dev); 2813 } 2814 2815 /** 2816 * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only 2817 * @dev: network device 2818 * @napi: NAPI context 2819 * @poll: polling function 2820 * 2821 * This variant of netif_napi_add() should be used from drivers using NAPI 2822 * to exclusively poll a TX queue. 2823 * This will avoid we add it into napi_hash[], thus polluting this hash table. 2824 */ 2825 static inline void netif_napi_add_tx(struct net_device *dev, 2826 struct napi_struct *napi, 2827 int (*poll)(struct napi_struct *, int)) 2828 { 2829 netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT); 2830 } 2831 2832 void __netif_napi_del_locked(struct napi_struct *napi); 2833 2834 /** 2835 * __netif_napi_del - remove a NAPI context 2836 * @napi: NAPI context 2837 * 2838 * Warning: caller must observe RCU grace period before freeing memory 2839 * containing @napi. Drivers might want to call this helper to combine 2840 * all the needed RCU grace periods into a single one. 2841 */ 2842 static inline void __netif_napi_del(struct napi_struct *napi) 2843 { 2844 netdev_lock(napi->dev); 2845 __netif_napi_del_locked(napi); 2846 netdev_unlock(napi->dev); 2847 } 2848 2849 static inline void netif_napi_del_locked(struct napi_struct *napi) 2850 { 2851 __netif_napi_del_locked(napi); 2852 synchronize_net(); 2853 } 2854 2855 /** 2856 * netif_napi_del - remove a NAPI context 2857 * @napi: NAPI context 2858 * 2859 * netif_napi_del() removes a NAPI context from the network device NAPI list 2860 */ 2861 static inline void netif_napi_del(struct napi_struct *napi) 2862 { 2863 __netif_napi_del(napi); 2864 synchronize_net(); 2865 } 2866 2867 struct packet_type { 2868 __be16 type; /* This is really htons(ether_type). 
*/ 2869 bool ignore_outgoing; 2870 struct net_device *dev; /* NULL is wildcarded here */ 2871 netdevice_tracker dev_tracker; 2872 int (*func) (struct sk_buff *, 2873 struct net_device *, 2874 struct packet_type *, 2875 struct net_device *); 2876 void (*list_func) (struct list_head *, 2877 struct packet_type *, 2878 struct net_device *); 2879 bool (*id_match)(struct packet_type *ptype, 2880 struct sock *sk); 2881 struct net *af_packet_net; 2882 void *af_packet_priv; 2883 struct list_head list; 2884 }; 2885 2886 struct offload_callbacks { 2887 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 2888 netdev_features_t features); 2889 struct sk_buff *(*gro_receive)(struct list_head *head, 2890 struct sk_buff *skb); 2891 int (*gro_complete)(struct sk_buff *skb, int nhoff); 2892 }; 2893 2894 struct packet_offload { 2895 __be16 type; /* This is really htons(ether_type). */ 2896 u16 priority; 2897 struct offload_callbacks callbacks; 2898 struct list_head list; 2899 }; 2900 2901 /* often modified stats are per-CPU, other are shared (netdev->stats) */ 2902 struct pcpu_sw_netstats { 2903 u64_stats_t rx_packets; 2904 u64_stats_t rx_bytes; 2905 u64_stats_t tx_packets; 2906 u64_stats_t tx_bytes; 2907 struct u64_stats_sync syncp; 2908 } __aligned(4 * sizeof(u64)); 2909 2910 struct pcpu_dstats { 2911 u64_stats_t rx_packets; 2912 u64_stats_t rx_bytes; 2913 u64_stats_t tx_packets; 2914 u64_stats_t tx_bytes; 2915 u64_stats_t rx_drops; 2916 u64_stats_t tx_drops; 2917 struct u64_stats_sync syncp; 2918 } __aligned(8 * sizeof(u64)); 2919 2920 struct pcpu_lstats { 2921 u64_stats_t packets; 2922 u64_stats_t bytes; 2923 struct u64_stats_sync syncp; 2924 } __aligned(2 * sizeof(u64)); 2925 2926 void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes); 2927 2928 static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len) 2929 { 2930 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 2931 2932 u64_stats_update_begin(&tstats->syncp); 2933 u64_stats_add(&tstats->rx_bytes, len); 2934 u64_stats_inc(&tstats->rx_packets); 2935 u64_stats_update_end(&tstats->syncp); 2936 } 2937 2938 static inline void dev_sw_netstats_tx_add(struct net_device *dev, 2939 unsigned int packets, 2940 unsigned int len) 2941 { 2942 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 2943 2944 u64_stats_update_begin(&tstats->syncp); 2945 u64_stats_add(&tstats->tx_bytes, len); 2946 u64_stats_add(&tstats->tx_packets, packets); 2947 u64_stats_update_end(&tstats->syncp); 2948 } 2949 2950 static inline void dev_lstats_add(struct net_device *dev, unsigned int len) 2951 { 2952 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); 2953 2954 u64_stats_update_begin(&lstats->syncp); 2955 u64_stats_add(&lstats->bytes, len); 2956 u64_stats_inc(&lstats->packets); 2957 u64_stats_update_end(&lstats->syncp); 2958 } 2959 2960 static inline void dev_dstats_rx_add(struct net_device *dev, 2961 unsigned int len) 2962 { 2963 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); 2964 2965 u64_stats_update_begin(&dstats->syncp); 2966 u64_stats_inc(&dstats->rx_packets); 2967 u64_stats_add(&dstats->rx_bytes, len); 2968 u64_stats_update_end(&dstats->syncp); 2969 } 2970 2971 static inline void dev_dstats_rx_dropped(struct net_device *dev) 2972 { 2973 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); 2974 2975 u64_stats_update_begin(&dstats->syncp); 2976 u64_stats_inc(&dstats->rx_drops); 2977 u64_stats_update_end(&dstats->syncp); 2978 } 2979 2980 static inline void dev_dstats_tx_add(struct net_device *dev, 2981 
unsigned int len) 2982 { 2983 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); 2984 2985 u64_stats_update_begin(&dstats->syncp); 2986 u64_stats_inc(&dstats->tx_packets); 2987 u64_stats_add(&dstats->tx_bytes, len); 2988 u64_stats_update_end(&dstats->syncp); 2989 } 2990 2991 static inline void dev_dstats_tx_dropped(struct net_device *dev) 2992 { 2993 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); 2994 2995 u64_stats_update_begin(&dstats->syncp); 2996 u64_stats_inc(&dstats->tx_drops); 2997 u64_stats_update_end(&dstats->syncp); 2998 } 2999 3000 #define __netdev_alloc_pcpu_stats(type, gfp) \ 3001 ({ \ 3002 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ 3003 if (pcpu_stats) { \ 3004 int __cpu; \ 3005 for_each_possible_cpu(__cpu) { \ 3006 typeof(type) *stat; \ 3007 stat = per_cpu_ptr(pcpu_stats, __cpu); \ 3008 u64_stats_init(&stat->syncp); \ 3009 } \ 3010 } \ 3011 pcpu_stats; \ 3012 }) 3013 3014 #define netdev_alloc_pcpu_stats(type) \ 3015 __netdev_alloc_pcpu_stats(type, GFP_KERNEL) 3016 3017 #define devm_netdev_alloc_pcpu_stats(dev, type) \ 3018 ({ \ 3019 typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\ 3020 if (pcpu_stats) { \ 3021 int __cpu; \ 3022 for_each_possible_cpu(__cpu) { \ 3023 typeof(type) *stat; \ 3024 stat = per_cpu_ptr(pcpu_stats, __cpu); \ 3025 u64_stats_init(&stat->syncp); \ 3026 } \ 3027 } \ 3028 pcpu_stats; \ 3029 }) 3030 3031 enum netdev_lag_tx_type { 3032 NETDEV_LAG_TX_TYPE_UNKNOWN, 3033 NETDEV_LAG_TX_TYPE_RANDOM, 3034 NETDEV_LAG_TX_TYPE_BROADCAST, 3035 NETDEV_LAG_TX_TYPE_ROUNDROBIN, 3036 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, 3037 NETDEV_LAG_TX_TYPE_HASH, 3038 }; 3039 3040 enum netdev_lag_hash { 3041 NETDEV_LAG_HASH_NONE, 3042 NETDEV_LAG_HASH_L2, 3043 NETDEV_LAG_HASH_L34, 3044 NETDEV_LAG_HASH_L23, 3045 NETDEV_LAG_HASH_E23, 3046 NETDEV_LAG_HASH_E34, 3047 NETDEV_LAG_HASH_VLAN_SRCMAC, 3048 NETDEV_LAG_HASH_UNKNOWN, 3049 }; 3050 3051 struct netdev_lag_upper_info { 3052 enum netdev_lag_tx_type tx_type; 3053 enum netdev_lag_hash hash_type; 3054 }; 3055 3056 struct netdev_lag_lower_state_info { 3057 u8 link_up : 1, 3058 tx_enabled : 1; 3059 }; 3060 3061 #include <linux/notifier.h> 3062 3063 /* netdevice notifier chain. Please remember to update netdev_cmd_to_name() 3064 * and the rtnetlink notification exclusion list in rtnetlink_event() when 3065 * adding new types. 
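 *
 * A minimal subscriber sketch ("my_netdev_event" and "my_nb" are
 * hypothetical):
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_CHANGEMTU)
 *			netdev_info(dev, "MTU is now %u\n", READ_ONCE(dev->mtu));
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&my_nb);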
3066 */ 3067 enum netdev_cmd { 3068 NETDEV_UP = 1, /* For now you can't veto a device up/down */ 3069 NETDEV_DOWN, 3070 NETDEV_REBOOT, /* Tell a protocol stack a network interface 3071 detected a hardware crash and restarted 3072 - we can use this eg to kick tcp sessions 3073 once done */ 3074 NETDEV_CHANGE, /* Notify device state change */ 3075 NETDEV_REGISTER, 3076 NETDEV_UNREGISTER, 3077 NETDEV_CHANGEMTU, /* notify after mtu change happened */ 3078 NETDEV_CHANGEADDR, /* notify after the address change */ 3079 NETDEV_PRE_CHANGEADDR, /* notify before the address change */ 3080 NETDEV_GOING_DOWN, 3081 NETDEV_CHANGENAME, 3082 NETDEV_FEAT_CHANGE, 3083 NETDEV_BONDING_FAILOVER, 3084 NETDEV_PRE_UP, 3085 NETDEV_PRE_TYPE_CHANGE, 3086 NETDEV_POST_TYPE_CHANGE, 3087 NETDEV_POST_INIT, 3088 NETDEV_PRE_UNINIT, 3089 NETDEV_RELEASE, 3090 NETDEV_NOTIFY_PEERS, 3091 NETDEV_JOIN, 3092 NETDEV_CHANGEUPPER, 3093 NETDEV_RESEND_IGMP, 3094 NETDEV_PRECHANGEMTU, /* notify before mtu change happened */ 3095 NETDEV_CHANGEINFODATA, 3096 NETDEV_BONDING_INFO, 3097 NETDEV_PRECHANGEUPPER, 3098 NETDEV_CHANGELOWERSTATE, 3099 NETDEV_UDP_TUNNEL_PUSH_INFO, 3100 NETDEV_UDP_TUNNEL_DROP_INFO, 3101 NETDEV_CHANGE_TX_QUEUE_LEN, 3102 NETDEV_CVLAN_FILTER_PUSH_INFO, 3103 NETDEV_CVLAN_FILTER_DROP_INFO, 3104 NETDEV_SVLAN_FILTER_PUSH_INFO, 3105 NETDEV_SVLAN_FILTER_DROP_INFO, 3106 NETDEV_OFFLOAD_XSTATS_ENABLE, 3107 NETDEV_OFFLOAD_XSTATS_DISABLE, 3108 NETDEV_OFFLOAD_XSTATS_REPORT_USED, 3109 NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, 3110 NETDEV_XDP_FEAT_CHANGE, 3111 }; 3112 const char *netdev_cmd_to_name(enum netdev_cmd cmd); 3113 3114 int register_netdevice_notifier(struct notifier_block *nb); 3115 int unregister_netdevice_notifier(struct notifier_block *nb); 3116 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb); 3117 int unregister_netdevice_notifier_net(struct net *net, 3118 struct notifier_block *nb); 3119 int register_netdevice_notifier_dev_net(struct net_device *dev, 3120 struct notifier_block *nb, 3121 struct netdev_net_notifier *nn); 3122 int unregister_netdevice_notifier_dev_net(struct net_device *dev, 3123 struct notifier_block *nb, 3124 struct netdev_net_notifier *nn); 3125 3126 struct netdev_notifier_info { 3127 struct net_device *dev; 3128 struct netlink_ext_ack *extack; 3129 }; 3130 3131 struct netdev_notifier_info_ext { 3132 struct netdev_notifier_info info; /* must be first */ 3133 union { 3134 u32 mtu; 3135 } ext; 3136 }; 3137 3138 struct netdev_notifier_change_info { 3139 struct netdev_notifier_info info; /* must be first */ 3140 unsigned int flags_changed; 3141 }; 3142 3143 struct netdev_notifier_changeupper_info { 3144 struct netdev_notifier_info info; /* must be first */ 3145 struct net_device *upper_dev; /* new upper dev */ 3146 bool master; /* is upper dev master */ 3147 bool linking; /* is the notification for link or unlink */ 3148 void *upper_info; /* upper dev info */ 3149 }; 3150 3151 struct netdev_notifier_changelowerstate_info { 3152 struct netdev_notifier_info info; /* must be first */ 3153 void *lower_state_info; /* is lower dev state */ 3154 }; 3155 3156 struct netdev_notifier_pre_changeaddr_info { 3157 struct netdev_notifier_info info; /* must be first */ 3158 const unsigned char *dev_addr; 3159 }; 3160 3161 enum netdev_offload_xstats_type { 3162 NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, 3163 }; 3164 3165 struct netdev_notifier_offload_xstats_info { 3166 struct netdev_notifier_info info; /* must be first */ 3167 enum netdev_offload_xstats_type type; 3168 3169 union { 3170 /* 
NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */ 3171 struct netdev_notifier_offload_xstats_rd *report_delta; 3172 /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */ 3173 struct netdev_notifier_offload_xstats_ru *report_used; 3174 }; 3175 }; 3176 3177 int netdev_offload_xstats_enable(struct net_device *dev, 3178 enum netdev_offload_xstats_type type, 3179 struct netlink_ext_ack *extack); 3180 int netdev_offload_xstats_disable(struct net_device *dev, 3181 enum netdev_offload_xstats_type type); 3182 bool netdev_offload_xstats_enabled(const struct net_device *dev, 3183 enum netdev_offload_xstats_type type); 3184 int netdev_offload_xstats_get(struct net_device *dev, 3185 enum netdev_offload_xstats_type type, 3186 struct rtnl_hw_stats64 *stats, bool *used, 3187 struct netlink_ext_ack *extack); 3188 void 3189 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd, 3190 const struct rtnl_hw_stats64 *stats); 3191 void 3192 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru); 3193 void netdev_offload_xstats_push_delta(struct net_device *dev, 3194 enum netdev_offload_xstats_type type, 3195 const struct rtnl_hw_stats64 *stats); 3196 3197 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, 3198 struct net_device *dev) 3199 { 3200 info->dev = dev; 3201 info->extack = NULL; 3202 } 3203 3204 static inline struct net_device * 3205 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) 3206 { 3207 return info->dev; 3208 } 3209 3210 static inline struct netlink_ext_ack * 3211 netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) 3212 { 3213 return info->extack; 3214 } 3215 3216 int call_netdevice_notifiers(unsigned long val, struct net_device *dev); 3217 int call_netdevice_notifiers_info(unsigned long val, 3218 struct netdev_notifier_info *info); 3219 3220 #define for_each_netdev(net, d) \ 3221 list_for_each_entry(d, &(net)->dev_base_head, dev_list) 3222 #define for_each_netdev_reverse(net, d) \ 3223 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) 3224 #define for_each_netdev_rcu(net, d) \ 3225 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) 3226 #define for_each_netdev_safe(net, d, n) \ 3227 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) 3228 #define for_each_netdev_continue(net, d) \ 3229 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) 3230 #define for_each_netdev_continue_reverse(net, d) \ 3231 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \ 3232 dev_list) 3233 #define for_each_netdev_continue_rcu(net, d) \ 3234 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 3235 #define for_each_netdev_in_bond_rcu(bond, slave) \ 3236 for_each_netdev_rcu(&init_net, slave) \ 3237 if (netdev_master_upper_dev_get_rcu(slave) == (bond)) 3238 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 3239 3240 #define for_each_netdev_dump(net, d, ifindex) \ 3241 for (; (d = xa_find(&(net)->dev_by_index, &ifindex, \ 3242 ULONG_MAX, XA_PRESENT)); ifindex++) 3243 3244 static inline struct net_device *next_net_device(struct net_device *dev) 3245 { 3246 struct list_head *lh; 3247 struct net *net; 3248 3249 net = dev_net(dev); 3250 lh = dev->dev_list.next; 3251 return lh == &net->dev_base_head ? 
NULL : net_device_entry(lh); 3252 } 3253 3254 static inline struct net_device *next_net_device_rcu(struct net_device *dev) 3255 { 3256 struct list_head *lh; 3257 struct net *net; 3258 3259 net = dev_net(dev); 3260 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); 3261 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3262 } 3263 3264 static inline struct net_device *first_net_device(struct net *net) 3265 { 3266 return list_empty(&net->dev_base_head) ? NULL : 3267 net_device_entry(net->dev_base_head.next); 3268 } 3269 3270 static inline struct net_device *first_net_device_rcu(struct net *net) 3271 { 3272 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); 3273 3274 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3275 } 3276 3277 int netdev_boot_setup_check(struct net_device *dev); 3278 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 3279 const char *hwaddr); 3280 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); 3281 void dev_add_pack(struct packet_type *pt); 3282 void dev_remove_pack(struct packet_type *pt); 3283 void __dev_remove_pack(struct packet_type *pt); 3284 void dev_add_offload(struct packet_offload *po); 3285 void dev_remove_offload(struct packet_offload *po); 3286 3287 int dev_get_iflink(const struct net_device *dev); 3288 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); 3289 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, 3290 struct net_device_path_stack *stack); 3291 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, 3292 unsigned short mask); 3293 struct net_device *dev_get_by_name(struct net *net, const char *name); 3294 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); 3295 struct net_device *__dev_get_by_name(struct net *net, const char *name); 3296 bool netdev_name_in_use(struct net *net, const char *name); 3297 int dev_alloc_name(struct net_device *dev, const char *name); 3298 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); 3299 void dev_close(struct net_device *dev); 3300 void dev_close_many(struct list_head *head, bool unlink); 3301 void dev_disable_lro(struct net_device *dev); 3302 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); 3303 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 3304 struct net_device *sb_dev); 3305 3306 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev); 3307 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id); 3308 3309 static inline int dev_queue_xmit(struct sk_buff *skb) 3310 { 3311 return __dev_queue_xmit(skb, NULL); 3312 } 3313 3314 static inline int dev_queue_xmit_accel(struct sk_buff *skb, 3315 struct net_device *sb_dev) 3316 { 3317 return __dev_queue_xmit(skb, sb_dev); 3318 } 3319 3320 static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 3321 { 3322 int ret; 3323 3324 ret = __dev_direct_xmit(skb, queue_id); 3325 if (!dev_xmit_complete(ret)) 3326 kfree_skb(skb); 3327 return ret; 3328 } 3329 3330 int register_netdevice(struct net_device *dev); 3331 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); 3332 void unregister_netdevice_many(struct list_head *head); 3333 static inline void unregister_netdevice(struct net_device *dev) 3334 { 3335 unregister_netdevice_queue(dev, NULL); 3336 } 3337 3338 int netdev_refcnt_read(const struct net_device *dev); 3339 void free_netdev(struct net_device *dev); 
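/* Example (illustrative, not an API defined at this point): dev_get_by_name_rcu()
 * does not take a reference, so the result may only be used inside the RCU
 * read-side critical section. A sketch, assuming &init_net:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(&init_net, "eth0");
 *	if (dev)
 *		ifindex = dev->ifindex;
 *	rcu_read_unlock();
 *
 * Holding a longer-lived pointer instead requires netdev_get_by_name(),
 * which takes a tracked reference that must be released with netdev_put().
 */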
3340 3341 struct net_device *netdev_get_xmit_slave(struct net_device *dev, 3342 struct sk_buff *skb, 3343 bool all_slaves); 3344 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 3345 struct sock *sk); 3346 struct net_device *dev_get_by_index(struct net *net, int ifindex); 3347 struct net_device *__dev_get_by_index(struct net *net, int ifindex); 3348 struct net_device *netdev_get_by_index(struct net *net, int ifindex, 3349 netdevice_tracker *tracker, gfp_t gfp); 3350 struct net_device *netdev_get_by_name(struct net *net, const char *name, 3351 netdevice_tracker *tracker, gfp_t gfp); 3352 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); 3353 void netdev_copy_name(struct net_device *dev, char *name); 3354 3355 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 3356 unsigned short type, 3357 const void *daddr, const void *saddr, 3358 unsigned int len) 3359 { 3360 if (!dev->header_ops || !dev->header_ops->create) 3361 return 0; 3362 3363 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); 3364 } 3365 3366 static inline int dev_parse_header(const struct sk_buff *skb, 3367 unsigned char *haddr) 3368 { 3369 const struct net_device *dev = skb->dev; 3370 3371 if (!dev->header_ops || !dev->header_ops->parse) 3372 return 0; 3373 return dev->header_ops->parse(skb, haddr); 3374 } 3375 3376 static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) 3377 { 3378 const struct net_device *dev = skb->dev; 3379 3380 if (!dev->header_ops || !dev->header_ops->parse_protocol) 3381 return 0; 3382 return dev->header_ops->parse_protocol(skb); 3383 } 3384 3385 /* ll_header must have at least hard_header_len allocated */ 3386 static inline bool dev_validate_header(const struct net_device *dev, 3387 char *ll_header, int len) 3388 { 3389 if (likely(len >= dev->hard_header_len)) 3390 return true; 3391 if (len < dev->min_header_len) 3392 return false; 3393 3394 if (capable(CAP_SYS_RAWIO)) { 3395 memset(ll_header + len, 0, dev->hard_header_len - len); 3396 return true; 3397 } 3398 3399 if (dev->header_ops && dev->header_ops->validate) 3400 return dev->header_ops->validate(ll_header, len); 3401 3402 return false; 3403 } 3404 3405 static inline bool dev_has_header(const struct net_device *dev) 3406 { 3407 return dev->header_ops && dev->header_ops->create; 3408 } 3409 3410 /* 3411 * Incoming packets are placed on per-CPU queues 3412 */ 3413 struct softnet_data { 3414 struct list_head poll_list; 3415 struct sk_buff_head process_queue; 3416 local_lock_t process_queue_bh_lock; 3417 3418 /* stats */ 3419 unsigned int processed; 3420 unsigned int time_squeeze; 3421 #ifdef CONFIG_RPS 3422 struct softnet_data *rps_ipi_list; 3423 #endif 3424 3425 unsigned int received_rps; 3426 bool in_net_rx_action; 3427 bool in_napi_threaded_poll; 3428 3429 #ifdef CONFIG_NET_FLOW_LIMIT 3430 struct sd_flow_limit __rcu *flow_limit; 3431 #endif 3432 struct Qdisc *output_queue; 3433 struct Qdisc **output_queue_tailp; 3434 struct sk_buff *completion_queue; 3435 #ifdef CONFIG_XFRM_OFFLOAD 3436 struct sk_buff_head xfrm_backlog; 3437 #endif 3438 /* written and read only by owning cpu: */ 3439 struct netdev_xmit xmit; 3440 #ifdef CONFIG_RPS 3441 /* input_queue_head should be written by cpu owning this struct, 3442 * and only read by other cpus. Worth using a cache line. 
3443 */
3444 unsigned int input_queue_head ____cacheline_aligned_in_smp;
3445
3446 /* Elements below can be accessed between CPUs for RPS/RFS */
3447 call_single_data_t csd ____cacheline_aligned_in_smp;
3448 struct softnet_data *rps_ipi_next;
3449 unsigned int cpu;
3450 unsigned int input_queue_tail;
3451 #endif
3452 struct sk_buff_head input_pkt_queue;
3453 struct napi_struct backlog;
3454
3455 atomic_t dropped ____cacheline_aligned_in_smp;
3456
3457 /* Another possibly contended cache line */
3458 spinlock_t defer_lock ____cacheline_aligned_in_smp;
3459 int defer_count;
3460 int defer_ipi_scheduled;
3461 struct sk_buff *defer_list;
3462 call_single_data_t defer_csd;
3463 };
3464
3465 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
3466 DECLARE_PER_CPU(struct page_pool *, system_page_pool);
3467
3468 #ifndef CONFIG_PREEMPT_RT
3469 static inline int dev_recursion_level(void)
3470 {
3471 return this_cpu_read(softnet_data.xmit.recursion);
3472 }
3473 #else
3474 static inline int dev_recursion_level(void)
3475 {
3476 return current->net_xmit.recursion;
3477 }
3478
3479 #endif
3480
3481 void __netif_schedule(struct Qdisc *q);
3482 void netif_schedule_queue(struct netdev_queue *txq);
3483
3484 static inline void netif_tx_schedule_all(struct net_device *dev)
3485 {
3486 unsigned int i;
3487
3488 for (i = 0; i < dev->num_tx_queues; i++)
3489 netif_schedule_queue(netdev_get_tx_queue(dev, i));
3490 }
3491
3492 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
3493 {
3494 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3495 }
3496
3497 /**
3498 * netif_start_queue - allow transmit
3499 * @dev: network device
3500 *
3501 * Allow upper layers to call the device hard_start_xmit routine.
3502 */
3503 static inline void netif_start_queue(struct net_device *dev)
3504 {
3505 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
3506 }
3507
3508 static inline void netif_tx_start_all_queues(struct net_device *dev)
3509 {
3510 unsigned int i;
3511
3512 for (i = 0; i < dev->num_tx_queues; i++) {
3513 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3514 netif_tx_start_queue(txq);
3515 }
3516 }
3517
3518 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
3519
3520 /**
3521 * netif_wake_queue - restart transmit
3522 * @dev: network device
3523 *
3524 * Allow upper layers to call the device hard_start_xmit routine.
3525 * Used for flow control when transmit resources are available.
3526 */
3527 static inline void netif_wake_queue(struct net_device *dev)
3528 {
3529 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
3530 }
3531
3532 static inline void netif_tx_wake_all_queues(struct net_device *dev)
3533 {
3534 unsigned int i;
3535
3536 for (i = 0; i < dev->num_tx_queues; i++) {
3537 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3538 netif_tx_wake_queue(txq);
3539 }
3540 }
3541
3542 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
3543 {
3544 /* Paired with READ_ONCE() from dev_watchdog() */
3545 WRITE_ONCE(dev_queue->trans_start, jiffies);
3546
3547 /* This barrier is paired with smp_mb() from dev_watchdog() */
3548 smp_mb__before_atomic();
3549
3550 /* Must be an atomic op, see netif_txq_try_stop() */
3551 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3552 }
3553
3554 /**
3555 * netif_stop_queue - stop the transmit queue
3556 * @dev: network device
3557 *
3558 * Stop upper layers calling the device hard_start_xmit routine.
3559 * Used for flow control when transmit resources are unavailable.
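 *
 * Illustrative use from a driver's ndo_start_xmit(), where my_ring_space()
 * is a hypothetical stand-in for the driver's own descriptor accounting;
 * the queue is stopped once the ring cannot take another maximally
 * fragmented skb, and the TX completion path later wakes it:
 *
 *	if (my_ring_space(priv) < MAX_SKB_FRAGS + 1)
 *		netif_stop_queue(dev);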
3560 */
3561 static inline void netif_stop_queue(struct net_device *dev)
3562 {
3563 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
3564 }
3565
3566 void netif_tx_stop_all_queues(struct net_device *dev);
3567
3568 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
3569 {
3570 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3571 }
3572
3573 /**
3574 * netif_queue_stopped - test if transmit queue is flow-blocked
3575 * @dev: network device
3576 *
3577 * Test if transmit queue on device is currently unable to send.
3578 */
3579 static inline bool netif_queue_stopped(const struct net_device *dev)
3580 {
3581 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
3582 }
3583
3584 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
3585 {
3586 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
3587 }
3588
3589 static inline bool
3590 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
3591 {
3592 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
3593 }
3594
3595 static inline bool
3596 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
3597 {
3598 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
3599 }
3600
3601 /**
3602 * netdev_queue_set_dql_min_limit - set dql minimum limit
3603 * @dev_queue: pointer to transmit queue
3604 * @min_limit: dql minimum limit
3605 *
3606 * Forces xmit_more() to return true until the minimum threshold
3607 * defined by @min_limit is reached (or until the tx queue is
3608 * empty). Warning: to be used with care; misuse will impact
3609 * latency.
3610 */
3611 static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
3612 unsigned int min_limit)
3613 {
3614 #ifdef CONFIG_BQL
3615 dev_queue->dql.min_limit = min_limit;
3616 #endif
3617 }
3618
3619 static inline int netdev_queue_dql_avail(const struct netdev_queue *txq)
3620 {
3621 #ifdef CONFIG_BQL
3622 /* Non-BQL migrated drivers will return 0, too. */
3623 return dql_avail(&txq->dql);
3624 #else
3625 return 0;
3626 #endif
3627 }
3628
3629 /**
3630 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3631 * @dev_queue: pointer to transmit queue
3632 *
3633 * BQL-enabled drivers might use this helper in their ndo_start_xmit(),
3634 * to give an appropriate hint to the CPU.
3635 */
3636 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
3637 {
3638 #ifdef CONFIG_BQL
3639 prefetchw(&dev_queue->dql.num_queued);
3640 #endif
3641 }
3642
3643 /**
3644 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3645 * @dev_queue: pointer to transmit queue
3646 *
3647 * BQL-enabled drivers might use this helper in their TX completion path,
3648 * to give an appropriate hint to the CPU.
3649 */
3650 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
3651 {
3652 #ifdef CONFIG_BQL
3653 prefetchw(&dev_queue->dql.limit);
3654 #endif
3655 }
3656
3657 /**
3658 * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
3659 * @dev_queue: network device queue
3660 * @bytes: number of bytes queued to the device queue
3661 *
3662 * Report the number of bytes queued for sending/completion to the network
3663 * device hardware queue. @bytes should be a good approximation and should
3664 * exactly match netdev_completed_queue() @bytes.
3665 * This is typically called once per packet, from ndo_start_xmit().
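 *
 * A minimal sketch of the enqueue side of the BQL pairing, assuming a
 * driver that has just posted the skb to its ring (my_post_to_ring() is
 * hypothetical):
 *
 *	my_post_to_ring(priv, skb);
 *	netdev_tx_sent_queue(txq, skb->len);
 *
 * The same byte total must later be reported through
 * netdev_tx_completed_queue() once the hardware is done with the packet.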
3666 */ 3667 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, 3668 unsigned int bytes) 3669 { 3670 #ifdef CONFIG_BQL 3671 dql_queued(&dev_queue->dql, bytes); 3672 3673 if (likely(dql_avail(&dev_queue->dql) >= 0)) 3674 return; 3675 3676 /* Paired with READ_ONCE() from dev_watchdog() */ 3677 WRITE_ONCE(dev_queue->trans_start, jiffies); 3678 3679 /* This barrier is paired with smp_mb() from dev_watchdog() */ 3680 smp_mb__before_atomic(); 3681 3682 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 3683 3684 /* 3685 * The XOFF flag must be set before checking the dql_avail below, 3686 * because in netdev_tx_completed_queue we update the dql_completed 3687 * before checking the XOFF flag. 3688 */ 3689 smp_mb__after_atomic(); 3690 3691 /* check again in case another CPU has just made room avail */ 3692 if (unlikely(dql_avail(&dev_queue->dql) >= 0)) 3693 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 3694 #endif 3695 } 3696 3697 /* Variant of netdev_tx_sent_queue() for drivers that are aware 3698 * that they should not test BQL status themselves. 3699 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last 3700 * skb of a batch. 3701 * Returns true if the doorbell must be used to kick the NIC. 3702 */ 3703 static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, 3704 unsigned int bytes, 3705 bool xmit_more) 3706 { 3707 if (xmit_more) { 3708 #ifdef CONFIG_BQL 3709 dql_queued(&dev_queue->dql, bytes); 3710 #endif 3711 return netif_tx_queue_stopped(dev_queue); 3712 } 3713 netdev_tx_sent_queue(dev_queue, bytes); 3714 return true; 3715 } 3716 3717 /** 3718 * netdev_sent_queue - report the number of bytes queued to hardware 3719 * @dev: network device 3720 * @bytes: number of bytes queued to the hardware device queue 3721 * 3722 * Report the number of bytes queued for sending/completion to the network 3723 * device hardware queue#0. @bytes should be a good approximation and should 3724 * exactly match netdev_completed_queue() @bytes. 3725 * This is typically called once per packet, from ndo_start_xmit(). 3726 */ 3727 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) 3728 { 3729 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); 3730 } 3731 3732 static inline bool __netdev_sent_queue(struct net_device *dev, 3733 unsigned int bytes, 3734 bool xmit_more) 3735 { 3736 return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, 3737 xmit_more); 3738 } 3739 3740 /** 3741 * netdev_tx_completed_queue - report number of packets/bytes at TX completion. 3742 * @dev_queue: network device queue 3743 * @pkts: number of packets (currently ignored) 3744 * @bytes: number of bytes dequeued from the device queue 3745 * 3746 * Must be called at most once per TX completion round (and not per 3747 * individual packet), so that BQL can adjust its limits appropriately. 
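 *
 * Illustrative completion-path sketch; my_reap_completed() stands in for
 * the driver's own walk of finished descriptors:
 *
 *	struct sk_buff *skb;
 *	unsigned int pkts = 0, bytes = 0;
 *
 *	while ((skb = my_reap_completed(priv)) != NULL) {
 *		pkts++;
 *		bytes += skb->len;
 *		dev_consume_skb_any(skb);
 *	}
 *	netdev_tx_completed_queue(txq, pkts, bytes);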
3748 */
3749 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
3750 unsigned int pkts, unsigned int bytes)
3751 {
3752 #ifdef CONFIG_BQL
3753 if (unlikely(!bytes))
3754 return;
3755
3756 dql_completed(&dev_queue->dql, bytes);
3757
3758 /*
3759 * Without the memory barrier there is a small possibility that
3760 * netdev_tx_sent_queue will miss the update and cause the queue to
3761 * be stopped forever.
3762 */
3763 smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */
3764
3765 if (unlikely(dql_avail(&dev_queue->dql) < 0))
3766 return;
3767
3768 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
3769 netif_schedule_queue(dev_queue);
3770 #endif
3771 }
3772
3773 /**
3774 * netdev_completed_queue - report bytes and packets completed by device
3775 * @dev: network device
3776 * @pkts: actual number of packets sent over the medium
3777 * @bytes: actual number of bytes sent over the medium
3778 *
3779 * Report the number of bytes and packets transmitted by the network device
3780 * hardware queue over the physical medium; @bytes must exactly match the
3781 * @bytes amount passed to netdev_sent_queue().
3782 */
3783 static inline void netdev_completed_queue(struct net_device *dev,
3784 unsigned int pkts, unsigned int bytes)
3785 {
3786 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3787 }
3788
3789 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
3790 {
3791 #ifdef CONFIG_BQL
3792 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
3793 dql_reset(&q->dql);
3794 #endif
3795 }
3796
3797 /**
3798 * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue
3799 * @dev: network device
3800 * @qid: stack index of the queue to reset
3801 */
3802 static inline void netdev_tx_reset_subqueue(const struct net_device *dev,
3803 u32 qid)
3804 {
3805 netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
3806 }
3807
3808 /**
3809 * netdev_reset_queue - reset the packets and bytes count of a network device
3810 * @dev_queue: network device
3811 *
3812 * Reset the bytes and packet count of a network device and clear the
3813 * software flow control OFF bit for this network device.
3814 */
3815 static inline void netdev_reset_queue(struct net_device *dev_queue)
3816 {
3817 netdev_tx_reset_subqueue(dev_queue, 0);
3818 }
3819
3820 /**
3821 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
3822 * @dev: network device
3823 * @queue_index: given tx queue index
3824 *
3825 * Returns 0 if the given tx queue index is >= the number of device tx queues;
3826 * otherwise returns the originally passed tx queue index.
3827 */
3828 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3829 {
3830 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3831 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3832 dev->name, queue_index,
3833 dev->real_num_tx_queues);
3834 return 0;
3835 }
3836
3837 return queue_index;
3838 }
3839
3840 /**
3841 * netif_running - test if up
3842 * @dev: network device
3843 *
3844 * Test if the device has been brought up.
3845 */
3846 static inline bool netif_running(const struct net_device *dev)
3847 {
3848 return test_bit(__LINK_STATE_START, &dev->state);
3849 }
3850
3851 /*
3852 * Routines to manage the subqueues on a device. We only need start,
3853 * stop, and a check if it's stopped. All other device management is
3854 * done at the overall netdevice level.
3855 * A helper to test whether the device is multiqueue is also provided.
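 *
 * For example (sketch), a multiqueue driver usually resolves the queue
 * from the skb and applies the same stop/wake discipline per subqueue
 * (my_ring_full() is hypothetical):
 *
 *	u16 q = skb_get_queue_mapping(skb);
 *
 *	if (my_ring_full(priv, q))
 *		netif_stop_subqueue(dev, q);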
3856 */ 3857 3858 /** 3859 * netif_start_subqueue - allow sending packets on subqueue 3860 * @dev: network device 3861 * @queue_index: sub queue index 3862 * 3863 * Start individual transmit queue of a device with multiple transmit queues. 3864 */ 3865 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) 3866 { 3867 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3868 3869 netif_tx_start_queue(txq); 3870 } 3871 3872 /** 3873 * netif_stop_subqueue - stop sending packets on subqueue 3874 * @dev: network device 3875 * @queue_index: sub queue index 3876 * 3877 * Stop individual transmit queue of a device with multiple transmit queues. 3878 */ 3879 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) 3880 { 3881 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3882 netif_tx_stop_queue(txq); 3883 } 3884 3885 /** 3886 * __netif_subqueue_stopped - test status of subqueue 3887 * @dev: network device 3888 * @queue_index: sub queue index 3889 * 3890 * Check individual transmit queue of a device with multiple transmit queues. 3891 */ 3892 static inline bool __netif_subqueue_stopped(const struct net_device *dev, 3893 u16 queue_index) 3894 { 3895 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3896 3897 return netif_tx_queue_stopped(txq); 3898 } 3899 3900 /** 3901 * netif_subqueue_stopped - test status of subqueue 3902 * @dev: network device 3903 * @skb: sub queue buffer pointer 3904 * 3905 * Check individual transmit queue of a device with multiple transmit queues. 3906 */ 3907 static inline bool netif_subqueue_stopped(const struct net_device *dev, 3908 struct sk_buff *skb) 3909 { 3910 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); 3911 } 3912 3913 /** 3914 * netif_wake_subqueue - allow sending packets on subqueue 3915 * @dev: network device 3916 * @queue_index: sub queue index 3917 * 3918 * Resume individual transmit queue of a device with multiple transmit queues. 3919 */ 3920 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) 3921 { 3922 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3923 3924 netif_tx_wake_queue(txq); 3925 } 3926 3927 #ifdef CONFIG_XPS 3928 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 3929 u16 index); 3930 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 3931 u16 index, enum xps_map_type type); 3932 3933 /** 3934 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask 3935 * @j: CPU/Rx queue index 3936 * @mask: bitmask of all cpus/rx queues 3937 * @nr_bits: number of bits in the bitmask 3938 * 3939 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. 3940 */ 3941 static inline bool netif_attr_test_mask(unsigned long j, 3942 const unsigned long *mask, 3943 unsigned int nr_bits) 3944 { 3945 cpu_max_bits_warn(j, nr_bits); 3946 return test_bit(j, mask); 3947 } 3948 3949 /** 3950 * netif_attr_test_online - Test for online CPU/Rx queue 3951 * @j: CPU/Rx queue index 3952 * @online_mask: bitmask for CPUs/Rx queues that are online 3953 * @nr_bits: number of bits in the bitmask 3954 * 3955 * Returns: true if a CPU/Rx queue is online. 
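 *
 * Example (sketch), skipping offline entries while walking a mask of
 * nr_bits CPUs/queues (my_use_entry() is hypothetical):
 *
 *	unsigned long j;
 *
 *	for (j = 0; j < nr_bits; j++)
 *		if (netif_attr_test_online(j, online_mask, nr_bits))
 *			my_use_entry(j);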
3956 */ 3957 static inline bool netif_attr_test_online(unsigned long j, 3958 const unsigned long *online_mask, 3959 unsigned int nr_bits) 3960 { 3961 cpu_max_bits_warn(j, nr_bits); 3962 3963 if (online_mask) 3964 return test_bit(j, online_mask); 3965 3966 return (j < nr_bits); 3967 } 3968 3969 /** 3970 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask 3971 * @n: CPU/Rx queue index 3972 * @srcp: the cpumask/Rx queue mask pointer 3973 * @nr_bits: number of bits in the bitmask 3974 * 3975 * Returns: next (after n) CPU/Rx queue index in the mask; 3976 * >= nr_bits if no further CPUs/Rx queues set. 3977 */ 3978 static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, 3979 unsigned int nr_bits) 3980 { 3981 /* -1 is a legal arg here. */ 3982 if (n != -1) 3983 cpu_max_bits_warn(n, nr_bits); 3984 3985 if (srcp) 3986 return find_next_bit(srcp, nr_bits, n + 1); 3987 3988 return n + 1; 3989 } 3990 3991 /** 3992 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p 3993 * @n: CPU/Rx queue index 3994 * @src1p: the first CPUs/Rx queues mask pointer 3995 * @src2p: the second CPUs/Rx queues mask pointer 3996 * @nr_bits: number of bits in the bitmask 3997 * 3998 * Returns: next (after n) CPU/Rx queue index set in both masks; 3999 * >= nr_bits if no further CPUs/Rx queues set in both. 4000 */ 4001 static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, 4002 const unsigned long *src2p, 4003 unsigned int nr_bits) 4004 { 4005 /* -1 is a legal arg here. */ 4006 if (n != -1) 4007 cpu_max_bits_warn(n, nr_bits); 4008 4009 if (src1p && src2p) 4010 return find_next_and_bit(src1p, src2p, nr_bits, n + 1); 4011 else if (src1p) 4012 return find_next_bit(src1p, nr_bits, n + 1); 4013 else if (src2p) 4014 return find_next_bit(src2p, nr_bits, n + 1); 4015 4016 return n + 1; 4017 } 4018 #else 4019 static inline int netif_set_xps_queue(struct net_device *dev, 4020 const struct cpumask *mask, 4021 u16 index) 4022 { 4023 return 0; 4024 } 4025 4026 static inline int __netif_set_xps_queue(struct net_device *dev, 4027 const unsigned long *mask, 4028 u16 index, enum xps_map_type type) 4029 { 4030 return 0; 4031 } 4032 #endif 4033 4034 /** 4035 * netif_is_multiqueue - test if device has multiple transmit queues 4036 * @dev: network device 4037 * 4038 * Check if device has multiple transmit queues 4039 */ 4040 static inline bool netif_is_multiqueue(const struct net_device *dev) 4041 { 4042 return dev->num_tx_queues > 1; 4043 } 4044 4045 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); 4046 4047 #ifdef CONFIG_SYSFS 4048 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); 4049 #else 4050 static inline int netif_set_real_num_rx_queues(struct net_device *dev, 4051 unsigned int rxqs) 4052 { 4053 dev->real_num_rx_queues = rxqs; 4054 return 0; 4055 } 4056 #endif 4057 int netif_set_real_num_queues(struct net_device *dev, 4058 unsigned int txq, unsigned int rxq); 4059 4060 int netif_get_num_default_rss_queues(void); 4061 4062 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason); 4063 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason); 4064 4065 /* 4066 * It is not allowed to call kfree_skb() or consume_skb() from hardware 4067 * interrupt context or with hardware interrupts being disabled. 
4068 * (in_hardirq() || irqs_disabled())
4069 *
4070 * We provide four helpers that can be used in the following contexts:
4071 *
4072 * dev_kfree_skb_irq(skb) when the caller drops a packet from irq context,
4073 * replacing kfree_skb(skb)
4074 *
4075 * dev_consume_skb_irq(skb) when the caller consumes a packet from irq context.
4076 * Typically used in place of consume_skb(skb) in the TX completion path
4077 *
4078 * dev_kfree_skb_any(skb) when the caller doesn't know its current irq context,
4079 * replacing kfree_skb(skb)
4080 *
4081 * dev_consume_skb_any(skb) when the caller doesn't know its current irq context,
4082 * and has consumed a packet; used in place of consume_skb(skb)
4083 */
4084 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
4085 {
4086 dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
4087 }
4088
4089 static inline void dev_consume_skb_irq(struct sk_buff *skb)
4090 {
4091 dev_kfree_skb_irq_reason(skb, SKB_CONSUMED);
4092 }
4093
4094 static inline void dev_kfree_skb_any(struct sk_buff *skb)
4095 {
4096 dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
4097 }
4098
4099 static inline void dev_consume_skb_any(struct sk_buff *skb)
4100 {
4101 dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
4102 }
4103
4104 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4105 const struct bpf_prog *xdp_prog);
4106 void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog);
4107 int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb);
4108 int netif_rx(struct sk_buff *skb);
4109 int __netif_rx(struct sk_buff *skb);
4110
4111 int netif_receive_skb(struct sk_buff *skb);
4112 int netif_receive_skb_core(struct sk_buff *skb);
4113 void netif_receive_skb_list_internal(struct list_head *head);
4114 void netif_receive_skb_list(struct list_head *head);
4115 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
4116 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
4117 struct sk_buff *napi_get_frags(struct napi_struct *napi);
4118 void napi_get_frags_check(struct napi_struct *napi);
4119 gro_result_t napi_gro_frags(struct napi_struct *napi);
4120
4121 static inline void napi_free_frags(struct napi_struct *napi)
4122 {
4123 kfree_skb(napi->skb);
4124 napi->skb = NULL;
4125 }
4126
4127 bool netdev_is_rx_handler_busy(struct net_device *dev);
4128 int netdev_rx_handler_register(struct net_device *dev,
4129 rx_handler_func_t *rx_handler,
4130 void *rx_handler_data);
4131 void netdev_rx_handler_unregister(struct net_device *dev);
4132
4133 bool dev_valid_name(const char *name);
4134 static inline bool is_socket_ioctl_cmd(unsigned int cmd)
4135 {
4136 return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
4137 }
4138 int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
4139 int put_user_ifreq(struct ifreq *ifr, void __user *arg);
4140 int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
4141 void __user *data, bool *need_copyout);
4142 int dev_ifconf(struct net *net, struct ifconf __user *ifc);
4143 int generic_hwtstamp_get_lower(struct net_device *dev,
4144 struct kernel_hwtstamp_config *kernel_cfg);
4145 int generic_hwtstamp_set_lower(struct net_device *dev,
4146 struct kernel_hwtstamp_config *kernel_cfg,
4147 struct netlink_ext_ack *extack);
4148 int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
4149 unsigned int dev_get_flags(const struct net_device *);
4150 int __dev_change_flags(struct net_device *dev, unsigned int flags,
4151 struct netlink_ext_ack
*extack); 4152 int dev_change_flags(struct net_device *dev, unsigned int flags, 4153 struct netlink_ext_ack *extack); 4154 int dev_set_alias(struct net_device *, const char *, size_t); 4155 int dev_get_alias(const struct net_device *, char *, size_t); 4156 int __dev_change_net_namespace(struct net_device *dev, struct net *net, 4157 const char *pat, int new_ifindex); 4158 static inline 4159 int dev_change_net_namespace(struct net_device *dev, struct net *net, 4160 const char *pat) 4161 { 4162 return __dev_change_net_namespace(dev, net, pat, 0); 4163 } 4164 int __dev_set_mtu(struct net_device *, int); 4165 int dev_set_mtu(struct net_device *, int); 4166 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 4167 struct netlink_ext_ack *extack); 4168 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 4169 struct netlink_ext_ack *extack); 4170 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 4171 struct netlink_ext_ack *extack); 4172 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); 4173 int dev_get_port_parent_id(struct net_device *dev, 4174 struct netdev_phys_item_id *ppid, bool recurse); 4175 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); 4176 4177 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); 4178 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 4179 struct netdev_queue *txq, int *ret); 4180 4181 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); 4182 u8 dev_xdp_prog_count(struct net_device *dev); 4183 int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf); 4184 u8 dev_xdp_sb_prog_count(struct net_device *dev); 4185 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); 4186 4187 u32 dev_get_min_mp_channel_count(const struct net_device *dev); 4188 4189 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 4190 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 4191 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb); 4192 bool is_skb_forwardable(const struct net_device *dev, 4193 const struct sk_buff *skb); 4194 4195 static __always_inline bool __is_skb_forwardable(const struct net_device *dev, 4196 const struct sk_buff *skb, 4197 const bool check_mtu) 4198 { 4199 const u32 vlan_hdr_len = 4; /* VLAN_HLEN */ 4200 unsigned int len; 4201 4202 if (!(dev->flags & IFF_UP)) 4203 return false; 4204 4205 if (!check_mtu) 4206 return true; 4207 4208 len = dev->mtu + dev->hard_header_len + vlan_hdr_len; 4209 if (skb->len <= len) 4210 return true; 4211 4212 /* if TSO is enabled, we don't care about the length as the packet 4213 * could be forwarded without being segmented before 4214 */ 4215 if (skb_is_gso(skb)) 4216 return true; 4217 4218 return false; 4219 } 4220 4221 void netdev_core_stats_inc(struct net_device *dev, u32 offset); 4222 4223 #define DEV_CORE_STATS_INC(FIELD) \ 4224 static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ 4225 { \ 4226 netdev_core_stats_inc(dev, \ 4227 offsetof(struct net_device_core_stats, FIELD)); \ 4228 } 4229 DEV_CORE_STATS_INC(rx_dropped) 4230 DEV_CORE_STATS_INC(tx_dropped) 4231 DEV_CORE_STATS_INC(rx_nohandler) 4232 DEV_CORE_STATS_INC(rx_otherhost_dropped) 4233 #undef DEV_CORE_STATS_INC 4234 4235 static __always_inline int ____dev_forward_skb(struct net_device *dev, 4236 struct sk_buff *skb, 4237 const bool check_mtu) 4238 { 4239 if 
(skb_orphan_frags(skb, GFP_ATOMIC) ||
4240 unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
4241 dev_core_stats_rx_dropped_inc(dev);
4242 kfree_skb(skb);
4243 return NET_RX_DROP;
4244 }
4245
4246 skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
4247 skb->priority = 0;
4248 return 0;
4249 }
4250
4251 bool dev_nit_active(struct net_device *dev);
4252 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
4253
4254 static inline void __dev_put(struct net_device *dev)
4255 {
4256 if (dev) {
4257 #ifdef CONFIG_PCPU_DEV_REFCNT
4258 this_cpu_dec(*dev->pcpu_refcnt);
4259 #else
4260 refcount_dec(&dev->dev_refcnt);
4261 #endif
4262 }
4263 }
4264
4265 static inline void __dev_hold(struct net_device *dev)
4266 {
4267 if (dev) {
4268 #ifdef CONFIG_PCPU_DEV_REFCNT
4269 this_cpu_inc(*dev->pcpu_refcnt);
4270 #else
4271 refcount_inc(&dev->dev_refcnt);
4272 #endif
4273 }
4274 }
4275
4276 static inline void __netdev_tracker_alloc(struct net_device *dev,
4277 netdevice_tracker *tracker,
4278 gfp_t gfp)
4279 {
4280 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER
4281 ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp);
4282 #endif
4283 }
4284
4285 /* netdev_tracker_alloc() can upgrade a prior untracked reference
4286 * taken by dev_get_by_name()/dev_get_by_index() to a tracked one.
4287 */
4288 static inline void netdev_tracker_alloc(struct net_device *dev,
4289 netdevice_tracker *tracker, gfp_t gfp)
4290 {
4291 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER
4292 refcount_dec(&dev->refcnt_tracker.no_tracker);
4293 __netdev_tracker_alloc(dev, tracker, gfp);
4294 #endif
4295 }
4296
4297 static inline void netdev_tracker_free(struct net_device *dev,
4298 netdevice_tracker *tracker)
4299 {
4300 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER
4301 ref_tracker_free(&dev->refcnt_tracker, tracker);
4302 #endif
4303 }
4304
4305 static inline void netdev_hold(struct net_device *dev,
4306 netdevice_tracker *tracker, gfp_t gfp)
4307 {
4308 if (dev) {
4309 __dev_hold(dev);
4310 __netdev_tracker_alloc(dev, tracker, gfp);
4311 }
4312 }
4313
4314 static inline void netdev_put(struct net_device *dev,
4315 netdevice_tracker *tracker)
4316 {
4317 if (dev) {
4318 netdev_tracker_free(dev, tracker);
4319 __dev_put(dev);
4320 }
4321 }
4322
4323 /**
4324 * dev_hold - get reference to device
4325 * @dev: network device
4326 *
4327 * Hold reference to device to keep it from being freed.
4328 * Try using netdev_hold() instead.
4329 */
4330 static inline void dev_hold(struct net_device *dev)
4331 {
4332 netdev_hold(dev, NULL, GFP_ATOMIC);
4333 }
4334
4335 /**
4336 * dev_put - release reference to device
4337 * @dev: network device
4338 *
4339 * Release reference to device to allow it to be freed.
4340 * Try using netdev_put() instead.
4341 */
4342 static inline void dev_put(struct net_device *dev)
4343 {
4344 netdev_put(dev, NULL);
4345 }
4346
4347 DEFINE_FREE(dev_put, struct net_device *, if (_T) dev_put(_T))
4348
4349 static inline void netdev_ref_replace(struct net_device *odev,
4350 struct net_device *ndev,
4351 netdevice_tracker *tracker,
4352 gfp_t gfp)
4353 {
4354 if (odev)
4355 netdev_tracker_free(odev, tracker);
4356
4357 __dev_hold(ndev);
4358 __dev_put(odev);
4359
4360 if (ndev)
4361 __netdev_tracker_alloc(ndev, tracker, gfp);
4362 }
4363
4364 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
4365 * and _off may be called from IRQ context, but it is the caller
4366 * who is responsible for serializing these calls.
4367 *
4368 * The name carrier is inappropriate; these functions should really be
4369 * called netif_lowerlayer_*() because they represent the state of any
4370 * kind of lower layer, not just hardware media.
4371 */
4372 void linkwatch_fire_event(struct net_device *dev);
4373
4374 /**
4375 * linkwatch_sync_dev - sync linkwatch for the given device
4376 * @dev: network device to sync linkwatch for
4377 *
4378 * Sync linkwatch for the given device, removing it from the
4379 * pending work list (if queued).
4380 */
4381 void linkwatch_sync_dev(struct net_device *dev);
4382
4383 /**
4384 * netif_carrier_ok - test if carrier present
4385 * @dev: network device
4386 *
4387 * Check if carrier is present on device
4388 */
4389 static inline bool netif_carrier_ok(const struct net_device *dev)
4390 {
4391 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
4392 }
4393
4394 unsigned long dev_trans_start(struct net_device *dev);
4395
4396 void netdev_watchdog_up(struct net_device *dev);
4397
4398 void netif_carrier_on(struct net_device *dev);
4399 void netif_carrier_off(struct net_device *dev);
4400 void netif_carrier_event(struct net_device *dev);
4401
4402 /**
4403 * netif_dormant_on - mark device as dormant.
4404 * @dev: network device
4405 *
4406 * Mark device as dormant (as per RFC2863).
4407 *
4408 * The dormant state indicates that the relevant interface is not
4409 * actually in a condition to pass packets (i.e., it is not 'up') but is
4410 * in a "pending" state, waiting for some external event. For "on-
4411 * demand" interfaces, this new state identifies the situation where the
4412 * interface is waiting for events to place it in the up state.
4413 */
4414 static inline void netif_dormant_on(struct net_device *dev)
4415 {
4416 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
4417 linkwatch_fire_event(dev);
4418 }
4419
4420 /**
4421 * netif_dormant_off - set device as not dormant.
4422 * @dev: network device
4423 *
4424 * Device is not in dormant state.
4425 */
4426 static inline void netif_dormant_off(struct net_device *dev)
4427 {
4428 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
4429 linkwatch_fire_event(dev);
4430 }
4431
4432 /**
4433 * netif_dormant - test if device is dormant
4434 * @dev: network device
4435 *
4436 * Check if device is dormant.
4437 */
4438 static inline bool netif_dormant(const struct net_device *dev)
4439 {
4440 return test_bit(__LINK_STATE_DORMANT, &dev->state);
4441 }
4442
4443
4444 /**
4445 * netif_testing_on - mark device as under test.
4446 * @dev: network device
4447 *
4448 * Mark device as under test (as per RFC2863).
4449 *
4450 * The testing state indicates that some test(s) must be performed on
4451 * the interface. After completion of the test, the interface state
4452 * will change to up, dormant, or down, as appropriate.
4453 */
4454 static inline void netif_testing_on(struct net_device *dev)
4455 {
4456 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
4457 linkwatch_fire_event(dev);
4458 }
4459
4460 /**
4461 * netif_testing_off - set device as not under test.
4462 * @dev: network device
4463 *
4464 * Device is not in testing state.
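 *
 * Illustrative pairing around a driver self-test (my_run_selftest() is
 * hypothetical):
 *
 *	netif_testing_on(dev);
 *	my_run_selftest(dev);
 *	netif_testing_off(dev);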
4465 */ 4466 static inline void netif_testing_off(struct net_device *dev) 4467 { 4468 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) 4469 linkwatch_fire_event(dev); 4470 } 4471 4472 /** 4473 * netif_testing - test if device is under test 4474 * @dev: network device 4475 * 4476 * Check if device is under test 4477 */ 4478 static inline bool netif_testing(const struct net_device *dev) 4479 { 4480 return test_bit(__LINK_STATE_TESTING, &dev->state); 4481 } 4482 4483 4484 /** 4485 * netif_oper_up - test if device is operational 4486 * @dev: network device 4487 * 4488 * Check if carrier is operational 4489 */ 4490 static inline bool netif_oper_up(const struct net_device *dev) 4491 { 4492 unsigned int operstate = READ_ONCE(dev->operstate); 4493 4494 return operstate == IF_OPER_UP || 4495 operstate == IF_OPER_UNKNOWN /* backward compat */; 4496 } 4497 4498 /** 4499 * netif_device_present - is device available or removed 4500 * @dev: network device 4501 * 4502 * Check if device has not been removed from system. 4503 */ 4504 static inline bool netif_device_present(const struct net_device *dev) 4505 { 4506 return test_bit(__LINK_STATE_PRESENT, &dev->state); 4507 } 4508 4509 void netif_device_detach(struct net_device *dev); 4510 4511 void netif_device_attach(struct net_device *dev); 4512 4513 /* 4514 * Network interface message level settings 4515 */ 4516 4517 enum { 4518 NETIF_MSG_DRV_BIT, 4519 NETIF_MSG_PROBE_BIT, 4520 NETIF_MSG_LINK_BIT, 4521 NETIF_MSG_TIMER_BIT, 4522 NETIF_MSG_IFDOWN_BIT, 4523 NETIF_MSG_IFUP_BIT, 4524 NETIF_MSG_RX_ERR_BIT, 4525 NETIF_MSG_TX_ERR_BIT, 4526 NETIF_MSG_TX_QUEUED_BIT, 4527 NETIF_MSG_INTR_BIT, 4528 NETIF_MSG_TX_DONE_BIT, 4529 NETIF_MSG_RX_STATUS_BIT, 4530 NETIF_MSG_PKTDATA_BIT, 4531 NETIF_MSG_HW_BIT, 4532 NETIF_MSG_WOL_BIT, 4533 4534 /* When you add a new bit above, update netif_msg_class_names array 4535 * in net/ethtool/common.c 4536 */ 4537 NETIF_MSG_CLASS_COUNT, 4538 }; 4539 /* Both ethtool_ops interface and internal driver implementation use u32 */ 4540 static_assert(NETIF_MSG_CLASS_COUNT <= 32); 4541 4542 #define __NETIF_MSG_BIT(bit) ((u32)1 << (bit)) 4543 #define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT) 4544 4545 #define NETIF_MSG_DRV __NETIF_MSG(DRV) 4546 #define NETIF_MSG_PROBE __NETIF_MSG(PROBE) 4547 #define NETIF_MSG_LINK __NETIF_MSG(LINK) 4548 #define NETIF_MSG_TIMER __NETIF_MSG(TIMER) 4549 #define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN) 4550 #define NETIF_MSG_IFUP __NETIF_MSG(IFUP) 4551 #define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR) 4552 #define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR) 4553 #define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED) 4554 #define NETIF_MSG_INTR __NETIF_MSG(INTR) 4555 #define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE) 4556 #define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS) 4557 #define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA) 4558 #define NETIF_MSG_HW __NETIF_MSG(HW) 4559 #define NETIF_MSG_WOL __NETIF_MSG(WOL) 4560 4561 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) 4562 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) 4563 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) 4564 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) 4565 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) 4566 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) 4567 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) 4568 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) 4569 #define netif_msg_tx_queued(p) ((p)->msg_enable & 
NETIF_MSG_TX_QUEUED) 4570 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) 4571 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) 4572 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) 4573 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) 4574 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) 4575 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) 4576 4577 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) 4578 { 4579 /* use default */ 4580 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) 4581 return default_msg_enable_bits; 4582 if (debug_value == 0) /* no output */ 4583 return 0; 4584 /* set low N bits */ 4585 return (1U << debug_value) - 1; 4586 } 4587 4588 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) 4589 { 4590 spin_lock(&txq->_xmit_lock); 4591 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4592 WRITE_ONCE(txq->xmit_lock_owner, cpu); 4593 } 4594 4595 static inline bool __netif_tx_acquire(struct netdev_queue *txq) 4596 { 4597 __acquire(&txq->_xmit_lock); 4598 return true; 4599 } 4600 4601 static inline void __netif_tx_release(struct netdev_queue *txq) 4602 { 4603 __release(&txq->_xmit_lock); 4604 } 4605 4606 static inline void __netif_tx_lock_bh(struct netdev_queue *txq) 4607 { 4608 spin_lock_bh(&txq->_xmit_lock); 4609 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4610 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4611 } 4612 4613 static inline bool __netif_tx_trylock(struct netdev_queue *txq) 4614 { 4615 bool ok = spin_trylock(&txq->_xmit_lock); 4616 4617 if (likely(ok)) { 4618 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4619 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4620 } 4621 return ok; 4622 } 4623 4624 static inline void __netif_tx_unlock(struct netdev_queue *txq) 4625 { 4626 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4627 WRITE_ONCE(txq->xmit_lock_owner, -1); 4628 spin_unlock(&txq->_xmit_lock); 4629 } 4630 4631 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) 4632 { 4633 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4634 WRITE_ONCE(txq->xmit_lock_owner, -1); 4635 spin_unlock_bh(&txq->_xmit_lock); 4636 } 4637 4638 /* 4639 * txq->trans_start can be read locklessly from dev_watchdog() 4640 */ 4641 static inline void txq_trans_update(struct netdev_queue *txq) 4642 { 4643 if (txq->xmit_lock_owner != -1) 4644 WRITE_ONCE(txq->trans_start, jiffies); 4645 } 4646 4647 static inline void txq_trans_cond_update(struct netdev_queue *txq) 4648 { 4649 unsigned long now = jiffies; 4650 4651 if (READ_ONCE(txq->trans_start) != now) 4652 WRITE_ONCE(txq->trans_start, now); 4653 } 4654 4655 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ 4656 static inline void netif_trans_update(struct net_device *dev) 4657 { 4658 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 4659 4660 txq_trans_cond_update(txq); 4661 } 4662 4663 /** 4664 * netif_tx_lock - grab network device transmit lock 4665 * @dev: network device 4666 * 4667 * Get network device transmit lock 4668 */ 4669 void netif_tx_lock(struct net_device *dev); 4670 4671 static inline void netif_tx_lock_bh(struct net_device *dev) 4672 { 4673 local_bh_disable(); 4674 netif_tx_lock(dev); 4675 } 4676 4677 void netif_tx_unlock(struct net_device *dev); 4678 4679 static inline void netif_tx_unlock_bh(struct net_device *dev) 4680 { 4681 netif_tx_unlock(dev); 4682 local_bh_enable(); 4683 } 4684 4685 #define HARD_TX_LOCK(dev, txq, 
cpu) { \ 4686 if (!(dev)->lltx) { \ 4687 __netif_tx_lock(txq, cpu); \ 4688 } else { \ 4689 __netif_tx_acquire(txq); \ 4690 } \ 4691 } 4692 4693 #define HARD_TX_TRYLOCK(dev, txq) \ 4694 (!(dev)->lltx ? \ 4695 __netif_tx_trylock(txq) : \ 4696 __netif_tx_acquire(txq)) 4697 4698 #define HARD_TX_UNLOCK(dev, txq) { \ 4699 if (!(dev)->lltx) { \ 4700 __netif_tx_unlock(txq); \ 4701 } else { \ 4702 __netif_tx_release(txq); \ 4703 } \ 4704 } 4705 4706 static inline void netif_tx_disable(struct net_device *dev) 4707 { 4708 unsigned int i; 4709 int cpu; 4710 4711 local_bh_disable(); 4712 cpu = smp_processor_id(); 4713 spin_lock(&dev->tx_global_lock); 4714 for (i = 0; i < dev->num_tx_queues; i++) { 4715 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 4716 4717 __netif_tx_lock(txq, cpu); 4718 netif_tx_stop_queue(txq); 4719 __netif_tx_unlock(txq); 4720 } 4721 spin_unlock(&dev->tx_global_lock); 4722 local_bh_enable(); 4723 } 4724 4725 static inline void netif_addr_lock(struct net_device *dev) 4726 { 4727 unsigned char nest_level = 0; 4728 4729 #ifdef CONFIG_LOCKDEP 4730 nest_level = dev->nested_level; 4731 #endif 4732 spin_lock_nested(&dev->addr_list_lock, nest_level); 4733 } 4734 4735 static inline void netif_addr_lock_bh(struct net_device *dev) 4736 { 4737 unsigned char nest_level = 0; 4738 4739 #ifdef CONFIG_LOCKDEP 4740 nest_level = dev->nested_level; 4741 #endif 4742 local_bh_disable(); 4743 spin_lock_nested(&dev->addr_list_lock, nest_level); 4744 } 4745 4746 static inline void netif_addr_unlock(struct net_device *dev) 4747 { 4748 spin_unlock(&dev->addr_list_lock); 4749 } 4750 4751 static inline void netif_addr_unlock_bh(struct net_device *dev) 4752 { 4753 spin_unlock_bh(&dev->addr_list_lock); 4754 } 4755 4756 /* 4757 * dev_addrs walker. Should be used only for read access. Call with 4758 * rcu_read_lock held. 
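 *
 * Example (sketch; my_program_addr_filter() is a hypothetical driver
 * hook that loads one address into a hardware filter):
 *
 *	struct netdev_hw_addr *ha;
 *
 *	rcu_read_lock();
 *	for_each_dev_addr(dev, ha)
 *		my_program_addr_filter(priv, ha->addr);
 *	rcu_read_unlock();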
4759 */ 4760 #define for_each_dev_addr(dev, ha) \ 4761 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) 4762 4763 /* These functions live elsewhere (drivers/net/net_init.c, but related) */ 4764 4765 void ether_setup(struct net_device *dev); 4766 4767 /* Allocate dummy net_device */ 4768 struct net_device *alloc_netdev_dummy(int sizeof_priv); 4769 4770 /* Support for loadable net-drivers */ 4771 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 4772 unsigned char name_assign_type, 4773 void (*setup)(struct net_device *), 4774 unsigned int txqs, unsigned int rxqs); 4775 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ 4776 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) 4777 4778 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \ 4779 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \ 4780 count) 4781 4782 int register_netdev(struct net_device *dev); 4783 void unregister_netdev(struct net_device *dev); 4784 4785 int devm_register_netdev(struct device *dev, struct net_device *ndev); 4786 4787 /* General hardware address lists handling functions */ 4788 int __hw_addr_sync(struct netdev_hw_addr_list *to_list, 4789 struct netdev_hw_addr_list *from_list, int addr_len); 4790 int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list, 4791 struct netdev_hw_addr_list *from_list, 4792 int addr_len); 4793 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, 4794 struct netdev_hw_addr_list *from_list, int addr_len); 4795 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, 4796 struct net_device *dev, 4797 int (*sync)(struct net_device *, const unsigned char *), 4798 int (*unsync)(struct net_device *, 4799 const unsigned char *)); 4800 int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, 4801 struct net_device *dev, 4802 int (*sync)(struct net_device *, 4803 const unsigned char *, int), 4804 int (*unsync)(struct net_device *, 4805 const unsigned char *, int)); 4806 void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, 4807 struct net_device *dev, 4808 int (*unsync)(struct net_device *, 4809 const unsigned char *, int)); 4810 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, 4811 struct net_device *dev, 4812 int (*unsync)(struct net_device *, 4813 const unsigned char *)); 4814 void __hw_addr_init(struct netdev_hw_addr_list *list); 4815 4816 /* Functions used for device addresses handling */ 4817 void dev_addr_mod(struct net_device *dev, unsigned int offset, 4818 const void *addr, size_t len); 4819 4820 static inline void 4821 __dev_addr_set(struct net_device *dev, const void *addr, size_t len) 4822 { 4823 dev_addr_mod(dev, 0, addr, len); 4824 } 4825 4826 static inline void dev_addr_set(struct net_device *dev, const u8 *addr) 4827 { 4828 __dev_addr_set(dev, addr, dev->addr_len); 4829 } 4830 4831 int dev_addr_add(struct net_device *dev, const unsigned char *addr, 4832 unsigned char addr_type); 4833 int dev_addr_del(struct net_device *dev, const unsigned char *addr, 4834 unsigned char addr_type); 4835 4836 /* Functions used for unicast addresses handling */ 4837 int dev_uc_add(struct net_device *dev, const unsigned char *addr); 4838 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); 4839 int dev_uc_del(struct net_device *dev, const unsigned char *addr); 4840 int dev_uc_sync(struct net_device *to, struct net_device *from); 4841 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); 4842 void dev_uc_unsync(struct 
net_device *to, struct net_device *from); 4843 void dev_uc_flush(struct net_device *dev); 4844 void dev_uc_init(struct net_device *dev); 4845 4846 /** 4847 * __dev_uc_sync - Synchronize device's unicast list 4848 * @dev: device to sync 4849 * @sync: function to call if address should be added 4850 * @unsync: function to call if address should be removed 4851 * 4852 * Add newly added addresses to the interface, and release 4853 * addresses that have been deleted. 4854 */ 4855 static inline int __dev_uc_sync(struct net_device *dev, 4856 int (*sync)(struct net_device *, 4857 const unsigned char *), 4858 int (*unsync)(struct net_device *, 4859 const unsigned char *)) 4860 { 4861 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); 4862 } 4863 4864 /** 4865 * __dev_uc_unsync - Remove synchronized addresses from device 4866 * @dev: device to sync 4867 * @unsync: function to call if address should be removed 4868 * 4869 * Remove all addresses that were added to the device by dev_uc_sync(). 4870 */ 4871 static inline void __dev_uc_unsync(struct net_device *dev, 4872 int (*unsync)(struct net_device *, 4873 const unsigned char *)) 4874 { 4875 __hw_addr_unsync_dev(&dev->uc, dev, unsync); 4876 } 4877 4878 /* Functions used for multicast addresses handling */ 4879 int dev_mc_add(struct net_device *dev, const unsigned char *addr); 4880 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); 4881 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); 4882 int dev_mc_del(struct net_device *dev, const unsigned char *addr); 4883 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); 4884 int dev_mc_sync(struct net_device *to, struct net_device *from); 4885 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); 4886 void dev_mc_unsync(struct net_device *to, struct net_device *from); 4887 void dev_mc_flush(struct net_device *dev); 4888 void dev_mc_init(struct net_device *dev); 4889 4890 /** 4891 * __dev_mc_sync - Synchronize device's multicast list 4892 * @dev: device to sync 4893 * @sync: function to call if address should be added 4894 * @unsync: function to call if address should be removed 4895 * 4896 * Add newly added addresses to the interface, and release 4897 * addresses that have been deleted. 4898 */ 4899 static inline int __dev_mc_sync(struct net_device *dev, 4900 int (*sync)(struct net_device *, 4901 const unsigned char *), 4902 int (*unsync)(struct net_device *, 4903 const unsigned char *)) 4904 { 4905 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); 4906 } 4907 4908 /** 4909 * __dev_mc_unsync - Remove synchronized addresses from device 4910 * @dev: device to sync 4911 * @unsync: function to call if address should be removed 4912 * 4913 * Remove all addresses that were added to the device by dev_mc_sync(). 
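 *
 * Typical (illustrative) pairing, with my_mc_add()/my_mc_del() standing
 * in for the driver's filter callbacks:
 *
 *	from ndo_set_rx_mode():	__dev_mc_sync(dev, my_mc_add, my_mc_del);
 *	on teardown:		__dev_mc_unsync(dev, my_mc_del);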
4914 */ 4915 static inline void __dev_mc_unsync(struct net_device *dev, 4916 int (*unsync)(struct net_device *, 4917 const unsigned char *)) 4918 { 4919 __hw_addr_unsync_dev(&dev->mc, dev, unsync); 4920 } 4921 4922 /* Functions used for secondary unicast and multicast support */ 4923 void dev_set_rx_mode(struct net_device *dev); 4924 int dev_set_promiscuity(struct net_device *dev, int inc); 4925 int dev_set_allmulti(struct net_device *dev, int inc); 4926 void netdev_state_change(struct net_device *dev); 4927 void __netdev_notify_peers(struct net_device *dev); 4928 void netdev_notify_peers(struct net_device *dev); 4929 void netdev_features_change(struct net_device *dev); 4930 /* Load a device via the kmod */ 4931 void dev_load(struct net *net, const char *name); 4932 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 4933 struct rtnl_link_stats64 *storage); 4934 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 4935 const struct net_device_stats *netdev_stats); 4936 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 4937 const struct pcpu_sw_netstats __percpu *netstats); 4938 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); 4939 4940 enum { 4941 NESTED_SYNC_IMM_BIT, 4942 NESTED_SYNC_TODO_BIT, 4943 }; 4944 4945 #define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit)) 4946 #define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT) 4947 4948 #define NESTED_SYNC_IMM __NESTED_SYNC(IMM) 4949 #define NESTED_SYNC_TODO __NESTED_SYNC(TODO) 4950 4951 struct netdev_nested_priv { 4952 unsigned char flags; 4953 void *data; 4954 }; 4955 4956 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); 4957 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 4958 struct list_head **iter); 4959 4960 /* iterate through upper list, must be called under RCU read lock */ 4961 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ 4962 for (iter = &(dev)->adj_list.upper, \ 4963 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ 4964 updev; \ 4965 updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) 4966 4967 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 4968 int (*fn)(struct net_device *upper_dev, 4969 struct netdev_nested_priv *priv), 4970 struct netdev_nested_priv *priv); 4971 4972 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 4973 struct net_device *upper_dev); 4974 4975 bool netdev_has_any_upper_dev(struct net_device *dev); 4976 4977 void *netdev_lower_get_next_private(struct net_device *dev, 4978 struct list_head **iter); 4979 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 4980 struct list_head **iter); 4981 4982 #define netdev_for_each_lower_private(dev, priv, iter) \ 4983 for (iter = (dev)->adj_list.lower.next, \ 4984 priv = netdev_lower_get_next_private(dev, &(iter)); \ 4985 priv; \ 4986 priv = netdev_lower_get_next_private(dev, &(iter))) 4987 4988 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \ 4989 for (iter = &(dev)->adj_list.lower, \ 4990 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ 4991 priv; \ 4992 priv = netdev_lower_get_next_private_rcu(dev, &(iter))) 4993 4994 void *netdev_lower_get_next(struct net_device *dev, 4995 struct list_head **iter); 4996 4997 #define netdev_for_each_lower_dev(dev, ldev, iter) \ 4998 for (iter = (dev)->adj_list.lower.next, \ 4999 ldev = netdev_lower_get_next(dev, &(iter)); \ 5000 ldev; \ 5001 ldev = netdev_lower_get_next(dev, &(iter))) 5002 5003 struct net_device 
*netdev_next_lower_dev_rcu(struct net_device *dev, 5004 struct list_head **iter); 5005 int netdev_walk_all_lower_dev(struct net_device *dev, 5006 int (*fn)(struct net_device *lower_dev, 5007 struct netdev_nested_priv *priv), 5008 struct netdev_nested_priv *priv); 5009 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 5010 int (*fn)(struct net_device *lower_dev, 5011 struct netdev_nested_priv *priv), 5012 struct netdev_nested_priv *priv); 5013 5014 void *netdev_adjacent_get_private(struct list_head *adj_list); 5015 void *netdev_lower_get_first_private_rcu(struct net_device *dev); 5016 struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 5017 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); 5018 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, 5019 struct netlink_ext_ack *extack); 5020 int netdev_master_upper_dev_link(struct net_device *dev, 5021 struct net_device *upper_dev, 5022 void *upper_priv, void *upper_info, 5023 struct netlink_ext_ack *extack); 5024 void netdev_upper_dev_unlink(struct net_device *dev, 5025 struct net_device *upper_dev); 5026 int netdev_adjacent_change_prepare(struct net_device *old_dev, 5027 struct net_device *new_dev, 5028 struct net_device *dev, 5029 struct netlink_ext_ack *extack); 5030 void netdev_adjacent_change_commit(struct net_device *old_dev, 5031 struct net_device *new_dev, 5032 struct net_device *dev); 5033 void netdev_adjacent_change_abort(struct net_device *old_dev, 5034 struct net_device *new_dev, 5035 struct net_device *dev); 5036 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); 5037 void *netdev_lower_dev_get_private(struct net_device *dev, 5038 struct net_device *lower_dev); 5039 void netdev_lower_state_changed(struct net_device *lower_dev, 5040 void *lower_state_info); 5041 5042 /* RSS keys are 40 or 52 bytes long */ 5043 #define NETDEV_RSS_KEY_LEN 52 5044 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; 5045 void netdev_rss_key_fill(void *buffer, size_t len); 5046 5047 int skb_checksum_help(struct sk_buff *skb); 5048 int skb_crc32c_csum_help(struct sk_buff *skb); 5049 int skb_csum_hwoffload_help(struct sk_buff *skb, 5050 const netdev_features_t features); 5051 5052 struct netdev_bonding_info { 5053 ifslave slave; 5054 ifbond master; 5055 }; 5056 5057 struct netdev_notifier_bonding_info { 5058 struct netdev_notifier_info info; /* must be first */ 5059 struct netdev_bonding_info bonding_info; 5060 }; 5061 5062 void netdev_bonding_info_change(struct net_device *dev, 5063 struct netdev_bonding_info *bonding_info); 5064 5065 #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) 5066 void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data); 5067 #else 5068 static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, 5069 const void *data) 5070 { 5071 } 5072 #endif 5073 5074 __be16 skb_network_protocol(struct sk_buff *skb, int *depth); 5075 5076 static inline bool can_checksum_protocol(netdev_features_t features, 5077 __be16 protocol) 5078 { 5079 if (protocol == htons(ETH_P_FCOE)) 5080 return !!(features & NETIF_F_FCOE_CRC); 5081 5082 /* Assume this is an IP checksum (not SCTP CRC) */ 5083 5084 if (features & NETIF_F_HW_CSUM) { 5085 /* Can checksum everything */ 5086 return true; 5087 } 5088 5089 switch (protocol) { 5090 case htons(ETH_P_IP): 5091 return !!(features & NETIF_F_IP_CSUM); 5092 case htons(ETH_P_IPV6): 5093 return !!(features & NETIF_F_IPV6_CSUM); 5094 default: 5095 return false; 5096 } 5097 } 5098 
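/* Illustrative sketch (a policy example, not a rule from this header):
 * a transmit path that must resolve CHECKSUM_PARTIAL in software when
 * the device cannot checksum the protocol in question; drop_the_skb is
 * a hypothetical label in the caller:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !can_checksum_protocol(dev->features, skb->protocol) &&
 *	    skb_checksum_help(skb))
 *		goto drop_the_skb;
 */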

#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev,
					struct sk_buff *skb)
{
}
#endif
/* rx skb timestamps */
void net_enable_timestamp(void);
void net_disable_timestamp(void);

static inline ktime_t netdev_get_tstamp(struct net_device *dev,
					const struct skb_shared_hwtstamps *hwtstamps,
					bool cycles)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_tstamp)
		return ops->ndo_get_tstamp(dev, hwtstamps, cycles);

	return hwtstamps->hwtstamp;
}

#ifndef CONFIG_PREEMPT_RT
static inline void netdev_xmit_set_more(bool more)
{
	__this_cpu_write(softnet_data.xmit.more, more);
}

static inline bool netdev_xmit_more(void)
{
	return __this_cpu_read(softnet_data.xmit.more);
}
#else
static inline void netdev_xmit_set_more(bool more)
{
	current->net_xmit.more = more;
}

static inline bool netdev_xmit_more(void)
{
	return current->net_xmit.more;
}
#endif

static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
					      struct sk_buff *skb, struct net_device *dev,
					      bool more)
{
	netdev_xmit_set_more(more);
	return ops->ndo_start_xmit(skb, dev);
}

static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
					    struct netdev_queue *txq, bool more)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	netdev_tx_t rc;

	rc = __netdev_start_xmit(ops, skb, dev, more);
	if (rc == NETDEV_TX_OK)
		txq_trans_update(txq);

	return rc;
}

int netdev_class_create_file_ns(const struct class_attribute *class_attr,
				const void *ns);
void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
				 const void *ns);

extern const struct kobj_ns_type_operations net_ns_type_operations;

const char *netdev_drivername(const struct net_device *dev);

static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
							  netdev_features_t f2)
{
	if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
		if (f1 & NETIF_F_HW_CSUM)
			f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
		else
			f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	return f1 & f2;
}

static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}

netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);
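
/*
 * Illustrative sketch (not part of the original header): drivers use
 * netdev_xmit_more() to batch doorbell writes.  example_kick_tx() is a
 * hypothetical helper that only kicks the (assumed MMIO) doorbell on
 * the last skb of a burst, or when the queue has been stopped.
 */
static inline void example_kick_tx(struct net_device *dev,
				   struct netdev_queue *txq)
{
	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
		/* ... write the device's TX doorbell register here ... */
	}
}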

/* Allow TSO to be used on stacked devices:
 * performing the GSO segmentation before the last device
 * is a performance improvement.
 */
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
							netdev_features_t mask)
{
	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}

int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);
void skb_warn_bad_offload(const struct sk_buff *skb);

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}

void netif_set_tso_max_size(struct net_device *dev, unsigned int size);
void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
void netif_inherit_tso_max(struct net_device *to,
			   const struct net_device *from);
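
/*
 * Illustrative sketch (not part of the original header): a driver whose
 * (hypothetical) hardware handles at most 64 KB of TSO payload split
 * across 32 segments can cap the stack's defaults at probe time with
 * the helpers declared above.
 */
static inline void example_setup_tso_limits(struct net_device *dev)
{
	netif_set_tso_max_size(dev, 65536);	/* max bytes per TSO skb */
	netif_set_tso_max_segs(dev, 32);	/* max segments per TSO skb */
}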

static inline unsigned int
netif_get_gro_max_size(const struct net_device *dev, const struct sk_buff *skb)
{
	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
	return skb->protocol == htons(ETH_P_IPV6) ?
	       READ_ONCE(dev->gro_max_size) :
	       READ_ONCE(dev->gro_ipv4_max_size);
}

static inline unsigned int
netif_get_gso_max_size(const struct net_device *dev, const struct sk_buff *skb)
{
	/* pairs with WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
	return skb->protocol == htons(ETH_P_IPV6) ?
	       READ_ONCE(dev->gso_max_size) :
	       READ_ONCE(dev->gso_ipv4_max_size);
}

static inline bool netif_is_macsec(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACSEC;
}

static inline bool netif_is_macvlan(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;
}

static inline bool netif_is_macvlan_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN_PORT;
}

static inline bool netif_is_bond_master(const struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(const struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
}

static inline bool netif_is_l3_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_MASTER;
}

static inline bool netif_is_l3_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_SLAVE;
}

static inline int dev_sdif(const struct net_device *dev)
{
#ifdef CONFIG_NET_L3_MASTER_DEV
	if (netif_is_l3_slave(dev))
		return dev->ifindex;
#endif
	return 0;
}

static inline bool netif_is_bridge_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_EBRIDGE;
}

static inline bool netif_is_bridge_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_BRIDGE_PORT;
}

static inline bool netif_is_ovs_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OPENVSWITCH;
}

static inline bool netif_is_ovs_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OVS_DATAPATH;
}

static inline bool netif_is_any_bridge_master(const struct net_device *dev)
{
	return netif_is_bridge_master(dev) || netif_is_ovs_master(dev);
}

static inline bool netif_is_any_bridge_port(const struct net_device *dev)
{
	return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
}

static inline bool netif_is_team_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM;
}

static inline bool netif_is_team_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM_PORT;
}

static inline bool netif_is_lag_master(const struct net_device *dev)
{
	return netif_is_bond_master(dev) || netif_is_team_master(dev);
}

static inline bool netif_is_lag_port(const struct net_device *dev)
{
	return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}
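
/*
 * Illustrative sketch (not part of the original header): a netdevice
 * notifier handler might use the LAG predicates above to filter
 * events.  example_note_lag_port() is a hypothetical helper that logs
 * NETDEV_CHANGEUPPER events seen on bonding or team ports.
 */
static inline void example_note_lag_port(struct net_device *dev,
					 unsigned long event)
{
	if (event == NETDEV_CHANGEUPPER && netif_is_lag_port(dev))
		netdev_info(dev, "device is now a LAG port\n");
}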

static inline bool netif_is_rxfh_configured(const struct net_device *dev)
{
	return dev->priv_flags & IFF_RXFH_CONFIGURED;
}

static inline bool netif_is_failover(const struct net_device *dev)
{
	return dev->priv_flags & IFF_FAILOVER;
}

static inline bool netif_is_failover_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_FAILOVER_SLAVE;
}

/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}

/* return true if dev can't cope with mtu frames that need vlan tag insertion */
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
	/* TODO: reserve and use an additional IFF bit, if we get more users */
	return netif_is_macsec(dev);
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
	u8 reg_state = READ_ONCE(dev->reg_state);

	switch (reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state);
	return " (unknown)";
}

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

#define netdev_WARN_ONCE(dev, format, args...)				\
	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
		  netdev_reg_state(dev), ##args)

/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16?  Because with 16 the only overlap we get on a hash of the
 * low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *		0800	IP
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;

extern struct net_device *blackhole_netdev;

/* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
#define DEV_STATS_ADD(DEV, FIELD, VAL)		\
		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)

#endif	/* _LINUX_NETDEVICE_H */