/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <[email protected]>
 *		Corey Minyard <[email protected]>
 *		Donald J. Becker, <[email protected]>
 *		Alan Cox, <[email protected]>
 *		Bjorn Ekwall, <[email protected]>
 *		Pekka Riikonen <[email protected]>
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/local.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
#include <net/xdp.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <uapi/linux/netdev.h>
#include <linux/hashtable.h>
#include <linux/rbtree.h>
#include <net/net_trackers.h>
#include <net/net_debug.h>
#include <net/dropreason.h>

struct netpoll_info;
struct device;
struct ethtool_ops;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm;
struct macsec_context;
struct macsec_ops;
struct netdev_name_node;
struct sd_flow_limit;
struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;
struct xdp_md;

void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);
void netdev_sw_irq_coalesce_default_on(struct net_device *dev);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

#define MAX_NEST_DEV 8

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped */
#define NET_XMIT_CN		0x02	/* congestion notification */
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
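/* Example (illustrative sketch): callers that transmit synchronously, such
 * as tunnel drivers, typically feed the lower xmit path's return value
 * through net_xmit_eval() so that NET_XMIT_CN is not accounted as a failure.
 * The my_stats_* helpers here are hypothetical:
 *
 *	err = dev_queue_xmit(skb);
 *	if (net_xmit_eval(err) == 0)
 *		my_stats_inc_tx_packets(dev);
 *	else
 *		my_stats_inc_tx_errors(dev);
 */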
/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

/*
 *	Compute the worst-case header length according to the protocols
 *	used.
 */

#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 *	Old network device statistics. Fields are native words
 *	(unsigned long) so they can be read and written atomically.
 */

#define NET_DEV_STAT(FIELD)			\
	union {					\
		unsigned long FIELD;		\
		atomic_long_t __##FIELD;	\
	}

struct net_device_stats {
	NET_DEV_STAT(rx_packets);
	NET_DEV_STAT(tx_packets);
	NET_DEV_STAT(rx_bytes);
	NET_DEV_STAT(tx_bytes);
	NET_DEV_STAT(rx_errors);
	NET_DEV_STAT(tx_errors);
	NET_DEV_STAT(rx_dropped);
	NET_DEV_STAT(tx_dropped);
	NET_DEV_STAT(multicast);
	NET_DEV_STAT(collisions);
	NET_DEV_STAT(rx_length_errors);
	NET_DEV_STAT(rx_over_errors);
	NET_DEV_STAT(rx_crc_errors);
	NET_DEV_STAT(rx_frame_errors);
	NET_DEV_STAT(rx_fifo_errors);
	NET_DEV_STAT(rx_missed_errors);
	NET_DEV_STAT(tx_aborted_errors);
	NET_DEV_STAT(tx_carrier_errors);
	NET_DEV_STAT(tx_fifo_errors);
	NET_DEV_STAT(tx_heartbeat_errors);
	NET_DEV_STAT(tx_window_errors);
	NET_DEV_STAT(rx_compressed);
	NET_DEV_STAT(tx_compressed);
};
#undef NET_DEV_STAT
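/* Note (illustrative): thanks to the NET_DEV_STAT() union above, legacy code
 * keeps reading and writing the historical unsigned long fields directly,
 * while paths that may race can update the same storage through the
 * atomic_long_t alias, e.g.:
 *
 *	atomic_long_inc(&dev->stats.__rx_dropped);
 */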
/* per-cpu stats, allocated on demand.
 * Try to fit them in a single cache line, for dev_get_stats() sake.
 */
struct net_device_core_stats {
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	rx_nohandler;
	unsigned long	rx_otherhost_dropped;
} __aligned(4 * sizeof(unsigned long));

#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key_false rps_needed;
extern struct static_key_false rfs_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	struct rb_node		node;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_UNICAST	3
#define NETDEV_HW_ADDR_T_MULTICAST	4
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;

	/* Auxiliary tree for faster lookup on addition and deletion */
	struct rb_root		tree;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
#define netdev_for_each_synced_uc_addr(_ha, _dev) \
	netdev_for_each_uc_addr((_ha), (_dev)) \
		if ((_ha)->sync_cnt)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
#define netdev_for_each_synced_mc_addr(_ha, _dev) \
	netdev_for_each_mc_addr((_ha), (_dev)) \
		if ((_ha)->sync_cnt)

struct hh_cache {
	unsigned int	hh_len;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
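/* Example (illustrative sketch): protocols allocating an output skb reserve
 * LL_RESERVED_SPACE() bytes of headroom so the device can prepend its link
 * layer header without reallocating; payload_len is hypothetical:
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */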
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
	__be16	(*parse_protocol)(const struct sk_buff *skb);
};
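/* Example (illustrative sketch): Ethernet devices are normally given the
 * generic Ethernet header_ops by ether_setup(); a custom link layer would
 * supply its own table (the my_* hooks are hypothetical):
 *
 *	static const struct header_ops my_header_ops = {
 *		.create	= my_header_create,
 *		.parse	= my_header_parse,
 *	};
 *
 *	dev->header_ops = &my_header_ops;
 */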
/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_TESTING,
};

struct gro_list {
	struct list_head	list;
	int			count;
};

/*
 * size of gro hash buckets, must be less than the number of bits in
 * napi_struct::gro_bitmask
 */
#define GRO_HASH_BUCKETS	8

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			defer_hard_irqs_count;
	unsigned long		gro_bitmask;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
	struct sk_buff		*skb;
	struct list_head	rx_list; /* Pending GRO_NORMAL skbs */
	int			rx_count; /* length of rx_list */
	unsigned int		napi_id;
	struct hrtimer		timer;
	struct task_struct	*thread;
	/* control-path-only fields follow */
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
};

enum {
	NAPI_STATE_SCHED,		/* Poll is scheduled */
	NAPI_STATE_MISSED,		/* reschedule a napi */
	NAPI_STATE_DISABLE,		/* Disable pending */
	NAPI_STATE_NPSVC,		/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_LISTED,		/* NAPI added to system lists */
	NAPI_STATE_NO_BUSY_POLL,	/* Do not add in napi_hash, no busy polling */
	NAPI_STATE_IN_BUSY_POLL,	/* sk_busy_loop() owns this NAPI */
	NAPI_STATE_PREFER_BUSY_POLL,	/* prefer busy-polling over softirq processing */
	NAPI_STATE_THREADED,		/* The poll is performed inside its own thread */
	NAPI_STATE_SCHED_THREADED,	/* Napi is currently scheduled in threaded mode */
};

enum {
	NAPIF_STATE_SCHED		= BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED		= BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE		= BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC		= BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_LISTED		= BIT(NAPI_STATE_LISTED),
	NAPIF_STATE_NO_BUSY_POLL	= BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL	= BIT(NAPI_STATE_IN_BUSY_POLL),
	NAPIF_STATE_PREFER_BUSY_POLL	= BIT(NAPI_STATE_PREFER_BUSY_POLL),
	NAPIF_STATE_THREADED		= BIT(NAPI_STATE_THREADED),
	NAPIF_STATE_SCHED_THREADED	= BIT(NAPI_STATE_SCHED_THREADED),
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register an rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister an rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
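/* Example (illustrative sketch): an rx_handler that inspects the skb and
 * lets it continue up the stack unchanged; my_handler and my_priv are
 * hypothetical, and registration is done under the rtnl lock:
 *
 *	static rx_handler_result_t my_handler(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		my_inspect(skb);
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	err = netdev_rx_handler_register(dev, my_handler, my_priv);
 */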
void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

static inline bool napi_prefer_busy_poll(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);

/**
 * napi_schedule - schedule NAPI poll
 * @n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/**
 * napi_schedule_irqoff - schedule NAPI poll
 * @n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

/**
 * napi_complete_done - NAPI processing complete
 * @n: NAPI context
 * @work_done: number of packets processed
 *
 * Mark NAPI processing as complete. Should only be called if poll budget
 * has not been completely consumed.
 * Prefer over napi_complete().
 * Return false if device should avoid rearming interrupts.
 */
bool napi_complete_done(struct napi_struct *n, int work_done);

static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
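/* Example (illustrative sketch): the canonical NAPI poll callback pattern.
 * Interrupts are re-armed only when napi_complete_done() returns true, which
 * respects busy polling; my_poll, my_clean_rx and my_enable_irq are
 * hypothetical driver helpers:
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = my_clean_rx(napi, budget);
 *
 *		if (work_done < budget && napi_complete_done(napi, work_done))
 *			my_enable_irq(napi->dev);
 *		return work_done;
 *	}
 */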
int dev_set_threaded(struct net_device *dev, bool threaded);

/**
 * napi_disable - prevent NAPI from scheduling
 * @n: NAPI context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits until any outstanding processing completes.
 */
void napi_disable(struct napi_struct *n);

void napi_enable(struct napi_struct *n);

/**
 * napi_synchronize - wait until NAPI is not running
 * @n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits until any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}

/**
 * napi_if_scheduled_mark_missed - if napi is running, set NAPIF_STATE_MISSED
 * @n: NAPI context
 *
 * If napi is running, set NAPIF_STATE_MISSED, and return true if
 * NAPI is scheduled.
 **/
static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
	unsigned long val, new;

	val = READ_ONCE(n->state);
	do {
		if (val & NAPIF_STATE_DISABLE)
			return true;

		if (!(val & NAPIF_STATE_SCHED))
			return false;

		new = val | NAPIF_STATE_MISSED;
	} while (!try_cmpxchg(&n->state, &val, new));

	return true;
}

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
 * netif_tx_* functions below are used to manipulate this flag. The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently. The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or the stack (either
 * of the XOFF bits is set in the state). Drivers should not need to call
 * the netif_xmit_*stopped functions; they should only use netif_tx_*.
 */
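/* Example (illustrative sketch): a driver pairs these helpers as follows;
 * my_ring_full() and my_ring_has_room() are hypothetical:
 *
 *	In ndo_start_xmit():
 *		if (my_ring_full(tx_ring))
 *			netif_tx_stop_queue(txq);
 *
 *	In the TX completion handler:
 *		if (netif_tx_queue_stopped(txq) && my_ring_has_room(tx_ring))
 *			netif_tx_wake_queue(txq);
 */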
struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	netdevice_tracker	dev_tracker;

	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	atomic_long_t		trans_timeout;

	/* Subordinate device that the queue has been assigned to */
	struct net_device	*sb_dev;
#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool	*pool;
#endif
/*
 * write-mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;

/*
 * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
 *                                     == 1 : For initns only
 *                                     == 2 : For none.
 */
static inline bool net_has_fallback_tunnels(const struct net *net)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);

	return !fb_tunnels_only_for_init_net ||
	       (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
#else
	return true;
#endif
}

static inline int net_inherit_devconf(void)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	return READ_ONCE(sysctl_devconf_inherit_init_net);
#else
	return 0;
#endif
}

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int	len;
	struct rcu_head	rcu;
	u16		cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16		cpu;
	u16		filter;
	unsigned int	last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int		mask;
	struct rcu_head		rcu;
	struct rps_dev_flow	flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 * Each entry is a 32-bit value. The upper part is the high-order bits
 * of the flow hash, the lower part is the CPU number.
 * rps_cpu_mask is used to partition the space, depending on the number of
 * possible CPUs: rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1.
 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
 * meaning we use 32-6=26 bits for the hash.
 */
struct rps_sock_flow_table {
	u32	mask;

	u32	ents[] ____cacheline_aligned_in_smp;
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff

extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int index = hash & table->mask;
		u32 val = hash & ~rps_cpu_mask;

		/* We only give a hint, preemption can change CPU under us */
		val |= raw_smp_processor_id();

		if (table->ents[index] != val)
			table->ents[index] = val;
	}
}
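/* Worked example (illustrative): with nr_cpu_ids <= 64, rps_cpu_mask is
 * 0x3f, so recording flow hash 0xdeadbeef while running on CPU 3 stores
 *
 *	table->ents[0xdeadbeef & table->mask] = (0xdeadbeef & ~0x3f) | 3;
 *
 * i.e. the top 26 hash bits identify the flow and the low 6 bits name the
 * last CPU that processed it.
 */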
#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct xdp_rxq_info		xdp_rxq;
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
	netdevice_tracker		dev_tracker;

#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool		*pool;
#endif
} ____cacheline_aligned_in_smp;

/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 const char *buf, size_t len);
};

/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
enum xps_map_type {
	XPS_CPUS = 0,
	XPS_RXQS,
	XPS_MAPS_MAX,
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int	len;
	unsigned int	alloc_len;
	struct rcu_head	rcu;
	u16		queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for a device. Maps are indexed by CPU.
 *
 * We keep track of the number of CPUs/rxqs used when the struct is allocated,
 * in nr_ids. This helps avoid out-of-bounds memory accesses.
 *
 * We keep track of the number of traffic classes used when the struct is
 * allocated, in num_tc. This will be used to navigate the maps, to ensure we're
 * not crossing its upper bound, as the original dev->num_tc can be updated in
 * the meantime.
 */
struct xps_dev_maps {
	struct rcu_head	rcu;
	unsigned int	nr_ids;
	s16		num_tc;
	struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))

#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
	(_rxqs * (_tcs) * sizeof(struct xps_map *)))

#endif /* CONFIG_XPS */
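/* Illustrative sketch: attr_map[] is laid out per id (CPU or RXQ) and per
 * traffic class, so a lookup for a given cpu and tc under RCU resembles
 * the following (hypothetical variables, bounds checks omitted):
 *
 *	map = rcu_dereference(dev_maps->attr_map[cpu * dev_maps->num_tc + tc]);
 *	if (map)
 *		queue_index = map->queues[reciprocal_scale(hash, map->len)];
 */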
#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure holds information about the device
 * configured to run the FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev);

enum net_device_path_type {
	DEV_PATH_ETHERNET = 0,
	DEV_PATH_VLAN,
	DEV_PATH_BRIDGE,
	DEV_PATH_PPPOE,
	DEV_PATH_DSA,
	DEV_PATH_MTK_WDMA,
};

struct net_device_path {
	enum net_device_path_type	type;
	const struct net_device		*dev;
	union {
		struct {
			u16		id;
			__be16		proto;
			u8		h_dest[ETH_ALEN];
		} encap;
		struct {
			enum {
				DEV_PATH_BR_VLAN_KEEP,
				DEV_PATH_BR_VLAN_TAG,
				DEV_PATH_BR_VLAN_UNTAG,
				DEV_PATH_BR_VLAN_UNTAG_HW,
			}		vlan_mode;
			u16		vlan_id;
			__be16		vlan_proto;
		} bridge;
		struct {
			int port;
			u16 proto;
		} dsa;
		struct {
			u8 wdma_idx;
			u8 queue;
			u16 wcid;
			u8 bss;
		} mtk_wdma;
	};
};

#define NET_DEVICE_PATH_STACK_MAX	5
#define NET_DEVICE_PATH_VLAN_MAX	2

struct net_device_path_stack {
	int			num_paths;
	struct net_device_path	path[NET_DEVICE_PATH_STACK_MAX];
};

struct net_device_path_ctx {
	const struct net_device *dev;
	u8			daddr[ETH_ALEN];

	int			num_vlans;
	struct {
		u16		id;
		__be16		proto;
	} vlan[NET_DEVICE_PATH_VLAN_MAX];
};

enum tc_setup_type {
	TC_QUERY_CAPS,
	TC_SETUP_QDISC_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
	TC_SETUP_BLOCK,
	TC_SETUP_QDISC_CBS,
	TC_SETUP_QDISC_RED,
	TC_SETUP_QDISC_PRIO,
	TC_SETUP_QDISC_MQ,
	TC_SETUP_QDISC_ETF,
	TC_SETUP_ROOT_QDISC,
	TC_SETUP_QDISC_GRED,
	TC_SETUP_QDISC_TAPRIO,
	TC_SETUP_FT,
	TC_SETUP_QDISC_ETS,
	TC_SETUP_QDISC_TBF,
	TC_SETUP_QDISC_FIFO,
	TC_SETUP_QDISC_HTB,
	TC_SETUP_ACT,
};

/* These structures hold the attributes of bpf state that are being passed
 * to the netdevice through the bpf op.
 */
enum bpf_netdev_command {
	/* Set or clear a bpf program used in the earliest stages of packet
	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
	 * is responsible for calling bpf_prog_put on any old progs that are
	 * stored. In case of error, the callee need not release the new prog
	 * reference, but on success it takes ownership and must bpf_prog_put
	 * when it is no longer used.
	 */
	XDP_SETUP_PROG,
	XDP_SETUP_PROG_HW,
	/* BPF program for offload callbacks, invoked at program load time. */
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
	XDP_SETUP_XSK_POOL,
};

struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
struct bpf_xdp_link;

enum bpf_xdp_mode {
	XDP_MODE_SKB = 0,
	XDP_MODE_DRV = 1,
	XDP_MODE_HW = 2,
	__MAX_XDP_MODE
};

struct bpf_xdp_entity {
	struct bpf_prog *prog;
	struct bpf_xdp_link *link;
};

struct netdev_bpf {
	enum bpf_netdev_command command;
	union {
		/* XDP_SETUP_PROG */
		struct {
			u32 flags;
			struct bpf_prog *prog;
			struct netlink_ext_ack *extack;
		};
		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
		struct {
			struct bpf_offloaded_map *offmap;
		};
		/* XDP_SETUP_XSK_POOL */
		struct {
			struct xsk_buff_pool *pool;
			u16 queue_id;
		} xsk;
	};
};
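/* Example (illustrative sketch): a driver's ndo_bpf hook dispatches on the
 * command and must honour the reference ownership rules described above;
 * my_xdp_setup() is hypothetical:
 *
 *	static int my_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return my_xdp_setup(dev, bpf->prog, bpf->extack);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */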
/* Flags for ndo_xsk_wakeup. */
#define XDP_WAKEUP_RX (1 << 0)
#define XDP_WAKEUP_TX (1 << 1)

#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
	int	(*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack);
	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
	void	(*xdo_dev_state_free) (struct xfrm_state *x);
	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
				       struct xfrm_state *x);
	void	(*xdo_dev_state_advance_esn) (struct xfrm_state *x);
	void	(*xdo_dev_state_update_curlft) (struct xfrm_state *x);
	int	(*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
	void	(*xdo_dev_policy_delete) (struct xfrm_policy *x);
	void	(*xdo_dev_policy_free) (struct xfrm_policy *x);
};
#endif

struct dev_ifalias {
	struct rcu_head rcuhead;
	char ifalias[];
};

struct devlink;
struct tlsdev_ops;

struct netdev_net_notifier {
	struct list_head list;
	struct notifier_block *nb;
};

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when device is unregistered or when registration
 *	fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
 *	the queue before that can happen; it's for obsolete devices and weird
 *	corner cases, but the stack really does a non-trivial amount
 *	of useless work if you return NETDEV_TX_BUSY.
 *	Required; cannot be NULL.
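 *
 *	A minimal sketch (hypothetical driver; not a template mandated by
 *	this header) that stops the queue when its ring fills, so
 *	NETDEV_TX_BUSY is never returned in practice:
 *
 *	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
 *					 struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		my_post_to_ring(priv, skb);
 *		if (my_ring_full(priv))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}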
 *
 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
 *					   struct net_device *dev,
 *					   netdev_features_t features);
 *	Called by core transmit path to determine if device is capable of
 *	performing offload operations on a given packet. This is to give
 *	the device an opportunity to implement any restrictions that cannot
 *	be otherwise expressed by feature flags. The check is called with
 *	the set of features that the stack has calculated and it returns
 *	those the driver believes to be appropriate.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *			   struct net_device *sb_dev);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device's address list filtering
 *	changes. If the driver handles unicast address filtering, it should
 *	set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	MAC address cannot be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Old-style ioctl entry point. This is used internally by the
 *	appletalk and ieee802154 subsystems but is no longer called by
 *	the device ioctl handler.
 *
 * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Used by the bonding driver for its device-specific ioctls:
 *	SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
 *	SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
 *
 * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
 *	SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set a network device's bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transmission Unit (MTU)
 *	of a device.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * void (*ndo_get_stats64)(struct net_device *dev,
 *			   struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
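 *
 *	A minimal sketch of option 1 (hypothetical driver counters):
 *
 *	static void my_get_stats64(struct net_device *dev,
 *				   struct rtnl_link_stats64 *storage)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		storage->rx_packets = priv->rx_packets;
 *		storage->tx_packets = priv->tx_packets;
 *	}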
 *
 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
 *	Return true if this device supports offload stats of this attr_id.
 *
 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
 *			void *attr_data)
 *	Get statistics for offload operations by attr_id. Write it into the
 *	attr_data pointer.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
 *			  u8 qos, __be16 proto);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *			  int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 *
 *      Enable or disable the VF's ability to query its RSS Redirection Table
 *      and Hash Key. This is needed since on some devices VFs share this
 *      information with the PF and querying it may introduce a theoretical
 *      security risk.
 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
 *		       void *type_data);
 *	Called to setup any 'tc' scheduler, classifier or action on @dev.
 *	This is always called from the stack with the rtnl lock held and netif
 *	tx queues stopped. This allows the netdevice to perform queue
 *	management safely.
 *
 *	Fibre Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP).
 *	The LLD can perform necessary setup and returns 1 to indicate the
 *	device is set up successfully to perform DDP on this I/O, otherwise
 *	this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with the Fibre Channel management service as per
 *	the FC-GS Fabric Device Management Information (FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override default World Wide
 *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS.  rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev a subordinate (slave) of this device.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release a previously enslaved netdev.
 *
 * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
 *					    struct sk_buff *skb,
 *					    bool all_slaves);
 *	Get the xmit slave of the master device. If all_slaves is true, the
 *	function assumes all the slaves can transmit.
 *
 *	Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. The passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
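 *
 *	A minimal sketch of ndo_fix_features (hypothetical constraint; the
 *	NETIF_F_* flags come from linux/netdev_features.h):
 *
 *	static netdev_features_t my_fix_features(struct net_device *dev,
 *						 netdev_features_t features)
 *	{
 *		if (my_hw_lacks_checksum_offload(dev))
 *			features &= ~NETIF_F_CSUM_MASK;
 *		return features;
 *	}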
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid, u16 flags,
 *		      struct netlink_ext_ack *extack);
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid,
 *		      struct netlink_ext_ack *extack);
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, struct nlattr *tb[],
 *			   struct net_device *dev,
 *			   u16 vid,
 *			   struct netlink_ext_ack *extack);
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, struct net_device *filter_dev,
 *		       int *idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[],
 *		      u16 nlmsg_flags, struct netlink_ext_ack *extack);
 *	Adds an MDB entry to dev.
 * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
 *		      struct netlink_ext_ack *extack);
 *	Deletes the MDB entry from dev.
 * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
 *		       struct netlink_callback *cb);
 *	Dumps MDB entries from dev. The first argument (marker) in the netlink
 *	callback is used by core rtnetlink code.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags, struct netlink_ext_ack *extack)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask,
 *			     int nlflags)
 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags);
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (e.g.
 *	network cables) or protocol-dependent mechanisms (e.g.
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *			       struct netdev_phys_item_id *ppid);
 *	Called to get ID of physical port of this device. If driver does
 *	not implement this, it is assumed that the hw is not able to have
 *	multiple net devices on single physical port.
 *
 * int (*ndo_get_port_parent_id)(struct net_device *dev,
 *				 struct netdev_phys_item_id *ppid)
 *	Called to get the parent ID of the physical port of this device.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *				 struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware. 'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload. Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *	the station and priv is the structure returned by the add
 *	operation.
 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
 *			     int queue_index, u32 maxrate);
 *	Called when a user wants to set a max-rate limitation of specific
 *	TX queue.
 * int (*ndo_get_iflink)(const struct net_device *dev);
 *	Called to get the iflink value of this device.
 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 *	This function is used to get egress tunnel information for the given
 *	skb. This is useful for retrieving outer tunnel header parameters
 *	while sampling packets.
 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
 *	This function is used to specify the headroom that the skb must
 *	consider when allocating an skb during packet reception. Setting an
 *	appropriate rx headroom value allows avoiding skb head copy on
 *	forward. Setting a negative value resets the rx headroom to the
 *	default value.
 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
 *	This function is used to set or query state related to XDP on the
 *	netdevice and manage BPF offload. See definition of
 *	enum bpf_netdev_command for details.
 * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
 *			u32 flags);
 *	This function is used to submit @n XDP packets for transmit on a
 *	netdevice. Returns the number of frames successfully transmitted;
 *	frames that were dropped are freed/returned via xdp_return_frame().
 *	A negative return value means a general error occurred invoking the
 *	ndo: no frames were transmitted and the core caller will free all
 *	frames.
 * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
 *					        struct xdp_buff *xdp);
 *      Get the xmit slave of master device based on the xdp_buff.
 * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
 *      This function is used to wake up the softirq, ksoftirqd or kthread
 *	responsible for sending and/or receiving packets on a specific
 *	queue id bound to an AF_XDP socket. The flags field specifies if
 *	only RX, only TX, or both should be woken up using the flags
 *	XDP_WAKEUP_RX and XDP_WAKEUP_TX.
 * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
 *			 int cmd);
 *	Add, change, delete or get information on an IPv4 tunnel.
 * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
 *	If a device is paired with a peer device, return the peer instance.
 *	The caller must be under RCU read context.
 * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
 *	Get the forwarding path to reach the real device from the HW destination address.
 * ktime_t (*ndo_get_tstamp)(struct net_device *dev,
 *			     const struct skb_shared_hwtstamps *hwtstamps,
 *			     bool cycles);
 *	Get hardware timestamp based on normal/adjustable time or free running
 *	cycle counter. This function is required if the physical clock supports
 *	a free-running cycle counter.
 */
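/* Example (illustrative sketch): a simple driver typically implements only a
 * few of these hooks and leaves the rest NULL. eth_validate_addr() is the
 * stock Ethernet helper; the my_* handlers are hypothetical:
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_open	   = my_open,
 *		.ndo_stop	   = my_stop,
 *		.ndo_start_xmit	   = my_start_xmit,
 *		.ndo_get_stats64   = my_get_stats64,
 *		.ndo_validate_addr = eth_validate_addr,
 *	};
 */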
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
						      struct net_device *dev,
						      netdev_features_t features);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    struct net_device *sb_dev);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_eth_ioctl)(struct net_device *dev,
						 struct ifreq *ifr, int cmd);
	int			(*ndo_siocbond)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_siocwandev)(struct net_device *dev,
						  struct if_settings *ifs);
	int			(*ndo_siocdevprivate)(struct net_device *dev,
						      struct ifreq *ifr,
						      void __user *data, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev,
						   unsigned int txqueue);

	void			(*ndo_get_stats64)(struct net_device *dev,
						   struct rtnl_link_stats64 *storage);
	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
	int			(*ndo_get_offload_stats)(int attr_id,
							 const struct net_device *dev,
							 void *attr_data);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan,
						   u8 qos, __be16 proto);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats
						    *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_get_vf_guid)(struct net_device *dev,
						   int vf,
						   struct ifla_vf_guid *node_guid,
						   struct ifla_vf_guid *port_guid);
	int			(*ndo_set_vf_guid)(struct net_device *dev,
						   int vf, u64 guid,
						   int guid_type);
	int			(*ndo_set_vf_rss_query_en)(
						   struct net_device *dev,
						   int vf, bool setting);
	int			(*ndo_setup_tc)(struct net_device *dev,
						enum tc_setup_type type,
						void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev,
						 struct netlink_ext_ack *extack);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	struct net_device*	(*ndo_get_xmit_slave)(struct net_device *dev,
						      struct sk_buff *skb,
						      bool all_slaves);
	struct net_device*	(*ndo_sk_get_lower_dev)(struct net_device *dev,
							struct sock *sk);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct net_device *dev,
						       struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct net_device *dev,
						     struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid, struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del_bulk)(struct ndmsg *ndm,
						    struct nlattr *tb[],
						    struct net_device *dev,
						    u16 vid,
						    struct netlink_ext_ack *extack);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int *idx);
	int			(*ndo_fdb_get)(struct sk_buff *skb,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid, u32 portid, u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_add)(struct net_device *dev,
					       struct nlattr *tb[],
					       u16 nlmsg_flags,
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_del)(struct net_device *dev,
					       struct nlattr *tb[],
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_dump)(struct net_device *dev,
						struct sk_buff *skb,
						struct netlink_callback *cb);
	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags,
						      struct netlink_ext_ack *extack);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
	int	(*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
	int	(*ndo_get_phys_port_id)(struct net_device *dev,
					struct netdev_phys_item_id *ppid);
	int	(*ndo_get_port_parent_id)(struct net_device *dev,
					  struct netdev_phys_item_id *ppid);
	int	(*ndo_get_phys_port_name)(struct net_device *dev,
					  char *name, size_t len);
	void	*(*ndo_dfwd_add_station)(struct net_device *pdev,
					 struct net_device *dev);
	void	(*ndo_dfwd_del_station)(struct net_device *pdev, void *priv);

	int	(*ndo_set_tx_maxrate)(struct net_device *dev, int queue_index,
				      u32 maxrate);
	int	(*ndo_get_iflink)(const struct net_device *dev);
	int	(*ndo_fill_metadata_dst)(struct net_device *dev,
					 struct sk_buff *skb);
	void	(*ndo_set_rx_headroom)(struct net_device *dev,
				       int needed_headroom);
	int	(*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
	int	(*ndo_xdp_xmit)(struct net_device *dev, int n,
				struct xdp_frame **xdp, u32 flags);
	struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
						     struct xdp_buff *xdp);
	int	(*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
	int	(*ndo_tunnel_ctl)(struct net_device *dev,
				  struct ip_tunnel_parm *p, int cmd);
	struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
	int	(*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
					 struct net_device_path *path);
	ktime_t	(*ndo_get_tstamp)(struct net_device *dev,
				  const struct skb_shared_hwtstamps *hwtstamps,
				  bool cycles);
};

struct xdp_metadata_ops {
	int	(*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp);
	int	(*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash);
};
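/*
 * Example (an illustrative sketch, not part of this header): a minimal
 * ops table for a hypothetical "foo" Ethernet driver. In practice
 * ndo_start_xmit is the one callback every real driver must provide;
 * eth_mac_addr() and eth_validate_addr() are the stock Ethernet helpers.
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,
 *		.ndo_stop		= foo_stop,
 *		.ndo_start_xmit		= foo_start_xmit,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 */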
/**
 * enum netdev_priv_flags - &struct net_device priv_flags
 *
 * These are the &struct net_device priv_flags; they are only set
 * internally by drivers and used in the kernel. These flags are
 * invisible to userspace; this means that the order of these flags can
 * change during any kernel release.
 *
 * You should have a pretty good reason to be extending these flags.
 *
 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 * @IFF_EBRIDGE: Ethernet bridging device
 * @IFF_BONDING: bonding master or slave
 * @IFF_ISATAP: ISATAP interface (RFC4214)
 * @IFF_WAN_HDLC: WAN HDLC device
 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 *	release skb->dst
 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 * @IFF_MACVLAN_PORT: device used as macvlan port
 * @IFF_BRIDGE_PORT: device used as bridge port
 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 * @IFF_UNICAST_FLT: Supports unicast filtering
 * @IFF_TEAM_PORT: device used as team port
 * @IFF_SUPP_NOFCS: device supports sending custom FCS
 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 *	change when it's running
 * @IFF_MACVLAN: Macvlan device
 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
 *	underlying stacked devices
 * @IFF_L3MDEV_MASTER: device is an L3 master device
 * @IFF_NO_QUEUE: device can run without qdisc attached
 * @IFF_OPENVSWITCH: device is an Open vSwitch master
 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
 * @IFF_TEAM: device is a team device
 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
 *	entity (i.e. the master device for bridged veth)
 * @IFF_MACSEC: device is a MACsec device
 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
 * @IFF_FAILOVER: device is a failover master device
 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
 * @IFF_NO_ADDRCONF: prevent ipv6 addrconf
 * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
 *	skb_headlen(skb) == 0 (data starts from frag0)
 * @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
	IFF_EBRIDGE			= 1<<1,
	IFF_BONDING			= 1<<2,
	IFF_ISATAP			= 1<<3,
	IFF_WAN_HDLC			= 1<<4,
	IFF_XMIT_DST_RELEASE		= 1<<5,
	IFF_DONT_BRIDGE			= 1<<6,
	IFF_DISABLE_NETPOLL		= 1<<7,
	IFF_MACVLAN_PORT		= 1<<8,
	IFF_BRIDGE_PORT			= 1<<9,
	IFF_OVS_DATAPATH		= 1<<10,
	IFF_TX_SKB_SHARING		= 1<<11,
	IFF_UNICAST_FLT			= 1<<12,
	IFF_TEAM_PORT			= 1<<13,
	IFF_SUPP_NOFCS			= 1<<14,
	IFF_LIVE_ADDR_CHANGE		= 1<<15,
	IFF_MACVLAN			= 1<<16,
	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
	IFF_L3MDEV_MASTER		= 1<<18,
	IFF_NO_QUEUE			= 1<<19,
	IFF_OPENVSWITCH			= 1<<20,
	IFF_L3MDEV_SLAVE		= 1<<21,
	IFF_TEAM			= 1<<22,
	IFF_RXFH_CONFIGURED		= 1<<23,
	IFF_PHONY_HEADROOM		= 1<<24,
	IFF_MACSEC			= 1<<25,
	IFF_NO_RX_HANDLER		= 1<<26,
	IFF_FAILOVER			= 1<<27,
	IFF_FAILOVER_SLAVE		= 1<<28,
	IFF_L3MDEV_RX_HANDLER		= 1<<29,
	IFF_NO_ADDRCONF			= BIT_ULL(30),
	IFF_TX_SKB_NO_LINEAR		= BIT_ULL(31),
	IFF_CHANGE_PROTO_DOWN		= BIT_ULL(32),
};

#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
#define IFF_EBRIDGE			IFF_EBRIDGE
#define IFF_BONDING			IFF_BONDING
#define IFF_ISATAP			IFF_ISATAP
#define IFF_WAN_HDLC			IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
#define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
#define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
#define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
#define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
#define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
#define IFF_UNICAST_FLT			IFF_UNICAST_FLT
#define IFF_TEAM_PORT			IFF_TEAM_PORT
#define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN			IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
#define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE			IFF_NO_QUEUE
#define IFF_OPENVSWITCH			IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE		IFF_L3MDEV_SLAVE
#define IFF_TEAM			IFF_TEAM
#define IFF_RXFH_CONFIGURED		IFF_RXFH_CONFIGURED
#define IFF_PHONY_HEADROOM		IFF_PHONY_HEADROOM
#define IFF_MACSEC			IFF_MACSEC
#define IFF_NO_RX_HANDLER		IFF_NO_RX_HANDLER
#define IFF_FAILOVER			IFF_FAILOVER
#define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
#define IFF_TX_SKB_NO_LINEAR		IFF_TX_SKB_NO_LINEAR
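/*
 * Example (an illustrative sketch): drivers OR the relevant flags into
 * dev->priv_flags before registering the device, typically from their
 * probe path:
 *
 *	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
 */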
/* Specifies the type of the struct net_device::ml_priv pointer */
enum netdev_ml_priv_type {
	ML_PRIV_NONE,
	ML_PRIV_CAN,
};

/**
 * struct net_device - The DEVICE structure.
 *
 * Actually, this whole structure is a big mistake. It mixes I/O
 * data with strictly "high-level" data, and it has to know about
 * almost every data structure used in the INET module.
 *
 * @name: This is the first field of the "visible" part of this structure
 *	(i.e. as seen by users in the "Space.c" file). It is the name
 *	of the interface.
 *
 * @name_node: Name hashlist node
 * @ifalias: SNMP alias
 * @mem_end: Shared memory end
 * @mem_start: Shared memory start
 * @base_addr: Device I/O address
 * @irq: Device IRQ number
 *
 * @state: Generic network queuing layer state, see netdev_state_t
 * @dev_list: The global list of network devices
 * @napi_list: List entry used for polling NAPI devices
 * @unreg_list: List entry when we are unregistering the
 *	device; see the function unregister_netdev
 * @close_list: List entry used when we are closing the device
 * @ptype_all: Device-specific packet handlers for all protocols
 * @ptype_specific: Device-specific, protocol-specific packet handlers
 *
 * @adj_list: Directly linked devices, like slaves for bonding
 * @features: Currently active device features
 * @hw_features: User-changeable features
 *
 * @wanted_features: User-requested features
 * @vlan_features: Mask of features inheritable by VLAN devices
 *
 * @hw_enc_features: Mask of features inherited by encapsulating devices
 *	This field indicates what encapsulation
 *	offloads the hardware is capable of doing,
 *	and drivers will need to set them appropriately.
 *
 * @mpls_features: Mask of features inheritable by MPLS
 * @gso_partial_features: value(s) from NETIF_F_GSO\*
 *
 * @ifindex: interface index
 * @group: The group the device belongs to
 *
 * @stats: Statistics struct, which was left as a legacy, use
 *	rtnl_link_stats64 instead
 *
 * @core_stats: core networking counters,
 *	do not use this in drivers
 * @carrier_up_count: Number of times the carrier has been up
 * @carrier_down_count: Number of times the carrier has been down
 *
 * @wireless_handlers: List of functions to handle Wireless Extensions,
 *	instead of ioctl,
 *	see <net/iw_handler.h> for details.
 * @wireless_data: Instance data managed by the core of wireless extensions
 *
 * @netdev_ops: Includes several pointers to callbacks,
 *	if one wants to override the ndo_*() functions
 * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks.
 * @ethtool_ops: Management operations
 * @l3mdev_ops: Layer 3 master device operations
 * @ndisc_ops: Includes callbacks for different IPv6 neighbour
 *	discovery handling. Necessary for e.g. 6LoWPAN.
 * @xfrmdev_ops: Transformation offload operations
 * @tlsdev_ops: Transport Layer Security offload operations
 * @header_ops: Includes callbacks for creating, parsing, caching, etc.
 *	of Layer 2 headers.
 *
 * @flags: Interface flags (a la BSD)
 * @xdp_features: XDP capability supported by the device
 * @priv_flags: Like 'flags' but invisible to userspace,
 *	see if.h for the definitions
 * @gflags: Global flags (kept as legacy)
 * @padded: How much padding added by alloc_netdev()
 * @operstate: RFC2863 operstate
 * @link_mode: Mapping policy to operstate
 * @if_port: Selectable AUI, TP, ...
 * @dma: DMA channel
 * @mtu: Interface MTU value
 * @min_mtu: Interface Minimum MTU value
 * @max_mtu: Interface Maximum MTU value
 * @type: Interface hardware type
 * @hard_header_len: Maximum hardware header length.
 * @min_header_len: Minimum hardware header length
 *
 * @needed_headroom: Extra headroom the hardware may need, but not in all
 *	cases can this be guaranteed
 * @needed_tailroom: Extra tailroom the hardware may need, but not in all
 *	cases can this be guaranteed. Some cases also use
 *	LL_MAX_HEADER instead to allocate the skb
 *
 * interface address info:
 *
 * @perm_addr: Permanent hw address
 * @addr_assign_type: Hw address assignment type
 * @addr_len: Hardware address length
 * @upper_level: Maximum depth level of upper devices.
 * @lower_level: Maximum depth level of lower devices.
 * @neigh_priv_len: Used in neigh_alloc()
 * @dev_id: Used to differentiate devices that share
 *	the same link layer address
 * @dev_port: Used to differentiate devices that share
 *	the same function
 * @addr_list_lock: XXX: need comments on this one
 * @name_assign_type: network interface name assignment type
 * @uc_promisc: Flag that indicates promiscuous mode
 *	has been enabled due to the need to listen to
 *	additional unicast addresses in a device that
 *	does not implement ndo_set_rx_mode()
 * @uc: unicast mac addresses
 * @mc: multicast mac addresses
 * @dev_addrs: list of device hw addresses
 * @queues_kset: Group of all Kobjects in the Tx and RX queues
 * @promiscuity: Number of times the NIC is told to work in
 *	promiscuous mode; if it becomes 0 the NIC will
 *	exit promiscuous mode
 * @allmulti: Counter, enables or disables allmulticast mode
 *
 * @vlan_info: VLAN info
 * @dsa_ptr: dsa specific data
 * @tipc_ptr: TIPC specific data
 * @atalk_ptr: AppleTalk link
 * @ip_ptr: IPv4 specific data
 * @ip6_ptr: IPv6 specific data
 * @ax25_ptr: AX.25 specific data
 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
 *	device struct
 * @mpls_ptr: mpls_dev struct pointer
 * @mctp_ptr: MCTP specific data
 *
 * @dev_addr: Hw address (before bcast,
 *	because most packets are unicast)
 *
 * @_rx: Array of RX queues
 * @num_rx_queues: Number of RX queues
 *	allocated at register_netdev() time
 * @real_num_rx_queues: Number of RX queues currently active in device
 * @xdp_prog: XDP sockets filter program pointer
 * @gro_flush_timeout: timeout for GRO layer in NAPI
 * @napi_defer_hard_irqs: If not zero, provides a counter that would
 *	allow to avoid NIC hard IRQ, on busy queues.
 *
 * @rx_handler: handler for received packets
 * @rx_handler_data: XXX: need comments on this one
 * @miniq_ingress: ingress/clsact qdisc specific data for
 *	ingress processing
 * @ingress_queue: XXX: need comments on this one
 * @nf_hooks_ingress: netfilter hooks executed for ingress packets
 * @broadcast: hw bcast address
 *
 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
 *	indexed by RX queue number. Assigned by driver.
 *	This must only be set if the ndo_rx_flow_steer
 *	operation is defined
 * @index_hlist: Device index hash chain
 *
 * @_tx: Array of TX queues
 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
 * @real_num_tx_queues: Number of TX queues currently active in device
 * @qdisc: Root qdisc from userspace point of view
 * @tx_queue_len: Max frames per queue allowed
 * @tx_global_lock: XXX: need comments on this one
 * @xdp_bulkq: XDP device bulk queue
 * @xps_maps: all CPUs/RXQs maps for XPS device
 *
 * @miniq_egress: clsact qdisc specific data for
 *	egress processing
 * @nf_hooks_egress: netfilter hooks executed for egress packets
 * @qdisc_hash: qdisc hash table
 * @watchdog_timeo: Represents the timeout that is used by
 *	the watchdog (see dev_watchdog())
 * @watchdog_timer: Timer used by the watchdog (see dev_watchdog())
 *
 * @proto_down_reason: reason a netdev interface is held down
 * @pcpu_refcnt: Number of references to this device
 * @dev_refcnt: Number of references to this device
 * @refcnt_tracker: Tracker directory for tracked references to this device
 * @todo_list: Delayed register/unregister
 * @link_watch_list: XXX: need comments on this one
 *
 * @reg_state: Register/unregister state machine
 * @dismantle: Device is going to be freed
 * @rtnl_link_state: This enum represents the phases of creating
 *	a new link
 *
 * @needs_free_netdev: Should unregister perform free_netdev?
 * @priv_destructor: Called from unregister
 * @npinfo: XXX: need comments on this one
 * @nd_net: Network namespace this network device is inside
 *
 * @ml_priv: Mid-layer private
 * @ml_priv_type: Mid-layer private type
 * @lstats: Loopback statistics
 * @tstats: Tunnel statistics
 * @dstats: Dummy statistics
 * @vstats: Virtual ethernet statistics
 *
 * @garp_port: GARP
 * @mrp_port: MRP
 *
 * @dm_private: Drop monitor private
 *
 * @dev: Class/net/name entry
 * @sysfs_groups: Space for optional device, statistics and wireless
 *	sysfs groups
 *
 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
 * @rtnl_link_ops: Rtnl_link_ops
 *
 * @gso_max_size: Maximum size of generic segmentation offload
 * @tso_max_size: Device (as in HW) limit on the max TSO request size
 * @gso_max_segs: Maximum number of segments that can be passed to the
 *	NIC for GSO
 * @tso_max_segs: Device (as in HW) limit on the max TSO segment count
 * @gso_ipv4_max_size: Maximum size of generic segmentation offload,
 *	for IPv4.
 *
 * @dcbnl_ops: Data Center Bridging netlink ops
 * @num_tc: Number of traffic classes in the net device
 * @tc_to_txq: XXX: need comments on this one
 * @prio_tc_map: XXX: need comments on this one
 *
 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
 *
 * @priomap: XXX: need comments on this one
 * @phydev: Physical device may attach itself
 *	for hardware timestamping
 * @sfp_bus: attached &struct sfp_bus structure.
 *
 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
 *
 * @proto_down: protocol port state information can be sent to the
 *	switch driver and used to set the phys state of the
 *	switch port.
 *
 * @wol_enabled: Wake-on-LAN is enabled
 *
 * @threaded: napi threaded mode is enabled
 *
 * @net_notifier_list: List of per-net netdev notifier blocks
 *	that follow this device when it is moved
 *	to another network namespace.
 *
 * @macsec_ops: MACsec offloading ops
 *
 * @udp_tunnel_nic_info: static structure describing the UDP tunnel
 *	offload capabilities of the device
 * @udp_tunnel_nic: UDP tunnel offload state
 * @xdp_state: stores info on attached XDP BPF programs
 *
 * @nested_level: Used as a parameter of spin_lock_nested() of
 *	dev->addr_list_lock.
 * @unlink_list: As netif_addr_lock() can be called recursively,
 *	keep a list of interfaces to be deleted.
 * @gro_max_size: Maximum size of aggregated packet in generic
 *	receive offload (GRO)
 * @gro_ipv4_max_size: Maximum size of aggregated packet in generic
 *	receive offload (GRO), for IPv4.
 *
 * @dev_addr_shadow: Copy of @dev_addr to catch direct writes.
 * @linkwatch_dev_tracker: refcount tracker used by linkwatch.
 * @watchdog_dev_tracker: refcount tracker used by watchdog.
 * @dev_registered_tracker: tracker for reference held while
 *	registered
 * @offload_xstats_l3: L3 HW stats for this netdevice.
 *
 * @devlink_port: Pointer to related devlink port structure.
 *	Assigned by a driver before netdev registration using
 *	SET_NETDEV_DEVLINK_PORT macro. This pointer is static
 *	during the time netdevice is registered.
 *
 * FIXME: cleanup struct net_device such that network protocol info
 * moves out.
 */

struct net_device {
	char			name[IFNAMSIZ];
	struct netdev_name_node	*name_node;
	struct dev_ifalias __rcu *ifalias;
	/*
	 * I/O specific fields
	 * FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;
	unsigned long		mem_start;
	unsigned long		base_addr;

	/*
	 * Some hardware also needs these fields (state, dev_list,
	 * napi_list, unreg_list, close_list) but they are not
	 * part of the usual set specified in Space.c.
	 */

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	close_list;
	struct list_head	ptype_all;
	struct list_head	ptype_specific;

	struct {
		struct list_head upper;
		struct list_head lower;
	} adj_list;

	/* Read-mostly cache-line for fast-path access */
	unsigned int		flags;
	xdp_features_t		xdp_features;
	unsigned long long	priv_flags;
	const struct net_device_ops *netdev_ops;
	const struct xdp_metadata_ops *xdp_metadata_ops;
	int			ifindex;
	unsigned short		gflags;
	unsigned short		hard_header_len;

	/* Note : dev->mtu is often read without holding a lock.
	 * Writers usually hold RTNL.
	 * It is recommended to use READ_ONCE() to annotate the reads,
	 * and to use WRITE_ONCE() to annotate the writes.
	 */
	unsigned int		mtu;
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	netdev_features_t	features;
	netdev_features_t	hw_features;
	netdev_features_t	wanted_features;
	netdev_features_t	vlan_features;
	netdev_features_t	hw_enc_features;
	netdev_features_t	mpls_features;
	netdev_features_t	gso_partial_features;

	unsigned int		min_mtu;
	unsigned int		max_mtu;
	unsigned short		type;
	unsigned char		min_header_len;
	unsigned char		name_assign_type;

	int			group;

	struct net_device_stats	stats; /* not used by modern drivers */

	struct net_device_core_stats __percpu *core_stats;

	/* Stats to monitor link on/off, flapping */
	atomic_t		carrier_up_count;
	atomic_t		carrier_down_count;

#ifdef CONFIG_WIRELESS_EXT
	const struct iw_handler_def *wireless_handlers;
	struct iw_public_data	*wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_L3_MASTER_DEV
	const struct l3mdev_ops	*l3mdev_ops;
#endif
#if IS_ENABLED(CONFIG_IPV6)
	const struct ndisc_ops	*ndisc_ops;
#endif

#ifdef CONFIG_XFRM_OFFLOAD
	const struct xfrmdev_ops *xfrmdev_ops;
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
	const struct tlsdev_ops	*tlsdev_ops;
#endif

	const struct header_ops	*header_ops;

	unsigned char		operstate;
	unsigned char		link_mode;

	unsigned char		if_port;
	unsigned char		dma;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN];
	unsigned char		addr_assign_type;
	unsigned char		addr_len;
	unsigned char		upper_level;
	unsigned char		lower_level;

	unsigned short		neigh_priv_len;
	unsigned short		dev_id;
	unsigned short		dev_port;
	unsigned short		padded;

	spinlock_t		addr_list_lock;
	int			irq;

	struct netdev_hw_addr_list	uc;
	struct netdev_hw_addr_list	mc;
	struct netdev_hw_addr_list	dev_addrs;

#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif
#ifdef CONFIG_LOCKDEP
	struct list_head	unlink_list;
#endif
	unsigned int		promiscuity;
	unsigned int		allmulti;
	bool			uc_promisc;
#ifdef CONFIG_LOCKDEP
	unsigned char		nested_level;
#endif

	/* Protocol-specific pointers */

	struct in_device __rcu	*ip_ptr;
	struct inet6_dev __rcu	*ip6_ptr;
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port		*dsa_ptr;
#endif
#if IS_ENABLED(CONFIG_TIPC)
	struct tipc_bearer __rcu *tipc_ptr;
#endif
#if IS_ENABLED(CONFIG_ATALK)
	void			*atalk_ptr;
#endif
#if IS_ENABLED(CONFIG_AX25)
	void			*ax25_ptr;
#endif
#if IS_ENABLED(CONFIG_CFG80211)
	struct wireless_dev	*ieee80211_ptr;
#endif
#if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
	struct wpan_dev		*ieee802154_ptr;
#endif
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
	struct mpls_dev __rcu	*mpls_ptr;
#endif
#if IS_ENABLED(CONFIG_MCTP)
	struct mctp_dev __rcu	*mctp_ptr;
#endif

	/*
	 * Cache lines mostly used on receive path (including eth_type_trans())
	 */
	/* Interface address info used in eth_type_trans() */
	const unsigned char	*dev_addr;

	struct netdev_rx_queue	*_rx;
	unsigned int		num_rx_queues;
	unsigned int		real_num_rx_queues;

	struct bpf_prog __rcu	*xdp_prog;
	unsigned long		gro_flush_timeout;
	int			napi_defer_hard_irqs;
#define GRO_LEGACY_MAX_SIZE	65536u
/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
 * and shinfo->gso_segs is a 16bit field.
 */
#define GRO_MAX_SIZE		(8 * 65535u)
	unsigned int		gro_max_size;
	unsigned int		gro_ipv4_max_size;
	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc __rcu	*miniq_ingress;
#endif
	struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
	struct nf_hook_entries __rcu *nf_hooks_ingress;
#endif

	unsigned char		broadcast[MAX_ADDR_LEN];
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rx_cpu_rmap;
#endif
	struct hlist_node	index_hlist;

	/*
	 * Cache lines mostly used on transmit path
	 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
	unsigned int		num_tx_queues;
	unsigned int		real_num_tx_queues;
	struct Qdisc __rcu	*qdisc;
	unsigned int		tx_queue_len;
	spinlock_t		tx_global_lock;

	struct xdp_dev_bulk_queue __percpu *xdp_bulkq;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
#endif
#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc __rcu	*miniq_egress;
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	struct nf_hook_entries __rcu *nf_hooks_egress;
#endif

#ifdef CONFIG_NET_SCHED
	DECLARE_HASHTABLE(qdisc_hash, 4);
#endif
	/* These may be needed for future network-power-down code. */
	struct timer_list	watchdog_timer;
	int			watchdog_timeo;

	u32			proto_down_reason;

	struct list_head	todo_list;

#ifdef CONFIG_PCPU_DEV_REFCNT
	int __percpu		*pcpu_refcnt;
#else
	refcount_t		dev_refcnt;
#endif
	struct ref_tracker_dir	refcnt_tracker;

	struct list_head	link_watch_list;

	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle;

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	bool needs_free_netdev;
	void (*priv_destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu *npinfo;
#endif

	possible_net_t		nd_net;

	/* mid-layer private */
	void			*ml_priv;
	enum netdev_ml_priv_type ml_priv_type;

	union {
		struct pcpu_lstats __percpu	*lstats;
		struct pcpu_sw_netstats __percpu *tstats;
		struct pcpu_dstats __percpu	*dstats;
	};

#if IS_ENABLED(CONFIG_GARP)
	struct garp_port __rcu	*garp_port;
#endif
#if IS_ENABLED(CONFIG_MRP)
	struct mrp_port __rcu	*mrp_port;
#endif
#if IS_ENABLED(CONFIG_NET_DROP_MONITOR)
	struct dm_hw_stat_delta __rcu *dm_private;
#endif
	struct device		dev;
	const struct attribute_group *sysfs_groups[4];
	const struct attribute_group *sysfs_rx_queue_group;

	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SEGS		65535u
#define GSO_LEGACY_MAX_SIZE	65536u
/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
 * and shinfo->gso_segs is a 16bit field.
 */
#define GSO_MAX_SIZE		(8 * GSO_MAX_SEGS)

	unsigned int		gso_max_size;
#define TSO_LEGACY_MAX_SIZE	65536
#define TSO_MAX_SIZE		UINT_MAX
	unsigned int		tso_max_size;
	u16			gso_max_segs;
#define TSO_MAX_SEGS		U16_MAX
	u16			tso_max_segs;
	unsigned int		gso_ipv4_max_size;

#ifdef CONFIG_DCB
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	s16			num_tc;
	struct netdev_tc_txq	tc_to_txq[TC_MAX_QUEUE];
	u8			prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
	struct netprio_map __rcu *priomap;
#endif
	struct phy_device	*phydev;
	struct sfp_bus		*sfp_bus;
	struct lock_class_key	*qdisc_tx_busylock;
	bool			proto_down;
	unsigned		wol_enabled:1;
	unsigned		threaded:1;

	struct list_head	net_notifier_list;

#if IS_ENABLED(CONFIG_MACSEC)
	/* MACsec management functions */
	const struct macsec_ops *macsec_ops;
#endif
	const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
	struct udp_tunnel_nic	*udp_tunnel_nic;

	/* protected by rtnl_lock */
	struct bpf_xdp_entity	xdp_state[__MAX_XDP_MODE];

	u8			dev_addr_shadow[MAX_ADDR_LEN];
	netdevice_tracker	linkwatch_dev_tracker;
	netdevice_tracker	watchdog_dev_tracker;
	netdevice_tracker	dev_registered_tracker;
	struct rtnl_hw_stats64	*offload_xstats_l3;

	struct devlink_port	*devlink_port;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

/*
 * Drivers should use this to assign a devlink port instance to a
 * netdevice before registering the netdevice; devlink_port is then
 * static for the whole netdev lifetime after registration.
 */
#define SET_NETDEV_DEVLINK_PORT(dev, port)			\
({								\
	WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED);	\
	((dev)->devlink_port = (port));				\
})

static inline bool netif_elide_gro(const struct net_device *dev)
{
	if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
		return true;
	return false;
}

#define	NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
void netdev_reset_tc(struct net_device *dev);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc);

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}

static inline void net_prefetch(void *p)
{
	prefetch(p);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}

static inline void net_prefetchw(void *p)
{
	prefetchw(p);
#if L1_CACHE_BYTES < 128
	prefetchw((u8 *)p + L1_CACHE_BYTES);
#endif
}

void netdev_unbind_sb_channel(struct net_device *dev,
			      struct net_device *sb_dev);
int netdev_bind_sb_channel_queue(struct net_device *dev,
				 struct net_device *sb_dev,
				 u8 tc, u16 count, u16 offset);
int netdev_set_sb_channel(struct net_device *dev, u16 channel);
static inline int netdev_get_sb_channel(struct net_device *dev)
{
	return max_t(int, -dev->num_tc, 0);
}

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues);
	return &dev->_tx[index];
}

static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
						    const struct sk_buff *skb)
{
	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

#define netdev_lockdep_set_classes(dev)				\
{								\
	static struct lock_class_key qdisc_tx_busylock_key;	\
	static struct lock_class_key qdisc_xmit_lock_key;	\
	static struct lock_class_key dev_addr_list_lock_key;	\
	unsigned int i;						\
								\
	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
	lockdep_set_class(&(dev)->addr_list_lock,		\
			  &dev_addr_list_lock_key);		\
	for (i = 0; i < (dev)->num_tx_queues; i++)		\
		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
				  &qdisc_xmit_lock_key);	\
}

u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
		   struct net_device *sb_dev);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
					 struct sk_buff *skb,
					 struct net_device *sb_dev);
/* Returns the headroom that the master device needs to take into account
 * when forwarding to this dev.
 */
static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
{
	return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
}

static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
{
	if (dev->netdev_ops->ndo_set_rx_headroom)
		dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
}

/* set the device rx headroom to the dev's default */
static inline void netdev_reset_rx_headroom(struct net_device *dev)
{
	netdev_set_rx_headroom(dev, -1);
}

static inline void *netdev_get_ml_priv(struct net_device *dev,
				       enum netdev_ml_priv_type type)
{
	if (dev->ml_priv_type != type)
		return NULL;

	return dev->ml_priv;
}

static inline void netdev_set_ml_priv(struct net_device *dev,
				      void *ml_priv,
				      enum netdev_ml_priv_type type)
{
	WARN(dev->ml_priv_type && dev->ml_priv_type != type,
	     "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
	     dev->ml_priv_type, type);
	WARN(!dev->ml_priv_type && dev->ml_priv,
	     "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");

	dev->ml_priv = ml_priv;
	dev->ml_priv_type = type;
}

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
	write_pnet(&dev->nd_net, net);
}

/**
 * netdev_priv - access network device private data
 * @dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
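/*
 * Example (an illustrative sketch): netdev_priv() returns the
 * driver-private area reserved when the device was allocated, so a
 * hypothetical driver pairs it with alloc_etherdev() like this:
 *
 *	struct foo_priv { struct napi_struct napi; ... };
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	priv = netdev_priv(dev);
 */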
/* Set the sysfs physical device reference for the network logical device.
 * If set prior to registration, a symlink is created during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/* Default NAPI poll() weight
 * Device drivers are strongly advised to not use bigger value
 */
#define NAPI_POLL_WEIGHT 64

void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
			   int (*poll)(struct napi_struct *, int), int weight);

/**
 * netif_napi_add() - initialize a NAPI context
 * @dev: network device
 * @napi: NAPI context
 * @poll: polling function
 *
 * netif_napi_add() must be used to initialize a NAPI context prior to calling
 * *any* of the other NAPI-related functions.
 */
static inline void
netif_napi_add(struct net_device *dev, struct napi_struct *napi,
	       int (*poll)(struct napi_struct *, int))
{
	netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
}

static inline void
netif_napi_add_tx_weight(struct net_device *dev,
			 struct napi_struct *napi,
			 int (*poll)(struct napi_struct *, int),
			 int weight)
{
	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
	netif_napi_add_weight(dev, napi, poll, weight);
}

/**
 * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
 * @dev: network device
 * @napi: NAPI context
 * @poll: polling function
 *
 * This variant of netif_napi_add() should be used from drivers using NAPI
 * to exclusively poll a TX queue.
 * This avoids adding the context to napi_hash[], and thus polluting that
 * hash table.
 */
static inline void netif_napi_add_tx(struct net_device *dev,
				     struct napi_struct *napi,
				     int (*poll)(struct napi_struct *, int))
{
	netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
}

/**
 * __netif_napi_del - remove a NAPI context
 * @napi: NAPI context
 *
 * Warning: caller must observe RCU grace period before freeing memory
 * containing @napi. Drivers might want to call this helper to combine
 * all the needed RCU grace periods into a single one.
 */
void __netif_napi_del(struct napi_struct *napi);

/**
 * netif_napi_del - remove a NAPI context
 * @napi: NAPI context
 *
 * netif_napi_del() removes a NAPI context from the network device NAPI list
 */
static inline void netif_napi_del(struct napi_struct *napi)
{
	__netif_napi_del(napi);
	synchronize_net();
}
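/*
 * Example (an illustrative NAPI skeleton for a hypothetical driver):
 * the poll callback processes up to @budget packets and completes NAPI
 * once the queue is drained:
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = foo_clean_rx(napi, budget);
 *
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 *
 *	// at probe time:
 *	netif_napi_add(dev, &priv->napi, foo_poll);
 */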
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	bool			ignore_outgoing;
	struct net_device	*dev;	/* NULL is wildcarded here */
	netdevice_tracker	dev_tracker;
	int			(*func)(struct sk_buff *,
					struct net_device *,
					struct packet_type *,
					struct net_device *);
	void			(*list_func)(struct list_head *,
					     struct packet_type *,
					     struct net_device *);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	struct net		*af_packet_net;
	void			*af_packet_priv;
	struct list_head	list;
};

struct offload_callbacks {
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	struct sk_buff		*(*gro_receive)(struct list_head *head,
						struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
};

struct packet_offload {
	__be16			type;	/* This is really htons(ether_type). */
	u16			priority;
	struct offload_callbacks callbacks;
	struct list_head	list;
};

/* often modified stats are per-CPU, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
	u64_stats_t		rx_packets;
	u64_stats_t		rx_bytes;
	u64_stats_t		tx_packets;
	u64_stats_t		tx_bytes;
	struct u64_stats_sync	syncp;
} __aligned(4 * sizeof(u64));

struct pcpu_lstats {
	u64_stats_t		packets;
	u64_stats_t		bytes;
	struct u64_stats_sync	syncp;
} __aligned(2 * sizeof(u64));

void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);

static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	u64_stats_add(&tstats->rx_bytes, len);
	u64_stats_inc(&tstats->rx_packets);
	u64_stats_update_end(&tstats->syncp);
}

static inline void dev_sw_netstats_tx_add(struct net_device *dev,
					  unsigned int packets,
					  unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	u64_stats_add(&tstats->tx_bytes, len);
	u64_stats_add(&tstats->tx_packets, packets);
	u64_stats_update_end(&tstats->syncp);
}

static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
{
	struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);

	u64_stats_update_begin(&lstats->syncp);
	u64_stats_add(&lstats->bytes, len);
	u64_stats_inc(&lstats->packets);
	u64_stats_update_end(&lstats->syncp);
}

#define __netdev_alloc_pcpu_stats(type, gfp)				\
({									\
	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
	if (pcpu_stats)	{						\
		int __cpu;						\
		for_each_possible_cpu(__cpu) {				\
			typeof(type) *stat;				\
			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
			u64_stats_init(&stat->syncp);			\
		}							\
	}								\
	pcpu_stats;							\
})

#define netdev_alloc_pcpu_stats(type)					\
	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)

#define devm_netdev_alloc_pcpu_stats(dev, type)				\
({									\
	typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
	if (pcpu_stats) {						\
		int __cpu;						\
		for_each_possible_cpu(__cpu) {				\
			typeof(type) *stat;				\
			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
			u64_stats_init(&stat->syncp);			\
		}							\
	}								\
	pcpu_stats;							\
})
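/*
 * Example (an illustrative sketch): a tunnel-style driver allocates its
 * per-CPU counters once, then bumps them from the datapath:
 *
 *	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *	if (!dev->tstats)
 *		return -ENOMEM;
 *	...
 *	dev_sw_netstats_rx_add(dev, skb->len);
 */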
2836 */ 2837 enum netdev_cmd { 2838 NETDEV_UP = 1, /* For now you can't veto a device up/down */ 2839 NETDEV_DOWN, 2840 NETDEV_REBOOT, /* Tell a protocol stack a network interface 2841 detected a hardware crash and restarted 2842 - we can use this eg to kick tcp sessions 2843 once done */ 2844 NETDEV_CHANGE, /* Notify device state change */ 2845 NETDEV_REGISTER, 2846 NETDEV_UNREGISTER, 2847 NETDEV_CHANGEMTU, /* notify after mtu change happened */ 2848 NETDEV_CHANGEADDR, /* notify after the address change */ 2849 NETDEV_PRE_CHANGEADDR, /* notify before the address change */ 2850 NETDEV_GOING_DOWN, 2851 NETDEV_CHANGENAME, 2852 NETDEV_FEAT_CHANGE, 2853 NETDEV_BONDING_FAILOVER, 2854 NETDEV_PRE_UP, 2855 NETDEV_PRE_TYPE_CHANGE, 2856 NETDEV_POST_TYPE_CHANGE, 2857 NETDEV_POST_INIT, 2858 NETDEV_PRE_UNINIT, 2859 NETDEV_RELEASE, 2860 NETDEV_NOTIFY_PEERS, 2861 NETDEV_JOIN, 2862 NETDEV_CHANGEUPPER, 2863 NETDEV_RESEND_IGMP, 2864 NETDEV_PRECHANGEMTU, /* notify before mtu change happened */ 2865 NETDEV_CHANGEINFODATA, 2866 NETDEV_BONDING_INFO, 2867 NETDEV_PRECHANGEUPPER, 2868 NETDEV_CHANGELOWERSTATE, 2869 NETDEV_UDP_TUNNEL_PUSH_INFO, 2870 NETDEV_UDP_TUNNEL_DROP_INFO, 2871 NETDEV_CHANGE_TX_QUEUE_LEN, 2872 NETDEV_CVLAN_FILTER_PUSH_INFO, 2873 NETDEV_CVLAN_FILTER_DROP_INFO, 2874 NETDEV_SVLAN_FILTER_PUSH_INFO, 2875 NETDEV_SVLAN_FILTER_DROP_INFO, 2876 NETDEV_OFFLOAD_XSTATS_ENABLE, 2877 NETDEV_OFFLOAD_XSTATS_DISABLE, 2878 NETDEV_OFFLOAD_XSTATS_REPORT_USED, 2879 NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, 2880 NETDEV_XDP_FEAT_CHANGE, 2881 }; 2882 const char *netdev_cmd_to_name(enum netdev_cmd cmd); 2883 2884 int register_netdevice_notifier(struct notifier_block *nb); 2885 int unregister_netdevice_notifier(struct notifier_block *nb); 2886 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb); 2887 int unregister_netdevice_notifier_net(struct net *net, 2888 struct notifier_block *nb); 2889 int register_netdevice_notifier_dev_net(struct net_device *dev, 2890 struct notifier_block *nb, 2891 struct netdev_net_notifier *nn); 2892 int unregister_netdevice_notifier_dev_net(struct net_device *dev, 2893 struct notifier_block *nb, 2894 struct netdev_net_notifier *nn); 2895 2896 struct netdev_notifier_info { 2897 struct net_device *dev; 2898 struct netlink_ext_ack *extack; 2899 }; 2900 2901 struct netdev_notifier_info_ext { 2902 struct netdev_notifier_info info; /* must be first */ 2903 union { 2904 u32 mtu; 2905 } ext; 2906 }; 2907 2908 struct netdev_notifier_change_info { 2909 struct netdev_notifier_info info; /* must be first */ 2910 unsigned int flags_changed; 2911 }; 2912 2913 struct netdev_notifier_changeupper_info { 2914 struct netdev_notifier_info info; /* must be first */ 2915 struct net_device *upper_dev; /* new upper dev */ 2916 bool master; /* is upper dev master */ 2917 bool linking; /* is the notification for link or unlink */ 2918 void *upper_info; /* upper dev info */ 2919 }; 2920 2921 struct netdev_notifier_changelowerstate_info { 2922 struct netdev_notifier_info info; /* must be first */ 2923 void *lower_state_info; /* is lower dev state */ 2924 }; 2925 2926 struct netdev_notifier_pre_changeaddr_info { 2927 struct netdev_notifier_info info; /* must be first */ 2928 const unsigned char *dev_addr; 2929 }; 2930 2931 enum netdev_offload_xstats_type { 2932 NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, 2933 }; 2934 2935 struct netdev_notifier_offload_xstats_info { 2936 struct netdev_notifier_info info; /* must be first */ 2937 enum netdev_offload_xstats_type type; 2938 2939 union { 2940 /* 
struct netdev_notifier_info {
	struct net_device	*dev;
	struct netlink_ext_ack	*extack;
};

struct netdev_notifier_info_ext {
	struct netdev_notifier_info info; /* must be first */
	union {
		u32 mtu;
	} ext;
};

struct netdev_notifier_change_info {
	struct netdev_notifier_info info; /* must be first */
	unsigned int flags_changed;
};

struct netdev_notifier_changeupper_info {
	struct netdev_notifier_info info; /* must be first */
	struct net_device *upper_dev; /* new upper dev */
	bool master; /* is upper dev master */
	bool linking; /* is the notification for link or unlink */
	void *upper_info; /* upper dev info */
};

struct netdev_notifier_changelowerstate_info {
	struct netdev_notifier_info info; /* must be first */
	void *lower_state_info; /* is lower dev state */
};

struct netdev_notifier_pre_changeaddr_info {
	struct netdev_notifier_info info; /* must be first */
	const unsigned char *dev_addr;
};

enum netdev_offload_xstats_type {
	NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1,
};

struct netdev_notifier_offload_xstats_info {
	struct netdev_notifier_info info; /* must be first */
	enum netdev_offload_xstats_type type;

	union {
		/* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */
		struct netdev_notifier_offload_xstats_rd *report_delta;
		/* NETDEV_OFFLOAD_XSTATS_REPORT_USED */
		struct netdev_notifier_offload_xstats_ru *report_used;
	};
};

int netdev_offload_xstats_enable(struct net_device *dev,
				 enum netdev_offload_xstats_type type,
				 struct netlink_ext_ack *extack);
int netdev_offload_xstats_disable(struct net_device *dev,
				  enum netdev_offload_xstats_type type);
bool netdev_offload_xstats_enabled(const struct net_device *dev,
				   enum netdev_offload_xstats_type type);
int netdev_offload_xstats_get(struct net_device *dev,
			      enum netdev_offload_xstats_type type,
			      struct rtnl_hw_stats64 *stats, bool *used,
			      struct netlink_ext_ack *extack);
void
netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd,
				   const struct rtnl_hw_stats64 *stats);
void
netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru);
void netdev_offload_xstats_push_delta(struct net_device *dev,
				      enum netdev_offload_xstats_type type,
				      const struct rtnl_hw_stats64 *stats);

static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
					     struct net_device *dev)
{
	info->dev = dev;
	info->extack = NULL;
}

static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}

static inline struct netlink_ext_ack *
netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
{
	return info->extack;
}

int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
int call_netdevice_notifiers_info(unsigned long val,
				  struct netdev_notifier_info *info);

extern rwlock_t dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_reverse(net, d)		\
		list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
						     dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(&init_net, slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
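/*
 * Example (an illustrative sketch): walking every device in a
 * namespace; the plain for_each_netdev() walker assumes the caller
 * holds RTNL (use the _rcu variant under rcu_read_lock()):
 *
 *	struct net_device *d;
 *
 *	for_each_netdev(net, d)
 *		pr_info("%s\n", d->name);
 */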
static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

int netdev_boot_setup_check(struct net_device *dev);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);

int dev_get_iflink(const struct net_device *dev);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
			  struct net_device_path_stack *stack);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
				      unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
bool netdev_name_in_use(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
void dev_close(struct net_device *dev);
void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
		     struct net_device *sb_dev);
u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev);

int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);

static inline int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}

static inline int dev_queue_xmit_accel(struct sk_buff *skb,
				       struct net_device *sb_dev)
{
	return __dev_queue_xmit(skb, sb_dev);
}

static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
	int ret;

	ret = __dev_direct_xmit(skb, queue_id);
	if (!dev_xmit_complete(ret))
		kfree_skb(skb);
	return ret;
}
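/*
 * Example (an illustrative sketch): a virtual device handing a rebuilt
 * skb to its lower device (lower_dev is hypothetical) re-enters the
 * stack via dev_queue_xmit():
 *
 *	skb->dev = lower_dev;
 *	return dev_queue_xmit(skb);
 */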
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
int init_dummy_netdev(struct net_device *dev);

struct net_device *netdev_get_xmit_slave(struct net_device *dev,
					 struct sk_buff *skb,
					 bool all_slaves);
struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
					    struct sock *sk);
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
int dev_restart(struct net_device *dev);

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse_protocol)
		return 0;
	return dev->header_ops->parse_protocol(skb);
}

/* ll_header must have at least hard_header_len allocated */
static inline bool dev_validate_header(const struct net_device *dev,
				       char *ll_header, int len)
{
	if (likely(len >= dev->hard_header_len))
		return true;
	if (len < dev->min_header_len)
		return false;

	if (capable(CAP_SYS_RAWIO)) {
		memset(ll_header + len, 0, dev->hard_header_len - len);
		return true;
	}

	if (dev->header_ops && dev->header_ops->validate)
		return dev->header_ops->validate(ll_header, len);

	return false;
}

static inline bool dev_has_header(const struct net_device *dev)
{
	return dev->header_ops && dev->header_ops->create;
}
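/*
 * Example (an illustrative sketch): filling in the link-layer header of
 * a locally generated frame before handing it to the device; on
 * Ethernet devices this resolves to the stock header_ops (daddr is a
 * hypothetical destination address):
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP, daddr,
 *			    dev->dev_addr, skb->len) < 0)
 *		goto drop;
 */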
/*
 * Incoming packets are placed on per-CPU queues
 */
struct softnet_data {
	struct list_head	poll_list;
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;
#endif
	bool			in_net_rx_action;
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit __rcu *flow_limit;
#endif
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct sk_buff		*completion_queue;
#ifdef CONFIG_XFRM_OFFLOAD
	struct sk_buff_head	xfrm_backlog;
#endif
	/* written and read only by owning cpu: */
	struct {
		u16 recursion;
		u8  more;
#ifdef CONFIG_NET_EGRESS
		u8  skip_txqueue;
#endif
	} xmit;
#ifdef CONFIG_RPS
	/* input_queue_head should be written by cpu owning this struct,
	 * and only read by other cpus. Worth using a cache line.
	 */
	unsigned int		input_queue_head ____cacheline_aligned_in_smp;

	/* Elements below can be accessed between CPUs for RPS/RFS */
	call_single_data_t	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_tail;
#endif
	unsigned int		received_rps;
	unsigned int		dropped;
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;

	/* Another possibly contended cache line */
	spinlock_t		defer_lock ____cacheline_aligned_in_smp;
	int			defer_count;
	int			defer_ipi_scheduled;
	struct sk_buff		*defer_list;
	call_single_data_t	defer_csd;
};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

static inline int dev_recursion_level(void)
{
	return this_cpu_read(softnet_data.xmit.recursion);
}

#define XMIT_RECURSION_LIMIT	8
static inline bool dev_xmit_recursion(void)
{
	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
			XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	__this_cpu_inc(softnet_data.xmit.recursion);
}

static inline void dev_xmit_recursion_dec(void)
{
	__this_cpu_dec(softnet_data.xmit.recursion);
}

void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_start_queue - allow transmit
 * @dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

void netif_tx_wake_queue(struct netdev_queue *dev_queue);
*/
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		netif_tx_wake_queue(txq);
	}
}

static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	/* Must be an atomic op, see netif_txq_try_stop() */
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_stop_queue - stop the transmit queue
 * @dev: network device
 *
 * Stop upper layers calling the device hard_start_xmit routine.
 * Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

void netif_tx_stop_all_queues(struct net_device *dev);

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_queue_stopped - test if transmit queue is flow-blocked
 * @dev: network device
 *
 * Test if the transmit queue on the device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}

/**
 * netdev_queue_set_dql_min_limit - set dql minimum limit
 * @dev_queue: pointer to transmit queue
 * @min_limit: dql minimum limit
 *
 * Forces xmit_more() to return true until the minimum threshold
 * defined by @min_limit is reached (or until the tx queue is
 * empty). Warning: to be used with care; misuse will impact latency.
 */
static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
						  unsigned int min_limit)
{
#ifdef CONFIG_BQL
	dev_queue->dql.min_limit = min_limit;
#endif
}

/**
 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
 * @dev_queue: pointer to transmit queue
 *
 * BQL-enabled drivers might use this helper in their ndo_start_xmit(),
 * to give an appropriate hint to the CPU.
 */
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.num_queued);
#endif
}

/**
 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
 * @dev_queue: pointer to transmit queue
 *
 * BQL-enabled drivers might use this helper in their TX completion path,
 * to give an appropriate hint to the CPU.
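 *
 * For instance, a driver might issue the prefetch at the top of its TX
 * completion routine, before walking the ring (an illustrative sketch;
 * the surrounding loop is driver specific):
 *
 *	netdev_txq_bql_complete_prefetchw(txq);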
3427 */ 3428 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) 3429 { 3430 #ifdef CONFIG_BQL 3431 prefetchw(&dev_queue->dql.limit); 3432 #endif 3433 } 3434 3435 /** 3436 * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue 3437 * @dev_queue: network device queue 3438 * @bytes: number of bytes queued to the device queue 3439 * 3440 * Report the number of bytes queued for sending/completion to the network 3441 * device hardware queue. @bytes should be a good approximation and should 3442 * exactly match netdev_completed_queue() @bytes. 3443 * This is typically called once per packet, from ndo_start_xmit(). 3444 */ 3445 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, 3446 unsigned int bytes) 3447 { 3448 #ifdef CONFIG_BQL 3449 dql_queued(&dev_queue->dql, bytes); 3450 3451 if (likely(dql_avail(&dev_queue->dql) >= 0)) 3452 return; 3453 3454 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 3455 3456 /* 3457 * The XOFF flag must be set before checking the dql_avail below, 3458 * because in netdev_tx_completed_queue we update the dql_completed 3459 * before checking the XOFF flag. 3460 */ 3461 smp_mb(); 3462 3463 /* check again in case another CPU has just made room avail */ 3464 if (unlikely(dql_avail(&dev_queue->dql) >= 0)) 3465 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 3466 #endif 3467 } 3468 3469 /* Variant of netdev_tx_sent_queue() for drivers that are aware 3470 * that they should not test BQL status themselves. 3471 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last 3472 * skb of a batch. 3473 * Returns true if the doorbell must be used to kick the NIC. 3474 */ 3475 static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, 3476 unsigned int bytes, 3477 bool xmit_more) 3478 { 3479 if (xmit_more) { 3480 #ifdef CONFIG_BQL 3481 dql_queued(&dev_queue->dql, bytes); 3482 #endif 3483 return netif_tx_queue_stopped(dev_queue); 3484 } 3485 netdev_tx_sent_queue(dev_queue, bytes); 3486 return true; 3487 } 3488 3489 /** 3490 * netdev_sent_queue - report the number of bytes queued to hardware 3491 * @dev: network device 3492 * @bytes: number of bytes queued to the hardware device queue 3493 * 3494 * Report the number of bytes queued for sending/completion to the network 3495 * device hardware queue#0. @bytes should be a good approximation and should 3496 * exactly match netdev_completed_queue() @bytes. 3497 * This is typically called once per packet, from ndo_start_xmit(). 3498 */ 3499 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) 3500 { 3501 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); 3502 } 3503 3504 static inline bool __netdev_sent_queue(struct net_device *dev, 3505 unsigned int bytes, 3506 bool xmit_more) 3507 { 3508 return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, 3509 xmit_more); 3510 } 3511 3512 /** 3513 * netdev_tx_completed_queue - report number of packets/bytes at TX completion. 3514 * @dev_queue: network device queue 3515 * @pkts: number of packets (currently ignored) 3516 * @bytes: number of bytes dequeued from the device queue 3517 * 3518 * Must be called at most once per TX completion round (and not per 3519 * individual packet), so that BQL can adjust its limits appropriately. 
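 *
 * A minimal sketch of the expected call pattern, assuming a
 * hypothetical "foo" ring and its helpers (not part of this header):
 *
 *	unsigned int pkts = 0, bytes = 0;
 *
 *	while (foo_tx_desc_done(ring)) {
 *		bytes += foo_tx_desc_skb(ring)->len;
 *		pkts++;
 *		foo_tx_desc_free(ring);
 *	}
 *	netdev_tx_completed_queue(txq, pkts, bytes);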
*/
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue() will miss the update and cause the
	 * queue to be stopped forever.
	 */
	smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */

	if (unlikely(dql_avail(&dev_queue->dql) < 0))
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}

/**
 * netdev_completed_queue - report bytes and packets completed by device
 * @dev: network device
 * @pkts: actual number of packets sent over the medium
 * @bytes: actual number of bytes sent over the medium
 *
 * Report the number of bytes and packets transmitted by the network device
 * hardware queue over the physical medium; @bytes must exactly match the
 * @bytes amount passed to netdev_sent_queue().
 */
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}

static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}

/**
 * netdev_reset_queue - reset the packets and bytes count of a network device
 * @dev: network device
 *
 * Reset the bytes and packet count of a network device and clear the
 * software flow control OFF bit for this network device.
 */
static inline void netdev_reset_queue(struct net_device *dev)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, 0));
}

/**
 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
 * @dev: network device
 * @queue_index: given tx queue index
 *
 * Returns 0 if given tx queue index >= number of device tx queues,
 * otherwise returns the originally passed tx queue index.
 */
static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}

	return queue_index;
}

/**
 * netif_running - test if up
 * @dev: network device
 *
 * Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device. We only need start,
 * stop, and a check if it's stopped. All other device management is
 * done at the overall netdevice level.
 * There is also a test for whether the device is multiqueue.
 */

/**
 * netif_start_subqueue - allow sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
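 *
 * (Per-queue analogue of netif_start_queue(); note that TX completion
 * paths usually call netif_wake_subqueue() instead, since that also
 * reschedules the queue for transmission.)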
3625 */ 3626 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) 3627 { 3628 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3629 3630 netif_tx_start_queue(txq); 3631 } 3632 3633 /** 3634 * netif_stop_subqueue - stop sending packets on subqueue 3635 * @dev: network device 3636 * @queue_index: sub queue index 3637 * 3638 * Stop individual transmit queue of a device with multiple transmit queues. 3639 */ 3640 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) 3641 { 3642 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3643 netif_tx_stop_queue(txq); 3644 } 3645 3646 /** 3647 * __netif_subqueue_stopped - test status of subqueue 3648 * @dev: network device 3649 * @queue_index: sub queue index 3650 * 3651 * Check individual transmit queue of a device with multiple transmit queues. 3652 */ 3653 static inline bool __netif_subqueue_stopped(const struct net_device *dev, 3654 u16 queue_index) 3655 { 3656 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3657 3658 return netif_tx_queue_stopped(txq); 3659 } 3660 3661 /** 3662 * netif_subqueue_stopped - test status of subqueue 3663 * @dev: network device 3664 * @skb: sub queue buffer pointer 3665 * 3666 * Check individual transmit queue of a device with multiple transmit queues. 3667 */ 3668 static inline bool netif_subqueue_stopped(const struct net_device *dev, 3669 struct sk_buff *skb) 3670 { 3671 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); 3672 } 3673 3674 /** 3675 * netif_wake_subqueue - allow sending packets on subqueue 3676 * @dev: network device 3677 * @queue_index: sub queue index 3678 * 3679 * Resume individual transmit queue of a device with multiple transmit queues. 3680 */ 3681 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) 3682 { 3683 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3684 3685 netif_tx_wake_queue(txq); 3686 } 3687 3688 #ifdef CONFIG_XPS 3689 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 3690 u16 index); 3691 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 3692 u16 index, enum xps_map_type type); 3693 3694 /** 3695 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask 3696 * @j: CPU/Rx queue index 3697 * @mask: bitmask of all cpus/rx queues 3698 * @nr_bits: number of bits in the bitmask 3699 * 3700 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. 3701 */ 3702 static inline bool netif_attr_test_mask(unsigned long j, 3703 const unsigned long *mask, 3704 unsigned int nr_bits) 3705 { 3706 cpu_max_bits_warn(j, nr_bits); 3707 return test_bit(j, mask); 3708 } 3709 3710 /** 3711 * netif_attr_test_online - Test for online CPU/Rx queue 3712 * @j: CPU/Rx queue index 3713 * @online_mask: bitmask for CPUs/Rx queues that are online 3714 * @nr_bits: number of bits in the bitmask 3715 * 3716 * Returns true if a CPU/Rx queue is online. 
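 *
 * For example, counting the online entries of a mask (a sketch; @mask
 * and @online_mask are any CPU/Rx-queue bitmaps of @nr_bits bits):
 *
 *	unsigned int j, online = 0;
 *
 *	for_each_set_bit(j, mask, nr_bits)
 *		if (netif_attr_test_online(j, online_mask, nr_bits))
 *			online++;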
3717 */ 3718 static inline bool netif_attr_test_online(unsigned long j, 3719 const unsigned long *online_mask, 3720 unsigned int nr_bits) 3721 { 3722 cpu_max_bits_warn(j, nr_bits); 3723 3724 if (online_mask) 3725 return test_bit(j, online_mask); 3726 3727 return (j < nr_bits); 3728 } 3729 3730 /** 3731 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask 3732 * @n: CPU/Rx queue index 3733 * @srcp: the cpumask/Rx queue mask pointer 3734 * @nr_bits: number of bits in the bitmask 3735 * 3736 * Returns >= nr_bits if no further CPUs/Rx queues set. 3737 */ 3738 static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, 3739 unsigned int nr_bits) 3740 { 3741 /* -1 is a legal arg here. */ 3742 if (n != -1) 3743 cpu_max_bits_warn(n, nr_bits); 3744 3745 if (srcp) 3746 return find_next_bit(srcp, nr_bits, n + 1); 3747 3748 return n + 1; 3749 } 3750 3751 /** 3752 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p 3753 * @n: CPU/Rx queue index 3754 * @src1p: the first CPUs/Rx queues mask pointer 3755 * @src2p: the second CPUs/Rx queues mask pointer 3756 * @nr_bits: number of bits in the bitmask 3757 * 3758 * Returns >= nr_bits if no further CPUs/Rx queues set in both. 3759 */ 3760 static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, 3761 const unsigned long *src2p, 3762 unsigned int nr_bits) 3763 { 3764 /* -1 is a legal arg here. */ 3765 if (n != -1) 3766 cpu_max_bits_warn(n, nr_bits); 3767 3768 if (src1p && src2p) 3769 return find_next_and_bit(src1p, src2p, nr_bits, n + 1); 3770 else if (src1p) 3771 return find_next_bit(src1p, nr_bits, n + 1); 3772 else if (src2p) 3773 return find_next_bit(src2p, nr_bits, n + 1); 3774 3775 return n + 1; 3776 } 3777 #else 3778 static inline int netif_set_xps_queue(struct net_device *dev, 3779 const struct cpumask *mask, 3780 u16 index) 3781 { 3782 return 0; 3783 } 3784 3785 static inline int __netif_set_xps_queue(struct net_device *dev, 3786 const unsigned long *mask, 3787 u16 index, enum xps_map_type type) 3788 { 3789 return 0; 3790 } 3791 #endif 3792 3793 /** 3794 * netif_is_multiqueue - test if device has multiple transmit queues 3795 * @dev: network device 3796 * 3797 * Check if device has multiple transmit queues 3798 */ 3799 static inline bool netif_is_multiqueue(const struct net_device *dev) 3800 { 3801 return dev->num_tx_queues > 1; 3802 } 3803 3804 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); 3805 3806 #ifdef CONFIG_SYSFS 3807 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); 3808 #else 3809 static inline int netif_set_real_num_rx_queues(struct net_device *dev, 3810 unsigned int rxqs) 3811 { 3812 dev->real_num_rx_queues = rxqs; 3813 return 0; 3814 } 3815 #endif 3816 int netif_set_real_num_queues(struct net_device *dev, 3817 unsigned int txq, unsigned int rxq); 3818 3819 static inline struct netdev_rx_queue * 3820 __netif_get_rx_queue(struct net_device *dev, unsigned int rxq) 3821 { 3822 return dev->_rx + rxq; 3823 } 3824 3825 #ifdef CONFIG_SYSFS 3826 static inline unsigned int get_netdev_rx_queue_index( 3827 struct netdev_rx_queue *queue) 3828 { 3829 struct net_device *dev = queue->dev; 3830 int index = queue - dev->_rx; 3831 3832 BUG_ON(index >= dev->num_rx_queues); 3833 return index; 3834 } 3835 #endif 3836 3837 int netif_get_num_default_rss_queues(void); 3838 3839 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason); 3840 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum 
skb_drop_reason reason);

/*
 * It is not allowed to call kfree_skb() or consume_skb() from hardware
 * interrupt context or with hardware interrupts being disabled.
 * (in_hardirq() || irqs_disabled())
 *
 * We provide four helpers that can be used in the following contexts:
 *
 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 *  Typically used in place of consume_skb(skb) in the TX completion path
 *
 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 *  and consumes a packet. Used in place of consume_skb(skb)
 */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}

static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
	dev_kfree_skb_irq_reason(skb, SKB_CONSUMED);
}

static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}

static inline void dev_consume_skb_any(struct sk_buff *skb)
{
	dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
}

u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
			     struct bpf_prog *xdp_prog);
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
int netif_rx(struct sk_buff *skb);
int __netif_rx(struct sk_buff *skb);

int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
void netif_receive_skb_list_internal(struct list_head *head);
void netif_receive_skb_list(struct list_head *head);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
void napi_get_frags_check(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}

bool netdev_is_rx_handler_busy(struct net_device *dev);
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data);
void netdev_rx_handler_unregister(struct net_device *dev);

bool dev_valid_name(const char *name);
static inline bool is_socket_ioctl_cmd(unsigned int cmd)
{
	return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
}
int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
int put_user_ifreq(struct ifreq *ifr, void __user *arg);
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
	      void __user *data, bool *need_copyout);
int dev_ifconf(struct net *net, struct ifconf __user *ifc);
int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *dev, unsigned
int flags, 3925 struct netlink_ext_ack *extack); 3926 int dev_change_flags(struct net_device *dev, unsigned int flags, 3927 struct netlink_ext_ack *extack); 3928 int dev_set_alias(struct net_device *, const char *, size_t); 3929 int dev_get_alias(const struct net_device *, char *, size_t); 3930 int __dev_change_net_namespace(struct net_device *dev, struct net *net, 3931 const char *pat, int new_ifindex); 3932 static inline 3933 int dev_change_net_namespace(struct net_device *dev, struct net *net, 3934 const char *pat) 3935 { 3936 return __dev_change_net_namespace(dev, net, pat, 0); 3937 } 3938 int __dev_set_mtu(struct net_device *, int); 3939 int dev_set_mtu(struct net_device *, int); 3940 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 3941 struct netlink_ext_ack *extack); 3942 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 3943 struct netlink_ext_ack *extack); 3944 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 3945 struct netlink_ext_ack *extack); 3946 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); 3947 int dev_get_port_parent_id(struct net_device *dev, 3948 struct netdev_phys_item_id *ppid, bool recurse); 3949 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); 3950 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); 3951 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 3952 struct netdev_queue *txq, int *ret); 3953 3954 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); 3955 u8 dev_xdp_prog_count(struct net_device *dev); 3956 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); 3957 3958 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3959 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3960 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb); 3961 bool is_skb_forwardable(const struct net_device *dev, 3962 const struct sk_buff *skb); 3963 3964 static __always_inline bool __is_skb_forwardable(const struct net_device *dev, 3965 const struct sk_buff *skb, 3966 const bool check_mtu) 3967 { 3968 const u32 vlan_hdr_len = 4; /* VLAN_HLEN */ 3969 unsigned int len; 3970 3971 if (!(dev->flags & IFF_UP)) 3972 return false; 3973 3974 if (!check_mtu) 3975 return true; 3976 3977 len = dev->mtu + dev->hard_header_len + vlan_hdr_len; 3978 if (skb->len <= len) 3979 return true; 3980 3981 /* if TSO is enabled, we don't care about the length as the packet 3982 * could be forwarded without being segmented before 3983 */ 3984 if (skb_is_gso(skb)) 3985 return true; 3986 3987 return false; 3988 } 3989 3990 struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev); 3991 3992 static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev) 3993 { 3994 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ 3995 struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats); 3996 3997 if (likely(p)) 3998 return p; 3999 4000 return netdev_core_stats_alloc(dev); 4001 } 4002 4003 #define DEV_CORE_STATS_INC(FIELD) \ 4004 static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ 4005 { \ 4006 struct net_device_core_stats __percpu *p; \ 4007 \ 4008 p = dev_core_stats(dev); \ 4009 if (p) \ 4010 this_cpu_inc(p->FIELD); \ 4011 } 4012 DEV_CORE_STATS_INC(rx_dropped) 4013 DEV_CORE_STATS_INC(tx_dropped) 4014 
DEV_CORE_STATS_INC(rx_nohandler) 4015 DEV_CORE_STATS_INC(rx_otherhost_dropped) 4016 4017 static __always_inline int ____dev_forward_skb(struct net_device *dev, 4018 struct sk_buff *skb, 4019 const bool check_mtu) 4020 { 4021 if (skb_orphan_frags(skb, GFP_ATOMIC) || 4022 unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) { 4023 dev_core_stats_rx_dropped_inc(dev); 4024 kfree_skb(skb); 4025 return NET_RX_DROP; 4026 } 4027 4028 skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev))); 4029 skb->priority = 0; 4030 return 0; 4031 } 4032 4033 bool dev_nit_active(struct net_device *dev); 4034 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); 4035 4036 static inline void __dev_put(struct net_device *dev) 4037 { 4038 if (dev) { 4039 #ifdef CONFIG_PCPU_DEV_REFCNT 4040 this_cpu_dec(*dev->pcpu_refcnt); 4041 #else 4042 refcount_dec(&dev->dev_refcnt); 4043 #endif 4044 } 4045 } 4046 4047 static inline void __dev_hold(struct net_device *dev) 4048 { 4049 if (dev) { 4050 #ifdef CONFIG_PCPU_DEV_REFCNT 4051 this_cpu_inc(*dev->pcpu_refcnt); 4052 #else 4053 refcount_inc(&dev->dev_refcnt); 4054 #endif 4055 } 4056 } 4057 4058 static inline void __netdev_tracker_alloc(struct net_device *dev, 4059 netdevice_tracker *tracker, 4060 gfp_t gfp) 4061 { 4062 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER 4063 ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp); 4064 #endif 4065 } 4066 4067 /* netdev_tracker_alloc() can upgrade a prior untracked reference 4068 * taken by dev_get_by_name()/dev_get_by_index() to a tracked one. 4069 */ 4070 static inline void netdev_tracker_alloc(struct net_device *dev, 4071 netdevice_tracker *tracker, gfp_t gfp) 4072 { 4073 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER 4074 refcount_dec(&dev->refcnt_tracker.no_tracker); 4075 __netdev_tracker_alloc(dev, tracker, gfp); 4076 #endif 4077 } 4078 4079 static inline void netdev_tracker_free(struct net_device *dev, 4080 netdevice_tracker *tracker) 4081 { 4082 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER 4083 ref_tracker_free(&dev->refcnt_tracker, tracker); 4084 #endif 4085 } 4086 4087 static inline void netdev_hold(struct net_device *dev, 4088 netdevice_tracker *tracker, gfp_t gfp) 4089 { 4090 if (dev) { 4091 __dev_hold(dev); 4092 __netdev_tracker_alloc(dev, tracker, gfp); 4093 } 4094 } 4095 4096 static inline void netdev_put(struct net_device *dev, 4097 netdevice_tracker *tracker) 4098 { 4099 if (dev) { 4100 netdev_tracker_free(dev, tracker); 4101 __dev_put(dev); 4102 } 4103 } 4104 4105 /** 4106 * dev_hold - get reference to device 4107 * @dev: network device 4108 * 4109 * Hold reference to device to keep it from being freed. 4110 * Try using netdev_hold() instead. 4111 */ 4112 static inline void dev_hold(struct net_device *dev) 4113 { 4114 netdev_hold(dev, NULL, GFP_ATOMIC); 4115 } 4116 4117 /** 4118 * dev_put - release reference to device 4119 * @dev: network device 4120 * 4121 * Release reference to device to allow it to be freed. 4122 * Try using netdev_put() instead. 4123 */ 4124 static inline void dev_put(struct net_device *dev) 4125 { 4126 netdev_put(dev, NULL); 4127 } 4128 4129 static inline void netdev_ref_replace(struct net_device *odev, 4130 struct net_device *ndev, 4131 netdevice_tracker *tracker, 4132 gfp_t gfp) 4133 { 4134 if (odev) 4135 netdev_tracker_free(odev, tracker); 4136 4137 __dev_hold(ndev); 4138 __dev_put(odev); 4139 4140 if (ndev) 4141 __netdev_tracker_alloc(ndev, tracker, gfp); 4142 } 4143 4144 /* Carrier loss detection, dial on demand. 
The functions netif_carrier_on
 * and netif_carrier_off may be called from IRQ context, but it is the
 * caller who is responsible for serializing these calls.
 *
 * The name "carrier" is inappropriate; these functions should really be
 * called netif_lowerlayer_*(), because they represent the state of any
 * kind of lower layer, not just hardware media.
 */
void linkwatch_fire_event(struct net_device *dev);

/**
 * netif_carrier_ok - test if carrier present
 * @dev: network device
 *
 * Check if carrier is present on device.
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

unsigned long dev_trans_start(struct net_device *dev);

void __netdev_watchdog_up(struct net_device *dev);

void netif_carrier_on(struct net_device *dev);
void netif_carrier_off(struct net_device *dev);
void netif_carrier_event(struct net_device *dev);

/**
 * netif_dormant_on - mark device as dormant.
 * @dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event. For "on-demand"
 * interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant_off - set device as not dormant.
 * @dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant - test if device is dormant
 * @dev: network device
 *
 * Check if device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}


/**
 * netif_testing_on - mark device as under test.
 * @dev: network device
 *
 * Mark device as under test (as per RFC2863).
 *
 * The testing state indicates that some test(s) must be performed on
 * the interface. After completion of the test, the interface state
 * will change to up, dormant, or down, as appropriate.
 */
static inline void netif_testing_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_testing_off - set device as not under test.
 * @dev: network device
 *
 * Device is not in testing state.
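 *
 * (Counterpart of netif_testing_on(); drivers typically bracket an
 * ethtool self-test with the _on()/_off() pair so that user space
 * observes the RFC2863 testing operstate for the duration of the test.)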
4236 */ 4237 static inline void netif_testing_off(struct net_device *dev) 4238 { 4239 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) 4240 linkwatch_fire_event(dev); 4241 } 4242 4243 /** 4244 * netif_testing - test if device is under test 4245 * @dev: network device 4246 * 4247 * Check if device is under test 4248 */ 4249 static inline bool netif_testing(const struct net_device *dev) 4250 { 4251 return test_bit(__LINK_STATE_TESTING, &dev->state); 4252 } 4253 4254 4255 /** 4256 * netif_oper_up - test if device is operational 4257 * @dev: network device 4258 * 4259 * Check if carrier is operational 4260 */ 4261 static inline bool netif_oper_up(const struct net_device *dev) 4262 { 4263 return (dev->operstate == IF_OPER_UP || 4264 dev->operstate == IF_OPER_UNKNOWN /* backward compat */); 4265 } 4266 4267 /** 4268 * netif_device_present - is device available or removed 4269 * @dev: network device 4270 * 4271 * Check if device has not been removed from system. 4272 */ 4273 static inline bool netif_device_present(const struct net_device *dev) 4274 { 4275 return test_bit(__LINK_STATE_PRESENT, &dev->state); 4276 } 4277 4278 void netif_device_detach(struct net_device *dev); 4279 4280 void netif_device_attach(struct net_device *dev); 4281 4282 /* 4283 * Network interface message level settings 4284 */ 4285 4286 enum { 4287 NETIF_MSG_DRV_BIT, 4288 NETIF_MSG_PROBE_BIT, 4289 NETIF_MSG_LINK_BIT, 4290 NETIF_MSG_TIMER_BIT, 4291 NETIF_MSG_IFDOWN_BIT, 4292 NETIF_MSG_IFUP_BIT, 4293 NETIF_MSG_RX_ERR_BIT, 4294 NETIF_MSG_TX_ERR_BIT, 4295 NETIF_MSG_TX_QUEUED_BIT, 4296 NETIF_MSG_INTR_BIT, 4297 NETIF_MSG_TX_DONE_BIT, 4298 NETIF_MSG_RX_STATUS_BIT, 4299 NETIF_MSG_PKTDATA_BIT, 4300 NETIF_MSG_HW_BIT, 4301 NETIF_MSG_WOL_BIT, 4302 4303 /* When you add a new bit above, update netif_msg_class_names array 4304 * in net/ethtool/common.c 4305 */ 4306 NETIF_MSG_CLASS_COUNT, 4307 }; 4308 /* Both ethtool_ops interface and internal driver implementation use u32 */ 4309 static_assert(NETIF_MSG_CLASS_COUNT <= 32); 4310 4311 #define __NETIF_MSG_BIT(bit) ((u32)1 << (bit)) 4312 #define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT) 4313 4314 #define NETIF_MSG_DRV __NETIF_MSG(DRV) 4315 #define NETIF_MSG_PROBE __NETIF_MSG(PROBE) 4316 #define NETIF_MSG_LINK __NETIF_MSG(LINK) 4317 #define NETIF_MSG_TIMER __NETIF_MSG(TIMER) 4318 #define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN) 4319 #define NETIF_MSG_IFUP __NETIF_MSG(IFUP) 4320 #define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR) 4321 #define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR) 4322 #define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED) 4323 #define NETIF_MSG_INTR __NETIF_MSG(INTR) 4324 #define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE) 4325 #define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS) 4326 #define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA) 4327 #define NETIF_MSG_HW __NETIF_MSG(HW) 4328 #define NETIF_MSG_WOL __NETIF_MSG(WOL) 4329 4330 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) 4331 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) 4332 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) 4333 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) 4334 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) 4335 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) 4336 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) 4337 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) 4338 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) 4339 #define netif_msg_intr(p) 
((p)->msg_enable & NETIF_MSG_INTR) 4340 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) 4341 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) 4342 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) 4343 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) 4344 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) 4345 4346 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) 4347 { 4348 /* use default */ 4349 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) 4350 return default_msg_enable_bits; 4351 if (debug_value == 0) /* no output */ 4352 return 0; 4353 /* set low N bits */ 4354 return (1U << debug_value) - 1; 4355 } 4356 4357 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) 4358 { 4359 spin_lock(&txq->_xmit_lock); 4360 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4361 WRITE_ONCE(txq->xmit_lock_owner, cpu); 4362 } 4363 4364 static inline bool __netif_tx_acquire(struct netdev_queue *txq) 4365 { 4366 __acquire(&txq->_xmit_lock); 4367 return true; 4368 } 4369 4370 static inline void __netif_tx_release(struct netdev_queue *txq) 4371 { 4372 __release(&txq->_xmit_lock); 4373 } 4374 4375 static inline void __netif_tx_lock_bh(struct netdev_queue *txq) 4376 { 4377 spin_lock_bh(&txq->_xmit_lock); 4378 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4379 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4380 } 4381 4382 static inline bool __netif_tx_trylock(struct netdev_queue *txq) 4383 { 4384 bool ok = spin_trylock(&txq->_xmit_lock); 4385 4386 if (likely(ok)) { 4387 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4388 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4389 } 4390 return ok; 4391 } 4392 4393 static inline void __netif_tx_unlock(struct netdev_queue *txq) 4394 { 4395 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4396 WRITE_ONCE(txq->xmit_lock_owner, -1); 4397 spin_unlock(&txq->_xmit_lock); 4398 } 4399 4400 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) 4401 { 4402 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4403 WRITE_ONCE(txq->xmit_lock_owner, -1); 4404 spin_unlock_bh(&txq->_xmit_lock); 4405 } 4406 4407 /* 4408 * txq->trans_start can be read locklessly from dev_watchdog() 4409 */ 4410 static inline void txq_trans_update(struct netdev_queue *txq) 4411 { 4412 if (txq->xmit_lock_owner != -1) 4413 WRITE_ONCE(txq->trans_start, jiffies); 4414 } 4415 4416 static inline void txq_trans_cond_update(struct netdev_queue *txq) 4417 { 4418 unsigned long now = jiffies; 4419 4420 if (READ_ONCE(txq->trans_start) != now) 4421 WRITE_ONCE(txq->trans_start, now); 4422 } 4423 4424 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ 4425 static inline void netif_trans_update(struct net_device *dev) 4426 { 4427 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 4428 4429 txq_trans_cond_update(txq); 4430 } 4431 4432 /** 4433 * netif_tx_lock - grab network device transmit lock 4434 * @dev: network device 4435 * 4436 * Get network device transmit lock 4437 */ 4438 void netif_tx_lock(struct net_device *dev); 4439 4440 static inline void netif_tx_lock_bh(struct net_device *dev) 4441 { 4442 local_bh_disable(); 4443 netif_tx_lock(dev); 4444 } 4445 4446 void netif_tx_unlock(struct net_device *dev); 4447 4448 static inline void netif_tx_unlock_bh(struct net_device *dev) 4449 { 4450 netif_tx_unlock(dev); 4451 local_bh_enable(); 4452 } 4453 4454 #define HARD_TX_LOCK(dev, txq, cpu) { \ 4455 if ((dev->features & NETIF_F_LLTX) == 
0) { \ 4456 __netif_tx_lock(txq, cpu); \ 4457 } else { \ 4458 __netif_tx_acquire(txq); \ 4459 } \ 4460 } 4461 4462 #define HARD_TX_TRYLOCK(dev, txq) \ 4463 (((dev->features & NETIF_F_LLTX) == 0) ? \ 4464 __netif_tx_trylock(txq) : \ 4465 __netif_tx_acquire(txq)) 4466 4467 #define HARD_TX_UNLOCK(dev, txq) { \ 4468 if ((dev->features & NETIF_F_LLTX) == 0) { \ 4469 __netif_tx_unlock(txq); \ 4470 } else { \ 4471 __netif_tx_release(txq); \ 4472 } \ 4473 } 4474 4475 static inline void netif_tx_disable(struct net_device *dev) 4476 { 4477 unsigned int i; 4478 int cpu; 4479 4480 local_bh_disable(); 4481 cpu = smp_processor_id(); 4482 spin_lock(&dev->tx_global_lock); 4483 for (i = 0; i < dev->num_tx_queues; i++) { 4484 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 4485 4486 __netif_tx_lock(txq, cpu); 4487 netif_tx_stop_queue(txq); 4488 __netif_tx_unlock(txq); 4489 } 4490 spin_unlock(&dev->tx_global_lock); 4491 local_bh_enable(); 4492 } 4493 4494 static inline void netif_addr_lock(struct net_device *dev) 4495 { 4496 unsigned char nest_level = 0; 4497 4498 #ifdef CONFIG_LOCKDEP 4499 nest_level = dev->nested_level; 4500 #endif 4501 spin_lock_nested(&dev->addr_list_lock, nest_level); 4502 } 4503 4504 static inline void netif_addr_lock_bh(struct net_device *dev) 4505 { 4506 unsigned char nest_level = 0; 4507 4508 #ifdef CONFIG_LOCKDEP 4509 nest_level = dev->nested_level; 4510 #endif 4511 local_bh_disable(); 4512 spin_lock_nested(&dev->addr_list_lock, nest_level); 4513 } 4514 4515 static inline void netif_addr_unlock(struct net_device *dev) 4516 { 4517 spin_unlock(&dev->addr_list_lock); 4518 } 4519 4520 static inline void netif_addr_unlock_bh(struct net_device *dev) 4521 { 4522 spin_unlock_bh(&dev->addr_list_lock); 4523 } 4524 4525 /* 4526 * dev_addrs walker. Should be used only for read access. Call with 4527 * rcu_read_lock held. 
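 *
 * Example (a minimal sketch):
 *
 *	struct netdev_hw_addr *ha;
 *
 *	rcu_read_lock();
 *	for_each_dev_addr(dev, ha)
 *		pr_info("%s: addr %pM type %d\n", dev->name, ha->addr,
 *			ha->type);
 *	rcu_read_unlock();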
*/
#define for_each_dev_addr(dev, ha) \
	list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

/* These functions live elsewhere (drivers/net/net_init.c), but are related */

void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
				    unsigned char name_assign_type,
				    void (*setup)(struct net_device *),
				    unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
			 count)

int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);

int devm_register_netdev(struct device *dev, struct net_device *ndev);

/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
		      struct netdev_hw_addr_list *from_list, int addr_len);
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
		       struct net_device *dev,
		       int (*sync)(struct net_device *, const unsigned char *),
		       int (*unsync)(struct net_device *,
				     const unsigned char *));
int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
			   struct net_device *dev,
			   int (*sync)(struct net_device *,
				       const unsigned char *, int),
			   int (*unsync)(struct net_device *,
					 const unsigned char *, int));
void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
			      struct net_device *dev,
			      int (*unsync)(struct net_device *,
					    const unsigned char *, int));
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
			  struct net_device *dev,
			  int (*unsync)(struct net_device *,
					const unsigned char *));
void __hw_addr_init(struct netdev_hw_addr_list *list);

/* Functions used for device addresses handling */
void dev_addr_mod(struct net_device *dev, unsigned int offset,
		  const void *addr, size_t len);

static inline void
__dev_addr_set(struct net_device *dev, const void *addr, size_t len)
{
	dev_addr_mod(dev, 0, addr, len);
}

static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
{
	__dev_addr_set(dev, addr, dev->addr_len);
}

int dev_addr_add(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);

/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_uc_del(struct net_device *dev, const unsigned char *addr);
int dev_uc_sync(struct net_device *to, struct net_device *from);
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_uc_unsync(struct net_device *to, struct net_device *from);
void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);

/**
 * __dev_uc_sync - Synchronize device's unicast list
 * @dev: device to sync
 * @sync: function to call if address should be added
 * @unsync: function to call if address should be removed
 *
 * Add newly added addresses to the interface, and release
 * addresses that have been deleted.
 */
static inline int __dev_uc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
}

/**
 * __dev_uc_unsync - Remove synchronized addresses from device
 * @dev: device to sync
 * @unsync: function to call if address should be removed
 *
 * Remove all addresses that were added to the device by dev_uc_sync().
 */
static inline void __dev_uc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
}

/* Functions used for multicast addresses handling */
int dev_mc_add(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_mc_del(struct net_device *dev, const unsigned char *addr);
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_sync(struct net_device *to, struct net_device *from);
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_mc_unsync(struct net_device *to, struct net_device *from);
void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);

/**
 * __dev_mc_sync - Synchronize device's multicast list
 * @dev: device to sync
 * @sync: function to call if address should be added
 * @unsync: function to call if address should be removed
 *
 * Add newly added addresses to the interface, and release
 * addresses that have been deleted.
 */
static inline int __dev_mc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
}

/**
 * __dev_mc_unsync - Remove synchronized addresses from device
 * @dev: device to sync
 * @unsync: function to call if address should be removed
 *
 * Remove all addresses that were added to the device by dev_mc_sync().
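 *
 * Typically called when the device goes down, e.g. from ndo_stop(),
 * mirroring a __dev_mc_sync() call in ndo_set_rx_mode(); a sketch,
 * with a hypothetical foo_mc_unsync() callback:
 *
 *	__dev_mc_unsync(dev, foo_mc_unsync);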
4677 */ 4678 static inline void __dev_mc_unsync(struct net_device *dev, 4679 int (*unsync)(struct net_device *, 4680 const unsigned char *)) 4681 { 4682 __hw_addr_unsync_dev(&dev->mc, dev, unsync); 4683 } 4684 4685 /* Functions used for secondary unicast and multicast support */ 4686 void dev_set_rx_mode(struct net_device *dev); 4687 int dev_set_promiscuity(struct net_device *dev, int inc); 4688 int dev_set_allmulti(struct net_device *dev, int inc); 4689 void netdev_state_change(struct net_device *dev); 4690 void __netdev_notify_peers(struct net_device *dev); 4691 void netdev_notify_peers(struct net_device *dev); 4692 void netdev_features_change(struct net_device *dev); 4693 /* Load a device via the kmod */ 4694 void dev_load(struct net *net, const char *name); 4695 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 4696 struct rtnl_link_stats64 *storage); 4697 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 4698 const struct net_device_stats *netdev_stats); 4699 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 4700 const struct pcpu_sw_netstats __percpu *netstats); 4701 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); 4702 4703 extern int netdev_max_backlog; 4704 extern int dev_rx_weight; 4705 extern int dev_tx_weight; 4706 extern int gro_normal_batch; 4707 4708 enum { 4709 NESTED_SYNC_IMM_BIT, 4710 NESTED_SYNC_TODO_BIT, 4711 }; 4712 4713 #define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit)) 4714 #define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT) 4715 4716 #define NESTED_SYNC_IMM __NESTED_SYNC(IMM) 4717 #define NESTED_SYNC_TODO __NESTED_SYNC(TODO) 4718 4719 struct netdev_nested_priv { 4720 unsigned char flags; 4721 void *data; 4722 }; 4723 4724 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); 4725 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 4726 struct list_head **iter); 4727 4728 /* iterate through upper list, must be called under RCU read lock */ 4729 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ 4730 for (iter = &(dev)->adj_list.upper, \ 4731 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ 4732 updev; \ 4733 updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) 4734 4735 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 4736 int (*fn)(struct net_device *upper_dev, 4737 struct netdev_nested_priv *priv), 4738 struct netdev_nested_priv *priv); 4739 4740 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 4741 struct net_device *upper_dev); 4742 4743 bool netdev_has_any_upper_dev(struct net_device *dev); 4744 4745 void *netdev_lower_get_next_private(struct net_device *dev, 4746 struct list_head **iter); 4747 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 4748 struct list_head **iter); 4749 4750 #define netdev_for_each_lower_private(dev, priv, iter) \ 4751 for (iter = (dev)->adj_list.lower.next, \ 4752 priv = netdev_lower_get_next_private(dev, &(iter)); \ 4753 priv; \ 4754 priv = netdev_lower_get_next_private(dev, &(iter))) 4755 4756 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \ 4757 for (iter = &(dev)->adj_list.lower, \ 4758 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ 4759 priv; \ 4760 priv = netdev_lower_get_next_private_rcu(dev, &(iter))) 4761 4762 void *netdev_lower_get_next(struct net_device *dev, 4763 struct list_head **iter); 4764 4765 #define netdev_for_each_lower_dev(dev, ldev, iter) \ 4766 for (iter = (dev)->adj_list.lower.next, \ 4767 ldev = 
netdev_lower_get_next(dev, &(iter)); \ 4768 ldev; \ 4769 ldev = netdev_lower_get_next(dev, &(iter))) 4770 4771 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 4772 struct list_head **iter); 4773 int netdev_walk_all_lower_dev(struct net_device *dev, 4774 int (*fn)(struct net_device *lower_dev, 4775 struct netdev_nested_priv *priv), 4776 struct netdev_nested_priv *priv); 4777 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 4778 int (*fn)(struct net_device *lower_dev, 4779 struct netdev_nested_priv *priv), 4780 struct netdev_nested_priv *priv); 4781 4782 void *netdev_adjacent_get_private(struct list_head *adj_list); 4783 void *netdev_lower_get_first_private_rcu(struct net_device *dev); 4784 struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 4785 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); 4786 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, 4787 struct netlink_ext_ack *extack); 4788 int netdev_master_upper_dev_link(struct net_device *dev, 4789 struct net_device *upper_dev, 4790 void *upper_priv, void *upper_info, 4791 struct netlink_ext_ack *extack); 4792 void netdev_upper_dev_unlink(struct net_device *dev, 4793 struct net_device *upper_dev); 4794 int netdev_adjacent_change_prepare(struct net_device *old_dev, 4795 struct net_device *new_dev, 4796 struct net_device *dev, 4797 struct netlink_ext_ack *extack); 4798 void netdev_adjacent_change_commit(struct net_device *old_dev, 4799 struct net_device *new_dev, 4800 struct net_device *dev); 4801 void netdev_adjacent_change_abort(struct net_device *old_dev, 4802 struct net_device *new_dev, 4803 struct net_device *dev); 4804 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); 4805 void *netdev_lower_dev_get_private(struct net_device *dev, 4806 struct net_device *lower_dev); 4807 void netdev_lower_state_changed(struct net_device *lower_dev, 4808 void *lower_state_info); 4809 4810 /* RSS keys are 40 or 52 bytes long */ 4811 #define NETDEV_RSS_KEY_LEN 52 4812 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; 4813 void netdev_rss_key_fill(void *buffer, size_t len); 4814 4815 int skb_checksum_help(struct sk_buff *skb); 4816 int skb_crc32c_csum_help(struct sk_buff *skb); 4817 int skb_csum_hwoffload_help(struct sk_buff *skb, 4818 const netdev_features_t features); 4819 4820 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 4821 netdev_features_t features, bool tx_path); 4822 struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb, 4823 netdev_features_t features, __be16 type); 4824 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, 4825 netdev_features_t features); 4826 4827 struct netdev_bonding_info { 4828 ifslave slave; 4829 ifbond master; 4830 }; 4831 4832 struct netdev_notifier_bonding_info { 4833 struct netdev_notifier_info info; /* must be first */ 4834 struct netdev_bonding_info bonding_info; 4835 }; 4836 4837 void netdev_bonding_info_change(struct net_device *dev, 4838 struct netdev_bonding_info *bonding_info); 4839 4840 #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) 4841 void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data); 4842 #else 4843 static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, 4844 const void *data) 4845 { 4846 } 4847 #endif 4848 4849 static inline 4850 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) 4851 { 4852 return __skb_gso_segment(skb, features, true); 4853 } 4854 __be16 
skb_network_protocol(struct sk_buff *skb, int *depth);

static inline bool can_checksum_protocol(netdev_features_t features,
					 __be16 protocol)
{
	if (protocol == htons(ETH_P_FCOE))
		return !!(features & NETIF_F_FCOE_CRC);

	/* Assume this is an IP checksum (not SCTP CRC) */

	if (features & NETIF_F_HW_CSUM) {
		/* Can checksum everything */
		return true;
	}

	switch (protocol) {
	case htons(ETH_P_IP):
		return !!(features & NETIF_F_IP_CSUM);
	case htons(ETH_P_IPV6):
		return !!(features & NETIF_F_IPV6_CSUM);
	default:
		return false;
	}
}

#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev,
					struct sk_buff *skb)
{
}
#endif
/* rx skb timestamps */
void net_enable_timestamp(void);
void net_disable_timestamp(void);

static inline ktime_t netdev_get_tstamp(struct net_device *dev,
					const struct skb_shared_hwtstamps *hwtstamps,
					bool cycles)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_tstamp)
		return ops->ndo_get_tstamp(dev, hwtstamps, cycles);

	return hwtstamps->hwtstamp;
}

static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
					      struct sk_buff *skb, struct net_device *dev,
					      bool more)
{
	__this_cpu_write(softnet_data.xmit.more, more);
	return ops->ndo_start_xmit(skb, dev);
}

static inline bool netdev_xmit_more(void)
{
	return __this_cpu_read(softnet_data.xmit.more);
}

static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
					    struct netdev_queue *txq, bool more)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	netdev_tx_t rc;

	rc = __netdev_start_xmit(ops, skb, dev, more);
	if (rc == NETDEV_TX_OK)
		txq_trans_update(txq);

	return rc;
}

int netdev_class_create_file_ns(const struct class_attribute *class_attr,
				const void *ns);
void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
				 const void *ns);

extern const struct kobj_ns_type_operations net_ns_type_operations;

const char *netdev_drivername(const struct net_device *dev);

static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
							   netdev_features_t f2)
{
	if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
		if (f1 & NETIF_F_HW_CSUM)
			f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
		else
			f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	return f1 & f2;
}

static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);

/* Allow TSO to be used on stacked devices:
 * performing the GSO segmentation before the last device
 * is a performance improvement.
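 *
 * E.g. an upper device recomputing its feature set from its lower
 * devices might do (an illustrative sketch, in the spirit of what
 * bonding/team do; "lower" and "iter" are locals):
 *
 *	netdev_for_each_lower_dev(dev, lower, iter)
 *		features = netdev_increment_features(features,
 *						     lower->features, mask);
 *	features = netdev_add_tso_features(features, mask);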
4962 */ 4963 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, 4964 netdev_features_t mask) 4965 { 4966 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); 4967 } 4968 4969 int __netdev_update_features(struct net_device *dev); 4970 void netdev_update_features(struct net_device *dev); 4971 void netdev_change_features(struct net_device *dev); 4972 4973 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 4974 struct net_device *dev); 4975 4976 netdev_features_t passthru_features_check(struct sk_buff *skb, 4977 struct net_device *dev, 4978 netdev_features_t features); 4979 netdev_features_t netif_skb_features(struct sk_buff *skb); 4980 4981 static inline bool net_gso_ok(netdev_features_t features, int gso_type) 4982 { 4983 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; 4984 4985 /* check flags correspondence */ 4986 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); 4987 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); 4988 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); 4989 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); 4990 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); 4991 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); 4992 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); 4993 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); 4994 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)); 4995 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)); 4996 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); 4997 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); 4998 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); 4999 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); 5000 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); 5001 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); 5002 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); 5003 BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); 5004 BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT)); 5005 5006 return (features & feature) == feature; 5007 } 5008 5009 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) 5010 { 5011 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && 5012 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 5013 } 5014 5015 static inline bool netif_needs_gso(struct sk_buff *skb, 5016 netdev_features_t features) 5017 { 5018 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || 5019 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && 5020 (skb->ip_summed != CHECKSUM_UNNECESSARY))); 5021 } 5022 5023 void netif_set_tso_max_size(struct net_device *dev, unsigned int size); 5024 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs); 5025 void netif_inherit_tso_max(struct net_device *to, 5026 const struct net_device *from); 5027 5028 static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, 5029 int pulled_hlen, u16 mac_offset, 5030 int mac_len) 5031 { 5032 skb->protocol = protocol; 5033 skb->encapsulation = 1; 5034 skb_push(skb, pulled_hlen); 5035 skb_reset_transport_header(skb); 5036 
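	/* Restore the original (pre-segmentation) header layout: put the
	 * MAC header back and recompute the network header from it.
	 */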
static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
					int pulled_hlen, u16 mac_offset,
					int mac_len)
{
	skb->protocol = protocol;
	skb->encapsulation = 1;
	skb_push(skb, pulled_hlen);
	skb_reset_transport_header(skb);
	skb->mac_header = mac_offset;
	skb->network_header = skb->mac_header + mac_len;
	skb->mac_len = mac_len;
}

static inline bool netif_is_macsec(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACSEC;
}

static inline bool netif_is_macvlan(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;
}

static inline bool netif_is_macvlan_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN_PORT;
}

static inline bool netif_is_bond_master(const struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(const struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
}

static inline bool netif_is_l3_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_MASTER;
}

static inline bool netif_is_l3_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_SLAVE;
}

static inline bool netif_is_bridge_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_EBRIDGE;
}

static inline bool netif_is_bridge_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_BRIDGE_PORT;
}

static inline bool netif_is_ovs_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OPENVSWITCH;
}

static inline bool netif_is_ovs_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OVS_DATAPATH;
}

static inline bool netif_is_any_bridge_port(const struct net_device *dev)
{
	return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
}

static inline bool netif_is_team_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM;
}

static inline bool netif_is_team_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM_PORT;
}

static inline bool netif_is_lag_master(const struct net_device *dev)
{
	return netif_is_bond_master(dev) || netif_is_team_master(dev);
}

static inline bool netif_is_lag_port(const struct net_device *dev)
{
	return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}

static inline bool netif_is_rxfh_configured(const struct net_device *dev)
{
	return dev->priv_flags & IFF_RXFH_CONFIGURED;
}

static inline bool netif_is_failover(const struct net_device *dev)
{
	return dev->priv_flags & IFF_FAILOVER;
}

static inline bool netif_is_failover_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_FAILOVER_SLAVE;
}

/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}
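/*
 * Example (illustrative sketch, not part of the kernel API): the netif_is_*
 * predicates above are plain flag tests and are meant to be composed, as
 * netif_is_lag_master() and netif_is_any_bridge_port() already demonstrate.
 * A hypothetical "is this an upper device that aggregates others?" check
 * could be written as:
 */
static inline bool example_is_aggregating_master(const struct net_device *dev)
{
	return netif_is_lag_master(dev) ||
	       netif_is_bridge_master(dev) ||
	       netif_is_ovs_master(dev);
}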
/* Return true if dev can't cope with MTU-sized frames that need VLAN tag insertion. */
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
	/* TODO: reserve and use an additional IFF bit, if we get more users */
	return netif_is_macsec(dev);
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
	switch (dev->reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
	return " (unknown)";
}

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

#define netdev_WARN_ONCE(dev, format, args...)				\
	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
		  netdev_reg_state(dev), ##args)

/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16?  With 16 buckets, the only collision on a hash of the low
 * nibble of the protocol value is RARP/SNAP/X.25.
 *
 *		0800	IP
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

extern struct list_head ptype_all __read_mostly;
extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;

extern struct net_device *blackhole_netdev;

/* Note: avoid these macros on the fast path; prefer per-cpu or per-queue counters. */
#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
#define DEV_STATS_ADD(DEV, FIELD, VAL) 		\
		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)

#endif	/* _LINUX_NETDEVICE_H */
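/*
 * Usage sketch (illustrative only; kept in a comment so nothing is compiled
 * outside the include guard, and the names bad_frame/skb are hypothetical):
 * a driver error path might combine the stats and warning helpers above as
 *
 *	if (unlikely(bad_frame)) {
 *		DEV_STATS_INC(dev, rx_errors);
 *		netdev_WARN_ONCE(dev, "malformed frame, len %u\n", skb->len);
 *	}
 *
 * DEV_STATS_INC()/DEV_STATS_ADD() use atomic_long operations on the shared
 * net_device stats, hence the note above about preferring per-cpu or
 * per-queue counters on hot paths.
 */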