xref: /linux-6.15/include/linux/netdevice.h (revision 529d6dad)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Definitions for the Interfaces handler.
7  *
8  * Version:	@(#)dev.h	1.0.10	08/12/93
9  *
10  * Authors:	Ross Biro
11  *		Fred N. van Kempen, <[email protected]>
12  *		Corey Minyard <[email protected]>
13  *		Donald J. Becker, <[email protected]>
14  *		Alan Cox, <[email protected]>
15  *		Bjorn Ekwall. <[email protected]>
16  *              Pekka Riikonen <[email protected]>
17  *
18  *		This program is free software; you can redistribute it and/or
19  *		modify it under the terms of the GNU General Public License
20  *		as published by the Free Software Foundation; either version
21  *		2 of the License, or (at your option) any later version.
22  *
23  *		Moved to /usr/include/linux for NET3
24  */
25 #ifndef _LINUX_NETDEVICE_H
26 #define _LINUX_NETDEVICE_H
27 
28 #include <linux/if.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_packet.h>
31 #include <linux/if_link.h>
32 
33 #ifdef __KERNEL__
34 #include <linux/pm_qos_params.h>
35 #include <linux/timer.h>
36 #include <linux/delay.h>
37 #include <linux/mm.h>
38 #include <asm/atomic.h>
39 #include <asm/cache.h>
40 #include <asm/byteorder.h>
41 
42 #include <linux/device.h>
43 #include <linux/percpu.h>
44 #include <linux/rculist.h>
45 #include <linux/dmaengine.h>
46 #include <linux/workqueue.h>
47 
48 #include <linux/ethtool.h>
49 #include <net/net_namespace.h>
50 #include <net/dsa.h>
51 #ifdef CONFIG_DCB
52 #include <net/dcbnl.h>
53 #endif
54 
55 struct vlan_group;
56 struct netpoll_info;
57 /* 802.11 specific */
58 struct wireless_dev;
59 					/* source back-compat hooks */
60 #define SET_ETHTOOL_OPS(netdev,ops) \
61 	( (netdev)->ethtool_ops = (ops) )
62 
63 #define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
64 					   functions are available. */
65 #define HAVE_FREE_NETDEV		/* free_netdev() */
66 #define HAVE_NETDEV_PRIV		/* netdev_priv() */
67 
68 /* Backlog congestion levels */
69 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
70 #define NET_RX_DROP		1	/* packet dropped */
71 
72 /*
73  * Transmit return codes: transmit return codes originate from three different
74  * namespaces:
75  *
76  * - qdisc return codes
77  * - driver transmit return codes
78  * - errno values
79  *
80  * Drivers are allowed to return any one of those in their hard_start_xmit()
81  * function. Real network devices commonly used with qdiscs should only return
82  * the driver transmit return codes though - when qdiscs are used, the actual
83  * transmission happens asynchronously, so the value is not propagated to
84  * higher layers. Virtual network devices transmit synchronously, in this case
85  * the driver transmit return codes are consumed by dev_queue_xmit(), all
86  * others are propagated to higher layers.
87  */
88 
89 /* qdisc ->enqueue() return codes. */
90 #define NET_XMIT_SUCCESS	0x00
91 #define NET_XMIT_DROP		0x01	/* skb dropped			*/
92 #define NET_XMIT_CN		0x02	/* congestion notification	*/
93 #define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
94 #define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */
95 
96 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
97  * indicates that the device will soon be dropping packets, or already drops
98  * some packets of the same priority, prompting us to send less aggressively. */
99 #define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
100 #define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
101 
102 /* Driver transmit return codes */
103 #define NETDEV_TX_MASK		0xf0
104 
105 enum netdev_tx {
106 	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
107 	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
108 	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
109 	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
110 };
111 typedef enum netdev_tx netdev_tx_t;
112 
113 /*
114  * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
115  * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
116  */
117 static inline bool dev_xmit_complete(int rc)
118 {
119 	/*
120 	 * Positive cases with an skb consumed by a driver:
121 	 * - successful transmission (rc == NETDEV_TX_OK)
122 	 * - error while transmitting (rc < 0)
123 	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
124 	 */
125 	if (likely(rc < NET_XMIT_MASK))
126 		return true;
127 
128 	return false;
129 }
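
/*
 * Illustrative sketch (not part of this API): how a transmit path might
 * use dev_xmit_complete().  "my_dev_xmit" and "requeue_skb" are made-up
 * names standing in for driver and queueing code.
 *
 *	rc = my_dev_xmit(skb, dev);
 *	if (dev_xmit_complete(rc)) {
 *		// skb was consumed: sent, dropped, or queued elsewhere.
 *		// The caller must not reference it again.
 *	} else {
 *		// NETDEV_TX_BUSY/NETDEV_TX_LOCKED: the skb is still ours,
 *		// so e.g. requeue it and retry later.
 *		requeue_skb(skb);
 *	}
 */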
130 
131 #endif
132 
133 #define MAX_ADDR_LEN	32		/* Largest hardware address length */
134 
135 #ifdef  __KERNEL__
136 /*
137  *	Compute the worst case header length according to the protocols
138  *	used.
139  */
140 
141 #if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
142 # if defined(CONFIG_MAC80211_MESH)
143 #  define LL_MAX_HEADER 128
144 # else
145 #  define LL_MAX_HEADER 96
146 # endif
147 #elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
148 # define LL_MAX_HEADER 48
149 #else
150 # define LL_MAX_HEADER 32
151 #endif
152 
153 #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
154     !defined(CONFIG_NET_IPGRE) &&  !defined(CONFIG_NET_IPGRE_MODULE) && \
155     !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
156     !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
157 #define MAX_HEADER LL_MAX_HEADER
158 #else
159 #define MAX_HEADER (LL_MAX_HEADER + 48)
160 #endif
161 
162 /*
163  *	Old network device statistics. Fields are native words
164  *	(unsigned long) so they can be read and written atomically.
165  *	Each field is padded to 64 bits for compatibility with
166  *	rtnl_link_stats64.
167  */
168 
169 #if BITS_PER_LONG == 64
170 #define NET_DEVICE_STATS_DEFINE(name)	unsigned long name
171 #elif defined(__LITTLE_ENDIAN)
172 #define NET_DEVICE_STATS_DEFINE(name)	unsigned long name, pad_ ## name
173 #else
174 #define NET_DEVICE_STATS_DEFINE(name)	unsigned long pad_ ## name, name
175 #endif
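
/*
 * For example, on a 32-bit little-endian build,
 * NET_DEVICE_STATS_DEFINE(rx_packets) expands to
 * "unsigned long rx_packets, pad_rx_packets;", so each field still
 * occupies 64 bits and the layout matches rtnl_link_stats64.
 */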
176 
177 struct net_device_stats {
178 	NET_DEVICE_STATS_DEFINE(rx_packets);
179 	NET_DEVICE_STATS_DEFINE(tx_packets);
180 	NET_DEVICE_STATS_DEFINE(rx_bytes);
181 	NET_DEVICE_STATS_DEFINE(tx_bytes);
182 	NET_DEVICE_STATS_DEFINE(rx_errors);
183 	NET_DEVICE_STATS_DEFINE(tx_errors);
184 	NET_DEVICE_STATS_DEFINE(rx_dropped);
185 	NET_DEVICE_STATS_DEFINE(tx_dropped);
186 	NET_DEVICE_STATS_DEFINE(multicast);
187 	NET_DEVICE_STATS_DEFINE(collisions);
188 	NET_DEVICE_STATS_DEFINE(rx_length_errors);
189 	NET_DEVICE_STATS_DEFINE(rx_over_errors);
190 	NET_DEVICE_STATS_DEFINE(rx_crc_errors);
191 	NET_DEVICE_STATS_DEFINE(rx_frame_errors);
192 	NET_DEVICE_STATS_DEFINE(rx_fifo_errors);
193 	NET_DEVICE_STATS_DEFINE(rx_missed_errors);
194 	NET_DEVICE_STATS_DEFINE(tx_aborted_errors);
195 	NET_DEVICE_STATS_DEFINE(tx_carrier_errors);
196 	NET_DEVICE_STATS_DEFINE(tx_fifo_errors);
197 	NET_DEVICE_STATS_DEFINE(tx_heartbeat_errors);
198 	NET_DEVICE_STATS_DEFINE(tx_window_errors);
199 	NET_DEVICE_STATS_DEFINE(rx_compressed);
200 	NET_DEVICE_STATS_DEFINE(tx_compressed);
201 };
202 
203 #endif  /*  __KERNEL__  */
204 
205 
206 /* Media selection options. */
207 enum {
208         IF_PORT_UNKNOWN = 0,
209         IF_PORT_10BASE2,
210         IF_PORT_10BASET,
211         IF_PORT_AUI,
212         IF_PORT_100BASET,
213         IF_PORT_100BASETX,
214         IF_PORT_100BASEFX
215 };
216 
217 #ifdef __KERNEL__
218 
219 #include <linux/cache.h>
220 #include <linux/skbuff.h>
221 
222 struct neighbour;
223 struct neigh_parms;
224 struct sk_buff;
225 
226 struct netdev_hw_addr {
227 	struct list_head	list;
228 	unsigned char		addr[MAX_ADDR_LEN];
229 	unsigned char		type;
230 #define NETDEV_HW_ADDR_T_LAN		1
231 #define NETDEV_HW_ADDR_T_SAN		2
232 #define NETDEV_HW_ADDR_T_SLAVE		3
233 #define NETDEV_HW_ADDR_T_UNICAST	4
234 #define NETDEV_HW_ADDR_T_MULTICAST	5
235 	int			refcount;
236 	bool			synced;
237 	bool			global_use;
238 	struct rcu_head		rcu_head;
239 };
240 
241 struct netdev_hw_addr_list {
242 	struct list_head	list;
243 	int			count;
244 };
245 
246 #define netdev_hw_addr_list_count(l) ((l)->count)
247 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
248 #define netdev_hw_addr_list_for_each(ha, l) \
249 	list_for_each_entry(ha, &(l)->list, list)
250 
251 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
252 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
253 #define netdev_for_each_uc_addr(ha, dev) \
254 	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
255 
256 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
257 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
258 #define netdev_for_each_mc_addr(ha, dev) \
259 	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
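
/*
 * Usage sketch (hypothetical driver): walking the multicast list from a
 * ndo_set_multicast_list handler.  "my_hw_add_mc_filter" is an assumed
 * hardware helper, not part of this header.
 *
 *	static void my_set_multicast_list(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		netdev_for_each_mc_addr(ha, dev)
 *			my_hw_add_mc_filter(dev, ha->addr);
 *	}
 */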
260 
261 struct hh_cache {
262 	struct hh_cache *hh_next;	/* Next entry			     */
263 	atomic_t	hh_refcnt;	/* number of users                   */
264 /*
265  * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
266  * cache line on SMP.
267  * They are mostly read, but hh_refcnt may be changed quite frequently,
268  * incurring cache line ping pongs.
269  */
270 	__be16		hh_type ____cacheline_aligned_in_smp;
271 					/* protocol identifier, e.g. ETH_P_IP
272                                          *  NOTE:  For VLANs, this will be the
273                                          *  encapsulated type. --BLG
274                                          */
275 	u16		hh_len;		/* length of header */
276 	int		(*hh_output)(struct sk_buff *skb);
277 	seqlock_t	hh_lock;
278 
279 	/* cached hardware header; allow for machine alignment needs.        */
280 #define HH_DATA_MOD	16
281 #define HH_DATA_OFF(__len) \
282 	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
283 #define HH_DATA_ALIGN(__len) \
284 	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
285 	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
286 };
287 
288 /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
289  * Alternative is:
290  *   dev->hard_header_len ? (dev->hard_header_len +
291  *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
292  *
293  * We could use other alignment values, but we must maintain the
294  * relationship HH alignment <= LL alignment.
295  *
296  * LL_ALLOCATED_SPACE also takes into account the tailroom the device
297  * may need.
298  */
299 #define LL_RESERVED_SPACE(dev) \
300 	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
301 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
302 	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
303 #define LL_ALLOCATED_SPACE(dev) \
304 	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
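
/*
 * Usage sketch: allocating a transmit skb with worst-case link-layer
 * headroom; this is the pattern used by e.g. ARP packet construction.
 *
 *	skb = alloc_skb(payload_len + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 *	if (skb == NULL)
 *		return NULL;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	// ...fill in payload, dev_hard_header(), dev_queue_xmit()...
 */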
305 
306 struct header_ops {
307 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
308 			   unsigned short type, const void *daddr,
309 			   const void *saddr, unsigned len);
310 	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
311 	int	(*rebuild)(struct sk_buff *skb);
312 #define HAVE_HEADER_CACHE
313 	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
314 	void	(*cache_update)(struct hh_cache *hh,
315 				const struct net_device *dev,
316 				const unsigned char *haddr);
317 };
318 
319 /* These flag bits are private to the generic network queueing
320  * layer, they may not be explicitly referenced by any other
321  * code.
322  */
323 
324 enum netdev_state_t {
325 	__LINK_STATE_START,
326 	__LINK_STATE_PRESENT,
327 	__LINK_STATE_NOCARRIER,
328 	__LINK_STATE_LINKWATCH_PENDING,
329 	__LINK_STATE_DORMANT,
330 };
331 
332 
333 /*
334  * This structure holds at boot time configured netdevice settings. They
335  * are then used in the device probing.
336  */
337 struct netdev_boot_setup {
338 	char name[IFNAMSIZ];
339 	struct ifmap map;
340 };
341 #define NETDEV_BOOT_SETUP_MAX 8
342 
343 extern int __init netdev_boot_setup(char *str);
344 
345 /*
346  * Structure for NAPI scheduling similar to tasklet but with weighting
347  */
348 struct napi_struct {
349 	/* The poll_list must only be managed by the entity which
350 	 * changes the state of the NAPI_STATE_SCHED bit.  This means
351 	 * whoever atomically sets that bit can add this napi_struct
352 	 * to the per-cpu poll_list, and whoever clears that bit
353 	 * can remove from the list right before clearing the bit.
354 	 */
355 	struct list_head	poll_list;
356 
357 	unsigned long		state;
358 	int			weight;
359 	int			(*poll)(struct napi_struct *, int);
360 #ifdef CONFIG_NETPOLL
361 	spinlock_t		poll_lock;
362 	int			poll_owner;
363 #endif
364 
365 	unsigned int		gro_count;
366 
367 	struct net_device	*dev;
368 	struct list_head	dev_list;
369 	struct sk_buff		*gro_list;
370 	struct sk_buff		*skb;
371 };
372 
373 enum {
374 	NAPI_STATE_SCHED,	/* Poll is scheduled */
375 	NAPI_STATE_DISABLE,	/* Disable pending */
376 	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
377 };
378 
379 enum gro_result {
380 	GRO_MERGED,
381 	GRO_MERGED_FREE,
382 	GRO_HELD,
383 	GRO_NORMAL,
384 	GRO_DROP,
385 };
386 typedef enum gro_result gro_result_t;
387 
388 typedef struct sk_buff *rx_handler_func_t(struct sk_buff *skb);
389 
390 extern void __napi_schedule(struct napi_struct *n);
391 
392 static inline int napi_disable_pending(struct napi_struct *n)
393 {
394 	return test_bit(NAPI_STATE_DISABLE, &n->state);
395 }
396 
397 /**
398  *	napi_schedule_prep - check if napi can be scheduled
399  *	@n: napi context
400  *
401  * Test if NAPI routine is already running, and if not mark
402  * it as running.  This is used as a condition variable
403  * insure only one NAPI poll instance runs.  We also make
404  * sure there is no pending NAPI disable.
405  */
406 static inline int napi_schedule_prep(struct napi_struct *n)
407 {
408 	return !napi_disable_pending(n) &&
409 		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
410 }
411 
412 /**
413  *	napi_schedule - schedule NAPI poll
414  *	@n: napi context
415  *
416  * Schedule NAPI poll routine to be called if it is not already
417  * running.
418  */
419 static inline void napi_schedule(struct napi_struct *n)
420 {
421 	if (napi_schedule_prep(n))
422 		__napi_schedule(n);
423 }
424 
425 /* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
426 static inline int napi_reschedule(struct napi_struct *napi)
427 {
428 	if (napi_schedule_prep(napi)) {
429 		__napi_schedule(napi);
430 		return 1;
431 	}
432 	return 0;
433 }
434 
435 /**
436  *	napi_complete - NAPI processing complete
437  *	@n: napi context
438  *
439  * Mark NAPI processing as complete.
440  */
441 extern void __napi_complete(struct napi_struct *n);
442 extern void napi_complete(struct napi_struct *n);
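
/*
 * Typical NAPI usage (hypothetical driver sketch; the "my_*" names are
 * assumptions): the interrupt handler masks RX interrupts and schedules
 * the poll; the poll routine processes at most @budget packets and only
 * re-enables interrupts after napi_complete().
 *
 *	static irqreturn_t my_isr(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		my_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int done = my_clean_rx_ring(priv, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			my_enable_rx_irq(priv);
 *		}
 *		return done;
 *	}
 */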
443 
444 /**
445  *	napi_disable - prevent NAPI from scheduling
446  *	@n: napi context
447  *
448  * Stop NAPI from being scheduled on this context.
449  * Waits till any outstanding processing completes.
450  */
451 static inline void napi_disable(struct napi_struct *n)
452 {
453 	set_bit(NAPI_STATE_DISABLE, &n->state);
454 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
455 		msleep(1);
456 	clear_bit(NAPI_STATE_DISABLE, &n->state);
457 }
458 
459 /**
460  *	napi_enable - enable NAPI scheduling
461  *	@n: napi context
462  *
463  * Allow NAPI to be scheduled on this context again.
464  * Must be paired with napi_disable.
465  */
466 static inline void napi_enable(struct napi_struct *n)
467 {
468 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
469 	smp_mb__before_clear_bit();
470 	clear_bit(NAPI_STATE_SCHED, &n->state);
471 }
472 
473 #ifdef CONFIG_SMP
474 /**
475  *	napi_synchronize - wait until NAPI is not running
476  *	@n: napi context
477  *
478  * Wait until NAPI is done being scheduled on this context.
479  * Waits till any outstanding processing completes but
480  * does not disable future activations.
481  */
482 static inline void napi_synchronize(const struct napi_struct *n)
483 {
484 	while (test_bit(NAPI_STATE_SCHED, &n->state))
485 		msleep(1);
486 }
487 #else
488 # define napi_synchronize(n)	barrier()
489 #endif
490 
491 enum netdev_queue_state_t {
492 	__QUEUE_STATE_XOFF,
493 	__QUEUE_STATE_FROZEN,
494 };
495 
496 struct netdev_queue {
497 /*
498  * read mostly part
499  */
500 	struct net_device	*dev;
501 	struct Qdisc		*qdisc;
502 	unsigned long		state;
503 	struct Qdisc		*qdisc_sleeping;
504 /*
505  * write mostly part
506  */
507 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
508 	int			xmit_lock_owner;
509 	/*
510 	 * please use this field instead of dev->trans_start
511 	 */
512 	unsigned long		trans_start;
513 	unsigned long		tx_bytes;
514 	unsigned long		tx_packets;
515 	unsigned long		tx_dropped;
516 } ____cacheline_aligned_in_smp;
517 
518 #ifdef CONFIG_RPS
519 /*
520  * This structure holds an RPS map which can be of variable length.  The
521  * map is an array of CPUs.
522  */
523 struct rps_map {
524 	unsigned int len;
525 	struct rcu_head rcu;
526 	u16 cpus[0];
527 };
528 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
529 
530 /*
531  * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
532  * tail pointer for that CPU's input queue at the time of last enqueue.
533  */
534 struct rps_dev_flow {
535 	u16 cpu;
536 	u16 fill;
537 	unsigned int last_qtail;
538 };
539 
540 /*
541  * The rps_dev_flow_table structure contains a table of flow mappings.
542  */
543 struct rps_dev_flow_table {
544 	unsigned int mask;
545 	struct rcu_head rcu;
546 	struct work_struct free_work;
547 	struct rps_dev_flow flows[0];
548 };
549 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
550     (_num * sizeof(struct rps_dev_flow)))
551 
552 /*
553  * The rps_sock_flow_table contains mappings of flows to the last CPU
554  * on which they were processed by the application (set in recvmsg).
555  */
556 struct rps_sock_flow_table {
557 	unsigned int mask;
558 	u16 ents[0];
559 };
560 #define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
561     (_num * sizeof(u16)))
562 
563 #define RPS_NO_CPU 0xffff
564 
565 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
566 					u32 hash)
567 {
568 	if (table && hash) {
569 		unsigned int cpu, index = hash & table->mask;
570 
571 		/* We only give a hint, preemption can change cpu under us */
572 		cpu = raw_smp_processor_id();
573 
574 		if (table->ents[index] != cpu)
575 			table->ents[index] = cpu;
576 	}
577 }
578 
579 static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
580 				       u32 hash)
581 {
582 	if (table && hash)
583 		table->ents[hash & table->mask] = RPS_NO_CPU;
584 }
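
/*
 * Sketch of the intended use (mirrors the socket receive path): recvmsg
 * records the CPU it runs on for the flow hash, so RFS can steer future
 * packets of this flow to the same CPU.  sk->sk_rxhash is the flow hash
 * stored on the socket.
 *
 *	struct rps_sock_flow_table *tbl;
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(rps_sock_flow_table);
 *	rps_record_sock_flow(tbl, sk->sk_rxhash);
 *	rcu_read_unlock();
 */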
585 
586 extern struct rps_sock_flow_table *rps_sock_flow_table;
587 
588 /* This structure contains an instance of an RX queue. */
589 struct netdev_rx_queue {
590 	struct rps_map *rps_map;
591 	struct rps_dev_flow_table *rps_flow_table;
592 	struct kobject kobj;
593 	struct netdev_rx_queue *first;
594 	atomic_t count;
595 } ____cacheline_aligned_in_smp;
596 #endif /* CONFIG_RPS */
597 
598 /*
599  * This structure defines the management hooks for network devices.
600  * The following hooks can be defined; unless noted otherwise, they are
601  * optional and can be filled with a null pointer.
602  *
603  * int (*ndo_init)(struct net_device *dev);
604  *     This function is called once when a network device is registered.
605  *     The network device can use this for any late stage initialization
606  *     or semantic validation. It can fail with an error code which will
607  *     be propagated back to register_netdev.
608  *
609  * void (*ndo_uninit)(struct net_device *dev);
610  *     This function is called when the device is unregistered or when registration
611  *     fails. It is not called if init fails.
612  *
613  * int (*ndo_open)(struct net_device *dev);
614  *     This function is called when the network device transitions to the up
615  *     state.
616  *
617  * int (*ndo_stop)(struct net_device *dev);
618  *     This function is called when the network device transitions to the down
619  *     state.
620  *
621  * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
622  *                               struct net_device *dev);
623  *	Called when a packet needs to be transmitted.
624  *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
625  *        (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
626  *	Required; cannot be NULL.
627  *
628  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
629  *	Called to decide which queue to use when the device supports
630  *	multiple transmit queues.
631  *
632  * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
633  *	This function is called to allow the device receiver to make
634  *	changes to its configuration when multicast or promiscuous mode is enabled.
635  *
636  * void (*ndo_set_rx_mode)(struct net_device *dev);
637  *	This function is called when the device changes its address list filtering.
638  *
639  * void (*ndo_set_multicast_list)(struct net_device *dev);
640  *	This function is called when the multicast address list changes.
641  *
642  * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
643  *	This function is called when the Media Access Control address
644  *	needs to be changed. If this interface is not defined, the
645  *	MAC address cannot be changed.
646  *
647  * int (*ndo_validate_addr)(struct net_device *dev);
648  *	Test if Media Access Control address is valid for the device.
649  *
650  * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
651  *	Called when a user requests an ioctl which can't be handled by
652  *	the generic interface code. If not defined, ioctls return a
653  *	not-supported error code.
654  *
655  * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
656  *	Used to set a network device's bus interface parameters. This interface
657  *	is retained for legacy reasons; new devices should use the bus
658  *	interface (PCI) for low level management.
659  *
660  * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
661  *	Called when a user wants to change the Maximum Transfer Unit
662  *	of a device. If not defined, any request to change the MTU
663  *	will return an error.
664  *
665  * void (*ndo_tx_timeout)(struct net_device *dev);
666  *	Callback used when the transmitter has not made any progress
667  *	for dev->watchdog ticks.
668  *
669  * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev);
670  * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
671  *	Called when a user wants to get the network device usage
672  *	statistics. Drivers must do one of the following:
673  *	1. Define @ndo_get_stats64 to update a rtnl_link_stats64 structure
674  *	   (which should normally be dev->stats64) and return a pointer to
675  *	   it. The structure must not be changed asynchronously.
676  *	2. Define @ndo_get_stats to update a net_device_stats structure
677  *	   (which should normally be dev->stats) and return a pointer to
678  *	   it. The structure may be changed asynchronously only if each
679  *	   field is written atomically.
680  *	3. Update dev->stats asynchronously and atomically, and define
681  *	   neither operation.
682  *
683  * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
684  *	If the device supports VLAN receive acceleration
685  *	(i.e. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
686  *	when the vlan groups for the device change.  Note: grp is NULL
687  *	if no vlan groups are in use.
688  *
689  * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
690  *	If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
691  *	this function is called when a VLAN id is registered.
692  *
693  * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
694  *	If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
695  *	this function is called when a VLAN id is unregistered.
696  *
697  * void (*ndo_poll_controller)(struct net_device *dev);
698  *
699  *	SR-IOV management functions.
700  * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
701  * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
702  * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
703  * int (*ndo_get_vf_config)(struct net_device *dev,
704  *			    int vf, struct ifla_vf_info *ivf);
705  * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
706  *			  struct nlattr *port[]);
707  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
708  */
709 #define HAVE_NET_DEVICE_OPS
710 struct net_device_ops {
711 	int			(*ndo_init)(struct net_device *dev);
712 	void			(*ndo_uninit)(struct net_device *dev);
713 	int			(*ndo_open)(struct net_device *dev);
714 	int			(*ndo_stop)(struct net_device *dev);
715 	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
716 						   struct net_device *dev);
717 	u16			(*ndo_select_queue)(struct net_device *dev,
718 						    struct sk_buff *skb);
719 	void			(*ndo_change_rx_flags)(struct net_device *dev,
720 						       int flags);
721 	void			(*ndo_set_rx_mode)(struct net_device *dev);
722 	void			(*ndo_set_multicast_list)(struct net_device *dev);
723 	int			(*ndo_set_mac_address)(struct net_device *dev,
724 						       void *addr);
725 	int			(*ndo_validate_addr)(struct net_device *dev);
726 	int			(*ndo_do_ioctl)(struct net_device *dev,
727 					        struct ifreq *ifr, int cmd);
728 	int			(*ndo_set_config)(struct net_device *dev,
729 					          struct ifmap *map);
730 	int			(*ndo_change_mtu)(struct net_device *dev,
731 						  int new_mtu);
732 	int			(*ndo_neigh_setup)(struct net_device *dev,
733 						   struct neigh_parms *);
734 	void			(*ndo_tx_timeout) (struct net_device *dev);
735 
736 	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev);
737 	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
738 
739 	void			(*ndo_vlan_rx_register)(struct net_device *dev,
740 						        struct vlan_group *grp);
741 	void			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
742 						       unsigned short vid);
743 	void			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
744 						        unsigned short vid);
745 #ifdef CONFIG_NET_POLL_CONTROLLER
746 	void                    (*ndo_poll_controller)(struct net_device *dev);
747 	int			(*ndo_netpoll_setup)(struct net_device *dev,
748 						     struct netpoll_info *info);
749 	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
750 #endif
751 	int			(*ndo_set_vf_mac)(struct net_device *dev,
752 						  int queue, u8 *mac);
753 	int			(*ndo_set_vf_vlan)(struct net_device *dev,
754 						   int queue, u16 vlan, u8 qos);
755 	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
756 						      int vf, int rate);
757 	int			(*ndo_get_vf_config)(struct net_device *dev,
758 						     int vf,
759 						     struct ifla_vf_info *ivf);
760 	int			(*ndo_set_vf_port)(struct net_device *dev,
761 						   int vf,
762 						   struct nlattr *port[]);
763 	int			(*ndo_get_vf_port)(struct net_device *dev,
764 						   int vf, struct sk_buff *skb);
765 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
766 	int			(*ndo_fcoe_enable)(struct net_device *dev);
767 	int			(*ndo_fcoe_disable)(struct net_device *dev);
768 	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
769 						      u16 xid,
770 						      struct scatterlist *sgl,
771 						      unsigned int sgc);
772 	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
773 						     u16 xid);
774 #define NETDEV_FCOE_WWNN 0
775 #define NETDEV_FCOE_WWPN 1
776 	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
777 						    u64 *wwn, int type);
778 #endif
779 };
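
/*
 * Sketch of a minimal ops table for a hypothetical Ethernet driver; only
 * ndo_start_xmit is mandatory, every other hook may stay NULL.  The
 * "my_*" handlers are assumed driver functions; eth_mac_addr,
 * eth_validate_addr and eth_change_mtu are the stock Ethernet helpers.
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_open		= my_open,
 *		.ndo_stop		= my_stop,
 *		.ndo_start_xmit		= my_start_xmit,
 *		.ndo_set_multicast_list	= my_set_multicast_list,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *		.ndo_change_mtu		= eth_change_mtu,
 *	};
 *
 * The driver then points dev->netdev_ops at this table before
 * register_netdev().
 */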
780 
781 /*
782  *	The DEVICE structure.
783  *	Actually, this whole structure is a big mistake.  It mixes I/O
784  *	data with strictly "high-level" data, and it has to know about
785  *	almost every data structure used in the INET module.
786  *
787  *	FIXME: cleanup struct net_device such that network protocol info
788  *	moves out.
789  */
790 
791 struct net_device {
792 
793 	/*
794 	 * This is the first field of the "visible" part of this structure
795 	 * (i.e. as seen by users in the "Space.c" file).  It is the name
796 	 * the interface.
797 	 */
798 	char			name[IFNAMSIZ];
799 
800 	struct pm_qos_request_list *pm_qos_req;
801 
802 	/* device name hash chain */
803 	struct hlist_node	name_hlist;
804 	/* snmp alias */
805 	char 			*ifalias;
806 
807 	/*
808 	 *	I/O specific fields
809 	 *	FIXME: Merge these and struct ifmap into one
810 	 */
811 	unsigned long		mem_end;	/* shared mem end	*/
812 	unsigned long		mem_start;	/* shared mem start	*/
813 	unsigned long		base_addr;	/* device I/O address	*/
814 	unsigned int		irq;		/* device IRQ number	*/
815 
816 	/*
817 	 *	Some hardware also needs these fields, but they are not
818 	 *	part of the usual set specified in Space.c.
819 	 */
820 
821 	unsigned char		if_port;	/* Selectable AUI, TP,..*/
822 	unsigned char		dma;		/* DMA channel		*/
823 
824 	unsigned long		state;
825 
826 	struct list_head	dev_list;
827 	struct list_head	napi_list;
828 	struct list_head	unreg_list;
829 
830 	/* Net device features */
831 	unsigned long		features;
832 #define NETIF_F_SG		1	/* Scatter/gather IO. */
833 #define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
834 #define NETIF_F_NO_CSUM		4	/* Does not require checksum. E.g. loopback. */
835 #define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
836 #define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
837 #define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
838 #define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
839 #define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
840 #define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
841 #define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
842 #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
843 #define NETIF_F_GSO		2048	/* Enable software GSO. */
844 #define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
845 					/* do not use LLTX in new drivers */
846 #define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
847 #define NETIF_F_GRO		16384	/* Generic receive offload */
848 #define NETIF_F_LRO		32768	/* large receive offload */
849 
850 /* the GSO_MASK reserves bits 16 through 23 */
851 #define NETIF_F_FCOE_CRC	(1 << 24) /* FCoE CRC32 */
852 #define NETIF_F_SCTP_CSUM	(1 << 25) /* SCTP checksum offload */
853 #define NETIF_F_FCOE_MTU	(1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
854 #define NETIF_F_NTUPLE		(1 << 27) /* N-tuple filters supported */
855 #define NETIF_F_RXHASH		(1 << 28) /* Receive hashing offload */
856 
857 	/* Segmentation offload features */
858 #define NETIF_F_GSO_SHIFT	16
859 #define NETIF_F_GSO_MASK	0x00ff0000
860 #define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
861 #define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
862 #define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
863 #define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
864 #define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
865 #define NETIF_F_FSO		(SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
866 
867 	/* List of features with software fallbacks. */
868 #define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | \
869 				 NETIF_F_TSO6 | NETIF_F_UFO)
870 
871 
872 #define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
873 #define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
874 #define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
875 #define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
876 
877 	/*
878 	 * If one device supports one of these features, then enable them
879 	 * for all in netdev_increment_features.
880 	 */
881 #define NETIF_F_ONE_FOR_ALL	(NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
882 				 NETIF_F_SG | NETIF_F_HIGHDMA |		\
883 				 NETIF_F_FRAGLIST)
884 
885 	/* Interface index. Unique device identifier	*/
886 	int			ifindex;
887 	int			iflink;
888 
889 	union {
890 		struct rtnl_link_stats64 stats64;
891 		struct net_device_stats stats;
892 	};
893 
894 #ifdef CONFIG_WIRELESS_EXT
895 	/* List of functions to handle Wireless Extensions (instead of ioctl).
896 	 * See <net/iw_handler.h> for details. Jean II */
897 	const struct iw_handler_def *	wireless_handlers;
898 	/* Instance data managed by the core of Wireless Extensions. */
899 	struct iw_public_data *	wireless_data;
900 #endif
901 	/* Management operations */
902 	const struct net_device_ops *netdev_ops;
903 	const struct ethtool_ops *ethtool_ops;
904 
905 	/* Hardware header description */
906 	const struct header_ops *header_ops;
907 
908 	unsigned int		flags;	/* interface flags (a la BSD)	*/
909 	unsigned short		gflags;
910         unsigned short          priv_flags; /* Like 'flags' but invisible to userspace. */
911 	unsigned short		padded;	/* How much padding added by alloc_netdev() */
912 
913 	unsigned char		operstate; /* RFC2863 operstate */
914 	unsigned char		link_mode; /* mapping policy to operstate */
915 
916 	unsigned int		mtu;	/* interface MTU value		*/
917 	unsigned short		type;	/* interface hardware type	*/
918 	unsigned short		hard_header_len;	/* hardware hdr length	*/
919 
920 	/* extra head- and tailroom the hardware may need, but not in all cases
921 	 * can this be guaranteed, especially tailroom. Some cases also use
922 	 * LL_MAX_HEADER instead to allocate the skb.
923 	 */
924 	unsigned short		needed_headroom;
925 	unsigned short		needed_tailroom;
926 
927 	struct net_device	*master; /* Pointer to master device of a group,
928 					  * which this device is member of.
929 					  */
930 
931 	/* Interface address info. */
932 	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
933 	unsigned char		addr_len;	/* hardware address length	*/
934 	unsigned short          dev_id;		/* for shared network cards */
935 
936 	spinlock_t		addr_list_lock;
937 	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
938 	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
939 	int			uc_promisc;
940 	unsigned int		promiscuity;
941 	unsigned int		allmulti;
942 
943 
944 	/* Protocol specific pointers */
945 
946 #ifdef CONFIG_NET_DSA
947 	void			*dsa_ptr;	/* dsa specific data */
948 #endif
949 	void 			*atalk_ptr;	/* AppleTalk link 	*/
950 	void			*ip_ptr;	/* IPv4 specific data	*/
951 	void                    *dn_ptr;        /* DECnet specific data */
952 	void                    *ip6_ptr;       /* IPv6 specific data */
953 	void			*ec_ptr;	/* Econet specific data	*/
954 	void			*ax25_ptr;	/* AX.25 specific data */
955 	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
956 						   assign before registering */
957 
958 /*
959  * Cache line mostly used on receive path (including eth_type_trans())
960  */
961 	unsigned long		last_rx;	/* Time of last Rx	*/
962 	/* Interface address info used in eth_type_trans() */
963 	unsigned char		*dev_addr;	/* hw address, (before bcast
964 						   because most packets are
965 						   unicast) */
966 
967 	struct netdev_hw_addr_list	dev_addrs; /* list of device
968 						      hw addresses */
969 
970 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
971 
972 #ifdef CONFIG_RPS
973 	struct kset		*queues_kset;
974 
975 	struct netdev_rx_queue	*_rx;
976 
977 	/* Number of RX queues allocated at alloc_netdev_mq() time  */
978 	unsigned int		num_rx_queues;
979 #endif
980 
981 	struct netdev_queue	rx_queue;
982 	rx_handler_func_t	*rx_handler;
983 	void			*rx_handler_data;
984 
985 	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
986 
987 	/* Number of TX queues allocated at alloc_netdev_mq() time  */
988 	unsigned int		num_tx_queues;
989 
990 	/* Number of TX queues currently active in device  */
991 	unsigned int		real_num_tx_queues;
992 
993 	/* root qdisc from userspace point of view */
994 	struct Qdisc		*qdisc;
995 
996 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
997 	spinlock_t		tx_global_lock;
998 /*
999  * One part is mostly used on xmit path (device)
1000  */
1001 	/* These may be needed for future network-power-down code. */
1002 
1003 	/*
1004 	 * trans_start here is expensive for high speed devices on SMP,
1005 	 * please use netdev_queue->trans_start instead.
1006 	 */
1007 	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/
1008 
1009 	int			watchdog_timeo; /* used by dev_watchdog() */
1010 	struct timer_list	watchdog_timer;
1011 
1012 	/* Number of references to this device */
1013 	atomic_t		refcnt ____cacheline_aligned_in_smp;
1014 
1015 	/* delayed register/unregister */
1016 	struct list_head	todo_list;
1017 	/* device index hash chain */
1018 	struct hlist_node	index_hlist;
1019 
1020 	struct list_head	link_watch_list;
1021 
1022 	/* register/unregister state machine */
1023 	enum { NETREG_UNINITIALIZED=0,
1024 	       NETREG_REGISTERED,	/* completed register_netdevice */
1025 	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
1026 	       NETREG_UNREGISTERED,	/* completed unregister todo */
1027 	       NETREG_RELEASED,		/* called free_netdev */
1028 	       NETREG_DUMMY,		/* dummy device for NAPI poll */
1029 	} reg_state:16;
1030 
1031 	enum {
1032 		RTNL_LINK_INITIALIZED,
1033 		RTNL_LINK_INITIALIZING,
1034 	} rtnl_link_state:16;
1035 
1036 	/* Called from unregister, can be used to call free_netdev */
1037 	void (*destructor)(struct net_device *dev);
1038 
1039 #ifdef CONFIG_NETPOLL
1040 	struct netpoll_info	*npinfo;
1041 #endif
1042 
1043 #ifdef CONFIG_NET_NS
1044 	/* Network namespace this network device is inside */
1045 	struct net		*nd_net;
1046 #endif
1047 
1048 	/* mid-layer private */
1049 	void			*ml_priv;
1050 
1051 	/* GARP */
1052 	struct garp_port	*garp_port;
1053 
1054 	/* class/net/name entry */
1055 	struct device		dev;
1056 	/* space for optional device, statistics, and wireless sysfs groups */
1057 	const struct attribute_group *sysfs_groups[4];
1058 
1059 	/* rtnetlink link ops */
1060 	const struct rtnl_link_ops *rtnl_link_ops;
1061 
1062 	/* VLAN feature mask */
1063 	unsigned long vlan_features;
1064 
1065 	/* for setting kernel sock attribute on TCP connection setup */
1066 #define GSO_MAX_SIZE		65536
1067 	unsigned int		gso_max_size;
1068 
1069 #ifdef CONFIG_DCB
1070 	/* Data Center Bridging netlink ops */
1071 	const struct dcbnl_rtnl_ops *dcbnl_ops;
1072 #endif
1073 
1074 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
1075 	/* max exchange id for FCoE LRO by ddp */
1076 	unsigned int		fcoe_ddp_xid;
1077 #endif
1078 	/* n-tuple filter list attached to this device */
1079 	struct ethtool_rx_ntuple_list ethtool_ntuple_list;
1080 };
1081 #define to_net_dev(d) container_of(d, struct net_device, dev)
1082 
1083 #define	NETDEV_ALIGN		32
1084 
1085 static inline
1086 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1087 					 unsigned int index)
1088 {
1089 	return &dev->_tx[index];
1090 }
1091 
1092 static inline void netdev_for_each_tx_queue(struct net_device *dev,
1093 					    void (*f)(struct net_device *,
1094 						      struct netdev_queue *,
1095 						      void *),
1096 					    void *arg)
1097 {
1098 	unsigned int i;
1099 
1100 	for (i = 0; i < dev->num_tx_queues; i++)
1101 		f(dev, &dev->_tx[i], arg);
1102 }
1103 
1104 /*
1105  * Net namespace inlines
1106  */
1107 static inline
1108 struct net *dev_net(const struct net_device *dev)
1109 {
1110 	return read_pnet(&dev->nd_net);
1111 }
1112 
1113 static inline
1114 void dev_net_set(struct net_device *dev, struct net *net)
1115 {
1116 #ifdef CONFIG_NET_NS
1117 	release_net(dev->nd_net);
1118 	dev->nd_net = hold_net(net);
1119 #endif
1120 }
1121 
1122 static inline bool netdev_uses_dsa_tags(struct net_device *dev)
1123 {
1124 #ifdef CONFIG_NET_DSA_TAG_DSA
1125 	if (dev->dsa_ptr != NULL)
1126 		return dsa_uses_dsa_tags(dev->dsa_ptr);
1127 #endif
1128 
1129 	return 0;
1130 }
1131 
1132 #ifndef CONFIG_NET_NS
1133 static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1134 {
1135 	skb->dev = dev;
1136 }
1137 #else /* CONFIG_NET_NS */
1138 void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
1139 #endif
1140 
1141 static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1142 {
1143 #ifdef CONFIG_NET_DSA_TAG_TRAILER
1144 	if (dev->dsa_ptr != NULL)
1145 		return dsa_uses_trailer_tags(dev->dsa_ptr);
1146 #endif
1147 
1148 	return 0;
1149 }
1150 
1151 /**
1152  *	netdev_priv - access network device private data
1153  *	@dev: network device
1154  *
1155  * Get network device private data
1156  */
1157 static inline void *netdev_priv(const struct net_device *dev)
1158 {
1159 	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
1160 }
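
/*
 * For example (sketch): a driver that allocated its device with
 * alloc_netdev(sizeof(struct my_priv), ...) gets its private area back
 * with ("struct my_priv" being the driver's own type):
 *
 *	struct my_priv *priv = netdev_priv(dev);
 */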
1161 
1162 /* Set the sysfs physical device reference for the network logical device;
1163  * if set prior to registration, it will cause a symlink during initialization.
1164  */
1165 #define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
1166 
1167 /* Set the sysfs device type for the network logical device to allow
1168  * fine-grained identification of different network device types. For
1169  * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
1170  */
1171 #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
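
/*
 * Usage sketch (hypothetical PCI probe): set the parent device before
 * registration so the sysfs "device" symlink is created:
 *
 *	SET_NETDEV_DEV(netdev, &pdev->dev);
 *	err = register_netdev(netdev);
 */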
1172 
1173 /**
1174  *	netif_napi_add - initialize a napi context
1175  *	@dev:  network device
1176  *	@napi: napi context
1177  *	@poll: polling function
1178  *	@weight: default weight
1179  *
1180  * netif_napi_add() must be used to initialize a napi context prior to calling
1181  * *any* of the other napi related functions.
1182  */
1183 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1184 		    int (*poll)(struct napi_struct *, int), int weight);
1185 
1186 /**
1187  *  netif_napi_del - remove a napi context
1188  *  @napi: napi context
1189  *
1190  *  netif_napi_del() removes a napi context from the network device napi list
1191  */
1192 void netif_napi_del(struct napi_struct *napi);
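
/*
 * For example (sketch, assumed names): probe-time setup with the
 * conventional default weight of 64:
 *
 *	netif_napi_add(dev, &priv->napi, my_poll, 64);
 *
 * with a matching netif_napi_del(&priv->napi) on teardown.
 */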
1193 
1194 struct napi_gro_cb {
1195 	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
1196 	void *frag0;
1197 
1198 	/* Length of frag0. */
1199 	unsigned int frag0_len;
1200 
1201 	/* This indicates where we are processing relative to skb->data. */
1202 	int data_offset;
1203 
1204 	/* This is non-zero if the packet may be of the same flow. */
1205 	int same_flow;
1206 
1207 	/* This is non-zero if the packet cannot be merged with the new skb. */
1208 	int flush;
1209 
1210 	/* Number of segments aggregated. */
1211 	int count;
1212 
1213 	/* Free the skb? */
1214 	int free;
1215 };
1216 
1217 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
1218 
1219 struct packet_type {
1220 	__be16			type;	/* This is really htons(ether_type). */
1221 	struct net_device	*dev;	/* NULL is wildcarded here	     */
1222 	int			(*func) (struct sk_buff *,
1223 					 struct net_device *,
1224 					 struct packet_type *,
1225 					 struct net_device *);
1226 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
1227 						int features);
1228 	int			(*gso_send_check)(struct sk_buff *skb);
1229 	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
1230 					       struct sk_buff *skb);
1231 	int			(*gro_complete)(struct sk_buff *skb);
1232 	void			*af_packet_priv;
1233 	struct list_head	list;
1234 };
1235 
1236 #include <linux/interrupt.h>
1237 #include <linux/notifier.h>
1238 
1239 extern rwlock_t				dev_base_lock;		/* Device list lock */
1240 
1241 
1242 #define for_each_netdev(net, d)		\
1243 		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
1244 #define for_each_netdev_reverse(net, d)	\
1245 		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
1246 #define for_each_netdev_rcu(net, d)		\
1247 		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
1248 #define for_each_netdev_safe(net, d, n)	\
1249 		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
1250 #define for_each_netdev_continue(net, d)		\
1251 		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
1252 #define for_each_netdev_continue_rcu(net, d)		\
1253 	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
1254 #define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
1255 
1256 static inline struct net_device *next_net_device(struct net_device *dev)
1257 {
1258 	struct list_head *lh;
1259 	struct net *net;
1260 
1261 	net = dev_net(dev);
1262 	lh = dev->dev_list.next;
1263 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1264 }
1265 
1266 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
1267 {
1268 	struct list_head *lh;
1269 	struct net *net;
1270 
1271 	net = dev_net(dev);
1272 	lh = rcu_dereference(dev->dev_list.next);
1273 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1274 }
1275 
1276 static inline struct net_device *first_net_device(struct net *net)
1277 {
1278 	return list_empty(&net->dev_base_head) ? NULL :
1279 		net_device_entry(net->dev_base_head.next);
1280 }
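
/*
 * Iteration sketch: the plain iterators must run with the RTNL or
 * dev_base_lock held; the _rcu variants only need rcu_read_lock().
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(&init_net, dev)
 *		printk(KERN_DEBUG "considering %s\n", dev->name);
 *	rcu_read_unlock();
 */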
1281 
1282 extern int 			netdev_boot_setup_check(struct net_device *dev);
1283 extern unsigned long		netdev_boot_base(const char *prefix, int unit);
1284 extern struct net_device    *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
1285 extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1286 extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
1287 extern void		dev_add_pack(struct packet_type *pt);
1288 extern void		dev_remove_pack(struct packet_type *pt);
1289 extern void		__dev_remove_pack(struct packet_type *pt);
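
/*
 * Registration sketch (hypothetical tap): a protocol handler for every
 * Ethernet type.  "my_rcv" is an assumed function matching the
 * packet_type func signature.
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);	// typically in module init
 *	dev_remove_pack(&my_ptype);	// and in module exit
 */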
1290 
1291 extern struct net_device	*dev_get_by_flags_rcu(struct net *net, unsigned short flags,
1292 						      unsigned short mask);
1293 extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
1294 extern struct net_device	*dev_get_by_name_rcu(struct net *net, const char *name);
1295 extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
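
/*
 * Note: dev_get_by_name() and dev_get_by_index() take a reference on the
 * returned device which the caller must release with dev_put(); the
 * __dev_get_by_* variants do not, and rely on the caller holding the
 * RTNL or dev_base_lock instead.  Sketch:
 *
 *	dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		// ...use dev...
 *		dev_put(dev);
 *	}
 */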
1296 extern int		dev_alloc_name(struct net_device *dev, const char *name);
1297 extern int		dev_open(struct net_device *dev);
1298 extern int		dev_close(struct net_device *dev);
1299 extern void		dev_disable_lro(struct net_device *dev);
1300 extern int		dev_queue_xmit(struct sk_buff *skb);
1301 extern int		register_netdevice(struct net_device *dev);
1302 extern void		unregister_netdevice_queue(struct net_device *dev,
1303 						   struct list_head *head);
1304 extern void		unregister_netdevice_many(struct list_head *head);
1305 static inline void unregister_netdevice(struct net_device *dev)
1306 {
1307 	unregister_netdevice_queue(dev, NULL);
1308 }
1309 
1310 extern void		free_netdev(struct net_device *dev);
1311 extern void		synchronize_net(void);
1312 extern int 		register_netdevice_notifier(struct notifier_block *nb);
1313 extern int		unregister_netdevice_notifier(struct notifier_block *nb);
1314 extern int		init_dummy_netdev(struct net_device *dev);
1315 extern void		netdev_resync_ops(struct net_device *dev);
1316 
1317 extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
1318 extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
1319 extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
1320 extern struct net_device	*dev_get_by_index_rcu(struct net *net, int ifindex);
1321 extern int		dev_restart(struct net_device *dev);
1322 #ifdef CONFIG_NETPOLL_TRAP
1323 extern int		netpoll_trap(void);
1324 #endif
1325 extern int	       skb_gro_receive(struct sk_buff **head,
1326 				       struct sk_buff *skb);
1327 extern void	       skb_gro_reset_offset(struct sk_buff *skb);
1328 
1329 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
1330 {
1331 	return NAPI_GRO_CB(skb)->data_offset;
1332 }
1333 
1334 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
1335 {
1336 	return skb->len - NAPI_GRO_CB(skb)->data_offset;
1337 }
1338 
1339 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
1340 {
1341 	NAPI_GRO_CB(skb)->data_offset += len;
1342 }
1343 
1344 static inline void *skb_gro_header_fast(struct sk_buff *skb,
1345 					unsigned int offset)
1346 {
1347 	return NAPI_GRO_CB(skb)->frag0 + offset;
1348 }
1349 
1350 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
1351 {
1352 	return NAPI_GRO_CB(skb)->frag0_len < hlen;
1353 }
1354 
1355 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
1356 					unsigned int offset)
1357 {
1358 	NAPI_GRO_CB(skb)->frag0 = NULL;
1359 	NAPI_GRO_CB(skb)->frag0_len = 0;
1360 	return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
1361 }
1362 
1363 static inline void *skb_gro_mac_header(struct sk_buff *skb)
1364 {
1365 	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
1366 }
1367 
1368 static inline void *skb_gro_network_header(struct sk_buff *skb)
1369 {
1370 	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
1371 	       skb_network_offset(skb);
1372 }
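
/*
 * Sketch of the header-access pattern in a protocol's gro_receive
 * callback, modelled on the IPv4 handler:
 *
 *	struct iphdr *iph;
 *	unsigned int off = skb_gro_offset(skb);
 *	unsigned int hlen = off + sizeof(*iph);
 *
 *	iph = skb_gro_header_fast(skb, off);
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		iph = skb_gro_header_slow(skb, hlen, off);
 *		if (unlikely(!iph))
 *			goto out;
 *	}
 */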
1373 
1374 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
1375 				  unsigned short type,
1376 				  const void *daddr, const void *saddr,
1377 				  unsigned len)
1378 {
1379 	if (!dev->header_ops || !dev->header_ops->create)
1380 		return 0;
1381 
1382 	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
1383 }
1384 
1385 static inline int dev_parse_header(const struct sk_buff *skb,
1386 				   unsigned char *haddr)
1387 {
1388 	const struct net_device *dev = skb->dev;
1389 
1390 	if (!dev->header_ops || !dev->header_ops->parse)
1391 		return 0;
1392 	return dev->header_ops->parse(skb, haddr);
1393 }
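
/*
 * For example (sketch): pushing a link-layer header onto an skb that was
 * reserved with LL_RESERVED_SPACE(); on Ethernet this ends up calling
 * eth_header():
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP, dest_hw, src_hw, skb->len) < 0)
 *		goto fail;
 */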
1394 
1395 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1396 extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf);
1397 static inline int unregister_gifconf(unsigned int family)
1398 {
1399 	return register_gifconf(family, NULL);
1400 }
1401 
1402 /*
1403  * Incoming packets are placed on per-cpu queues
1404  */
1405 struct softnet_data {
1406 	struct Qdisc		*output_queue;
1407 	struct Qdisc		**output_queue_tailp;
1408 	struct list_head	poll_list;
1409 	struct sk_buff		*completion_queue;
1410 	struct sk_buff_head	process_queue;
1411 
1412 	/* stats */
1413 	unsigned int		processed;
1414 	unsigned int		time_squeeze;
1415 	unsigned int		cpu_collision;
1416 	unsigned int		received_rps;
1417 
1418 #ifdef CONFIG_RPS
1419 	struct softnet_data	*rps_ipi_list;
1420 
1421 	/* Elements below can be accessed between CPUs for RPS */
1422 	struct call_single_data	csd ____cacheline_aligned_in_smp;
1423 	struct softnet_data	*rps_ipi_next;
1424 	unsigned int		cpu;
1425 	unsigned int		input_queue_head;
1426 	unsigned int		input_queue_tail;
1427 #endif
1428 	unsigned		dropped;
1429 	struct sk_buff_head	input_pkt_queue;
1430 	struct napi_struct	backlog;
1431 };
1432 
1433 static inline void input_queue_head_incr(struct softnet_data *sd)
1434 {
1435 #ifdef CONFIG_RPS
1436 	sd->input_queue_head++;
1437 #endif
1438 }
1439 
1440 static inline void input_queue_tail_incr_save(struct softnet_data *sd,
1441 					      unsigned int *qtail)
1442 {
1443 #ifdef CONFIG_RPS
1444 	*qtail = ++sd->input_queue_tail;
1445 #endif
1446 }
1447 
1448 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
1449 
1450 #define HAVE_NETIF_QUEUE
1451 
1452 extern void __netif_schedule(struct Qdisc *q);
1453 
1454 static inline void netif_schedule_queue(struct netdev_queue *txq)
1455 {
1456 	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
1457 		__netif_schedule(txq->qdisc);
1458 }
1459 
1460 static inline void netif_tx_schedule_all(struct net_device *dev)
1461 {
1462 	unsigned int i;
1463 
1464 	for (i = 0; i < dev->num_tx_queues; i++)
1465 		netif_schedule_queue(netdev_get_tx_queue(dev, i));
1466 }
1467 
1468 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
1469 {
1470 	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
1471 }
1472 
1473 /**
1474  *	netif_start_queue - allow transmit
1475  *	@dev: network device
1476  *
1477  *	Allow upper layers to call the device hard_start_xmit routine.
1478  */
1479 static inline void netif_start_queue(struct net_device *dev)
1480 {
1481 	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
1482 }
1483 
1484 static inline void netif_tx_start_all_queues(struct net_device *dev)
1485 {
1486 	unsigned int i;
1487 
1488 	for (i = 0; i < dev->num_tx_queues; i++) {
1489 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1490 		netif_tx_start_queue(txq);
1491 	}
1492 }
1493 
1494 static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
1495 {
1496 #ifdef CONFIG_NETPOLL_TRAP
1497 	if (netpoll_trap()) {
1498 		netif_tx_start_queue(dev_queue);
1499 		return;
1500 	}
1501 #endif
1502 	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
1503 		__netif_schedule(dev_queue->qdisc);
1504 }
1505 
1506 /**
1507  *	netif_wake_queue - restart transmit
1508  *	@dev: network device
1509  *
1510  *	Allow upper layers to call the device hard_start_xmit routine.
1511  *	Used for flow control when transmit resources are available.
1512  */
1513 static inline void netif_wake_queue(struct net_device *dev)
1514 {
1515 	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
1516 }
1517 
1518 static inline void netif_tx_wake_all_queues(struct net_device *dev)
1519 {
1520 	unsigned int i;
1521 
1522 	for (i = 0; i < dev->num_tx_queues; i++) {
1523 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1524 		netif_tx_wake_queue(txq);
1525 	}
1526 }
1527 
1528 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
1529 {
1530 	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
1531 }
1532 
1533 /**
1534  *	netif_stop_queue - stop the transmit queue
1535  *	@dev: network device
1536  *
1537  *	Stop upper layers calling the device hard_start_xmit routine.
1538  *	Used for flow control when transmit resources are unavailable.
1539  */
1540 static inline void netif_stop_queue(struct net_device *dev)
1541 {
1542 	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
1543 }
1544 
1545 static inline void netif_tx_stop_all_queues(struct net_device *dev)
1546 {
1547 	unsigned int i;
1548 
1549 	for (i = 0; i < dev->num_tx_queues; i++) {
1550 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1551 		netif_tx_stop_queue(txq);
1552 	}
1553 }
1554 
1555 static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
1556 {
1557 	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
1558 }
1559 
1560 /**
1561  *	netif_queue_stopped - test if transmit queue is flow-blocked
1562  *	@dev: network device
1563  *
1564  *	Test if transmit queue on device is currently unable to send.
1565  */
1566 static inline int netif_queue_stopped(const struct net_device *dev)
1567 {
1568 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
1569 }
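
/*
 * Flow-control pattern (hypothetical driver sketch; "my_*" names are
 * assumptions): stop the queue from ndo_start_xmit when the TX ring
 * fills, wake it from the TX-completion path once descriptors are
 * reclaimed.
 *
 *	// in my_start_xmit(), after posting the descriptor:
 *	if (my_tx_ring_full(priv))
 *		netif_stop_queue(dev);
 *
 *	// in the TX-completion handler:
 *	if (netif_queue_stopped(dev) && my_tx_ring_has_room(priv))
 *		netif_wake_queue(dev);
 */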
1570 
1571 static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
1572 {
1573 	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
1574 }
1575 
1576 /**
1577  *	netif_running - test if up
1578  *	@dev: network device
1579  *
1580  *	Test if the device has been brought up.
1581  */
1582 static inline int netif_running(const struct net_device *dev)
1583 {
1584 	return test_bit(__LINK_STATE_START, &dev->state);
1585 }
1586 
1587 /*
1588  * Routines to manage the subqueues on a device.  We only need start,
1589  * stop, and a check if it's stopped.  All other device management is
1590  * done at the overall netdevice level.
1591  * There is also a test for whether the device is multiqueue.
1592  */
1593 
1594 /**
1595  *	netif_start_subqueue - allow sending packets on subqueue
1596  *	@dev: network device
1597  *	@queue_index: sub queue index
1598  *
1599  * Start individual transmit queue of a device with multiple transmit queues.
1600  */
1601 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
1602 {
1603 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
1604 
1605 	netif_tx_start_queue(txq);
1606 }
1607 
1608 /**
1609  *	netif_stop_subqueue - stop sending packets on subqueue
1610  *	@dev: network device
1611  *	@queue_index: sub queue index
1612  *
1613  * Stop individual transmit queue of a device with multiple transmit queues.
1614  */
1615 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
1616 {
1617 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
1618 #ifdef CONFIG_NETPOLL_TRAP
1619 	if (netpoll_trap())
1620 		return;
1621 #endif
1622 	netif_tx_stop_queue(txq);
1623 }
1624 
1625 /**
1626  *	__netif_subqueue_stopped - test status of subqueue
1627  *	@dev: network device
1628  *	@queue_index: sub queue index
1629  *
1630  * Check individual transmit queue of a device with multiple transmit queues.
1631  */
1632 static inline int __netif_subqueue_stopped(const struct net_device *dev,
1633 					 u16 queue_index)
1634 {
1635 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
1636 
1637 	return netif_tx_queue_stopped(txq);
1638 }
1639 
1640 static inline int netif_subqueue_stopped(const struct net_device *dev,
1641 					 struct sk_buff *skb)
1642 {
1643 	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
1644 }
1645 
1646 /**
1647  *	netif_wake_subqueue - allow sending packets on subqueue
1648  *	@dev: network device
1649  *	@queue_index: sub queue index
1650  *
1651  * Resume individual transmit queue of a device with multiple transmit queues.
1652  */
1653 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
1654 {
1655 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
1656 #ifdef CONFIG_NETPOLL_TRAP
1657 	if (netpoll_trap())
1658 		return;
1659 #endif
1660 	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
1661 		__netif_schedule(txq->qdisc);
1662 }
1663 
1664 /**
1665  *	netif_is_multiqueue - test if device has multiple transmit queues
1666  *	@dev: network device
1667  *
1668  * Check if device has multiple transmit queues
1669  */
1670 static inline int netif_is_multiqueue(const struct net_device *dev)
1671 {
1672 	return (dev->num_tx_queues > 1);
1673 }
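
/*
 * Illustrative sketch (not part of this header): multiqueue drivers
 * apply the same stop/wake flow control per TX ring, keyed by the
 * queue mapping recorded in the skb.  "my_ring_full()" is hypothetical.
 *
 *	u16 q = skb_get_queue_mapping(skb);
 *
 *	if (my_ring_full(priv, q))
 *		netif_stop_subqueue(dev, q);
 *	...
 *	netif_wake_subqueue(dev, q);	(from that ring's TX completion)
 */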
1674 
1675 /* Use this variant when it is known for sure that the caller
1676  * is executing from hardware interrupt context or with hardware interrupts
1677  * disabled.
1678  */
1679 extern void dev_kfree_skb_irq(struct sk_buff *skb);
1680 
1681 /* Use this variant in places where it could be invoked
1682  * from either hardware interrupt or other context, with hardware interrupts
1683  * either disabled or enabled.
1684  */
1685 extern void dev_kfree_skb_any(struct sk_buff *skb);
1686 
1687 #define HAVE_NETIF_RX 1
1688 extern int		netif_rx(struct sk_buff *skb);
1689 extern int		netif_rx_ni(struct sk_buff *skb);
1690 #define HAVE_NETIF_RECEIVE_SKB 1
1691 extern int		netif_receive_skb(struct sk_buff *skb);
1692 extern gro_result_t	dev_gro_receive(struct napi_struct *napi,
1693 					struct sk_buff *skb);
1694 extern gro_result_t	napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
1695 extern gro_result_t	napi_gro_receive(struct napi_struct *napi,
1696 					 struct sk_buff *skb);
1697 extern void		napi_reuse_skb(struct napi_struct *napi,
1698 				       struct sk_buff *skb);
1699 extern struct sk_buff *	napi_get_frags(struct napi_struct *napi);
1700 extern gro_result_t	napi_frags_finish(struct napi_struct *napi,
1701 					  struct sk_buff *skb,
1702 					  gro_result_t ret);
1703 extern struct sk_buff *	napi_frags_skb(struct napi_struct *napi);
1704 extern gro_result_t	napi_gro_frags(struct napi_struct *napi);
1705 
1706 static inline void napi_free_frags(struct napi_struct *napi)
1707 {
1708 	kfree_skb(napi->skb);
1709 	napi->skb = NULL;
1710 }
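
/*
 * Illustrative sketch (not part of this header): a NAPI poll routine
 * normally hands received skbs to the GRO layer with napi_gro_receive()
 * rather than calling netif_receive_skb() directly.  "my_rx_one()" is
 * hypothetical.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = my_rx_one(napi)) != NULL) {
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */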
1711 
1712 extern int netdev_rx_handler_register(struct net_device *dev,
1713 				      rx_handler_func_t *rx_handler,
1714 				      void *rx_handler_data);
1715 extern void netdev_rx_handler_unregister(struct net_device *dev);
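
/*
 * Illustrative sketch (not part of this header): an upper-layer driver
 * (bridge/bonding style) claims a port's ingress path by registering an
 * rx handler; the caller must hold rtnl_lock().  "my_handler" and
 * "my_port" are hypothetical.
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(port_dev, my_handler, my_port);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	netdev_rx_handler_unregister(port_dev);
 *	rtnl_unlock();
 */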
1716 
1717 extern void		netif_nit_deliver(struct sk_buff *skb);
1718 extern int		dev_valid_name(const char *name);
1719 extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
1720 extern int		dev_ethtool(struct net *net, struct ifreq *);
1721 extern unsigned		dev_get_flags(const struct net_device *);
1722 extern int		__dev_change_flags(struct net_device *, unsigned int flags);
1723 extern int		dev_change_flags(struct net_device *, unsigned);
1724 extern void		__dev_notify_flags(struct net_device *, unsigned int old_flags);
1725 extern int		dev_change_name(struct net_device *, const char *);
1726 extern int		dev_set_alias(struct net_device *, const char *, size_t);
1727 extern int		dev_change_net_namespace(struct net_device *,
1728 						 struct net *, const char *);
1729 extern int		dev_set_mtu(struct net_device *, int);
1730 extern int		dev_set_mac_address(struct net_device *,
1731 					    struct sockaddr *);
1732 extern int		dev_hard_start_xmit(struct sk_buff *skb,
1733 					    struct net_device *dev,
1734 					    struct netdev_queue *txq);
1735 extern int		dev_forward_skb(struct net_device *dev,
1736 					struct sk_buff *skb);
1737 
1738 extern int		netdev_budget;
1739 
1740 /* Called by rtnetlink.c:rtnl_unlock() */
1741 extern void netdev_run_todo(void);
1742 
1743 /**
1744  *	dev_put - release reference to device
1745  *	@dev: network device
1746  *
1747  * Release reference to device to allow it to be freed.
1748  */
1749 static inline void dev_put(struct net_device *dev)
1750 {
1751 	atomic_dec(&dev->refcnt);
1752 }
1753 
1754 /**
1755  *	dev_hold - get reference to device
1756  *	@dev: network device
1757  *
1758  * Hold reference to device to keep it from being freed.
1759  */
1760 static inline void dev_hold(struct net_device *dev)
1761 {
1762 	atomic_inc(&dev->refcnt);
1763 }
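
/*
 * Illustrative sketch (not part of this header): any code that stashes
 * a net_device pointer beyond its RCU or RTNL critical section must
 * balance dev_hold() with dev_put(); otherwise unregistration blocks
 * waiting for the reference count to drop.
 *
 *	dev_hold(dev);		(before handing dev to deferred work)
 *	...
 *	dev_put(dev);		(once that work is finished with it)
 */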
1764 
1765 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
1766  * and _off may be called from IRQ context, but it is the caller
1767  * who is responsible for serializing these calls.
1768  *
1769  * The name carrier is inappropriate, these functions should really be
1770  * called netif_lowerlayer_*() because they represent the state of any
1771  * kind of lower layer not just hardware media.
1772  */
1773 
1774 extern void linkwatch_fire_event(struct net_device *dev);
1775 extern void linkwatch_forget_dev(struct net_device *dev);
1776 
1777 /**
1778  *	netif_carrier_ok - test if carrier present
1779  *	@dev: network device
1780  *
1781  * Check if carrier is present on device
1782  */
1783 static inline int netif_carrier_ok(const struct net_device *dev)
1784 {
1785 	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
1786 }
1787 
1788 extern unsigned long dev_trans_start(struct net_device *dev);
1789 
1790 extern void __netdev_watchdog_up(struct net_device *dev);
1791 
1792 extern void netif_carrier_on(struct net_device *dev);
1793 
1794 extern void netif_carrier_off(struct net_device *dev);
1795 
1796 extern void netif_notify_peers(struct net_device *dev);
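
/*
 * Illustrative sketch (not part of this header): a link-state interrupt
 * usually just mirrors PHY status into the carrier flag and lets the
 * linkwatch machinery do the rest.  "my_phy_link_up()" is hypothetical.
 *
 *	if (my_phy_link_up(priv)) {
 *		if (!netif_carrier_ok(dev))
 *			netif_carrier_on(dev);
 *	} else if (netif_carrier_ok(dev)) {
 *		netif_carrier_off(dev);
 *	}
 */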
1797 
1798 /**
1799  *	netif_dormant_on - mark device as dormant
1800  *	@dev: network device
1801  *
1802  * Mark device as dormant (as per RFC2863).
1803  *
1804  * The dormant state indicates that the relevant interface is not
1805  * actually in a condition to pass packets (i.e., it is not 'up') but is
1806  * in a "pending" state, waiting for some external event.  For "on-
1807  * demand" interfaces, this new state identifies the situation where the
1808  * interface is waiting for events to place it in the up state.
1809  *
1810  */
1811 static inline void netif_dormant_on(struct net_device *dev)
1812 {
1813 	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
1814 		linkwatch_fire_event(dev);
1815 }
1816 
1817 /**
1818  *	netif_dormant_off - mark device as not dormant
1819  *	@dev: network device
1820  *
1821  * Mark device as no longer dormant (as per RFC2863).
1822  */
1823 static inline void netif_dormant_off(struct net_device *dev)
1824 {
1825 	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
1826 		linkwatch_fire_event(dev);
1827 }
1828 
1829 /**
1830  *	netif_dormant - test if device is dormant
1831  *	@dev: network device
1832  *
1833  * Check if the device is dormant.
1834  */
1835 static inline int netif_dormant(const struct net_device *dev)
1836 {
1837 	return test_bit(__LINK_STATE_DORMANT, &dev->state);
1838 }
1839 
1840 
1841 /**
1842  *	netif_oper_up - test if device is operational
1843  *	@dev: network device
1844  *
1845  * Check if the device's operational state is up.
1846  */
1847 static inline int netif_oper_up(const struct net_device *dev)
1848 {
1849 	return (dev->operstate == IF_OPER_UP ||
1850 		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
1851 }
1852 
1853 /**
1854  *	netif_device_present - test if device is present
1855  *	@dev: network device
1856  *
1857  * Check if the device has not been removed from the system.
1858  */
1859 static inline int netif_device_present(struct net_device *dev)
1860 {
1861 	return test_bit(__LINK_STATE_PRESENT, &dev->state);
1862 }
1863 
1864 extern void netif_device_detach(struct net_device *dev);
1865 
1866 extern void netif_device_attach(struct net_device *dev);
1867 
1868 /*
1869  * Network interface message level settings
1870  */
1871 #define HAVE_NETIF_MSG 1
1872 
1873 enum {
1874 	NETIF_MSG_DRV		= 0x0001,
1875 	NETIF_MSG_PROBE		= 0x0002,
1876 	NETIF_MSG_LINK		= 0x0004,
1877 	NETIF_MSG_TIMER		= 0x0008,
1878 	NETIF_MSG_IFDOWN	= 0x0010,
1879 	NETIF_MSG_IFUP		= 0x0020,
1880 	NETIF_MSG_RX_ERR	= 0x0040,
1881 	NETIF_MSG_TX_ERR	= 0x0080,
1882 	NETIF_MSG_TX_QUEUED	= 0x0100,
1883 	NETIF_MSG_INTR		= 0x0200,
1884 	NETIF_MSG_TX_DONE	= 0x0400,
1885 	NETIF_MSG_RX_STATUS	= 0x0800,
1886 	NETIF_MSG_PKTDATA	= 0x1000,
1887 	NETIF_MSG_HW		= 0x2000,
1888 	NETIF_MSG_WOL		= 0x4000,
1889 };
1890 
1891 #define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
1892 #define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
1893 #define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
1894 #define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
1895 #define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
1896 #define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
1897 #define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
1898 #define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
1899 #define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
1900 #define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
1901 #define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
1902 #define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
1903 #define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
1904 #define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
1905 #define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
1906 
1907 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
1908 {
1909 	/* use default */
1910 	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
1911 		return default_msg_enable_bits;
1912 	if (debug_value == 0)	/* no output */
1913 		return 0;
1914 	/* set low N bits */
1915 	return (1 << debug_value) - 1;
1916 }
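
/*
 * Illustrative sketch (not part of this header): drivers usually feed a
 * module parameter through netif_msg_init() at probe time and then gate
 * their output on the netif_msg_*() tests.  "debug" and "priv" are
 * hypothetical.
 *
 *	static int debug = -1;	(-1 selects the driver's defaults)
 *	module_param(debug, int, 0);
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	...
 *	if (netif_msg_link(priv))
 *		netdev_info(dev, "link up\n");
 */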
1917 
1918 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
1919 {
1920 	spin_lock(&txq->_xmit_lock);
1921 	txq->xmit_lock_owner = cpu;
1922 }
1923 
1924 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
1925 {
1926 	spin_lock_bh(&txq->_xmit_lock);
1927 	txq->xmit_lock_owner = smp_processor_id();
1928 }
1929 
1930 static inline int __netif_tx_trylock(struct netdev_queue *txq)
1931 {
1932 	int ok = spin_trylock(&txq->_xmit_lock);
1933 	if (likely(ok))
1934 		txq->xmit_lock_owner = smp_processor_id();
1935 	return ok;
1936 }
1937 
1938 static inline void __netif_tx_unlock(struct netdev_queue *txq)
1939 {
1940 	txq->xmit_lock_owner = -1;
1941 	spin_unlock(&txq->_xmit_lock);
1942 }
1943 
1944 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
1945 {
1946 	txq->xmit_lock_owner = -1;
1947 	spin_unlock_bh(&txq->_xmit_lock);
1948 }
1949 
1950 static inline void txq_trans_update(struct netdev_queue *txq)
1951 {
1952 	if (txq->xmit_lock_owner != -1)
1953 		txq->trans_start = jiffies;
1954 }
1955 
1956 /**
1957  *	netif_tx_lock - grab network device transmit lock
1958  *	@dev: network device
1959  *
1960  * Get network device transmit lock
1961  */
1962 static inline void netif_tx_lock(struct net_device *dev)
1963 {
1964 	unsigned int i;
1965 	int cpu;
1966 
1967 	spin_lock(&dev->tx_global_lock);
1968 	cpu = smp_processor_id();
1969 	for (i = 0; i < dev->num_tx_queues; i++) {
1970 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1971 
1972 		/* We are the only thread of execution doing a
1973 		 * freeze, but we have to grab the _xmit_lock in
1974 		 * order to synchronize with threads which are in
1975 		 * the ->hard_start_xmit() handler and already
1976 		 * checked the frozen bit.
1977 		 */
1978 		__netif_tx_lock(txq, cpu);
1979 		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
1980 		__netif_tx_unlock(txq);
1981 	}
1982 }
1983 
1984 static inline void netif_tx_lock_bh(struct net_device *dev)
1985 {
1986 	local_bh_disable();
1987 	netif_tx_lock(dev);
1988 }
1989 
1990 static inline void netif_tx_unlock(struct net_device *dev)
1991 {
1992 	unsigned int i;
1993 
1994 	for (i = 0; i < dev->num_tx_queues; i++) {
1995 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1996 
1997 		/* No need to grab the _xmit_lock here.  If the
1998 		 * queue is not stopped for another reason, we
1999 		 * force a schedule.
2000 		 */
2001 		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
2002 		netif_schedule_queue(txq);
2003 	}
2004 	spin_unlock(&dev->tx_global_lock);
2005 }
2006 
2007 static inline void netif_tx_unlock_bh(struct net_device *dev)
2008 {
2009 	netif_tx_unlock(dev);
2010 	local_bh_enable();
2011 }
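
/*
 * Illustrative sketch (not part of this header): netif_tx_lock() is the
 * heavyweight "freeze all TX" operation, taken around reconfiguration
 * or in a tx_timeout handler so that no CPU can be inside
 * hard_start_xmit while TX state is rewritten.  "my_reset_tx_rings()"
 * is hypothetical.
 *
 *	netif_tx_lock_bh(dev);
 *	my_reset_tx_rings(priv);
 *	netif_tx_unlock_bh(dev);
 */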
2012 
2013 #define HARD_TX_LOCK(dev, txq, cpu) do {		\
2014 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
2015 		__netif_tx_lock(txq, cpu);		\
2016 	}						\
2017 } while (0)
2018 
2019 #define HARD_TX_UNLOCK(dev, txq) do {			\
2020 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
2021 		__netif_tx_unlock(txq);			\
2022 	}						\
2023 } while (0)
2024 
2025 static inline void netif_tx_disable(struct net_device *dev)
2026 {
2027 	unsigned int i;
2028 	int cpu;
2029 
2030 	local_bh_disable();
2031 	cpu = smp_processor_id();
2032 	for (i = 0; i < dev->num_tx_queues; i++) {
2033 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2034 
2035 		__netif_tx_lock(txq, cpu);
2036 		netif_tx_stop_queue(txq);
2037 		__netif_tx_unlock(txq);
2038 	}
2039 	local_bh_enable();
2040 }
2041 
2042 static inline void netif_addr_lock(struct net_device *dev)
2043 {
2044 	spin_lock(&dev->addr_list_lock);
2045 }
2046 
2047 static inline void netif_addr_lock_bh(struct net_device *dev)
2048 {
2049 	spin_lock_bh(&dev->addr_list_lock);
2050 }
2051 
2052 static inline void netif_addr_unlock(struct net_device *dev)
2053 {
2054 	spin_unlock(&dev->addr_list_lock);
2055 }
2056 
2057 static inline void netif_addr_unlock_bh(struct net_device *dev)
2058 {
2059 	spin_unlock_bh(&dev->addr_list_lock);
2060 }
2061 
2062 /*
2063  * dev_addrs walker. Should be used only for read access. Call with
2064  * rcu_read_lock held.
2065  */
2066 #define for_each_dev_addr(dev, ha) \
2067 		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
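
/*
 * Illustrative sketch (not part of this header): walking the address
 * list under RCU; "my_program_mac_filter()" is hypothetical.
 *
 *	struct netdev_hw_addr *ha;
 *
 *	rcu_read_lock();
 *	for_each_dev_addr(dev, ha)
 *		my_program_mac_filter(priv, ha->addr);
 *	rcu_read_unlock();
 */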
2068 
2069 /* These functions live elsewhere (drivers/net/net_init.c), but are related */
2070 
2071 extern void		ether_setup(struct net_device *dev);
2072 
2073 /* Support for loadable net-drivers */
2074 extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
2075 				       void (*setup)(struct net_device *),
2076 				       unsigned int queue_count);
2077 #define alloc_netdev(sizeof_priv, name, setup) \
2078 	alloc_netdev_mq(sizeof_priv, name, setup, 1)
2079 extern int		register_netdev(struct net_device *dev);
2080 extern void		unregister_netdev(struct net_device *dev);
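
/*
 * Illustrative sketch (not part of this header): the canonical bring-up
 * and tear-down sequence with these helpers; the "my%d" name template
 * and struct my_priv are hypothetical, and error handling is
 * abbreviated.
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "my%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */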
2081 
2082 /* General hardware address lists handling functions */
2083 extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
2084 				  struct netdev_hw_addr_list *from_list,
2085 				  int addr_len, unsigned char addr_type);
2086 extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
2087 				   struct netdev_hw_addr_list *from_list,
2088 				   int addr_len, unsigned char addr_type);
2089 extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
2090 			  struct netdev_hw_addr_list *from_list,
2091 			  int addr_len);
2092 extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
2093 			     struct netdev_hw_addr_list *from_list,
2094 			     int addr_len);
2095 extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
2096 extern void __hw_addr_init(struct netdev_hw_addr_list *list);
2097 
2098 /* Functions used for device address handling */
2099 extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
2100 			unsigned char addr_type);
2101 extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
2102 			unsigned char addr_type);
2103 extern int dev_addr_add_multiple(struct net_device *to_dev,
2104 				 struct net_device *from_dev,
2105 				 unsigned char addr_type);
2106 extern int dev_addr_del_multiple(struct net_device *to_dev,
2107 				 struct net_device *from_dev,
2108 				 unsigned char addr_type);
2109 extern void dev_addr_flush(struct net_device *dev);
2110 extern int dev_addr_init(struct net_device *dev);
2111 
2112 /* Functions used for unicast address handling */
2113 extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
2114 extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
2115 extern int dev_uc_sync(struct net_device *to, struct net_device *from);
2116 extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
2117 extern void dev_uc_flush(struct net_device *dev);
2118 extern void dev_uc_init(struct net_device *dev);
2119 
2120 /* Functions used for multicast address handling */
2121 extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
2122 extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
2123 extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
2124 extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
2125 extern int dev_mc_sync(struct net_device *to, struct net_device *from);
2126 extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
2127 extern void dev_mc_flush(struct net_device *dev);
2128 extern void dev_mc_init(struct net_device *dev);
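
/*
 * Illustrative sketch (not part of this header): a software upper
 * device (vlan/macvlan style) propagates its unicast and multicast
 * lists to the underlying device from its rx-mode hook.
 * "my_get_lower()" is hypothetical.
 *
 *	static void my_set_rx_mode(struct net_device *dev)
 *	{
 *		struct net_device *lower = my_get_lower(dev);
 *
 *		dev_uc_sync(lower, dev);
 *		dev_mc_sync(lower, dev);
 *	}
 */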
2129 
2130 /* Functions used for secondary unicast and multicast support */
2131 extern void		dev_set_rx_mode(struct net_device *dev);
2132 extern void		__dev_set_rx_mode(struct net_device *dev);
2133 extern int		dev_set_promiscuity(struct net_device *dev, int inc);
2134 extern int		dev_set_allmulti(struct net_device *dev, int inc);
2135 extern void		netdev_state_change(struct net_device *dev);
2136 extern int		netdev_bonding_change(struct net_device *dev,
2137 					      unsigned long event);
2138 extern void		netdev_features_change(struct net_device *dev);
2139 /* Load a device via the kmod */
2140 extern void		dev_load(struct net *net, const char *name);
2141 extern void		dev_mcast_init(void);
2142 extern const struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev);
2143 extern void		dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats);
2144 
2145 extern int		netdev_max_backlog;
2146 extern int		netdev_tstamp_prequeue;
2147 extern int		weight_p;
2148 extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
2149 extern int skb_checksum_help(struct sk_buff *skb);
2150 extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
2151 #ifdef CONFIG_BUG
2152 extern void netdev_rx_csum_fault(struct net_device *dev);
2153 #else
2154 static inline void netdev_rx_csum_fault(struct net_device *dev)
2155 {
2156 }
2157 #endif
2158 /* rx skb timestamps */
2159 extern void		net_enable_timestamp(void);
2160 extern void		net_disable_timestamp(void);
2161 
2162 #ifdef CONFIG_PROC_FS
2163 extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
2164 extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2165 extern void dev_seq_stop(struct seq_file *seq, void *v);
2166 #endif
2167 
2168 extern int netdev_class_create_file(struct class_attribute *class_attr);
2169 extern void netdev_class_remove_file(struct class_attribute *class_attr);
2170 
2171 extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
2172 
2173 extern void linkwatch_run_queue(void);
2174 
2175 unsigned long netdev_increment_features(unsigned long all, unsigned long one,
2176 					unsigned long mask);
2177 unsigned long netdev_fix_features(unsigned long features, const char *name);
2178 
2179 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
2180 					struct net_device *dev);
2181 
2182 static inline int net_gso_ok(int features, int gso_type)
2183 {
2184 	int feature = gso_type << NETIF_F_GSO_SHIFT;
2185 	return (features & feature) == feature;
2186 }
2187 
2188 static inline int skb_gso_ok(struct sk_buff *skb, int features)
2189 {
2190 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
2191 	       (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST));
2192 }
2193 
2194 static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
2195 {
2196 	return skb_is_gso(skb) &&
2197 	       (!skb_gso_ok(skb, dev->features) ||
2198 		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
2199 }
2200 
2201 static inline void netif_set_gso_max_size(struct net_device *dev,
2202 					  unsigned int size)
2203 {
2204 	dev->gso_max_size = size;
2205 }
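
/*
 * Illustrative sketch (not part of this header): the core TX path uses
 * these tests to decide, roughly as follows, whether a GSO skb must be
 * segmented in software before the driver sees it.
 *
 *	if (netif_needs_gso(dev, skb)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, dev->features);
 *
 *		if (IS_ERR(segs))
 *			goto drop;
 *		(transmit each skb on the segs list individually)
 *	}
 */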
2206 
2207 extern int __skb_bond_should_drop(struct sk_buff *skb,
2208 				  struct net_device *master);
2209 
2210 static inline int skb_bond_should_drop(struct sk_buff *skb,
2211 				       struct net_device *master)
2212 {
2213 	if (master)
2214 		return __skb_bond_should_drop(skb, master);
2215 	return 0;
2216 }
2217 
2218 extern struct pernet_operations __net_initdata loopback_net_ops;
2219 
2220 static inline int dev_ethtool_get_settings(struct net_device *dev,
2221 					   struct ethtool_cmd *cmd)
2222 {
2223 	if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
2224 		return -EOPNOTSUPP;
2225 	return dev->ethtool_ops->get_settings(dev, cmd);
2226 }
2227 
2228 static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
2229 {
2230 	if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
2231 		return 0;
2232 	return dev->ethtool_ops->get_rx_csum(dev);
2233 }
2234 
2235 static inline u32 dev_ethtool_get_flags(struct net_device *dev)
2236 {
2237 	if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
2238 		return 0;
2239 	return dev->ethtool_ops->get_flags(dev);
2240 }
2241 
2242 /* Logging, debugging and troubleshooting/diagnostic helpers. */
2243 
2244 /* netdev_printk helpers, similar to dev_printk */
2245 
2246 static inline const char *netdev_name(const struct net_device *dev)
2247 {
2248 	if (dev->reg_state != NETREG_REGISTERED)
2249 		return "(unregistered net_device)";
2250 	return dev->name;
2251 }
2252 
2253 #define netdev_printk(level, netdev, format, args...)		\
2254 	dev_printk(level, (netdev)->dev.parent,			\
2255 		   "%s: " format,				\
2256 		   netdev_name(netdev), ##args)
2257 
2258 #define netdev_emerg(dev, format, args...)			\
2259 	netdev_printk(KERN_EMERG, dev, format, ##args)
2260 #define netdev_alert(dev, format, args...)			\
2261 	netdev_printk(KERN_ALERT, dev, format, ##args)
2262 #define netdev_crit(dev, format, args...)			\
2263 	netdev_printk(KERN_CRIT, dev, format, ##args)
2264 #define netdev_err(dev, format, args...)			\
2265 	netdev_printk(KERN_ERR, dev, format, ##args)
2266 #define netdev_warn(dev, format, args...)			\
2267 	netdev_printk(KERN_WARNING, dev, format, ##args)
2268 #define netdev_notice(dev, format, args...)			\
2269 	netdev_printk(KERN_NOTICE, dev, format, ##args)
2270 #define netdev_info(dev, format, args...)			\
2271 	netdev_printk(KERN_INFO, dev, format, ##args)
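
/*
 * Illustrative sketch (not part of this header): these helpers prefix
 * each message with the device name, so a driver writes
 *
 *	netdev_warn(dev, "TX ring %u stalled\n", ring);
 *
 * instead of open-coding printk(KERN_WARNING "%s: ...", dev->name, ...).
 */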
2272 
2273 #if defined(DEBUG)
2274 #define netdev_dbg(__dev, format, args...)			\
2275 	netdev_printk(KERN_DEBUG, __dev, format, ##args)
2276 #elif defined(CONFIG_DYNAMIC_DEBUG)
2277 #define netdev_dbg(__dev, format, args...)			\
2278 do {								\
2279 	dynamic_dev_dbg((__dev)->dev.parent, "%s: " format,	\
2280 			netdev_name(__dev), ##args);		\
2281 } while (0)
2282 #else
2283 #define netdev_dbg(__dev, format, args...)			\
2284 ({								\
2285 	if (0)							\
2286 		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
2287 	0;							\
2288 })
2289 #endif
2290 
2291 #if defined(VERBOSE_DEBUG)
2292 #define netdev_vdbg	netdev_dbg
2293 #else
2294 
2295 #define netdev_vdbg(dev, format, args...)			\
2296 ({								\
2297 	if (0)							\
2298 		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
2299 	0;							\
2300 })
2301 #endif
2302 
2303 /*
2304  * netdev_WARN() acts like dev_printk(), but with the key difference
2305  * of using a WARN/WARN_ON to get the message out, including the
2306  * file/line information and a backtrace.
2307  */
2308 #define netdev_WARN(dev, format, args...)			\
2309 	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
2310 
2311 /* netif printk helpers, similar to netdev_printk */
2312 
2313 #define netif_printk(priv, type, level, dev, fmt, args...)	\
2314 do {					  			\
2315 	if (netif_msg_##type(priv))				\
2316 		netdev_printk(level, (dev), fmt, ##args);	\
2317 } while (0)
2318 
2319 #define netif_emerg(priv, type, dev, fmt, args...)		\
2320 	netif_printk(priv, type, KERN_EMERG, dev, fmt, ##args)
2321 #define netif_alert(priv, type, dev, fmt, args...)		\
2322 	netif_printk(priv, type, KERN_ALERT, dev, fmt, ##args)
2323 #define netif_crit(priv, type, dev, fmt, args...)		\
2324 	netif_printk(priv, type, KERN_CRIT, dev, fmt, ##args)
2325 #define netif_err(priv, type, dev, fmt, args...)		\
2326 	netif_printk(priv, type, KERN_ERR, dev, fmt, ##args)
2327 #define netif_warn(priv, type, dev, fmt, args...)		\
2328 	netif_printk(priv, type, KERN_WARNING, dev, fmt, ##args)
2329 #define netif_notice(priv, type, dev, fmt, args...)		\
2330 	netif_printk(priv, type, KERN_NOTICE, dev, fmt, ##args)
2331 #define netif_info(priv, type, dev, fmt, args...)		\
2332 	netif_printk(priv, type, KERN_INFO, (dev), fmt, ##args)
2333 
2334 #if defined(DEBUG)
2335 #define netif_dbg(priv, type, dev, format, args...)		\
2336 	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
2337 #elif defined(CONFIG_DYNAMIC_DEBUG)
2338 #define netif_dbg(priv, type, netdev, format, args...)		\
2339 do {								\
2340 	if (netif_msg_##type(priv))				\
2341 		dynamic_dev_dbg((netdev)->dev.parent,		\
2342 				"%s: " format,			\
2343 				netdev_name(netdev), ##args);	\
2344 } while (0)
2345 #else
2346 #define netif_dbg(priv, type, dev, format, args...)			\
2347 ({									\
2348 	if (0)								\
2349 		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
2350 	0;								\
2351 })
2352 #endif
2353 
2354 #if defined(VERBOSE_DEBUG)
2355 #define netif_vdbg	netif_dbg
2356 #else
2357 #define netif_vdbg(priv, type, dev, format, args...)		\
2358 ({								\
2359 	if (0)							\
2360 		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
2361 	0;							\
2362 })
2363 #endif
2364 
2365 #endif /* __KERNEL__ */
2366 
2367 #endif	/* _LINUX_NETDEVICE_H */
2368