xref: /linux-6.15/net/core/dev.c (revision 5051c94b)
1 /*
2  * 	NET3	Protocol independent device support routines.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  *	Derived from the non IP parts of dev.c 1.0.19
10  * 		Authors:	Ross Biro
11  *				Fred N. van Kempen, <[email protected]>
12  *				Mark Evans, <[email protected]>
13  *
14  *	Additional Authors:
15  *		Florian la Roche <[email protected]>
16  *		Alan Cox <[email protected]>
17  *		David Hinds <[email protected]>
18  *		Alexey Kuznetsov <[email protected]>
19  *		Adam Sulmicki <[email protected]>
20  *              Pekka Riikonen <[email protected]>
21  *
22  *	Changes:
23  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
24  *              			to 2 if register_netdev gets called
25  *              			before net_dev_init & also removed a
26  *              			few lines of code in the process.
27  *		Alan Cox	:	device private ioctl copies fields back.
28  *		Alan Cox	:	Transmit queue code does relevant
29  *					stunts to keep the queue safe.
30  *		Alan Cox	:	Fixed double lock.
31  *		Alan Cox	:	Fixed promisc NULL pointer trap
32  *		????????	:	Support the full private ioctl range
33  *		Alan Cox	:	Moved ioctl permission check into
34  *					drivers
35  *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
36  *		Alan Cox	:	100 backlog just doesn't cut it when
37  *					you start doing multicast video 8)
38  *		Alan Cox	:	Rewrote net_bh and list manager.
39  *		Alan Cox	: 	Fix ETH_P_ALL echoback lengths.
40  *		Alan Cox	:	Took out transmit every packet pass
41  *					Saved a few bytes in the ioctl handler
42  *		Alan Cox	:	Network driver sets packet type before
43  *					calling netif_rx. Saves a function
44  *					call per packet.
45  *		Alan Cox	:	Hashed net_bh()
46  *		Richard Kooijman:	Timestamp fixes.
47  *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
48  *		Alan Cox	:	Device lock protection.
49  *		Alan Cox	: 	Fixed nasty side effect of device close
50  *					changes.
51  *		Rudi Cilibrasi	:	Pass the right thing to
52  *					set_mac_address()
53  *		Dave Miller	:	32bit quantity for the device lock to
54  *					make it work out on a Sparc.
55  *		Bjorn Ekwall	:	Added KERNELD hack.
56  *		Alan Cox	:	Cleaned up the backlog initialise.
57  *		Craig Metz	:	SIOCGIFCONF fix if space for under
58  *					1 device.
59  *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
60  *					is no device open function.
61  *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
62  *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
63  *		Cyrus Durgin	:	Cleaned for KMOD
64  *		Adam Sulmicki   :	Bug Fix : Network Device Unload
65  *					A network device unload needs to purge
66  *					the backlog queue.
67  *	Paul Rusty Russell	:	SIOCSIFNAME
68  *              Pekka Riikonen  :	Netdev boot-time settings code
69  *              Andrew Morton   :       Make unregister_netdevice wait
70  *              			indefinitely on dev->refcnt
71  * 		J Hadi Salim	:	- Backlog queue sampling
72  *				        - netif_rx() feedback
73  */
74 
75 #include <asm/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
86 #include <linux/mm.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <linux/rtnetlink.h>
100 #include <linux/proc_fs.h>
101 #include <linux/seq_file.h>
102 #include <linux/stat.h>
103 #include <net/dst.h>
104 #include <net/pkt_sched.h>
105 #include <net/checksum.h>
106 #include <net/xfrm.h>
107 #include <linux/highmem.h>
108 #include <linux/init.h>
109 #include <linux/kmod.h>
110 #include <linux/module.h>
111 #include <linux/netpoll.h>
112 #include <linux/rcupdate.h>
113 #include <linux/delay.h>
114 #include <net/wext.h>
115 #include <net/iw_handler.h>
116 #include <asm/current.h>
117 #include <linux/audit.h>
118 #include <linux/dmaengine.h>
119 #include <linux/err.h>
120 #include <linux/ctype.h>
121 #include <linux/if_arp.h>
122 #include <linux/if_vlan.h>
123 #include <linux/ip.h>
124 #include <net/ip.h>
125 #include <linux/ipv6.h>
126 #include <linux/in.h>
127 #include <linux/jhash.h>
128 #include <linux/random.h>
129 #include <trace/events/napi.h>
130 #include <trace/events/net.h>
131 #include <trace/events/skb.h>
132 #include <linux/pci.h>
133 #include <linux/inetdevice.h>
134 #include <linux/cpu_rmap.h>
135 #include <linux/net_tstamp.h>
136 #include <linux/static_key.h>
137 #include <net/flow_keys.h>
138 
139 #include "net-sysfs.h"
140 
141 /* Instead of increasing this, you should create a hash table. */
142 #define MAX_GRO_SKBS 8
143 
144 /* This should be increased if a protocol with a bigger head is added. */
145 #define GRO_MAX_HEAD (MAX_HEADER + 128)
146 
147 /*
148  *	The list of packet types we will receive (as opposed to discard)
149  *	and the routines to invoke.
150  *
151  *	Why 16? Because with 16 the only overlap we get on a hash of the
152  *	low nibble of the protocol value is RARP/SNAP/X.25.
153  *
154  *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
155  *             sure which should go first, but I bet it won't make much
156  *             difference if we are running VLANs.  The good news is that
157  *             this protocol won't be in the list unless compiled in, so
158  *             the average user (w/out VLANs) will not be adversely affected.
159  *             --BLG
160  *
161  *		0800	IP
162  *		8100    802.1Q VLAN
163  *		0001	802.3
164  *		0002	AX.25
165  *		0004	802.2
166  *		8035	RARP
167  *		0005	SNAP
168  *		0805	X.25
169  *		0806	ARP
170  *		8137	IPX
171  *		0009	Localtalk
172  *		86DD	IPv6
173  */
174 
175 #define PTYPE_HASH_SIZE	(16)
176 #define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
177 
178 static DEFINE_SPINLOCK(ptype_lock);
179 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
180 static struct list_head ptype_all __read_mostly;	/* Taps */
181 
182 /*
183  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
184  * semaphore.
185  *
186  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
187  *
188  * Writers must hold the rtnl semaphore while they loop through the
189  * dev_base_head list, and hold dev_base_lock for writing when they do the
190  * actual updates.  This allows pure readers to access the list even
191  * while a writer is preparing to update it.
192  *
193  * To put it another way, dev_base_lock is held for writing only to
194  * protect against pure readers; the rtnl semaphore provides the
195  * protection against other writers.
196  *
197  * See, for example usages, register_netdevice() and
198  * unregister_netdevice(), which must be called with the rtnl
199  * semaphore held.
200  */
201 DEFINE_RWLOCK(dev_base_lock);
202 EXPORT_SYMBOL(dev_base_lock);
203 
204 static inline void dev_base_seq_inc(struct net *net)
205 {
206 	while (++net->dev_base_seq == 0);
207 }
208 
209 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
210 {
211 	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
212 
213 	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
214 }
215 
216 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
217 {
218 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
219 }
220 
221 static inline void rps_lock(struct softnet_data *sd)
222 {
223 #ifdef CONFIG_RPS
224 	spin_lock(&sd->input_pkt_queue.lock);
225 #endif
226 }
227 
228 static inline void rps_unlock(struct softnet_data *sd)
229 {
230 #ifdef CONFIG_RPS
231 	spin_unlock(&sd->input_pkt_queue.lock);
232 #endif
233 }
234 
235 /* Device list insertion */
236 static int list_netdevice(struct net_device *dev)
237 {
238 	struct net *net = dev_net(dev);
239 
240 	ASSERT_RTNL();
241 
242 	write_lock_bh(&dev_base_lock);
243 	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
244 	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
245 	hlist_add_head_rcu(&dev->index_hlist,
246 			   dev_index_hash(net, dev->ifindex));
247 	write_unlock_bh(&dev_base_lock);
248 
249 	dev_base_seq_inc(net);
250 
251 	return 0;
252 }
253 
254 /* Device list removal
255  * The caller must respect an RCU grace period before freeing/reusing dev
256  */
257 static void unlist_netdevice(struct net_device *dev)
258 {
259 	ASSERT_RTNL();
260 
261 	/* Unlink dev from the device chain */
262 	write_lock_bh(&dev_base_lock);
263 	list_del_rcu(&dev->dev_list);
264 	hlist_del_rcu(&dev->name_hlist);
265 	hlist_del_rcu(&dev->index_hlist);
266 	write_unlock_bh(&dev_base_lock);
267 
268 	dev_base_seq_inc(dev_net(dev));
269 }
270 
271 /*
272  *	Our notifier list
273  */
274 
275 static RAW_NOTIFIER_HEAD(netdev_chain);
276 
277 /*
278  *	Device drivers call our routines to queue packets here. We empty the
279  *	queue in the local softnet handler.
280  */
281 
282 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
283 EXPORT_PER_CPU_SYMBOL(softnet_data);
284 
285 #ifdef CONFIG_LOCKDEP
286 /*
287  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
288  * according to dev->type
289  */
290 static const unsigned short netdev_lock_type[] =
291 	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
292 	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
293 	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
294 	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
295 	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
296 	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
297 	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
298 	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
299 	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
300 	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
301 	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
302 	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
303 	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
304 	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
305 	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
306 
307 static const char *const netdev_lock_name[] =
308 	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
309 	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
310 	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
311 	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
312 	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
313 	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
314 	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
315 	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
316 	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
317 	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
318 	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
319 	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
320 	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
321 	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
322 	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
323 
324 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
325 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
326 
327 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
328 {
329 	int i;
330 
331 	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
332 		if (netdev_lock_type[i] == dev_type)
333 			return i;
334 	/* the last key is used by default */
335 	return ARRAY_SIZE(netdev_lock_type) - 1;
336 }
337 
338 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
339 						 unsigned short dev_type)
340 {
341 	int i;
342 
343 	i = netdev_lock_pos(dev_type);
344 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
345 				   netdev_lock_name[i]);
346 }
347 
348 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
349 {
350 	int i;
351 
352 	i = netdev_lock_pos(dev->type);
353 	lockdep_set_class_and_name(&dev->addr_list_lock,
354 				   &netdev_addr_lock_key[i],
355 				   netdev_lock_name[i]);
356 }
357 #else
358 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
359 						 unsigned short dev_type)
360 {
361 }
362 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
363 {
364 }
365 #endif
366 
367 /*******************************************************************************
368 
369 		Protocol management and registration routines
370 
371 *******************************************************************************/
372 
373 /*
374  *	Add a protocol ID to the list. Now that the input handler is
375  *	smarter we can dispense with all the messy stuff that used to be
376  *	here.
377  *
378  *	BEWARE!!! Protocol handlers, mangling input packets,
379  *	MUST BE last in hash buckets and checking protocol handlers
380  *	MUST start from promiscuous ptype_all chain in net_bh.
381  *	It is true now, do not change it.
382  *	Explanation follows: if a protocol handler that mangles packets
383  *	were first on the list, it would not be able to sense that the packet
384  *	is cloned and should be copied-on-write; it would
385  *	change it and subsequent readers would get a broken packet.
386  *							--ANK (980803)
387  */
388 
389 static inline struct list_head *ptype_head(const struct packet_type *pt)
390 {
391 	if (pt->type == htons(ETH_P_ALL))
392 		return &ptype_all;
393 	else
394 		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
395 }
396 
397 /**
398  *	dev_add_pack - add packet handler
399  *	@pt: packet type declaration
400  *
401  *	Add a protocol handler to the networking stack. The passed &packet_type
402  *	is linked into kernel lists and may not be freed until it has been
403  *	removed from the kernel lists.
404  *
405  *	This call does not sleep, therefore it cannot
406  *	guarantee that all CPUs that are in the middle of receiving packets
407  *	will see the new packet type (until the next received packet).
408  */
409 
410 void dev_add_pack(struct packet_type *pt)
411 {
412 	struct list_head *head = ptype_head(pt);
413 
414 	spin_lock(&ptype_lock);
415 	list_add_rcu(&pt->list, head);
416 	spin_unlock(&ptype_lock);
417 }
418 EXPORT_SYMBOL(dev_add_pack);
419 
420 /**
421  *	__dev_remove_pack	 - remove packet handler
422  *	@pt: packet type declaration
423  *
424  *	Remove a protocol handler that was previously added to the kernel
425  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
426  *	from the kernel lists and can be freed or reused once this function
427  *	returns.
428  *
429  *	The packet type might still be in use by receivers
430  *	and must not be freed until after all the CPUs have gone
431  *	through a quiescent state.
432  */
433 void __dev_remove_pack(struct packet_type *pt)
434 {
435 	struct list_head *head = ptype_head(pt);
436 	struct packet_type *pt1;
437 
438 	spin_lock(&ptype_lock);
439 
440 	list_for_each_entry(pt1, head, list) {
441 		if (pt == pt1) {
442 			list_del_rcu(&pt->list);
443 			goto out;
444 		}
445 	}
446 
447 	pr_warn("dev_remove_pack: %p not found\n", pt);
448 out:
449 	spin_unlock(&ptype_lock);
450 }
451 EXPORT_SYMBOL(__dev_remove_pack);
452 
453 /**
454  *	dev_remove_pack	 - remove packet handler
455  *	@pt: packet type declaration
456  *
457  *	Remove a protocol handler that was previously added to the kernel
458  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
459  *	from the kernel lists and can be freed or reused once this function
460  *	returns.
461  *
462  *	This call sleeps to guarantee that no CPU is looking at the packet
463  *	type after return.
464  */
465 void dev_remove_pack(struct packet_type *pt)
466 {
467 	__dev_remove_pack(pt);
468 
469 	synchronize_net();
470 }
471 EXPORT_SYMBOL(dev_remove_pack);
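
/*
 * Usage sketch (illustrative, not part of this file): a protocol module
 * would typically declare a packet_type and register it with dev_add_pack();
 * my_rcv() here is a hypothetical receive function with the usual
 * (skb, dev, pt, orig_dev) signature, and ETH_P_IP is just an example
 * protocol value.
 *
 *	static struct packet_type my_packet_type __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_IP),
 *		.func	= my_rcv,
 *	};
 *
 *	dev_add_pack(&my_packet_type);
 *	...
 *	dev_remove_pack(&my_packet_type);	(on module unload)
 */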
472 
473 /******************************************************************************
474 
475 		      Device Boot-time Settings Routines
476 
477 *******************************************************************************/
478 
479 /* Boot time configuration table */
480 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
481 
482 /**
483  *	netdev_boot_setup_add	- add new setup entry
484  *	@name: name of the device
485  *	@map: configured settings for the device
486  *
487  *	Adds new setup entry to the dev_boot_setup list.  The function
488  *	returns 0 on error and 1 on success.  This is a generic routine for
489  *	all netdevices.
490  */
491 static int netdev_boot_setup_add(char *name, struct ifmap *map)
492 {
493 	struct netdev_boot_setup *s;
494 	int i;
495 
496 	s = dev_boot_setup;
497 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
498 		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
499 			memset(s[i].name, 0, sizeof(s[i].name));
500 			strlcpy(s[i].name, name, IFNAMSIZ);
501 			memcpy(&s[i].map, map, sizeof(s[i].map));
502 			break;
503 		}
504 	}
505 
506 	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
507 }
508 
509 /**
510  *	netdev_boot_setup_check	- check boot time settings
511  *	@dev: the netdevice
512  *
513  * 	Check boot time settings for the device.
514  *	The found settings are set for the device to be used
515  *	later in the device probing.
516  *	Returns 0 if no settings are found, 1 if they are.
517  */
518 int netdev_boot_setup_check(struct net_device *dev)
519 {
520 	struct netdev_boot_setup *s = dev_boot_setup;
521 	int i;
522 
523 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
524 		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
525 		    !strcmp(dev->name, s[i].name)) {
526 			dev->irq 	= s[i].map.irq;
527 			dev->base_addr 	= s[i].map.base_addr;
528 			dev->mem_start 	= s[i].map.mem_start;
529 			dev->mem_end 	= s[i].map.mem_end;
530 			return 1;
531 		}
532 	}
533 	return 0;
534 }
535 EXPORT_SYMBOL(netdev_boot_setup_check);
536 
537 
538 /**
539  *	netdev_boot_base	- get address from boot time settings
540  *	@prefix: prefix for network device
541  *	@unit: id for network device
542  *
543  * 	Check boot time settings for the base address of the device.
544  *	The found settings are set for the device to be used
545  *	later in the device probing.
546  *	Returns 0 if no settings are found.
547  */
548 unsigned long netdev_boot_base(const char *prefix, int unit)
549 {
550 	const struct netdev_boot_setup *s = dev_boot_setup;
551 	char name[IFNAMSIZ];
552 	int i;
553 
554 	sprintf(name, "%s%d", prefix, unit);
555 
556 	/*
557 	 * If device already registered then return base of 1
558 	 * to indicate not to probe for this interface
559 	 */
560 	if (__dev_get_by_name(&init_net, name))
561 		return 1;
562 
563 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
564 		if (!strcmp(name, s[i].name))
565 			return s[i].map.base_addr;
566 	return 0;
567 }
568 
569 /*
570  * Saves the settings configured at boot time for any netdevice.
571  */
572 int __init netdev_boot_setup(char *str)
573 {
574 	int ints[5];
575 	struct ifmap map;
576 
577 	str = get_options(str, ARRAY_SIZE(ints), ints);
578 	if (!str || !*str)
579 		return 0;
580 
581 	/* Save settings */
582 	memset(&map, 0, sizeof(map));
583 	if (ints[0] > 0)
584 		map.irq = ints[1];
585 	if (ints[0] > 1)
586 		map.base_addr = ints[2];
587 	if (ints[0] > 2)
588 		map.mem_start = ints[3];
589 	if (ints[0] > 3)
590 		map.mem_end = ints[4];
591 
592 	/* Add new entry to the list */
593 	return netdev_boot_setup_add(str, &map);
594 }
595 
596 __setup("netdev=", netdev_boot_setup);
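
/*
 * Example command line use (values here are purely illustrative): based on
 * the parsing above, up to four integers (irq, base_addr, mem_start, mem_end)
 * are followed by the interface name, e.g.
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth0
 *
 * Trailing integers may be omitted; whatever remains after the numbers is
 * taken as the interface name.
 */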
597 
598 /*******************************************************************************
599 
600 			    Device Interface Subroutines
601 
602 *******************************************************************************/
603 
604 /**
605  *	__dev_get_by_name	- find a device by its name
606  *	@net: the applicable net namespace
607  *	@name: name to find
608  *
609  *	Find an interface by name. Must be called under RTNL semaphore
610  *	or @dev_base_lock. If the name is found a pointer to the device
611  *	is returned. If the name is not found then %NULL is returned. The
612  *	reference counters are not incremented so the caller must be
613  *	careful with locks.
614  */
615 
616 struct net_device *__dev_get_by_name(struct net *net, const char *name)
617 {
618 	struct hlist_node *p;
619 	struct net_device *dev;
620 	struct hlist_head *head = dev_name_hash(net, name);
621 
622 	hlist_for_each_entry(dev, p, head, name_hlist)
623 		if (!strncmp(dev->name, name, IFNAMSIZ))
624 			return dev;
625 
626 	return NULL;
627 }
628 EXPORT_SYMBOL(__dev_get_by_name);
629 
630 /**
631  *	dev_get_by_name_rcu	- find a device by its name
632  *	@net: the applicable net namespace
633  *	@name: name to find
634  *
635  *	Find an interface by name.
636  *	If the name is found a pointer to the device is returned.
637  * 	If the name is not found then %NULL is returned.
638  *	The reference counters are not incremented so the caller must be
639  *	careful with locks. The caller must hold RCU lock.
640  */
641 
642 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
643 {
644 	struct hlist_node *p;
645 	struct net_device *dev;
646 	struct hlist_head *head = dev_name_hash(net, name);
647 
648 	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
649 		if (!strncmp(dev->name, name, IFNAMSIZ))
650 			return dev;
651 
652 	return NULL;
653 }
654 EXPORT_SYMBOL(dev_get_by_name_rcu);
655 
656 /**
657  *	dev_get_by_name		- find a device by its name
658  *	@net: the applicable net namespace
659  *	@name: name to find
660  *
661  *	Find an interface by name. This can be called from any
662  *	context and does its own locking. The returned handle has
663  *	the usage count incremented and the caller must use dev_put() to
664  *	release it when it is no longer needed. %NULL is returned if no
665  *	matching device is found.
666  */
667 
668 struct net_device *dev_get_by_name(struct net *net, const char *name)
669 {
670 	struct net_device *dev;
671 
672 	rcu_read_lock();
673 	dev = dev_get_by_name_rcu(net, name);
674 	if (dev)
675 		dev_hold(dev);
676 	rcu_read_unlock();
677 	return dev;
678 }
679 EXPORT_SYMBOL(dev_get_by_name);
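
/*
 * Typical use (illustrative sketch): the reference taken here must be
 * released with dev_put() once the caller is done with the device.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		...			(use dev)
 *		dev_put(dev);
 *	}
 */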
680 
681 /**
682  *	__dev_get_by_index - find a device by its ifindex
683  *	@net: the applicable net namespace
684  *	@ifindex: index of device
685  *
686  *	Search for an interface by index. Returns a pointer to the device,
687  *	or %NULL if the device is not found. The device has not
688  *	had its reference counter increased so the caller must be careful
689  *	about locking. The caller must hold either the RTNL semaphore
690  *	or @dev_base_lock.
691  */
692 
693 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
694 {
695 	struct hlist_node *p;
696 	struct net_device *dev;
697 	struct hlist_head *head = dev_index_hash(net, ifindex);
698 
699 	hlist_for_each_entry(dev, p, head, index_hlist)
700 		if (dev->ifindex == ifindex)
701 			return dev;
702 
703 	return NULL;
704 }
705 EXPORT_SYMBOL(__dev_get_by_index);
706 
707 /**
708  *	dev_get_by_index_rcu - find a device by its ifindex
709  *	@net: the applicable net namespace
710  *	@ifindex: index of device
711  *
712  *	Search for an interface by index. Returns a pointer to the device,
713  *	or %NULL if the device is not found. The device has not
714  *	had its reference counter increased so the caller must be careful
715  *	about locking. The caller must hold RCU lock.
716  */
717 
718 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
719 {
720 	struct hlist_node *p;
721 	struct net_device *dev;
722 	struct hlist_head *head = dev_index_hash(net, ifindex);
723 
724 	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
725 		if (dev->ifindex == ifindex)
726 			return dev;
727 
728 	return NULL;
729 }
730 EXPORT_SYMBOL(dev_get_by_index_rcu);
731 
732 
733 /**
734  *	dev_get_by_index - find a device by its ifindex
735  *	@net: the applicable net namespace
736  *	@ifindex: index of device
737  *
738  *	Search for an interface by index. Returns a pointer to the device,
739  *	or NULL if the device is not found. The device returned has
740  *	had a reference added and the pointer is safe until the user calls
741  *	dev_put to indicate they have finished with it.
742  */
743 
744 struct net_device *dev_get_by_index(struct net *net, int ifindex)
745 {
746 	struct net_device *dev;
747 
748 	rcu_read_lock();
749 	dev = dev_get_by_index_rcu(net, ifindex);
750 	if (dev)
751 		dev_hold(dev);
752 	rcu_read_unlock();
753 	return dev;
754 }
755 EXPORT_SYMBOL(dev_get_by_index);
756 
757 /**
758  *	dev_getbyhwaddr_rcu - find a device by its hardware address
759  *	@net: the applicable net namespace
760  *	@type: media type of device
761  *	@ha: hardware address
762  *
763  *	Search for an interface by MAC address. Returns a pointer to the
764  *	device, or NULL if the device is not found.
765  *	The caller must hold RCU or RTNL.
766  *	The returned device has not had its ref count increased
767  *	and the caller must therefore be careful about locking.
768  *
769  */
770 
771 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
772 				       const char *ha)
773 {
774 	struct net_device *dev;
775 
776 	for_each_netdev_rcu(net, dev)
777 		if (dev->type == type &&
778 		    !memcmp(dev->dev_addr, ha, dev->addr_len))
779 			return dev;
780 
781 	return NULL;
782 }
783 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
784 
785 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
786 {
787 	struct net_device *dev;
788 
789 	ASSERT_RTNL();
790 	for_each_netdev(net, dev)
791 		if (dev->type == type)
792 			return dev;
793 
794 	return NULL;
795 }
796 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
797 
798 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
799 {
800 	struct net_device *dev, *ret = NULL;
801 
802 	rcu_read_lock();
803 	for_each_netdev_rcu(net, dev)
804 		if (dev->type == type) {
805 			dev_hold(dev);
806 			ret = dev;
807 			break;
808 		}
809 	rcu_read_unlock();
810 	return ret;
811 }
812 EXPORT_SYMBOL(dev_getfirstbyhwtype);
813 
814 /**
815  *	dev_get_by_flags_rcu - find any device with given flags
816  *	@net: the applicable net namespace
817  *	@if_flags: IFF_* values
818  *	@mask: bitmask of bits in if_flags to check
819  *
820  *	Search for any interface with the given flags. Returns a pointer to the
821  *	first matching device, or NULL if none is found. Must be called inside
822  *	rcu_read_lock(), and result refcount is unchanged.
823  */
824 
825 struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
826 				    unsigned short mask)
827 {
828 	struct net_device *dev, *ret;
829 
830 	ret = NULL;
831 	for_each_netdev_rcu(net, dev) {
832 		if (((dev->flags ^ if_flags) & mask) == 0) {
833 			ret = dev;
834 			break;
835 		}
836 	}
837 	return ret;
838 }
839 EXPORT_SYMBOL(dev_get_by_flags_rcu);
840 
841 /**
842  *	dev_valid_name - check if name is okay for network device
843  *	@name: name string
844  *
845  *	Network device names need to be valid file names
846  *	to allow sysfs to work.  We also disallow any kind of
847  *	whitespace.
848  */
849 bool dev_valid_name(const char *name)
850 {
851 	if (*name == '\0')
852 		return false;
853 	if (strlen(name) >= IFNAMSIZ)
854 		return false;
855 	if (!strcmp(name, ".") || !strcmp(name, ".."))
856 		return false;
857 
858 	while (*name) {
859 		if (*name == '/' || isspace(*name))
860 			return false;
861 		name++;
862 	}
863 	return true;
864 }
865 EXPORT_SYMBOL(dev_valid_name);
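
/*
 * For example (per the checks above): "eth0" and "wlan0" are accepted,
 * while "", ".", "..", names of IFNAMSIZ characters or more, and anything
 * containing '/' or whitespace are rejected.
 */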
866 
867 /**
868  *	__dev_alloc_name - allocate a name for a device
869  *	@net: network namespace to allocate the device name in
870  *	@name: name format string
871  *	@buf:  scratch buffer and result name string
872  *
873  *	Passed a format string - eg "lt%d" - it will try and find a suitable
874  *	id. It scans the list of devices to build up a free map, then chooses
875  *	the first empty slot. The caller must hold the dev_base or rtnl lock
876  *	while allocating the name and adding the device in order to avoid
877  *	duplicates.
878  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
879  *	Returns the number of the unit assigned or a negative errno code.
880  */
881 
882 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
883 {
884 	int i = 0;
885 	const char *p;
886 	const int max_netdevices = 8*PAGE_SIZE;
887 	unsigned long *inuse;
888 	struct net_device *d;
889 
890 	p = strnchr(name, IFNAMSIZ-1, '%');
891 	if (p) {
892 		/*
893 		 * Verify the string as this thing may have come from
894 		 * the user.  There must be either one "%d" and no other "%"
895 		 * characters.
896 		 */
897 		if (p[1] != 'd' || strchr(p + 2, '%'))
898 			return -EINVAL;
899 
900 		/* Use one page as a bit array of possible slots */
901 		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
902 		if (!inuse)
903 			return -ENOMEM;
904 
905 		for_each_netdev(net, d) {
906 			if (!sscanf(d->name, name, &i))
907 				continue;
908 			if (i < 0 || i >= max_netdevices)
909 				continue;
910 
911 			/*  avoid cases where sscanf is not exact inverse of printf */
912 			snprintf(buf, IFNAMSIZ, name, i);
913 			if (!strncmp(buf, d->name, IFNAMSIZ))
914 				set_bit(i, inuse);
915 		}
916 
917 		i = find_first_zero_bit(inuse, max_netdevices);
918 		free_page((unsigned long) inuse);
919 	}
920 
921 	if (buf != name)
922 		snprintf(buf, IFNAMSIZ, name, i);
923 	if (!__dev_get_by_name(net, buf))
924 		return i;
925 
926 	/* It is possible to run out of possible slots
927 	 * when the name is long and there isn't enough space left
928 	 * for the digits, or if all bits are used.
929 	 */
930 	return -ENFILE;
931 }
932 
933 /**
934  *	dev_alloc_name - allocate a name for a device
935  *	@dev: device
936  *	@name: name format string
937  *
938  *	Passed a format string - eg "lt%d" - it will try and find a suitable
939  *	id. It scans the list of devices to build up a free map, then chooses
940  *	the first empty slot. The caller must hold the dev_base or rtnl lock
941  *	while allocating the name and adding the device in order to avoid
942  *	duplicates.
943  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
944  *	Returns the number of the unit assigned or a negative errno code.
945  */
946 
947 int dev_alloc_name(struct net_device *dev, const char *name)
948 {
949 	char buf[IFNAMSIZ];
950 	struct net *net;
951 	int ret;
952 
953 	BUG_ON(!dev_net(dev));
954 	net = dev_net(dev);
955 	ret = __dev_alloc_name(net, name, buf);
956 	if (ret >= 0)
957 		strlcpy(dev->name, buf, IFNAMSIZ);
958 	return ret;
959 }
960 EXPORT_SYMBOL(dev_alloc_name);
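
/*
 * Illustrative caller: a driver that does not care which unit number it gets
 * can ask for the next free "eth%d" slot before registering the device
 * (the error handling shown is a sketch, not taken from a real driver):
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		return err;
 */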
961 
962 static int dev_get_valid_name(struct net_device *dev, const char *name)
963 {
964 	struct net *net;
965 
966 	BUG_ON(!dev_net(dev));
967 	net = dev_net(dev);
968 
969 	if (!dev_valid_name(name))
970 		return -EINVAL;
971 
972 	if (strchr(name, '%'))
973 		return dev_alloc_name(dev, name);
974 	else if (__dev_get_by_name(net, name))
975 		return -EEXIST;
976 	else if (dev->name != name)
977 		strlcpy(dev->name, name, IFNAMSIZ);
978 
979 	return 0;
980 }
981 
982 /**
983  *	dev_change_name - change name of a device
984  *	@dev: device
985  *	@newname: name (or format string) must be at least IFNAMSIZ
986  *
987  *	Change the name of a device. A format string such as "eth%d" can be
988  *	passed for wildcarding.
989  */
990 int dev_change_name(struct net_device *dev, const char *newname)
991 {
992 	char oldname[IFNAMSIZ];
993 	int err = 0;
994 	int ret;
995 	struct net *net;
996 
997 	ASSERT_RTNL();
998 	BUG_ON(!dev_net(dev));
999 
1000 	net = dev_net(dev);
1001 	if (dev->flags & IFF_UP)
1002 		return -EBUSY;
1003 
1004 	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
1005 		return 0;
1006 
1007 	memcpy(oldname, dev->name, IFNAMSIZ);
1008 
1009 	err = dev_get_valid_name(dev, newname);
1010 	if (err < 0)
1011 		return err;
1012 
1013 rollback:
1014 	ret = device_rename(&dev->dev, dev->name);
1015 	if (ret) {
1016 		memcpy(dev->name, oldname, IFNAMSIZ);
1017 		return ret;
1018 	}
1019 
1020 	write_lock_bh(&dev_base_lock);
1021 	hlist_del_rcu(&dev->name_hlist);
1022 	write_unlock_bh(&dev_base_lock);
1023 
1024 	synchronize_rcu();
1025 
1026 	write_lock_bh(&dev_base_lock);
1027 	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1028 	write_unlock_bh(&dev_base_lock);
1029 
1030 	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1031 	ret = notifier_to_errno(ret);
1032 
1033 	if (ret) {
1034 		/* err >= 0 after dev_alloc_name() or stores the first errno */
1035 		if (err >= 0) {
1036 			err = ret;
1037 			memcpy(dev->name, oldname, IFNAMSIZ);
1038 			goto rollback;
1039 		} else {
1040 			pr_err("%s: name change rollback failed: %d\n",
1041 			       dev->name, ret);
1042 		}
1043 	}
1044 
1045 	return err;
1046 }
1047 
1048 /**
1049  *	dev_set_alias - change ifalias of a device
1050  *	@dev: device
1051  *	@alias: name up to IFALIASZ
1052  *	@len: limit of bytes to copy from @alias
1053  *
1054  *	Set ifalias for a device.
1055  */
1056 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1057 {
1058 	ASSERT_RTNL();
1059 
1060 	if (len >= IFALIASZ)
1061 		return -EINVAL;
1062 
1063 	if (!len) {
1064 		if (dev->ifalias) {
1065 			kfree(dev->ifalias);
1066 			dev->ifalias = NULL;
1067 		}
1068 		return 0;
1069 	}
1070 
1071 	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1072 	if (!dev->ifalias)
1073 		return -ENOMEM;
1074 
1075 	strlcpy(dev->ifalias, alias, len+1);
1076 	return len;
1077 }
1078 
1079 
1080 /**
1081  *	netdev_features_change - device changes features
1082  *	@dev: device to cause notification
1083  *
1084  *	Called to indicate a device has changed features.
1085  */
1086 void netdev_features_change(struct net_device *dev)
1087 {
1088 	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1089 }
1090 EXPORT_SYMBOL(netdev_features_change);
1091 
1092 /**
1093  *	netdev_state_change - device changes state
1094  *	@dev: device to cause notification
1095  *
1096  *	Called to indicate a device has changed state. This function calls
1097  *	the notifier chains for netdev_chain and sends a NEWLINK message
1098  *	to the routing socket.
1099  */
1100 void netdev_state_change(struct net_device *dev)
1101 {
1102 	if (dev->flags & IFF_UP) {
1103 		call_netdevice_notifiers(NETDEV_CHANGE, dev);
1104 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1105 	}
1106 }
1107 EXPORT_SYMBOL(netdev_state_change);
1108 
1109 int netdev_bonding_change(struct net_device *dev, unsigned long event)
1110 {
1111 	return call_netdevice_notifiers(event, dev);
1112 }
1113 EXPORT_SYMBOL(netdev_bonding_change);
1114 
1115 /**
1116  *	dev_load 	- load a network module
1117  *	@net: the applicable net namespace
1118  *	@name: name of interface
1119  *
1120  *	If a network interface is not present and the process has suitable
1121  *	privileges this function loads the module. If module loading is not
1122  *	available in this kernel then it becomes a nop.
1123  */
1124 
1125 void dev_load(struct net *net, const char *name)
1126 {
1127 	struct net_device *dev;
1128 	int no_module;
1129 
1130 	rcu_read_lock();
1131 	dev = dev_get_by_name_rcu(net, name);
1132 	rcu_read_unlock();
1133 
1134 	no_module = !dev;
1135 	if (no_module && capable(CAP_NET_ADMIN))
1136 		no_module = request_module("netdev-%s", name);
1137 	if (no_module && capable(CAP_SYS_MODULE)) {
1138 		if (!request_module("%s", name))
1139 			pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1140 			       name);
1141 	}
1142 }
1143 EXPORT_SYMBOL(dev_load);
1144 
1145 static int __dev_open(struct net_device *dev)
1146 {
1147 	const struct net_device_ops *ops = dev->netdev_ops;
1148 	int ret;
1149 
1150 	ASSERT_RTNL();
1151 
1152 	if (!netif_device_present(dev))
1153 		return -ENODEV;
1154 
1155 	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1156 	ret = notifier_to_errno(ret);
1157 	if (ret)
1158 		return ret;
1159 
1160 	set_bit(__LINK_STATE_START, &dev->state);
1161 
1162 	if (ops->ndo_validate_addr)
1163 		ret = ops->ndo_validate_addr(dev);
1164 
1165 	if (!ret && ops->ndo_open)
1166 		ret = ops->ndo_open(dev);
1167 
1168 	if (ret)
1169 		clear_bit(__LINK_STATE_START, &dev->state);
1170 	else {
1171 		dev->flags |= IFF_UP;
1172 		net_dmaengine_get();
1173 		dev_set_rx_mode(dev);
1174 		dev_activate(dev);
1175 	}
1176 
1177 	return ret;
1178 }
1179 
1180 /**
1181  *	dev_open	- prepare an interface for use.
1182  *	@dev:	device to open
1183  *
1184  *	Takes a device from down to up state. The device's private open
1185  *	function is invoked and then the multicast lists are loaded. Finally
1186  *	the device is moved into the up state and a %NETDEV_UP message is
1187  *	sent to the netdev notifier chain.
1188  *
1189  *	Calling this function on an active interface is a nop. On a failure
1190  *	a negative errno code is returned.
1191  */
1192 int dev_open(struct net_device *dev)
1193 {
1194 	int ret;
1195 
1196 	if (dev->flags & IFF_UP)
1197 		return 0;
1198 
1199 	ret = __dev_open(dev);
1200 	if (ret < 0)
1201 		return ret;
1202 
1203 	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1204 	call_netdevice_notifiers(NETDEV_UP, dev);
1205 
1206 	return ret;
1207 }
1208 EXPORT_SYMBOL(dev_open);
1209 
1210 static int __dev_close_many(struct list_head *head)
1211 {
1212 	struct net_device *dev;
1213 
1214 	ASSERT_RTNL();
1215 	might_sleep();
1216 
1217 	list_for_each_entry(dev, head, unreg_list) {
1218 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1219 
1220 		clear_bit(__LINK_STATE_START, &dev->state);
1221 
1222 		/* Synchronize to scheduled poll. We cannot touch the poll list, it
1223 		 * can even be on a different cpu. So just clear netif_running().
1224 		 *
1225 		 * dev->stop() will invoke napi_disable() on all of its
1226 		 * napi_struct instances on this device.
1227 		 */
1228 		smp_mb__after_clear_bit(); /* Commit netif_running(). */
1229 	}
1230 
1231 	dev_deactivate_many(head);
1232 
1233 	list_for_each_entry(dev, head, unreg_list) {
1234 		const struct net_device_ops *ops = dev->netdev_ops;
1235 
1236 		/*
1237 		 *	Call the device specific close. This cannot fail.
1238 		 *	It is only called if the device is UP.
1239 		 *
1240 		 *	We allow it to be called even after a DETACH hot-plug
1241 		 *	event.
1242 		 */
1243 		if (ops->ndo_stop)
1244 			ops->ndo_stop(dev);
1245 
1246 		dev->flags &= ~IFF_UP;
1247 		net_dmaengine_put();
1248 	}
1249 
1250 	return 0;
1251 }
1252 
1253 static int __dev_close(struct net_device *dev)
1254 {
1255 	int retval;
1256 	LIST_HEAD(single);
1257 
1258 	list_add(&dev->unreg_list, &single);
1259 	retval = __dev_close_many(&single);
1260 	list_del(&single);
1261 	return retval;
1262 }
1263 
1264 static int dev_close_many(struct list_head *head)
1265 {
1266 	struct net_device *dev, *tmp;
1267 	LIST_HEAD(tmp_list);
1268 
1269 	list_for_each_entry_safe(dev, tmp, head, unreg_list)
1270 		if (!(dev->flags & IFF_UP))
1271 			list_move(&dev->unreg_list, &tmp_list);
1272 
1273 	__dev_close_many(head);
1274 
1275 	list_for_each_entry(dev, head, unreg_list) {
1276 		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1277 		call_netdevice_notifiers(NETDEV_DOWN, dev);
1278 	}
1279 
1280 	/* rollback_registered_many needs the complete original list */
1281 	list_splice(&tmp_list, head);
1282 	return 0;
1283 }
1284 
1285 /**
1286  *	dev_close - shutdown an interface.
1287  *	@dev: device to shutdown
1288  *
1289  *	This function moves an active device into down state. A
1290  *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1291  *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1292  *	chain.
1293  */
1294 int dev_close(struct net_device *dev)
1295 {
1296 	if (dev->flags & IFF_UP) {
1297 		LIST_HEAD(single);
1298 
1299 		list_add(&dev->unreg_list, &single);
1300 		dev_close_many(&single);
1301 		list_del(&single);
1302 	}
1303 	return 0;
1304 }
1305 EXPORT_SYMBOL(dev_close);
1306 
1307 
1308 /**
1309  *	dev_disable_lro - disable Large Receive Offload on a device
1310  *	@dev: device
1311  *
1312  *	Disable Large Receive Offload (LRO) on a net device.  Must be
1313  *	called under RTNL.  This is needed if received packets may be
1314  *	forwarded to another interface.
1315  */
1316 void dev_disable_lro(struct net_device *dev)
1317 {
1318 	/*
1319 	 * If we're trying to disable lro on a vlan device
1320 	 * use the underlying physical device instead
1321 	 */
1322 	if (is_vlan_dev(dev))
1323 		dev = vlan_dev_real_dev(dev);
1324 
1325 	dev->wanted_features &= ~NETIF_F_LRO;
1326 	netdev_update_features(dev);
1327 
1328 	if (unlikely(dev->features & NETIF_F_LRO))
1329 		netdev_WARN(dev, "failed to disable LRO!\n");
1330 }
1331 EXPORT_SYMBOL(dev_disable_lro);
1332 
1333 
1334 static int dev_boot_phase = 1;
1335 
1336 /**
1337  *	register_netdevice_notifier - register a network notifier block
1338  *	@nb: notifier
1339  *
1340  *	Register a notifier to be called when network device events occur.
1341  *	The notifier passed is linked into the kernel structures and must
1342  *	not be reused until it has been unregistered. A negative errno code
1343  *	is returned on a failure.
1344  *
1345  * 	When registered, all registration and up events are replayed
1346  *	to the new notifier so that it has a race-free
1347  *	view of the network device list.
1348  */
1349 
1350 int register_netdevice_notifier(struct notifier_block *nb)
1351 {
1352 	struct net_device *dev;
1353 	struct net_device *last;
1354 	struct net *net;
1355 	int err;
1356 
1357 	rtnl_lock();
1358 	err = raw_notifier_chain_register(&netdev_chain, nb);
1359 	if (err)
1360 		goto unlock;
1361 	if (dev_boot_phase)
1362 		goto unlock;
1363 	for_each_net(net) {
1364 		for_each_netdev(net, dev) {
1365 			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1366 			err = notifier_to_errno(err);
1367 			if (err)
1368 				goto rollback;
1369 
1370 			if (!(dev->flags & IFF_UP))
1371 				continue;
1372 
1373 			nb->notifier_call(nb, NETDEV_UP, dev);
1374 		}
1375 	}
1376 
1377 unlock:
1378 	rtnl_unlock();
1379 	return err;
1380 
1381 rollback:
1382 	last = dev;
1383 	for_each_net(net) {
1384 		for_each_netdev(net, dev) {
1385 			if (dev == last)
1386 				goto outroll;
1387 
1388 			if (dev->flags & IFF_UP) {
1389 				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1390 				nb->notifier_call(nb, NETDEV_DOWN, dev);
1391 			}
1392 			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1393 			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1394 		}
1395 	}
1396 
1397 outroll:
1398 	raw_notifier_chain_unregister(&netdev_chain, nb);
1399 	goto unlock;
1400 }
1401 EXPORT_SYMBOL(register_netdevice_notifier);
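
/*
 * Minimal notifier sketch (illustrative). In this code the notifier's third
 * argument is the struct net_device itself, as can be seen from the
 * nb->notifier_call(nb, event, dev) invocations above; my_netdev_event and
 * my_netdev_nb are hypothetical names.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call	= my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_nb);
 */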
1402 
1403 /**
1404  *	unregister_netdevice_notifier - unregister a network notifier block
1405  *	@nb: notifier
1406  *
1407  *	Unregister a notifier previously registered by
1408  *	register_netdevice_notifier(). The notifier is unlinked from the
1409  *	kernel structures and may then be reused. A negative errno code
1410  *	is returned on a failure.
1411  *
1412  * 	After unregistering, unregister and down device events are synthesized
1413  *	for all devices on the device list and sent to the removed notifier,
1414  *	removing the need for special case cleanup code.
1415  */
1416 
1417 int unregister_netdevice_notifier(struct notifier_block *nb)
1418 {
1419 	struct net_device *dev;
1420 	struct net *net;
1421 	int err;
1422 
1423 	rtnl_lock();
1424 	err = raw_notifier_chain_unregister(&netdev_chain, nb);
1425 	if (err)
1426 		goto unlock;
1427 
1428 	for_each_net(net) {
1429 		for_each_netdev(net, dev) {
1430 			if (dev->flags & IFF_UP) {
1431 				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1432 				nb->notifier_call(nb, NETDEV_DOWN, dev);
1433 			}
1434 			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1435 			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1436 		}
1437 	}
1438 unlock:
1439 	rtnl_unlock();
1440 	return err;
1441 }
1442 EXPORT_SYMBOL(unregister_netdevice_notifier);
1443 
1444 /**
1445  *	call_netdevice_notifiers - call all network notifier blocks
1446  *      @val: value passed unmodified to notifier function
1447  *      @dev: net_device pointer passed unmodified to notifier function
1448  *
1449  *	Call all network notifier blocks.  Parameters and return value
1450  *	are as for raw_notifier_call_chain().
1451  */
1452 
1453 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1454 {
1455 	ASSERT_RTNL();
1456 	return raw_notifier_call_chain(&netdev_chain, val, dev);
1457 }
1458 EXPORT_SYMBOL(call_netdevice_notifiers);
1459 
1460 static struct static_key netstamp_needed __read_mostly;
1461 #ifdef HAVE_JUMP_LABEL
1462 /* We are not allowed to call static_key_slow_dec() from irq context
1463  * If net_disable_timestamp() is called from irq context, defer the
1464  * static_key_slow_dec() calls.
1465  */
1466 static atomic_t netstamp_needed_deferred;
1467 #endif
1468 
1469 void net_enable_timestamp(void)
1470 {
1471 #ifdef HAVE_JUMP_LABEL
1472 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1473 
1474 	if (deferred) {
1475 		while (--deferred)
1476 			static_key_slow_dec(&netstamp_needed);
1477 		return;
1478 	}
1479 #endif
1480 	WARN_ON(in_interrupt());
1481 	static_key_slow_inc(&netstamp_needed);
1482 }
1483 EXPORT_SYMBOL(net_enable_timestamp);
1484 
1485 void net_disable_timestamp(void)
1486 {
1487 #ifdef HAVE_JUMP_LABEL
1488 	if (in_interrupt()) {
1489 		atomic_inc(&netstamp_needed_deferred);
1490 		return;
1491 	}
1492 #endif
1493 	static_key_slow_dec(&netstamp_needed);
1494 }
1495 EXPORT_SYMBOL(net_disable_timestamp);
1496 
1497 static inline void net_timestamp_set(struct sk_buff *skb)
1498 {
1499 	skb->tstamp.tv64 = 0;
1500 	if (static_key_false(&netstamp_needed))
1501 		__net_timestamp(skb);
1502 }
1503 
1504 #define net_timestamp_check(COND, SKB)			\
1505 	if (static_key_false(&netstamp_needed)) {		\
1506 		if ((COND) && !(SKB)->tstamp.tv64)	\
1507 			__net_timestamp(SKB);		\
1508 	}						\
1509 
1510 static int net_hwtstamp_validate(struct ifreq *ifr)
1511 {
1512 	struct hwtstamp_config cfg;
1513 	enum hwtstamp_tx_types tx_type;
1514 	enum hwtstamp_rx_filters rx_filter;
1515 	int tx_type_valid = 0;
1516 	int rx_filter_valid = 0;
1517 
1518 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1519 		return -EFAULT;
1520 
1521 	if (cfg.flags) /* reserved for future extensions */
1522 		return -EINVAL;
1523 
1524 	tx_type = cfg.tx_type;
1525 	rx_filter = cfg.rx_filter;
1526 
1527 	switch (tx_type) {
1528 	case HWTSTAMP_TX_OFF:
1529 	case HWTSTAMP_TX_ON:
1530 	case HWTSTAMP_TX_ONESTEP_SYNC:
1531 		tx_type_valid = 1;
1532 		break;
1533 	}
1534 
1535 	switch (rx_filter) {
1536 	case HWTSTAMP_FILTER_NONE:
1537 	case HWTSTAMP_FILTER_ALL:
1538 	case HWTSTAMP_FILTER_SOME:
1539 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1540 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1541 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1542 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1543 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1544 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1545 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1546 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1547 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1548 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1549 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1550 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1551 		rx_filter_valid = 1;
1552 		break;
1553 	}
1554 
1555 	if (!tx_type_valid || !rx_filter_valid)
1556 		return -ERANGE;
1557 
1558 	return 0;
1559 }
1560 
1561 static inline bool is_skb_forwardable(struct net_device *dev,
1562 				      struct sk_buff *skb)
1563 {
1564 	unsigned int len;
1565 
1566 	if (!(dev->flags & IFF_UP))
1567 		return false;
1568 
1569 	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1570 	if (skb->len <= len)
1571 		return true;
1572 
1573 	/* if TSO is enabled, we don't care about the length as the packet
1574 	 * could be forwarded without being segmented before
1575 	 */
1576 	if (skb_is_gso(skb))
1577 		return true;
1578 
1579 	return false;
1580 }
1581 
1582 /**
1583  * dev_forward_skb - loopback an skb to another netif
1584  *
1585  * @dev: destination network device
1586  * @skb: buffer to forward
1587  *
1588  * return values:
1589  *	NET_RX_SUCCESS	(no congestion)
1590  *	NET_RX_DROP     (packet was dropped, but freed)
1591  *
1592  * dev_forward_skb can be used for injecting an skb from the
1593  * start_xmit function of one device into the receive queue
1594  * of another device.
1595  *
1596  * The receiving device may be in another namespace, so
1597  * we have to clear all information in the skb that could
1598  * impact namespace isolation.
1599  */
1600 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1601 {
1602 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1603 		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1604 			atomic_long_inc(&dev->rx_dropped);
1605 			kfree_skb(skb);
1606 			return NET_RX_DROP;
1607 		}
1608 	}
1609 
1610 	skb_orphan(skb);
1611 	nf_reset(skb);
1612 
1613 	if (unlikely(!is_skb_forwardable(dev, skb))) {
1614 		atomic_long_inc(&dev->rx_dropped);
1615 		kfree_skb(skb);
1616 		return NET_RX_DROP;
1617 	}
1618 	skb->skb_iif = 0;
1619 	skb->dev = dev;
1620 	skb_dst_drop(skb);
1621 	skb->tstamp.tv64 = 0;
1622 	skb->pkt_type = PACKET_HOST;
1623 	skb->protocol = eth_type_trans(skb, dev);
1624 	skb->mark = 0;
1625 	secpath_reset(skb);
1626 	nf_reset(skb);
1627 	return netif_rx(skb);
1628 }
1629 EXPORT_SYMBOL_GPL(dev_forward_skb);
1630 
1631 static inline int deliver_skb(struct sk_buff *skb,
1632 			      struct packet_type *pt_prev,
1633 			      struct net_device *orig_dev)
1634 {
1635 	atomic_inc(&skb->users);
1636 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1637 }
1638 
1639 /*
1640  *	Support routine. Sends outgoing frames to any network
1641  *	taps currently in use.
1642  */
1643 
1644 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1645 {
1646 	struct packet_type *ptype;
1647 	struct sk_buff *skb2 = NULL;
1648 	struct packet_type *pt_prev = NULL;
1649 
1650 	rcu_read_lock();
1651 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
1652 		/* Never send packets back to the socket
1653 		 * they originated from - MvS ([email protected])
1654 		 */
1655 		if ((ptype->dev == dev || !ptype->dev) &&
1656 		    (ptype->af_packet_priv == NULL ||
1657 		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
1658 			if (pt_prev) {
1659 				deliver_skb(skb2, pt_prev, skb->dev);
1660 				pt_prev = ptype;
1661 				continue;
1662 			}
1663 
1664 			skb2 = skb_clone(skb, GFP_ATOMIC);
1665 			if (!skb2)
1666 				break;
1667 
1668 			net_timestamp_set(skb2);
1669 
1670 			/* skb->nh should be correctly set by sender,
1671 			 * so that the second statement is just protection
1672 			 * against buggy protocols.
1673 			 */
1674 			skb_reset_mac_header(skb2);
1675 
1676 			if (skb_network_header(skb2) < skb2->data ||
1677 			    skb2->network_header > skb2->tail) {
1678 				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1679 						     ntohs(skb2->protocol),
1680 						     dev->name);
1681 				skb_reset_network_header(skb2);
1682 			}
1683 
1684 			skb2->transport_header = skb2->network_header;
1685 			skb2->pkt_type = PACKET_OUTGOING;
1686 			pt_prev = ptype;
1687 		}
1688 	}
1689 	if (pt_prev)
1690 		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1691 	rcu_read_unlock();
1692 }
1693 
1694 /* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1695  * @dev: Network device
1696  * @txq: number of queues available
1697  *
1698  * If real_num_tx_queues is changed the tc mappings may no longer be
1699  * valid. To resolve this, verify that each tc mapping remains valid and,
1700  * if not, null the mapping. With no priorities mapping to an
1701  * offset/count pair, that pair will no longer be used. In the worst case,
1702  * if TC0 is invalid nothing can be done, so priority mappings are disabled
1703  * entirely. It is expected that drivers will fix this mapping if they can
1704  * before calling netif_set_real_num_tx_queues.
1705  */
1706 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1707 {
1708 	int i;
1709 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1710 
1711 	/* If TC0 is invalidated disable TC mapping */
1712 	if (tc->offset + tc->count > txq) {
1713 		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1714 		dev->num_tc = 0;
1715 		return;
1716 	}
1717 
1718 	/* Invalidated prio to tc mappings set to TC0 */
1719 	for (i = 1; i < TC_BITMASK + 1; i++) {
1720 		int q = netdev_get_prio_tc_map(dev, i);
1721 
1722 		tc = &dev->tc_to_txq[q];
1723 		if (tc->offset + tc->count > txq) {
1724 			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1725 				i, q);
1726 			netdev_set_prio_tc_map(dev, i, 0);
1727 		}
1728 	}
1729 }
1730 
1731 /*
1732  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
1733  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
1734  */
1735 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1736 {
1737 	int rc;
1738 
1739 	if (txq < 1 || txq > dev->num_tx_queues)
1740 		return -EINVAL;
1741 
1742 	if (dev->reg_state == NETREG_REGISTERED ||
1743 	    dev->reg_state == NETREG_UNREGISTERING) {
1744 		ASSERT_RTNL();
1745 
1746 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
1747 						  txq);
1748 		if (rc)
1749 			return rc;
1750 
1751 		if (dev->num_tc)
1752 			netif_setup_tc(dev, txq);
1753 
1754 		if (txq < dev->real_num_tx_queues)
1755 			qdisc_reset_all_tx_gt(dev, txq);
1756 	}
1757 
1758 	dev->real_num_tx_queues = txq;
1759 	return 0;
1760 }
1761 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
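
/*
 * Illustrative caller (not taken from a real driver): a multiqueue driver
 * that sized dev->num_tx_queues at alloc_netdev time can later shrink the
 * active set, e.g. to one queue per online CPU, holding rtnl_lock() once the
 * device is registered:
 *
 *	netif_set_real_num_tx_queues(dev, min_t(unsigned int,
 *						 dev->num_tx_queues,
 *						 num_online_cpus()));
 */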
1762 
1763 #ifdef CONFIG_RPS
1764 /**
1765  *	netif_set_real_num_rx_queues - set actual number of RX queues used
1766  *	@dev: Network device
1767  *	@rxq: Actual number of RX queues
1768  *
1769  *	This must be called either with the rtnl_lock held or before
1770  *	registration of the net device.  Returns 0 on success, or a
1771  *	negative error code.  If called before registration, it always
1772  *	succeeds.
1773  */
1774 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
1775 {
1776 	int rc;
1777 
1778 	if (rxq < 1 || rxq > dev->num_rx_queues)
1779 		return -EINVAL;
1780 
1781 	if (dev->reg_state == NETREG_REGISTERED) {
1782 		ASSERT_RTNL();
1783 
1784 		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
1785 						  rxq);
1786 		if (rc)
1787 			return rc;
1788 	}
1789 
1790 	dev->real_num_rx_queues = rxq;
1791 	return 0;
1792 }
1793 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
1794 #endif
1795 
1796 static inline void __netif_reschedule(struct Qdisc *q)
1797 {
1798 	struct softnet_data *sd;
1799 	unsigned long flags;
1800 
1801 	local_irq_save(flags);
1802 	sd = &__get_cpu_var(softnet_data);
1803 	q->next_sched = NULL;
1804 	*sd->output_queue_tailp = q;
1805 	sd->output_queue_tailp = &q->next_sched;
1806 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
1807 	local_irq_restore(flags);
1808 }
1809 
1810 void __netif_schedule(struct Qdisc *q)
1811 {
1812 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1813 		__netif_reschedule(q);
1814 }
1815 EXPORT_SYMBOL(__netif_schedule);
1816 
1817 void dev_kfree_skb_irq(struct sk_buff *skb)
1818 {
1819 	if (atomic_dec_and_test(&skb->users)) {
1820 		struct softnet_data *sd;
1821 		unsigned long flags;
1822 
1823 		local_irq_save(flags);
1824 		sd = &__get_cpu_var(softnet_data);
1825 		skb->next = sd->completion_queue;
1826 		sd->completion_queue = skb;
1827 		raise_softirq_irqoff(NET_TX_SOFTIRQ);
1828 		local_irq_restore(flags);
1829 	}
1830 }
1831 EXPORT_SYMBOL(dev_kfree_skb_irq);
1832 
1833 void dev_kfree_skb_any(struct sk_buff *skb)
1834 {
1835 	if (in_irq() || irqs_disabled())
1836 		dev_kfree_skb_irq(skb);
1837 	else
1838 		dev_kfree_skb(skb);
1839 }
1840 EXPORT_SYMBOL(dev_kfree_skb_any);
1841 
1842 
1843 /**
1844  * netif_device_detach - mark device as removed
1845  * @dev: network device
1846  *
1847  * Mark device as removed from the system and therefore no longer available.
1848  */
1849 void netif_device_detach(struct net_device *dev)
1850 {
1851 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1852 	    netif_running(dev)) {
1853 		netif_tx_stop_all_queues(dev);
1854 	}
1855 }
1856 EXPORT_SYMBOL(netif_device_detach);
1857 
1858 /**
1859  * netif_device_attach - mark device as attached
1860  * @dev: network device
1861  *
1862  * Mark device as attached to the system and restart it if needed.
1863  */
1864 void netif_device_attach(struct net_device *dev)
1865 {
1866 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1867 	    netif_running(dev)) {
1868 		netif_tx_wake_all_queues(dev);
1869 		__netdev_watchdog_up(dev);
1870 	}
1871 }
1872 EXPORT_SYMBOL(netif_device_attach);
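
/*
 * A minimal usage sketch (not from this file): the detach/attach pair is
 * typically used around suspend/resume so the stack stops handing packets
 * to hardware that is powered down.  The example_* names are hypothetical.
 *
 *	static int example_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *netdev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(netdev);
 *		example_hw_quiesce(netdev);
 *		return 0;
 *	}
 *
 *	static int example_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *netdev = pci_get_drvdata(pdev);
 *
 *		example_hw_restart(netdev);
 *		netif_device_attach(netdev);
 *		return 0;
 *	}
 */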
1873 
1874 static void skb_warn_bad_offload(const struct sk_buff *skb)
1875 {
1876 	static const netdev_features_t null_features = 0;
1877 	struct net_device *dev = skb->dev;
1878 	const char *driver = "";
1879 
1880 	if (dev && dev->dev.parent)
1881 		driver = dev_driver_string(dev->dev.parent);
1882 
1883 	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
1884 	     "gso_type=%d ip_summed=%d\n",
1885 	     driver, dev ? &dev->features : &null_features,
1886 	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
1887 	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
1888 	     skb_shinfo(skb)->gso_type, skb->ip_summed);
1889 }
1890 
1891 /*
1892  * Invalidate hardware checksum when packet is to be mangled, and
1893  * complete checksum manually on outgoing path.
1894  */
1895 int skb_checksum_help(struct sk_buff *skb)
1896 {
1897 	__wsum csum;
1898 	int ret = 0, offset;
1899 
1900 	if (skb->ip_summed == CHECKSUM_COMPLETE)
1901 		goto out_set_summed;
1902 
1903 	if (unlikely(skb_shinfo(skb)->gso_size)) {
1904 		skb_warn_bad_offload(skb);
1905 		return -EINVAL;
1906 	}
1907 
1908 	offset = skb_checksum_start_offset(skb);
1909 	BUG_ON(offset >= skb_headlen(skb));
1910 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
1911 
1912 	offset += skb->csum_offset;
1913 	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1914 
1915 	if (skb_cloned(skb) &&
1916 	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1917 		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1918 		if (ret)
1919 			goto out;
1920 	}
1921 
1922 	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
1923 out_set_summed:
1924 	skb->ip_summed = CHECKSUM_NONE;
1925 out:
1926 	return ret;
1927 }
1928 EXPORT_SYMBOL(skb_checksum_help);
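
/*
 * A minimal usage sketch (not from this file): a driver whose hardware
 * cannot checksum a particular packet can fall back to resolving
 * CHECKSUM_PARTIAL in software before handing the frame to the NIC.
 * example_hw_can_csum() is a hypothetical capability check.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !example_hw_can_csum(priv, skb)) {
 *		if (skb_checksum_help(skb))
 *			goto drop;
 *	}
 */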
1929 
1930 /**
1931  *	skb_gso_segment - Perform segmentation on skb.
1932  *	@skb: buffer to segment
1933  *	@features: features for the output path (see dev->features)
1934  *
1935  *	This function segments the given skb and returns a list of segments.
1936  *
1937  *	It may return NULL if the skb requires no segmentation.  This is
1938  *	only possible when GSO is used for verifying header integrity.
1939  */
1940 struct sk_buff *skb_gso_segment(struct sk_buff *skb,
1941 	netdev_features_t features)
1942 {
1943 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1944 	struct packet_type *ptype;
1945 	__be16 type = skb->protocol;
1946 	int vlan_depth = ETH_HLEN;
1947 	int err;
1948 
1949 	while (type == htons(ETH_P_8021Q)) {
1950 		struct vlan_hdr *vh;
1951 
1952 		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
1953 			return ERR_PTR(-EINVAL);
1954 
1955 		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
1956 		type = vh->h_vlan_encapsulated_proto;
1957 		vlan_depth += VLAN_HLEN;
1958 	}
1959 
1960 	skb_reset_mac_header(skb);
1961 	skb->mac_len = skb->network_header - skb->mac_header;
1962 	__skb_pull(skb, skb->mac_len);
1963 
1964 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1965 		skb_warn_bad_offload(skb);
1966 
1967 		if (skb_header_cloned(skb) &&
1968 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1969 			return ERR_PTR(err);
1970 	}
1971 
1972 	rcu_read_lock();
1973 	list_for_each_entry_rcu(ptype,
1974 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1975 		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1976 			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1977 				err = ptype->gso_send_check(skb);
1978 				segs = ERR_PTR(err);
1979 				if (err || skb_gso_ok(skb, features))
1980 					break;
1981 				__skb_push(skb, (skb->data -
1982 						 skb_network_header(skb)));
1983 			}
1984 			segs = ptype->gso_segment(skb, features);
1985 			break;
1986 		}
1987 	}
1988 	rcu_read_unlock();
1989 
1990 	__skb_push(skb, skb->data - skb_mac_header(skb));
1991 
1992 	return segs;
1993 }
1994 EXPORT_SYMBOL(skb_gso_segment);
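
/*
 * A usage sketch (not from this file): one common software-GSO fallback
 * pattern is to segment the skb and transmit each resulting segment with a
 * single-frame helper.  example_xmit_one() is hypothetical; the returned
 * segments are independent skbs, so the original can be released once the
 * list has been handed off, while a NULL return means the original needs
 * no segmentation and is sent as-is.
 *
 *	struct sk_buff *segs, *next;
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs)) {
 *		dev_kfree_skb_any(skb);
 *		return NETDEV_TX_OK;
 *	}
 *	if (!segs)
 *		return example_xmit_one(skb);
 *
 *	dev_kfree_skb_any(skb);
 *	while (segs) {
 *		next = segs->next;
 *		segs->next = NULL;
 *		example_xmit_one(segs);
 *		segs = next;
 *	}
 *	return NETDEV_TX_OK;
 */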
1995 
1996 /* Take action when hardware reception checksum errors are detected. */
1997 #ifdef CONFIG_BUG
1998 void netdev_rx_csum_fault(struct net_device *dev)
1999 {
2000 	if (net_ratelimit()) {
2001 		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2002 		dump_stack();
2003 	}
2004 }
2005 EXPORT_SYMBOL(netdev_rx_csum_fault);
2006 #endif
2007 
2008 /* Actually, we should eliminate this check as soon as we know that:
2009  * 1. An IOMMU is present and allows mapping all of memory.
2010  * 2. No high memory really exists on this machine.
2011  */
2012 
2013 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2014 {
2015 #ifdef CONFIG_HIGHMEM
2016 	int i;
2017 	if (!(dev->features & NETIF_F_HIGHDMA)) {
2018 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2019 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2020 			if (PageHighMem(skb_frag_page(frag)))
2021 				return 1;
2022 		}
2023 	}
2024 
2025 	if (PCI_DMA_BUS_IS_PHYS) {
2026 		struct device *pdev = dev->dev.parent;
2027 
2028 		if (!pdev)
2029 			return 0;
2030 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2031 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2032 			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2033 			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2034 				return 1;
2035 		}
2036 	}
2037 #endif
2038 	return 0;
2039 }
2040 
2041 struct dev_gso_cb {
2042 	void (*destructor)(struct sk_buff *skb);
2043 };
2044 
2045 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2046 
2047 static void dev_gso_skb_destructor(struct sk_buff *skb)
2048 {
2049 	struct dev_gso_cb *cb;
2050 
2051 	do {
2052 		struct sk_buff *nskb = skb->next;
2053 
2054 		skb->next = nskb->next;
2055 		nskb->next = NULL;
2056 		kfree_skb(nskb);
2057 	} while (skb->next);
2058 
2059 	cb = DEV_GSO_CB(skb);
2060 	if (cb->destructor)
2061 		cb->destructor(skb);
2062 }
2063 
2064 /**
2065  *	dev_gso_segment - Perform emulated hardware segmentation on skb.
2066  *	@skb: buffer to segment
2067  *	@features: device features as applicable to this skb
2068  *
2069  *	This function segments the given skb and stores the list of segments
2070  *	in skb->next.
2071  */
2072 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2073 {
2074 	struct sk_buff *segs;
2075 
2076 	segs = skb_gso_segment(skb, features);
2077 
2078 	/* Verifying header integrity only. */
2079 	if (!segs)
2080 		return 0;
2081 
2082 	if (IS_ERR(segs))
2083 		return PTR_ERR(segs);
2084 
2085 	skb->next = segs;
2086 	DEV_GSO_CB(skb)->destructor = skb->destructor;
2087 	skb->destructor = dev_gso_skb_destructor;
2088 
2089 	return 0;
2090 }
2091 
2092 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
2093 {
2094 	return ((features & NETIF_F_GEN_CSUM) ||
2095 		((features & NETIF_F_V4_CSUM) &&
2096 		 protocol == htons(ETH_P_IP)) ||
2097 		((features & NETIF_F_V6_CSUM) &&
2098 		 protocol == htons(ETH_P_IPV6)) ||
2099 		((features & NETIF_F_FCOE_CRC) &&
2100 		 protocol == htons(ETH_P_FCOE)));
2101 }
2102 
2103 static netdev_features_t harmonize_features(struct sk_buff *skb,
2104 	__be16 protocol, netdev_features_t features)
2105 {
2106 	if (!can_checksum_protocol(features, protocol)) {
2107 		features &= ~NETIF_F_ALL_CSUM;
2108 		features &= ~NETIF_F_SG;
2109 	} else if (illegal_highdma(skb->dev, skb)) {
2110 		features &= ~NETIF_F_SG;
2111 	}
2112 
2113 	return features;
2114 }
2115 
2116 netdev_features_t netif_skb_features(struct sk_buff *skb)
2117 {
2118 	__be16 protocol = skb->protocol;
2119 	netdev_features_t features = skb->dev->features;
2120 
2121 	if (protocol == htons(ETH_P_8021Q)) {
2122 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2123 		protocol = veh->h_vlan_encapsulated_proto;
2124 	} else if (!vlan_tx_tag_present(skb)) {
2125 		return harmonize_features(skb, protocol, features);
2126 	}
2127 
2128 	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
2129 
2130 	if (protocol != htons(ETH_P_8021Q)) {
2131 		return harmonize_features(skb, protocol, features);
2132 	} else {
2133 		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2134 				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
2135 		return harmonize_features(skb, protocol, features);
2136 	}
2137 }
2138 EXPORT_SYMBOL(netif_skb_features);
2139 
2140 /*
2141  * Returns true if either:
2142  *	1. skb has frag_list and the device doesn't support FRAGLIST, or
2143  *	2. skb is fragmented and the device does not support SG, or if
2144  *	   at least one of the fragments is in highmem and the device does
2145  *	   not support DMA from it.
2146  */
2147 static inline int skb_needs_linearize(struct sk_buff *skb,
2148 				      int features)
2149 {
2150 	return skb_is_nonlinear(skb) &&
2151 			((skb_has_frag_list(skb) &&
2152 				!(features & NETIF_F_FRAGLIST)) ||
2153 			(skb_shinfo(skb)->nr_frags &&
2154 				!(features & NETIF_F_SG)));
2155 }
2156 
2157 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2158 			struct netdev_queue *txq)
2159 {
2160 	const struct net_device_ops *ops = dev->netdev_ops;
2161 	int rc = NETDEV_TX_OK;
2162 	unsigned int skb_len;
2163 
2164 	if (likely(!skb->next)) {
2165 		netdev_features_t features;
2166 
2167 		/*
2168 		 * If the device doesn't need skb->dst, release it right now while
2169 		 * it's hot in this CPU's cache
2170 		 */
2171 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2172 			skb_dst_drop(skb);
2173 
2174 		if (!list_empty(&ptype_all))
2175 			dev_queue_xmit_nit(skb, dev);
2176 
2177 		features = netif_skb_features(skb);
2178 
2179 		if (vlan_tx_tag_present(skb) &&
2180 		    !(features & NETIF_F_HW_VLAN_TX)) {
2181 			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2182 			if (unlikely(!skb))
2183 				goto out;
2184 
2185 			skb->vlan_tci = 0;
2186 		}
2187 
2188 		if (netif_needs_gso(skb, features)) {
2189 			if (unlikely(dev_gso_segment(skb, features)))
2190 				goto out_kfree_skb;
2191 			if (skb->next)
2192 				goto gso;
2193 		} else {
2194 			if (skb_needs_linearize(skb, features) &&
2195 			    __skb_linearize(skb))
2196 				goto out_kfree_skb;
2197 
2198 			/* If packet is not checksummed and device does not
2199 			 * support checksumming for this protocol, complete
2200 			 * checksumming here.
2201 			 */
2202 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
2203 				skb_set_transport_header(skb,
2204 					skb_checksum_start_offset(skb));
2205 				if (!(features & NETIF_F_ALL_CSUM) &&
2206 				     skb_checksum_help(skb))
2207 					goto out_kfree_skb;
2208 			}
2209 		}
2210 
2211 		skb_len = skb->len;
2212 		rc = ops->ndo_start_xmit(skb, dev);
2213 		trace_net_dev_xmit(skb, rc, dev, skb_len);
2214 		if (rc == NETDEV_TX_OK)
2215 			txq_trans_update(txq);
2216 		return rc;
2217 	}
2218 
2219 gso:
2220 	do {
2221 		struct sk_buff *nskb = skb->next;
2222 
2223 		skb->next = nskb->next;
2224 		nskb->next = NULL;
2225 
2226 		/*
2227 		 * If the device doesn't need nskb->dst, release it right now while
2228 		 * it's hot in this CPU's cache
2229 		 */
2230 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2231 			skb_dst_drop(nskb);
2232 
2233 		skb_len = nskb->len;
2234 		rc = ops->ndo_start_xmit(nskb, dev);
2235 		trace_net_dev_xmit(nskb, rc, dev, skb_len);
2236 		if (unlikely(rc != NETDEV_TX_OK)) {
2237 			if (rc & ~NETDEV_TX_MASK)
2238 				goto out_kfree_gso_skb;
2239 			nskb->next = skb->next;
2240 			skb->next = nskb;
2241 			return rc;
2242 		}
2243 		txq_trans_update(txq);
2244 		if (unlikely(netif_xmit_stopped(txq) && skb->next))
2245 			return NETDEV_TX_BUSY;
2246 	} while (skb->next);
2247 
2248 out_kfree_gso_skb:
2249 	if (likely(skb->next == NULL))
2250 		skb->destructor = DEV_GSO_CB(skb)->destructor;
2251 out_kfree_skb:
2252 	kfree_skb(skb);
2253 out:
2254 	return rc;
2255 }
2256 
2257 static u32 hashrnd __read_mostly;
2258 
2259 /*
2260  * Returns a Tx hash based on the given packet descriptor and the number of
2261  * Tx queues to be used as a distribution range.
2262  */
2263 u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2264 		  unsigned int num_tx_queues)
2265 {
2266 	u32 hash;
2267 	u16 qoffset = 0;
2268 	u16 qcount = num_tx_queues;
2269 
2270 	if (skb_rx_queue_recorded(skb)) {
2271 		hash = skb_get_rx_queue(skb);
2272 		while (unlikely(hash >= num_tx_queues))
2273 			hash -= num_tx_queues;
2274 		return hash;
2275 	}
2276 
2277 	if (dev->num_tc) {
2278 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2279 		qoffset = dev->tc_to_txq[tc].offset;
2280 		qcount = dev->tc_to_txq[tc].count;
2281 	}
2282 
2283 	if (skb->sk && skb->sk->sk_hash)
2284 		hash = skb->sk->sk_hash;
2285 	else
2286 		hash = (__force u16) skb->protocol;
2287 	hash = jhash_1word(hash, hashrnd);
2288 
2289 	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
2290 }
2291 EXPORT_SYMBOL(__skb_tx_hash);
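
/*
 * Note on the scaling above: ((u64)hash * qcount) >> 32 maps a 32-bit hash
 * uniformly onto [0, qcount) without a modulo or divide.  For example, with
 * qcount = 8 and hash = 0x80000000 (the midpoint of the 32-bit range), the
 * product is 0x400000000 and the upper 32 bits give queue index 4.
 */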
2292 
2293 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2294 {
2295 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2296 		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2297 				     dev->name, queue_index,
2298 				     dev->real_num_tx_queues);
2299 		return 0;
2300 	}
2301 	return queue_index;
2302 }
2303 
2304 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2305 {
2306 #ifdef CONFIG_XPS
2307 	struct xps_dev_maps *dev_maps;
2308 	struct xps_map *map;
2309 	int queue_index = -1;
2310 
2311 	rcu_read_lock();
2312 	dev_maps = rcu_dereference(dev->xps_maps);
2313 	if (dev_maps) {
2314 		map = rcu_dereference(
2315 		    dev_maps->cpu_map[raw_smp_processor_id()]);
2316 		if (map) {
2317 			if (map->len == 1)
2318 				queue_index = map->queues[0];
2319 			else {
2320 				u32 hash;
2321 				if (skb->sk && skb->sk->sk_hash)
2322 					hash = skb->sk->sk_hash;
2323 				else
2324 					hash = (__force u16) skb->protocol ^
2325 					    skb->rxhash;
2326 				hash = jhash_1word(hash, hashrnd);
2327 				queue_index = map->queues[
2328 				    ((u64)hash * map->len) >> 32];
2329 			}
2330 			if (unlikely(queue_index >= dev->real_num_tx_queues))
2331 				queue_index = -1;
2332 		}
2333 	}
2334 	rcu_read_unlock();
2335 
2336 	return queue_index;
2337 #else
2338 	return -1;
2339 #endif
2340 }
2341 
2342 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2343 					struct sk_buff *skb)
2344 {
2345 	int queue_index;
2346 	const struct net_device_ops *ops = dev->netdev_ops;
2347 
2348 	if (dev->real_num_tx_queues == 1)
2349 		queue_index = 0;
2350 	else if (ops->ndo_select_queue) {
2351 		queue_index = ops->ndo_select_queue(dev, skb);
2352 		queue_index = dev_cap_txqueue(dev, queue_index);
2353 	} else {
2354 		struct sock *sk = skb->sk;
2355 		queue_index = sk_tx_queue_get(sk);
2356 
2357 		if (queue_index < 0 || skb->ooo_okay ||
2358 		    queue_index >= dev->real_num_tx_queues) {
2359 			int old_index = queue_index;
2360 
2361 			queue_index = get_xps_queue(dev, skb);
2362 			if (queue_index < 0)
2363 				queue_index = skb_tx_hash(dev, skb);
2364 
2365 			if (queue_index != old_index && sk) {
2366 				struct dst_entry *dst =
2367 				    rcu_dereference_check(sk->sk_dst_cache, 1);
2368 
2369 				if (dst && skb_dst(skb) == dst)
2370 					sk_tx_queue_set(sk, queue_index);
2371 			}
2372 		}
2373 	}
2374 
2375 	skb_set_queue_mapping(skb, queue_index);
2376 	return netdev_get_tx_queue(dev, queue_index);
2377 }
2378 
2379 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2380 				 struct net_device *dev,
2381 				 struct netdev_queue *txq)
2382 {
2383 	spinlock_t *root_lock = qdisc_lock(q);
2384 	bool contended;
2385 	int rc;
2386 
2387 	qdisc_skb_cb(skb)->pkt_len = skb->len;
2388 	qdisc_calculate_pkt_len(skb, q);
2389 	/*
2390 	 * Heuristic to force contended enqueues to serialize on a
2391 	 * separate lock before trying to get the qdisc main lock.
2392 	 * This permits the __QDISC_STATE_RUNNING owner to get the lock more often
2393 	 * and dequeue packets faster.
2394 	 */
2395 	contended = qdisc_is_running(q);
2396 	if (unlikely(contended))
2397 		spin_lock(&q->busylock);
2398 
2399 	spin_lock(root_lock);
2400 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2401 		kfree_skb(skb);
2402 		rc = NET_XMIT_DROP;
2403 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2404 		   qdisc_run_begin(q)) {
2405 		/*
2406 		 * This is a work-conserving queue; there are no old skbs
2407 		 * waiting to be sent out; and the qdisc is not running -
2408 		 * xmit the skb directly.
2409 		 */
2410 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2411 			skb_dst_force(skb);
2412 
2413 		qdisc_bstats_update(q, skb);
2414 
2415 		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2416 			if (unlikely(contended)) {
2417 				spin_unlock(&q->busylock);
2418 				contended = false;
2419 			}
2420 			__qdisc_run(q);
2421 		} else
2422 			qdisc_run_end(q);
2423 
2424 		rc = NET_XMIT_SUCCESS;
2425 	} else {
2426 		skb_dst_force(skb);
2427 		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2428 		if (qdisc_run_begin(q)) {
2429 			if (unlikely(contended)) {
2430 				spin_unlock(&q->busylock);
2431 				contended = false;
2432 			}
2433 			__qdisc_run(q);
2434 		}
2435 	}
2436 	spin_unlock(root_lock);
2437 	if (unlikely(contended))
2438 		spin_unlock(&q->busylock);
2439 	return rc;
2440 }
2441 
2442 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2443 static void skb_update_prio(struct sk_buff *skb)
2444 {
2445 	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2446 
2447 	if ((!skb->priority) && (skb->sk) && map)
2448 		skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
2449 }
2450 #else
2451 #define skb_update_prio(skb)
2452 #endif
2453 
2454 static DEFINE_PER_CPU(int, xmit_recursion);
2455 #define RECURSION_LIMIT 10
2456 
2457 /**
2458  *	dev_loopback_xmit - loop back @skb
2459  *	@skb: buffer to transmit
2460  */
2461 int dev_loopback_xmit(struct sk_buff *skb)
2462 {
2463 	skb_reset_mac_header(skb);
2464 	__skb_pull(skb, skb_network_offset(skb));
2465 	skb->pkt_type = PACKET_LOOPBACK;
2466 	skb->ip_summed = CHECKSUM_UNNECESSARY;
2467 	WARN_ON(!skb_dst(skb));
2468 	skb_dst_force(skb);
2469 	netif_rx_ni(skb);
2470 	return 0;
2471 }
2472 EXPORT_SYMBOL(dev_loopback_xmit);
2473 
2474 /**
2475  *	dev_queue_xmit - transmit a buffer
2476  *	@skb: buffer to transmit
2477  *
2478  *	Queue a buffer for transmission to a network device. The caller must
2479  *	have set the device and priority and built the buffer before calling
2480  *	this function. The function can be called from an interrupt.
2481  *
2482  *	A negative errno code is returned on a failure. A success does not
2483  *	guarantee the frame will be transmitted as it may be dropped due
2484  *	to congestion or traffic shaping.
2485  *
2486  * -----------------------------------------------------------------------------------
2487  *      I notice this method can also return errors from the queue disciplines,
2488  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
2489  *      be positive.
2490  *
2491  *      Regardless of the return value, the skb is consumed, so it is currently
2492  *      difficult to retry a send to this method.  (You can bump the ref count
2493  *      before sending to hold a reference for retry if you are careful.)
2494  *
2495  *      When calling this method, interrupts MUST be enabled.  This is because
2496  *      the BH enable code must have IRQs enabled so that it will not deadlock.
2497  *          --BLG
2498  */
2499 int dev_queue_xmit(struct sk_buff *skb)
2500 {
2501 	struct net_device *dev = skb->dev;
2502 	struct netdev_queue *txq;
2503 	struct Qdisc *q;
2504 	int rc = -ENOMEM;
2505 
2506 	/* Disable soft irqs for various locks below. Also
2507 	 * stops preemption for RCU.
2508 	 */
2509 	rcu_read_lock_bh();
2510 
2511 	skb_update_prio(skb);
2512 
2513 	txq = dev_pick_tx(dev, skb);
2514 	q = rcu_dereference_bh(txq->qdisc);
2515 
2516 #ifdef CONFIG_NET_CLS_ACT
2517 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2518 #endif
2519 	trace_net_dev_queue(skb);
2520 	if (q->enqueue) {
2521 		rc = __dev_xmit_skb(skb, q, dev, txq);
2522 		goto out;
2523 	}
2524 
2525 	/* The device has no queue. Common case for software devices:
2526 	   loopback, all the sorts of tunnels...
2527 
2528 	   Really, it is unlikely that netif_tx_lock protection is necessary
2529 	   here.  (e.g. loopback and IP tunnels are safe, statistics
2530 	   counters aside.)
2531 	   However, it is possible that they rely on the protection
2532 	   made by us here.
2533 
2534 	   Check this and, if so, remove the lock. It is not prone to deadlocks.
2535 	   Or remove the noqueue qdisc entirely; that is even simpler 8)
2536 	 */
2537 	if (dev->flags & IFF_UP) {
2538 		int cpu = smp_processor_id(); /* ok because BHs are off */
2539 
2540 		if (txq->xmit_lock_owner != cpu) {
2541 
2542 			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2543 				goto recursion_alert;
2544 
2545 			HARD_TX_LOCK(dev, txq, cpu);
2546 
2547 			if (!netif_xmit_stopped(txq)) {
2548 				__this_cpu_inc(xmit_recursion);
2549 				rc = dev_hard_start_xmit(skb, dev, txq);
2550 				__this_cpu_dec(xmit_recursion);
2551 				if (dev_xmit_complete(rc)) {
2552 					HARD_TX_UNLOCK(dev, txq);
2553 					goto out;
2554 				}
2555 			}
2556 			HARD_TX_UNLOCK(dev, txq);
2557 			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2558 					     dev->name);
2559 		} else {
2560 			/* Recursion is detected! It is possible,
2561 			 * unfortunately
2562 			 */
2563 recursion_alert:
2564 			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2565 					     dev->name);
2566 		}
2567 	}
2568 
2569 	rc = -ENETDOWN;
2570 	rcu_read_unlock_bh();
2571 
2572 	kfree_skb(skb);
2573 	return rc;
2574 out:
2575 	rcu_read_unlock_bh();
2576 	return rc;
2577 }
2578 EXPORT_SYMBOL(dev_queue_xmit);
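
/*
 * A minimal usage sketch (not from this file): transmitting a pre-built
 * link-layer frame from kernel code.  frame and frame_len are hypothetical;
 * note that the skb is consumed even when a (possibly positive) error code
 * is returned.
 *
 *	struct sk_buff *skb = alloc_skb(frame_len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, frame_len), frame, frame_len);
 *	skb_reset_mac_header(skb);
 *	skb->dev = dev;
 *	skb->protocol = eth_hdr(skb)->h_proto;
 *	rc = dev_queue_xmit(skb);
 */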
2579 
2580 
2581 /*=======================================================================
2582 			Receiver routines
2583   =======================================================================*/
2584 
2585 int netdev_max_backlog __read_mostly = 1000;
2586 int netdev_tstamp_prequeue __read_mostly = 1;
2587 int netdev_budget __read_mostly = 300;
2588 int weight_p __read_mostly = 64;            /* old backlog weight */
2589 
2590 /* Called with irq disabled */
2591 static inline void ____napi_schedule(struct softnet_data *sd,
2592 				     struct napi_struct *napi)
2593 {
2594 	list_add_tail(&napi->poll_list, &sd->poll_list);
2595 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
2596 }
2597 
2598 /*
2599  * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2600  * and src/dst port numbers.  Sets rxhash in skb to a non-zero hash value
2601  * on success; zero indicates no valid hash.  Also sets l4_rxhash in skb
2602  * if hash is a canonical 4-tuple hash over transport ports.
2603  */
2604 void __skb_get_rxhash(struct sk_buff *skb)
2605 {
2606 	struct flow_keys keys;
2607 	u32 hash;
2608 
2609 	if (!skb_flow_dissect(skb, &keys))
2610 		return;
2611 
2612 	if (keys.ports) {
2613 		if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
2614 			swap(keys.port16[0], keys.port16[1]);
2615 		skb->l4_rxhash = 1;
2616 	}
2617 
2618 	/* get a consistent hash (same value on both flow directions) */
2619 	if ((__force u32)keys.dst < (__force u32)keys.src)
2620 		swap(keys.dst, keys.src);
2621 
2622 	hash = jhash_3words((__force u32)keys.dst,
2623 			    (__force u32)keys.src,
2624 			    (__force u32)keys.ports, hashrnd);
2625 	if (!hash)
2626 		hash = 1;
2627 
2628 	skb->rxhash = hash;
2629 }
2630 EXPORT_SYMBOL(__skb_get_rxhash);
2631 
2632 #ifdef CONFIG_RPS
2633 
2634 /* One global table that all flow-based protocols share. */
2635 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2636 EXPORT_SYMBOL(rps_sock_flow_table);
2637 
2638 struct static_key rps_needed __read_mostly;
2639 
2640 static struct rps_dev_flow *
2641 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2642 	    struct rps_dev_flow *rflow, u16 next_cpu)
2643 {
2644 	if (next_cpu != RPS_NO_CPU) {
2645 #ifdef CONFIG_RFS_ACCEL
2646 		struct netdev_rx_queue *rxqueue;
2647 		struct rps_dev_flow_table *flow_table;
2648 		struct rps_dev_flow *old_rflow;
2649 		u32 flow_id;
2650 		u16 rxq_index;
2651 		int rc;
2652 
2653 		/* Should we steer this flow to a different hardware queue? */
2654 		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2655 		    !(dev->features & NETIF_F_NTUPLE))
2656 			goto out;
2657 		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2658 		if (rxq_index == skb_get_rx_queue(skb))
2659 			goto out;
2660 
2661 		rxqueue = dev->_rx + rxq_index;
2662 		flow_table = rcu_dereference(rxqueue->rps_flow_table);
2663 		if (!flow_table)
2664 			goto out;
2665 		flow_id = skb->rxhash & flow_table->mask;
2666 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2667 							rxq_index, flow_id);
2668 		if (rc < 0)
2669 			goto out;
2670 		old_rflow = rflow;
2671 		rflow = &flow_table->flows[flow_id];
2672 		rflow->filter = rc;
2673 		if (old_rflow->filter == rflow->filter)
2674 			old_rflow->filter = RPS_NO_FILTER;
2675 	out:
2676 #endif
2677 		rflow->last_qtail =
2678 			per_cpu(softnet_data, next_cpu).input_queue_head;
2679 	}
2680 
2681 	rflow->cpu = next_cpu;
2682 	return rflow;
2683 }
2684 
2685 /*
2686  * get_rps_cpu is called from netif_receive_skb and returns the target
2687  * CPU from the RPS map of the receiving queue for a given skb.
2688  * rcu_read_lock must be held on entry.
2689  */
2690 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2691 		       struct rps_dev_flow **rflowp)
2692 {
2693 	struct netdev_rx_queue *rxqueue;
2694 	struct rps_map *map;
2695 	struct rps_dev_flow_table *flow_table;
2696 	struct rps_sock_flow_table *sock_flow_table;
2697 	int cpu = -1;
2698 	u16 tcpu;
2699 
2700 	if (skb_rx_queue_recorded(skb)) {
2701 		u16 index = skb_get_rx_queue(skb);
2702 		if (unlikely(index >= dev->real_num_rx_queues)) {
2703 			WARN_ONCE(dev->real_num_rx_queues > 1,
2704 				  "%s received packet on queue %u, but number "
2705 				  "of RX queues is %u\n",
2706 				  dev->name, index, dev->real_num_rx_queues);
2707 			goto done;
2708 		}
2709 		rxqueue = dev->_rx + index;
2710 	} else
2711 		rxqueue = dev->_rx;
2712 
2713 	map = rcu_dereference(rxqueue->rps_map);
2714 	if (map) {
2715 		if (map->len == 1 &&
2716 		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
2717 			tcpu = map->cpus[0];
2718 			if (cpu_online(tcpu))
2719 				cpu = tcpu;
2720 			goto done;
2721 		}
2722 	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2723 		goto done;
2724 	}
2725 
2726 	skb_reset_network_header(skb);
2727 	if (!skb_get_rxhash(skb))
2728 		goto done;
2729 
2730 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
2731 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
2732 	if (flow_table && sock_flow_table) {
2733 		u16 next_cpu;
2734 		struct rps_dev_flow *rflow;
2735 
2736 		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2737 		tcpu = rflow->cpu;
2738 
2739 		next_cpu = sock_flow_table->ents[skb->rxhash &
2740 		    sock_flow_table->mask];
2741 
2742 		/*
2743 		 * If the desired CPU (where last recvmsg was done) is
2744 		 * different from current CPU (one in the rx-queue flow
2745 		 * table entry), switch if one of the following holds:
2746 		 *   - Current CPU is unset (equal to RPS_NO_CPU).
2747 		 *   - Current CPU is offline.
2748 		 *   - The current CPU's queue tail has advanced beyond the
2749 		 *     last packet that was enqueued using this table entry.
2750 		 *     This guarantees that all previous packets for the flow
2751 		 *     have been dequeued, thus preserving in order delivery.
2752 		 */
2753 		if (unlikely(tcpu != next_cpu) &&
2754 		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2755 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2756 		      rflow->last_qtail)) >= 0))
2757 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
2758 
2759 		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2760 			*rflowp = rflow;
2761 			cpu = tcpu;
2762 			goto done;
2763 		}
2764 	}
2765 
2766 	if (map) {
2767 		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2768 
2769 		if (cpu_online(tcpu)) {
2770 			cpu = tcpu;
2771 			goto done;
2772 		}
2773 	}
2774 
2775 done:
2776 	return cpu;
2777 }
2778 
2779 #ifdef CONFIG_RFS_ACCEL
2780 
2781 /**
2782  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
2783  * @dev: Device on which the filter was set
2784  * @rxq_index: RX queue index
2785  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
2786  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
2787  *
2788  * Drivers that implement ndo_rx_flow_steer() should periodically call
2789  * this function for each installed filter and remove the filters for
2790  * which it returns %true.
2791  */
2792 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
2793 			 u32 flow_id, u16 filter_id)
2794 {
2795 	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
2796 	struct rps_dev_flow_table *flow_table;
2797 	struct rps_dev_flow *rflow;
2798 	bool expire = true;
2799 	int cpu;
2800 
2801 	rcu_read_lock();
2802 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
2803 	if (flow_table && flow_id <= flow_table->mask) {
2804 		rflow = &flow_table->flows[flow_id];
2805 		cpu = ACCESS_ONCE(rflow->cpu);
2806 		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
2807 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
2808 			   rflow->last_qtail) <
2809 		     (int)(10 * flow_table->mask)))
2810 			expire = false;
2811 	}
2812 	rcu_read_unlock();
2813 	return expire;
2814 }
2815 EXPORT_SYMBOL(rps_may_expire_flow);
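
/*
 * A usage sketch (not from this file), following the kernel-doc above: a
 * driver implementing ndo_rx_flow_steer() periodically scans its installed
 * filters and removes the ones this helper reports as expirable.  The
 * example_* structures and helpers are hypothetical.
 *
 *	for (i = 0; i < priv->n_filters; i++) {
 *		struct example_filter *f = &priv->filters[i];
 *
 *		if (!f->installed)
 *			continue;
 *		if (rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, f->filter_id)) {
 *			example_hw_del_filter(priv, f);
 *			f->installed = false;
 *		}
 *	}
 */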
2816 
2817 #endif /* CONFIG_RFS_ACCEL */
2818 
2819 /* Called from hardirq (IPI) context */
2820 static void rps_trigger_softirq(void *data)
2821 {
2822 	struct softnet_data *sd = data;
2823 
2824 	____napi_schedule(sd, &sd->backlog);
2825 	sd->received_rps++;
2826 }
2827 
2828 #endif /* CONFIG_RPS */
2829 
2830 /*
2831  * Check whether this softnet_data structure belongs to another CPU.
2832  * If so, queue it on our IPI list and return 1;
2833  * otherwise, return 0.
2834  */
2835 static int rps_ipi_queued(struct softnet_data *sd)
2836 {
2837 #ifdef CONFIG_RPS
2838 	struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2839 
2840 	if (sd != mysd) {
2841 		sd->rps_ipi_next = mysd->rps_ipi_list;
2842 		mysd->rps_ipi_list = sd;
2843 
2844 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
2845 		return 1;
2846 	}
2847 #endif /* CONFIG_RPS */
2848 	return 0;
2849 }
2850 
2851 /*
2852  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2853  * queue (may be a remote CPU queue).
2854  */
2855 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2856 			      unsigned int *qtail)
2857 {
2858 	struct softnet_data *sd;
2859 	unsigned long flags;
2860 
2861 	sd = &per_cpu(softnet_data, cpu);
2862 
2863 	local_irq_save(flags);
2864 
2865 	rps_lock(sd);
2866 	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2867 		if (skb_queue_len(&sd->input_pkt_queue)) {
2868 enqueue:
2869 			__skb_queue_tail(&sd->input_pkt_queue, skb);
2870 			input_queue_tail_incr_save(sd, qtail);
2871 			rps_unlock(sd);
2872 			local_irq_restore(flags);
2873 			return NET_RX_SUCCESS;
2874 		}
2875 
2876 		/* Schedule NAPI for the backlog device.
2877 		 * We can use a non-atomic operation since we own the queue lock.
2878 		 */
2879 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
2880 			if (!rps_ipi_queued(sd))
2881 				____napi_schedule(sd, &sd->backlog);
2882 		}
2883 		goto enqueue;
2884 	}
2885 
2886 	sd->dropped++;
2887 	rps_unlock(sd);
2888 
2889 	local_irq_restore(flags);
2890 
2891 	atomic_long_inc(&skb->dev->rx_dropped);
2892 	kfree_skb(skb);
2893 	return NET_RX_DROP;
2894 }
2895 
2896 /**
2897  *	netif_rx	-	post buffer to the network code
2898  *	@skb: buffer to post
2899  *
2900  *	This function receives a packet from a device driver and queues it for
2901  *	the upper (protocol) levels to process.  It always succeeds. The buffer
2902  *	may be dropped during processing for congestion control or by the
2903  *	protocol layers.
2904  *
2905  *	return values:
2906  *	NET_RX_SUCCESS	(no congestion)
2907  *	NET_RX_DROP     (packet was dropped)
2908  *
2909  */
2910 
2911 int netif_rx(struct sk_buff *skb)
2912 {
2913 	int ret;
2914 
2915 	/* if netpoll wants it, pretend we never saw it */
2916 	if (netpoll_rx(skb))
2917 		return NET_RX_DROP;
2918 
2919 	net_timestamp_check(netdev_tstamp_prequeue, skb);
2920 
2921 	trace_netif_rx(skb);
2922 #ifdef CONFIG_RPS
2923 	if (static_key_false(&rps_needed)) {
2924 		struct rps_dev_flow voidflow, *rflow = &voidflow;
2925 		int cpu;
2926 
2927 		preempt_disable();
2928 		rcu_read_lock();
2929 
2930 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
2931 		if (cpu < 0)
2932 			cpu = smp_processor_id();
2933 
2934 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2935 
2936 		rcu_read_unlock();
2937 		preempt_enable();
2938 	} else
2939 #endif
2940 	{
2941 		unsigned int qtail;
2942 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2943 		put_cpu();
2944 	}
2945 	return ret;
2946 }
2947 EXPORT_SYMBOL(netif_rx);
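
/*
 * A minimal usage sketch (not from this file): a legacy (non-NAPI) driver
 * typically calls netif_rx() from its RX interrupt handler after copying
 * the frame out of the hardware ring.  rx_buf and pkt_len are hypothetical.
 *
 *	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *	if (!skb) {
 *		dev->stats.rx_dropped++;
 *		return;
 *	}
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */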
2948 
2949 int netif_rx_ni(struct sk_buff *skb)
2950 {
2951 	int err;
2952 
2953 	preempt_disable();
2954 	err = netif_rx(skb);
2955 	if (local_softirq_pending())
2956 		do_softirq();
2957 	preempt_enable();
2958 
2959 	return err;
2960 }
2961 EXPORT_SYMBOL(netif_rx_ni);
2962 
2963 static void net_tx_action(struct softirq_action *h)
2964 {
2965 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
2966 
2967 	if (sd->completion_queue) {
2968 		struct sk_buff *clist;
2969 
2970 		local_irq_disable();
2971 		clist = sd->completion_queue;
2972 		sd->completion_queue = NULL;
2973 		local_irq_enable();
2974 
2975 		while (clist) {
2976 			struct sk_buff *skb = clist;
2977 			clist = clist->next;
2978 
2979 			WARN_ON(atomic_read(&skb->users));
2980 			trace_kfree_skb(skb, net_tx_action);
2981 			__kfree_skb(skb);
2982 		}
2983 	}
2984 
2985 	if (sd->output_queue) {
2986 		struct Qdisc *head;
2987 
2988 		local_irq_disable();
2989 		head = sd->output_queue;
2990 		sd->output_queue = NULL;
2991 		sd->output_queue_tailp = &sd->output_queue;
2992 		local_irq_enable();
2993 
2994 		while (head) {
2995 			struct Qdisc *q = head;
2996 			spinlock_t *root_lock;
2997 
2998 			head = head->next_sched;
2999 
3000 			root_lock = qdisc_lock(q);
3001 			if (spin_trylock(root_lock)) {
3002 				smp_mb__before_clear_bit();
3003 				clear_bit(__QDISC_STATE_SCHED,
3004 					  &q->state);
3005 				qdisc_run(q);
3006 				spin_unlock(root_lock);
3007 			} else {
3008 				if (!test_bit(__QDISC_STATE_DEACTIVATED,
3009 					      &q->state)) {
3010 					__netif_reschedule(q);
3011 				} else {
3012 					smp_mb__before_clear_bit();
3013 					clear_bit(__QDISC_STATE_SCHED,
3014 						  &q->state);
3015 				}
3016 			}
3017 		}
3018 	}
3019 }
3020 
3021 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3022     (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3023 /* This hook is defined here for ATM LANE */
3024 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3025 			     unsigned char *addr) __read_mostly;
3026 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3027 #endif
3028 
3029 #ifdef CONFIG_NET_CLS_ACT
3030 /* TODO: Maybe we should just force sch_ingress to be compiled in
3031  * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
3032  * instructions (a compare and two extra stores) when sch_ingress is
3033  * not configured but CONFIG_NET_CLS_ACT is.
3034  * NOTE: This doesn't stop any functionality; if you don't have
3035  * the ingress scheduler, you just can't add policies on ingress.
3036  *
3037  */
3038 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3039 {
3040 	struct net_device *dev = skb->dev;
3041 	u32 ttl = G_TC_RTTL(skb->tc_verd);
3042 	int result = TC_ACT_OK;
3043 	struct Qdisc *q;
3044 
3045 	if (unlikely(MAX_RED_LOOP < ttl++)) {
3046 		net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3047 				     skb->skb_iif, dev->ifindex);
3048 		return TC_ACT_SHOT;
3049 	}
3050 
3051 	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3052 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3053 
3054 	q = rxq->qdisc;
3055 	if (q != &noop_qdisc) {
3056 		spin_lock(qdisc_lock(q));
3057 		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3058 			result = qdisc_enqueue_root(skb, q);
3059 		spin_unlock(qdisc_lock(q));
3060 	}
3061 
3062 	return result;
3063 }
3064 
3065 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3066 					 struct packet_type **pt_prev,
3067 					 int *ret, struct net_device *orig_dev)
3068 {
3069 	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3070 
3071 	if (!rxq || rxq->qdisc == &noop_qdisc)
3072 		goto out;
3073 
3074 	if (*pt_prev) {
3075 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
3076 		*pt_prev = NULL;
3077 	}
3078 
3079 	switch (ing_filter(skb, rxq)) {
3080 	case TC_ACT_SHOT:
3081 	case TC_ACT_STOLEN:
3082 		kfree_skb(skb);
3083 		return NULL;
3084 	}
3085 
3086 out:
3087 	skb->tc_verd = 0;
3088 	return skb;
3089 }
3090 #endif
3091 
3092 /**
3093  *	netdev_rx_handler_register - register receive handler
3094  *	@dev: device to register a handler for
3095  *	@rx_handler: receive handler to register
3096  *	@rx_handler_data: data pointer that is used by rx handler
3097  *
3098  *	Register a receive handler for a device. This handler will then be
3099  *	called from __netif_receive_skb. A negative errno code is returned
3100  *	on a failure.
3101  *
3102  *	The caller must hold the rtnl_mutex.
3103  *
3104  *	For a general description of rx_handler, see enum rx_handler_result.
3105  */
3106 int netdev_rx_handler_register(struct net_device *dev,
3107 			       rx_handler_func_t *rx_handler,
3108 			       void *rx_handler_data)
3109 {
3110 	ASSERT_RTNL();
3111 
3112 	if (dev->rx_handler)
3113 		return -EBUSY;
3114 
3115 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3116 	rcu_assign_pointer(dev->rx_handler, rx_handler);
3117 
3118 	return 0;
3119 }
3120 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
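
/*
 * A minimal usage sketch (not from this file): bridge/bonding-style users
 * register a handler that inspects every frame arriving on a port device.
 * example_handle_frame(), example_inspect() and priv are hypothetical;
 * returning RX_HANDLER_PASS lets normal delivery continue.
 *
 *	static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		example_inspect(skb);
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(dev, example_handle_frame, priv);
 *	rtnl_unlock();
 */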
3121 
3122 /**
3123  *	netdev_rx_handler_unregister - unregister receive handler
3124  *	@dev: device to unregister a handler from
3125  *
3126  *	Unregister a receive handler from a device.
3127  *
3128  *	The caller must hold the rtnl_mutex.
3129  */
3130 void netdev_rx_handler_unregister(struct net_device *dev)
3131 {
3132 
3133 	ASSERT_RTNL();
3134 	RCU_INIT_POINTER(dev->rx_handler, NULL);
3135 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3136 }
3137 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3138 
3139 static int __netif_receive_skb(struct sk_buff *skb)
3140 {
3141 	struct packet_type *ptype, *pt_prev;
3142 	rx_handler_func_t *rx_handler;
3143 	struct net_device *orig_dev;
3144 	struct net_device *null_or_dev;
3145 	bool deliver_exact = false;
3146 	int ret = NET_RX_DROP;
3147 	__be16 type;
3148 
3149 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
3150 
3151 	trace_netif_receive_skb(skb);
3152 
3153 	/* if we've gotten here through NAPI, check netpoll */
3154 	if (netpoll_receive_skb(skb))
3155 		return NET_RX_DROP;
3156 
3157 	if (!skb->skb_iif)
3158 		skb->skb_iif = skb->dev->ifindex;
3159 	orig_dev = skb->dev;
3160 
3161 	skb_reset_network_header(skb);
3162 	skb_reset_transport_header(skb);
3163 	skb_reset_mac_len(skb);
3164 
3165 	pt_prev = NULL;
3166 
3167 	rcu_read_lock();
3168 
3169 another_round:
3170 
3171 	__this_cpu_inc(softnet_data.processed);
3172 
3173 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3174 		skb = vlan_untag(skb);
3175 		if (unlikely(!skb))
3176 			goto out;
3177 	}
3178 
3179 #ifdef CONFIG_NET_CLS_ACT
3180 	if (skb->tc_verd & TC_NCLS) {
3181 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3182 		goto ncls;
3183 	}
3184 #endif
3185 
3186 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
3187 		if (!ptype->dev || ptype->dev == skb->dev) {
3188 			if (pt_prev)
3189 				ret = deliver_skb(skb, pt_prev, orig_dev);
3190 			pt_prev = ptype;
3191 		}
3192 	}
3193 
3194 #ifdef CONFIG_NET_CLS_ACT
3195 	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3196 	if (!skb)
3197 		goto out;
3198 ncls:
3199 #endif
3200 
3201 	rx_handler = rcu_dereference(skb->dev->rx_handler);
3202 	if (vlan_tx_tag_present(skb)) {
3203 		if (pt_prev) {
3204 			ret = deliver_skb(skb, pt_prev, orig_dev);
3205 			pt_prev = NULL;
3206 		}
3207 		if (vlan_do_receive(&skb, !rx_handler))
3208 			goto another_round;
3209 		else if (unlikely(!skb))
3210 			goto out;
3211 	}
3212 
3213 	if (rx_handler) {
3214 		if (pt_prev) {
3215 			ret = deliver_skb(skb, pt_prev, orig_dev);
3216 			pt_prev = NULL;
3217 		}
3218 		switch (rx_handler(&skb)) {
3219 		case RX_HANDLER_CONSUMED:
3220 			goto out;
3221 		case RX_HANDLER_ANOTHER:
3222 			goto another_round;
3223 		case RX_HANDLER_EXACT:
3224 			deliver_exact = true;
3225 		case RX_HANDLER_PASS:
3226 			break;
3227 		default:
3228 			BUG();
3229 		}
3230 	}
3231 
3232 	/* deliver only exact match when indicated */
3233 	null_or_dev = deliver_exact ? skb->dev : NULL;
3234 
3235 	type = skb->protocol;
3236 	list_for_each_entry_rcu(ptype,
3237 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3238 		if (ptype->type == type &&
3239 		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3240 		     ptype->dev == orig_dev)) {
3241 			if (pt_prev)
3242 				ret = deliver_skb(skb, pt_prev, orig_dev);
3243 			pt_prev = ptype;
3244 		}
3245 	}
3246 
3247 	if (pt_prev) {
3248 		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3249 	} else {
3250 		atomic_long_inc(&skb->dev->rx_dropped);
3251 		kfree_skb(skb);
3252 		/* Jamal, now you will not be able to escape explaining
3253 		 * to me how you were going to use this. :-)
3254 		 */
3255 		ret = NET_RX_DROP;
3256 	}
3257 
3258 out:
3259 	rcu_read_unlock();
3260 	return ret;
3261 }
3262 
3263 /**
3264  *	netif_receive_skb - process receive buffer from network
3265  *	@skb: buffer to process
3266  *
3267  *	netif_receive_skb() is the main receive data processing function.
3268  *	It always succeeds. The buffer may be dropped during processing
3269  *	for congestion control or by the protocol layers.
3270  *
3271  *	This function may only be called from softirq context and interrupts
3272  *	should be enabled.
3273  *
3274  *	Return values (usually ignored):
3275  *	NET_RX_SUCCESS: no congestion
3276  *	NET_RX_DROP: packet was dropped
3277  */
3278 int netif_receive_skb(struct sk_buff *skb)
3279 {
3280 	net_timestamp_check(netdev_tstamp_prequeue, skb);
3281 
3282 	if (skb_defer_rx_timestamp(skb))
3283 		return NET_RX_SUCCESS;
3284 
3285 #ifdef CONFIG_RPS
3286 	if (static_key_false(&rps_needed)) {
3287 		struct rps_dev_flow voidflow, *rflow = &voidflow;
3288 		int cpu, ret;
3289 
3290 		rcu_read_lock();
3291 
3292 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
3293 
3294 		if (cpu >= 0) {
3295 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3296 			rcu_read_unlock();
3297 			return ret;
3298 		}
3299 		rcu_read_unlock();
3300 	}
3301 #endif
3302 	return __netif_receive_skb(skb);
3303 }
3304 EXPORT_SYMBOL(netif_receive_skb);
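
/*
 * A minimal usage sketch (not from this file): a NAPI driver delivers each
 * completed RX frame from its ->poll() callback (softirq context,
 * interrupts enabled), either directly as below or via napi_gro_receive()
 * when GRO is wanted.
 *
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	netif_receive_skb(skb);
 */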
3305 
3306 /* Network device is going away; flush any packets still pending.
3307  * Called with irqs disabled.
3308  */
3309 static void flush_backlog(void *arg)
3310 {
3311 	struct net_device *dev = arg;
3312 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
3313 	struct sk_buff *skb, *tmp;
3314 
3315 	rps_lock(sd);
3316 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3317 		if (skb->dev == dev) {
3318 			__skb_unlink(skb, &sd->input_pkt_queue);
3319 			kfree_skb(skb);
3320 			input_queue_head_incr(sd);
3321 		}
3322 	}
3323 	rps_unlock(sd);
3324 
3325 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3326 		if (skb->dev == dev) {
3327 			__skb_unlink(skb, &sd->process_queue);
3328 			kfree_skb(skb);
3329 			input_queue_head_incr(sd);
3330 		}
3331 	}
3332 }
3333 
3334 static int napi_gro_complete(struct sk_buff *skb)
3335 {
3336 	struct packet_type *ptype;
3337 	__be16 type = skb->protocol;
3338 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3339 	int err = -ENOENT;
3340 
3341 	if (NAPI_GRO_CB(skb)->count == 1) {
3342 		skb_shinfo(skb)->gso_size = 0;
3343 		goto out;
3344 	}
3345 
3346 	rcu_read_lock();
3347 	list_for_each_entry_rcu(ptype, head, list) {
3348 		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3349 			continue;
3350 
3351 		err = ptype->gro_complete(skb);
3352 		break;
3353 	}
3354 	rcu_read_unlock();
3355 
3356 	if (err) {
3357 		WARN_ON(&ptype->list == head);
3358 		kfree_skb(skb);
3359 		return NET_RX_SUCCESS;
3360 	}
3361 
3362 out:
3363 	return netif_receive_skb(skb);
3364 }
3365 
3366 inline void napi_gro_flush(struct napi_struct *napi)
3367 {
3368 	struct sk_buff *skb, *next;
3369 
3370 	for (skb = napi->gro_list; skb; skb = next) {
3371 		next = skb->next;
3372 		skb->next = NULL;
3373 		napi_gro_complete(skb);
3374 	}
3375 
3376 	napi->gro_count = 0;
3377 	napi->gro_list = NULL;
3378 }
3379 EXPORT_SYMBOL(napi_gro_flush);
3380 
3381 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3382 {
3383 	struct sk_buff **pp = NULL;
3384 	struct packet_type *ptype;
3385 	__be16 type = skb->protocol;
3386 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3387 	int same_flow;
3388 	int mac_len;
3389 	enum gro_result ret;
3390 
3391 	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3392 		goto normal;
3393 
3394 	if (skb_is_gso(skb) || skb_has_frag_list(skb))
3395 		goto normal;
3396 
3397 	rcu_read_lock();
3398 	list_for_each_entry_rcu(ptype, head, list) {
3399 		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3400 			continue;
3401 
3402 		skb_set_network_header(skb, skb_gro_offset(skb));
3403 		mac_len = skb->network_header - skb->mac_header;
3404 		skb->mac_len = mac_len;
3405 		NAPI_GRO_CB(skb)->same_flow = 0;
3406 		NAPI_GRO_CB(skb)->flush = 0;
3407 		NAPI_GRO_CB(skb)->free = 0;
3408 
3409 		pp = ptype->gro_receive(&napi->gro_list, skb);
3410 		break;
3411 	}
3412 	rcu_read_unlock();
3413 
3414 	if (&ptype->list == head)
3415 		goto normal;
3416 
3417 	same_flow = NAPI_GRO_CB(skb)->same_flow;
3418 	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3419 
3420 	if (pp) {
3421 		struct sk_buff *nskb = *pp;
3422 
3423 		*pp = nskb->next;
3424 		nskb->next = NULL;
3425 		napi_gro_complete(nskb);
3426 		napi->gro_count--;
3427 	}
3428 
3429 	if (same_flow)
3430 		goto ok;
3431 
3432 	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3433 		goto normal;
3434 
3435 	napi->gro_count++;
3436 	NAPI_GRO_CB(skb)->count = 1;
3437 	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3438 	skb->next = napi->gro_list;
3439 	napi->gro_list = skb;
3440 	ret = GRO_HELD;
3441 
3442 pull:
3443 	if (skb_headlen(skb) < skb_gro_offset(skb)) {
3444 		int grow = skb_gro_offset(skb) - skb_headlen(skb);
3445 
3446 		BUG_ON(skb->end - skb->tail < grow);
3447 
3448 		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3449 
3450 		skb->tail += grow;
3451 		skb->data_len -= grow;
3452 
3453 		skb_shinfo(skb)->frags[0].page_offset += grow;
3454 		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3455 
3456 		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3457 			skb_frag_unref(skb, 0);
3458 			memmove(skb_shinfo(skb)->frags,
3459 				skb_shinfo(skb)->frags + 1,
3460 				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3461 		}
3462 	}
3463 
3464 ok:
3465 	return ret;
3466 
3467 normal:
3468 	ret = GRO_NORMAL;
3469 	goto pull;
3470 }
3471 EXPORT_SYMBOL(dev_gro_receive);
3472 
3473 static inline gro_result_t
3474 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3475 {
3476 	struct sk_buff *p;
3477 	unsigned int maclen = skb->dev->hard_header_len;
3478 
3479 	for (p = napi->gro_list; p; p = p->next) {
3480 		unsigned long diffs;
3481 
3482 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3483 		diffs |= p->vlan_tci ^ skb->vlan_tci;
3484 		if (maclen == ETH_HLEN)
3485 			diffs |= compare_ether_header(skb_mac_header(p),
3486 						      skb_gro_mac_header(skb));
3487 		else if (!diffs)
3488 			diffs = memcmp(skb_mac_header(p),
3489 				       skb_gro_mac_header(skb),
3490 				       maclen);
3491 		NAPI_GRO_CB(p)->same_flow = !diffs;
3492 		NAPI_GRO_CB(p)->flush = 0;
3493 	}
3494 
3495 	return dev_gro_receive(napi, skb);
3496 }
3497 
3498 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3499 {
3500 	switch (ret) {
3501 	case GRO_NORMAL:
3502 		if (netif_receive_skb(skb))
3503 			ret = GRO_DROP;
3504 		break;
3505 
3506 	case GRO_DROP:
3507 		kfree_skb(skb);
3508 		break;
3509 
3510 	case GRO_MERGED_FREE:
3511 		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3512 			kmem_cache_free(skbuff_head_cache, skb);
3513 		else
3514 			__kfree_skb(skb);
3515 		break;
3516 
3517 	case GRO_HELD:
3518 	case GRO_MERGED:
3519 		break;
3520 	}
3521 
3522 	return ret;
3523 }
3524 EXPORT_SYMBOL(napi_skb_finish);
3525 
3526 void skb_gro_reset_offset(struct sk_buff *skb)
3527 {
3528 	NAPI_GRO_CB(skb)->data_offset = 0;
3529 	NAPI_GRO_CB(skb)->frag0 = NULL;
3530 	NAPI_GRO_CB(skb)->frag0_len = 0;
3531 
3532 	if (skb->mac_header == skb->tail &&
3533 	    !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
3534 		NAPI_GRO_CB(skb)->frag0 =
3535 			skb_frag_address(&skb_shinfo(skb)->frags[0]);
3536 		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
3537 	}
3538 }
3539 EXPORT_SYMBOL(skb_gro_reset_offset);
3540 
3541 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3542 {
3543 	skb_gro_reset_offset(skb);
3544 
3545 	return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3546 }
3547 EXPORT_SYMBOL(napi_gro_receive);
3548 
3549 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3550 {
3551 	__skb_pull(skb, skb_headlen(skb));
3552 	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
3553 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3554 	skb->vlan_tci = 0;
3555 	skb->dev = napi->dev;
3556 	skb->skb_iif = 0;
3557 
3558 	napi->skb = skb;
3559 }
3560 
3561 struct sk_buff *napi_get_frags(struct napi_struct *napi)
3562 {
3563 	struct sk_buff *skb = napi->skb;
3564 
3565 	if (!skb) {
3566 		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3567 		if (skb)
3568 			napi->skb = skb;
3569 	}
3570 	return skb;
3571 }
3572 EXPORT_SYMBOL(napi_get_frags);
3573 
3574 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3575 			       gro_result_t ret)
3576 {
3577 	switch (ret) {
3578 	case GRO_NORMAL:
3579 	case GRO_HELD:
3580 		skb->protocol = eth_type_trans(skb, skb->dev);
3581 
3582 		if (ret == GRO_HELD)
3583 			skb_gro_pull(skb, -ETH_HLEN);
3584 		else if (netif_receive_skb(skb))
3585 			ret = GRO_DROP;
3586 		break;
3587 
3588 	case GRO_DROP:
3589 	case GRO_MERGED_FREE:
3590 		napi_reuse_skb(napi, skb);
3591 		break;
3592 
3593 	case GRO_MERGED:
3594 		break;
3595 	}
3596 
3597 	return ret;
3598 }
3599 EXPORT_SYMBOL(napi_frags_finish);
3600 
3601 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3602 {
3603 	struct sk_buff *skb = napi->skb;
3604 	struct ethhdr *eth;
3605 	unsigned int hlen;
3606 	unsigned int off;
3607 
3608 	napi->skb = NULL;
3609 
3610 	skb_reset_mac_header(skb);
3611 	skb_gro_reset_offset(skb);
3612 
3613 	off = skb_gro_offset(skb);
3614 	hlen = off + sizeof(*eth);
3615 	eth = skb_gro_header_fast(skb, off);
3616 	if (skb_gro_header_hard(skb, hlen)) {
3617 		eth = skb_gro_header_slow(skb, hlen, off);
3618 		if (unlikely(!eth)) {
3619 			napi_reuse_skb(napi, skb);
3620 			skb = NULL;
3621 			goto out;
3622 		}
3623 	}
3624 
3625 	skb_gro_pull(skb, sizeof(*eth));
3626 
3627 	/*
3628 	 * This works because the only protocols we care about don't require
3629 	 * special handling.  We'll fix it up properly at the end.
3630 	 */
3631 	skb->protocol = eth->h_proto;
3632 
3633 out:
3634 	return skb;
3635 }
3636 
3637 gro_result_t napi_gro_frags(struct napi_struct *napi)
3638 {
3639 	struct sk_buff *skb = napi_frags_skb(napi);
3640 
3641 	if (!skb)
3642 		return GRO_DROP;
3643 
3644 	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3645 }
3646 EXPORT_SYMBOL(napi_gro_frags);
3647 
3648 /*
3649  * net_rps_action sends any pending IPIs for RPS.
3650  * Note: called with local irq disabled, but exits with local irq enabled.
3651  */
3652 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3653 {
3654 #ifdef CONFIG_RPS
3655 	struct softnet_data *remsd = sd->rps_ipi_list;
3656 
3657 	if (remsd) {
3658 		sd->rps_ipi_list = NULL;
3659 
3660 		local_irq_enable();
3661 
3662 		/* Send pending IPIs to kick RPS processing on remote CPUs. */
3663 		while (remsd) {
3664 			struct softnet_data *next = remsd->rps_ipi_next;
3665 
3666 			if (cpu_online(remsd->cpu))
3667 				__smp_call_function_single(remsd->cpu,
3668 							   &remsd->csd, 0);
3669 			remsd = next;
3670 		}
3671 	} else
3672 #endif
3673 		local_irq_enable();
3674 }
3675 
3676 static int process_backlog(struct napi_struct *napi, int quota)
3677 {
3678 	int work = 0;
3679 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3680 
3681 #ifdef CONFIG_RPS
3682 	/* Check if we have pending IPIs; it's better to send them now
3683 	 * rather than waiting for net_rx_action() to end.
3684 	 */
3685 	if (sd->rps_ipi_list) {
3686 		local_irq_disable();
3687 		net_rps_action_and_irq_enable(sd);
3688 	}
3689 #endif
3690 	napi->weight = weight_p;
3691 	local_irq_disable();
3692 	while (work < quota) {
3693 		struct sk_buff *skb;
3694 		unsigned int qlen;
3695 
3696 		while ((skb = __skb_dequeue(&sd->process_queue))) {
3697 			local_irq_enable();
3698 			__netif_receive_skb(skb);
3699 			local_irq_disable();
3700 			input_queue_head_incr(sd);
3701 			if (++work >= quota) {
3702 				local_irq_enable();
3703 				return work;
3704 			}
3705 		}
3706 
3707 		rps_lock(sd);
3708 		qlen = skb_queue_len(&sd->input_pkt_queue);
3709 		if (qlen)
3710 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
3711 						   &sd->process_queue);
3712 
3713 		if (qlen < quota - work) {
3714 			/*
3715 			 * Inline a custom version of __napi_complete().
3716 			 * Only the current CPU owns and manipulates this napi,
3717 			 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
3718 			 * We can use a plain write instead of clear_bit(),
3719 			 * and we don't need an smp_mb() memory barrier.
3720 			 */
3721 			list_del(&napi->poll_list);
3722 			napi->state = 0;
3723 
3724 			quota = work + qlen;
3725 		}
3726 		rps_unlock(sd);
3727 	}
3728 	local_irq_enable();
3729 
3730 	return work;
3731 }
3732 
3733 /**
3734  * __napi_schedule - schedule for receive
3735  * @n: entry to schedule
3736  *
3737  * The entry's receive function will be scheduled to run
3738  */
3739 void __napi_schedule(struct napi_struct *n)
3740 {
3741 	unsigned long flags;
3742 
3743 	local_irq_save(flags);
3744 	____napi_schedule(&__get_cpu_var(softnet_data), n);
3745 	local_irq_restore(flags);
3746 }
3747 EXPORT_SYMBOL(__napi_schedule);
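
/*
 * A usage sketch (not from this file) of the scheduling path above: a NAPI
 * driver's interrupt handler masks its own RX interrupt and schedules the
 * napi instance (napi_schedule() ends up in __napi_schedule()); the poll
 * callback completes the instance and re-enables the interrupt once it has
 * done less work than its budget.  All example_* names are hypothetical.
 *
 *	static irqreturn_t example_isr(int irq, void *data)
 *	{
 *		struct example_priv *priv = data;
 *
 *		example_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct example_priv *priv =
 *			container_of(napi, struct example_priv, napi);
 *		int work = example_clean_rx(priv, budget);
 *
 *		if (work < budget) {
 *			napi_complete(napi);
 *			example_enable_rx_irq(priv);
 *		}
 *		return work;
 *	}
 *
 *	netif_napi_add(dev, &priv->napi, example_poll, 64);
 */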
3748 
3749 void __napi_complete(struct napi_struct *n)
3750 {
3751 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3752 	BUG_ON(n->gro_list);
3753 
3754 	list_del(&n->poll_list);
3755 	smp_mb__before_clear_bit();
3756 	clear_bit(NAPI_STATE_SCHED, &n->state);
3757 }
3758 EXPORT_SYMBOL(__napi_complete);
3759 
3760 void napi_complete(struct napi_struct *n)
3761 {
3762 	unsigned long flags;
3763 
3764 	/*
3765 	 * Don't let napi dequeue from the CPU poll list
3766 	 * just in case it's running on a different CPU.
3767 	 */
3768 	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3769 		return;
3770 
3771 	napi_gro_flush(n);
3772 	local_irq_save(flags);
3773 	__napi_complete(n);
3774 	local_irq_restore(flags);
3775 }
3776 EXPORT_SYMBOL(napi_complete);
3777 
3778 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3779 		    int (*poll)(struct napi_struct *, int), int weight)
3780 {
3781 	INIT_LIST_HEAD(&napi->poll_list);
3782 	napi->gro_count = 0;
3783 	napi->gro_list = NULL;
3784 	napi->skb = NULL;
3785 	napi->poll = poll;
3786 	napi->weight = weight;
3787 	list_add(&napi->dev_list, &dev->napi_list);
3788 	napi->dev = dev;
3789 #ifdef CONFIG_NETPOLL
3790 	spin_lock_init(&napi->poll_lock);
3791 	napi->poll_owner = -1;
3792 #endif
3793 	set_bit(NAPI_STATE_SCHED, &napi->state);
3794 }
3795 EXPORT_SYMBOL(netif_napi_add);
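/*
 * Illustrative sketch (not part of this file): registering the NAPI
 * context from a hypothetical probe routine.  A weight of 64 is the
 * conventional choice for Ethernet drivers of this era:
 *
 *	netif_napi_add(netdev, &priv->napi, foo_poll, 64);
 *	napi_enable(&priv->napi);	// usually done from ndo_open
 */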
3796 
3797 void netif_napi_del(struct napi_struct *napi)
3798 {
3799 	struct sk_buff *skb, *next;
3800 
3801 	list_del_init(&napi->dev_list);
3802 	napi_free_frags(napi);
3803 
3804 	for (skb = napi->gro_list; skb; skb = next) {
3805 		next = skb->next;
3806 		skb->next = NULL;
3807 		kfree_skb(skb);
3808 	}
3809 
3810 	napi->gro_list = NULL;
3811 	napi->gro_count = 0;
3812 }
3813 EXPORT_SYMBOL(netif_napi_del);
3814 
3815 static void net_rx_action(struct softirq_action *h)
3816 {
3817 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
3818 	unsigned long time_limit = jiffies + 2;
3819 	int budget = netdev_budget;
3820 	void *have;
3821 
3822 	local_irq_disable();
3823 
3824 	while (!list_empty(&sd->poll_list)) {
3825 		struct napi_struct *n;
3826 		int work, weight;
3827 
3828 		/* If the softirq window is exhausted then punt.
3829 		 * Allow this to run for 2 jiffies, which allows
3830 		 * an average latency of 1.5/HZ.
3831 		 */
3832 		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3833 			goto softnet_break;
3834 
3835 		local_irq_enable();
3836 
3837 		/* Even though interrupts have been re-enabled, this
3838 		 * access is safe because interrupts can only add new
3839 		 * entries to the tail of this list, and only ->poll()
3840 		 * calls can remove this head entry from the list.
3841 		 */
3842 		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
3843 
3844 		have = netpoll_poll_lock(n);
3845 
3846 		weight = n->weight;
3847 
3848 		/* This NAPI_STATE_SCHED test is for avoiding a race
3849 		 * with netpoll's poll_napi().  Only the entity which
3850 		 * obtains the lock and sees NAPI_STATE_SCHED set will
3851 		 * actually make the ->poll() call.  Therefore we avoid
3852 		 * accidentally calling ->poll() when NAPI is not scheduled.
3853 		 */
3854 		work = 0;
3855 		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3856 			work = n->poll(n, weight);
3857 			trace_napi_poll(n);
3858 		}
3859 
3860 		WARN_ON_ONCE(work > weight);
3861 
3862 		budget -= work;
3863 
3864 		local_irq_disable();
3865 
3866 		/* Drivers must not modify the NAPI state if they
3867 		 * consume the entire weight.  In such cases this code
3868 		 * still "owns" the NAPI instance and therefore can
3869 		 * move the instance around on the list at-will.
3870 		 */
3871 		if (unlikely(work == weight)) {
3872 			if (unlikely(napi_disable_pending(n))) {
3873 				local_irq_enable();
3874 				napi_complete(n);
3875 				local_irq_disable();
3876 			} else
3877 				list_move_tail(&n->poll_list, &sd->poll_list);
3878 		}
3879 
3880 		netpoll_poll_unlock(have);
3881 	}
3882 out:
3883 	net_rps_action_and_irq_enable(sd);
3884 
3885 #ifdef CONFIG_NET_DMA
3886 	/*
3887 	 * There may not be any more sk_buffs coming right now, so push
3888 	 * any pending DMA copies to hardware
3889 	 */
3890 	dma_issue_pending_all();
3891 #endif
3892 
3893 	return;
3894 
3895 softnet_break:
3896 	sd->time_squeeze++;
3897 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
3898 	goto out;
3899 }
3900 
3901 static gifconf_func_t *gifconf_list[NPROTO];
3902 
3903 /**
3904  *	register_gifconf	-	register a SIOCGIF handler
3905  *	@family: Address family
3906  *	@gifconf: Function handler
3907  *
3908  *	Register protocol dependent address dumping routines. The handler
3909  *	that is passed must not be freed or reused until it has been replaced
3910  *	by another handler.
3911  */
3912 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
3913 {
3914 	if (family >= NPROTO)
3915 		return -EINVAL;
3916 	gifconf_list[family] = gifconf;
3917 	return 0;
3918 }
3919 EXPORT_SYMBOL(register_gifconf);
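/*
 * Illustrative sketch (not part of this file): this is how an address
 * family registers its SIOCGIFCONF helper; IPv4, for example, does so
 * from its own init path:
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 *
 * where inet_gifconf() fills one struct ifreq per address of the device,
 * or reports the space it would need when called with a NULL buffer.
 */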
3920 
3921 
3922 /*
3923  *	Map an interface index to its name (SIOCGIFNAME)
3924  */
3925 
3926 /*
3927  *	We need this ioctl for efficient implementation of the
3928  *	if_indextoname() function required by the IPv6 API.  Without
3929  *	it, we would have to search all the interfaces to find a
3930  *	match.  --pb
3931  */
3932 
3933 static int dev_ifname(struct net *net, struct ifreq __user *arg)
3934 {
3935 	struct net_device *dev;
3936 	struct ifreq ifr;
3937 
3938 	/*
3939 	 *	Fetch the caller's info block.
3940 	 */
3941 
3942 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3943 		return -EFAULT;
3944 
3945 	rcu_read_lock();
3946 	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
3947 	if (!dev) {
3948 		rcu_read_unlock();
3949 		return -ENODEV;
3950 	}
3951 
3952 	strcpy(ifr.ifr_name, dev->name);
3953 	rcu_read_unlock();
3954 
3955 	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3956 		return -EFAULT;
3957 	return 0;
3958 }
3959 
3960 /*
3961  *	Perform a SIOCGIFCONF call. This structure will change
3962  *	size eventually, and there is nothing I can do about it.
3963  *	Thus we will need a 'compatibility mode'.
3964  */
3965 
3966 static int dev_ifconf(struct net *net, char __user *arg)
3967 {
3968 	struct ifconf ifc;
3969 	struct net_device *dev;
3970 	char __user *pos;
3971 	int len;
3972 	int total;
3973 	int i;
3974 
3975 	/*
3976 	 *	Fetch the caller's info block.
3977 	 */
3978 
3979 	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3980 		return -EFAULT;
3981 
3982 	pos = ifc.ifc_buf;
3983 	len = ifc.ifc_len;
3984 
3985 	/*
3986 	 *	Loop over the interfaces, and write an info block for each.
3987 	 */
3988 
3989 	total = 0;
3990 	for_each_netdev(net, dev) {
3991 		for (i = 0; i < NPROTO; i++) {
3992 			if (gifconf_list[i]) {
3993 				int done;
3994 				if (!pos)
3995 					done = gifconf_list[i](dev, NULL, 0);
3996 				else
3997 					done = gifconf_list[i](dev, pos + total,
3998 							       len - total);
3999 				if (done < 0)
4000 					return -EFAULT;
4001 				total += done;
4002 			}
4003 		}
4004 	}
4005 
4006 	/*
4007 	 *	All done.  Write the updated control block back to the caller.
4008 	 */
4009 	ifc.ifc_len = total;
4010 
4011 	/*
4012 	 * 	Both BSD and Solaris return 0 here, so we do too.
4013 	 */
4014 	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4015 }
4016 
4017 #ifdef CONFIG_PROC_FS
4018 
4019 #define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
4020 
4021 #define get_bucket(x) ((x) >> BUCKET_SPACE)
4022 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4023 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4024 
4025 static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
4026 {
4027 	struct net *net = seq_file_net(seq);
4028 	struct net_device *dev;
4029 	struct hlist_node *p;
4030 	struct hlist_head *h;
4031 	unsigned int count = 0, offset = get_offset(*pos);
4032 
4033 	h = &net->dev_name_head[get_bucket(*pos)];
4034 	hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
4035 		if (++count == offset)
4036 			return dev;
4037 	}
4038 
4039 	return NULL;
4040 }
4041 
4042 static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
4043 {
4044 	struct net_device *dev;
4045 	unsigned int bucket;
4046 
4047 	do {
4048 		dev = dev_from_same_bucket(seq, pos);
4049 		if (dev)
4050 			return dev;
4051 
4052 		bucket = get_bucket(*pos) + 1;
4053 		*pos = set_bucket_offset(bucket, 1);
4054 	} while (bucket < NETDEV_HASHENTRIES);
4055 
4056 	return NULL;
4057 }
4058 
4059 /*
4060  *	This is invoked by the /proc filesystem handler to display a device
4061  *	in detail.
4062  */
4063 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
4064 	__acquires(RCU)
4065 {
4066 	rcu_read_lock();
4067 	if (!*pos)
4068 		return SEQ_START_TOKEN;
4069 
4070 	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
4071 		return NULL;
4072 
4073 	return dev_from_bucket(seq, pos);
4074 }
4075 
4076 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4077 {
4078 	++*pos;
4079 	return dev_from_bucket(seq, pos);
4080 }
4081 
4082 void dev_seq_stop(struct seq_file *seq, void *v)
4083 	__releases(RCU)
4084 {
4085 	rcu_read_unlock();
4086 }
4087 
4088 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4089 {
4090 	struct rtnl_link_stats64 temp;
4091 	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
4092 
4093 	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4094 		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
4095 		   dev->name, stats->rx_bytes, stats->rx_packets,
4096 		   stats->rx_errors,
4097 		   stats->rx_dropped + stats->rx_missed_errors,
4098 		   stats->rx_fifo_errors,
4099 		   stats->rx_length_errors + stats->rx_over_errors +
4100 		    stats->rx_crc_errors + stats->rx_frame_errors,
4101 		   stats->rx_compressed, stats->multicast,
4102 		   stats->tx_bytes, stats->tx_packets,
4103 		   stats->tx_errors, stats->tx_dropped,
4104 		   stats->tx_fifo_errors, stats->collisions,
4105 		   stats->tx_carrier_errors +
4106 		    stats->tx_aborted_errors +
4107 		    stats->tx_window_errors +
4108 		    stats->tx_heartbeat_errors,
4109 		   stats->tx_compressed);
4110 }
4111 
4112 /*
4113  *	Called from the PROCfs module. This now uses the new arbitrary sized
4114  *	/proc/net interface to create /proc/net/dev
4115  */
4116 static int dev_seq_show(struct seq_file *seq, void *v)
4117 {
4118 	if (v == SEQ_START_TOKEN)
4119 		seq_puts(seq, "Inter-|   Receive                            "
4120 			      "                    |  Transmit\n"
4121 			      " face |bytes    packets errs drop fifo frame "
4122 			      "compressed multicast|bytes    packets errs "
4123 			      "drop fifo colls carrier compressed\n");
4124 	else
4125 		dev_seq_printf_stats(seq, v);
4126 	return 0;
4127 }
4128 
4129 static struct softnet_data *softnet_get_online(loff_t *pos)
4130 {
4131 	struct softnet_data *sd = NULL;
4132 
4133 	while (*pos < nr_cpu_ids)
4134 		if (cpu_online(*pos)) {
4135 			sd = &per_cpu(softnet_data, *pos);
4136 			break;
4137 		} else
4138 			++*pos;
4139 	return sd;
4140 }
4141 
4142 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4143 {
4144 	return softnet_get_online(pos);
4145 }
4146 
4147 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4148 {
4149 	++*pos;
4150 	return softnet_get_online(pos);
4151 }
4152 
4153 static void softnet_seq_stop(struct seq_file *seq, void *v)
4154 {
4155 }
4156 
4157 static int softnet_seq_show(struct seq_file *seq, void *v)
4158 {
4159 	struct softnet_data *sd = v;
4160 
4161 	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
4162 		   sd->processed, sd->dropped, sd->time_squeeze, 0,
4163 		   0, 0, 0, 0, /* was fastroute */
4164 		   sd->cpu_collision, sd->received_rps);
4165 	return 0;
4166 }
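/*
 * Each /proc/net/softnet_stat line produced above is one CPU's counters,
 * printed as hex: processed packets, dropped packets, time_squeeze events
 * (net_rx_action() ran out of budget or time), five always-zero placeholder
 * fields (historically the fastroute counters), cpu_collision and
 * received_rps.
 */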
4167 
4168 static const struct seq_operations dev_seq_ops = {
4169 	.start = dev_seq_start,
4170 	.next  = dev_seq_next,
4171 	.stop  = dev_seq_stop,
4172 	.show  = dev_seq_show,
4173 };
4174 
4175 static int dev_seq_open(struct inode *inode, struct file *file)
4176 {
4177 	return seq_open_net(inode, file, &dev_seq_ops,
4178 			    sizeof(struct seq_net_private));
4179 }
4180 
4181 static const struct file_operations dev_seq_fops = {
4182 	.owner	 = THIS_MODULE,
4183 	.open    = dev_seq_open,
4184 	.read    = seq_read,
4185 	.llseek  = seq_lseek,
4186 	.release = seq_release_net,
4187 };
4188 
4189 static const struct seq_operations softnet_seq_ops = {
4190 	.start = softnet_seq_start,
4191 	.next  = softnet_seq_next,
4192 	.stop  = softnet_seq_stop,
4193 	.show  = softnet_seq_show,
4194 };
4195 
4196 static int softnet_seq_open(struct inode *inode, struct file *file)
4197 {
4198 	return seq_open(file, &softnet_seq_ops);
4199 }
4200 
4201 static const struct file_operations softnet_seq_fops = {
4202 	.owner	 = THIS_MODULE,
4203 	.open    = softnet_seq_open,
4204 	.read    = seq_read,
4205 	.llseek  = seq_lseek,
4206 	.release = seq_release,
4207 };
4208 
4209 static void *ptype_get_idx(loff_t pos)
4210 {
4211 	struct packet_type *pt = NULL;
4212 	loff_t i = 0;
4213 	int t;
4214 
4215 	list_for_each_entry_rcu(pt, &ptype_all, list) {
4216 		if (i == pos)
4217 			return pt;
4218 		++i;
4219 	}
4220 
4221 	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
4222 		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4223 			if (i == pos)
4224 				return pt;
4225 			++i;
4226 		}
4227 	}
4228 	return NULL;
4229 }
4230 
4231 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
4232 	__acquires(RCU)
4233 {
4234 	rcu_read_lock();
4235 	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4236 }
4237 
4238 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4239 {
4240 	struct packet_type *pt;
4241 	struct list_head *nxt;
4242 	int hash;
4243 
4244 	++*pos;
4245 	if (v == SEQ_START_TOKEN)
4246 		return ptype_get_idx(0);
4247 
4248 	pt = v;
4249 	nxt = pt->list.next;
4250 	if (pt->type == htons(ETH_P_ALL)) {
4251 		if (nxt != &ptype_all)
4252 			goto found;
4253 		hash = 0;
4254 		nxt = ptype_base[0].next;
4255 	} else
4256 		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
4257 
4258 	while (nxt == &ptype_base[hash]) {
4259 		if (++hash >= PTYPE_HASH_SIZE)
4260 			return NULL;
4261 		nxt = ptype_base[hash].next;
4262 	}
4263 found:
4264 	return list_entry(nxt, struct packet_type, list);
4265 }
4266 
4267 static void ptype_seq_stop(struct seq_file *seq, void *v)
4268 	__releases(RCU)
4269 {
4270 	rcu_read_unlock();
4271 }
4272 
4273 static int ptype_seq_show(struct seq_file *seq, void *v)
4274 {
4275 	struct packet_type *pt = v;
4276 
4277 	if (v == SEQ_START_TOKEN)
4278 		seq_puts(seq, "Type Device      Function\n");
4279 	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
4280 		if (pt->type == htons(ETH_P_ALL))
4281 			seq_puts(seq, "ALL ");
4282 		else
4283 			seq_printf(seq, "%04x", ntohs(pt->type));
4284 
4285 		seq_printf(seq, " %-8s %pF\n",
4286 			   pt->dev ? pt->dev->name : "", pt->func);
4287 	}
4288 
4289 	return 0;
4290 }
4291 
4292 static const struct seq_operations ptype_seq_ops = {
4293 	.start = ptype_seq_start,
4294 	.next  = ptype_seq_next,
4295 	.stop  = ptype_seq_stop,
4296 	.show  = ptype_seq_show,
4297 };
4298 
4299 static int ptype_seq_open(struct inode *inode, struct file *file)
4300 {
4301 	return seq_open_net(inode, file, &ptype_seq_ops,
4302 			sizeof(struct seq_net_private));
4303 }
4304 
4305 static const struct file_operations ptype_seq_fops = {
4306 	.owner	 = THIS_MODULE,
4307 	.open    = ptype_seq_open,
4308 	.read    = seq_read,
4309 	.llseek  = seq_lseek,
4310 	.release = seq_release_net,
4311 };
4312 
4313 
4314 static int __net_init dev_proc_net_init(struct net *net)
4315 {
4316 	int rc = -ENOMEM;
4317 
4318 	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
4319 		goto out;
4320 	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
4321 		goto out_dev;
4322 	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
4323 		goto out_softnet;
4324 
4325 	if (wext_proc_init(net))
4326 		goto out_ptype;
4327 	rc = 0;
4328 out:
4329 	return rc;
4330 out_ptype:
4331 	proc_net_remove(net, "ptype");
4332 out_softnet:
4333 	proc_net_remove(net, "softnet_stat");
4334 out_dev:
4335 	proc_net_remove(net, "dev");
4336 	goto out;
4337 }
4338 
4339 static void __net_exit dev_proc_net_exit(struct net *net)
4340 {
4341 	wext_proc_exit(net);
4342 
4343 	proc_net_remove(net, "ptype");
4344 	proc_net_remove(net, "softnet_stat");
4345 	proc_net_remove(net, "dev");
4346 }
4347 
4348 static struct pernet_operations __net_initdata dev_proc_ops = {
4349 	.init = dev_proc_net_init,
4350 	.exit = dev_proc_net_exit,
4351 };
4352 
4353 static int __init dev_proc_init(void)
4354 {
4355 	return register_pernet_subsys(&dev_proc_ops);
4356 }
4357 #else
4358 #define dev_proc_init() 0
4359 #endif	/* CONFIG_PROC_FS */
4360 
4361 
4362 /**
4363  *	netdev_set_master	-	set up master pointer
4364  *	@slave: slave device
4365  *	@master: new master device
4366  *
4367  *	Changes the master device of the slave. Pass %NULL to break the
4368  *	bonding. The caller must hold the RTNL semaphore. On a failure
4369  *	a negative errno code is returned. On success the reference counts
4370  *	are adjusted and the function returns zero.
4371  */
4372 int netdev_set_master(struct net_device *slave, struct net_device *master)
4373 {
4374 	struct net_device *old = slave->master;
4375 
4376 	ASSERT_RTNL();
4377 
4378 	if (master) {
4379 		if (old)
4380 			return -EBUSY;
4381 		dev_hold(master);
4382 	}
4383 
4384 	slave->master = master;
4385 
4386 	if (old)
4387 		dev_put(old);
4388 	return 0;
4389 }
4390 EXPORT_SYMBOL(netdev_set_master);
4391 
4392 /**
4393  *	netdev_set_bond_master	-	set up bonding master/slave pair
4394  *	@slave: slave device
4395  *	@master: new master device
4396  *
4397  *	Changes the master device of the slave. Pass %NULL to break the
4398  *	bonding. The caller must hold the RTNL semaphore. On a failure
4399  *	a negative errno code is returned. On success %RTM_NEWLINK is sent
4400  *	to the routing socket and the function returns zero.
4401  */
4402 int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
4403 {
4404 	int err;
4405 
4406 	ASSERT_RTNL();
4407 
4408 	err = netdev_set_master(slave, master);
4409 	if (err)
4410 		return err;
4411 	if (master)
4412 		slave->flags |= IFF_SLAVE;
4413 	else
4414 		slave->flags &= ~IFF_SLAVE;
4415 
4416 	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4417 	return 0;
4418 }
4419 EXPORT_SYMBOL(netdev_set_bond_master);
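/*
 * Illustrative sketch (not part of this file): the bonding driver is the
 * expected caller, roughly along these lines while enslaving and later
 * releasing a device (under RTNL, error handling omitted):
 *
 *	err = netdev_set_bond_master(slave_dev, bond_dev);	// enslave
 *	...
 *	netdev_set_bond_master(slave_dev, NULL);		// release
 */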
4420 
4421 static void dev_change_rx_flags(struct net_device *dev, int flags)
4422 {
4423 	const struct net_device_ops *ops = dev->netdev_ops;
4424 
4425 	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4426 		ops->ndo_change_rx_flags(dev, flags);
4427 }
4428 
4429 static int __dev_set_promiscuity(struct net_device *dev, int inc)
4430 {
4431 	unsigned int old_flags = dev->flags;
4432 	uid_t uid;
4433 	gid_t gid;
4434 
4435 	ASSERT_RTNL();
4436 
4437 	dev->flags |= IFF_PROMISC;
4438 	dev->promiscuity += inc;
4439 	if (dev->promiscuity == 0) {
4440 		/*
4441 		 * Avoid overflow.
4442 		 * If inc causes overflow, leave promisc untouched and return an error.
4443 		 */
4444 		if (inc < 0)
4445 			dev->flags &= ~IFF_PROMISC;
4446 		else {
4447 			dev->promiscuity -= inc;
4448 			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4449 				dev->name);
4450 			return -EOVERFLOW;
4451 		}
4452 	}
4453 	if (dev->flags != old_flags) {
4454 		pr_info("device %s %s promiscuous mode\n",
4455 			dev->name,
4456 			dev->flags & IFF_PROMISC ? "entered" : "left");
4457 		if (audit_enabled) {
4458 			current_uid_gid(&uid, &gid);
4459 			audit_log(current->audit_context, GFP_ATOMIC,
4460 				AUDIT_ANOM_PROMISCUOUS,
4461 				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4462 				dev->name, (dev->flags & IFF_PROMISC),
4463 				(old_flags & IFF_PROMISC),
4464 				audit_get_loginuid(current),
4465 				uid, gid,
4466 				audit_get_sessionid(current));
4467 		}
4468 
4469 		dev_change_rx_flags(dev, IFF_PROMISC);
4470 	}
4471 	return 0;
4472 }
4473 
4474 /**
4475  *	dev_set_promiscuity	- update promiscuity count on a device
4476  *	@dev: device
4477  *	@inc: modifier
4478  *
4479  *	Add or remove promiscuity from a device. While the count in the device
4480  *	remains above zero the interface remains promiscuous. Once it hits zero
4481  *	the device reverts back to normal filtering operation. A negative inc
4482  *	value is used to drop promiscuity on the device.
4483  *	Return 0 if successful or a negative errno code on error.
4484  */
4485 int dev_set_promiscuity(struct net_device *dev, int inc)
4486 {
4487 	unsigned int old_flags = dev->flags;
4488 	int err;
4489 
4490 	err = __dev_set_promiscuity(dev, inc);
4491 	if (err < 0)
4492 		return err;
4493 	if (dev->flags != old_flags)
4494 		dev_set_rx_mode(dev);
4495 	return err;
4496 }
4497 EXPORT_SYMBOL(dev_set_promiscuity);
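/*
 * Illustrative sketch (not part of this file): a hypothetical module that
 * needs to see all frames on a device would bump and later drop the
 * promiscuity count, under RTNL since __dev_set_promiscuity() asserts it:
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	// enter promiscuous mode
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);		// drop our reference
 *	rtnl_unlock();
 */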
4498 
4499 /**
4500  *	dev_set_allmulti	- update allmulti count on a device
4501  *	@dev: device
4502  *	@inc: modifier
4503  *
4504  *	Add or remove reception of all multicast frames to a device. While the
4505  *	count in the device remains above zero the interface remains listening
4506  *	to all multicast frames. Once it hits zero the device reverts back to normal
4507  *	filtering operation. A negative @inc value is used to drop the counter
4508  *	when releasing a resource needing all multicasts.
4509  *	Return 0 if successful or a negative errno code on error.
4510  */
4511 
4512 int dev_set_allmulti(struct net_device *dev, int inc)
4513 {
4514 	unsigned int old_flags = dev->flags;
4515 
4516 	ASSERT_RTNL();
4517 
4518 	dev->flags |= IFF_ALLMULTI;
4519 	dev->allmulti += inc;
4520 	if (dev->allmulti == 0) {
4521 		/*
4522 		 * Avoid overflow.
4523 		 * If inc causes overflow, leave allmulti untouched and return an error.
4524 		 */
4525 		if (inc < 0)
4526 			dev->flags &= ~IFF_ALLMULTI;
4527 		else {
4528 			dev->allmulti -= inc;
4529 			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4530 				dev->name);
4531 			return -EOVERFLOW;
4532 		}
4533 	}
4534 	if (dev->flags ^ old_flags) {
4535 		dev_change_rx_flags(dev, IFF_ALLMULTI);
4536 		dev_set_rx_mode(dev);
4537 	}
4538 	return 0;
4539 }
4540 EXPORT_SYMBOL(dev_set_allmulti);
4541 
4542 /*
4543  *	Upload unicast and multicast address lists to device and
4544  *	configure RX filtering. When the device doesn't support unicast
4545  *	filtering it is put in promiscuous mode while unicast addresses
4546  *	are present.
4547  */
4548 void __dev_set_rx_mode(struct net_device *dev)
4549 {
4550 	const struct net_device_ops *ops = dev->netdev_ops;
4551 
4552 	/* dev_open will call this function so the list will stay sane. */
4553 	if (!(dev->flags&IFF_UP))
4554 		return;
4555 
4556 	if (!netif_device_present(dev))
4557 		return;
4558 
4559 	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4560 		/* Unicast address changes may only happen under the rtnl,
4561 		 * therefore calling __dev_set_promiscuity here is safe.
4562 		 */
4563 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4564 			__dev_set_promiscuity(dev, 1);
4565 			dev->uc_promisc = true;
4566 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4567 			__dev_set_promiscuity(dev, -1);
4568 			dev->uc_promisc = false;
4569 		}
4570 	}
4571 
4572 	if (ops->ndo_set_rx_mode)
4573 		ops->ndo_set_rx_mode(dev);
4574 }
4575 
4576 void dev_set_rx_mode(struct net_device *dev)
4577 {
4578 	netif_addr_lock_bh(dev);
4579 	__dev_set_rx_mode(dev);
4580 	netif_addr_unlock_bh(dev);
4581 }
4582 
4583 /**
4584  *	dev_get_flags - get flags reported to userspace
4585  *	@dev: device
4586  *
4587  *	Get the combination of flag bits exported through APIs to userspace.
4588  */
4589 unsigned int dev_get_flags(const struct net_device *dev)
4590 {
4591 	unsigned int flags;
4592 
4593 	flags = (dev->flags & ~(IFF_PROMISC |
4594 				IFF_ALLMULTI |
4595 				IFF_RUNNING |
4596 				IFF_LOWER_UP |
4597 				IFF_DORMANT)) |
4598 		(dev->gflags & (IFF_PROMISC |
4599 				IFF_ALLMULTI));
4600 
4601 	if (netif_running(dev)) {
4602 		if (netif_oper_up(dev))
4603 			flags |= IFF_RUNNING;
4604 		if (netif_carrier_ok(dev))
4605 			flags |= IFF_LOWER_UP;
4606 		if (netif_dormant(dev))
4607 			flags |= IFF_DORMANT;
4608 	}
4609 
4610 	return flags;
4611 }
4612 EXPORT_SYMBOL(dev_get_flags);
4613 
4614 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4615 {
4616 	unsigned int old_flags = dev->flags;
4617 	int ret;
4618 
4619 	ASSERT_RTNL();
4620 
4621 	/*
4622 	 *	Set the flags on our device.
4623 	 */
4624 
4625 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4626 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4627 			       IFF_AUTOMEDIA)) |
4628 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4629 				    IFF_ALLMULTI));
4630 
4631 	/*
4632 	 *	Load in the correct multicast list now the flags have changed.
4633 	 */
4634 
4635 	if ((old_flags ^ flags) & IFF_MULTICAST)
4636 		dev_change_rx_flags(dev, IFF_MULTICAST);
4637 
4638 	dev_set_rx_mode(dev);
4639 
4640 	/*
4641 	 *	Have we downed the interface? We handle IFF_UP ourselves
4642 	 *	according to user attempts to set it, rather than blindly
4643 	 *	setting it.
4644 	 */
4645 
4646 	ret = 0;
4647 	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
4648 		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4649 
4650 		if (!ret)
4651 			dev_set_rx_mode(dev);
4652 	}
4653 
4654 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
4655 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
4656 
4657 		dev->gflags ^= IFF_PROMISC;
4658 		dev_set_promiscuity(dev, inc);
4659 	}
4660 
4661 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4662 	   is important. Some (broken) drivers set IFF_PROMISC when
4663 	   IFF_ALLMULTI is requested, without asking us and without reporting it.
4664 	 */
4665 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4666 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4667 
4668 		dev->gflags ^= IFF_ALLMULTI;
4669 		dev_set_allmulti(dev, inc);
4670 	}
4671 
4672 	return ret;
4673 }
4674 
4675 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4676 {
4677 	unsigned int changes = dev->flags ^ old_flags;
4678 
4679 	if (changes & IFF_UP) {
4680 		if (dev->flags & IFF_UP)
4681 			call_netdevice_notifiers(NETDEV_UP, dev);
4682 		else
4683 			call_netdevice_notifiers(NETDEV_DOWN, dev);
4684 	}
4685 
4686 	if (dev->flags & IFF_UP &&
4687 	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4688 		call_netdevice_notifiers(NETDEV_CHANGE, dev);
4689 }
4690 
4691 /**
4692  *	dev_change_flags - change device settings
4693  *	@dev: device
4694  *	@flags: device state flags
4695  *
4696  *	Change settings on device based state flags. The flags are
4697  *	in the userspace exported format.
4698  */
4699 int dev_change_flags(struct net_device *dev, unsigned int flags)
4700 {
4701 	int ret;
4702 	unsigned int changes, old_flags = dev->flags;
4703 
4704 	ret = __dev_change_flags(dev, flags);
4705 	if (ret < 0)
4706 		return ret;
4707 
4708 	changes = old_flags ^ dev->flags;
4709 	if (changes)
4710 		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4711 
4712 	__dev_notify_flags(dev, old_flags);
4713 	return ret;
4714 }
4715 EXPORT_SYMBOL(dev_change_flags);
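/*
 * Illustrative sketch (not part of this file): bringing an interface up
 * from kernel code goes through this helper, for example (under RTNL,
 * with the device already looked up and held):
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev->flags | IFF_UP);
 *	rtnl_unlock();
 */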
4716 
4717 /**
4718  *	dev_set_mtu - Change maximum transfer unit
4719  *	@dev: device
4720  *	@new_mtu: new transfer unit
4721  *
4722  *	Change the maximum transfer size of the network device.
4723  */
4724 int dev_set_mtu(struct net_device *dev, int new_mtu)
4725 {
4726 	const struct net_device_ops *ops = dev->netdev_ops;
4727 	int err;
4728 
4729 	if (new_mtu == dev->mtu)
4730 		return 0;
4731 
4732 	/*	MTU must be positive.	 */
4733 	if (new_mtu < 0)
4734 		return -EINVAL;
4735 
4736 	if (!netif_device_present(dev))
4737 		return -ENODEV;
4738 
4739 	err = 0;
4740 	if (ops->ndo_change_mtu)
4741 		err = ops->ndo_change_mtu(dev, new_mtu);
4742 	else
4743 		dev->mtu = new_mtu;
4744 
4745 	if (!err && dev->flags & IFF_UP)
4746 		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4747 	return err;
4748 }
4749 EXPORT_SYMBOL(dev_set_mtu);
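/*
 * Illustrative sketch (not part of this file): in-kernel callers change
 * the MTU through this helper while holding RTNL, for example:
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);	// request a jumbo MTU
 *	rtnl_unlock();
 *
 * The driver's ndo_change_mtu() may still reject the value.
 */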
4750 
4751 /**
4752  *	dev_set_group - Change group this device belongs to
4753  *	@dev: device
4754  *	@new_group: group this device should belong to
4755  */
4756 void dev_set_group(struct net_device *dev, int new_group)
4757 {
4758 	dev->group = new_group;
4759 }
4760 EXPORT_SYMBOL(dev_set_group);
4761 
4762 /**
4763  *	dev_set_mac_address - Change Media Access Control Address
4764  *	@dev: device
4765  *	@sa: new address
4766  *
4767  *	Change the hardware (MAC) address of the device
4768  */
4769 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4770 {
4771 	const struct net_device_ops *ops = dev->netdev_ops;
4772 	int err;
4773 
4774 	if (!ops->ndo_set_mac_address)
4775 		return -EOPNOTSUPP;
4776 	if (sa->sa_family != dev->type)
4777 		return -EINVAL;
4778 	if (!netif_device_present(dev))
4779 		return -ENODEV;
4780 	err = ops->ndo_set_mac_address(dev, sa);
4781 	if (!err)
4782 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4783 	return err;
4784 }
4785 EXPORT_SYMBOL(dev_set_mac_address);
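/*
 * Illustrative sketch (not part of this file): setting a new hardware
 * address from kernel code, assuming an Ethernet device and a caller
 * that already holds RTNL:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;		// ARPHRD_ETHER here
 *	memcpy(sa.sa_data, new_addr, ETH_ALEN);
 *	err = dev_set_mac_address(dev, &sa);
 */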
4786 
4787 /*
4788  *	Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4789  */
4790 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4791 {
4792 	int err;
4793 	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4794 
4795 	if (!dev)
4796 		return -ENODEV;
4797 
4798 	switch (cmd) {
4799 	case SIOCGIFFLAGS:	/* Get interface flags */
4800 		ifr->ifr_flags = (short) dev_get_flags(dev);
4801 		return 0;
4802 
4803 	case SIOCGIFMETRIC:	/* Get the metric on the interface
4804 				   (currently unused) */
4805 		ifr->ifr_metric = 0;
4806 		return 0;
4807 
4808 	case SIOCGIFMTU:	/* Get the MTU of a device */
4809 		ifr->ifr_mtu = dev->mtu;
4810 		return 0;
4811 
4812 	case SIOCGIFHWADDR:
4813 		if (!dev->addr_len)
4814 			memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4815 		else
4816 			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4817 			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4818 		ifr->ifr_hwaddr.sa_family = dev->type;
4819 		return 0;
4820 
4821 	case SIOCGIFSLAVE:
4822 		err = -EINVAL;
4823 		break;
4824 
4825 	case SIOCGIFMAP:
4826 		ifr->ifr_map.mem_start = dev->mem_start;
4827 		ifr->ifr_map.mem_end   = dev->mem_end;
4828 		ifr->ifr_map.base_addr = dev->base_addr;
4829 		ifr->ifr_map.irq       = dev->irq;
4830 		ifr->ifr_map.dma       = dev->dma;
4831 		ifr->ifr_map.port      = dev->if_port;
4832 		return 0;
4833 
4834 	case SIOCGIFINDEX:
4835 		ifr->ifr_ifindex = dev->ifindex;
4836 		return 0;
4837 
4838 	case SIOCGIFTXQLEN:
4839 		ifr->ifr_qlen = dev->tx_queue_len;
4840 		return 0;
4841 
4842 	default:
4843 		/* dev_ioctl() should ensure this case
4844 		 * is never reached
4845 		 */
4846 		WARN_ON(1);
4847 		err = -ENOTTY;
4848 		break;
4849 
4850 	}
4851 	return err;
4852 }
4853 
4854 /*
4855  *	Perform the SIOCxIFxxx calls, inside rtnl_lock()
4856  */
4857 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4858 {
4859 	int err;
4860 	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4861 	const struct net_device_ops *ops;
4862 
4863 	if (!dev)
4864 		return -ENODEV;
4865 
4866 	ops = dev->netdev_ops;
4867 
4868 	switch (cmd) {
4869 	case SIOCSIFFLAGS:	/* Set interface flags */
4870 		return dev_change_flags(dev, ifr->ifr_flags);
4871 
4872 	case SIOCSIFMETRIC:	/* Set the metric on the interface
4873 				   (currently unused) */
4874 		return -EOPNOTSUPP;
4875 
4876 	case SIOCSIFMTU:	/* Set the MTU of a device */
4877 		return dev_set_mtu(dev, ifr->ifr_mtu);
4878 
4879 	case SIOCSIFHWADDR:
4880 		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4881 
4882 	case SIOCSIFHWBROADCAST:
4883 		if (ifr->ifr_hwaddr.sa_family != dev->type)
4884 			return -EINVAL;
4885 		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4886 		       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4887 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4888 		return 0;
4889 
4890 	case SIOCSIFMAP:
4891 		if (ops->ndo_set_config) {
4892 			if (!netif_device_present(dev))
4893 				return -ENODEV;
4894 			return ops->ndo_set_config(dev, &ifr->ifr_map);
4895 		}
4896 		return -EOPNOTSUPP;
4897 
4898 	case SIOCADDMULTI:
4899 		if (!ops->ndo_set_rx_mode ||
4900 		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4901 			return -EINVAL;
4902 		if (!netif_device_present(dev))
4903 			return -ENODEV;
4904 		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
4905 
4906 	case SIOCDELMULTI:
4907 		if (!ops->ndo_set_rx_mode ||
4908 		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4909 			return -EINVAL;
4910 		if (!netif_device_present(dev))
4911 			return -ENODEV;
4912 		return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
4913 
4914 	case SIOCSIFTXQLEN:
4915 		if (ifr->ifr_qlen < 0)
4916 			return -EINVAL;
4917 		dev->tx_queue_len = ifr->ifr_qlen;
4918 		return 0;
4919 
4920 	case SIOCSIFNAME:
4921 		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4922 		return dev_change_name(dev, ifr->ifr_newname);
4923 
4924 	case SIOCSHWTSTAMP:
4925 		err = net_hwtstamp_validate(ifr);
4926 		if (err)
4927 			return err;
4928 		/* fall through */
4929 
4930 	/*
4931 	 *	Unknown or private ioctl
4932 	 */
4933 	default:
4934 		if ((cmd >= SIOCDEVPRIVATE &&
4935 		    cmd <= SIOCDEVPRIVATE + 15) ||
4936 		    cmd == SIOCBONDENSLAVE ||
4937 		    cmd == SIOCBONDRELEASE ||
4938 		    cmd == SIOCBONDSETHWADDR ||
4939 		    cmd == SIOCBONDSLAVEINFOQUERY ||
4940 		    cmd == SIOCBONDINFOQUERY ||
4941 		    cmd == SIOCBONDCHANGEACTIVE ||
4942 		    cmd == SIOCGMIIPHY ||
4943 		    cmd == SIOCGMIIREG ||
4944 		    cmd == SIOCSMIIREG ||
4945 		    cmd == SIOCBRADDIF ||
4946 		    cmd == SIOCBRDELIF ||
4947 		    cmd == SIOCSHWTSTAMP ||
4948 		    cmd == SIOCWANDEV) {
4949 			err = -EOPNOTSUPP;
4950 			if (ops->ndo_do_ioctl) {
4951 				if (netif_device_present(dev))
4952 					err = ops->ndo_do_ioctl(dev, ifr, cmd);
4953 				else
4954 					err = -ENODEV;
4955 			}
4956 		} else
4957 			err = -EINVAL;
4958 
4959 	}
4960 	return err;
4961 }
4962 
4963 /*
4964  *	This function handles all "interface"-type I/O control requests. The actual
4965  *	'doing' part of this is dev_ifsioc above.
4966  */
4967 
4968 /**
4969  *	dev_ioctl	-	network device ioctl
4970  *	@net: the applicable net namespace
4971  *	@cmd: command to issue
4972  *	@arg: pointer to a struct ifreq in user space
4973  *
4974  *	Issue ioctl functions to devices. This is normally called by the
4975  *	user space syscall interfaces but can sometimes be useful for
4976  *	other purposes. The return value is the return from the syscall if
4977  *	positive or a negative errno code on error.
4978  */
4979 
4980 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4981 {
4982 	struct ifreq ifr;
4983 	int ret;
4984 	char *colon;
4985 
4986 	/* One special case: SIOCGIFCONF takes an ifconf argument
4987 	   and requires a shared lock, because it sleeps writing
4988 	   to user space.
4989 	 */
4990 
4991 	if (cmd == SIOCGIFCONF) {
4992 		rtnl_lock();
4993 		ret = dev_ifconf(net, (char __user *) arg);
4994 		rtnl_unlock();
4995 		return ret;
4996 	}
4997 	if (cmd == SIOCGIFNAME)
4998 		return dev_ifname(net, (struct ifreq __user *)arg);
4999 
5000 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5001 		return -EFAULT;
5002 
5003 	ifr.ifr_name[IFNAMSIZ-1] = 0;
5004 
5005 	colon = strchr(ifr.ifr_name, ':');
5006 	if (colon)
5007 		*colon = 0;
5008 
5009 	/*
5010 	 *	See which interface the caller is talking about.
5011 	 */
5012 
5013 	switch (cmd) {
5014 	/*
5015 	 *	These ioctl calls:
5016 	 *	- can be done by all.
5017 	 *	- atomic and do not require locking.
5018 	 *	- return a value
5019 	 */
5020 	case SIOCGIFFLAGS:
5021 	case SIOCGIFMETRIC:
5022 	case SIOCGIFMTU:
5023 	case SIOCGIFHWADDR:
5024 	case SIOCGIFSLAVE:
5025 	case SIOCGIFMAP:
5026 	case SIOCGIFINDEX:
5027 	case SIOCGIFTXQLEN:
5028 		dev_load(net, ifr.ifr_name);
5029 		rcu_read_lock();
5030 		ret = dev_ifsioc_locked(net, &ifr, cmd);
5031 		rcu_read_unlock();
5032 		if (!ret) {
5033 			if (colon)
5034 				*colon = ':';
5035 			if (copy_to_user(arg, &ifr,
5036 					 sizeof(struct ifreq)))
5037 				ret = -EFAULT;
5038 		}
5039 		return ret;
5040 
5041 	case SIOCETHTOOL:
5042 		dev_load(net, ifr.ifr_name);
5043 		rtnl_lock();
5044 		ret = dev_ethtool(net, &ifr);
5045 		rtnl_unlock();
5046 		if (!ret) {
5047 			if (colon)
5048 				*colon = ':';
5049 			if (copy_to_user(arg, &ifr,
5050 					 sizeof(struct ifreq)))
5051 				ret = -EFAULT;
5052 		}
5053 		return ret;
5054 
5055 	/*
5056 	 *	These ioctl calls:
5057 	 *	- require superuser power.
5058 	 *	- require strict serialization.
5059 	 *	- return a value
5060 	 */
5061 	case SIOCGMIIPHY:
5062 	case SIOCGMIIREG:
5063 	case SIOCSIFNAME:
5064 		if (!capable(CAP_NET_ADMIN))
5065 			return -EPERM;
5066 		dev_load(net, ifr.ifr_name);
5067 		rtnl_lock();
5068 		ret = dev_ifsioc(net, &ifr, cmd);
5069 		rtnl_unlock();
5070 		if (!ret) {
5071 			if (colon)
5072 				*colon = ':';
5073 			if (copy_to_user(arg, &ifr,
5074 					 sizeof(struct ifreq)))
5075 				ret = -EFAULT;
5076 		}
5077 		return ret;
5078 
5079 	/*
5080 	 *	These ioctl calls:
5081 	 *	- require superuser power.
5082 	 *	- require strict serialization.
5083 	 *	- do not return a value
5084 	 */
5085 	case SIOCSIFFLAGS:
5086 	case SIOCSIFMETRIC:
5087 	case SIOCSIFMTU:
5088 	case SIOCSIFMAP:
5089 	case SIOCSIFHWADDR:
5090 	case SIOCSIFSLAVE:
5091 	case SIOCADDMULTI:
5092 	case SIOCDELMULTI:
5093 	case SIOCSIFHWBROADCAST:
5094 	case SIOCSIFTXQLEN:
5095 	case SIOCSMIIREG:
5096 	case SIOCBONDENSLAVE:
5097 	case SIOCBONDRELEASE:
5098 	case SIOCBONDSETHWADDR:
5099 	case SIOCBONDCHANGEACTIVE:
5100 	case SIOCBRADDIF:
5101 	case SIOCBRDELIF:
5102 	case SIOCSHWTSTAMP:
5103 		if (!capable(CAP_NET_ADMIN))
5104 			return -EPERM;
5105 		/* fall through */
5106 	case SIOCBONDSLAVEINFOQUERY:
5107 	case SIOCBONDINFOQUERY:
5108 		dev_load(net, ifr.ifr_name);
5109 		rtnl_lock();
5110 		ret = dev_ifsioc(net, &ifr, cmd);
5111 		rtnl_unlock();
5112 		return ret;
5113 
5114 	case SIOCGIFMEM:
5115 		/* Get the per device memory space. We can add this but
5116 		 * currently do not support it */
5117 	case SIOCSIFMEM:
5118 		/* Set the per device memory buffer space.
5119 		 * Not applicable in our case */
5120 	case SIOCSIFLINK:
5121 		return -ENOTTY;
5122 
5123 	/*
5124 	 *	Unknown or private ioctl.
5125 	 */
5126 	default:
5127 		if (cmd == SIOCWANDEV ||
5128 		    (cmd >= SIOCDEVPRIVATE &&
5129 		     cmd <= SIOCDEVPRIVATE + 15)) {
5130 			dev_load(net, ifr.ifr_name);
5131 			rtnl_lock();
5132 			ret = dev_ifsioc(net, &ifr, cmd);
5133 			rtnl_unlock();
5134 			if (!ret && copy_to_user(arg, &ifr,
5135 						 sizeof(struct ifreq)))
5136 				ret = -EFAULT;
5137 			return ret;
5138 		}
5139 		/* Take care of Wireless Extensions */
5140 		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5141 			return wext_handle_ioctl(net, &ifr, cmd, arg);
5142 		return -ENOTTY;
5143 	}
5144 }
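/*
 * Illustrative sketch (not part of this file): the user-space side of the
 * ioctl path handled above, reading a device's MTU via SIOCGIFMTU:
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu %d\n", ifr.ifr_mtu);
 *	close(fd);
 */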
5145 
5146 
5147 /**
5148  *	dev_new_index	-	allocate an ifindex
5149  *	@net: the applicable net namespace
5150  *
5151  *	Returns a suitable unique value for a new device interface
5152  *	number.  The caller must hold the rtnl semaphore or the
5153  *	dev_base_lock to be sure it remains unique.
5154  */
5155 static int dev_new_index(struct net *net)
5156 {
5157 	static int ifindex;
5158 	for (;;) {
5159 		if (++ifindex <= 0)
5160 			ifindex = 1;
5161 		if (!__dev_get_by_index(net, ifindex))
5162 			return ifindex;
5163 	}
5164 }
5165 
5166 /* Delayed registration/unregistration */
5167 static LIST_HEAD(net_todo_list);
5168 
5169 static void net_set_todo(struct net_device *dev)
5170 {
5171 	list_add_tail(&dev->todo_list, &net_todo_list);
5172 }
5173 
5174 static void rollback_registered_many(struct list_head *head)
5175 {
5176 	struct net_device *dev, *tmp;
5177 
5178 	BUG_ON(dev_boot_phase);
5179 	ASSERT_RTNL();
5180 
5181 	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5182 		/* Some devices call unregister without ever having been
5183 		 * registered, as part of initialization unwind. Remove those
5184 		 * devices and proceed with the remaining.
5185 		 */
5186 		if (dev->reg_state == NETREG_UNINITIALIZED) {
5187 			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5188 				 dev->name, dev);
5189 
5190 			WARN_ON(1);
5191 			list_del(&dev->unreg_list);
5192 			continue;
5193 		}
5194 		dev->dismantle = true;
5195 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
5196 	}
5197 
5198 	/* If device is running, close it first. */
5199 	dev_close_many(head);
5200 
5201 	list_for_each_entry(dev, head, unreg_list) {
5202 		/* And unlink it from device chain. */
5203 		unlist_netdevice(dev);
5204 
5205 		dev->reg_state = NETREG_UNREGISTERING;
5206 	}
5207 
5208 	synchronize_net();
5209 
5210 	list_for_each_entry(dev, head, unreg_list) {
5211 		/* Shutdown queueing discipline. */
5212 		dev_shutdown(dev);
5213 
5214 
5215 		/* Notify protocols that we are about to destroy
5216 		   this device. They should clean up all of their state.
5217 		*/
5218 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5219 
5220 		if (!dev->rtnl_link_ops ||
5221 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5222 			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5223 
5224 		/*
5225 		 *	Flush the unicast and multicast chains
5226 		 */
5227 		dev_uc_flush(dev);
5228 		dev_mc_flush(dev);
5229 
5230 		if (dev->netdev_ops->ndo_uninit)
5231 			dev->netdev_ops->ndo_uninit(dev);
5232 
5233 		/* Notifier chain MUST detach us from master device. */
5234 		WARN_ON(dev->master);
5235 
5236 		/* Remove entries from kobject tree */
5237 		netdev_unregister_kobject(dev);
5238 	}
5239 
5240 	/* Process any work delayed until the end of the batch */
5241 	dev = list_first_entry(head, struct net_device, unreg_list);
5242 	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5243 
5244 	synchronize_net();
5245 
5246 	list_for_each_entry(dev, head, unreg_list)
5247 		dev_put(dev);
5248 }
5249 
5250 static void rollback_registered(struct net_device *dev)
5251 {
5252 	LIST_HEAD(single);
5253 
5254 	list_add(&dev->unreg_list, &single);
5255 	rollback_registered_many(&single);
5256 	list_del(&single);
5257 }
5258 
5259 static netdev_features_t netdev_fix_features(struct net_device *dev,
5260 	netdev_features_t features)
5261 {
5262 	/* Fix illegal checksum combinations */
5263 	if ((features & NETIF_F_HW_CSUM) &&
5264 	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5265 		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5266 		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5267 	}
5268 
5269 	/* Fix illegal SG+CSUM combinations. */
5270 	if ((features & NETIF_F_SG) &&
5271 	    !(features & NETIF_F_ALL_CSUM)) {
5272 		netdev_dbg(dev,
5273 			"Dropping NETIF_F_SG since no checksum feature.\n");
5274 		features &= ~NETIF_F_SG;
5275 	}
5276 
5277 	/* TSO requires that SG is present as well. */
5278 	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5279 		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5280 		features &= ~NETIF_F_ALL_TSO;
5281 	}
5282 
5283 	/* TSO ECN requires that TSO is present as well. */
5284 	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5285 		features &= ~NETIF_F_TSO_ECN;
5286 
5287 	/* Software GSO depends on SG. */
5288 	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5289 		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5290 		features &= ~NETIF_F_GSO;
5291 	}
5292 
5293 	/* UFO needs SG and checksumming */
5294 	if (features & NETIF_F_UFO) {
5295 		/* maybe split UFO into V4 and V6? */
5296 		if (!((features & NETIF_F_GEN_CSUM) ||
5297 		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5298 			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5299 			netdev_dbg(dev,
5300 				"Dropping NETIF_F_UFO since no checksum offload features.\n");
5301 			features &= ~NETIF_F_UFO;
5302 		}
5303 
5304 		if (!(features & NETIF_F_SG)) {
5305 			netdev_dbg(dev,
5306 				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5307 			features &= ~NETIF_F_UFO;
5308 		}
5309 	}
5310 
5311 	return features;
5312 }
5313 
5314 int __netdev_update_features(struct net_device *dev)
5315 {
5316 	netdev_features_t features;
5317 	int err = 0;
5318 
5319 	ASSERT_RTNL();
5320 
5321 	features = netdev_get_wanted_features(dev);
5322 
5323 	if (dev->netdev_ops->ndo_fix_features)
5324 		features = dev->netdev_ops->ndo_fix_features(dev, features);
5325 
5326 	/* driver might be less strict about feature dependencies */
5327 	features = netdev_fix_features(dev, features);
5328 
5329 	if (dev->features == features)
5330 		return 0;
5331 
5332 	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5333 		&dev->features, &features);
5334 
5335 	if (dev->netdev_ops->ndo_set_features)
5336 		err = dev->netdev_ops->ndo_set_features(dev, features);
5337 
5338 	if (unlikely(err < 0)) {
5339 		netdev_err(dev,
5340 			"set_features() failed (%d); wanted %pNF, left %pNF\n",
5341 			err, &features, &dev->features);
5342 		return -1;
5343 	}
5344 
5345 	if (!err)
5346 		dev->features = features;
5347 
5348 	return 1;
5349 }
5350 
5351 /**
5352  *	netdev_update_features - recalculate device features
5353  *	@dev: the device to check
5354  *
5355  *	Recalculate dev->features set and send notifications if it
5356  *	has changed. Should be called after driver- or hardware-dependent
5357  *	conditions that influence the features might have changed.
5358  */
5359 void netdev_update_features(struct net_device *dev)
5360 {
5361 	if (__netdev_update_features(dev))
5362 		netdev_features_change(dev);
5363 }
5364 EXPORT_SYMBOL(netdev_update_features);
5365 
5366 /**
5367  *	netdev_change_features - recalculate device features
5368  *	@dev: the device to check
5369  *
5370  *	Recalculate dev->features set and send notifications even
5371  *	if they have not changed. Should be called instead of
5372  *	netdev_update_features() if also dev->vlan_features might
5373  *	have changed to allow the changes to be propagated to stacked
5374  *	VLAN devices.
5375  */
5376 void netdev_change_features(struct net_device *dev)
5377 {
5378 	__netdev_update_features(dev);
5379 	netdev_features_change(dev);
5380 }
5381 EXPORT_SYMBOL(netdev_change_features);
5382 
5383 /**
5384  *	netif_stacked_transfer_operstate -	transfer operstate
5385  *	@rootdev: the root or lower level device to transfer state from
5386  *	@dev: the device to transfer operstate to
5387  *
5388  *	Transfer operational state from root to device. This is normally
5389  *	called when a stacking relationship exists between the root
5390  *	device and the device (a leaf device).
5391  */
5392 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5393 					struct net_device *dev)
5394 {
5395 	if (rootdev->operstate == IF_OPER_DORMANT)
5396 		netif_dormant_on(dev);
5397 	else
5398 		netif_dormant_off(dev);
5399 
5400 	if (netif_carrier_ok(rootdev)) {
5401 		if (!netif_carrier_ok(dev))
5402 			netif_carrier_on(dev);
5403 	} else {
5404 		if (netif_carrier_ok(dev))
5405 			netif_carrier_off(dev);
5406 	}
5407 }
5408 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5409 
5410 #ifdef CONFIG_RPS
5411 static int netif_alloc_rx_queues(struct net_device *dev)
5412 {
5413 	unsigned int i, count = dev->num_rx_queues;
5414 	struct netdev_rx_queue *rx;
5415 
5416 	BUG_ON(count < 1);
5417 
5418 	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5419 	if (!rx) {
5420 		pr_err("netdev: Unable to allocate %u rx queues\n", count);
5421 		return -ENOMEM;
5422 	}
5423 	dev->_rx = rx;
5424 
5425 	for (i = 0; i < count; i++)
5426 		rx[i].dev = dev;
5427 	return 0;
5428 }
5429 #endif
5430 
5431 static void netdev_init_one_queue(struct net_device *dev,
5432 				  struct netdev_queue *queue, void *_unused)
5433 {
5434 	/* Initialize queue lock */
5435 	spin_lock_init(&queue->_xmit_lock);
5436 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5437 	queue->xmit_lock_owner = -1;
5438 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
5439 	queue->dev = dev;
5440 #ifdef CONFIG_BQL
5441 	dql_init(&queue->dql, HZ);
5442 #endif
5443 }
5444 
5445 static int netif_alloc_netdev_queues(struct net_device *dev)
5446 {
5447 	unsigned int count = dev->num_tx_queues;
5448 	struct netdev_queue *tx;
5449 
5450 	BUG_ON(count < 1);
5451 
5452 	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5453 	if (!tx) {
5454 		pr_err("netdev: Unable to allocate %u tx queues\n", count);
5455 		return -ENOMEM;
5456 	}
5457 	dev->_tx = tx;
5458 
5459 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5460 	spin_lock_init(&dev->tx_global_lock);
5461 
5462 	return 0;
5463 }
5464 
5465 /**
5466  *	register_netdevice	- register a network device
5467  *	@dev: device to register
5468  *
5469  *	Take a completed network device structure and add it to the kernel
5470  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5471  *	chain. 0 is returned on success. A negative errno code is returned
5472  *	on a failure to set up the device, or if the name is a duplicate.
5473  *
5474  *	Callers must hold the rtnl semaphore. You may want
5475  *	register_netdev() instead of this.
5476  *
5477  *	BUGS:
5478  *	The locking appears insufficient to guarantee two parallel registers
5479  *	will not get the same name.
5480  */
5481 
5482 int register_netdevice(struct net_device *dev)
5483 {
5484 	int ret;
5485 	struct net *net = dev_net(dev);
5486 
5487 	BUG_ON(dev_boot_phase);
5488 	ASSERT_RTNL();
5489 
5490 	might_sleep();
5491 
5492 	/* When net_devices are persistent, this will be fatal. */
5493 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5494 	BUG_ON(!net);
5495 
5496 	spin_lock_init(&dev->addr_list_lock);
5497 	netdev_set_addr_lockdep_class(dev);
5498 
5499 	dev->iflink = -1;
5500 
5501 	ret = dev_get_valid_name(dev, dev->name);
5502 	if (ret < 0)
5503 		goto out;
5504 
5505 	/* Init, if this function is available */
5506 	if (dev->netdev_ops->ndo_init) {
5507 		ret = dev->netdev_ops->ndo_init(dev);
5508 		if (ret) {
5509 			if (ret > 0)
5510 				ret = -EIO;
5511 			goto out;
5512 		}
5513 	}
5514 
5515 	dev->ifindex = dev_new_index(net);
5516 	if (dev->iflink == -1)
5517 		dev->iflink = dev->ifindex;
5518 
5519 	/* Transfer changeable features to wanted_features and enable
5520 	 * software offloads (GSO and GRO).
5521 	 */
5522 	dev->hw_features |= NETIF_F_SOFT_FEATURES;
5523 	dev->features |= NETIF_F_SOFT_FEATURES;
5524 	dev->wanted_features = dev->features & dev->hw_features;
5525 
5526 	/* Turn on no cache copy if HW is doing checksum */
5527 	if (!(dev->flags & IFF_LOOPBACK)) {
5528 		dev->hw_features |= NETIF_F_NOCACHE_COPY;
5529 		if (dev->features & NETIF_F_ALL_CSUM) {
5530 			dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5531 			dev->features |= NETIF_F_NOCACHE_COPY;
5532 		}
5533 	}
5534 
5535 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
5536 	 */
5537 	dev->vlan_features |= NETIF_F_HIGHDMA;
5538 
5539 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5540 	ret = notifier_to_errno(ret);
5541 	if (ret)
5542 		goto err_uninit;
5543 
5544 	ret = netdev_register_kobject(dev);
5545 	if (ret)
5546 		goto err_uninit;
5547 	dev->reg_state = NETREG_REGISTERED;
5548 
5549 	__netdev_update_features(dev);
5550 
5551 	/*
5552 	 *	Default initial state at registration is that the
5553 	 *	device is present.
5554 	 */
5555 
5556 	set_bit(__LINK_STATE_PRESENT, &dev->state);
5557 
5558 	dev_init_scheduler(dev);
5559 	dev_hold(dev);
5560 	list_netdevice(dev);
5561 
5562 	/* Notify protocols that a new device appeared. */
5563 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5564 	ret = notifier_to_errno(ret);
5565 	if (ret) {
5566 		rollback_registered(dev);
5567 		dev->reg_state = NETREG_UNREGISTERED;
5568 	}
5569 	/*
5570 	 *	Prevent userspace races by waiting until the network
5571 	 *	device is fully setup before sending notifications.
5572 	 */
5573 	if (!dev->rtnl_link_ops ||
5574 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5575 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5576 
5577 out:
5578 	return ret;
5579 
5580 err_uninit:
5581 	if (dev->netdev_ops->ndo_uninit)
5582 		dev->netdev_ops->ndo_uninit(dev);
5583 	goto out;
5584 }
5585 EXPORT_SYMBOL(register_netdevice);
5586 
5587 /**
5588  *	init_dummy_netdev	- init a dummy network device for NAPI
5589  *	@dev: device to init
5590  *
5591  *	This takes a network device structure and initializes the minimum
5592  *	number of fields so it can be used to schedule NAPI polls without
5593  *	registering a full blown interface. This is to be used by drivers
5594  *	that need to tie several hardware interfaces to a single NAPI
5595  *	poll scheduler due to HW limitations.
5596  */
5597 int init_dummy_netdev(struct net_device *dev)
5598 {
5599 	/* Clear everything. Note we don't initialize spinlocks
5600 	 * as they aren't supposed to be taken by any of the
5601 	 * NAPI code and this dummy netdev is supposed to be
5602 	 * used only for NAPI polls.
5603 	 */
5604 	memset(dev, 0, sizeof(struct net_device));
5605 
5606 	/* make sure we BUG if trying to hit standard
5607 	 * register/unregister code path
5608 	 */
5609 	dev->reg_state = NETREG_DUMMY;
5610 
5611 	/* NAPI wants this */
5612 	INIT_LIST_HEAD(&dev->napi_list);
5613 
5614 	/* a dummy interface is started by default */
5615 	set_bit(__LINK_STATE_PRESENT, &dev->state);
5616 	set_bit(__LINK_STATE_START, &dev->state);
5617 
5618 	/* Note: we don't allocate pcpu_refcnt for dummy devices,
5619 	 * because users of this 'device' don't need to change
5620 	 * its refcount.
5621 	 */
5622 
5623 	return 0;
5624 }
5625 EXPORT_SYMBOL_GPL(init_dummy_netdev);
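/*
 * Illustrative sketch (not part of this file): a driver with several
 * hardware channels but one interrupt context can hang its NAPI
 * instance off a single dummy netdev ("foo_" names are hypothetical):
 *
 *	init_dummy_netdev(&priv->dummy_dev);
 *	netif_napi_add(&priv->dummy_dev, &priv->napi, foo_poll, 64);
 *	napi_enable(&priv->napi);
 */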
5626 
5627 
5628 /**
5629  *	register_netdev	- register a network device
5630  *	@dev: device to register
5631  *
5632  *	Take a completed network device structure and add it to the kernel
5633  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5634  *	chain. 0 is returned on success. A negative errno code is returned
5635  *	on a failure to set up the device, or if the name is a duplicate.
5636  *
5637  *	This is a wrapper around register_netdevice that takes the rtnl semaphore
5638  *	and expands the device name if you passed a format string to
5639  *	alloc_netdev.
5640  */
5641 int register_netdev(struct net_device *dev)
5642 {
5643 	int err;
5644 
5645 	rtnl_lock();
5646 	err = register_netdevice(dev);
5647 	rtnl_unlock();
5648 	return err;
5649 }
5650 EXPORT_SYMBOL(register_netdev);
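/*
 * Illustrative sketch (not part of this file): the usual registration
 * sequence in a hypothetical Ethernet driver probe routine:
 *
 *	netdev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!netdev)
 *		return -ENOMEM;
 *	netdev->netdev_ops = &foo_netdev_ops;	// assumed ops table
 *	err = register_netdev(netdev);		// takes RTNL internally
 *	if (err)
 *		free_netdev(netdev);
 */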
5651 
5652 int netdev_refcnt_read(const struct net_device *dev)
5653 {
5654 	int i, refcnt = 0;
5655 
5656 	for_each_possible_cpu(i)
5657 		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5658 	return refcnt;
5659 }
5660 EXPORT_SYMBOL(netdev_refcnt_read);
5661 
5662 /*
5663  * netdev_wait_allrefs - wait until all references are gone.
5664  *
5665  * This is called when unregistering network devices.
5666  *
5667  * Any protocol or device that holds a reference should register
5668  * for netdevice notification, and cleanup and put back the
5669  * reference if they receive an UNREGISTER event.
5670  * We can get stuck here if buggy protocols don't correctly
5671  * call dev_put.
5672  */
5673 static void netdev_wait_allrefs(struct net_device *dev)
5674 {
5675 	unsigned long rebroadcast_time, warning_time;
5676 	int refcnt;
5677 
5678 	linkwatch_forget_dev(dev);
5679 
5680 	rebroadcast_time = warning_time = jiffies;
5681 	refcnt = netdev_refcnt_read(dev);
5682 
5683 	while (refcnt != 0) {
5684 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5685 			rtnl_lock();
5686 
5687 			/* Rebroadcast unregister notification */
5688 			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5689 			/* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
5690 			 * should have already handled it the first time */
5691 
5692 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5693 				     &dev->state)) {
5694 				/* We must not have linkwatch events
5695 				 * pending on unregister. If this
5696 				 * happens, we simply run the queue
5697 				 * unscheduled, resulting in a noop
5698 				 * for this device.
5699 				 */
5700 				linkwatch_run_queue();
5701 			}
5702 
5703 			__rtnl_unlock();
5704 
5705 			rebroadcast_time = jiffies;
5706 		}
5707 
5708 		msleep(250);
5709 
5710 		refcnt = netdev_refcnt_read(dev);
5711 
5712 		if (time_after(jiffies, warning_time + 10 * HZ)) {
5713 			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5714 				 dev->name, refcnt);
5715 			warning_time = jiffies;
5716 		}
5717 	}
5718 }
5719 
5720 /* The sequence is:
5721  *
5722  *	rtnl_lock();
5723  *	...
5724  *	register_netdevice(x1);
5725  *	register_netdevice(x2);
5726  *	...
5727  *	unregister_netdevice(y1);
5728  *	unregister_netdevice(y2);
5729  *      ...
5730  *	rtnl_unlock();
5731  *	free_netdev(y1);
5732  *	free_netdev(y2);
5733  *
5734  * We are invoked by rtnl_unlock().
5735  * This allows us to deal with problems:
5736  * 1) We can delete sysfs objects which invoke hotplug
5737  *    without deadlocking with linkwatch via keventd.
5738  * 2) Since we run with the RTNL semaphore not held, we can sleep
5739  *    safely in order to wait for the netdev refcnt to drop to zero.
5740  *
5741  * We must not return until all unregister events added during
5742  * the interval the lock was held have been completed.
5743  */
5744 void netdev_run_todo(void)
5745 {
5746 	struct list_head list;
5747 
5748 	/* Snapshot list, allow later requests */
5749 	list_replace_init(&net_todo_list, &list);
5750 
5751 	__rtnl_unlock();
5752 
5753 	/* Wait for rcu callbacks to finish before attempting to drain
5754 	 * the device list.  This usually avoids a 250ms wait.
5755 	 */
5756 	if (!list_empty(&list))
5757 		rcu_barrier();
5758 
5759 	while (!list_empty(&list)) {
5760 		struct net_device *dev
5761 			= list_first_entry(&list, struct net_device, todo_list);
5762 		list_del(&dev->todo_list);
5763 
5764 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5765 			pr_err("network todo '%s' but state %d\n",
5766 			       dev->name, dev->reg_state);
5767 			dump_stack();
5768 			continue;
5769 		}
5770 
5771 		dev->reg_state = NETREG_UNREGISTERED;
5772 
5773 		on_each_cpu(flush_backlog, dev, 1);
5774 
5775 		netdev_wait_allrefs(dev);
5776 
5777 		/* paranoia */
5778 		BUG_ON(netdev_refcnt_read(dev));
5779 		WARN_ON(rcu_access_pointer(dev->ip_ptr));
5780 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
5781 		WARN_ON(dev->dn_ptr);
5782 
5783 		if (dev->destructor)
5784 			dev->destructor(dev);
5785 
5786 		/* Free network device */
5787 		kobject_put(&dev->dev.kobj);
5788 	}
5789 }
5790 
5791 /* Convert net_device_stats to rtnl_link_stats64.  They have the same
5792  * fields in the same order, with only the type differing.
5793  */
5794 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5795 			     const struct net_device_stats *netdev_stats)
5796 {
5797 #if BITS_PER_LONG == 64
5798 	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5799 	memcpy(stats64, netdev_stats, sizeof(*stats64));
5800 #else
5801 	size_t i, n = sizeof(*stats64) / sizeof(u64);
5802 	const unsigned long *src = (const unsigned long *)netdev_stats;
5803 	u64 *dst = (u64 *)stats64;
5804 
5805 	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5806 		     sizeof(*stats64) / sizeof(u64));
5807 	for (i = 0; i < n; i++)
5808 		dst[i] = src[i];
5809 #endif
5810 }
5811 EXPORT_SYMBOL(netdev_stats_to_stats64);
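/*
 * Illustrative sketch (not part of this file): a driver whose hardware
 * only maintains the unsigned long counters in dev->stats can widen them
 * with this helper before reporting 64-bit statistics:
 *
 *	static void my_fill_stats64(struct net_device *dev,
 *				    struct rtnl_link_stats64 *out)
 *	{
 *		netdev_stats_to_stats64(out, &dev->stats);
 *	}
 */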
5812 
5813 /**
5814  *	dev_get_stats	- get network device statistics
5815  *	@dev: device to get statistics from
5816  *	@storage: place to store stats
5817  *
5818  *	Get network statistics from device. Return @storage.
5819  *	The device driver may provide its own method by setting
5820  *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
5821  *	otherwise the internal statistics structure is used.
5822  */
5823 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5824 					struct rtnl_link_stats64 *storage)
5825 {
5826 	const struct net_device_ops *ops = dev->netdev_ops;
5827 
5828 	if (ops->ndo_get_stats64) {
5829 		memset(storage, 0, sizeof(*storage));
5830 		ops->ndo_get_stats64(dev, storage);
5831 	} else if (ops->ndo_get_stats) {
5832 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
5833 	} else {
5834 		netdev_stats_to_stats64(storage, &dev->stats);
5835 	}
5836 	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
5837 	return storage;
5838 }
5839 EXPORT_SYMBOL(dev_get_stats);
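/*
 * Illustrative sketch (not part of this file): callers pass scratch
 * storage and read through the returned pointer:
 *
 *	struct rtnl_link_stats64 temp;
 *	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 *
 *	pr_info("%s: rx_packets=%llu\n", dev->name,
 *		(unsigned long long)stats->rx_packets);
 */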
5840 
5841 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5842 {
5843 	struct netdev_queue *queue = dev_ingress_queue(dev);
5844 
5845 #ifdef CONFIG_NET_CLS_ACT
5846 	if (queue)
5847 		return queue;
5848 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5849 	if (!queue)
5850 		return NULL;
5851 	netdev_init_one_queue(dev, queue, NULL);
5852 	queue->qdisc = &noop_qdisc;
5853 	queue->qdisc_sleeping = &noop_qdisc;
5854 	rcu_assign_pointer(dev->ingress_queue, queue);
5855 #endif
5856 	return queue;
5857 }
5858 
5859 /**
5860  *	alloc_netdev_mqs - allocate network device
5861  *	@sizeof_priv:	size of private data to allocate space for
5862  *	@name:		device name format string
5863  *	@setup:		callback to initialize device
5864  *	@txqs:		the number of TX subqueues to allocate
5865  *	@rxqs:		the number of RX subqueues to allocate
5866  *
5867  *	Allocates a struct net_device with private data area for driver use
5868  *	and performs basic initialization.  Also allocates subqueue structs
5869  *	for each queue on the device.
5870  */
5871 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5872 		void (*setup)(struct net_device *),
5873 		unsigned int txqs, unsigned int rxqs)
5874 {
5875 	struct net_device *dev;
5876 	size_t alloc_size;
5877 	struct net_device *p;
5878 
5879 	BUG_ON(strlen(name) >= sizeof(dev->name));
5880 
5881 	if (txqs < 1) {
5882 		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
5883 		return NULL;
5884 	}
5885 
5886 #ifdef CONFIG_RPS
5887 	if (rxqs < 1) {
5888 		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
5889 		return NULL;
5890 	}
5891 #endif
5892 
5893 	alloc_size = sizeof(struct net_device);
5894 	if (sizeof_priv) {
5895 		/* ensure 32-byte alignment of private area */
5896 		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
5897 		alloc_size += sizeof_priv;
5898 	}
5899 	/* ensure 32-byte alignment of whole construct */
5900 	alloc_size += NETDEV_ALIGN - 1;
5901 
5902 	p = kzalloc(alloc_size, GFP_KERNEL);
5903 	if (!p) {
5904 		pr_err("alloc_netdev: Unable to allocate device\n");
5905 		return NULL;
5906 	}
5907 
5908 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
5909 	dev->padded = (char *)dev - (char *)p;
5910 
5911 	dev->pcpu_refcnt = alloc_percpu(int);
5912 	if (!dev->pcpu_refcnt)
5913 		goto free_p;
5914 
5915 	if (dev_addr_init(dev))
5916 		goto free_pcpu;
5917 
5918 	dev_mc_init(dev);
5919 	dev_uc_init(dev);
5920 
5921 	dev_net_set(dev, &init_net);
5922 
5923 	dev->gso_max_size = GSO_MAX_SIZE;
5924 
5925 	INIT_LIST_HEAD(&dev->napi_list);
5926 	INIT_LIST_HEAD(&dev->unreg_list);
5927 	INIT_LIST_HEAD(&dev->link_watch_list);
5928 	dev->priv_flags = IFF_XMIT_DST_RELEASE;
5929 	setup(dev);
5930 
5931 	dev->num_tx_queues = txqs;
5932 	dev->real_num_tx_queues = txqs;
5933 	if (netif_alloc_netdev_queues(dev))
5934 		goto free_all;
5935 
5936 #ifdef CONFIG_RPS
5937 	dev->num_rx_queues = rxqs;
5938 	dev->real_num_rx_queues = rxqs;
5939 	if (netif_alloc_rx_queues(dev))
5940 		goto free_all;
5941 #endif
5942 
5943 	strcpy(dev->name, name);
5944 	dev->group = INIT_NETDEV_GROUP;
5945 	return dev;
5946 
5947 free_all:
5948 	free_netdev(dev);
5949 	return NULL;
5950 
5951 free_pcpu:
5952 	free_percpu(dev->pcpu_refcnt);
5953 	kfree(dev->_tx);
5954 #ifdef CONFIG_RPS
5955 	kfree(dev->_rx);
5956 #endif
5957 
5958 free_p:
5959 	kfree(p);
5960 	return NULL;
5961 }
5962 EXPORT_SYMBOL(alloc_netdev_mqs);
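/*
 * Illustrative sketch (not part of this file): allocating a device with
 * four TX and four RX queues; "my_setup" is a hypothetical callback that
 * would typically call ether_setup() and fill in dev->netdev_ops:
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "myeth%d",
 *			       my_setup, 4, 4);
 *	if (!dev)
 *		return -ENOMEM;
 *
 * The "%d" in the name format is expanded later, when register_netdev()
 * takes the rtnl lock and picks a free name.
 */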
5963 
5964 /**
5965  *	free_netdev - free network device
5966  *	@dev: device
5967  *
5968  *	This function does the last stage of destroying an allocated device
5969  * 	interface. The reference to the device object is released.
5970  *	If this is the last reference then it will be freed.
5971  */
5972 void free_netdev(struct net_device *dev)
5973 {
5974 	struct napi_struct *p, *n;
5975 
5976 	release_net(dev_net(dev));
5977 
5978 	kfree(dev->_tx);
5979 #ifdef CONFIG_RPS
5980 	kfree(dev->_rx);
5981 #endif
5982 
5983 	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
5984 
5985 	/* Flush device addresses */
5986 	dev_addr_flush(dev);
5987 
5988 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5989 		netif_napi_del(p);
5990 
5991 	free_percpu(dev->pcpu_refcnt);
5992 	dev->pcpu_refcnt = NULL;
5993 
5994 	/*  Compatibility with error handling in drivers */
5995 	if (dev->reg_state == NETREG_UNINITIALIZED) {
5996 		kfree((char *)dev - dev->padded);
5997 		return;
5998 	}
5999 
6000 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6001 	dev->reg_state = NETREG_RELEASED;
6002 
6003 	/* will free via device release */
6004 	put_device(&dev->dev);
6005 }
6006 EXPORT_SYMBOL(free_netdev);
6007 
6008 /**
6009  *	synchronize_net -  Synchronize with packet receive processing
6010  *
6011  *	Wait for packets currently being received to be done.
6012  *	Does not block later packets from starting.
6013  */
6014 void synchronize_net(void)
6015 {
6016 	might_sleep();
6017 	if (rtnl_is_locked())
6018 		synchronize_rcu_expedited();
6019 	else
6020 		synchronize_rcu();
6021 }
6022 EXPORT_SYMBOL(synchronize_net);
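/*
 * Illustrative sketch (not part of this file): a common pattern is to
 * publish a new RCU-protected object and wait for in-flight receive
 * processing to finish before freeing the old one; "some_cfg" is a
 * hypothetical RCU-managed pointer:
 *
 *	old = rtnl_dereference(dev->some_cfg);
 *	rcu_assign_pointer(dev->some_cfg, new);
 *	synchronize_net();
 *	kfree(old);
 */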
6023 
6024 /**
6025  *	unregister_netdevice_queue - remove device from the kernel
6026  *	@dev: device
6027  *	@head: list
6028  *
6029  *	This function shuts down a device interface and removes it
6030  *	from the kernel tables.
6031  *	If @head is not NULL, the device is queued to be unregistered later.
6032  *
6033  *	Callers must hold the rtnl semaphore.  You may want
6034  *	unregister_netdev() instead of this.
6035  */
6036 
6037 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
6038 {
6039 	ASSERT_RTNL();
6040 
6041 	if (head) {
6042 		list_move_tail(&dev->unreg_list, head);
6043 	} else {
6044 		rollback_registered(dev);
6045 		/* Finish processing unregister after unlock */
6046 		net_set_todo(dev);
6047 	}
6048 }
6049 EXPORT_SYMBOL(unregister_netdevice_queue);
6050 
6051 /**
6052  *	unregister_netdevice_many - unregister many devices
6053  *	@head: list of devices
6054  */
6055 void unregister_netdevice_many(struct list_head *head)
6056 {
6057 	struct net_device *dev;
6058 
6059 	if (!list_empty(head)) {
6060 		rollback_registered_many(head);
6061 		list_for_each_entry(dev, head, unreg_list)
6062 			net_set_todo(dev);
6063 	}
6064 }
6065 EXPORT_SYMBOL(unregister_netdevice_many);
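/*
 * Illustrative sketch (not part of this file): batching several devices
 * into one list amortizes the synchronization done by
 * rollback_registered_many(); dev1/dev2 are hypothetical:
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev1, &kill_list);
 *	unregister_netdevice_queue(dev2, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 *	free_netdev(dev1);
 *	free_netdev(dev2);
 */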
6066 
6067 /**
6068  *	unregister_netdev - remove device from the kernel
6069  *	@dev: device
6070  *
6071  *	This function shuts down a device interface and removes it
6072  *	from the kernel tables.
6073  *
6074  *	This is just a wrapper for unregister_netdevice that takes
6075  *	the rtnl semaphore.  In general you want to use this and not
6076  *	unregister_netdevice.
6077  */
6078 void unregister_netdev(struct net_device *dev)
6079 {
6080 	rtnl_lock();
6081 	unregister_netdevice(dev);
6082 	rtnl_unlock();
6083 }
6084 EXPORT_SYMBOL(unregister_netdev);
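/*
 * Illustrative sketch (not part of this file): the usual driver remove
 * path, with "struct my_device" as a hypothetical bus-specific structure:
 *
 *	static void my_remove(struct my_device *mydev)
 *	{
 *		struct net_device *dev = mydev->netdev;
 *
 *		unregister_netdev(dev);		// takes and drops the rtnl
 *		free_netdev(dev);
 *	}
 */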
6085 
6086 /**
6087  *	dev_change_net_namespace - move device to a different network namespace
6088  *	@dev: device
6089  *	@net: network namespace
6090  *	@pat: If not NULL name pattern to try if the current device name
6091  *	      is already taken in the destination network namespace.
6092  *
6093  *	This function shuts down a device interface and moves it
6094  *	to a new network namespace. On success 0 is returned, on
6095  *	a failure a negative errno code is returned.
6096  *
6097  *	Callers must hold the rtnl semaphore.
6098  */
6099 
6100 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6101 {
6102 	int err;
6103 
6104 	ASSERT_RTNL();
6105 
6106 	/* Don't allow namespace local devices to be moved. */
6107 	err = -EINVAL;
6108 	if (dev->features & NETIF_F_NETNS_LOCAL)
6109 		goto out;
6110 
6111 	/* Ensure the device has been registered */
6112 	err = -EINVAL;
6113 	if (dev->reg_state != NETREG_REGISTERED)
6114 		goto out;
6115 
6116 	/* Get out if there is nothing to do */
6117 	err = 0;
6118 	if (net_eq(dev_net(dev), net))
6119 		goto out;
6120 
6121 	/* Pick the destination device name, and ensure
6122 	 * we can use it in the destination network namespace.
6123 	 */
6124 	err = -EEXIST;
6125 	if (__dev_get_by_name(net, dev->name)) {
6126 		/* We get here if we can't use the current device name */
6127 		if (!pat)
6128 			goto out;
6129 		if (dev_get_valid_name(dev, pat) < 0)
6130 			goto out;
6131 	}
6132 
6133 	/*
6134 	 * And now a mini version of register_netdevice and unregister_netdevice.
6135 	 */
6136 
6137 	/* If device is running close it first. */
6138 	dev_close(dev);
6139 
6140 	/* And unlink it from device chain */
6141 	err = -ENODEV;
6142 	unlist_netdevice(dev);
6143 
6144 	synchronize_net();
6145 
6146 	/* Shutdown queueing discipline. */
6147 	dev_shutdown(dev);
6148 
6149 	/* Notify protocols that we are about to destroy
6150 	   this device. They should clean up all of their state.
6151 
6152 	   Note that dev->reg_state stays at NETREG_REGISTERED.
6153 	   This is deliberate: it lets 8021q and macvlan see that
6154 	   the device is just moving and keep their slaves up.
6155 	*/
6156 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6157 	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
6158 	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6159 
6160 	/*
6161 	 *	Flush the unicast and multicast chains
6162 	 */
6163 	dev_uc_flush(dev);
6164 	dev_mc_flush(dev);
6165 
6166 	/* Actually switch the network namespace */
6167 	dev_net_set(dev, net);
6168 
6169 	/* If there is an ifindex conflict assign a new one */
6170 	if (__dev_get_by_index(net, dev->ifindex)) {
6171 		int iflink = (dev->iflink == dev->ifindex);
6172 		dev->ifindex = dev_new_index(net);
6173 		if (iflink)
6174 			dev->iflink = dev->ifindex;
6175 	}
6176 
6177 	/* Fixup kobjects */
6178 	err = device_rename(&dev->dev, dev->name);
6179 	WARN_ON(err);
6180 
6181 	/* Add the device back in the hashes */
6182 	list_netdevice(dev);
6183 
6184 	/* Notify protocols, that a new device appeared. */
6185 	call_netdevice_notifiers(NETDEV_REGISTER, dev);
6186 
6187 	/*
6188 	 *	Prevent userspace races by waiting until the network
6189 	 *	device is fully setup before sending notifications.
6190 	 */
6191 	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
6192 
6193 	synchronize_net();
6194 	err = 0;
6195 out:
6196 	return err;
6197 }
6198 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
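/*
 * Illustrative sketch (not part of this file): moving a device into
 * another namespace under the rtnl lock, falling back to an "eth%d"
 * pattern if its current name is already taken there; "target_net" is
 * hypothetical:
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "eth%d");
 *	rtnl_unlock();
 */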
6199 
6200 static int dev_cpu_callback(struct notifier_block *nfb,
6201 			    unsigned long action,
6202 			    void *ocpu)
6203 {
6204 	struct sk_buff **list_skb;
6205 	struct sk_buff *skb;
6206 	unsigned int cpu, oldcpu = (unsigned long)ocpu;
6207 	struct softnet_data *sd, *oldsd;
6208 
6209 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
6210 		return NOTIFY_OK;
6211 
6212 	local_irq_disable();
6213 	cpu = smp_processor_id();
6214 	sd = &per_cpu(softnet_data, cpu);
6215 	oldsd = &per_cpu(softnet_data, oldcpu);
6216 
6217 	/* Find end of our completion_queue. */
6218 	list_skb = &sd->completion_queue;
6219 	while (*list_skb)
6220 		list_skb = &(*list_skb)->next;
6221 	/* Append completion queue from offline CPU. */
6222 	*list_skb = oldsd->completion_queue;
6223 	oldsd->completion_queue = NULL;
6224 
6225 	/* Append output queue from offline CPU. */
6226 	if (oldsd->output_queue) {
6227 		*sd->output_queue_tailp = oldsd->output_queue;
6228 		sd->output_queue_tailp = oldsd->output_queue_tailp;
6229 		oldsd->output_queue = NULL;
6230 		oldsd->output_queue_tailp = &oldsd->output_queue;
6231 	}
6232 	/* Append NAPI poll list from offline CPU. */
6233 	if (!list_empty(&oldsd->poll_list)) {
6234 		list_splice_init(&oldsd->poll_list, &sd->poll_list);
6235 		raise_softirq_irqoff(NET_RX_SOFTIRQ);
6236 	}
6237 
6238 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
6239 	local_irq_enable();
6240 
6241 	/* Process offline CPU's input_pkt_queue */
6242 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6243 		netif_rx(skb);
6244 		input_queue_head_incr(oldsd);
6245 	}
6246 	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6247 		netif_rx(skb);
6248 		input_queue_head_incr(oldsd);
6249 	}
6250 
6251 	return NOTIFY_OK;
6252 }
6253 
6254 
6255 /**
6256  *	netdev_increment_features - increment feature set by one
6257  *	@all: current feature set
6258  *	@one: new feature set
6259  *	@mask: mask feature set
6260  *
6261  *	Computes a new feature set after adding a device with feature set
6262  *	@one to the master device with current feature set @all.  Will not
6263  *	enable anything that is off in @mask. Returns the new feature set.
6264  */
6265 netdev_features_t netdev_increment_features(netdev_features_t all,
6266 	netdev_features_t one, netdev_features_t mask)
6267 {
6268 	if (mask & NETIF_F_GEN_CSUM)
6269 		mask |= NETIF_F_ALL_CSUM;
6270 	mask |= NETIF_F_VLAN_CHALLENGED;
6271 
6272 	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6273 	all &= one | ~NETIF_F_ALL_FOR_ALL;
6274 
6275 	/* If one device supports hw checksumming, set for all. */
6276 	if (all & NETIF_F_GEN_CSUM)
6277 		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
6278 
6279 	return all;
6280 }
6281 EXPORT_SYMBOL(netdev_increment_features);
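/*
 * Illustrative sketch (not part of this file): a master device such as a
 * bond recomputes its feature set by folding in each slave;
 * MY_MASTER_FEATURES and the slave list are hypothetical:
 *
 *	netdev_features_t features = MY_MASTER_FEATURES;
 *
 *	list_for_each_entry(slave, &master->slave_list, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     MY_MASTER_FEATURES);
 */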
6282 
6283 static struct hlist_head *netdev_create_hash(void)
6284 {
6285 	int i;
6286 	struct hlist_head *hash;
6287 
6288 	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6289 	if (hash != NULL)
6290 		for (i = 0; i < NETDEV_HASHENTRIES; i++)
6291 			INIT_HLIST_HEAD(&hash[i]);
6292 
6293 	return hash;
6294 }
6295 
6296 /* Initialize per network namespace state */
6297 static int __net_init netdev_init(struct net *net)
6298 {
6299 	INIT_LIST_HEAD(&net->dev_base_head);
6300 
6301 	net->dev_name_head = netdev_create_hash();
6302 	if (net->dev_name_head == NULL)
6303 		goto err_name;
6304 
6305 	net->dev_index_head = netdev_create_hash();
6306 	if (net->dev_index_head == NULL)
6307 		goto err_idx;
6308 
6309 	return 0;
6310 
6311 err_idx:
6312 	kfree(net->dev_name_head);
6313 err_name:
6314 	return -ENOMEM;
6315 }
6316 
6317 /**
6318  *	netdev_drivername - network driver for the device
6319  *	@dev: network device
6320  *
6321  *	Determine network driver for device.
6322  */
6323 const char *netdev_drivername(const struct net_device *dev)
6324 {
6325 	const struct device_driver *driver;
6326 	const struct device *parent;
6327 	const char *empty = "";
6328 
6329 	parent = dev->dev.parent;
6330 	if (!parent)
6331 		return empty;
6332 
6333 	driver = parent->driver;
6334 	if (driver && driver->name)
6335 		return driver->name;
6336 	return empty;
6337 }
6338 
6339 int __netdev_printk(const char *level, const struct net_device *dev,
6340 			   struct va_format *vaf)
6341 {
6342 	int r;
6343 
6344 	if (dev && dev->dev.parent)
6345 		r = dev_printk(level, dev->dev.parent, "%s: %pV",
6346 			       netdev_name(dev), vaf);
6347 	else if (dev)
6348 		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
6349 	else
6350 		r = printk("%s(NULL net_device): %pV", level, vaf);
6351 
6352 	return r;
6353 }
6354 EXPORT_SYMBOL(__netdev_printk);
6355 
6356 int netdev_printk(const char *level, const struct net_device *dev,
6357 		  const char *format, ...)
6358 {
6359 	struct va_format vaf;
6360 	va_list args;
6361 	int r;
6362 
6363 	va_start(args, format);
6364 
6365 	vaf.fmt = format;
6366 	vaf.va = &args;
6367 
6368 	r = __netdev_printk(level, dev, &vaf);
6369 	va_end(args);
6370 
6371 	return r;
6372 }
6373 EXPORT_SYMBOL(netdev_printk);
6374 
6375 #define define_netdev_printk_level(func, level)			\
6376 int func(const struct net_device *dev, const char *fmt, ...)	\
6377 {								\
6378 	int r;							\
6379 	struct va_format vaf;					\
6380 	va_list args;						\
6381 								\
6382 	va_start(args, fmt);					\
6383 								\
6384 	vaf.fmt = fmt;						\
6385 	vaf.va = &args;						\
6386 								\
6387 	r = __netdev_printk(level, dev, &vaf);			\
6388 	va_end(args);						\
6389 								\
6390 	return r;						\
6391 }								\
6392 EXPORT_SYMBOL(func);
6393 
6394 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6395 define_netdev_printk_level(netdev_alert, KERN_ALERT);
6396 define_netdev_printk_level(netdev_crit, KERN_CRIT);
6397 define_netdev_printk_level(netdev_err, KERN_ERR);
6398 define_netdev_printk_level(netdev_warn, KERN_WARNING);
6399 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6400 define_netdev_printk_level(netdev_info, KERN_INFO);
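/*
 * Illustrative sketch (not part of this file): drivers use the levelled
 * wrappers generated above so every message is prefixed with the device
 * name; "qidx" and "speed" are hypothetical:
 *
 *	netdev_err(dev, "TX timeout on queue %d\n", qidx);
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 */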
6401 
6402 static void __net_exit netdev_exit(struct net *net)
6403 {
6404 	kfree(net->dev_name_head);
6405 	kfree(net->dev_index_head);
6406 }
6407 
6408 static struct pernet_operations __net_initdata netdev_net_ops = {
6409 	.init = netdev_init,
6410 	.exit = netdev_exit,
6411 };
6412 
6413 static void __net_exit default_device_exit(struct net *net)
6414 {
6415 	struct net_device *dev, *aux;
6416 	/*
6417 	 * Push all migratable network devices back to the
6418 	 * initial network namespace
6419 	 */
6420 	rtnl_lock();
6421 	for_each_netdev_safe(net, dev, aux) {
6422 		int err;
6423 		char fb_name[IFNAMSIZ];
6424 
6425 		/* Ignore unmovable devices (e.g. loopback) */
6426 		if (dev->features & NETIF_F_NETNS_LOCAL)
6427 			continue;
6428 
6429 		/* Leave virtual devices for the generic cleanup */
6430 		if (dev->rtnl_link_ops)
6431 			continue;
6432 
6433 		/* Push remaining network devices to init_net */
6434 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6435 		err = dev_change_net_namespace(dev, &init_net, fb_name);
6436 		if (err) {
6437 			pr_emerg("%s: failed to move %s to init_net: %d\n",
6438 				 __func__, dev->name, err);
6439 			BUG();
6440 		}
6441 	}
6442 	rtnl_unlock();
6443 }
6444 
6445 static void __net_exit default_device_exit_batch(struct list_head *net_list)
6446 {
6447 	/* At exit all network devices must be removed from a network
6448 	 * namespace.  Do this in the reverse order of registration.
6449 	 * Do this across as many network namespaces as possible to
6450 	 * improve batching efficiency.
6451 	 */
6452 	struct net_device *dev;
6453 	struct net *net;
6454 	LIST_HEAD(dev_kill_list);
6455 
6456 	rtnl_lock();
6457 	list_for_each_entry(net, net_list, exit_list) {
6458 		for_each_netdev_reverse(net, dev) {
6459 			if (dev->rtnl_link_ops)
6460 				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6461 			else
6462 				unregister_netdevice_queue(dev, &dev_kill_list);
6463 		}
6464 	}
6465 	unregister_netdevice_many(&dev_kill_list);
6466 	list_del(&dev_kill_list);
6467 	rtnl_unlock();
6468 }
6469 
6470 static struct pernet_operations __net_initdata default_device_ops = {
6471 	.exit = default_device_exit,
6472 	.exit_batch = default_device_exit_batch,
6473 };
6474 
6475 /*
6476  *	Initialize the DEV module. At boot time this walks the device list and
6477  *	unhooks any devices that fail to initialise (normally hardware not
6478  *	present) and leaves us with a valid list of present and active devices.
6479  *
6480  */
6481 
6482 /*
6483  *       This is called single threaded during boot, so no need
6484  *       to take the rtnl semaphore.
6485  */
6486 static int __init net_dev_init(void)
6487 {
6488 	int i, rc = -ENOMEM;
6489 
6490 	BUG_ON(!dev_boot_phase);
6491 
6492 	if (dev_proc_init())
6493 		goto out;
6494 
6495 	if (netdev_kobject_init())
6496 		goto out;
6497 
6498 	INIT_LIST_HEAD(&ptype_all);
6499 	for (i = 0; i < PTYPE_HASH_SIZE; i++)
6500 		INIT_LIST_HEAD(&ptype_base[i]);
6501 
6502 	if (register_pernet_subsys(&netdev_net_ops))
6503 		goto out;
6504 
6505 	/*
6506 	 *	Initialise the packet receive queues.
6507 	 */
6508 
6509 	for_each_possible_cpu(i) {
6510 		struct softnet_data *sd = &per_cpu(softnet_data, i);
6511 
6512 		memset(sd, 0, sizeof(*sd));
6513 		skb_queue_head_init(&sd->input_pkt_queue);
6514 		skb_queue_head_init(&sd->process_queue);
6515 		sd->completion_queue = NULL;
6516 		INIT_LIST_HEAD(&sd->poll_list);
6517 		sd->output_queue = NULL;
6518 		sd->output_queue_tailp = &sd->output_queue;
6519 #ifdef CONFIG_RPS
6520 		sd->csd.func = rps_trigger_softirq;
6521 		sd->csd.info = sd;
6522 		sd->csd.flags = 0;
6523 		sd->cpu = i;
6524 #endif
6525 
6526 		sd->backlog.poll = process_backlog;
6527 		sd->backlog.weight = weight_p;
6528 		sd->backlog.gro_list = NULL;
6529 		sd->backlog.gro_count = 0;
6530 	}
6531 
6532 	dev_boot_phase = 0;
6533 
6534 	/* The loopback device is special: if any other network device
6535 	 * is present in a network namespace, the loopback device must
6536 	 * be present too. Since we now dynamically allocate and free the
6537 	 * loopback device, ensure this invariant is maintained by
6538 	 * keeping the loopback device as the first device on the
6539 	 * list of network devices.  This way the loopback device
6540 	 * is the first device that appears and the last network device
6541 	 * that disappears.
6542 	 */
6543 	if (register_pernet_device(&loopback_net_ops))
6544 		goto out;
6545 
6546 	if (register_pernet_device(&default_device_ops))
6547 		goto out;
6548 
6549 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6550 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
6551 
6552 	hotcpu_notifier(dev_cpu_callback, 0);
6553 	dst_init();
6554 	dev_mcast_init();
6555 	rc = 0;
6556 out:
6557 	return rc;
6558 }
6559 
6560 subsys_initcall(net_dev_init);
6561 
6562 static int __init initialize_hashrnd(void)
6563 {
6564 	get_random_bytes(&hashrnd, sizeof(hashrnd));
6565 	return 0;
6566 }
6567 
6568 late_initcall_sync(initialize_hashrnd);
6569 
6570