xref: /linux-6.15/net/core/dev.c (revision dcbdf135)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *      NET3    Protocol independent device support routines.
4  *
5  *	Derived from the non IP parts of dev.c 1.0.19
6  *              Authors:	Ross Biro
7  *				Fred N. van Kempen, <[email protected]>
8  *				Mark Evans, <[email protected]>
9  *
10  *	Additional Authors:
11  *		Florian la Roche <[email protected]>
12  *		Alan Cox <[email protected]>
13  *		David Hinds <[email protected]>
14  *		Alexey Kuznetsov <[email protected]>
15  *		Adam Sulmicki <[email protected]>
16  *              Pekka Riikonen <[email protected]>
17  *
18  *	Changes:
19  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
20  *                                      to 2 if register_netdev gets called
21  *                                      before net_dev_init & also removed a
22  *                                      few lines of code in the process.
23  *		Alan Cox	:	device private ioctl copies fields back.
24  *		Alan Cox	:	Transmit queue code does relevant
25  *					stunts to keep the queue safe.
26  *		Alan Cox	:	Fixed double lock.
27  *		Alan Cox	:	Fixed promisc NULL pointer trap
28  *		????????	:	Support the full private ioctl range
29  *		Alan Cox	:	Moved ioctl permission check into
30  *					drivers
31  *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
32  *		Alan Cox	:	100 backlog just doesn't cut it when
33  *					you start doing multicast video 8)
34  *		Alan Cox	:	Rewrote net_bh and list manager.
35  *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
36  *		Alan Cox	:	Took out transmit every packet pass
37  *					Saved a few bytes in the ioctl handler
38  *		Alan Cox	:	Network driver sets packet type before
39  *					calling netif_rx. Saves a function
40  *					call a packet.
41  *		Alan Cox	:	Hashed net_bh()
42  *		Richard Kooijman:	Timestamp fixes.
43  *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
44  *		Alan Cox	:	Device lock protection.
45  *              Alan Cox        :       Fixed nasty side effect of device close
46  *					changes.
47  *		Rudi Cilibrasi	:	Pass the right thing to
48  *					set_mac_address()
49  *		Dave Miller	:	32bit quantity for the device lock to
50  *					make it work out on a Sparc.
51  *		Bjorn Ekwall	:	Added KERNELD hack.
52  *		Alan Cox	:	Cleaned up the backlog initialise.
53  *		Craig Metz	:	SIOCGIFCONF fix if space for under
54  *					1 device.
55  *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
56  *					is no device open function.
57  *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
58  *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
59  *		Cyrus Durgin	:	Cleaned for KMOD
60  *		Adam Sulmicki   :	Bug Fix : Network Device Unload
61  *					A network device unload needs to purge
62  *					the backlog queue.
63  *	Paul Rusty Russell	:	SIOCSIFNAME
64  *              Pekka Riikonen  :	Netdev boot-time settings code
65  *              Andrew Morton   :       Make unregister_netdevice wait
66  *                                      indefinitely on dev->refcnt
67  *              J Hadi Salim    :       - Backlog queue sampling
68  *				        - netif_rx() feedback
69  */
70 
71 #include <linux/uaccess.h>
72 #include <linux/bitops.h>
73 #include <linux/capability.h>
74 #include <linux/cpu.h>
75 #include <linux/types.h>
76 #include <linux/kernel.h>
77 #include <linux/hash.h>
78 #include <linux/slab.h>
79 #include <linux/sched.h>
80 #include <linux/sched/mm.h>
81 #include <linux/mutex.h>
82 #include <linux/rwsem.h>
83 #include <linux/string.h>
84 #include <linux/mm.h>
85 #include <linux/socket.h>
86 #include <linux/sockios.h>
87 #include <linux/errno.h>
88 #include <linux/interrupt.h>
89 #include <linux/if_ether.h>
90 #include <linux/netdevice.h>
91 #include <linux/etherdevice.h>
92 #include <linux/ethtool.h>
93 #include <linux/skbuff.h>
94 #include <linux/kthread.h>
95 #include <linux/bpf.h>
96 #include <linux/bpf_trace.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <net/busy_poll.h>
100 #include <linux/rtnetlink.h>
101 #include <linux/stat.h>
102 #include <net/dsa.h>
103 #include <net/dst.h>
104 #include <net/dst_metadata.h>
105 #include <net/gro.h>
106 #include <net/pkt_sched.h>
107 #include <net/pkt_cls.h>
108 #include <net/checksum.h>
109 #include <net/xfrm.h>
110 #include <linux/highmem.h>
111 #include <linux/init.h>
112 #include <linux/module.h>
113 #include <linux/netpoll.h>
114 #include <linux/rcupdate.h>
115 #include <linux/delay.h>
116 #include <net/iw_handler.h>
117 #include <asm/current.h>
118 #include <linux/audit.h>
119 #include <linux/dmaengine.h>
120 #include <linux/err.h>
121 #include <linux/ctype.h>
122 #include <linux/if_arp.h>
123 #include <linux/if_vlan.h>
124 #include <linux/ip.h>
125 #include <net/ip.h>
126 #include <net/mpls.h>
127 #include <linux/ipv6.h>
128 #include <linux/in.h>
129 #include <linux/jhash.h>
130 #include <linux/random.h>
131 #include <trace/events/napi.h>
132 #include <trace/events/net.h>
133 #include <trace/events/skb.h>
134 #include <linux/inetdevice.h>
135 #include <linux/cpu_rmap.h>
136 #include <linux/static_key.h>
137 #include <linux/hashtable.h>
138 #include <linux/vmalloc.h>
139 #include <linux/if_macvlan.h>
140 #include <linux/errqueue.h>
141 #include <linux/hrtimer.h>
142 #include <linux/netfilter_ingress.h>
143 #include <linux/crash_dump.h>
144 #include <linux/sctp.h>
145 #include <net/udp_tunnel.h>
146 #include <linux/net_namespace.h>
147 #include <linux/indirect_call_wrapper.h>
148 #include <net/devlink.h>
149 #include <linux/pm_runtime.h>
150 #include <linux/prandom.h>
151 
152 #include "net-sysfs.h"
153 
154 #define MAX_GRO_SKBS 8
155 
156 /* This should be increased if a protocol with a bigger head is added. */
157 #define GRO_MAX_HEAD (MAX_HEADER + 128)
158 
159 static DEFINE_SPINLOCK(ptype_lock);
160 static DEFINE_SPINLOCK(offload_lock);
161 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
162 struct list_head ptype_all __read_mostly;	/* Taps */
163 static struct list_head offload_base __read_mostly;
164 
165 static int netif_rx_internal(struct sk_buff *skb);
166 static int call_netdevice_notifiers_info(unsigned long val,
167 					 struct netdev_notifier_info *info);
168 static int call_netdevice_notifiers_extack(unsigned long val,
169 					   struct net_device *dev,
170 					   struct netlink_ext_ack *extack);
171 static struct napi_struct *napi_by_id(unsigned int napi_id);
172 
173 /*
174  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
175  * semaphore.
176  *
177  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
178  *
179  * Writers must hold the rtnl semaphore while they loop through the
180  * dev_base_head list, and hold dev_base_lock for writing when they do the
181  * actual updates.  This allows pure readers to access the list even
182  * while a writer is preparing to update it.
183  *
184  * To put it another way, dev_base_lock is held for writing only to
185  * protect against pure readers; the rtnl semaphore provides the
186  * protection against other writers.
187  *
188  * See, for example usages, register_netdevice() and
189  * unregister_netdevice(), which must be called with the rtnl
190  * semaphore held.
191  */
192 DEFINE_RWLOCK(dev_base_lock);
193 EXPORT_SYMBOL(dev_base_lock);
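
/*
 * Illustrative sketch (not part of the original file) of the locking rules
 * described above; my_dump_names() is a hypothetical helper.  A pure reader
 * may walk the list under rcu_read_lock() (or under dev_base_lock for
 * reading), while a writer must hold the RTNL semaphore and take
 * dev_base_lock for writing around the actual update, as list_netdevice()
 * below does:
 *
 *	static void my_dump_names(struct net *net)
 *	{
 *		struct net_device *dev;
 *
 *		rcu_read_lock();
 *		for_each_netdev_rcu(net, dev)
 *			pr_info("%s\n", dev->name);
 *		rcu_read_unlock();
 *	}
 */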
194 
195 static DEFINE_MUTEX(ifalias_mutex);
196 
197 /* protects napi_hash addition/deletion and napi_gen_id */
198 static DEFINE_SPINLOCK(napi_hash_lock);
199 
200 static unsigned int napi_gen_id = NR_CPUS;
201 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
202 
203 static DECLARE_RWSEM(devnet_rename_sem);
204 
205 static inline void dev_base_seq_inc(struct net *net)
206 {
207 	while (++net->dev_base_seq == 0)
208 		;
209 }
210 
211 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
212 {
213 	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
214 
215 	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
216 }
217 
218 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
219 {
220 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
221 }
222 
223 static inline void rps_lock(struct softnet_data *sd)
224 {
225 #ifdef CONFIG_RPS
226 	spin_lock(&sd->input_pkt_queue.lock);
227 #endif
228 }
229 
230 static inline void rps_unlock(struct softnet_data *sd)
231 {
232 #ifdef CONFIG_RPS
233 	spin_unlock(&sd->input_pkt_queue.lock);
234 #endif
235 }
236 
237 static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
238 						       const char *name)
239 {
240 	struct netdev_name_node *name_node;
241 
242 	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
243 	if (!name_node)
244 		return NULL;
245 	INIT_HLIST_NODE(&name_node->hlist);
246 	name_node->dev = dev;
247 	name_node->name = name;
248 	return name_node;
249 }
250 
251 static struct netdev_name_node *
252 netdev_name_node_head_alloc(struct net_device *dev)
253 {
254 	struct netdev_name_node *name_node;
255 
256 	name_node = netdev_name_node_alloc(dev, dev->name);
257 	if (!name_node)
258 		return NULL;
259 	INIT_LIST_HEAD(&name_node->list);
260 	return name_node;
261 }
262 
263 static void netdev_name_node_free(struct netdev_name_node *name_node)
264 {
265 	kfree(name_node);
266 }
267 
268 static void netdev_name_node_add(struct net *net,
269 				 struct netdev_name_node *name_node)
270 {
271 	hlist_add_head_rcu(&name_node->hlist,
272 			   dev_name_hash(net, name_node->name));
273 }
274 
275 static void netdev_name_node_del(struct netdev_name_node *name_node)
276 {
277 	hlist_del_rcu(&name_node->hlist);
278 }
279 
280 static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
281 							const char *name)
282 {
283 	struct hlist_head *head = dev_name_hash(net, name);
284 	struct netdev_name_node *name_node;
285 
286 	hlist_for_each_entry(name_node, head, hlist)
287 		if (!strcmp(name_node->name, name))
288 			return name_node;
289 	return NULL;
290 }
291 
292 static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
293 							    const char *name)
294 {
295 	struct hlist_head *head = dev_name_hash(net, name);
296 	struct netdev_name_node *name_node;
297 
298 	hlist_for_each_entry_rcu(name_node, head, hlist)
299 		if (!strcmp(name_node->name, name))
300 			return name_node;
301 	return NULL;
302 }
303 
304 int netdev_name_node_alt_create(struct net_device *dev, const char *name)
305 {
306 	struct netdev_name_node *name_node;
307 	struct net *net = dev_net(dev);
308 
309 	name_node = netdev_name_node_lookup(net, name);
310 	if (name_node)
311 		return -EEXIST;
312 	name_node = netdev_name_node_alloc(dev, name);
313 	if (!name_node)
314 		return -ENOMEM;
315 	netdev_name_node_add(net, name_node);
316 	/* The node that holds dev->name acts as a head of per-device list. */
317 	list_add_tail(&name_node->list, &dev->name_node->list);
318 
319 	return 0;
320 }
321 EXPORT_SYMBOL(netdev_name_node_alt_create);
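
/*
 * Illustrative sketch (not part of the original file): alternative names are
 * normally added via the RTM_NEWLINKPROP netlink path, but an in-kernel
 * caller holding RTNL could do (error handling elided; "mgmt0" is made up):
 *
 *	rtnl_lock();
 *	err = netdev_name_node_alt_create(dev, "mgmt0");
 *	rtnl_unlock();
 *
 * The name is removed with netdev_name_node_alt_destroy(), or flushed when
 * the device is unregistered.
 */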
322 
323 static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
324 {
325 	list_del(&name_node->list);
326 	netdev_name_node_del(name_node);
327 	kfree(name_node->name);
328 	netdev_name_node_free(name_node);
329 }
330 
331 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
332 {
333 	struct netdev_name_node *name_node;
334 	struct net *net = dev_net(dev);
335 
336 	name_node = netdev_name_node_lookup(net, name);
337 	if (!name_node)
338 		return -ENOENT;
339 	/* lookup might have found our primary name or a name belonging
340 	 * to another device.
341 	 */
342 	if (name_node == dev->name_node || name_node->dev != dev)
343 		return -EINVAL;
344 
345 	__netdev_name_node_alt_destroy(name_node);
346 
347 	return 0;
348 }
349 EXPORT_SYMBOL(netdev_name_node_alt_destroy);
350 
351 static void netdev_name_node_alt_flush(struct net_device *dev)
352 {
353 	struct netdev_name_node *name_node, *tmp;
354 
355 	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
356 		__netdev_name_node_alt_destroy(name_node);
357 }
358 
359 /* Device list insertion */
360 static void list_netdevice(struct net_device *dev)
361 {
362 	struct net *net = dev_net(dev);
363 
364 	ASSERT_RTNL();
365 
366 	write_lock_bh(&dev_base_lock);
367 	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
368 	netdev_name_node_add(net, dev->name_node);
369 	hlist_add_head_rcu(&dev->index_hlist,
370 			   dev_index_hash(net, dev->ifindex));
371 	write_unlock_bh(&dev_base_lock);
372 
373 	dev_base_seq_inc(net);
374 }
375 
376 /* Device list removal
377  * caller must respect a RCU grace period before freeing/reusing dev
378  */
379 static void unlist_netdevice(struct net_device *dev)
380 {
381 	ASSERT_RTNL();
382 
383 	/* Unlink dev from the device chain */
384 	write_lock_bh(&dev_base_lock);
385 	list_del_rcu(&dev->dev_list);
386 	netdev_name_node_del(dev->name_node);
387 	hlist_del_rcu(&dev->index_hlist);
388 	write_unlock_bh(&dev_base_lock);
389 
390 	dev_base_seq_inc(dev_net(dev));
391 }
392 
393 /*
394  *	Our notifier list
395  */
396 
397 static RAW_NOTIFIER_HEAD(netdev_chain);
398 
399 /*
400  *	Device drivers call our routines to queue packets here. We empty the
401  *	queue in the local softnet handler.
402  */
403 
404 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
405 EXPORT_PER_CPU_SYMBOL(softnet_data);
406 
407 #ifdef CONFIG_LOCKDEP
408 /*
409  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
410  * according to dev->type
411  */
412 static const unsigned short netdev_lock_type[] = {
413 	 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
414 	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
415 	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
416 	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
417 	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
418 	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
419 	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
420 	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
421 	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
422 	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
423 	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
424 	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
425 	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
426 	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
427 	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
428 
429 static const char *const netdev_lock_name[] = {
430 	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
431 	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
432 	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
433 	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
434 	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
435 	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
436 	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
437 	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
438 	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
439 	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
440 	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
441 	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
442 	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
443 	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
444 	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
445 
446 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
447 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
448 
449 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
450 {
451 	int i;
452 
453 	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
454 		if (netdev_lock_type[i] == dev_type)
455 			return i;
456 	/* the last key is used by default */
457 	return ARRAY_SIZE(netdev_lock_type) - 1;
458 }
459 
460 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
461 						 unsigned short dev_type)
462 {
463 	int i;
464 
465 	i = netdev_lock_pos(dev_type);
466 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
467 				   netdev_lock_name[i]);
468 }
469 
470 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
471 {
472 	int i;
473 
474 	i = netdev_lock_pos(dev->type);
475 	lockdep_set_class_and_name(&dev->addr_list_lock,
476 				   &netdev_addr_lock_key[i],
477 				   netdev_lock_name[i]);
478 }
479 #else
480 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
481 						 unsigned short dev_type)
482 {
483 }
484 
485 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
486 {
487 }
488 #endif
489 
490 /*******************************************************************************
491  *
492  *		Protocol management and registration routines
493  *
494  *******************************************************************************/
495 
496 
497 /*
498  *	Add a protocol ID to the list. Now that the input handler is
499  *	smarter we can dispense with all the messy stuff that used to be
500  *	here.
501  *
502  *	BEWARE!!! Protocol handlers that mangle input packets
503  *	MUST BE last in the hash buckets, and checking protocol handlers
504  *	MUST start from the promiscuous ptype_all chain in net_bh.
505  *	This is true now; do not change it.
506  *	Explanation follows: if a protocol handler that mangles packets
507  *	were first on the list, it could not sense that the packet
508  *	is cloned and should be copied-on-write, so it would
509  *	change it in place and subsequent readers would get a broken packet.
510  *							--ANK (980803)
511  */
512 
513 static inline struct list_head *ptype_head(const struct packet_type *pt)
514 {
515 	if (pt->type == htons(ETH_P_ALL))
516 		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
517 	else
518 		return pt->dev ? &pt->dev->ptype_specific :
519 				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
520 }
521 
522 /**
523  *	dev_add_pack - add packet handler
524  *	@pt: packet type declaration
525  *
526  *	Add a protocol handler to the networking stack. The passed &packet_type
527  *	is linked into kernel lists and may not be freed until it has been
528  *	removed from the kernel lists.
529  *
530  *	This call does not sleep, therefore it cannot
531  *	guarantee that all CPUs that are in the middle of receiving packets
532  *	will see the new packet type (until the next received packet).
533  */
534 
535 void dev_add_pack(struct packet_type *pt)
536 {
537 	struct list_head *head = ptype_head(pt);
538 
539 	spin_lock(&ptype_lock);
540 	list_add_rcu(&pt->list, head);
541 	spin_unlock(&ptype_lock);
542 }
543 EXPORT_SYMBOL(dev_add_pack);
544 
545 /**
546  *	__dev_remove_pack	 - remove packet handler
547  *	@pt: packet type declaration
548  *
549  *	Remove a protocol handler that was previously added to the kernel
550  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
551  *	from the kernel lists and can be freed or reused once this function
552  *	returns.
553  *
554  *      The packet type might still be in use by receivers
555  *	and must not be freed until after all the CPUs have gone
556  *	through a quiescent state.
557  */
558 void __dev_remove_pack(struct packet_type *pt)
559 {
560 	struct list_head *head = ptype_head(pt);
561 	struct packet_type *pt1;
562 
563 	spin_lock(&ptype_lock);
564 
565 	list_for_each_entry(pt1, head, list) {
566 		if (pt == pt1) {
567 			list_del_rcu(&pt->list);
568 			goto out;
569 		}
570 	}
571 
572 	pr_warn("dev_remove_pack: %p not found\n", pt);
573 out:
574 	spin_unlock(&ptype_lock);
575 }
576 EXPORT_SYMBOL(__dev_remove_pack);
577 
578 /**
579  *	dev_remove_pack	 - remove packet handler
580  *	@pt: packet type declaration
581  *
582  *	Remove a protocol handler that was previously added to the kernel
583  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
584  *	from the kernel lists and can be freed or reused once this function
585  *	returns.
586  *
587  *	This call sleeps to guarantee that no CPU is looking at the packet
588  *	type after return.
589  */
590 void dev_remove_pack(struct packet_type *pt)
591 {
592 	__dev_remove_pack(pt);
593 
594 	synchronize_net();
595 }
596 EXPORT_SYMBOL(dev_remove_pack);
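
/*
 * Illustrative sketch (not part of the original file): a minimal ETH_P_ALL
 * tap module; my_tap_rcv(), my_tap_init() and my_tap_exit() are hypothetical.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		pr_debug("saw %u bytes on %s\n", skb->len, dev->name);
 *		kfree_skb(skb);		// taps must consume their clone
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_ALL),
 *		.func	= my_tap_rcv,
 *	};
 *
 *	static int __init my_tap_init(void)
 *	{
 *		dev_add_pack(&my_tap);
 *		return 0;
 *	}
 *
 *	static void __exit my_tap_exit(void)
 *	{
 *		dev_remove_pack(&my_tap);	// sleeps; safe to free my_tap after
 *	}
 */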
597 
598 
599 /**
600  *	dev_add_offload - register offload handlers
601  *	@po: protocol offload declaration
602  *
603  *	Add protocol offload handlers to the networking stack. The passed
604  *	&proto_offload is linked into kernel lists and may not be freed until
605  *	it has been removed from the kernel lists.
606  *
607  *	This call does not sleep, therefore it cannot
608  *	guarantee that all CPUs that are in the middle of receiving packets
609  *	will see the new offload handlers (until the next received packet).
610  */
611 void dev_add_offload(struct packet_offload *po)
612 {
613 	struct packet_offload *elem;
614 
615 	spin_lock(&offload_lock);
616 	list_for_each_entry(elem, &offload_base, list) {
617 		if (po->priority < elem->priority)
618 			break;
619 	}
620 	list_add_rcu(&po->list, elem->list.prev);
621 	spin_unlock(&offload_lock);
622 }
623 EXPORT_SYMBOL(dev_add_offload);
624 
625 /**
626  *	__dev_remove_offload	 - remove offload handler
627  *	@po: packet offload declaration
628  *
629  *	Remove a protocol offload handler that was previously added to the
630  *	kernel offload handlers by dev_add_offload(). The passed &offload_type
631  *	is removed from the kernel lists and can be freed or reused once this
632  *	function returns.
633  *
634  *      The packet type might still be in use by receivers
635  *	and must not be freed until after all the CPUs have gone
636  *	through a quiescent state.
637  */
638 static void __dev_remove_offload(struct packet_offload *po)
639 {
640 	struct list_head *head = &offload_base;
641 	struct packet_offload *po1;
642 
643 	spin_lock(&offload_lock);
644 
645 	list_for_each_entry(po1, head, list) {
646 		if (po == po1) {
647 			list_del_rcu(&po->list);
648 			goto out;
649 		}
650 	}
651 
652 	pr_warn("dev_remove_offload: %p not found\n", po);
653 out:
654 	spin_unlock(&offload_lock);
655 }
656 
657 /**
658  *	dev_remove_offload	 - remove packet offload handler
659  *	@po: packet offload declaration
660  *
661  *	Remove a packet offload handler that was previously added to the kernel
662  *	offload handlers by dev_add_offload(). The passed &offload_type is
663  *	removed from the kernel lists and can be freed or reused once this
664  *	function returns.
665  *
666  *	This call sleeps to guarantee that no CPU is looking at the packet
667  *	type after return.
668  */
669 void dev_remove_offload(struct packet_offload *po)
670 {
671 	__dev_remove_offload(po);
672 
673 	synchronize_net();
674 }
675 EXPORT_SYMBOL(dev_remove_offload);
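
/*
 * Illustrative sketch (not part of the original file), modelled on how
 * net/ipv4/af_inet.c registers its GRO callbacks; my_gro_receive() and
 * my_gro_complete() are hypothetical handlers:
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gro_receive	= my_gro_receive,
 *			.gro_complete	= my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);		// typically from an __init hook
 *	...
 *	dev_remove_offload(&my_offload);	// sleeps via synchronize_net()
 */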
676 
677 /******************************************************************************
678  *
679  *		      Device Boot-time Settings Routines
680  *
681  ******************************************************************************/
682 
683 /* Boot time configuration table */
684 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
685 
686 /**
687  *	netdev_boot_setup_add	- add new setup entry
688  *	@name: name of the device
689  *	@map: configured settings for the device
690  *
691  *	Adds a new setup entry to the dev_boot_setup list.  The function
692  *	returns 0 on error and 1 on success.  This is a generic routine for
693  *	all netdevices.
694  */
695 static int netdev_boot_setup_add(char *name, struct ifmap *map)
696 {
697 	struct netdev_boot_setup *s;
698 	int i;
699 
700 	s = dev_boot_setup;
701 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
702 		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
703 			memset(s[i].name, 0, sizeof(s[i].name));
704 			strlcpy(s[i].name, name, IFNAMSIZ);
705 			memcpy(&s[i].map, map, sizeof(s[i].map));
706 			break;
707 		}
708 	}
709 
710 	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
711 }
712 
713 /**
714  * netdev_boot_setup_check	- check boot time settings
715  * @dev: the netdevice
716  *
717  * Check boot time settings for the device.
718  * Any settings found are applied to the device so they can be used
719  * later during device probing.
720  * Returns 0 if no settings were found, 1 if they were.
721  */
722 int netdev_boot_setup_check(struct net_device *dev)
723 {
724 	struct netdev_boot_setup *s = dev_boot_setup;
725 	int i;
726 
727 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
728 		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
729 		    !strcmp(dev->name, s[i].name)) {
730 			dev->irq = s[i].map.irq;
731 			dev->base_addr = s[i].map.base_addr;
732 			dev->mem_start = s[i].map.mem_start;
733 			dev->mem_end = s[i].map.mem_end;
734 			return 1;
735 		}
736 	}
737 	return 0;
738 }
739 EXPORT_SYMBOL(netdev_boot_setup_check);
740 
741 
742 /**
743  * netdev_boot_base	- get address from boot time settings
744  * @prefix: prefix for network device
745  * @unit: id for network device
746  *
747  * Check boot time settings for the base address of the device.
748  * The base address found is returned so it can be used
749  * later during device probing.
750  * Returns 0 if no settings were found.
751  */
752 unsigned long netdev_boot_base(const char *prefix, int unit)
753 {
754 	const struct netdev_boot_setup *s = dev_boot_setup;
755 	char name[IFNAMSIZ];
756 	int i;
757 
758 	sprintf(name, "%s%d", prefix, unit);
759 
760 	/*
761 	 * If device already registered then return base of 1
762 	 * to indicate not to probe for this interface
763 	 */
764 	if (__dev_get_by_name(&init_net, name))
765 		return 1;
766 
767 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
768 		if (!strcmp(name, s[i].name))
769 			return s[i].map.base_addr;
770 	return 0;
771 }
772 
773 /*
774  * Saves at boot time configured settings for any netdevice.
775  */
776 int __init netdev_boot_setup(char *str)
777 {
778 	int ints[5];
779 	struct ifmap map;
780 
781 	str = get_options(str, ARRAY_SIZE(ints), ints);
782 	if (!str || !*str)
783 		return 0;
784 
785 	/* Save settings */
786 	memset(&map, 0, sizeof(map));
787 	if (ints[0] > 0)
788 		map.irq = ints[1];
789 	if (ints[0] > 1)
790 		map.base_addr = ints[2];
791 	if (ints[0] > 2)
792 		map.mem_start = ints[3];
793 	if (ints[0] > 3)
794 		map.mem_end = ints[4];
795 
796 	/* Add new entry to the list */
797 	return netdev_boot_setup_add(str, &map);
798 }
799 
800 __setup("netdev=", netdev_boot_setup);
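
/*
 * Example inferred from netdev_boot_setup() above: booting with
 *
 *	netdev=5,0x340,0,0,eth0
 *
 * records irq=5 and base_addr=0x340 for "eth0", which a legacy ISA driver can
 * later pick up via netdev_boot_setup_check().
 */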
801 
802 /*******************************************************************************
803  *
804  *			    Device Interface Subroutines
805  *
806  *******************************************************************************/
807 
808 /**
809  *	dev_get_iflink	- get 'iflink' value of an interface
810  *	@dev: targeted interface
811  *
812  *	Indicates the ifindex the interface is linked to.
813  *	Physical interfaces have the same 'ifindex' and 'iflink' values.
814  */
815 
816 int dev_get_iflink(const struct net_device *dev)
817 {
818 	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
819 		return dev->netdev_ops->ndo_get_iflink(dev);
820 
821 	return dev->ifindex;
822 }
823 EXPORT_SYMBOL(dev_get_iflink);
824 
825 /**
826  *	dev_fill_metadata_dst - Retrieve tunnel egress information.
827  *	@dev: targeted interface
828  *	@skb: The packet.
829  *
830  *	For better visibility of tunnel traffic, OVS needs to retrieve
831  *	egress tunnel information for a packet. The following API allows
832  *	the user to get this info.
833  */
834 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
835 {
836 	struct ip_tunnel_info *info;
837 
838 	if (!dev->netdev_ops  || !dev->netdev_ops->ndo_fill_metadata_dst)
839 		return -EINVAL;
840 
841 	info = skb_tunnel_info_unclone(skb);
842 	if (!info)
843 		return -ENOMEM;
844 	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
845 		return -EINVAL;
846 
847 	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
848 }
849 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
850 
851 /**
852  *	__dev_get_by_name	- find a device by its name
853  *	@net: the applicable net namespace
854  *	@name: name to find
855  *
856  *	Find an interface by name. Must be called under RTNL semaphore
857  *	or @dev_base_lock. If the name is found a pointer to the device
858  *	is returned. If the name is not found then %NULL is returned. The
859  *	reference counters are not incremented so the caller must be
860  *	careful with locks.
861  */
862 
863 struct net_device *__dev_get_by_name(struct net *net, const char *name)
864 {
865 	struct netdev_name_node *node_name;
866 
867 	node_name = netdev_name_node_lookup(net, name);
868 	return node_name ? node_name->dev : NULL;
869 }
870 EXPORT_SYMBOL(__dev_get_by_name);
871 
872 /**
873  * dev_get_by_name_rcu	- find a device by its name
874  * @net: the applicable net namespace
875  * @name: name to find
876  *
877  * Find an interface by name.
878  * If the name is found a pointer to the device is returned.
879  * If the name is not found then %NULL is returned.
880  * The reference counters are not incremented so the caller must be
881  * careful with locks. The caller must hold RCU lock.
882  */
883 
884 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
885 {
886 	struct netdev_name_node *node_name;
887 
888 	node_name = netdev_name_node_lookup_rcu(net, name);
889 	return node_name ? node_name->dev : NULL;
890 }
891 EXPORT_SYMBOL(dev_get_by_name_rcu);
892 
893 /**
894  *	dev_get_by_name		- find a device by its name
895  *	@net: the applicable net namespace
896  *	@name: name to find
897  *
898  *	Find an interface by name. This can be called from any
899  *	context and does its own locking. The returned handle has
900  *	the usage count incremented and the caller must use dev_put() to
901  *	release it when it is no longer needed. %NULL is returned if no
902  *	matching device is found.
903  */
904 
905 struct net_device *dev_get_by_name(struct net *net, const char *name)
906 {
907 	struct net_device *dev;
908 
909 	rcu_read_lock();
910 	dev = dev_get_by_name_rcu(net, name);
911 	if (dev)
912 		dev_hold(dev);
913 	rcu_read_unlock();
914 	return dev;
915 }
916 EXPORT_SYMBOL(dev_get_by_name);
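
/*
 * Illustrative sketch (not part of the original file) of the two lookup
 * styles above.  The RCU variant takes no reference, so the pointer is only
 * valid inside the read-side critical section; dev_get_by_name() takes a
 * reference that must be dropped with dev_put():
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		pr_info("ifindex %d\n", dev->ifindex);
 *	rcu_read_unlock();
 *
 *	dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		// dev may be used from any context here
 *		dev_put(dev);
 *	}
 */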
917 
918 /**
919  *	__dev_get_by_index - find a device by its ifindex
920  *	@net: the applicable net namespace
921  *	@ifindex: index of device
922  *
923  *	Search for an interface by index. Returns %NULL if the device
924  *	is not found or a pointer to the device. The device has not
925  *	had its reference counter increased so the caller must be careful
926  *	about locking. The caller must hold either the RTNL semaphore
927  *	or @dev_base_lock.
928  */
929 
930 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
931 {
932 	struct net_device *dev;
933 	struct hlist_head *head = dev_index_hash(net, ifindex);
934 
935 	hlist_for_each_entry(dev, head, index_hlist)
936 		if (dev->ifindex == ifindex)
937 			return dev;
938 
939 	return NULL;
940 }
941 EXPORT_SYMBOL(__dev_get_by_index);
942 
943 /**
944  *	dev_get_by_index_rcu - find a device by its ifindex
945  *	@net: the applicable net namespace
946  *	@ifindex: index of device
947  *
948  *	Search for an interface by index. Returns %NULL if the device
949  *	is not found or a pointer to the device. The device has not
950  *	had its reference counter increased so the caller must be careful
951  *	about locking. The caller must hold RCU lock.
952  */
953 
954 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
955 {
956 	struct net_device *dev;
957 	struct hlist_head *head = dev_index_hash(net, ifindex);
958 
959 	hlist_for_each_entry_rcu(dev, head, index_hlist)
960 		if (dev->ifindex == ifindex)
961 			return dev;
962 
963 	return NULL;
964 }
965 EXPORT_SYMBOL(dev_get_by_index_rcu);
966 
967 
968 /**
969  *	dev_get_by_index - find a device by its ifindex
970  *	@net: the applicable net namespace
971  *	@ifindex: index of device
972  *
973  *	Search for an interface by index. Returns NULL if the device
974  *	is not found or a pointer to the device. The device returned has
975  *	had a reference added and the pointer is safe until the user calls
976  *	dev_put to indicate they have finished with it.
977  */
978 
979 struct net_device *dev_get_by_index(struct net *net, int ifindex)
980 {
981 	struct net_device *dev;
982 
983 	rcu_read_lock();
984 	dev = dev_get_by_index_rcu(net, ifindex);
985 	if (dev)
986 		dev_hold(dev);
987 	rcu_read_unlock();
988 	return dev;
989 }
990 EXPORT_SYMBOL(dev_get_by_index);
991 
992 /**
993  *	dev_get_by_napi_id - find a device by napi_id
994  *	@napi_id: ID of the NAPI struct
995  *
996  *	Search for an interface by NAPI ID. Returns %NULL if the device
997  *	is not found or a pointer to the device. The device has not had
998  *	its reference counter increased so the caller must be careful
999  *	about locking. The caller must hold RCU lock.
1000  */
1001 
1002 struct net_device *dev_get_by_napi_id(unsigned int napi_id)
1003 {
1004 	struct napi_struct *napi;
1005 
1006 	WARN_ON_ONCE(!rcu_read_lock_held());
1007 
1008 	if (napi_id < MIN_NAPI_ID)
1009 		return NULL;
1010 
1011 	napi = napi_by_id(napi_id);
1012 
1013 	return napi ? napi->dev : NULL;
1014 }
1015 EXPORT_SYMBOL(dev_get_by_napi_id);
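
/*
 * Illustrative sketch (not part of the original file): with
 * CONFIG_NET_RX_BUSY_POLL, a socket's NAPI id can be mapped back to the
 * device it receives on:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_napi_id(READ_ONCE(sk->sk_napi_id));
 *	if (dev)
 *		pr_info("socket receives on %s\n", dev->name);
 *	rcu_read_unlock();
 */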
1016 
1017 /**
1018  *	netdev_get_name - get a netdevice name, knowing its ifindex.
1019  *	@net: network namespace
1020  *	@name: a pointer to the buffer where the name will be stored.
1021  *	@ifindex: the ifindex of the interface to get the name from.
1022  */
1023 int netdev_get_name(struct net *net, char *name, int ifindex)
1024 {
1025 	struct net_device *dev;
1026 	int ret;
1027 
1028 	down_read(&devnet_rename_sem);
1029 	rcu_read_lock();
1030 
1031 	dev = dev_get_by_index_rcu(net, ifindex);
1032 	if (!dev) {
1033 		ret = -ENODEV;
1034 		goto out;
1035 	}
1036 
1037 	strcpy(name, dev->name);
1038 
1039 	ret = 0;
1040 out:
1041 	rcu_read_unlock();
1042 	up_read(&devnet_rename_sem);
1043 	return ret;
1044 }
1045 
1046 /**
1047  *	dev_getbyhwaddr_rcu - find a device by its hardware address
1048  *	@net: the applicable net namespace
1049  *	@type: media type of device
1050  *	@ha: hardware address
1051  *
1052  *	Search for an interface by MAC address. Returns NULL if the device
1053  *	is not found or a pointer to the device.
1054  *	The caller must hold RCU or RTNL.
1055  *	The returned device has not had its ref count increased
1056  *	and the caller must therefore be careful about locking.
1057  *
1058  */
1059 
1060 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1061 				       const char *ha)
1062 {
1063 	struct net_device *dev;
1064 
1065 	for_each_netdev_rcu(net, dev)
1066 		if (dev->type == type &&
1067 		    !memcmp(dev->dev_addr, ha, dev->addr_len))
1068 			return dev;
1069 
1070 	return NULL;
1071 }
1072 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
1073 
1074 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
1075 {
1076 	struct net_device *dev, *ret = NULL;
1077 
1078 	rcu_read_lock();
1079 	for_each_netdev_rcu(net, dev)
1080 		if (dev->type == type) {
1081 			dev_hold(dev);
1082 			ret = dev;
1083 			break;
1084 		}
1085 	rcu_read_unlock();
1086 	return ret;
1087 }
1088 EXPORT_SYMBOL(dev_getfirstbyhwtype);
1089 
1090 /**
1091  *	__dev_get_by_flags - find any device with given flags
1092  *	@net: the applicable net namespace
1093  *	@if_flags: IFF_* values
1094  *	@mask: bitmask of bits in if_flags to check
1095  *
1096  *	Search for any interface with the given flags. Returns NULL if a device
1097  *	is not found or a pointer to the device. Must be called inside
1098  *	rtnl_lock(), and result refcount is unchanged.
1099  */
1100 
1101 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
1102 				      unsigned short mask)
1103 {
1104 	struct net_device *dev, *ret;
1105 
1106 	ASSERT_RTNL();
1107 
1108 	ret = NULL;
1109 	for_each_netdev(net, dev) {
1110 		if (((dev->flags ^ if_flags) & mask) == 0) {
1111 			ret = dev;
1112 			break;
1113 		}
1114 	}
1115 	return ret;
1116 }
1117 EXPORT_SYMBOL(__dev_get_by_flags);
1118 
1119 /**
1120  *	dev_valid_name - check if name is okay for network device
1121  *	@name: name string
1122  *
1123  *	Network device names need to be valid file names to
1124  *	allow sysfs to work.  We also disallow any kind of
1125  *	whitespace.
1126  */
1127 bool dev_valid_name(const char *name)
1128 {
1129 	if (*name == '\0')
1130 		return false;
1131 	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
1132 		return false;
1133 	if (!strcmp(name, ".") || !strcmp(name, ".."))
1134 		return false;
1135 
1136 	while (*name) {
1137 		if (*name == '/' || *name == ':' || isspace(*name))
1138 			return false;
1139 		name++;
1140 	}
1141 	return true;
1142 }
1143 EXPORT_SYMBOL(dev_valid_name);
1144 
1145 /**
1146  *	__dev_alloc_name - allocate a name for a device
1147  *	@net: network namespace to allocate the device name in
1148  *	@name: name format string
1149  *	@buf:  scratch buffer and result name string
1150  *
1151  *	Passed a format string - eg "lt%d" - it will try to find a suitable
1152  *	id. It scans the list of devices to build up a free map, then chooses
1153  *	the first empty slot. The caller must hold the dev_base or rtnl lock
1154  *	while allocating the name and adding the device in order to avoid
1155  *	duplicates.
1156  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1157  *	Returns the number of the unit assigned or a negative errno code.
1158  */
1159 
1160 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1161 {
1162 	int i = 0;
1163 	const char *p;
1164 	const int max_netdevices = 8*PAGE_SIZE;
1165 	unsigned long *inuse;
1166 	struct net_device *d;
1167 
1168 	if (!dev_valid_name(name))
1169 		return -EINVAL;
1170 
1171 	p = strchr(name, '%');
1172 	if (p) {
1173 		/*
1174 		 * Verify the string as this thing may have come from
1175 		 * the user.  There must be either one "%d" and no other "%"
1176 		 * characters.
1177 		 */
1178 		if (p[1] != 'd' || strchr(p + 2, '%'))
1179 			return -EINVAL;
1180 
1181 		/* Use one page as a bit array of possible slots */
1182 		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1183 		if (!inuse)
1184 			return -ENOMEM;
1185 
1186 		for_each_netdev(net, d) {
1187 			if (!sscanf(d->name, name, &i))
1188 				continue;
1189 			if (i < 0 || i >= max_netdevices)
1190 				continue;
1191 
1192 			/*  avoid cases where sscanf is not exact inverse of printf */
1193 			snprintf(buf, IFNAMSIZ, name, i);
1194 			if (!strncmp(buf, d->name, IFNAMSIZ))
1195 				set_bit(i, inuse);
1196 		}
1197 
1198 		i = find_first_zero_bit(inuse, max_netdevices);
1199 		free_page((unsigned long) inuse);
1200 	}
1201 
1202 	snprintf(buf, IFNAMSIZ, name, i);
1203 	if (!__dev_get_by_name(net, buf))
1204 		return i;
1205 
1206 	/* It is possible to run out of possible slots
1207 	 * when the name is long and there isn't enough space left
1208 	 * for the digits, or if all bits are used.
1209 	 */
1210 	return -ENFILE;
1211 }
1212 
1213 static int dev_alloc_name_ns(struct net *net,
1214 			     struct net_device *dev,
1215 			     const char *name)
1216 {
1217 	char buf[IFNAMSIZ];
1218 	int ret;
1219 
1220 	BUG_ON(!net);
1221 	ret = __dev_alloc_name(net, name, buf);
1222 	if (ret >= 0)
1223 		strlcpy(dev->name, buf, IFNAMSIZ);
1224 	return ret;
1225 }
1226 
1227 /**
1228  *	dev_alloc_name - allocate a name for a device
1229  *	@dev: device
1230  *	@name: name format string
1231  *
1232  *	Passed a format string - eg "lt%d" - it will try to find a suitable
1233  *	id. It scans the list of devices to build up a free map, then chooses
1234  *	the first empty slot. The caller must hold the dev_base or rtnl lock
1235  *	while allocating the name and adding the device in order to avoid
1236  *	duplicates.
1237  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1238  *	Returns the number of the unit assigned or a negative errno code.
1239  */
1240 
1241 int dev_alloc_name(struct net_device *dev, const char *name)
1242 {
1243 	return dev_alloc_name_ns(dev_net(dev), dev, name);
1244 }
1245 EXPORT_SYMBOL(dev_alloc_name);
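
/*
 * Illustrative sketch (not part of the original file): name allocation
 * normally happens through register_netdevice() when the requested name
 * contains a "%d", but a caller holding RTNL can also do it directly:
 *
 *	err = dev_alloc_name(dev, "wlan%d");
 *	if (err < 0)
 *		return err;
 *	// dev->name is now e.g. "wlan0"; err holds the unit number
 */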
1246 
1247 static int dev_get_valid_name(struct net *net, struct net_device *dev,
1248 			      const char *name)
1249 {
1250 	BUG_ON(!net);
1251 
1252 	if (!dev_valid_name(name))
1253 		return -EINVAL;
1254 
1255 	if (strchr(name, '%'))
1256 		return dev_alloc_name_ns(net, dev, name);
1257 	else if (__dev_get_by_name(net, name))
1258 		return -EEXIST;
1259 	else if (dev->name != name)
1260 		strlcpy(dev->name, name, IFNAMSIZ);
1261 
1262 	return 0;
1263 }
1264 
1265 /**
1266  *	dev_change_name - change name of a device
1267  *	@dev: device
1268  *	@newname: name (or format string) must be at least IFNAMSIZ
1269  *
1270  *	Change the name of a device. A format string such as "eth%d"
1271  *	can be passed for wildcarding.
1272  */
1273 int dev_change_name(struct net_device *dev, const char *newname)
1274 {
1275 	unsigned char old_assign_type;
1276 	char oldname[IFNAMSIZ];
1277 	int err = 0;
1278 	int ret;
1279 	struct net *net;
1280 
1281 	ASSERT_RTNL();
1282 	BUG_ON(!dev_net(dev));
1283 
1284 	net = dev_net(dev);
1285 
1286 	/* Some auto-enslaved devices e.g. failover slaves are
1287 	 * special, as userspace might rename the device after
1288 	 * the interface had been brought up and running since
1289 	 * the point kernel initiated auto-enslavement. Allow
1290 	 * live name change even when these slave devices are
1291 	 * up and running.
1292 	 *
1293 	 * Typically, users of these auto-enslaving devices
1294 	 * don't actually care about slave name change, as
1295 	 * they are supposed to operate on master interface
1296 	 * directly.
1297 	 */
1298 	if (dev->flags & IFF_UP &&
1299 	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
1300 		return -EBUSY;
1301 
1302 	down_write(&devnet_rename_sem);
1303 
1304 	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1305 		up_write(&devnet_rename_sem);
1306 		return 0;
1307 	}
1308 
1309 	memcpy(oldname, dev->name, IFNAMSIZ);
1310 
1311 	err = dev_get_valid_name(net, dev, newname);
1312 	if (err < 0) {
1313 		up_write(&devnet_rename_sem);
1314 		return err;
1315 	}
1316 
1317 	if (oldname[0] && !strchr(oldname, '%'))
1318 		netdev_info(dev, "renamed from %s\n", oldname);
1319 
1320 	old_assign_type = dev->name_assign_type;
1321 	dev->name_assign_type = NET_NAME_RENAMED;
1322 
1323 rollback:
1324 	ret = device_rename(&dev->dev, dev->name);
1325 	if (ret) {
1326 		memcpy(dev->name, oldname, IFNAMSIZ);
1327 		dev->name_assign_type = old_assign_type;
1328 		up_write(&devnet_rename_sem);
1329 		return ret;
1330 	}
1331 
1332 	up_write(&devnet_rename_sem);
1333 
1334 	netdev_adjacent_rename_links(dev, oldname);
1335 
1336 	write_lock_bh(&dev_base_lock);
1337 	netdev_name_node_del(dev->name_node);
1338 	write_unlock_bh(&dev_base_lock);
1339 
1340 	synchronize_rcu();
1341 
1342 	write_lock_bh(&dev_base_lock);
1343 	netdev_name_node_add(net, dev->name_node);
1344 	write_unlock_bh(&dev_base_lock);
1345 
1346 	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1347 	ret = notifier_to_errno(ret);
1348 
1349 	if (ret) {
1350 		/* err >= 0 after dev_alloc_name() or stores the first errno */
1351 		if (err >= 0) {
1352 			err = ret;
1353 			down_write(&devnet_rename_sem);
1354 			memcpy(dev->name, oldname, IFNAMSIZ);
1355 			memcpy(oldname, newname, IFNAMSIZ);
1356 			dev->name_assign_type = old_assign_type;
1357 			old_assign_type = NET_NAME_RENAMED;
1358 			goto rollback;
1359 		} else {
1360 			pr_err("%s: name change rollback failed: %d\n",
1361 			       dev->name, ret);
1362 		}
1363 	}
1364 
1365 	return err;
1366 }
1367 
1368 /**
1369  *	dev_set_alias - change ifalias of a device
1370  *	@dev: device
1371  *	@alias: name up to IFALIASZ
1372  *	@len: limit of bytes to copy from info
1373  *
1374  *	Set the ifalias for a device.
1375  */
1376 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1377 {
1378 	struct dev_ifalias *new_alias = NULL;
1379 
1380 	if (len >= IFALIASZ)
1381 		return -EINVAL;
1382 
1383 	if (len) {
1384 		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1385 		if (!new_alias)
1386 			return -ENOMEM;
1387 
1388 		memcpy(new_alias->ifalias, alias, len);
1389 		new_alias->ifalias[len] = 0;
1390 	}
1391 
1392 	mutex_lock(&ifalias_mutex);
1393 	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
1394 					mutex_is_locked(&ifalias_mutex));
1395 	mutex_unlock(&ifalias_mutex);
1396 
1397 	if (new_alias)
1398 		kfree_rcu(new_alias, rcuhead);
1399 
1400 	return len;
1401 }
1402 EXPORT_SYMBOL(dev_set_alias);
1403 
1404 /**
1405  *	dev_get_alias - get ifalias of a device
1406  *	@dev: device
1407  *	@name: buffer to store name of ifalias
1408  *	@len: size of buffer
1409  *
1410  *	Get the ifalias for a device.  The caller must make sure dev cannot go
1411  *	away, e.g. by holding the RCU read lock or a reference count on the device.
1412  */
1413 int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1414 {
1415 	const struct dev_ifalias *alias;
1416 	int ret = 0;
1417 
1418 	rcu_read_lock();
1419 	alias = rcu_dereference(dev->ifalias);
1420 	if (alias)
1421 		ret = snprintf(name, len, "%s", alias->ifalias);
1422 	rcu_read_unlock();
1423 
1424 	return ret;
1425 }
1426 
1427 /**
1428  *	netdev_features_change - device changes features
1429  *	@dev: device to cause notification
1430  *
1431  *	Called to indicate a device has changed features.
1432  */
1433 void netdev_features_change(struct net_device *dev)
1434 {
1435 	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1436 }
1437 EXPORT_SYMBOL(netdev_features_change);
1438 
1439 /**
1440  *	netdev_state_change - device changes state
1441  *	@dev: device to cause notification
1442  *
1443  *	Called to indicate a device has changed state. This function calls
1444  *	the notifier chains for netdev_chain and sends a NEWLINK message
1445  *	to the routing socket.
1446  */
1447 void netdev_state_change(struct net_device *dev)
1448 {
1449 	if (dev->flags & IFF_UP) {
1450 		struct netdev_notifier_change_info change_info = {
1451 			.info.dev = dev,
1452 		};
1453 
1454 		call_netdevice_notifiers_info(NETDEV_CHANGE,
1455 					      &change_info.info);
1456 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1457 	}
1458 }
1459 EXPORT_SYMBOL(netdev_state_change);
1460 
1461 /**
1462  * __netdev_notify_peers - notify network peers about existence of @dev,
1463  * to be called when rtnl lock is already held.
1464  * @dev: network device
1465  *
1466  * Generate traffic such that interested network peers are aware of
1467  * @dev, such as by generating a gratuitous ARP. This may be used when
1468  * a device wants to inform the rest of the network about some sort of
1469  * reconfiguration such as a failover event or virtual machine
1470  * migration.
1471  */
1472 void __netdev_notify_peers(struct net_device *dev)
1473 {
1474 	ASSERT_RTNL();
1475 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1476 	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1477 }
1478 EXPORT_SYMBOL(__netdev_notify_peers);
1479 
1480 /**
1481  * netdev_notify_peers - notify network peers about existence of @dev
1482  * @dev: network device
1483  *
1484  * Generate traffic such that interested network peers are aware of
1485  * @dev, such as by generating a gratuitous ARP. This may be used when
1486  * a device wants to inform the rest of the network about some sort of
1487  * reconfiguration such as a failover event or virtual machine
1488  * migration.
1489  */
1490 void netdev_notify_peers(struct net_device *dev)
1491 {
1492 	rtnl_lock();
1493 	__netdev_notify_peers(dev);
1494 	rtnl_unlock();
1495 }
1496 EXPORT_SYMBOL(netdev_notify_peers);
1497 
1498 static int napi_threaded_poll(void *data);
1499 
1500 static int napi_kthread_create(struct napi_struct *n)
1501 {
1502 	int err = 0;
1503 
1504 	/* Create and wake up the kthread once to put it in
1505 	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
1506 	 * warning and work with loadavg.
1507 	 */
1508 	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
1509 				n->dev->name, n->napi_id);
1510 	if (IS_ERR(n->thread)) {
1511 		err = PTR_ERR(n->thread);
1512 		pr_err("kthread_run failed with err %d\n", err);
1513 		n->thread = NULL;
1514 	}
1515 
1516 	return err;
1517 }
1518 
1519 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1520 {
1521 	const struct net_device_ops *ops = dev->netdev_ops;
1522 	int ret;
1523 
1524 	ASSERT_RTNL();
1525 
1526 	if (!netif_device_present(dev)) {
1527 		/* may be detached because parent is runtime-suspended */
1528 		if (dev->dev.parent)
1529 			pm_runtime_resume(dev->dev.parent);
1530 		if (!netif_device_present(dev))
1531 			return -ENODEV;
1532 	}
1533 
1534 	/* Block netpoll from trying to do any rx path servicing.
1535 	 * If we don't do this there is a chance ndo_poll_controller
1536 	 * or ndo_poll may be running while we open the device
1537 	 */
1538 	netpoll_poll_disable(dev);
1539 
1540 	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
1541 	ret = notifier_to_errno(ret);
1542 	if (ret)
1543 		return ret;
1544 
1545 	set_bit(__LINK_STATE_START, &dev->state);
1546 
1547 	if (ops->ndo_validate_addr)
1548 		ret = ops->ndo_validate_addr(dev);
1549 
1550 	if (!ret && ops->ndo_open)
1551 		ret = ops->ndo_open(dev);
1552 
1553 	netpoll_poll_enable(dev);
1554 
1555 	if (ret)
1556 		clear_bit(__LINK_STATE_START, &dev->state);
1557 	else {
1558 		dev->flags |= IFF_UP;
1559 		dev_set_rx_mode(dev);
1560 		dev_activate(dev);
1561 		add_device_randomness(dev->dev_addr, dev->addr_len);
1562 	}
1563 
1564 	return ret;
1565 }
1566 
1567 /**
1568  *	dev_open	- prepare an interface for use.
1569  *	@dev: device to open
1570  *	@extack: netlink extended ack
1571  *
1572  *	Takes a device from down to up state. The device's private open
1573  *	function is invoked and then the multicast lists are loaded. Finally
1574  *	the device is moved into the up state and a %NETDEV_UP message is
1575  *	sent to the netdev notifier chain.
1576  *
1577  *	Calling this function on an active interface is a nop. On a failure
1578  *	a negative errno code is returned.
1579  */
1580 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1581 {
1582 	int ret;
1583 
1584 	if (dev->flags & IFF_UP)
1585 		return 0;
1586 
1587 	ret = __dev_open(dev, extack);
1588 	if (ret < 0)
1589 		return ret;
1590 
1591 	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1592 	call_netdevice_notifiers(NETDEV_UP, dev);
1593 
1594 	return ret;
1595 }
1596 EXPORT_SYMBOL(dev_open);
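
/*
 * Illustrative sketch (not part of the original file): dev_open() and
 * dev_close() must run under the RTNL semaphore, e.g.:
 *
 *	rtnl_lock();
 *	err = dev_open(dev, NULL);	// NULL extack: no extended error reporting
 *	rtnl_unlock();
 */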
1597 
1598 static void __dev_close_many(struct list_head *head)
1599 {
1600 	struct net_device *dev;
1601 
1602 	ASSERT_RTNL();
1603 	might_sleep();
1604 
1605 	list_for_each_entry(dev, head, close_list) {
1606 		/* Temporarily disable netpoll until the interface is down */
1607 		netpoll_poll_disable(dev);
1608 
1609 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1610 
1611 		clear_bit(__LINK_STATE_START, &dev->state);
1612 
1613 		/* Synchronize to scheduled poll. We cannot touch poll list, it
1614 		 * can be even on different cpu. So just clear netif_running().
1615 		 *
1616 		 * dev->stop() will invoke napi_disable() on all of its
1617 		 * napi_struct instances on this device.
1618 		 */
1619 		smp_mb__after_atomic(); /* Commit netif_running(). */
1620 	}
1621 
1622 	dev_deactivate_many(head);
1623 
1624 	list_for_each_entry(dev, head, close_list) {
1625 		const struct net_device_ops *ops = dev->netdev_ops;
1626 
1627 		/*
1628 		 *	Call the device specific close. This cannot fail and is
1629 		 *	only done if the device is UP.
1630 		 *
1631 		 *	We allow it to be called even after a DETACH hot-plug
1632 		 *	event.
1633 		 */
1634 		if (ops->ndo_stop)
1635 			ops->ndo_stop(dev);
1636 
1637 		dev->flags &= ~IFF_UP;
1638 		netpoll_poll_enable(dev);
1639 	}
1640 }
1641 
1642 static void __dev_close(struct net_device *dev)
1643 {
1644 	LIST_HEAD(single);
1645 
1646 	list_add(&dev->close_list, &single);
1647 	__dev_close_many(&single);
1648 	list_del(&single);
1649 }
1650 
1651 void dev_close_many(struct list_head *head, bool unlink)
1652 {
1653 	struct net_device *dev, *tmp;
1654 
1655 	/* Remove the devices that don't need to be closed */
1656 	list_for_each_entry_safe(dev, tmp, head, close_list)
1657 		if (!(dev->flags & IFF_UP))
1658 			list_del_init(&dev->close_list);
1659 
1660 	__dev_close_many(head);
1661 
1662 	list_for_each_entry_safe(dev, tmp, head, close_list) {
1663 		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1664 		call_netdevice_notifiers(NETDEV_DOWN, dev);
1665 		if (unlink)
1666 			list_del_init(&dev->close_list);
1667 	}
1668 }
1669 EXPORT_SYMBOL(dev_close_many);
1670 
1671 /**
1672  *	dev_close - shutdown an interface.
1673  *	@dev: device to shutdown
1674  *
1675  *	This function moves an active device into down state. A
1676  *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1677  *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1678  *	chain.
1679  */
1680 void dev_close(struct net_device *dev)
1681 {
1682 	if (dev->flags & IFF_UP) {
1683 		LIST_HEAD(single);
1684 
1685 		list_add(&dev->close_list, &single);
1686 		dev_close_many(&single, true);
1687 		list_del(&single);
1688 	}
1689 }
1690 EXPORT_SYMBOL(dev_close);
1691 
1692 
1693 /**
1694  *	dev_disable_lro - disable Large Receive Offload on a device
1695  *	@dev: device
1696  *
1697  *	Disable Large Receive Offload (LRO) on a net device.  Must be
1698  *	called under RTNL.  This is needed if received packets may be
1699  *	forwarded to another interface.
1700  */
1701 void dev_disable_lro(struct net_device *dev)
1702 {
1703 	struct net_device *lower_dev;
1704 	struct list_head *iter;
1705 
1706 	dev->wanted_features &= ~NETIF_F_LRO;
1707 	netdev_update_features(dev);
1708 
1709 	if (unlikely(dev->features & NETIF_F_LRO))
1710 		netdev_WARN(dev, "failed to disable LRO!\n");
1711 
1712 	netdev_for_each_lower_dev(dev, lower_dev, iter)
1713 		dev_disable_lro(lower_dev);
1714 }
1715 EXPORT_SYMBOL(dev_disable_lro);
1716 
1717 /**
1718  *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1719  *	@dev: device
1720  *
1721  *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
1722  *	called under RTNL.  This is needed if Generic XDP is installed on
1723  *	the device.
1724  */
1725 static void dev_disable_gro_hw(struct net_device *dev)
1726 {
1727 	dev->wanted_features &= ~NETIF_F_GRO_HW;
1728 	netdev_update_features(dev);
1729 
1730 	if (unlikely(dev->features & NETIF_F_GRO_HW))
1731 		netdev_WARN(dev, "failed to disable GRO_HW!\n");
1732 }
1733 
1734 const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1735 {
1736 #define N(val) 						\
1737 	case NETDEV_##val:				\
1738 		return "NETDEV_" __stringify(val);
1739 	switch (cmd) {
1740 	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1741 	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1742 	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1743 	N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
1744 	N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
1745 	N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
1746 	N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1747 	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1748 	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1749 	N(PRE_CHANGEADDR)
1750 	}
1751 #undef N
1752 	return "UNKNOWN_NETDEV_EVENT";
1753 }
1754 EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1755 
1756 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1757 				   struct net_device *dev)
1758 {
1759 	struct netdev_notifier_info info = {
1760 		.dev = dev,
1761 	};
1762 
1763 	return nb->notifier_call(nb, val, &info);
1764 }
1765 
1766 static int call_netdevice_register_notifiers(struct notifier_block *nb,
1767 					     struct net_device *dev)
1768 {
1769 	int err;
1770 
1771 	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1772 	err = notifier_to_errno(err);
1773 	if (err)
1774 		return err;
1775 
1776 	if (!(dev->flags & IFF_UP))
1777 		return 0;
1778 
1779 	call_netdevice_notifier(nb, NETDEV_UP, dev);
1780 	return 0;
1781 }
1782 
1783 static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1784 						struct net_device *dev)
1785 {
1786 	if (dev->flags & IFF_UP) {
1787 		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1788 					dev);
1789 		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1790 	}
1791 	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1792 }
1793 
1794 static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1795 						 struct net *net)
1796 {
1797 	struct net_device *dev;
1798 	int err;
1799 
1800 	for_each_netdev(net, dev) {
1801 		err = call_netdevice_register_notifiers(nb, dev);
1802 		if (err)
1803 			goto rollback;
1804 	}
1805 	return 0;
1806 
1807 rollback:
1808 	for_each_netdev_continue_reverse(net, dev)
1809 		call_netdevice_unregister_notifiers(nb, dev);
1810 	return err;
1811 }
1812 
1813 static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1814 						    struct net *net)
1815 {
1816 	struct net_device *dev;
1817 
1818 	for_each_netdev(net, dev)
1819 		call_netdevice_unregister_notifiers(nb, dev);
1820 }
1821 
1822 static int dev_boot_phase = 1;
1823 
1824 /**
1825  * register_netdevice_notifier - register a network notifier block
1826  * @nb: notifier
1827  *
1828  * Register a notifier to be called when network device events occur.
1829  * The notifier passed is linked into the kernel structures and must
1830  * not be reused until it has been unregistered. A negative errno code
1831  * is returned on a failure.
1832  *
1833  * When registered, all registration and up events are replayed
1834  * to the new notifier to allow the device to have a race-free
1835  * view of the network device list.
1836  */
1837 
1838 int register_netdevice_notifier(struct notifier_block *nb)
1839 {
1840 	struct net *net;
1841 	int err;
1842 
1843 	/* Close race with setup_net() and cleanup_net() */
1844 	down_write(&pernet_ops_rwsem);
1845 	rtnl_lock();
1846 	err = raw_notifier_chain_register(&netdev_chain, nb);
1847 	if (err)
1848 		goto unlock;
1849 	if (dev_boot_phase)
1850 		goto unlock;
1851 	for_each_net(net) {
1852 		err = call_netdevice_register_net_notifiers(nb, net);
1853 		if (err)
1854 			goto rollback;
1855 	}
1856 
1857 unlock:
1858 	rtnl_unlock();
1859 	up_write(&pernet_ops_rwsem);
1860 	return err;
1861 
1862 rollback:
1863 	for_each_net_continue_reverse(net)
1864 		call_netdevice_unregister_net_notifiers(nb, net);
1865 
1866 	raw_notifier_chain_unregister(&netdev_chain, nb);
1867 	goto unlock;
1868 }
1869 EXPORT_SYMBOL(register_netdevice_notifier);
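/*
 * Illustrative sketch (not part of this file): a minimal consumer of this
 * notifier chain.  example_netdev_event() and example_nb are hypothetical
 * names; the helpers called are the ones in-tree notifiers use.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *		case NETDEV_DOWN:
 *			pr_debug("%s: %s\n", dev->name,
 *				 netdev_cmd_to_name(event));
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&example_nb);
 *
 * Because registration replays NETDEV_REGISTER and NETDEV_UP for devices
 * that already exist, a late-loaded notifier still gets a consistent view
 * of the device list.
 */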
1870 
1871 /**
1872  * unregister_netdevice_notifier - unregister a network notifier block
1873  * @nb: notifier
1874  *
1875  * Unregister a notifier previously registered by
1876  * register_netdevice_notifier(). The notifier is unlinked from the
1877  * kernel structures and may then be reused. A negative errno code
1878  * is returned on a failure.
1879  *
1880  * After unregistering, unregister and down device events are synthesized
1881  * for all devices on the device list to the removed notifier to remove
1882  * the need for special case cleanup code.
1883  */
1884 
1885 int unregister_netdevice_notifier(struct notifier_block *nb)
1886 {
1887 	struct net *net;
1888 	int err;
1889 
1890 	/* Close race with setup_net() and cleanup_net() */
1891 	down_write(&pernet_ops_rwsem);
1892 	rtnl_lock();
1893 	err = raw_notifier_chain_unregister(&netdev_chain, nb);
1894 	if (err)
1895 		goto unlock;
1896 
1897 	for_each_net(net)
1898 		call_netdevice_unregister_net_notifiers(nb, net);
1899 
1900 unlock:
1901 	rtnl_unlock();
1902 	up_write(&pernet_ops_rwsem);
1903 	return err;
1904 }
1905 EXPORT_SYMBOL(unregister_netdevice_notifier);
1906 
1907 static int __register_netdevice_notifier_net(struct net *net,
1908 					     struct notifier_block *nb,
1909 					     bool ignore_call_fail)
1910 {
1911 	int err;
1912 
1913 	err = raw_notifier_chain_register(&net->netdev_chain, nb);
1914 	if (err)
1915 		return err;
1916 	if (dev_boot_phase)
1917 		return 0;
1918 
1919 	err = call_netdevice_register_net_notifiers(nb, net);
1920 	if (err && !ignore_call_fail)
1921 		goto chain_unregister;
1922 
1923 	return 0;
1924 
1925 chain_unregister:
1926 	raw_notifier_chain_unregister(&net->netdev_chain, nb);
1927 	return err;
1928 }
1929 
1930 static int __unregister_netdevice_notifier_net(struct net *net,
1931 					       struct notifier_block *nb)
1932 {
1933 	int err;
1934 
1935 	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1936 	if (err)
1937 		return err;
1938 
1939 	call_netdevice_unregister_net_notifiers(nb, net);
1940 	return 0;
1941 }
1942 
1943 /**
1944  * register_netdevice_notifier_net - register a per-netns network notifier block
1945  * @net: network namespace
1946  * @nb: notifier
1947  *
1948  * Register a notifier to be called when network device events occur.
1949  * The notifier passed is linked into the kernel structures and must
1950  * not be reused until it has been unregistered. A negative errno code
1951  * is returned on a failure.
1952  *
1953  * When registered, all registration and up events are replayed
1954  * to the new notifier to allow the device to have a race-free
1955  * view of the network device list.
1956  */
1957 
1958 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
1959 {
1960 	int err;
1961 
1962 	rtnl_lock();
1963 	err = __register_netdevice_notifier_net(net, nb, false);
1964 	rtnl_unlock();
1965 	return err;
1966 }
1967 EXPORT_SYMBOL(register_netdevice_notifier_net);
1968 
1969 /**
1970  * unregister_netdevice_notifier_net - unregister a per-netns
1971  *                                     network notifier block
1972  * @net: network namespace
1973  * @nb: notifier
1974  *
1975  * Unregister a notifier previously registered by
1976  * register_netdevice_notifier_net(). The notifier is unlinked from the
1977  * kernel structures and may then be reused. A negative errno code
1978  * is returned on a failure.
1979  *
1980  * After unregistering, unregister and down device events are synthesized
1981  * for all devices on the device list to the removed notifier to remove
1982  * the need for special case cleanup code.
1983  */
1984 
1985 int unregister_netdevice_notifier_net(struct net *net,
1986 				      struct notifier_block *nb)
1987 {
1988 	int err;
1989 
1990 	rtnl_lock();
1991 	err = __unregister_netdevice_notifier_net(net, nb);
1992 	rtnl_unlock();
1993 	return err;
1994 }
1995 EXPORT_SYMBOL(unregister_netdevice_notifier_net);
1996 
1997 int register_netdevice_notifier_dev_net(struct net_device *dev,
1998 					struct notifier_block *nb,
1999 					struct netdev_net_notifier *nn)
2000 {
2001 	int err;
2002 
2003 	rtnl_lock();
2004 	err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
2005 	if (!err) {
2006 		nn->nb = nb;
2007 		list_add(&nn->list, &dev->net_notifier_list);
2008 	}
2009 	rtnl_unlock();
2010 	return err;
2011 }
2012 EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
2013 
2014 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
2015 					  struct notifier_block *nb,
2016 					  struct netdev_net_notifier *nn)
2017 {
2018 	int err;
2019 
2020 	rtnl_lock();
2021 	list_del(&nn->list);
2022 	err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
2023 	rtnl_unlock();
2024 	return err;
2025 }
2026 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
2027 
2028 static void move_netdevice_notifiers_dev_net(struct net_device *dev,
2029 					     struct net *net)
2030 {
2031 	struct netdev_net_notifier *nn;
2032 
2033 	list_for_each_entry(nn, &dev->net_notifier_list, list) {
2034 		__unregister_netdevice_notifier_net(dev_net(dev), nn->nb);
2035 		__register_netdevice_notifier_net(net, nn->nb, true);
2036 	}
2037 }
2038 
2039 /**
2040  *	call_netdevice_notifiers_info - call all network notifier blocks
2041  *	@val: value passed unmodified to notifier function
2042  *	@info: notifier information data
2043  *
2044  *	Call all network notifier blocks.  Parameters and return value
2045  *	are as for raw_notifier_call_chain().
2046  */
2047 
2048 static int call_netdevice_notifiers_info(unsigned long val,
2049 					 struct netdev_notifier_info *info)
2050 {
2051 	struct net *net = dev_net(info->dev);
2052 	int ret;
2053 
2054 	ASSERT_RTNL();
2055 
2056 	/* Run per-netns notifier block chain first, then run the global one.
2057 	 * Hopefully, one day, the global one is going to be removed after
2058 	 * all notifier block registrants get converted to be per-netns.
2059 	 */
2060 	ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
2061 	if (ret & NOTIFY_STOP_MASK)
2062 		return ret;
2063 	return raw_notifier_call_chain(&netdev_chain, val, info);
2064 }
2065 
2066 static int call_netdevice_notifiers_extack(unsigned long val,
2067 					   struct net_device *dev,
2068 					   struct netlink_ext_ack *extack)
2069 {
2070 	struct netdev_notifier_info info = {
2071 		.dev = dev,
2072 		.extack = extack,
2073 	};
2074 
2075 	return call_netdevice_notifiers_info(val, &info);
2076 }
2077 
2078 /**
2079  *	call_netdevice_notifiers - call all network notifier blocks
2080  *      @val: value passed unmodified to notifier function
2081  *      @dev: net_device pointer passed unmodified to notifier function
2082  *
2083  *	Call all network notifier blocks.  Parameters and return value
2084  *	are as for raw_notifier_call_chain().
2085  */
2086 
2087 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
2088 {
2089 	return call_netdevice_notifiers_extack(val, dev, NULL);
2090 }
2091 EXPORT_SYMBOL(call_netdevice_notifiers);
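/*
 * Illustrative sketch (not part of this file): how code that changes a
 * device attribute lets the rest of the stack know.  It runs under RTNL,
 * which the ASSERT_RTNL() in call_netdevice_notifiers_info() enforces.
 * The attribute being changed is left abstract here.
 *
 *	ASSERT_RTNL();
 *	...change some attribute of dev...
 *	call_netdevice_notifiers(NETDEV_CHANGE, dev);
 */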
2092 
2093 /**
2094  *	call_netdevice_notifiers_mtu - call all network notifier blocks
2095  *	@val: value passed unmodified to notifier function
2096  *	@dev: net_device pointer passed unmodified to notifier function
2097  *	@arg: additional u32 argument passed to the notifier function
2098  *
2099  *	Call all network notifier blocks.  Parameters and return value
2100  *	are as for raw_notifier_call_chain().
2101  */
2102 static int call_netdevice_notifiers_mtu(unsigned long val,
2103 					struct net_device *dev, u32 arg)
2104 {
2105 	struct netdev_notifier_info_ext info = {
2106 		.info.dev = dev,
2107 		.ext.mtu = arg,
2108 	};
2109 
2110 	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2111 
2112 	return call_netdevice_notifiers_info(val, &info.info);
2113 }
2114 
2115 #ifdef CONFIG_NET_INGRESS
2116 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
2117 
2118 void net_inc_ingress_queue(void)
2119 {
2120 	static_branch_inc(&ingress_needed_key);
2121 }
2122 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2123 
2124 void net_dec_ingress_queue(void)
2125 {
2126 	static_branch_dec(&ingress_needed_key);
2127 }
2128 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2129 #endif
2130 
2131 #ifdef CONFIG_NET_EGRESS
2132 static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
2133 
2134 void net_inc_egress_queue(void)
2135 {
2136 	static_branch_inc(&egress_needed_key);
2137 }
2138 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2139 
2140 void net_dec_egress_queue(void)
2141 {
2142 	static_branch_dec(&egress_needed_key);
2143 }
2144 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2145 #endif
2146 
2147 static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2148 #ifdef CONFIG_JUMP_LABEL
2149 static atomic_t netstamp_needed_deferred;
2150 static atomic_t netstamp_wanted;
2151 static void netstamp_clear(struct work_struct *work)
2152 {
2153 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2154 	int wanted;
2155 
2156 	wanted = atomic_add_return(deferred, &netstamp_wanted);
2157 	if (wanted > 0)
2158 		static_branch_enable(&netstamp_needed_key);
2159 	else
2160 		static_branch_disable(&netstamp_needed_key);
2161 }
2162 static DECLARE_WORK(netstamp_work, netstamp_clear);
2163 #endif
2164 
2165 void net_enable_timestamp(void)
2166 {
2167 #ifdef CONFIG_JUMP_LABEL
2168 	int wanted;
2169 
2170 	while (1) {
2171 		wanted = atomic_read(&netstamp_wanted);
2172 		if (wanted <= 0)
2173 			break;
2174 		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
2175 			return;
2176 	}
2177 	atomic_inc(&netstamp_needed_deferred);
2178 	schedule_work(&netstamp_work);
2179 #else
2180 	static_branch_inc(&netstamp_needed_key);
2181 #endif
2182 }
2183 EXPORT_SYMBOL(net_enable_timestamp);
2184 
2185 void net_disable_timestamp(void)
2186 {
2187 #ifdef CONFIG_JUMP_LABEL
2188 	int wanted;
2189 
2190 	while (1) {
2191 		wanted = atomic_read(&netstamp_wanted);
2192 		if (wanted <= 1)
2193 			break;
2194 		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
2195 			return;
2196 	}
2197 	atomic_dec(&netstamp_needed_deferred);
2198 	schedule_work(&netstamp_work);
2199 #else
2200 	static_branch_dec(&netstamp_needed_key);
2201 #endif
2202 }
2203 EXPORT_SYMBOL(net_disable_timestamp);
2204 
2205 static inline void net_timestamp_set(struct sk_buff *skb)
2206 {
2207 	skb->tstamp = 0;
2208 	if (static_branch_unlikely(&netstamp_needed_key))
2209 		__net_timestamp(skb);
2210 }
2211 
2212 #define net_timestamp_check(COND, SKB)				\
2213 	if (static_branch_unlikely(&netstamp_needed_key)) {	\
2214 		if ((COND) && !(SKB)->tstamp)			\
2215 			__net_timestamp(SKB);			\
2216 	}							\
2217 
2218 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2219 {
2220 	unsigned int len;
2221 
2222 	if (!(dev->flags & IFF_UP))
2223 		return false;
2224 
2225 	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
2226 	if (skb->len <= len)
2227 		return true;
2228 
2229 	/* if TSO is enabled, we don't care about the length as the packet
2230 	 * could be forwarded without being segmented first
2231 	 */
2232 	if (skb_is_gso(skb))
2233 		return true;
2234 
2235 	return false;
2236 }
2237 EXPORT_SYMBOL_GPL(is_skb_forwardable);
2238 
2239 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2240 {
2241 	int ret = ____dev_forward_skb(dev, skb);
2242 
2243 	if (likely(!ret)) {
2244 		skb->protocol = eth_type_trans(skb, dev);
2245 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2246 	}
2247 
2248 	return ret;
2249 }
2250 EXPORT_SYMBOL_GPL(__dev_forward_skb);
2251 
2252 /**
2253  * dev_forward_skb - loopback an skb to another netif
2254  *
2255  * @dev: destination network device
2256  * @skb: buffer to forward
2257  *
2258  * return values:
2259  *	NET_RX_SUCCESS	(no congestion)
2260  *	NET_RX_DROP     (packet was dropped, but freed)
2261  *
2262  * dev_forward_skb can be used for injecting an skb from the
2263  * start_xmit function of one device into the receive queue
2264  * of another device.
2265  *
2266  * The receiving device may be in another namespace, so
2267  * we have to clear all information in the skb that could
2268  * impact namespace isolation.
2269  */
2270 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2271 {
2272 	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2273 }
2274 EXPORT_SYMBOL_GPL(dev_forward_skb);
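/*
 * Illustrative sketch (not part of this file): a virtual driver's
 * ndo_start_xmit handing the frame straight to a peer device, roughly what
 * veth-style drivers do.  example_xmit() and example_get_peer() are
 * hypothetical names.  Note that dev_forward_skb() consumes the skb on
 * both success and failure.
 *
 *	static netdev_tx_t example_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct net_device *peer = example_get_peer(dev);
 *
 *		if (!peer) {
 *			kfree_skb(skb);
 *			dev->stats.tx_dropped++;
 *		} else if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS) {
 *			dev->stats.tx_dropped++;
 *		}
 *		return NETDEV_TX_OK;
 *	}
 */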
2275 
2276 static inline int deliver_skb(struct sk_buff *skb,
2277 			      struct packet_type *pt_prev,
2278 			      struct net_device *orig_dev)
2279 {
2280 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2281 		return -ENOMEM;
2282 	refcount_inc(&skb->users);
2283 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2284 }
2285 
2286 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2287 					  struct packet_type **pt,
2288 					  struct net_device *orig_dev,
2289 					  __be16 type,
2290 					  struct list_head *ptype_list)
2291 {
2292 	struct packet_type *ptype, *pt_prev = *pt;
2293 
2294 	list_for_each_entry_rcu(ptype, ptype_list, list) {
2295 		if (ptype->type != type)
2296 			continue;
2297 		if (pt_prev)
2298 			deliver_skb(skb, pt_prev, orig_dev);
2299 		pt_prev = ptype;
2300 	}
2301 	*pt = pt_prev;
2302 }
2303 
2304 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2305 {
2306 	if (!ptype->af_packet_priv || !skb->sk)
2307 		return false;
2308 
2309 	if (ptype->id_match)
2310 		return ptype->id_match(ptype, skb->sk);
2311 	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2312 		return true;
2313 
2314 	return false;
2315 }
2316 
2317 /**
2318  * dev_nit_active - return true if any network interface taps are in use
2319  *
2320  * @dev: network device to check for the presence of taps
2321  */
2322 bool dev_nit_active(struct net_device *dev)
2323 {
2324 	return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2325 }
2326 EXPORT_SYMBOL_GPL(dev_nit_active);
2327 
2328 /*
2329  *	Support routine. Sends outgoing frames to any network
2330  *	taps currently in use.
2331  */
2332 
2333 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2334 {
2335 	struct packet_type *ptype;
2336 	struct sk_buff *skb2 = NULL;
2337 	struct packet_type *pt_prev = NULL;
2338 	struct list_head *ptype_list = &ptype_all;
2339 
2340 	rcu_read_lock();
2341 again:
2342 	list_for_each_entry_rcu(ptype, ptype_list, list) {
2343 		if (ptype->ignore_outgoing)
2344 			continue;
2345 
2346 		/* Never send packets back to the socket
2347 		 * they originated from - MvS ([email protected])
2348 		 */
2349 		if (skb_loop_sk(ptype, skb))
2350 			continue;
2351 
2352 		if (pt_prev) {
2353 			deliver_skb(skb2, pt_prev, skb->dev);
2354 			pt_prev = ptype;
2355 			continue;
2356 		}
2357 
2358 		/* need to clone skb, done only once */
2359 		skb2 = skb_clone(skb, GFP_ATOMIC);
2360 		if (!skb2)
2361 			goto out_unlock;
2362 
2363 		net_timestamp_set(skb2);
2364 
2365 		/* skb->nh should be correctly
2366 		 * set by sender, so that the second statement is
2367 		 * just protection against buggy protocols.
2368 		 */
2369 		skb_reset_mac_header(skb2);
2370 
2371 		if (skb_network_header(skb2) < skb2->data ||
2372 		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2373 			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2374 					     ntohs(skb2->protocol),
2375 					     dev->name);
2376 			skb_reset_network_header(skb2);
2377 		}
2378 
2379 		skb2->transport_header = skb2->network_header;
2380 		skb2->pkt_type = PACKET_OUTGOING;
2381 		pt_prev = ptype;
2382 	}
2383 
2384 	if (ptype_list == &ptype_all) {
2385 		ptype_list = &dev->ptype_all;
2386 		goto again;
2387 	}
2388 out_unlock:
2389 	if (pt_prev) {
2390 		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2391 			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2392 		else
2393 			kfree_skb(skb2);
2394 	}
2395 	rcu_read_unlock();
2396 }
2397 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
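/*
 * Illustrative sketch (not part of this file): the kind of tap the routine
 * above feeds.  An ETH_P_ALL packet_type registered with dev_add_pack()
 * receives a clone of every outgoing frame (pkt_type PACKET_OUTGOING) in
 * addition to received ones.  example_tap_rcv() is a hypothetical name.
 *
 *	static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *				   struct packet_type *pt,
 *				   struct net_device *orig_dev)
 *	{
 *		if (skb->pkt_type == PACKET_OUTGOING)
 *			pr_debug("%s: tx frame, len %u\n", dev->name, skb->len);
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type example_tap __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),
 *		.func = example_tap_rcv,
 *	};
 *
 *	dev_add_pack(&example_tap);
 */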
2398 
2399 /**
2400  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2401  * @dev: Network device
2402  * @txq: number of queues available
2403  *
2404  * If real_num_tx_queues is changed the tc mappings may no longer be
2405  * valid. To resolve this, verify that each tc mapping remains valid and,
2406  * if not, zero the mapping. With no priorities mapping to this
2407  * offset/count pair it will no longer be used. In the worst case, if TC0
2408  * is invalid nothing can be done, so priority mappings are disabled. It
2409  * is expected that drivers will fix this mapping if they can before
2410  * calling netif_set_real_num_tx_queues.
2411  */
2412 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2413 {
2414 	int i;
2415 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2416 
2417 	/* If TC0 is invalidated disable TC mapping */
2418 	if (tc->offset + tc->count > txq) {
2419 		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2420 		dev->num_tc = 0;
2421 		return;
2422 	}
2423 
2424 	/* Invalidated prio to tc mappings set to TC0 */
2425 	for (i = 1; i < TC_BITMASK + 1; i++) {
2426 		int q = netdev_get_prio_tc_map(dev, i);
2427 
2428 		tc = &dev->tc_to_txq[q];
2429 		if (tc->offset + tc->count > txq) {
2430 			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2431 				i, q);
2432 			netdev_set_prio_tc_map(dev, i, 0);
2433 		}
2434 	}
2435 }
2436 
2437 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2438 {
2439 	if (dev->num_tc) {
2440 		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2441 		int i;
2442 
2443 		/* walk through the TCs and see if it falls into any of them */
2444 		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2445 			if ((txq - tc->offset) < tc->count)
2446 				return i;
2447 		}
2448 
2449 		/* didn't find it, just return -1 to indicate no match */
2450 		return -1;
2451 	}
2452 
2453 	return 0;
2454 }
2455 EXPORT_SYMBOL(netdev_txq_to_tc);
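/*
 * Worked example (not part of this file): with num_tc = 2 and
 * tc_to_txq = { {.offset = 0, .count = 4}, {.offset = 4, .count = 4} },
 * netdev_txq_to_tc(dev, 5) computes 5 - 4 = 1 < 4 for the second entry and
 * returns traffic class 1; txq 9 falls outside both ranges and returns -1.
 */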
2456 
2457 #ifdef CONFIG_XPS
2458 struct static_key xps_needed __read_mostly;
2459 EXPORT_SYMBOL(xps_needed);
2460 struct static_key xps_rxqs_needed __read_mostly;
2461 EXPORT_SYMBOL(xps_rxqs_needed);
2462 static DEFINE_MUTEX(xps_map_mutex);
2463 #define xmap_dereference(P)		\
2464 	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2465 
2466 static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2467 			     int tci, u16 index)
2468 {
2469 	struct xps_map *map = NULL;
2470 	int pos;
2471 
2472 	if (dev_maps)
2473 		map = xmap_dereference(dev_maps->attr_map[tci]);
2474 	if (!map)
2475 		return false;
2476 
2477 	for (pos = map->len; pos--;) {
2478 		if (map->queues[pos] != index)
2479 			continue;
2480 
2481 		if (map->len > 1) {
2482 			map->queues[pos] = map->queues[--map->len];
2483 			break;
2484 		}
2485 
2486 		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2487 		kfree_rcu(map, rcu);
2488 		return false;
2489 	}
2490 
2491 	return true;
2492 }
2493 
2494 static bool remove_xps_queue_cpu(struct net_device *dev,
2495 				 struct xps_dev_maps *dev_maps,
2496 				 int cpu, u16 offset, u16 count)
2497 {
2498 	int num_tc = dev->num_tc ? : 1;
2499 	bool active = false;
2500 	int tci;
2501 
2502 	for (tci = cpu * num_tc; num_tc--; tci++) {
2503 		int i, j;
2504 
2505 		for (i = count, j = offset; i--; j++) {
2506 			if (!remove_xps_queue(dev_maps, tci, j))
2507 				break;
2508 		}
2509 
2510 		active |= i < 0;
2511 	}
2512 
2513 	return active;
2514 }
2515 
2516 static void reset_xps_maps(struct net_device *dev,
2517 			   struct xps_dev_maps *dev_maps,
2518 			   bool is_rxqs_map)
2519 {
2520 	if (is_rxqs_map) {
2521 		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2522 		RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
2523 	} else {
2524 		RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
2525 	}
2526 	static_key_slow_dec_cpuslocked(&xps_needed);
2527 	kfree_rcu(dev_maps, rcu);
2528 }
2529 
2530 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
2531 			   struct xps_dev_maps *dev_maps, unsigned int nr_ids,
2532 			   u16 offset, u16 count, bool is_rxqs_map)
2533 {
2534 	bool active = false;
2535 	int i, j;
2536 
2537 	for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
2538 	     j < nr_ids;)
2539 		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
2540 					       count);
2541 	if (!active)
2542 		reset_xps_maps(dev, dev_maps, is_rxqs_map);
2543 
2544 	if (!is_rxqs_map) {
2545 		for (i = offset + (count - 1); count--; i--) {
2546 			netdev_queue_numa_node_write(
2547 				netdev_get_tx_queue(dev, i),
2548 				NUMA_NO_NODE);
2549 		}
2550 	}
2551 }
2552 
2553 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2554 				   u16 count)
2555 {
2556 	const unsigned long *possible_mask = NULL;
2557 	struct xps_dev_maps *dev_maps;
2558 	unsigned int nr_ids;
2559 
2560 	if (!static_key_false(&xps_needed))
2561 		return;
2562 
2563 	cpus_read_lock();
2564 	mutex_lock(&xps_map_mutex);
2565 
2566 	if (static_key_false(&xps_rxqs_needed)) {
2567 		dev_maps = xmap_dereference(dev->xps_rxqs_map);
2568 		if (dev_maps) {
2569 			nr_ids = dev->num_rx_queues;
2570 			clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
2571 				       offset, count, true);
2572 		}
2573 	}
2574 
2575 	dev_maps = xmap_dereference(dev->xps_cpus_map);
2576 	if (!dev_maps)
2577 		goto out_no_maps;
2578 
2579 	if (num_possible_cpus() > 1)
2580 		possible_mask = cpumask_bits(cpu_possible_mask);
2581 	nr_ids = nr_cpu_ids;
2582 	clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
2583 		       false);
2584 
2585 out_no_maps:
2586 	mutex_unlock(&xps_map_mutex);
2587 	cpus_read_unlock();
2588 }
2589 
2590 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2591 {
2592 	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2593 }
2594 
2595 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2596 				      u16 index, bool is_rxqs_map)
2597 {
2598 	struct xps_map *new_map;
2599 	int alloc_len = XPS_MIN_MAP_ALLOC;
2600 	int i, pos;
2601 
2602 	for (pos = 0; map && pos < map->len; pos++) {
2603 		if (map->queues[pos] != index)
2604 			continue;
2605 		return map;
2606 	}
2607 
2608 	/* Need to add tx-queue to this CPU's/rx-queue's existing map */
2609 	if (map) {
2610 		if (pos < map->alloc_len)
2611 			return map;
2612 
2613 		alloc_len = map->alloc_len * 2;
2614 	}
2615 
2616 	/* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2617 	 *  map
2618 	 */
2619 	if (is_rxqs_map)
2620 		new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2621 	else
2622 		new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2623 				       cpu_to_node(attr_index));
2624 	if (!new_map)
2625 		return NULL;
2626 
2627 	for (i = 0; i < pos; i++)
2628 		new_map->queues[i] = map->queues[i];
2629 	new_map->alloc_len = alloc_len;
2630 	new_map->len = pos;
2631 
2632 	return new_map;
2633 }
2634 
2635 /* Must be called under cpus_read_lock */
2636 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2637 			  u16 index, bool is_rxqs_map)
2638 {
2639 	const unsigned long *online_mask = NULL, *possible_mask = NULL;
2640 	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
2641 	int i, j, tci, numa_node_id = -2;
2642 	int maps_sz, num_tc = 1, tc = 0;
2643 	struct xps_map *map, *new_map;
2644 	bool active = false;
2645 	unsigned int nr_ids;
2646 
2647 	if (dev->num_tc) {
2648 		/* Do not allow XPS on subordinate device directly */
2649 		num_tc = dev->num_tc;
2650 		if (num_tc < 0)
2651 			return -EINVAL;
2652 
2653 		/* If queue belongs to subordinate dev use its map */
2654 		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2655 
2656 		tc = netdev_txq_to_tc(dev, index);
2657 		if (tc < 0)
2658 			return -EINVAL;
2659 	}
2660 
2661 	mutex_lock(&xps_map_mutex);
2662 	if (is_rxqs_map) {
2663 		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2664 		dev_maps = xmap_dereference(dev->xps_rxqs_map);
2665 		nr_ids = dev->num_rx_queues;
2666 	} else {
2667 		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2668 		if (num_possible_cpus() > 1) {
2669 			online_mask = cpumask_bits(cpu_online_mask);
2670 			possible_mask = cpumask_bits(cpu_possible_mask);
2671 		}
2672 		dev_maps = xmap_dereference(dev->xps_cpus_map);
2673 		nr_ids = nr_cpu_ids;
2674 	}
2675 
2676 	if (maps_sz < L1_CACHE_BYTES)
2677 		maps_sz = L1_CACHE_BYTES;
2678 
2679 	/* allocate memory for queue storage */
2680 	for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2681 	     j < nr_ids;) {
2682 		if (!new_dev_maps)
2683 			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2684 		if (!new_dev_maps) {
2685 			mutex_unlock(&xps_map_mutex);
2686 			return -ENOMEM;
2687 		}
2688 
2689 		tci = j * num_tc + tc;
2690 		map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
2691 				 NULL;
2692 
2693 		map = expand_xps_map(map, j, index, is_rxqs_map);
2694 		if (!map)
2695 			goto error;
2696 
2697 		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2698 	}
2699 
2700 	if (!new_dev_maps)
2701 		goto out_no_new_maps;
2702 
2703 	if (!dev_maps) {
2704 		/* Increment static keys at most once per type */
2705 		static_key_slow_inc_cpuslocked(&xps_needed);
2706 		if (is_rxqs_map)
2707 			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2708 	}
2709 
2710 	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2711 	     j < nr_ids;) {
2712 		/* copy maps belonging to foreign traffic classes */
2713 		for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
2714 			/* fill in the new device map from the old device map */
2715 			map = xmap_dereference(dev_maps->attr_map[tci]);
2716 			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2717 		}
2718 
2719 		/* We need to explicitly update tci as the previous loop
2720 		 * could break out early if dev_maps is NULL.
2721 		 */
2722 		tci = j * num_tc + tc;
2723 
2724 		if (netif_attr_test_mask(j, mask, nr_ids) &&
2725 		    netif_attr_test_online(j, online_mask, nr_ids)) {
2726 			/* add tx-queue to CPU/rx-queue maps */
2727 			int pos = 0;
2728 
2729 			map = xmap_dereference(new_dev_maps->attr_map[tci]);
2730 			while ((pos < map->len) && (map->queues[pos] != index))
2731 				pos++;
2732 
2733 			if (pos == map->len)
2734 				map->queues[map->len++] = index;
2735 #ifdef CONFIG_NUMA
2736 			if (!is_rxqs_map) {
2737 				if (numa_node_id == -2)
2738 					numa_node_id = cpu_to_node(j);
2739 				else if (numa_node_id != cpu_to_node(j))
2740 					numa_node_id = -1;
2741 			}
2742 #endif
2743 		} else if (dev_maps) {
2744 			/* fill in the new device map from the old device map */
2745 			map = xmap_dereference(dev_maps->attr_map[tci]);
2746 			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2747 		}
2748 
2749 		/* copy maps belonging to foreign traffic classes */
2750 		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
2751 			/* fill in the new device map from the old device map */
2752 			map = xmap_dereference(dev_maps->attr_map[tci]);
2753 			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2754 		}
2755 	}
2756 
2757 	if (is_rxqs_map)
2758 		rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
2759 	else
2760 		rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);
2761 
2762 	/* Cleanup old maps */
2763 	if (!dev_maps)
2764 		goto out_no_old_maps;
2765 
2766 	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2767 	     j < nr_ids;) {
2768 		for (i = num_tc, tci = j * num_tc; i--; tci++) {
2769 			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2770 			map = xmap_dereference(dev_maps->attr_map[tci]);
2771 			if (map && map != new_map)
2772 				kfree_rcu(map, rcu);
2773 		}
2774 	}
2775 
2776 	kfree_rcu(dev_maps, rcu);
2777 
2778 out_no_old_maps:
2779 	dev_maps = new_dev_maps;
2780 	active = true;
2781 
2782 out_no_new_maps:
2783 	if (!is_rxqs_map) {
2784 		/* update Tx queue numa node */
2785 		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2786 					     (numa_node_id >= 0) ?
2787 					     numa_node_id : NUMA_NO_NODE);
2788 	}
2789 
2790 	if (!dev_maps)
2791 		goto out_no_maps;
2792 
2793 	/* removes tx-queue from unused CPUs/rx-queues */
2794 	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2795 	     j < nr_ids;) {
2796 		for (i = tc, tci = j * num_tc; i--; tci++)
2797 			active |= remove_xps_queue(dev_maps, tci, index);
2798 		if (!netif_attr_test_mask(j, mask, nr_ids) ||
2799 		    !netif_attr_test_online(j, online_mask, nr_ids))
2800 			active |= remove_xps_queue(dev_maps, tci, index);
2801 		for (i = num_tc - tc, tci++; --i; tci++)
2802 			active |= remove_xps_queue(dev_maps, tci, index);
2803 	}
2804 
2805 	/* free map if not active */
2806 	if (!active)
2807 		reset_xps_maps(dev, dev_maps, is_rxqs_map);
2808 
2809 out_no_maps:
2810 	mutex_unlock(&xps_map_mutex);
2811 
2812 	return 0;
2813 error:
2814 	/* remove any maps that we added */
2815 	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2816 	     j < nr_ids;) {
2817 		for (i = num_tc, tci = j * num_tc; i--; tci++) {
2818 			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2819 			map = dev_maps ?
2820 			      xmap_dereference(dev_maps->attr_map[tci]) :
2821 			      NULL;
2822 			if (new_map && new_map != map)
2823 				kfree(new_map);
2824 		}
2825 	}
2826 
2827 	mutex_unlock(&xps_map_mutex);
2828 
2829 	kfree(new_dev_maps);
2830 	return -ENOMEM;
2831 }
2832 EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2833 
2834 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2835 			u16 index)
2836 {
2837 	int ret;
2838 
2839 	cpus_read_lock();
2840 	ret =  __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
2841 	cpus_read_unlock();
2842 
2843 	return ret;
2844 }
2845 EXPORT_SYMBOL(netif_set_xps_queue);
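/*
 * Illustrative sketch (not part of this file): a driver pinning each of its
 * TX queues to the CPU that services the matching interrupt, a common XPS
 * setup.  The per-queue CPU choice, example_queue_cpu(), is hypothetical.
 *
 *	cpumask_var_t mask;
 *	int qid;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	for (qid = 0; qid < dev->real_num_tx_queues; qid++) {
 *		cpumask_clear(mask);
 *		cpumask_set_cpu(example_queue_cpu(qid), mask);
 *		netif_set_xps_queue(dev, mask, qid);
 *	}
 *	free_cpumask_var(mask);
 */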
2846 
2847 #endif
2848 static void netdev_unbind_all_sb_channels(struct net_device *dev)
2849 {
2850 	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2851 
2852 	/* Unbind any subordinate channels */
2853 	while (txq-- != &dev->_tx[0]) {
2854 		if (txq->sb_dev)
2855 			netdev_unbind_sb_channel(dev, txq->sb_dev);
2856 	}
2857 }
2858 
2859 void netdev_reset_tc(struct net_device *dev)
2860 {
2861 #ifdef CONFIG_XPS
2862 	netif_reset_xps_queues_gt(dev, 0);
2863 #endif
2864 	netdev_unbind_all_sb_channels(dev);
2865 
2866 	/* Reset TC configuration of device */
2867 	dev->num_tc = 0;
2868 	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2869 	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2870 }
2871 EXPORT_SYMBOL(netdev_reset_tc);
2872 
2873 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2874 {
2875 	if (tc >= dev->num_tc)
2876 		return -EINVAL;
2877 
2878 #ifdef CONFIG_XPS
2879 	netif_reset_xps_queues(dev, offset, count);
2880 #endif
2881 	dev->tc_to_txq[tc].count = count;
2882 	dev->tc_to_txq[tc].offset = offset;
2883 	return 0;
2884 }
2885 EXPORT_SYMBOL(netdev_set_tc_queue);
2886 
2887 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2888 {
2889 	if (num_tc > TC_MAX_QUEUE)
2890 		return -EINVAL;
2891 
2892 #ifdef CONFIG_XPS
2893 	netif_reset_xps_queues_gt(dev, 0);
2894 #endif
2895 	netdev_unbind_all_sb_channels(dev);
2896 
2897 	dev->num_tc = num_tc;
2898 	return 0;
2899 }
2900 EXPORT_SYMBOL(netdev_set_num_tc);
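/*
 * Illustrative sketch (not part of this file): how an mqprio-style offload
 * typically uses the helpers above.  Declare the number of traffic classes,
 * give each one a slice of the TX queues, then map priorities onto classes.
 * The 2-class/8-queue split is an arbitrary example.
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	(first tc: queues 0-3)
 *	netdev_set_tc_queue(dev, 1, 4, 4);	(second tc: queues 4-7)
 *	for (prio = 0; prio <= TC_BITMASK; prio++)
 *		netdev_set_prio_tc_map(dev, prio, prio < 8 ? 0 : 1);
 */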
2901 
2902 void netdev_unbind_sb_channel(struct net_device *dev,
2903 			      struct net_device *sb_dev)
2904 {
2905 	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2906 
2907 #ifdef CONFIG_XPS
2908 	netif_reset_xps_queues_gt(sb_dev, 0);
2909 #endif
2910 	memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2911 	memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2912 
2913 	while (txq-- != &dev->_tx[0]) {
2914 		if (txq->sb_dev == sb_dev)
2915 			txq->sb_dev = NULL;
2916 	}
2917 }
2918 EXPORT_SYMBOL(netdev_unbind_sb_channel);
2919 
2920 int netdev_bind_sb_channel_queue(struct net_device *dev,
2921 				 struct net_device *sb_dev,
2922 				 u8 tc, u16 count, u16 offset)
2923 {
2924 	/* Make certain the sb_dev and dev are already configured */
2925 	if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2926 		return -EINVAL;
2927 
2928 	/* We cannot hand out queues we don't have */
2929 	if ((offset + count) > dev->real_num_tx_queues)
2930 		return -EINVAL;
2931 
2932 	/* Record the mapping */
2933 	sb_dev->tc_to_txq[tc].count = count;
2934 	sb_dev->tc_to_txq[tc].offset = offset;
2935 
2936 	/* Provide a way for Tx queue to find the tc_to_txq map or
2937 	 * XPS map for itself.
2938 	 */
2939 	while (count--)
2940 		netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2941 
2942 	return 0;
2943 }
2944 EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2945 
2946 int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2947 {
2948 	/* Do not use a multiqueue device to represent a subordinate channel */
2949 	if (netif_is_multiqueue(dev))
2950 		return -ENODEV;
2951 
2952 	/* We allow channels 1 - 32767 to be used for subordinate channels.
2953 	 * Channel 0 is meant to be "native" mode and used only to represent
2954 	 * the main root device. We allow writing 0 to reset the device back
2955 	 * to normal mode after being used as a subordinate channel.
2956 	 */
2957 	if (channel > S16_MAX)
2958 		return -EINVAL;
2959 
2960 	dev->num_tc = -channel;
2961 
2962 	return 0;
2963 }
2964 EXPORT_SYMBOL(netdev_set_sb_channel);
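/*
 * Illustrative sketch (not part of this file): the subordinate-channel
 * pattern used for things such as offloaded macvlans.  The upper device is
 * marked as channel N of the lower device, then a block of the lower
 * device's TX queues is bound to it so queue selection resolves through
 * the upper device's tc_to_txq map.  This assumes the lower device already
 * has its own tc configuration; the queue split shown is arbitrary.
 *
 *	err = netdev_set_sb_channel(upper_dev, channel);
 *	if (!err)
 *		err = netdev_bind_sb_channel_queue(lower_dev, upper_dev,
 *						   0, 2, channel * 2);
 */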
2965 
2966 /*
2967  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2968  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2969  */
2970 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2971 {
2972 	bool disabling;
2973 	int rc;
2974 
2975 	disabling = txq < dev->real_num_tx_queues;
2976 
2977 	if (txq < 1 || txq > dev->num_tx_queues)
2978 		return -EINVAL;
2979 
2980 	if (dev->reg_state == NETREG_REGISTERED ||
2981 	    dev->reg_state == NETREG_UNREGISTERING) {
2982 		ASSERT_RTNL();
2983 
2984 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2985 						  txq);
2986 		if (rc)
2987 			return rc;
2988 
2989 		if (dev->num_tc)
2990 			netif_setup_tc(dev, txq);
2991 
2992 		dev->real_num_tx_queues = txq;
2993 
2994 		if (disabling) {
2995 			synchronize_net();
2996 			qdisc_reset_all_tx_gt(dev, txq);
2997 #ifdef CONFIG_XPS
2998 			netif_reset_xps_queues_gt(dev, txq);
2999 #endif
3000 		}
3001 	} else {
3002 		dev->real_num_tx_queues = txq;
3003 	}
3004 
3005 	return 0;
3006 }
3007 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
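/*
 * Illustrative sketch (not part of this file): a driver's ethtool
 * set_channels path shrinking or growing the queue count at runtime.  The
 * rtnl lock is already held on that path, which is what this function
 * requires once the device is registered.  example_set_channels() is a
 * hypothetical name.
 *
 *	static int example_set_channels(struct net_device *dev,
 *					struct ethtool_channels *ch)
 *	{
 *		int err;
 *
 *		err = netif_set_real_num_tx_queues(dev, ch->combined_count);
 *		if (err)
 *			return err;
 *		return netif_set_real_num_rx_queues(dev, ch->combined_count);
 *	}
 */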
3008 
3009 #ifdef CONFIG_SYSFS
3010 /**
3011  *	netif_set_real_num_rx_queues - set actual number of RX queues used
3012  *	@dev: Network device
3013  *	@rxq: Actual number of RX queues
3014  *
3015  *	This must be called either with the rtnl_lock held or before
3016  *	registration of the net device.  Returns 0 on success, or a
3017  *	negative error code.  If called before registration, it always
3018  *	succeeds.
3019  */
3020 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
3021 {
3022 	int rc;
3023 
3024 	if (rxq < 1 || rxq > dev->num_rx_queues)
3025 		return -EINVAL;
3026 
3027 	if (dev->reg_state == NETREG_REGISTERED) {
3028 		ASSERT_RTNL();
3029 
3030 		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
3031 						  rxq);
3032 		if (rc)
3033 			return rc;
3034 	}
3035 
3036 	dev->real_num_rx_queues = rxq;
3037 	return 0;
3038 }
3039 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
3040 #endif
3041 
3042 /**
3043  * netif_get_num_default_rss_queues - default number of RSS queues
3044  *
3045  * This routine should set an upper limit on the number of RSS queues
3046  * used by default by multiqueue devices.
3047  */
3048 int netif_get_num_default_rss_queues(void)
3049 {
3050 	return is_kdump_kernel() ?
3051 		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
3052 }
3053 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
3054 
3055 static void __netif_reschedule(struct Qdisc *q)
3056 {
3057 	struct softnet_data *sd;
3058 	unsigned long flags;
3059 
3060 	local_irq_save(flags);
3061 	sd = this_cpu_ptr(&softnet_data);
3062 	q->next_sched = NULL;
3063 	*sd->output_queue_tailp = q;
3064 	sd->output_queue_tailp = &q->next_sched;
3065 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3066 	local_irq_restore(flags);
3067 }
3068 
3069 void __netif_schedule(struct Qdisc *q)
3070 {
3071 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3072 		__netif_reschedule(q);
3073 }
3074 EXPORT_SYMBOL(__netif_schedule);
3075 
3076 struct dev_kfree_skb_cb {
3077 	enum skb_free_reason reason;
3078 };
3079 
3080 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
3081 {
3082 	return (struct dev_kfree_skb_cb *)skb->cb;
3083 }
3084 
3085 void netif_schedule_queue(struct netdev_queue *txq)
3086 {
3087 	rcu_read_lock();
3088 	if (!netif_xmit_stopped(txq)) {
3089 		struct Qdisc *q = rcu_dereference(txq->qdisc);
3090 
3091 		__netif_schedule(q);
3092 	}
3093 	rcu_read_unlock();
3094 }
3095 EXPORT_SYMBOL(netif_schedule_queue);
3096 
3097 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3098 {
3099 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3100 		struct Qdisc *q;
3101 
3102 		rcu_read_lock();
3103 		q = rcu_dereference(dev_queue->qdisc);
3104 		__netif_schedule(q);
3105 		rcu_read_unlock();
3106 	}
3107 }
3108 EXPORT_SYMBOL(netif_tx_wake_queue);
3109 
3110 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
3111 {
3112 	unsigned long flags;
3113 
3114 	if (unlikely(!skb))
3115 		return;
3116 
3117 	if (likely(refcount_read(&skb->users) == 1)) {
3118 		smp_rmb();
3119 		refcount_set(&skb->users, 0);
3120 	} else if (likely(!refcount_dec_and_test(&skb->users))) {
3121 		return;
3122 	}
3123 	get_kfree_skb_cb(skb)->reason = reason;
3124 	local_irq_save(flags);
3125 	skb->next = __this_cpu_read(softnet_data.completion_queue);
3126 	__this_cpu_write(softnet_data.completion_queue, skb);
3127 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3128 	local_irq_restore(flags);
3129 }
3130 EXPORT_SYMBOL(__dev_kfree_skb_irq);
3131 
3132 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
3133 {
3134 	if (in_irq() || irqs_disabled())
3135 		__dev_kfree_skb_irq(skb, reason);
3136 	else
3137 		dev_kfree_skb(skb);
3138 }
3139 EXPORT_SYMBOL(__dev_kfree_skb_any);
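/*
 * Illustrative sketch (not part of this file): the usual consumers of the
 * helpers above are the dev_kfree_skb_any()/dev_consume_skb_any() wrappers,
 * called from paths that may run in hard IRQ context, e.g. a TX completion
 * handler.  example_clean_tx_irq(), struct example_ring and
 * example_next_completed() are hypothetical names.
 *
 *	static void example_clean_tx_irq(struct example_ring *ring)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = example_next_completed(ring)))
 *			dev_consume_skb_any(skb);
 *	}
 */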
3140 
3141 
3142 /**
3143  * netif_device_detach - mark device as removed
3144  * @dev: network device
3145  *
3146  * Mark device as removed from system and therefore no longer available.
3147  */
3148 void netif_device_detach(struct net_device *dev)
3149 {
3150 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3151 	    netif_running(dev)) {
3152 		netif_tx_stop_all_queues(dev);
3153 	}
3154 }
3155 EXPORT_SYMBOL(netif_device_detach);
3156 
3157 /**
3158  * netif_device_attach - mark device as attached
3159  * @dev: network device
3160  *
3161  * Mark device as attached to the system and restart if needed.
3162  */
3163 void netif_device_attach(struct net_device *dev)
3164 {
3165 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3166 	    netif_running(dev)) {
3167 		netif_tx_wake_all_queues(dev);
3168 		__netdev_watchdog_up(dev);
3169 	}
3170 }
3171 EXPORT_SYMBOL(netif_device_attach);
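/*
 * Illustrative sketch (not part of this file): the classic suspend/resume
 * pairing for the two helpers above in a network driver.  example_suspend()
 * and example_resume() are hypothetical names.
 *
 *	static int example_suspend(struct device *d)
 *	{
 *		struct net_device *netdev = dev_get_drvdata(d);
 *
 *		netif_device_detach(netdev);
 *		return 0;
 *	}
 *
 *	static int example_resume(struct device *d)
 *	{
 *		struct net_device *netdev = dev_get_drvdata(d);
 *
 *		netif_device_attach(netdev);
 *		return 0;
 *	}
 */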
3172 
3173 /*
3174  * Returns a Tx hash based on the given packet descriptor and a Tx queues'
3175  * number to be used as a distribution range.
3176  */
3177 static u16 skb_tx_hash(const struct net_device *dev,
3178 		       const struct net_device *sb_dev,
3179 		       struct sk_buff *skb)
3180 {
3181 	u32 hash;
3182 	u16 qoffset = 0;
3183 	u16 qcount = dev->real_num_tx_queues;
3184 
3185 	if (dev->num_tc) {
3186 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3187 
3188 		qoffset = sb_dev->tc_to_txq[tc].offset;
3189 		qcount = sb_dev->tc_to_txq[tc].count;
3190 	}
3191 
3192 	if (skb_rx_queue_recorded(skb)) {
3193 		hash = skb_get_rx_queue(skb);
3194 		if (hash >= qoffset)
3195 			hash -= qoffset;
3196 		while (unlikely(hash >= qcount))
3197 			hash -= qcount;
3198 		return hash + qoffset;
3199 	}
3200 
3201 	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3202 }
3203 
3204 static void skb_warn_bad_offload(const struct sk_buff *skb)
3205 {
3206 	static const netdev_features_t null_features;
3207 	struct net_device *dev = skb->dev;
3208 	const char *name = "";
3209 
3210 	if (!net_ratelimit())
3211 		return;
3212 
3213 	if (dev) {
3214 		if (dev->dev.parent)
3215 			name = dev_driver_string(dev->dev.parent);
3216 		else
3217 			name = netdev_name(dev);
3218 	}
3219 	skb_dump(KERN_WARNING, skb, false);
3220 	WARN(1, "%s: caps=(%pNF, %pNF)\n",
3221 	     name, dev ? &dev->features : &null_features,
3222 	     skb->sk ? &skb->sk->sk_route_caps : &null_features);
3223 }
3224 
3225 /*
3226  * Invalidate hardware checksum when packet is to be mangled, and
3227  * complete checksum manually on outgoing path.
3228  */
3229 int skb_checksum_help(struct sk_buff *skb)
3230 {
3231 	__wsum csum;
3232 	int ret = 0, offset;
3233 
3234 	if (skb->ip_summed == CHECKSUM_COMPLETE)
3235 		goto out_set_summed;
3236 
3237 	if (unlikely(skb_is_gso(skb))) {
3238 		skb_warn_bad_offload(skb);
3239 		return -EINVAL;
3240 	}
3241 
3242 	/* Before computing a checksum, we should make sure no frag could
3243 	 * be modified by an external entity : checksum could be wrong.
3244 	 */
3245 	if (skb_has_shared_frag(skb)) {
3246 		ret = __skb_linearize(skb);
3247 		if (ret)
3248 			goto out;
3249 	}
3250 
3251 	offset = skb_checksum_start_offset(skb);
3252 	BUG_ON(offset >= skb_headlen(skb));
3253 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
3254 
3255 	offset += skb->csum_offset;
3256 	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
3257 
3258 	ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3259 	if (ret)
3260 		goto out;
3261 
3262 	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3263 out_set_summed:
3264 	skb->ip_summed = CHECKSUM_NONE;
3265 out:
3266 	return ret;
3267 }
3268 EXPORT_SYMBOL(skb_checksum_help);
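/*
 * Illustrative sketch (not part of this file): a driver that cannot offload
 * the checksum for a particular frame falling back to software before
 * handing the frame to its hardware.  example_can_csum() is a hypothetical
 * capability check.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !example_can_csum(skb) && skb_checksum_help(skb)) {
 *		dev_kfree_skb_any(skb);
 *		return NETDEV_TX_OK;
 *	}
 */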
3269 
3270 int skb_crc32c_csum_help(struct sk_buff *skb)
3271 {
3272 	__le32 crc32c_csum;
3273 	int ret = 0, offset, start;
3274 
3275 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3276 		goto out;
3277 
3278 	if (unlikely(skb_is_gso(skb)))
3279 		goto out;
3280 
3281 	/* Before computing a checksum, we should make sure no frag could
3282 	 * be modified by an external entity : checksum could be wrong.
3283 	 */
3284 	if (unlikely(skb_has_shared_frag(skb))) {
3285 		ret = __skb_linearize(skb);
3286 		if (ret)
3287 			goto out;
3288 	}
3289 	start = skb_checksum_start_offset(skb);
3290 	offset = start + offsetof(struct sctphdr, checksum);
3291 	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3292 		ret = -EINVAL;
3293 		goto out;
3294 	}
3295 
3296 	ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3297 	if (ret)
3298 		goto out;
3299 
3300 	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3301 						  skb->len - start, ~(__u32)0,
3302 						  crc32c_csum_stub));
3303 	*(__le32 *)(skb->data + offset) = crc32c_csum;
3304 	skb->ip_summed = CHECKSUM_NONE;
3305 	skb->csum_not_inet = 0;
3306 out:
3307 	return ret;
3308 }
3309 
3310 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3311 {
3312 	__be16 type = skb->protocol;
3313 
3314 	/* Tunnel gso handlers can set protocol to ethernet. */
3315 	if (type == htons(ETH_P_TEB)) {
3316 		struct ethhdr *eth;
3317 
3318 		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3319 			return 0;
3320 
3321 		eth = (struct ethhdr *)skb->data;
3322 		type = eth->h_proto;
3323 	}
3324 
3325 	return __vlan_get_protocol(skb, type, depth);
3326 }
3327 
3328 /**
3329  *	skb_mac_gso_segment - mac layer segmentation handler.
3330  *	@skb: buffer to segment
3331  *	@features: features for the output path (see dev->features)
3332  */
3333 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3334 				    netdev_features_t features)
3335 {
3336 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
3337 	struct packet_offload *ptype;
3338 	int vlan_depth = skb->mac_len;
3339 	__be16 type = skb_network_protocol(skb, &vlan_depth);
3340 
3341 	if (unlikely(!type))
3342 		return ERR_PTR(-EINVAL);
3343 
3344 	__skb_pull(skb, vlan_depth);
3345 
3346 	rcu_read_lock();
3347 	list_for_each_entry_rcu(ptype, &offload_base, list) {
3348 		if (ptype->type == type && ptype->callbacks.gso_segment) {
3349 			segs = ptype->callbacks.gso_segment(skb, features);
3350 			break;
3351 		}
3352 	}
3353 	rcu_read_unlock();
3354 
3355 	__skb_push(skb, skb->data - skb_mac_header(skb));
3356 
3357 	return segs;
3358 }
3359 EXPORT_SYMBOL(skb_mac_gso_segment);
3360 
3361 
3362 /* openvswitch calls this on rx path, so we need a different check.
3363  */
3364 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
3365 {
3366 	if (tx_path)
3367 		return skb->ip_summed != CHECKSUM_PARTIAL &&
3368 		       skb->ip_summed != CHECKSUM_UNNECESSARY;
3369 
3370 	return skb->ip_summed == CHECKSUM_NONE;
3371 }
3372 
3373 /**
3374  *	__skb_gso_segment - Perform segmentation on skb.
3375  *	@skb: buffer to segment
3376  *	@features: features for the output path (see dev->features)
3377  *	@tx_path: whether it is called in TX path
3378  *
3379  *	This function segments the given skb and returns a list of segments.
3380  *
3381  *	It may return NULL if the skb requires no segmentation.  This is
3382  *	only possible when GSO is used for verifying header integrity.
3383  *
3384  *	Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
3385  */
3386 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3387 				  netdev_features_t features, bool tx_path)
3388 {
3389 	struct sk_buff *segs;
3390 
3391 	if (unlikely(skb_needs_check(skb, tx_path))) {
3392 		int err;
3393 
3394 		/* We're going to init ->check field in TCP or UDP header */
3395 		err = skb_cow_head(skb, 0);
3396 		if (err < 0)
3397 			return ERR_PTR(err);
3398 	}
3399 
3400 	/* Only report GSO partial support if it will enable us to
3401 	 * support segmentation on this frame without needing additional
3402 	 * work.
3403 	 */
3404 	if (features & NETIF_F_GSO_PARTIAL) {
3405 		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3406 		struct net_device *dev = skb->dev;
3407 
3408 		partial_features |= dev->features & dev->gso_partial_features;
3409 		if (!skb_gso_ok(skb, features | partial_features))
3410 			features &= ~NETIF_F_GSO_PARTIAL;
3411 	}
3412 
3413 	BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
3414 		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3415 
3416 	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3417 	SKB_GSO_CB(skb)->encap_level = 0;
3418 
3419 	skb_reset_mac_header(skb);
3420 	skb_reset_mac_len(skb);
3421 
3422 	segs = skb_mac_gso_segment(skb, features);
3423 
3424 	if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
3425 		skb_warn_bad_offload(skb);
3426 
3427 	return segs;
3428 }
3429 EXPORT_SYMBOL(__skb_gso_segment);
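/*
 * Illustrative sketch (not part of this file): software fallback when the
 * hardware cannot handle a GSO frame — segment it and transmit the
 * resulting list, mirroring what validate_xmit_skb() does further down.
 * skb_gso_segment() is the tx-path wrapper around the function above;
 * example_xmit_one() is a hypothetical name.
 *
 *	struct sk_buff *segs, *seg, *next;
 *
 *	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (segs) {
 *		consume_skb(skb);
 *		skb = segs;
 *	}
 *	skb_list_walk_safe(skb, seg, next) {
 *		skb_mark_not_on_list(seg);
 *		example_xmit_one(seg);
 *	}
 */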
3430 
3431 /* Take action when hardware reception checksum errors are detected. */
3432 #ifdef CONFIG_BUG
3433 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3434 {
3435 	if (net_ratelimit()) {
3436 		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
3437 		skb_dump(KERN_ERR, skb, true);
3438 		dump_stack();
3439 	}
3440 }
3441 EXPORT_SYMBOL(netdev_rx_csum_fault);
3442 #endif
3443 
3444 /* XXX: check that highmem exists at all on the given machine. */
3445 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3446 {
3447 #ifdef CONFIG_HIGHMEM
3448 	int i;
3449 
3450 	if (!(dev->features & NETIF_F_HIGHDMA)) {
3451 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3452 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3453 
3454 			if (PageHighMem(skb_frag_page(frag)))
3455 				return 1;
3456 		}
3457 	}
3458 #endif
3459 	return 0;
3460 }
3461 
3462 /* If MPLS offload request, verify we are testing hardware MPLS features
3463  * instead of standard features for the netdev.
3464  */
3465 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3466 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3467 					   netdev_features_t features,
3468 					   __be16 type)
3469 {
3470 	if (eth_p_mpls(type))
3471 		features &= skb->dev->mpls_features;
3472 
3473 	return features;
3474 }
3475 #else
3476 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3477 					   netdev_features_t features,
3478 					   __be16 type)
3479 {
3480 	return features;
3481 }
3482 #endif
3483 
3484 static netdev_features_t harmonize_features(struct sk_buff *skb,
3485 	netdev_features_t features)
3486 {
3487 	__be16 type;
3488 
3489 	type = skb_network_protocol(skb, NULL);
3490 	features = net_mpls_features(skb, features, type);
3491 
3492 	if (skb->ip_summed != CHECKSUM_NONE &&
3493 	    !can_checksum_protocol(features, type)) {
3494 		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3495 	}
3496 	if (illegal_highdma(skb->dev, skb))
3497 		features &= ~NETIF_F_SG;
3498 
3499 	return features;
3500 }
3501 
3502 netdev_features_t passthru_features_check(struct sk_buff *skb,
3503 					  struct net_device *dev,
3504 					  netdev_features_t features)
3505 {
3506 	return features;
3507 }
3508 EXPORT_SYMBOL(passthru_features_check);
3509 
3510 static netdev_features_t dflt_features_check(struct sk_buff *skb,
3511 					     struct net_device *dev,
3512 					     netdev_features_t features)
3513 {
3514 	return vlan_features_check(skb, features);
3515 }
3516 
3517 static netdev_features_t gso_features_check(const struct sk_buff *skb,
3518 					    struct net_device *dev,
3519 					    netdev_features_t features)
3520 {
3521 	u16 gso_segs = skb_shinfo(skb)->gso_segs;
3522 
3523 	if (gso_segs > dev->gso_max_segs)
3524 		return features & ~NETIF_F_GSO_MASK;
3525 
3526 	if (!skb_shinfo(skb)->gso_type) {
3527 		skb_warn_bad_offload(skb);
3528 		return features & ~NETIF_F_GSO_MASK;
3529 	}
3530 
3531 	/* Support for GSO partial features requires software
3532 	 * intervention before we can actually process the packets
3533 	 * so we need to strip support for any partial features now
3534 	 * and we can pull them back in after we have partially
3535 	 * segmented the frame.
3536 	 */
3537 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3538 		features &= ~dev->gso_partial_features;
3539 
3540 	/* Make sure to clear the IPv4 ID mangling feature if the
3541 	 * IPv4 header has the potential to be fragmented.
3542 	 */
3543 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3544 		struct iphdr *iph = skb->encapsulation ?
3545 				    inner_ip_hdr(skb) : ip_hdr(skb);
3546 
3547 		if (!(iph->frag_off & htons(IP_DF)))
3548 			features &= ~NETIF_F_TSO_MANGLEID;
3549 	}
3550 
3551 	return features;
3552 }
3553 
3554 netdev_features_t netif_skb_features(struct sk_buff *skb)
3555 {
3556 	struct net_device *dev = skb->dev;
3557 	netdev_features_t features = dev->features;
3558 
3559 	if (skb_is_gso(skb))
3560 		features = gso_features_check(skb, dev, features);
3561 
3562 	/* If encapsulation offload request, verify we are testing
3563 	 * hardware encapsulation features instead of standard
3564 	 * features for the netdev
3565 	 */
3566 	if (skb->encapsulation)
3567 		features &= dev->hw_enc_features;
3568 
3569 	if (skb_vlan_tagged(skb))
3570 		features = netdev_intersect_features(features,
3571 						     dev->vlan_features |
3572 						     NETIF_F_HW_VLAN_CTAG_TX |
3573 						     NETIF_F_HW_VLAN_STAG_TX);
3574 
3575 	if (dev->netdev_ops->ndo_features_check)
3576 		features &= dev->netdev_ops->ndo_features_check(skb, dev,
3577 								features);
3578 	else
3579 		features &= dflt_features_check(skb, dev, features);
3580 
3581 	return harmonize_features(skb, features);
3582 }
3583 EXPORT_SYMBOL(netif_skb_features);
3584 
3585 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3586 		    struct netdev_queue *txq, bool more)
3587 {
3588 	unsigned int len;
3589 	int rc;
3590 
3591 	if (dev_nit_active(dev))
3592 		dev_queue_xmit_nit(skb, dev);
3593 
3594 	len = skb->len;
3595 	PRANDOM_ADD_NOISE(skb, dev, txq, len + jiffies);
3596 	trace_net_dev_start_xmit(skb, dev);
3597 	rc = netdev_start_xmit(skb, dev, txq, more);
3598 	trace_net_dev_xmit(skb, rc, dev, len);
3599 
3600 	return rc;
3601 }
3602 
3603 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3604 				    struct netdev_queue *txq, int *ret)
3605 {
3606 	struct sk_buff *skb = first;
3607 	int rc = NETDEV_TX_OK;
3608 
3609 	while (skb) {
3610 		struct sk_buff *next = skb->next;
3611 
3612 		skb_mark_not_on_list(skb);
3613 		rc = xmit_one(skb, dev, txq, next != NULL);
3614 		if (unlikely(!dev_xmit_complete(rc))) {
3615 			skb->next = next;
3616 			goto out;
3617 		}
3618 
3619 		skb = next;
3620 		if (netif_tx_queue_stopped(txq) && skb) {
3621 			rc = NETDEV_TX_BUSY;
3622 			break;
3623 		}
3624 	}
3625 
3626 out:
3627 	*ret = rc;
3628 	return skb;
3629 }
3630 
3631 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3632 					  netdev_features_t features)
3633 {
3634 	if (skb_vlan_tag_present(skb) &&
3635 	    !vlan_hw_offload_capable(features, skb->vlan_proto))
3636 		skb = __vlan_hwaccel_push_inside(skb);
3637 	return skb;
3638 }
3639 
3640 int skb_csum_hwoffload_help(struct sk_buff *skb,
3641 			    const netdev_features_t features)
3642 {
3643 	if (unlikely(skb_csum_is_sctp(skb)))
3644 		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3645 			skb_crc32c_csum_help(skb);
3646 
3647 	if (features & NETIF_F_HW_CSUM)
3648 		return 0;
3649 
3650 	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3651 		switch (skb->csum_offset) {
3652 		case offsetof(struct tcphdr, check):
3653 		case offsetof(struct udphdr, check):
3654 			return 0;
3655 		}
3656 	}
3657 
3658 	return skb_checksum_help(skb);
3659 }
3660 EXPORT_SYMBOL(skb_csum_hwoffload_help);
3661 
3662 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3663 {
3664 	netdev_features_t features;
3665 
3666 	features = netif_skb_features(skb);
3667 	skb = validate_xmit_vlan(skb, features);
3668 	if (unlikely(!skb))
3669 		goto out_null;
3670 
3671 	skb = sk_validate_xmit_skb(skb, dev);
3672 	if (unlikely(!skb))
3673 		goto out_null;
3674 
3675 	if (netif_needs_gso(skb, features)) {
3676 		struct sk_buff *segs;
3677 
3678 		segs = skb_gso_segment(skb, features);
3679 		if (IS_ERR(segs)) {
3680 			goto out_kfree_skb;
3681 		} else if (segs) {
3682 			consume_skb(skb);
3683 			skb = segs;
3684 		}
3685 	} else {
3686 		if (skb_needs_linearize(skb, features) &&
3687 		    __skb_linearize(skb))
3688 			goto out_kfree_skb;
3689 
3690 		/* If packet is not checksummed and device does not
3691 		 * support checksumming for this protocol, complete
3692 		 * checksumming here.
3693 		 */
3694 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
3695 			if (skb->encapsulation)
3696 				skb_set_inner_transport_header(skb,
3697 							       skb_checksum_start_offset(skb));
3698 			else
3699 				skb_set_transport_header(skb,
3700 							 skb_checksum_start_offset(skb));
3701 			if (skb_csum_hwoffload_help(skb, features))
3702 				goto out_kfree_skb;
3703 		}
3704 	}
3705 
3706 	skb = validate_xmit_xfrm(skb, features, again);
3707 
3708 	return skb;
3709 
3710 out_kfree_skb:
3711 	kfree_skb(skb);
3712 out_null:
3713 	atomic_long_inc(&dev->tx_dropped);
3714 	return NULL;
3715 }
3716 
3717 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3718 {
3719 	struct sk_buff *next, *head = NULL, *tail;
3720 
3721 	for (; skb != NULL; skb = next) {
3722 		next = skb->next;
3723 		skb_mark_not_on_list(skb);
3724 
3725 	/* in case skb won't be segmented, point to itself */
3726 		skb->prev = skb;
3727 
3728 		skb = validate_xmit_skb(skb, dev, again);
3729 		if (!skb)
3730 			continue;
3731 
3732 		if (!head)
3733 			head = skb;
3734 		else
3735 			tail->next = skb;
3736 		/* If skb was segmented, skb->prev points to
3737 		 * the last segment. If not, it still contains skb.
3738 		 */
3739 		tail = skb->prev;
3740 	}
3741 	return head;
3742 }
3743 EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3744 
3745 static void qdisc_pkt_len_init(struct sk_buff *skb)
3746 {
3747 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
3748 
3749 	qdisc_skb_cb(skb)->pkt_len = skb->len;
3750 
3751 	/* To get a more precise estimate of bytes sent on the wire,
3752 	 * we add the header size of all segments to pkt_len.
3753 	 */
3754 	if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3755 		unsigned int hdr_len;
3756 		u16 gso_segs = shinfo->gso_segs;
3757 
3758 		/* mac layer + network layer */
3759 		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3760 
3761 		/* + transport layer */
3762 		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3763 			const struct tcphdr *th;
3764 			struct tcphdr _tcphdr;
3765 
3766 			th = skb_header_pointer(skb, skb_transport_offset(skb),
3767 						sizeof(_tcphdr), &_tcphdr);
3768 			if (likely(th))
3769 				hdr_len += __tcp_hdrlen(th);
3770 		} else {
3771 			struct udphdr _udphdr;
3772 
3773 			if (skb_header_pointer(skb, skb_transport_offset(skb),
3774 					       sizeof(_udphdr), &_udphdr))
3775 				hdr_len += sizeof(struct udphdr);
3776 		}
3777 
3778 		if (shinfo->gso_type & SKB_GSO_DODGY)
3779 			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3780 						shinfo->gso_size);
3781 
3782 		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3783 	}
3784 }
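
/*
 * Worked example (illustrative note, not part of the original source): for
 * a TSO skb carrying 4 segments behind an Ethernet + IPv4 + TCP header of
 * 14 + 20 + 20 = 54 bytes, the function above accounts for the headers of
 * the 3 extra segments the NIC will emit:
 *
 *	pkt_len = skb->len + (gso_segs - 1) * hdr_len
 *	        = skb->len + 3 * 54
 *
 * so qdisc byte counters better approximate what actually goes on the wire.
 */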
3785 
3786 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3787 				 struct net_device *dev,
3788 				 struct netdev_queue *txq)
3789 {
3790 	spinlock_t *root_lock = qdisc_lock(q);
3791 	struct sk_buff *to_free = NULL;
3792 	bool contended;
3793 	int rc;
3794 
3795 	qdisc_calculate_pkt_len(skb, q);
3796 
3797 	if (q->flags & TCQ_F_NOLOCK) {
3798 		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3799 		qdisc_run(q);
3800 
3801 		if (unlikely(to_free))
3802 			kfree_skb_list(to_free);
3803 		return rc;
3804 	}
3805 
3806 	/*
3807 	 * Heuristic to force contended enqueues to serialize on a
3808 	 * separate lock before trying to get the qdisc main lock.
3809 	 * This permits the qdisc->running owner to get the lock more
3810 	 * often and dequeue packets faster.
3811 	 */
3812 	contended = qdisc_is_running(q);
3813 	if (unlikely(contended))
3814 		spin_lock(&q->busylock);
3815 
3816 	spin_lock(root_lock);
3817 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3818 		__qdisc_drop(skb, &to_free);
3819 		rc = NET_XMIT_DROP;
3820 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3821 		   qdisc_run_begin(q)) {
3822 		/*
3823 		 * This is a work-conserving queue; there are no old skbs
3824 		 * waiting to be sent out; and the qdisc is not running -
3825 		 * xmit the skb directly.
3826 		 */
3827 
3828 		qdisc_bstats_update(q, skb);
3829 
3830 		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3831 			if (unlikely(contended)) {
3832 				spin_unlock(&q->busylock);
3833 				contended = false;
3834 			}
3835 			__qdisc_run(q);
3836 		}
3837 
3838 		qdisc_run_end(q);
3839 		rc = NET_XMIT_SUCCESS;
3840 	} else {
3841 		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3842 		if (qdisc_run_begin(q)) {
3843 			if (unlikely(contended)) {
3844 				spin_unlock(&q->busylock);
3845 				contended = false;
3846 			}
3847 			__qdisc_run(q);
3848 			qdisc_run_end(q);
3849 		}
3850 	}
3851 	spin_unlock(root_lock);
3852 	if (unlikely(to_free))
3853 		kfree_skb_list(to_free);
3854 	if (unlikely(contended))
3855 		spin_unlock(&q->busylock);
3856 	return rc;
3857 }
3858 
3859 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3860 static void skb_update_prio(struct sk_buff *skb)
3861 {
3862 	const struct netprio_map *map;
3863 	const struct sock *sk;
3864 	unsigned int prioidx;
3865 
3866 	if (skb->priority)
3867 		return;
3868 	map = rcu_dereference_bh(skb->dev->priomap);
3869 	if (!map)
3870 		return;
3871 	sk = skb_to_full_sk(skb);
3872 	if (!sk)
3873 		return;
3874 
3875 	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3876 
3877 	if (prioidx < map->priomap_len)
3878 		skb->priority = map->priomap[prioidx];
3879 }
3880 #else
3881 #define skb_update_prio(skb)
3882 #endif
3883 
3884 /**
3885  *	dev_loopback_xmit - loop back @skb
3886  *	@net: network namespace this loopback is happening in
3887  *	@sk:  sk needed to be a netfilter okfn
3888  *	@skb: buffer to transmit
3889  */
3890 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3891 {
3892 	skb_reset_mac_header(skb);
3893 	__skb_pull(skb, skb_network_offset(skb));
3894 	skb->pkt_type = PACKET_LOOPBACK;
3895 	skb->ip_summed = CHECKSUM_UNNECESSARY;
3896 	WARN_ON(!skb_dst(skb));
3897 	skb_dst_force(skb);
3898 	netif_rx_ni(skb);
3899 	return 0;
3900 }
3901 EXPORT_SYMBOL(dev_loopback_xmit);
3902 
3903 #ifdef CONFIG_NET_EGRESS
3904 static struct sk_buff *
3905 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3906 {
3907 	struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
3908 	struct tcf_result cl_res;
3909 
3910 	if (!miniq)
3911 		return skb;
3912 
3913 	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3914 	qdisc_skb_cb(skb)->mru = 0;
3915 	qdisc_skb_cb(skb)->post_ct = false;
3916 	mini_qdisc_bstats_cpu_update(miniq, skb);
3917 
3918 	switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
3919 	case TC_ACT_OK:
3920 	case TC_ACT_RECLASSIFY:
3921 		skb->tc_index = TC_H_MIN(cl_res.classid);
3922 		break;
3923 	case TC_ACT_SHOT:
3924 		mini_qdisc_qstats_cpu_drop(miniq);
3925 		*ret = NET_XMIT_DROP;
3926 		kfree_skb(skb);
3927 		return NULL;
3928 	case TC_ACT_STOLEN:
3929 	case TC_ACT_QUEUED:
3930 	case TC_ACT_TRAP:
3931 		*ret = NET_XMIT_SUCCESS;
3932 		consume_skb(skb);
3933 		return NULL;
3934 	case TC_ACT_REDIRECT:
3935 		/* No need to push/pop skb's mac_header here on egress! */
3936 		skb_do_redirect(skb);
3937 		*ret = NET_XMIT_SUCCESS;
3938 		return NULL;
3939 	default:
3940 		break;
3941 	}
3942 
3943 	return skb;
3944 }
3945 #endif /* CONFIG_NET_EGRESS */
3946 
3947 #ifdef CONFIG_XPS
3948 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3949 			       struct xps_dev_maps *dev_maps, unsigned int tci)
3950 {
3951 	struct xps_map *map;
3952 	int queue_index = -1;
3953 
3954 	if (dev->num_tc) {
3955 		tci *= dev->num_tc;
3956 		tci += netdev_get_prio_tc_map(dev, skb->priority);
3957 	}
3958 
3959 	map = rcu_dereference(dev_maps->attr_map[tci]);
3960 	if (map) {
3961 		if (map->len == 1)
3962 			queue_index = map->queues[0];
3963 		else
3964 			queue_index = map->queues[reciprocal_scale(
3965 						skb_get_hash(skb), map->len)];
3966 		if (unlikely(queue_index >= dev->real_num_tx_queues))
3967 			queue_index = -1;
3968 	}
3969 	return queue_index;
3970 }
3971 #endif
3972 
3973 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
3974 			 struct sk_buff *skb)
3975 {
3976 #ifdef CONFIG_XPS
3977 	struct xps_dev_maps *dev_maps;
3978 	struct sock *sk = skb->sk;
3979 	int queue_index = -1;
3980 
3981 	if (!static_key_false(&xps_needed))
3982 		return -1;
3983 
3984 	rcu_read_lock();
3985 	if (!static_key_false(&xps_rxqs_needed))
3986 		goto get_cpus_map;
3987 
3988 	dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
3989 	if (dev_maps) {
3990 		int tci = sk_rx_queue_get(sk);
3991 
3992 		if (tci >= 0 && tci < dev->num_rx_queues)
3993 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3994 							  tci);
3995 	}
3996 
3997 get_cpus_map:
3998 	if (queue_index < 0) {
3999 		dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
4000 		if (dev_maps) {
4001 			unsigned int tci = skb->sender_cpu - 1;
4002 
4003 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4004 							  tci);
4005 		}
4006 	}
4007 	rcu_read_unlock();
4008 
4009 	return queue_index;
4010 #else
4011 	return -1;
4012 #endif
4013 }
4014 
4015 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
4016 		     struct net_device *sb_dev)
4017 {
4018 	return 0;
4019 }
4020 EXPORT_SYMBOL(dev_pick_tx_zero);
4021 
4022 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
4023 		       struct net_device *sb_dev)
4024 {
4025 	return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
4026 }
4027 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
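
/*
 * Illustrative sketch (not part of the original file): a driver that wants
 * straight per-CPU TX queue selection can point its ndo_select_queue at the
 * helper above.  The my_netdev_ops/my_start_xmit names are hypothetical.
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_start_xmit   = my_start_xmit,
 *		.ndo_select_queue = dev_pick_tx_cpu_id,
 *	};
 */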
4028 
4029 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4030 		     struct net_device *sb_dev)
4031 {
4032 	struct sock *sk = skb->sk;
4033 	int queue_index = sk_tx_queue_get(sk);
4034 
4035 	sb_dev = sb_dev ? : dev;
4036 
4037 	if (queue_index < 0 || skb->ooo_okay ||
4038 	    queue_index >= dev->real_num_tx_queues) {
4039 		int new_index = get_xps_queue(dev, sb_dev, skb);
4040 
4041 		if (new_index < 0)
4042 			new_index = skb_tx_hash(dev, sb_dev, skb);
4043 
4044 		if (queue_index != new_index && sk &&
4045 		    sk_fullsock(sk) &&
4046 		    rcu_access_pointer(sk->sk_dst_cache))
4047 			sk_tx_queue_set(sk, new_index);
4048 
4049 		queue_index = new_index;
4050 	}
4051 
4052 	return queue_index;
4053 }
4054 EXPORT_SYMBOL(netdev_pick_tx);
4055 
4056 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4057 					 struct sk_buff *skb,
4058 					 struct net_device *sb_dev)
4059 {
4060 	int queue_index = 0;
4061 
4062 #ifdef CONFIG_XPS
4063 	u32 sender_cpu = skb->sender_cpu - 1;
4064 
4065 	if (sender_cpu >= (u32)NR_CPUS)
4066 		skb->sender_cpu = raw_smp_processor_id() + 1;
4067 #endif
4068 
4069 	if (dev->real_num_tx_queues != 1) {
4070 		const struct net_device_ops *ops = dev->netdev_ops;
4071 
4072 		if (ops->ndo_select_queue)
4073 			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4074 		else
4075 			queue_index = netdev_pick_tx(dev, skb, sb_dev);
4076 
4077 		queue_index = netdev_cap_txqueue(dev, queue_index);
4078 	}
4079 
4080 	skb_set_queue_mapping(skb, queue_index);
4081 	return netdev_get_tx_queue(dev, queue_index);
4082 }
4083 
4084 /**
4085  *	__dev_queue_xmit - transmit a buffer
4086  *	@skb: buffer to transmit
4087  *	@sb_dev: subordinate device used for L2 forwarding offload
4088  *
4089  *	Queue a buffer for transmission to a network device. The caller must
4090  *	have set the device and priority and built the buffer before calling
4091  *	this function. The function can be called from an interrupt.
4092  *
4093  *	A negative errno code is returned on a failure. A success does not
4094  *	guarantee the frame will be transmitted as it may be dropped due
4095  *	to congestion or traffic shaping.
4096  *
4097  * -----------------------------------------------------------------------------------
4098  *      I notice this method can also return errors from the queue disciplines,
4099  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
4100  *      be positive.
4101  *
4102  *      Regardless of the return value, the skb is consumed, so it is currently
4103  *      difficult to retry a send to this method.  (You can bump the ref count
4104  *      before sending to hold a reference for retry if you are careful.)
4105  *
4106  *      When calling this method, interrupts MUST be enabled.  This is because
4107  *      the BH enable code must have IRQs enabled so that it will not deadlock.
4108  *          --BLG
4109  */
4110 static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4111 {
4112 	struct net_device *dev = skb->dev;
4113 	struct netdev_queue *txq;
4114 	struct Qdisc *q;
4115 	int rc = -ENOMEM;
4116 	bool again = false;
4117 
4118 	skb_reset_mac_header(skb);
4119 
4120 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4121 		__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4122 
4123 	/* Disable soft irqs for various locks below. Also
4124 	 * stops preemption for RCU.
4125 	 */
4126 	rcu_read_lock_bh();
4127 
4128 	skb_update_prio(skb);
4129 
4130 	qdisc_pkt_len_init(skb);
4131 #ifdef CONFIG_NET_CLS_ACT
4132 	skb->tc_at_ingress = 0;
4133 # ifdef CONFIG_NET_EGRESS
4134 	if (static_branch_unlikely(&egress_needed_key)) {
4135 		skb = sch_handle_egress(skb, &rc, dev);
4136 		if (!skb)
4137 			goto out;
4138 	}
4139 # endif
4140 #endif
4141 	/* If device/qdisc don't need skb->dst, release it right now while
4142 	 * it's hot in this CPU's cache.
4143 	 */
4144 	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4145 		skb_dst_drop(skb);
4146 	else
4147 		skb_dst_force(skb);
4148 
4149 	txq = netdev_core_pick_tx(dev, skb, sb_dev);
4150 	q = rcu_dereference_bh(txq->qdisc);
4151 
4152 	trace_net_dev_queue(skb);
4153 	if (q->enqueue) {
4154 		rc = __dev_xmit_skb(skb, q, dev, txq);
4155 		goto out;
4156 	}
4157 
4158 	/* The device has no queue. Common case for software devices:
4159 	 * loopback, all sorts of tunnels...
4160 	 *
4161 	 * Really, it is unlikely that netif_tx_lock protection is necessary
4162 	 * here.  (e.g. loopback and IP tunnels are clean, ignoring statistics
4163 	 * counters.)
4164 	 * However, it is possible that they rely on the protection
4165 	 * made by us here.
4166 	 *
4167 	 * Check this and grab the lock: it is not prone to deadlocks.
4168 	 * Either way, a noqueue qdisc is even simpler 8)
4169 	 */
4170 	if (dev->flags & IFF_UP) {
4171 		int cpu = smp_processor_id(); /* ok because BHs are off */
4172 
4173 		if (txq->xmit_lock_owner != cpu) {
4174 			if (dev_xmit_recursion())
4175 				goto recursion_alert;
4176 
4177 			skb = validate_xmit_skb(skb, dev, &again);
4178 			if (!skb)
4179 				goto out;
4180 
4181 			PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
4182 			HARD_TX_LOCK(dev, txq, cpu);
4183 
4184 			if (!netif_xmit_stopped(txq)) {
4185 				dev_xmit_recursion_inc();
4186 				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4187 				dev_xmit_recursion_dec();
4188 				if (dev_xmit_complete(rc)) {
4189 					HARD_TX_UNLOCK(dev, txq);
4190 					goto out;
4191 				}
4192 			}
4193 			HARD_TX_UNLOCK(dev, txq);
4194 			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4195 					     dev->name);
4196 		} else {
4197 			/* Recursion is detected! It is possible,
4198 			 * unfortunately
4199 			 */
4200 recursion_alert:
4201 			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4202 					     dev->name);
4203 		}
4204 	}
4205 
4206 	rc = -ENETDOWN;
4207 	rcu_read_unlock_bh();
4208 
4209 	atomic_long_inc(&dev->tx_dropped);
4210 	kfree_skb_list(skb);
4211 	return rc;
4212 out:
4213 	rcu_read_unlock_bh();
4214 	return rc;
4215 }
4216 
4217 int dev_queue_xmit(struct sk_buff *skb)
4218 {
4219 	return __dev_queue_xmit(skb, NULL);
4220 }
4221 EXPORT_SYMBOL(dev_queue_xmit);
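
/*
 * Illustrative sketch (not part of the original file): a minimal sender
 * building a raw frame and handing it to dev_queue_xmit().  Remember that
 * the skb is consumed regardless of the return value.  my_send_frame() is a
 * hypothetical helper name.
 *
 *	static int my_send_frame(struct net_device *dev, const void *data,
 *				 unsigned int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
 *		if (!skb)
 *			return -ENOMEM;
 *
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *		skb_put_data(skb, data, len);
 *		skb->dev = dev;
 *		skb->protocol = htons(ETH_P_802_3);
 *
 *		return dev_queue_xmit(skb);
 *	}
 */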
4222 
4223 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
4224 {
4225 	return __dev_queue_xmit(skb, sb_dev);
4226 }
4227 EXPORT_SYMBOL(dev_queue_xmit_accel);
4228 
4229 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4230 {
4231 	struct net_device *dev = skb->dev;
4232 	struct sk_buff *orig_skb = skb;
4233 	struct netdev_queue *txq;
4234 	int ret = NETDEV_TX_BUSY;
4235 	bool again = false;
4236 
4237 	if (unlikely(!netif_running(dev) ||
4238 		     !netif_carrier_ok(dev)))
4239 		goto drop;
4240 
4241 	skb = validate_xmit_skb_list(skb, dev, &again);
4242 	if (skb != orig_skb)
4243 		goto drop;
4244 
4245 	skb_set_queue_mapping(skb, queue_id);
4246 	txq = skb_get_tx_queue(dev, skb);
4247 	PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
4248 
4249 	local_bh_disable();
4250 
4251 	dev_xmit_recursion_inc();
4252 	HARD_TX_LOCK(dev, txq, smp_processor_id());
4253 	if (!netif_xmit_frozen_or_drv_stopped(txq))
4254 		ret = netdev_start_xmit(skb, dev, txq, false);
4255 	HARD_TX_UNLOCK(dev, txq);
4256 	dev_xmit_recursion_dec();
4257 
4258 	local_bh_enable();
4259 	return ret;
4260 drop:
4261 	atomic_long_inc(&dev->tx_dropped);
4262 	kfree_skb_list(skb);
4263 	return NET_XMIT_DROP;
4264 }
4265 EXPORT_SYMBOL(__dev_direct_xmit);
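
/*
 * Illustrative note (not part of the original file): callers such as AF_XDP
 * normally use the dev_direct_xmit() inline wrapper from
 * <linux/netdevice.h> rather than __dev_direct_xmit() itself; the wrapper
 * frees the skb when the transmit did not complete.  A minimal sketch:
 *
 *	err = dev_direct_xmit(skb, queue_id);
 *	if (!dev_xmit_complete(err))
 *		(account the failure; the wrapper already freed the skb)
 */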
4266 
4267 /*************************************************************************
4268  *			Receiver routines
4269  *************************************************************************/
4270 
4271 int netdev_max_backlog __read_mostly = 1000;
4272 EXPORT_SYMBOL(netdev_max_backlog);
4273 
4274 int netdev_tstamp_prequeue __read_mostly = 1;
4275 int netdev_budget __read_mostly = 300;
4276 /* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
4277 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
4278 int weight_p __read_mostly = 64;           /* old backlog weight */
4279 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
4280 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
4281 int dev_rx_weight __read_mostly = 64;
4282 int dev_tx_weight __read_mostly = 64;
4283 /* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
4284 int gro_normal_batch __read_mostly = 8;
4285 
4286 /* Called with irq disabled */
4287 static inline void ____napi_schedule(struct softnet_data *sd,
4288 				     struct napi_struct *napi)
4289 {
4290 	struct task_struct *thread;
4291 
4292 	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
4293 		/* Paired with smp_mb__before_atomic() in
4294 		 * napi_enable()/dev_set_threaded().
4295 		 * Use READ_ONCE() to guarantee a complete
4296 		 * read on napi->thread. Only call
4297 		 * wake_up_process() when it's not NULL.
4298 		 */
4299 		thread = READ_ONCE(napi->thread);
4300 		if (thread) {
4301 			wake_up_process(thread);
4302 			return;
4303 		}
4304 	}
4305 
4306 	list_add_tail(&napi->poll_list, &sd->poll_list);
4307 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4308 }
4309 
4310 #ifdef CONFIG_RPS
4311 
4312 /* One global table that all flow-based protocols share. */
4313 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
4314 EXPORT_SYMBOL(rps_sock_flow_table);
4315 u32 rps_cpu_mask __read_mostly;
4316 EXPORT_SYMBOL(rps_cpu_mask);
4317 
4318 struct static_key_false rps_needed __read_mostly;
4319 EXPORT_SYMBOL(rps_needed);
4320 struct static_key_false rfs_needed __read_mostly;
4321 EXPORT_SYMBOL(rfs_needed);
4322 
4323 static struct rps_dev_flow *
4324 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4325 	    struct rps_dev_flow *rflow, u16 next_cpu)
4326 {
4327 	if (next_cpu < nr_cpu_ids) {
4328 #ifdef CONFIG_RFS_ACCEL
4329 		struct netdev_rx_queue *rxqueue;
4330 		struct rps_dev_flow_table *flow_table;
4331 		struct rps_dev_flow *old_rflow;
4332 		u32 flow_id;
4333 		u16 rxq_index;
4334 		int rc;
4335 
4336 		/* Should we steer this flow to a different hardware queue? */
4337 		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4338 		    !(dev->features & NETIF_F_NTUPLE))
4339 			goto out;
4340 		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4341 		if (rxq_index == skb_get_rx_queue(skb))
4342 			goto out;
4343 
4344 		rxqueue = dev->_rx + rxq_index;
4345 		flow_table = rcu_dereference(rxqueue->rps_flow_table);
4346 		if (!flow_table)
4347 			goto out;
4348 		flow_id = skb_get_hash(skb) & flow_table->mask;
4349 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4350 							rxq_index, flow_id);
4351 		if (rc < 0)
4352 			goto out;
4353 		old_rflow = rflow;
4354 		rflow = &flow_table->flows[flow_id];
4355 		rflow->filter = rc;
4356 		if (old_rflow->filter == rflow->filter)
4357 			old_rflow->filter = RPS_NO_FILTER;
4358 	out:
4359 #endif
4360 		rflow->last_qtail =
4361 			per_cpu(softnet_data, next_cpu).input_queue_head;
4362 	}
4363 
4364 	rflow->cpu = next_cpu;
4365 	return rflow;
4366 }
4367 
4368 /*
4369  * get_rps_cpu is called from netif_receive_skb and returns the target
4370  * CPU from the RPS map of the receiving queue for a given skb.
4371  * rcu_read_lock must be held on entry.
4372  */
4373 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4374 		       struct rps_dev_flow **rflowp)
4375 {
4376 	const struct rps_sock_flow_table *sock_flow_table;
4377 	struct netdev_rx_queue *rxqueue = dev->_rx;
4378 	struct rps_dev_flow_table *flow_table;
4379 	struct rps_map *map;
4380 	int cpu = -1;
4381 	u32 tcpu;
4382 	u32 hash;
4383 
4384 	if (skb_rx_queue_recorded(skb)) {
4385 		u16 index = skb_get_rx_queue(skb);
4386 
4387 		if (unlikely(index >= dev->real_num_rx_queues)) {
4388 			WARN_ONCE(dev->real_num_rx_queues > 1,
4389 				  "%s received packet on queue %u, but number "
4390 				  "of RX queues is %u\n",
4391 				  dev->name, index, dev->real_num_rx_queues);
4392 			goto done;
4393 		}
4394 		rxqueue += index;
4395 	}
4396 
4397 	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4398 
4399 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4400 	map = rcu_dereference(rxqueue->rps_map);
4401 	if (!flow_table && !map)
4402 		goto done;
4403 
4404 	skb_reset_network_header(skb);
4405 	hash = skb_get_hash(skb);
4406 	if (!hash)
4407 		goto done;
4408 
4409 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
4410 	if (flow_table && sock_flow_table) {
4411 		struct rps_dev_flow *rflow;
4412 		u32 next_cpu;
4413 		u32 ident;
4414 
4415 		/* First check into global flow table if there is a match */
4416 		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4417 		if ((ident ^ hash) & ~rps_cpu_mask)
4418 			goto try_rps;
4419 
4420 		next_cpu = ident & rps_cpu_mask;
4421 
4422 		/* OK, now we know there is a match,
4423 		 * we can look at the local (per receive queue) flow table
4424 		 */
4425 		rflow = &flow_table->flows[hash & flow_table->mask];
4426 		tcpu = rflow->cpu;
4427 
4428 		/*
4429 		 * If the desired CPU (where last recvmsg was done) is
4430 		 * different from current CPU (one in the rx-queue flow
4431 		 * table entry), switch if one of the following holds:
4432 		 *   - Current CPU is unset (>= nr_cpu_ids).
4433 		 *   - Current CPU is offline.
4434 		 *   - The current CPU's queue tail has advanced beyond the
4435 		 *     last packet that was enqueued using this table entry.
4436 		 *     This guarantees that all previous packets for the flow
4437 		 *     have been dequeued, thus preserving in-order delivery.
4438 		 */
4439 		if (unlikely(tcpu != next_cpu) &&
4440 		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4441 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4442 		      rflow->last_qtail)) >= 0)) {
4443 			tcpu = next_cpu;
4444 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4445 		}
4446 
4447 		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4448 			*rflowp = rflow;
4449 			cpu = tcpu;
4450 			goto done;
4451 		}
4452 	}
4453 
4454 try_rps:
4455 
4456 	if (map) {
4457 		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4458 		if (cpu_online(tcpu)) {
4459 			cpu = tcpu;
4460 			goto done;
4461 		}
4462 	}
4463 
4464 done:
4465 	return cpu;
4466 }
4467 
4468 #ifdef CONFIG_RFS_ACCEL
4469 
4470 /**
4471  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4472  * @dev: Device on which the filter was set
4473  * @rxq_index: RX queue index
4474  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4475  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4476  *
4477  * Drivers that implement ndo_rx_flow_steer() should periodically call
4478  * this function for each installed filter and remove the filters for
4479  * which it returns %true.
4480  */
4481 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4482 			 u32 flow_id, u16 filter_id)
4483 {
4484 	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4485 	struct rps_dev_flow_table *flow_table;
4486 	struct rps_dev_flow *rflow;
4487 	bool expire = true;
4488 	unsigned int cpu;
4489 
4490 	rcu_read_lock();
4491 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4492 	if (flow_table && flow_id <= flow_table->mask) {
4493 		rflow = &flow_table->flows[flow_id];
4494 		cpu = READ_ONCE(rflow->cpu);
4495 		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4496 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4497 			   rflow->last_qtail) <
4498 		     (int)(10 * flow_table->mask)))
4499 			expire = false;
4500 	}
4501 	rcu_read_unlock();
4502 	return expire;
4503 }
4504 EXPORT_SYMBOL(rps_may_expire_flow);
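
/*
 * Illustrative sketch (not part of the original file): how a driver that
 * implements ndo_rx_flow_steer() might periodically walk its installed
 * filters and expire the stale ones.  The my_adapter/my_filter structures
 * and my_remove_filter() are hypothetical.
 *
 *	static void my_expire_filters(struct my_adapter *adap)
 *	{
 *		struct my_filter *f;
 *		unsigned int i;
 *
 *		for (i = 0; i < adap->num_filters; i++) {
 *			f = &adap->filters[i];
 *			if (!f->installed)
 *				continue;
 *			if (rps_may_expire_flow(adap->netdev, f->rxq_index,
 *						f->flow_id, f->filter_id))
 *				my_remove_filter(adap, f);
 *		}
 *	}
 */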
4505 
4506 #endif /* CONFIG_RFS_ACCEL */
4507 
4508 /* Called from hardirq (IPI) context */
4509 static void rps_trigger_softirq(void *data)
4510 {
4511 	struct softnet_data *sd = data;
4512 
4513 	____napi_schedule(sd, &sd->backlog);
4514 	sd->received_rps++;
4515 }
4516 
4517 #endif /* CONFIG_RPS */
4518 
4519 /*
4520  * Check if this softnet_data structure belongs to another CPU.
4521  * If yes, queue it to our IPI list and return 1
4522  * If no, return 0
4523  */
4524 static int rps_ipi_queued(struct softnet_data *sd)
4525 {
4526 #ifdef CONFIG_RPS
4527 	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4528 
4529 	if (sd != mysd) {
4530 		sd->rps_ipi_next = mysd->rps_ipi_list;
4531 		mysd->rps_ipi_list = sd;
4532 
4533 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4534 		return 1;
4535 	}
4536 #endif /* CONFIG_RPS */
4537 	return 0;
4538 }
4539 
4540 #ifdef CONFIG_NET_FLOW_LIMIT
4541 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4542 #endif
4543 
4544 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4545 {
4546 #ifdef CONFIG_NET_FLOW_LIMIT
4547 	struct sd_flow_limit *fl;
4548 	struct softnet_data *sd;
4549 	unsigned int old_flow, new_flow;
4550 
4551 	if (qlen < (netdev_max_backlog >> 1))
4552 		return false;
4553 
4554 	sd = this_cpu_ptr(&softnet_data);
4555 
4556 	rcu_read_lock();
4557 	fl = rcu_dereference(sd->flow_limit);
4558 	if (fl) {
4559 		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4560 		old_flow = fl->history[fl->history_head];
4561 		fl->history[fl->history_head] = new_flow;
4562 
4563 		fl->history_head++;
4564 		fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4565 
4566 		if (likely(fl->buckets[old_flow]))
4567 			fl->buckets[old_flow]--;
4568 
4569 		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4570 			fl->count++;
4571 			rcu_read_unlock();
4572 			return true;
4573 		}
4574 	}
4575 	rcu_read_unlock();
4576 #endif
4577 	return false;
4578 }
4579 
4580 /*
4581  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4582  * queue (may be a remote CPU queue).
4583  */
4584 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4585 			      unsigned int *qtail)
4586 {
4587 	struct softnet_data *sd;
4588 	unsigned long flags;
4589 	unsigned int qlen;
4590 
4591 	sd = &per_cpu(softnet_data, cpu);
4592 
4593 	local_irq_save(flags);
4594 
4595 	rps_lock(sd);
4596 	if (!netif_running(skb->dev))
4597 		goto drop;
4598 	qlen = skb_queue_len(&sd->input_pkt_queue);
4599 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
4600 		if (qlen) {
4601 enqueue:
4602 			__skb_queue_tail(&sd->input_pkt_queue, skb);
4603 			input_queue_tail_incr_save(sd, qtail);
4604 			rps_unlock(sd);
4605 			local_irq_restore(flags);
4606 			return NET_RX_SUCCESS;
4607 		}
4608 
4609 		/* Schedule NAPI for backlog device
4610 		 * We can use a non-atomic operation since we own the queue lock
4611 		 */
4612 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
4613 			if (!rps_ipi_queued(sd))
4614 				____napi_schedule(sd, &sd->backlog);
4615 		}
4616 		goto enqueue;
4617 	}
4618 
4619 drop:
4620 	sd->dropped++;
4621 	rps_unlock(sd);
4622 
4623 	local_irq_restore(flags);
4624 
4625 	atomic_long_inc(&skb->dev->rx_dropped);
4626 	kfree_skb(skb);
4627 	return NET_RX_DROP;
4628 }
4629 
4630 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4631 {
4632 	struct net_device *dev = skb->dev;
4633 	struct netdev_rx_queue *rxqueue;
4634 
4635 	rxqueue = dev->_rx;
4636 
4637 	if (skb_rx_queue_recorded(skb)) {
4638 		u16 index = skb_get_rx_queue(skb);
4639 
4640 		if (unlikely(index >= dev->real_num_rx_queues)) {
4641 			WARN_ONCE(dev->real_num_rx_queues > 1,
4642 				  "%s received packet on queue %u, but number "
4643 				  "of RX queues is %u\n",
4644 				  dev->name, index, dev->real_num_rx_queues);
4645 
4646 			return rxqueue; /* Return first rxqueue */
4647 		}
4648 		rxqueue += index;
4649 	}
4650 	return rxqueue;
4651 }
4652 
4653 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4654 				     struct xdp_buff *xdp,
4655 				     struct bpf_prog *xdp_prog)
4656 {
4657 	void *orig_data, *orig_data_end, *hard_start;
4658 	struct netdev_rx_queue *rxqueue;
4659 	u32 metalen, act = XDP_DROP;
4660 	u32 mac_len, frame_sz;
4661 	__be16 orig_eth_type;
4662 	struct ethhdr *eth;
4663 	bool orig_bcast;
4664 	int off;
4665 
4666 	/* Reinjected packets coming from act_mirred or similar should
4667 	 * not get XDP generic processing.
4668 	 */
4669 	if (skb_is_redirected(skb))
4670 		return XDP_PASS;
4671 
4672 	/* XDP packets must be linear and must have sufficient headroom
4673 	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that native
4674 	 * XDP also provides, so we need to enforce it here as well.
4675 	 */
4676 	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4677 	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4678 		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4679 		int troom = skb->tail + skb->data_len - skb->end;
4680 
4681 		/* In case we have to go down the path and also linearize,
4682 		 * then let's do the pskb_expand_head() work just once here.
4683 		 */
4684 		if (pskb_expand_head(skb,
4685 				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4686 				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4687 			goto do_drop;
4688 		if (skb_linearize(skb))
4689 			goto do_drop;
4690 	}
4691 
4692 	/* The XDP program wants to see the packet starting at the MAC
4693 	 * header.
4694 	 */
4695 	mac_len = skb->data - skb_mac_header(skb);
4696 	hard_start = skb->data - skb_headroom(skb);
4697 
4698 	/* SKB "head" area always has tailroom for skb_shared_info */
4699 	frame_sz = (void *)skb_end_pointer(skb) - hard_start;
4700 	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4701 
4702 	rxqueue = netif_get_rxqueue(skb);
4703 	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4704 	xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4705 			 skb_headlen(skb) + mac_len, true);
4706 
4707 	orig_data_end = xdp->data_end;
4708 	orig_data = xdp->data;
4709 	eth = (struct ethhdr *)xdp->data;
4710 	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4711 	orig_eth_type = eth->h_proto;
4712 
4713 	act = bpf_prog_run_xdp(xdp_prog, xdp);
4714 
4715 	/* check if bpf_xdp_adjust_head was used */
4716 	off = xdp->data - orig_data;
4717 	if (off) {
4718 		if (off > 0)
4719 			__skb_pull(skb, off);
4720 		else if (off < 0)
4721 			__skb_push(skb, -off);
4722 
4723 		skb->mac_header += off;
4724 		skb_reset_network_header(skb);
4725 	}
4726 
4727 	/* check if bpf_xdp_adjust_tail was used */
4728 	off = xdp->data_end - orig_data_end;
4729 	if (off != 0) {
4730 		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4731 		skb->len += off; /* positive on grow, negative on shrink */
4732 	}
4733 
4734 	/* check if XDP changed the eth hdr such that the SKB needs an update */
4735 	eth = (struct ethhdr *)xdp->data;
4736 	if ((orig_eth_type != eth->h_proto) ||
4737 	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4738 		__skb_push(skb, ETH_HLEN);
4739 		skb->protocol = eth_type_trans(skb, skb->dev);
4740 	}
4741 
4742 	switch (act) {
4743 	case XDP_REDIRECT:
4744 	case XDP_TX:
4745 		__skb_push(skb, mac_len);
4746 		break;
4747 	case XDP_PASS:
4748 		metalen = xdp->data - xdp->data_meta;
4749 		if (metalen)
4750 			skb_metadata_set(skb, metalen);
4751 		break;
4752 	default:
4753 		bpf_warn_invalid_xdp_action(act);
4754 		fallthrough;
4755 	case XDP_ABORTED:
4756 		trace_xdp_exception(skb->dev, xdp_prog, act);
4757 		fallthrough;
4758 	case XDP_DROP:
4759 	do_drop:
4760 		kfree_skb(skb);
4761 		break;
4762 	}
4763 
4764 	return act;
4765 }
4766 
4767 /* When doing generic XDP we have to bypass the qdisc layer and the
4768  * network taps in order to match in-driver-XDP behavior.
4769  */
4770 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4771 {
4772 	struct net_device *dev = skb->dev;
4773 	struct netdev_queue *txq;
4774 	bool free_skb = true;
4775 	int cpu, rc;
4776 
4777 	txq = netdev_core_pick_tx(dev, skb, NULL);
4778 	cpu = smp_processor_id();
4779 	HARD_TX_LOCK(dev, txq, cpu);
4780 	if (!netif_xmit_stopped(txq)) {
4781 		rc = netdev_start_xmit(skb, dev, txq, 0);
4782 		if (dev_xmit_complete(rc))
4783 			free_skb = false;
4784 	}
4785 	HARD_TX_UNLOCK(dev, txq);
4786 	if (free_skb) {
4787 		trace_xdp_exception(dev, xdp_prog, XDP_TX);
4788 		kfree_skb(skb);
4789 	}
4790 }
4791 
4792 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
4793 
4794 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
4795 {
4796 	if (xdp_prog) {
4797 		struct xdp_buff xdp;
4798 		u32 act;
4799 		int err;
4800 
4801 		act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
4802 		if (act != XDP_PASS) {
4803 			switch (act) {
4804 			case XDP_REDIRECT:
4805 				err = xdp_do_generic_redirect(skb->dev, skb,
4806 							      &xdp, xdp_prog);
4807 				if (err)
4808 					goto out_redir;
4809 				break;
4810 			case XDP_TX:
4811 				generic_xdp_tx(skb, xdp_prog);
4812 				break;
4813 			}
4814 			return XDP_DROP;
4815 		}
4816 	}
4817 	return XDP_PASS;
4818 out_redir:
4819 	kfree_skb(skb);
4820 	return XDP_DROP;
4821 }
4822 EXPORT_SYMBOL_GPL(do_xdp_generic);
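
/*
 * Illustrative sketch (not part of the original file): running the generic
 * XDP hook on an skb before handing it to the stack, loosely following what
 * drivers like tun do.  When the verdict is anything other than XDP_PASS
 * the skb has already been consumed.  priv->xdp_prog is a hypothetical
 * field.
 *
 *	local_bh_disable();
 *	rcu_read_lock();
 *	xdp_prog = rcu_dereference(priv->xdp_prog);
 *	if (xdp_prog && do_xdp_generic(xdp_prog, skb) != XDP_PASS) {
 *		rcu_read_unlock();
 *		local_bh_enable();
 *		return;
 *	}
 *	rcu_read_unlock();
 *	netif_receive_skb(skb);
 *	local_bh_enable();
 */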
4823 
4824 static int netif_rx_internal(struct sk_buff *skb)
4825 {
4826 	int ret;
4827 
4828 	net_timestamp_check(netdev_tstamp_prequeue, skb);
4829 
4830 	trace_netif_rx(skb);
4831 
4832 #ifdef CONFIG_RPS
4833 	if (static_branch_unlikely(&rps_needed)) {
4834 		struct rps_dev_flow voidflow, *rflow = &voidflow;
4835 		int cpu;
4836 
4837 		preempt_disable();
4838 		rcu_read_lock();
4839 
4840 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
4841 		if (cpu < 0)
4842 			cpu = smp_processor_id();
4843 
4844 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4845 
4846 		rcu_read_unlock();
4847 		preempt_enable();
4848 	} else
4849 #endif
4850 	{
4851 		unsigned int qtail;
4852 
4853 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4854 		put_cpu();
4855 	}
4856 	return ret;
4857 }
4858 
4859 /**
4860  *	netif_rx	-	post buffer to the network code
4861  *	@skb: buffer to post
4862  *
4863  *	This function receives a packet from a device driver and queues it for
4864  *	the upper (protocol) levels to process.  It always succeeds. The buffer
4865  *	may be dropped during processing for congestion control or by the
4866  *	protocol layers.
4867  *
4868  *	return values:
4869  *	NET_RX_SUCCESS	(no congestion)
4870  *	NET_RX_DROP     (packet was dropped)
4871  *
4872  */
4873 
4874 int netif_rx(struct sk_buff *skb)
4875 {
4876 	int ret;
4877 
4878 	trace_netif_rx_entry(skb);
4879 
4880 	ret = netif_rx_internal(skb);
4881 	trace_netif_rx_exit(ret);
4882 
4883 	return ret;
4884 }
4885 EXPORT_SYMBOL(netif_rx);
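
/*
 * Illustrative sketch (not part of the original file): the classic non-NAPI
 * receive path of a driver, building an skb from a received frame and
 * posting it with netif_rx() from its interrupt handler.  my_drv_rx() is a
 * hypothetical helper name.
 *
 *	static void my_drv_rx(struct net_device *dev, const void *data,
 *			      unsigned int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = netdev_alloc_skb_ip_align(dev, len);
 *		if (!skb) {
 *			dev->stats.rx_dropped++;
 *			return;
 *		}
 *
 *		skb_put_data(skb, data, len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */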
4886 
4887 int netif_rx_ni(struct sk_buff *skb)
4888 {
4889 	int err;
4890 
4891 	trace_netif_rx_ni_entry(skb);
4892 
4893 	preempt_disable();
4894 	err = netif_rx_internal(skb);
4895 	if (local_softirq_pending())
4896 		do_softirq();
4897 	preempt_enable();
4898 	trace_netif_rx_ni_exit(err);
4899 
4900 	return err;
4901 }
4902 EXPORT_SYMBOL(netif_rx_ni);
4903 
4904 int netif_rx_any_context(struct sk_buff *skb)
4905 {
4906 	/*
4907 	 * If invoked from contexts which do not invoke bottom half
4908 	 * processing either at return from interrupt or when softirqs are
4909 	 * re-enabled, use netif_rx_ni() which invokes bottom half processing
4910 	 * directly.
4911 	 */
4912 	if (in_interrupt())
4913 		return netif_rx(skb);
4914 	else
4915 		return netif_rx_ni(skb);
4916 }
4917 EXPORT_SYMBOL(netif_rx_any_context);
4918 
4919 static __latent_entropy void net_tx_action(struct softirq_action *h)
4920 {
4921 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4922 
4923 	if (sd->completion_queue) {
4924 		struct sk_buff *clist;
4925 
4926 		local_irq_disable();
4927 		clist = sd->completion_queue;
4928 		sd->completion_queue = NULL;
4929 		local_irq_enable();
4930 
4931 		while (clist) {
4932 			struct sk_buff *skb = clist;
4933 
4934 			clist = clist->next;
4935 
4936 			WARN_ON(refcount_read(&skb->users));
4937 			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4938 				trace_consume_skb(skb);
4939 			else
4940 				trace_kfree_skb(skb, net_tx_action);
4941 
4942 			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4943 				__kfree_skb(skb);
4944 			else
4945 				__kfree_skb_defer(skb);
4946 		}
4947 	}
4948 
4949 	if (sd->output_queue) {
4950 		struct Qdisc *head;
4951 
4952 		local_irq_disable();
4953 		head = sd->output_queue;
4954 		sd->output_queue = NULL;
4955 		sd->output_queue_tailp = &sd->output_queue;
4956 		local_irq_enable();
4957 
4958 		while (head) {
4959 			struct Qdisc *q = head;
4960 			spinlock_t *root_lock = NULL;
4961 
4962 			head = head->next_sched;
4963 
4964 			if (!(q->flags & TCQ_F_NOLOCK)) {
4965 				root_lock = qdisc_lock(q);
4966 				spin_lock(root_lock);
4967 			}
4968 			/* We need to make sure head->next_sched is read
4969 			 * before clearing __QDISC_STATE_SCHED
4970 			 */
4971 			smp_mb__before_atomic();
4972 			clear_bit(__QDISC_STATE_SCHED, &q->state);
4973 			qdisc_run(q);
4974 			if (root_lock)
4975 				spin_unlock(root_lock);
4976 		}
4977 	}
4978 
4979 	xfrm_dev_backlog(sd);
4980 }
4981 
4982 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
4983 /* This hook is defined here for ATM LANE */
4984 int (*br_fdb_test_addr_hook)(struct net_device *dev,
4985 			     unsigned char *addr) __read_mostly;
4986 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
4987 #endif
4988 
4989 static inline struct sk_buff *
4990 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4991 		   struct net_device *orig_dev, bool *another)
4992 {
4993 #ifdef CONFIG_NET_CLS_ACT
4994 	struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
4995 	struct tcf_result cl_res;
4996 
4997 	/* If there's at least one ingress present somewhere (so
4998 	 * we get here via enabled static key), remaining devices
4999 	 * that are not configured with an ingress qdisc will bail
5000 	 * out here.
5001 	 */
5002 	if (!miniq)
5003 		return skb;
5004 
5005 	if (*pt_prev) {
5006 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
5007 		*pt_prev = NULL;
5008 	}
5009 
5010 	qdisc_skb_cb(skb)->pkt_len = skb->len;
5011 	qdisc_skb_cb(skb)->mru = 0;
5012 	qdisc_skb_cb(skb)->post_ct = false;
5013 	skb->tc_at_ingress = 1;
5014 	mini_qdisc_bstats_cpu_update(miniq, skb);
5015 
5016 	switch (tcf_classify_ingress(skb, miniq->block, miniq->filter_list,
5017 				     &cl_res, false)) {
5018 	case TC_ACT_OK:
5019 	case TC_ACT_RECLASSIFY:
5020 		skb->tc_index = TC_H_MIN(cl_res.classid);
5021 		break;
5022 	case TC_ACT_SHOT:
5023 		mini_qdisc_qstats_cpu_drop(miniq);
5024 		kfree_skb(skb);
5025 		return NULL;
5026 	case TC_ACT_STOLEN:
5027 	case TC_ACT_QUEUED:
5028 	case TC_ACT_TRAP:
5029 		consume_skb(skb);
5030 		return NULL;
5031 	case TC_ACT_REDIRECT:
5032 		/* skb_mac_header check was done by cls/act_bpf, so
5033 		 * we can safely push the L2 header back before
5034 		 * redirecting to another netdev
5035 		 */
5036 		__skb_push(skb, skb->mac_len);
5037 		if (skb_do_redirect(skb) == -EAGAIN) {
5038 			__skb_pull(skb, skb->mac_len);
5039 			*another = true;
5040 			break;
5041 		}
5042 		return NULL;
5043 	case TC_ACT_CONSUMED:
5044 		return NULL;
5045 	default:
5046 		break;
5047 	}
5048 #endif /* CONFIG_NET_CLS_ACT */
5049 	return skb;
5050 }
5051 
5052 /**
5053  *	netdev_is_rx_handler_busy - check if receive handler is registered
5054  *	@dev: device to check
5055  *
5056  *	Check if a receive handler is already registered for a given device.
5057  *	Return true if there is one.
5058  *
5059  *	The caller must hold the rtnl_mutex.
5060  */
5061 bool netdev_is_rx_handler_busy(struct net_device *dev)
5062 {
5063 	ASSERT_RTNL();
5064 	return dev && rtnl_dereference(dev->rx_handler);
5065 }
5066 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
5067 
5068 /**
5069  *	netdev_rx_handler_register - register receive handler
5070  *	@dev: device to register a handler for
5071  *	@rx_handler: receive handler to register
5072  *	@rx_handler_data: data pointer that is used by rx handler
5073  *
5074  *	Register a receive handler for a device. This handler will then be
5075  *	called from __netif_receive_skb. A negative errno code is returned
5076  *	on a failure.
5077  *
5078  *	The caller must hold the rtnl_mutex.
5079  *
5080  *	For a general description of rx_handler, see enum rx_handler_result.
5081  */
5082 int netdev_rx_handler_register(struct net_device *dev,
5083 			       rx_handler_func_t *rx_handler,
5084 			       void *rx_handler_data)
5085 {
5086 	if (netdev_is_rx_handler_busy(dev))
5087 		return -EBUSY;
5088 
5089 	if (dev->priv_flags & IFF_NO_RX_HANDLER)
5090 		return -EINVAL;
5091 
5092 	/* Note: rx_handler_data must be set before rx_handler */
5093 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5094 	rcu_assign_pointer(dev->rx_handler, rx_handler);
5095 
5096 	return 0;
5097 }
5098 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
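
/*
 * Illustrative sketch (not part of the original file): a minimal rx_handler
 * and its registration under RTNL, in the style of bridge/bonding/team.
 * The my_port type and my_port_wants()/my_port_queue() helpers are
 * hypothetical.
 *
 *	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct my_port *port;
 *
 *		port = rcu_dereference(skb->dev->rx_handler_data);
 *		if (!my_port_wants(port, skb))
 *			return RX_HANDLER_PASS;
 *
 *		my_port_queue(port, skb);
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(dev, my_handle_frame, port);
 *	rtnl_unlock();
 */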
5099 
5100 /**
5101  *	netdev_rx_handler_unregister - unregister receive handler
5102  *	@dev: device to unregister a handler from
5103  *
5104  *	Unregister a receive handler from a device.
5105  *
5106  *	The caller must hold the rtnl_mutex.
5107  */
5108 void netdev_rx_handler_unregister(struct net_device *dev)
5109 {
5110 
5111 	ASSERT_RTNL();
5112 	RCU_INIT_POINTER(dev->rx_handler, NULL);
5113 	/* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
5114 	 * section is guaranteed to see a non-NULL rx_handler_data
5115 	 * as well.
5116 	 */
5117 	synchronize_net();
5118 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
5119 }
5120 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
5121 
5122 /*
5123  * Limit the use of PFMEMALLOC reserves to those protocols that implement
5124  * the special handling of PFMEMALLOC skbs.
5125  */
5126 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5127 {
5128 	switch (skb->protocol) {
5129 	case htons(ETH_P_ARP):
5130 	case htons(ETH_P_IP):
5131 	case htons(ETH_P_IPV6):
5132 	case htons(ETH_P_8021Q):
5133 	case htons(ETH_P_8021AD):
5134 		return true;
5135 	default:
5136 		return false;
5137 	}
5138 }
5139 
5140 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5141 			     int *ret, struct net_device *orig_dev)
5142 {
5143 	if (nf_hook_ingress_active(skb)) {
5144 		int ingress_retval;
5145 
5146 		if (*pt_prev) {
5147 			*ret = deliver_skb(skb, *pt_prev, orig_dev);
5148 			*pt_prev = NULL;
5149 		}
5150 
5151 		rcu_read_lock();
5152 		ingress_retval = nf_hook_ingress(skb);
5153 		rcu_read_unlock();
5154 		return ingress_retval;
5155 	}
5156 	return 0;
5157 }
5158 
5159 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5160 				    struct packet_type **ppt_prev)
5161 {
5162 	struct packet_type *ptype, *pt_prev;
5163 	rx_handler_func_t *rx_handler;
5164 	struct sk_buff *skb = *pskb;
5165 	struct net_device *orig_dev;
5166 	bool deliver_exact = false;
5167 	int ret = NET_RX_DROP;
5168 	__be16 type;
5169 
5170 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
5171 
5172 	trace_netif_receive_skb(skb);
5173 
5174 	orig_dev = skb->dev;
5175 
5176 	skb_reset_network_header(skb);
5177 	if (!skb_transport_header_was_set(skb))
5178 		skb_reset_transport_header(skb);
5179 	skb_reset_mac_len(skb);
5180 
5181 	pt_prev = NULL;
5182 
5183 another_round:
5184 	skb->skb_iif = skb->dev->ifindex;
5185 
5186 	__this_cpu_inc(softnet_data.processed);
5187 
5188 	if (static_branch_unlikely(&generic_xdp_needed_key)) {
5189 		int ret2;
5190 
5191 		preempt_disable();
5192 		ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5193 		preempt_enable();
5194 
5195 		if (ret2 != XDP_PASS) {
5196 			ret = NET_RX_DROP;
5197 			goto out;
5198 		}
5199 		skb_reset_mac_len(skb);
5200 	}
5201 
5202 	if (eth_type_vlan(skb->protocol)) {
5203 		skb = skb_vlan_untag(skb);
5204 		if (unlikely(!skb))
5205 			goto out;
5206 	}
5207 
5208 	if (skb_skip_tc_classify(skb))
5209 		goto skip_classify;
5210 
5211 	if (pfmemalloc)
5212 		goto skip_taps;
5213 
5214 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
5215 		if (pt_prev)
5216 			ret = deliver_skb(skb, pt_prev, orig_dev);
5217 		pt_prev = ptype;
5218 	}
5219 
5220 	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5221 		if (pt_prev)
5222 			ret = deliver_skb(skb, pt_prev, orig_dev);
5223 		pt_prev = ptype;
5224 	}
5225 
5226 skip_taps:
5227 #ifdef CONFIG_NET_INGRESS
5228 	if (static_branch_unlikely(&ingress_needed_key)) {
5229 		bool another = false;
5230 
5231 		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
5232 					 &another);
5233 		if (another)
5234 			goto another_round;
5235 		if (!skb)
5236 			goto out;
5237 
5238 		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5239 			goto out;
5240 	}
5241 #endif
5242 	skb_reset_redirect(skb);
5243 skip_classify:
5244 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5245 		goto drop;
5246 
5247 	if (skb_vlan_tag_present(skb)) {
5248 		if (pt_prev) {
5249 			ret = deliver_skb(skb, pt_prev, orig_dev);
5250 			pt_prev = NULL;
5251 		}
5252 		if (vlan_do_receive(&skb))
5253 			goto another_round;
5254 		else if (unlikely(!skb))
5255 			goto out;
5256 	}
5257 
5258 	rx_handler = rcu_dereference(skb->dev->rx_handler);
5259 	if (rx_handler) {
5260 		if (pt_prev) {
5261 			ret = deliver_skb(skb, pt_prev, orig_dev);
5262 			pt_prev = NULL;
5263 		}
5264 		switch (rx_handler(&skb)) {
5265 		case RX_HANDLER_CONSUMED:
5266 			ret = NET_RX_SUCCESS;
5267 			goto out;
5268 		case RX_HANDLER_ANOTHER:
5269 			goto another_round;
5270 		case RX_HANDLER_EXACT:
5271 			deliver_exact = true;
			fallthrough;
5272 		case RX_HANDLER_PASS:
5273 			break;
5274 		default:
5275 			BUG();
5276 		}
5277 	}
5278 
5279 	if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5280 check_vlan_id:
5281 		if (skb_vlan_tag_get_id(skb)) {
5282 			/* Vlan id is non 0 and vlan_do_receive() above couldn't
5283 			 * find vlan device.
5284 			 */
5285 			skb->pkt_type = PACKET_OTHERHOST;
5286 		} else if (eth_type_vlan(skb->protocol)) {
5287 			/* Outer header is 802.1P with vlan 0, inner header is
5288 			 * 802.1Q or 802.1AD and vlan_do_receive() above could
5289 			 * not find vlan dev for vlan id 0.
5290 			 */
5291 			__vlan_hwaccel_clear_tag(skb);
5292 			skb = skb_vlan_untag(skb);
5293 			if (unlikely(!skb))
5294 				goto out;
5295 			if (vlan_do_receive(&skb))
5296 				/* After stripping off 802.1P header with vlan 0
5297 				 * vlan dev is found for inner header.
5298 				 */
5299 				goto another_round;
5300 			else if (unlikely(!skb))
5301 				goto out;
5302 			else
5303 				/* We have stripped outer 802.1P vlan 0 header.
5304 				 * But could not find vlan dev.
5305 				 * check again for vlan id to set OTHERHOST.
5306 				 */
5307 				goto check_vlan_id;
5308 		}
5309 		/* Note: we might in the future use prio bits
5310 		 * and set skb->priority like in vlan_do_receive()
5311 		 * For the time being, just ignore Priority Code Point
5312 		 */
5313 		__vlan_hwaccel_clear_tag(skb);
5314 	}
5315 
5316 	type = skb->protocol;
5317 
5318 	/* deliver only exact match when indicated */
5319 	if (likely(!deliver_exact)) {
5320 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5321 				       &ptype_base[ntohs(type) &
5322 						   PTYPE_HASH_MASK]);
5323 	}
5324 
5325 	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5326 			       &orig_dev->ptype_specific);
5327 
5328 	if (unlikely(skb->dev != orig_dev)) {
5329 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5330 				       &skb->dev->ptype_specific);
5331 	}
5332 
5333 	if (pt_prev) {
5334 		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5335 			goto drop;
5336 		*ppt_prev = pt_prev;
5337 	} else {
5338 drop:
5339 		if (!deliver_exact)
5340 			atomic_long_inc(&skb->dev->rx_dropped);
5341 		else
5342 			atomic_long_inc(&skb->dev->rx_nohandler);
5343 		kfree_skb(skb);
5344 		/* Jamal, now you will not be able to escape explaining
5345 		 * to me how you were going to use this. :-)
5346 		 */
5347 		ret = NET_RX_DROP;
5348 	}
5349 
5350 out:
5351 	/* The invariant here is that if *ppt_prev is not NULL
5352 	 * then skb should also be non-NULL.
5353 	 *
5354 	 * Apparently *ppt_prev assignment above holds this invariant due to
5355 	 * skb dereferencing near it.
5356 	 */
5357 	*pskb = skb;
5358 	return ret;
5359 }
5360 
5361 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5362 {
5363 	struct net_device *orig_dev = skb->dev;
5364 	struct packet_type *pt_prev = NULL;
5365 	int ret;
5366 
5367 	ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5368 	if (pt_prev)
5369 		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5370 					 skb->dev, pt_prev, orig_dev);
5371 	return ret;
5372 }
5373 
5374 /**
5375  *	netif_receive_skb_core - special purpose version of netif_receive_skb
5376  *	@skb: buffer to process
5377  *
5378  *	More direct receive version of netif_receive_skb().  It should
5379  *	only be used by callers that have a need to skip RPS and Generic XDP.
5380  *	Caller must also take care of handling if ``(page_is_)pfmemalloc``.
5381  *
5382  *	This function may only be called from softirq context and interrupts
5383  *	should be enabled.
5384  *
5385  *	Return values (usually ignored):
5386  *	NET_RX_SUCCESS: no congestion
5387  *	NET_RX_DROP: packet was dropped
5388  */
5389 int netif_receive_skb_core(struct sk_buff *skb)
5390 {
5391 	int ret;
5392 
5393 	rcu_read_lock();
5394 	ret = __netif_receive_skb_one_core(skb, false);
5395 	rcu_read_unlock();
5396 
5397 	return ret;
5398 }
5399 EXPORT_SYMBOL(netif_receive_skb_core);
5400 
5401 static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5402 						  struct packet_type *pt_prev,
5403 						  struct net_device *orig_dev)
5404 {
5405 	struct sk_buff *skb, *next;
5406 
5407 	if (!pt_prev)
5408 		return;
5409 	if (list_empty(head))
5410 		return;
5411 	if (pt_prev->list_func != NULL)
5412 		INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5413 				   ip_list_rcv, head, pt_prev, orig_dev);
5414 	else
5415 		list_for_each_entry_safe(skb, next, head, list) {
5416 			skb_list_del_init(skb);
5417 			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5418 		}
5419 }
5420 
5421 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5422 {
5423 	/* Fast-path assumptions:
5424 	 * - There is no RX handler.
5425 	 * - Only one packet_type matches.
5426 	 * If either of these fails, we will end up doing some per-packet
5427 	 * processing in-line, then handling the 'last ptype' for the whole
5428 	 * sublist.  This can't cause out-of-order delivery to any single ptype,
5429 	 * because the 'last ptype' must be constant across the sublist, and all
5430 	 * other ptypes are handled per-packet.
5431 	 */
5432 	/* Current (common) ptype of sublist */
5433 	struct packet_type *pt_curr = NULL;
5434 	/* Current (common) orig_dev of sublist */
5435 	struct net_device *od_curr = NULL;
5436 	struct list_head sublist;
5437 	struct sk_buff *skb, *next;
5438 
5439 	INIT_LIST_HEAD(&sublist);
5440 	list_for_each_entry_safe(skb, next, head, list) {
5441 		struct net_device *orig_dev = skb->dev;
5442 		struct packet_type *pt_prev = NULL;
5443 
5444 		skb_list_del_init(skb);
5445 		__netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5446 		if (!pt_prev)
5447 			continue;
5448 		if (pt_curr != pt_prev || od_curr != orig_dev) {
5449 			/* dispatch old sublist */
5450 			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5451 			/* start new sublist */
5452 			INIT_LIST_HEAD(&sublist);
5453 			pt_curr = pt_prev;
5454 			od_curr = orig_dev;
5455 		}
5456 		list_add_tail(&skb->list, &sublist);
5457 	}
5458 
5459 	/* dispatch final sublist */
5460 	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5461 }
5462 
5463 static int __netif_receive_skb(struct sk_buff *skb)
5464 {
5465 	int ret;
5466 
5467 	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5468 		unsigned int noreclaim_flag;
5469 
5470 		/*
5471 		 * PFMEMALLOC skbs are special, they should
5472 		 * - be delivered to SOCK_MEMALLOC sockets only
5473 		 * - stay away from userspace
5474 		 * - have bounded memory usage
5475 		 *
5476 		 * Use PF_MEMALLOC as this saves us from propagating the allocation
5477 		 * context down to all allocation sites.
5478 		 */
5479 		noreclaim_flag = memalloc_noreclaim_save();
5480 		ret = __netif_receive_skb_one_core(skb, true);
5481 		memalloc_noreclaim_restore(noreclaim_flag);
5482 	} else
5483 		ret = __netif_receive_skb_one_core(skb, false);
5484 
5485 	return ret;
5486 }
5487 
5488 static void __netif_receive_skb_list(struct list_head *head)
5489 {
5490 	unsigned long noreclaim_flag = 0;
5491 	struct sk_buff *skb, *next;
5492 	bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5493 
5494 	list_for_each_entry_safe(skb, next, head, list) {
5495 		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5496 			struct list_head sublist;
5497 
5498 			/* Handle the previous sublist */
5499 			list_cut_before(&sublist, head, &skb->list);
5500 			if (!list_empty(&sublist))
5501 				__netif_receive_skb_list_core(&sublist, pfmemalloc);
5502 			pfmemalloc = !pfmemalloc;
5503 			/* See comments in __netif_receive_skb */
5504 			if (pfmemalloc)
5505 				noreclaim_flag = memalloc_noreclaim_save();
5506 			else
5507 				memalloc_noreclaim_restore(noreclaim_flag);
5508 		}
5509 	}
5510 	/* Handle the remaining sublist */
5511 	if (!list_empty(head))
5512 		__netif_receive_skb_list_core(head, pfmemalloc);
5513 	/* Restore pflags */
5514 	if (pfmemalloc)
5515 		memalloc_noreclaim_restore(noreclaim_flag);
5516 }
5517 
5518 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5519 {
5520 	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5521 	struct bpf_prog *new = xdp->prog;
5522 	int ret = 0;
5523 
5524 	if (new) {
5525 		u32 i;
5526 
5527 		mutex_lock(&new->aux->used_maps_mutex);
5528 
5529 		/* generic XDP does not work with DEVMAPs that can
5530 		 * have a bpf_prog installed on an entry
5531 		 */
5532 		for (i = 0; i < new->aux->used_map_cnt; i++) {
5533 			if (dev_map_can_have_prog(new->aux->used_maps[i]) ||
5534 			    cpu_map_prog_allowed(new->aux->used_maps[i])) {
5535 				mutex_unlock(&new->aux->used_maps_mutex);
5536 				return -EINVAL;
5537 			}
5538 		}
5539 
5540 		mutex_unlock(&new->aux->used_maps_mutex);
5541 	}
5542 
5543 	switch (xdp->command) {
5544 	case XDP_SETUP_PROG:
5545 		rcu_assign_pointer(dev->xdp_prog, new);
5546 		if (old)
5547 			bpf_prog_put(old);
5548 
5549 		if (old && !new) {
5550 			static_branch_dec(&generic_xdp_needed_key);
5551 		} else if (new && !old) {
5552 			static_branch_inc(&generic_xdp_needed_key);
5553 			dev_disable_lro(dev);
5554 			dev_disable_gro_hw(dev);
5555 		}
5556 		break;
5557 
5558 	default:
5559 		ret = -EINVAL;
5560 		break;
5561 	}
5562 
5563 	return ret;
5564 }
5565 
5566 static int netif_receive_skb_internal(struct sk_buff *skb)
5567 {
5568 	int ret;
5569 
5570 	net_timestamp_check(netdev_tstamp_prequeue, skb);
5571 
5572 	if (skb_defer_rx_timestamp(skb))
5573 		return NET_RX_SUCCESS;
5574 
5575 	rcu_read_lock();
5576 #ifdef CONFIG_RPS
5577 	if (static_branch_unlikely(&rps_needed)) {
5578 		struct rps_dev_flow voidflow, *rflow = &voidflow;
5579 		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5580 
5581 		if (cpu >= 0) {
5582 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5583 			rcu_read_unlock();
5584 			return ret;
5585 		}
5586 	}
5587 #endif
5588 	ret = __netif_receive_skb(skb);
5589 	rcu_read_unlock();
5590 	return ret;
5591 }
5592 
5593 static void netif_receive_skb_list_internal(struct list_head *head)
5594 {
5595 	struct sk_buff *skb, *next;
5596 	struct list_head sublist;
5597 
5598 	INIT_LIST_HEAD(&sublist);
5599 	list_for_each_entry_safe(skb, next, head, list) {
5600 		net_timestamp_check(netdev_tstamp_prequeue, skb);
5601 		skb_list_del_init(skb);
5602 		if (!skb_defer_rx_timestamp(skb))
5603 			list_add_tail(&skb->list, &sublist);
5604 	}
5605 	list_splice_init(&sublist, head);
5606 
5607 	rcu_read_lock();
5608 #ifdef CONFIG_RPS
5609 	if (static_branch_unlikely(&rps_needed)) {
5610 		list_for_each_entry_safe(skb, next, head, list) {
5611 			struct rps_dev_flow voidflow, *rflow = &voidflow;
5612 			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5613 
5614 			if (cpu >= 0) {
5615 				/* Will be handled, remove from list */
5616 				skb_list_del_init(skb);
5617 				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5618 			}
5619 		}
5620 	}
5621 #endif
5622 	__netif_receive_skb_list(head);
5623 	rcu_read_unlock();
5624 }
5625 
5626 /**
5627  *	netif_receive_skb - process receive buffer from network
5628  *	@skb: buffer to process
5629  *
5630  *	netif_receive_skb() is the main receive data processing function.
5631  *	It always succeeds. The buffer may be dropped during processing
5632  *	for congestion control or by the protocol layers.
5633  *
5634  *	This function may only be called from softirq context and interrupts
5635  *	should be enabled.
5636  *
5637  *	Return values (usually ignored):
5638  *	NET_RX_SUCCESS: no congestion
5639  *	NET_RX_DROP: packet was dropped
5640  */
5641 int netif_receive_skb(struct sk_buff *skb)
5642 {
5643 	int ret;
5644 
5645 	trace_netif_receive_skb_entry(skb);
5646 
5647 	ret = netif_receive_skb_internal(skb);
5648 	trace_netif_receive_skb_exit(ret);
5649 
5650 	return ret;
5651 }
5652 EXPORT_SYMBOL(netif_receive_skb);
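
/* Example (hypothetical sketch, not part of this file): a virtual device
 * running in BH context, e.g. a tunnel receive path after decapsulation,
 * would typically hand a packet to the stack like this; foo_rx() and its
 * caller are assumptions of the sketch.
 *
 *	static void foo_rx(struct net_device *dev, struct sk_buff *skb)
 *	{
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_receive_skb(skb);
 *	}
 *
 * The NET_RX_SUCCESS/NET_RX_DROP return value is usually ignored, as the
 * kerneldoc above notes.
 */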
5653 
5654 /**
5655  *	netif_receive_skb_list - process many receive buffers from network
5656  *	@head: list of skbs to process.
5657  *
5658  *	Since return value of netif_receive_skb() is normally ignored, and
5659  *	wouldn't be meaningful for a list, this function returns void.
5660  *
5661  *	This function may only be called from softirq context and interrupts
5662  *	should be enabled.
5663  */
5664 void netif_receive_skb_list(struct list_head *head)
5665 {
5666 	struct sk_buff *skb;
5667 
5668 	if (list_empty(head))
5669 		return;
5670 	if (trace_netif_receive_skb_list_entry_enabled()) {
5671 		list_for_each_entry(skb, head, list)
5672 			trace_netif_receive_skb_list_entry(skb);
5673 	}
5674 	netif_receive_skb_list_internal(head);
5675 	trace_netif_receive_skb_list_exit(0);
5676 }
5677 EXPORT_SYMBOL(netif_receive_skb_list);
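
/* Example (hypothetical sketch): a driver that has already unloaded a burst
 * of completed receive buffers can batch them on a list and hand them to the
 * stack in a single call, amortising the per-packet receive path cost:
 *
 *	LIST_HEAD(list);
 *	int i;
 *
 *	for (i = 0; i < n; i++)
 *		list_add_tail(&skbs[i]->list, &list);
 *	netif_receive_skb_list(&list);
 *
 * skbs[] and n are assumed to come from the driver's own RX completion
 * handling.
 */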
5678 
5679 static DEFINE_PER_CPU(struct work_struct, flush_works);
5680 
5681 /* Network device is going away, flush any packets still pending */
5682 static void flush_backlog(struct work_struct *work)
5683 {
5684 	struct sk_buff *skb, *tmp;
5685 	struct softnet_data *sd;
5686 
5687 	local_bh_disable();
5688 	sd = this_cpu_ptr(&softnet_data);
5689 
5690 	local_irq_disable();
5691 	rps_lock(sd);
5692 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5693 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5694 			__skb_unlink(skb, &sd->input_pkt_queue);
5695 			dev_kfree_skb_irq(skb);
5696 			input_queue_head_incr(sd);
5697 		}
5698 	}
5699 	rps_unlock(sd);
5700 	local_irq_enable();
5701 
5702 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5703 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5704 			__skb_unlink(skb, &sd->process_queue);
5705 			kfree_skb(skb);
5706 			input_queue_head_incr(sd);
5707 		}
5708 	}
5709 	local_bh_enable();
5710 }
5711 
5712 static bool flush_required(int cpu)
5713 {
5714 #if IS_ENABLED(CONFIG_RPS)
5715 	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
5716 	bool do_flush;
5717 
5718 	local_irq_disable();
5719 	rps_lock(sd);
5720 
5721 	/* as insertion into process_queue happens with the rps lock held,
5722 	 * process_queue access may race only with dequeue
5723 	 */
5724 	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
5725 		   !skb_queue_empty_lockless(&sd->process_queue);
5726 	rps_unlock(sd);
5727 	local_irq_enable();
5728 
5729 	return do_flush;
5730 #endif
5731 	/* without RPS we can't safely check input_pkt_queue: during a
5732 	 * concurrent remote skb_queue_splice() we can detect as empty both
5733 	 * input_pkt_queue and process_queue even if the latter could end-up
5734 	 * containing a lot of packets.
5735 	 */
5736 	return true;
5737 }
5738 
5739 static void flush_all_backlogs(void)
5740 {
5741 	static cpumask_t flush_cpus;
5742 	unsigned int cpu;
5743 
5744 	/* since we are under rtnl lock protection we can use static data
5745 	 * for the cpumask and avoid allocating the possibly large mask
5746 	 * on the stack
5747 	 */
5748 	ASSERT_RTNL();
5749 
5750 	get_online_cpus();
5751 
5752 	cpumask_clear(&flush_cpus);
5753 	for_each_online_cpu(cpu) {
5754 		if (flush_required(cpu)) {
5755 			queue_work_on(cpu, system_highpri_wq,
5756 				      per_cpu_ptr(&flush_works, cpu));
5757 			cpumask_set_cpu(cpu, &flush_cpus);
5758 		}
5759 	}
5760 
5761 	/* we can have in-flight packets on the cpus we are not flushing;
5762 	 * synchronize_net() in unregister_netdevice_many() will take care of
5763 	 * them
5764 	 */
5765 	for_each_cpu(cpu, &flush_cpus)
5766 		flush_work(per_cpu_ptr(&flush_works, cpu));
5767 
5768 	put_online_cpus();
5769 }
5770 
5771 /* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
5772 static void gro_normal_list(struct napi_struct *napi)
5773 {
5774 	if (!napi->rx_count)
5775 		return;
5776 	netif_receive_skb_list_internal(&napi->rx_list);
5777 	INIT_LIST_HEAD(&napi->rx_list);
5778 	napi->rx_count = 0;
5779 }
5780 
5781 /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
5782  * pass the whole batch up to the stack.
5783  */
5784 static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
5785 {
5786 	list_add_tail(&skb->list, &napi->rx_list);
5787 	napi->rx_count += segs;
5788 	if (napi->rx_count >= gro_normal_batch)
5789 		gro_normal_list(napi);
5790 }
5791 
5792 static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
5793 {
5794 	struct packet_offload *ptype;
5795 	__be16 type = skb->protocol;
5796 	struct list_head *head = &offload_base;
5797 	int err = -ENOENT;
5798 
5799 	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
5800 
5801 	if (NAPI_GRO_CB(skb)->count == 1) {
5802 		skb_shinfo(skb)->gso_size = 0;
5803 		goto out;
5804 	}
5805 
5806 	rcu_read_lock();
5807 	list_for_each_entry_rcu(ptype, head, list) {
5808 		if (ptype->type != type || !ptype->callbacks.gro_complete)
5809 			continue;
5810 
5811 		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
5812 					 ipv6_gro_complete, inet_gro_complete,
5813 					 skb, 0);
5814 		break;
5815 	}
5816 	rcu_read_unlock();
5817 
5818 	if (err) {
5819 		WARN_ON(&ptype->list == head);
5820 		kfree_skb(skb);
5821 		return NET_RX_SUCCESS;
5822 	}
5823 
5824 out:
5825 	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
5826 	return NET_RX_SUCCESS;
5827 }
5828 
5829 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
5830 				   bool flush_old)
5831 {
5832 	struct list_head *head = &napi->gro_hash[index].list;
5833 	struct sk_buff *skb, *p;
5834 
5835 	list_for_each_entry_safe_reverse(skb, p, head, list) {
5836 		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
5837 			return;
5838 		skb_list_del_init(skb);
5839 		napi_gro_complete(napi, skb);
5840 		napi->gro_hash[index].count--;
5841 	}
5842 
5843 	if (!napi->gro_hash[index].count)
5844 		__clear_bit(index, &napi->gro_bitmask);
5845 }
5846 
5847 /* napi->gro_hash[].list contains packets ordered by age, with the
5848  * youngest packets at the head.
5849  * Complete skbs in reverse order to reduce latencies.
5850  */
5851 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
5852 {
5853 	unsigned long bitmask = napi->gro_bitmask;
5854 	unsigned int i, base = ~0U;
5855 
5856 	while ((i = ffs(bitmask)) != 0) {
5857 		bitmask >>= i;
5858 		base += i;
5859 		__napi_gro_flush_chain(napi, base, flush_old);
5860 	}
5861 }
5862 EXPORT_SYMBOL(napi_gro_flush);
5863 
5864 static struct list_head *gro_list_prepare(struct napi_struct *napi,
5865 					  struct sk_buff *skb)
5866 {
5867 	unsigned int maclen = skb->dev->hard_header_len;
5868 	u32 hash = skb_get_hash_raw(skb);
5869 	struct list_head *head;
5870 	struct sk_buff *p;
5871 
5872 	head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
5873 	list_for_each_entry(p, head, list) {
5874 		unsigned long diffs;
5875 
5876 		NAPI_GRO_CB(p)->flush = 0;
5877 
5878 		if (hash != skb_get_hash_raw(p)) {
5879 			NAPI_GRO_CB(p)->same_flow = 0;
5880 			continue;
5881 		}
5882 
5883 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
5884 		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
5885 		if (skb_vlan_tag_present(p))
5886 			diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
5887 		diffs |= skb_metadata_dst_cmp(p, skb);
5888 		diffs |= skb_metadata_differs(p, skb);
5889 		if (maclen == ETH_HLEN)
5890 			diffs |= compare_ether_header(skb_mac_header(p),
5891 						      skb_mac_header(skb));
5892 		else if (!diffs)
5893 			diffs = memcmp(skb_mac_header(p),
5894 				       skb_mac_header(skb),
5895 				       maclen);
5896 		NAPI_GRO_CB(p)->same_flow = !diffs;
5897 	}
5898 
5899 	return head;
5900 }
5901 
5902 static void skb_gro_reset_offset(struct sk_buff *skb)
5903 {
5904 	const struct skb_shared_info *pinfo = skb_shinfo(skb);
5905 	const skb_frag_t *frag0 = &pinfo->frags[0];
5906 
5907 	NAPI_GRO_CB(skb)->data_offset = 0;
5908 	NAPI_GRO_CB(skb)->frag0 = NULL;
5909 	NAPI_GRO_CB(skb)->frag0_len = 0;
5910 
5911 	if (!skb_headlen(skb) && pinfo->nr_frags &&
5912 	    !PageHighMem(skb_frag_page(frag0))) {
5913 		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
5914 		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
5915 						    skb_frag_size(frag0),
5916 						    skb->end - skb->tail);
5917 	}
5918 }
5919 
5920 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
5921 {
5922 	struct skb_shared_info *pinfo = skb_shinfo(skb);
5923 
5924 	BUG_ON(skb->end - skb->tail < grow);
5925 
5926 	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
5927 
5928 	skb->data_len -= grow;
5929 	skb->tail += grow;
5930 
5931 	skb_frag_off_add(&pinfo->frags[0], grow);
5932 	skb_frag_size_sub(&pinfo->frags[0], grow);
5933 
5934 	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
5935 		skb_frag_unref(skb, 0);
5936 		memmove(pinfo->frags, pinfo->frags + 1,
5937 			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
5938 	}
5939 }
5940 
5941 static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
5942 {
5943 	struct sk_buff *oldest;
5944 
5945 	oldest = list_last_entry(head, struct sk_buff, list);
5946 
5947 	/* We are called with head length >= MAX_GRO_SKBS, so this is
5948 	 * impossible.
5949 	 */
5950 	if (WARN_ON_ONCE(!oldest))
5951 		return;
5952 
5953 	/* Do not adjust napi->gro_hash[].count, caller is adding a new
5954 	 * SKB to the chain.
5955 	 */
5956 	skb_list_del_init(oldest);
5957 	napi_gro_complete(napi, oldest);
5958 }
5959 
5960 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5961 {
5962 	u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
5963 	struct list_head *head = &offload_base;
5964 	struct packet_offload *ptype;
5965 	__be16 type = skb->protocol;
5966 	struct list_head *gro_head;
5967 	struct sk_buff *pp = NULL;
5968 	enum gro_result ret;
5969 	int same_flow;
5970 	int grow;
5971 
5972 	if (netif_elide_gro(skb->dev))
5973 		goto normal;
5974 
5975 	gro_head = gro_list_prepare(napi, skb);
5976 
5977 	rcu_read_lock();
5978 	list_for_each_entry_rcu(ptype, head, list) {
5979 		if (ptype->type != type || !ptype->callbacks.gro_receive)
5980 			continue;
5981 
5982 		skb_set_network_header(skb, skb_gro_offset(skb));
5983 		skb_reset_mac_len(skb);
5984 		NAPI_GRO_CB(skb)->same_flow = 0;
5985 		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
5986 		NAPI_GRO_CB(skb)->free = 0;
5987 		NAPI_GRO_CB(skb)->encap_mark = 0;
5988 		NAPI_GRO_CB(skb)->recursion_counter = 0;
5989 		NAPI_GRO_CB(skb)->is_fou = 0;
5990 		NAPI_GRO_CB(skb)->is_atomic = 1;
5991 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
5992 
5993 		/* Setup for GRO checksum validation */
5994 		switch (skb->ip_summed) {
5995 		case CHECKSUM_COMPLETE:
5996 			NAPI_GRO_CB(skb)->csum = skb->csum;
5997 			NAPI_GRO_CB(skb)->csum_valid = 1;
5998 			NAPI_GRO_CB(skb)->csum_cnt = 0;
5999 			break;
6000 		case CHECKSUM_UNNECESSARY:
6001 			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
6002 			NAPI_GRO_CB(skb)->csum_valid = 0;
6003 			break;
6004 		default:
6005 			NAPI_GRO_CB(skb)->csum_cnt = 0;
6006 			NAPI_GRO_CB(skb)->csum_valid = 0;
6007 		}
6008 
6009 		pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
6010 					ipv6_gro_receive, inet_gro_receive,
6011 					gro_head, skb);
6012 		break;
6013 	}
6014 	rcu_read_unlock();
6015 
6016 	if (&ptype->list == head)
6017 		goto normal;
6018 
6019 	if (PTR_ERR(pp) == -EINPROGRESS) {
6020 		ret = GRO_CONSUMED;
6021 		goto ok;
6022 	}
6023 
6024 	same_flow = NAPI_GRO_CB(skb)->same_flow;
6025 	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
6026 
6027 	if (pp) {
6028 		skb_list_del_init(pp);
6029 		napi_gro_complete(napi, pp);
6030 		napi->gro_hash[hash].count--;
6031 	}
6032 
6033 	if (same_flow)
6034 		goto ok;
6035 
6036 	if (NAPI_GRO_CB(skb)->flush)
6037 		goto normal;
6038 
6039 	if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
6040 		gro_flush_oldest(napi, gro_head);
6041 	} else {
6042 		napi->gro_hash[hash].count++;
6043 	}
6044 	NAPI_GRO_CB(skb)->count = 1;
6045 	NAPI_GRO_CB(skb)->age = jiffies;
6046 	NAPI_GRO_CB(skb)->last = skb;
6047 	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
6048 	list_add(&skb->list, gro_head);
6049 	ret = GRO_HELD;
6050 
6051 pull:
6052 	grow = skb_gro_offset(skb) - skb_headlen(skb);
6053 	if (grow > 0)
6054 		gro_pull_from_frag0(skb, grow);
6055 ok:
6056 	if (napi->gro_hash[hash].count) {
6057 		if (!test_bit(hash, &napi->gro_bitmask))
6058 			__set_bit(hash, &napi->gro_bitmask);
6059 	} else if (test_bit(hash, &napi->gro_bitmask)) {
6060 		__clear_bit(hash, &napi->gro_bitmask);
6061 	}
6062 
6063 	return ret;
6064 
6065 normal:
6066 	ret = GRO_NORMAL;
6067 	goto pull;
6068 }
6069 
6070 struct packet_offload *gro_find_receive_by_type(__be16 type)
6071 {
6072 	struct list_head *offload_head = &offload_base;
6073 	struct packet_offload *ptype;
6074 
6075 	list_for_each_entry_rcu(ptype, offload_head, list) {
6076 		if (ptype->type != type || !ptype->callbacks.gro_receive)
6077 			continue;
6078 		return ptype;
6079 	}
6080 	return NULL;
6081 }
6082 EXPORT_SYMBOL(gro_find_receive_by_type);
6083 
6084 struct packet_offload *gro_find_complete_by_type(__be16 type)
6085 {
6086 	struct list_head *offload_head = &offload_base;
6087 	struct packet_offload *ptype;
6088 
6089 	list_for_each_entry_rcu(ptype, offload_head, list) {
6090 		if (ptype->type != type || !ptype->callbacks.gro_complete)
6091 			continue;
6092 		return ptype;
6093 	}
6094 	return NULL;
6095 }
6096 EXPORT_SYMBOL(gro_find_complete_by_type);
6097 
6098 static gro_result_t napi_skb_finish(struct napi_struct *napi,
6099 				    struct sk_buff *skb,
6100 				    gro_result_t ret)
6101 {
6102 	switch (ret) {
6103 	case GRO_NORMAL:
6104 		gro_normal_one(napi, skb, 1);
6105 		break;
6106 
6107 	case GRO_MERGED_FREE:
6108 		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
6109 			napi_skb_free_stolen_head(skb);
6110 		else
6111 			__kfree_skb_defer(skb);
6112 		break;
6113 
6114 	case GRO_HELD:
6115 	case GRO_MERGED:
6116 	case GRO_CONSUMED:
6117 		break;
6118 	}
6119 
6120 	return ret;
6121 }
6122 
6123 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
6124 {
6125 	gro_result_t ret;
6126 
6127 	skb_mark_napi_id(skb, napi);
6128 	trace_napi_gro_receive_entry(skb);
6129 
6130 	skb_gro_reset_offset(skb);
6131 
6132 	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
6133 	trace_napi_gro_receive_exit(ret);
6134 
6135 	return ret;
6136 }
6137 EXPORT_SYMBOL(napi_gro_receive);
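
/* Example (hypothetical sketch): the common pattern in a driver's poll()
 * loop, pushing each completed buffer into GRO instead of delivering it
 * directly; foo_next_rx_skb() and rxq stand in for the driver's own
 * descriptor handling.
 *
 *	while (done < budget && (skb = foo_next_rx_skb(rxq)) != NULL) {
 *		skb->protocol = eth_type_trans(skb, napi->dev);
 *		napi_gro_receive(napi, skb);
 *		done++;
 *	}
 *
 * GRO then either merges the skb into a held flow, holds it, or falls back
 * to the batched normal path via gro_normal_one() above.
 */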
6138 
6139 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
6140 {
6141 	if (unlikely(skb->pfmemalloc)) {
6142 		consume_skb(skb);
6143 		return;
6144 	}
6145 	__skb_pull(skb, skb_headlen(skb));
6146 	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
6147 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
6148 	__vlan_hwaccel_clear_tag(skb);
6149 	skb->dev = napi->dev;
6150 	skb->skb_iif = 0;
6151 
6152 	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
6153 	skb->pkt_type = PACKET_HOST;
6154 
6155 	skb->encapsulation = 0;
6156 	skb_shinfo(skb)->gso_type = 0;
6157 	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
6158 	skb_ext_reset(skb);
6159 
6160 	napi->skb = skb;
6161 }
6162 
6163 struct sk_buff *napi_get_frags(struct napi_struct *napi)
6164 {
6165 	struct sk_buff *skb = napi->skb;
6166 
6167 	if (!skb) {
6168 		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
6169 		if (skb) {
6170 			napi->skb = skb;
6171 			skb_mark_napi_id(skb, napi);
6172 		}
6173 	}
6174 	return skb;
6175 }
6176 EXPORT_SYMBOL(napi_get_frags);
6177 
6178 static gro_result_t napi_frags_finish(struct napi_struct *napi,
6179 				      struct sk_buff *skb,
6180 				      gro_result_t ret)
6181 {
6182 	switch (ret) {
6183 	case GRO_NORMAL:
6184 	case GRO_HELD:
6185 		__skb_push(skb, ETH_HLEN);
6186 		skb->protocol = eth_type_trans(skb, skb->dev);
6187 		if (ret == GRO_NORMAL)
6188 			gro_normal_one(napi, skb, 1);
6189 		break;
6190 
6191 	case GRO_MERGED_FREE:
6192 		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
6193 			napi_skb_free_stolen_head(skb);
6194 		else
6195 			napi_reuse_skb(napi, skb);
6196 		break;
6197 
6198 	case GRO_MERGED:
6199 	case GRO_CONSUMED:
6200 		break;
6201 	}
6202 
6203 	return ret;
6204 }
6205 
6206 /* The upper GRO stack assumes the network header starts at gro_offset=0.
6207  * Drivers could call both napi_gro_frags() and napi_gro_receive().
6208  * We copy the ethernet header into skb->data to have a common layout.
6209  */
6210 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
6211 {
6212 	struct sk_buff *skb = napi->skb;
6213 	const struct ethhdr *eth;
6214 	unsigned int hlen = sizeof(*eth);
6215 
6216 	napi->skb = NULL;
6217 
6218 	skb_reset_mac_header(skb);
6219 	skb_gro_reset_offset(skb);
6220 
6221 	if (unlikely(skb_gro_header_hard(skb, hlen))) {
6222 		eth = skb_gro_header_slow(skb, hlen, 0);
6223 		if (unlikely(!eth)) {
6224 			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
6225 					     __func__, napi->dev->name);
6226 			napi_reuse_skb(napi, skb);
6227 			return NULL;
6228 		}
6229 	} else {
6230 		eth = (const struct ethhdr *)skb->data;
6231 		gro_pull_from_frag0(skb, hlen);
6232 		NAPI_GRO_CB(skb)->frag0 += hlen;
6233 		NAPI_GRO_CB(skb)->frag0_len -= hlen;
6234 	}
6235 	__skb_pull(skb, hlen);
6236 
6237 	/*
6238 	 * This works because the only protocols we care about don't require
6239 	 * special handling.
6240 	 * We'll fix it up properly in napi_frags_finish()
6241 	 */
6242 	skb->protocol = eth->h_proto;
6243 
6244 	return skb;
6245 }
6246 
6247 gro_result_t napi_gro_frags(struct napi_struct *napi)
6248 {
6249 	gro_result_t ret;
6250 	struct sk_buff *skb = napi_frags_skb(napi);
6251 
6252 	trace_napi_gro_frags_entry(skb);
6253 
6254 	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
6255 	trace_napi_gro_frags_exit(ret);
6256 
6257 	return ret;
6258 }
6259 EXPORT_SYMBOL(napi_gro_frags);
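
/* Example (hypothetical sketch): the frag-only variant used by drivers that
 * never build a linear header themselves.  The driver attaches its page
 * fragment(s) to the skb obtained from napi_get_frags() and lets
 * napi_gro_frags() pull the Ethernet header out of frag0:
 *
 *	skb = napi_get_frags(napi);
 *	if (unlikely(!skb))
 *		goto drop;
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += truesize;
 *	napi_gro_frags(napi);
 *
 * page, offset, len and truesize come from the driver's own RX buffer
 * management and are assumptions of this sketch.
 */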
6260 
6261 /* Compute the checksum from gro_offset and return the folded value
6262  * after adding in any pseudo checksum.
6263  */
6264 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
6265 {
6266 	__wsum wsum;
6267 	__sum16 sum;
6268 
6269 	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
6270 
6271 	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
6272 	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
6273 	/* See comments in __skb_checksum_complete(). */
6274 	if (likely(!sum)) {
6275 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
6276 		    !skb->csum_complete_sw)
6277 			netdev_rx_csum_fault(skb->dev, skb);
6278 	}
6279 
6280 	NAPI_GRO_CB(skb)->csum = wsum;
6281 	NAPI_GRO_CB(skb)->csum_valid = 1;
6282 
6283 	return sum;
6284 }
6285 EXPORT_SYMBOL(__skb_gro_checksum_complete);
6286 
6287 static void net_rps_send_ipi(struct softnet_data *remsd)
6288 {
6289 #ifdef CONFIG_RPS
6290 	while (remsd) {
6291 		struct softnet_data *next = remsd->rps_ipi_next;
6292 
6293 		if (cpu_online(remsd->cpu))
6294 			smp_call_function_single_async(remsd->cpu, &remsd->csd);
6295 		remsd = next;
6296 	}
6297 #endif
6298 }
6299 
6300 /*
6301  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
6302  * Note: called with local irq disabled, but exits with local irq enabled.
6303  */
6304 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
6305 {
6306 #ifdef CONFIG_RPS
6307 	struct softnet_data *remsd = sd->rps_ipi_list;
6308 
6309 	if (remsd) {
6310 		sd->rps_ipi_list = NULL;
6311 
6312 		local_irq_enable();
6313 
6314 		/* Send pending IPI's to kick RPS processing on remote cpus. */
6315 		net_rps_send_ipi(remsd);
6316 	} else
6317 #endif
6318 		local_irq_enable();
6319 }
6320 
6321 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
6322 {
6323 #ifdef CONFIG_RPS
6324 	return sd->rps_ipi_list != NULL;
6325 #else
6326 	return false;
6327 #endif
6328 }
6329 
6330 static int process_backlog(struct napi_struct *napi, int quota)
6331 {
6332 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
6333 	bool again = true;
6334 	int work = 0;
6335 
6336 	/* Check if we have pending IPIs; it's better to send them now
6337 	 * rather than waiting for net_rx_action() to end.
6338 	 */
6339 	if (sd_has_rps_ipi_waiting(sd)) {
6340 		local_irq_disable();
6341 		net_rps_action_and_irq_enable(sd);
6342 	}
6343 
6344 	napi->weight = dev_rx_weight;
6345 	while (again) {
6346 		struct sk_buff *skb;
6347 
6348 		while ((skb = __skb_dequeue(&sd->process_queue))) {
6349 			rcu_read_lock();
6350 			__netif_receive_skb(skb);
6351 			rcu_read_unlock();
6352 			input_queue_head_incr(sd);
6353 			if (++work >= quota)
6354 				return work;
6355 
6356 		}
6357 
6358 		local_irq_disable();
6359 		rps_lock(sd);
6360 		if (skb_queue_empty(&sd->input_pkt_queue)) {
6361 			/*
6362 			 * Inline a custom version of __napi_complete().
6363 			 * only current cpu owns and manipulates this napi,
6364 			 * Only the current cpu owns and manipulates this napi,
6365 			 * and NAPI_STATE_SCHED is the only possible flag set
6366 			 * on backlog.
6367 			 * We can use a plain write instead of clear_bit(),
6368 			 * and we don't need an smp_mb() memory barrier.
6369 			napi->state = 0;
6370 			again = false;
6371 		} else {
6372 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
6373 						   &sd->process_queue);
6374 		}
6375 		rps_unlock(sd);
6376 		local_irq_enable();
6377 	}
6378 
6379 	return work;
6380 }
6381 
6382 /**
6383  * __napi_schedule - schedule for receive
6384  * @n: entry to schedule
6385  *
6386  * The entry's receive function will be scheduled to run.
6387  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
6388  */
6389 void __napi_schedule(struct napi_struct *n)
6390 {
6391 	unsigned long flags;
6392 
6393 	local_irq_save(flags);
6394 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
6395 	local_irq_restore(flags);
6396 }
6397 EXPORT_SYMBOL(__napi_schedule);
6398 
6399 /**
6400  *	napi_schedule_prep - check if napi can be scheduled
6401  *	@n: napi context
6402  *
6403  * Test if NAPI routine is already running, and if not mark
6404  * it as running.  This is used as a condition variable to
6405  * ensure only one NAPI poll instance runs.  We also make
6406  * sure there is no pending NAPI disable.
6407  */
6408 bool napi_schedule_prep(struct napi_struct *n)
6409 {
6410 	unsigned long val, new;
6411 
6412 	do {
6413 		val = READ_ONCE(n->state);
6414 		if (unlikely(val & NAPIF_STATE_DISABLE))
6415 			return false;
6416 		new = val | NAPIF_STATE_SCHED;
6417 
6418 		/* Sets STATE_MISSED bit if STATE_SCHED was already set.
6419 		 * This was suggested by Alexander Duyck, as the compiler
6420 		 * emits better code than:
6421 		 * if (val & NAPIF_STATE_SCHED)
6422 		 *     new |= NAPIF_STATE_MISSED;
6423 		 */
6424 		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
6425 						   NAPIF_STATE_MISSED;
6426 	} while (cmpxchg(&n->state, val, new) != val);
6427 
6428 	return !(val & NAPIF_STATE_SCHED);
6429 }
6430 EXPORT_SYMBOL(napi_schedule_prep);
6431 
6432 /**
6433  * __napi_schedule_irqoff - schedule for receive
6434  * @n: entry to schedule
6435  *
6436  * Variant of __napi_schedule() assuming hard irqs are masked
6437  */
6438 void __napi_schedule_irqoff(struct napi_struct *n)
6439 {
6440 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
6441 }
6442 EXPORT_SYMBOL(__napi_schedule_irqoff);
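
/* Example (hypothetical sketch): the canonical interrupt handler counterpart
 * of the scheduling helpers above.  The device interrupt is masked and the
 * NAPI instance is handed to the softirq (or NAPI kthread); foo_* names are
 * placeholders.
 *
 *	static irqreturn_t foo_intr(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			foo_mask_irq(priv);
 *			__napi_schedule_irqoff(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 *
 * __napi_schedule_irqoff() is appropriate here because hard irqs are masked
 * in the handler; otherwise __napi_schedule() should be used.
 */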
6443 
6444 bool napi_complete_done(struct napi_struct *n, int work_done)
6445 {
6446 	unsigned long flags, val, new, timeout = 0;
6447 	bool ret = true;
6448 
6449 	/*
6450 	 * 1) Don't let napi dequeue from the cpu poll list
6451 	 *    just in case it's running on a different cpu.
6452 	 * 2) If we are busy polling, do nothing here; we have
6453 	 *    the guarantee we will be called later.
6454 	 */
6455 	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6456 				 NAPIF_STATE_IN_BUSY_POLL)))
6457 		return false;
6458 
6459 	if (work_done) {
6460 		if (n->gro_bitmask)
6461 			timeout = READ_ONCE(n->dev->gro_flush_timeout);
6462 		n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
6463 	}
6464 	if (n->defer_hard_irqs_count > 0) {
6465 		n->defer_hard_irqs_count--;
6466 		timeout = READ_ONCE(n->dev->gro_flush_timeout);
6467 		if (timeout)
6468 			ret = false;
6469 	}
6470 	if (n->gro_bitmask) {
6471 		/* When the NAPI instance uses a timeout and keeps postponing
6472 		 * it, we need to somehow bound the time packets are kept in
6473 		 * the GRO layer.
6474 		 */
6475 		napi_gro_flush(n, !!timeout);
6476 	}
6477 
6478 	gro_normal_list(n);
6479 
6480 	if (unlikely(!list_empty(&n->poll_list))) {
6481 		/* If n->poll_list is not empty, we need to mask irqs */
6482 		local_irq_save(flags);
6483 		list_del_init(&n->poll_list);
6484 		local_irq_restore(flags);
6485 	}
6486 
6487 	do {
6488 		val = READ_ONCE(n->state);
6489 
6490 		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6491 
6492 		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
6493 			      NAPIF_STATE_PREFER_BUSY_POLL);
6494 
6495 		/* If STATE_MISSED was set, leave STATE_SCHED set,
6496 		 * because we will call napi->poll() one more time.
6497 		 * This C code was suggested by Alexander Duyck to help gcc.
6498 		 */
6499 		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6500 						    NAPIF_STATE_SCHED;
6501 	} while (cmpxchg(&n->state, val, new) != val);
6502 
6503 	if (unlikely(val & NAPIF_STATE_MISSED)) {
6504 		__napi_schedule(n);
6505 		return false;
6506 	}
6507 
6508 	if (timeout)
6509 		hrtimer_start(&n->timer, ns_to_ktime(timeout),
6510 			      HRTIMER_MODE_REL_PINNED);
6511 	return ret;
6512 }
6513 EXPORT_SYMBOL(napi_complete_done);
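
/* Example (hypothetical sketch): the usual end-of-poll sequence.  The driver
 * only re-arms its interrupt when napi_complete_done() really released the
 * instance; a false return means busy polling still owns it or an irq
 * deferral (gro_flush_timeout / napi_defer_hard_irqs) is pending.
 * foo_* names are placeholders.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work = foo_clean_rx(priv, budget);
 *
 *		if (work < budget && napi_complete_done(napi, work))
 *			foo_unmask_irq(priv);
 *		return work;
 *	}
 */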
6514 
6515 /* must be called under rcu_read_lock(), as we dont take a reference */
6516 static struct napi_struct *napi_by_id(unsigned int napi_id)
6517 {
6518 	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6519 	struct napi_struct *napi;
6520 
6521 	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6522 		if (napi->napi_id == napi_id)
6523 			return napi;
6524 
6525 	return NULL;
6526 }
6527 
6528 #if defined(CONFIG_NET_RX_BUSY_POLL)
6529 
6530 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
6531 {
6532 	if (!skip_schedule) {
6533 		gro_normal_list(napi);
6534 		__napi_schedule(napi);
6535 		return;
6536 	}
6537 
6538 	if (napi->gro_bitmask) {
6539 		/* flush too old packets
6540 		 * If HZ < 1000, flush all packets.
6541 		 */
6542 		napi_gro_flush(napi, HZ >= 1000);
6543 	}
6544 
6545 	gro_normal_list(napi);
6546 	clear_bit(NAPI_STATE_SCHED, &napi->state);
6547 }
6548 
6549 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll,
6550 			   u16 budget)
6551 {
6552 	bool skip_schedule = false;
6553 	unsigned long timeout;
6554 	int rc;
6555 
6556 	/* Busy polling means there is a high chance device driver hard irq
6557 	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6558 	 * set in napi_schedule_prep().
6559 	 * Since we are about to call napi->poll() once more, we can safely
6560 	 * clear NAPI_STATE_MISSED.
6561 	 *
6562 	 * Note: x86 could use a single "lock and ..." instruction
6563 	 * to perform these two clear_bit()
6564 	 */
6565 	clear_bit(NAPI_STATE_MISSED, &napi->state);
6566 	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6567 
6568 	local_bh_disable();
6569 
6570 	if (prefer_busy_poll) {
6571 		napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
6572 		timeout = READ_ONCE(napi->dev->gro_flush_timeout);
6573 		if (napi->defer_hard_irqs_count && timeout) {
6574 			hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
6575 			skip_schedule = true;
6576 		}
6577 	}
6578 
6579 	/* All we really want here is to re-enable device interrupts.
6580 	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6581 	 */
6582 	rc = napi->poll(napi, budget);
6583 	/* We can't gro_normal_list() here, because napi->poll() might have
6584 	 * rearmed the napi (napi_complete_done()) in which case it could
6585 	 * already be running on another CPU.
6586 	 */
6587 	trace_napi_poll(napi, rc, budget);
6588 	netpoll_poll_unlock(have_poll_lock);
6589 	if (rc == budget)
6590 		__busy_poll_stop(napi, skip_schedule);
6591 	local_bh_enable();
6592 }
6593 
6594 void napi_busy_loop(unsigned int napi_id,
6595 		    bool (*loop_end)(void *, unsigned long),
6596 		    void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6597 {
6598 	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6599 	int (*napi_poll)(struct napi_struct *napi, int budget);
6600 	void *have_poll_lock = NULL;
6601 	struct napi_struct *napi;
6602 
6603 restart:
6604 	napi_poll = NULL;
6605 
6606 	rcu_read_lock();
6607 
6608 	napi = napi_by_id(napi_id);
6609 	if (!napi)
6610 		goto out;
6611 
6612 	preempt_disable();
6613 	for (;;) {
6614 		int work = 0;
6615 
6616 		local_bh_disable();
6617 		if (!napi_poll) {
6618 			unsigned long val = READ_ONCE(napi->state);
6619 
6620 			/* If multiple threads are competing for this napi,
6621 			 * we avoid dirtying napi->state as much as we can.
6622 			 */
6623 			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6624 				   NAPIF_STATE_IN_BUSY_POLL)) {
6625 				if (prefer_busy_poll)
6626 					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6627 				goto count;
6628 			}
6629 			if (cmpxchg(&napi->state, val,
6630 				    val | NAPIF_STATE_IN_BUSY_POLL |
6631 					  NAPIF_STATE_SCHED) != val) {
6632 				if (prefer_busy_poll)
6633 					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6634 				goto count;
6635 			}
6636 			have_poll_lock = netpoll_poll_lock(napi);
6637 			napi_poll = napi->poll;
6638 		}
6639 		work = napi_poll(napi, budget);
6640 		trace_napi_poll(napi, work, budget);
6641 		gro_normal_list(napi);
6642 count:
6643 		if (work > 0)
6644 			__NET_ADD_STATS(dev_net(napi->dev),
6645 					LINUX_MIB_BUSYPOLLRXPACKETS, work);
6646 		local_bh_enable();
6647 
6648 		if (!loop_end || loop_end(loop_end_arg, start_time))
6649 			break;
6650 
6651 		if (unlikely(need_resched())) {
6652 			if (napi_poll)
6653 				busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6654 			preempt_enable();
6655 			rcu_read_unlock();
6656 			cond_resched();
6657 			if (loop_end(loop_end_arg, start_time))
6658 				return;
6659 			goto restart;
6660 		}
6661 		cpu_relax();
6662 	}
6663 	if (napi_poll)
6664 		busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6665 	preempt_enable();
6666 out:
6667 	rcu_read_unlock();
6668 }
6669 EXPORT_SYMBOL(napi_busy_loop);
6670 
6671 #endif /* CONFIG_NET_RX_BUSY_POLL */
6672 
6673 static void napi_hash_add(struct napi_struct *napi)
6674 {
6675 	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
6676 		return;
6677 
6678 	spin_lock(&napi_hash_lock);
6679 
6680 	/* 0..NR_CPUS range is reserved for sender_cpu use */
6681 	do {
6682 		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6683 			napi_gen_id = MIN_NAPI_ID;
6684 	} while (napi_by_id(napi_gen_id));
6685 	napi->napi_id = napi_gen_id;
6686 
6687 	hlist_add_head_rcu(&napi->napi_hash_node,
6688 			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6689 
6690 	spin_unlock(&napi_hash_lock);
6691 }
6692 
6693 /* Warning: the caller is responsible for making sure an RCU grace period
6694  * has elapsed before freeing the memory containing @napi
6695  */
6696 static void napi_hash_del(struct napi_struct *napi)
6697 {
6698 	spin_lock(&napi_hash_lock);
6699 
6700 	hlist_del_init_rcu(&napi->napi_hash_node);
6701 
6702 	spin_unlock(&napi_hash_lock);
6703 }
6704 
6705 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6706 {
6707 	struct napi_struct *napi;
6708 
6709 	napi = container_of(timer, struct napi_struct, timer);
6710 
6711 	/* Note: we use a relaxed variant of napi_schedule_prep(), not setting
6712 	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6713 	 */
6714 	if (!napi_disable_pending(napi) &&
6715 	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
6716 		clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6717 		__napi_schedule_irqoff(napi);
6718 	}
6719 
6720 	return HRTIMER_NORESTART;
6721 }
6722 
6723 static void init_gro_hash(struct napi_struct *napi)
6724 {
6725 	int i;
6726 
6727 	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6728 		INIT_LIST_HEAD(&napi->gro_hash[i].list);
6729 		napi->gro_hash[i].count = 0;
6730 	}
6731 	napi->gro_bitmask = 0;
6732 }
6733 
6734 int dev_set_threaded(struct net_device *dev, bool threaded)
6735 {
6736 	struct napi_struct *napi;
6737 	int err = 0;
6738 
6739 	if (dev->threaded == threaded)
6740 		return 0;
6741 
6742 	if (threaded) {
6743 		list_for_each_entry(napi, &dev->napi_list, dev_list) {
6744 			if (!napi->thread) {
6745 				err = napi_kthread_create(napi);
6746 				if (err) {
6747 					threaded = false;
6748 					break;
6749 				}
6750 			}
6751 		}
6752 	}
6753 
6754 	dev->threaded = threaded;
6755 
6756 	/* Make sure kthread is created before THREADED bit
6757 	 * is set.
6758 	 */
6759 	smp_mb__before_atomic();
6760 
6761 	/* Setting/unsetting threaded mode on a napi might not immediately
6762 	 * take effect, if the current napi instance is actively being
6763 	 * polled. In this case, the switch between threaded mode and
6764 	 * softirq mode will happen in the next round of napi_schedule().
6765 	 * This should not cause hiccups/stalls to the live traffic.
6766 	 */
6767 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
6768 		if (threaded)
6769 			set_bit(NAPI_STATE_THREADED, &napi->state);
6770 		else
6771 			clear_bit(NAPI_STATE_THREADED, &napi->state);
6772 	}
6773 
6774 	return err;
6775 }
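
/* Example (hypothetical sketch): switching every NAPI instance of a device
 * to threaded mode.  This is normally driven through the per-netdev
 * "threaded" sysfs attribute, which calls dev_set_threaded() under RTNL;
 * a direct caller would do the same:
 *
 *	rtnl_lock();
 *	err = dev_set_threaded(dev, true);
 *	rtnl_unlock();
 *
 * As the comment above notes, an instance that is currently being polled
 * only switches over on its next napi_schedule() round.
 */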
6776 
6777 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
6778 		    int (*poll)(struct napi_struct *, int), int weight)
6779 {
6780 	if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
6781 		return;
6782 
6783 	INIT_LIST_HEAD(&napi->poll_list);
6784 	INIT_HLIST_NODE(&napi->napi_hash_node);
6785 	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6786 	napi->timer.function = napi_watchdog;
6787 	init_gro_hash(napi);
6788 	napi->skb = NULL;
6789 	INIT_LIST_HEAD(&napi->rx_list);
6790 	napi->rx_count = 0;
6791 	napi->poll = poll;
6792 	if (weight > NAPI_POLL_WEIGHT)
6793 		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6794 				weight);
6795 	napi->weight = weight;
6796 	napi->dev = dev;
6797 #ifdef CONFIG_NETPOLL
6798 	napi->poll_owner = -1;
6799 #endif
6800 	set_bit(NAPI_STATE_SCHED, &napi->state);
6801 	set_bit(NAPI_STATE_NPSVC, &napi->state);
6802 	list_add_rcu(&napi->dev_list, &dev->napi_list);
6803 	napi_hash_add(napi);
6804 	/* Create kthread for this napi if dev->threaded is set.
6805 	 * Clear dev->threaded if kthread creation failed so that
6806 	 * threaded mode will not be enabled in napi_enable().
6807 	 */
6808 	if (dev->threaded && napi_kthread_create(napi))
6809 		dev->threaded = 0;
6810 }
6811 EXPORT_SYMBOL(netif_napi_add);
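
/* Example (hypothetical sketch): registration at probe/alloc time.  The
 * instance starts out disabled (SCHED and NPSVC set above) until the
 * device's open path calls napi_enable(); foo_poll() and priv are
 * placeholders.
 *
 *	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 */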
6812 
6813 void napi_disable(struct napi_struct *n)
6814 {
6815 	might_sleep();
6816 	set_bit(NAPI_STATE_DISABLE, &n->state);
6817 
6818 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
6819 		msleep(1);
6820 	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
6821 		msleep(1);
6822 
6823 	hrtimer_cancel(&n->timer);
6824 
6825 	clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
6826 	clear_bit(NAPI_STATE_DISABLE, &n->state);
6827 	clear_bit(NAPI_STATE_THREADED, &n->state);
6828 }
6829 EXPORT_SYMBOL(napi_disable);
6830 
6831 /**
6832  *	napi_enable - enable NAPI scheduling
6833  *	@n: NAPI context
6834  *
6835  * Resume NAPI from being scheduled on this context.
6836  * Must be paired with napi_disable.
6837  */
6838 void napi_enable(struct napi_struct *n)
6839 {
6840 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
6841 	smp_mb__before_atomic();
6842 	clear_bit(NAPI_STATE_SCHED, &n->state);
6843 	clear_bit(NAPI_STATE_NPSVC, &n->state);
6844 	if (n->dev->threaded && n->thread)
6845 		set_bit(NAPI_STATE_THREADED, &n->state);
6846 }
6847 EXPORT_SYMBOL(napi_enable);
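
/* Example (hypothetical sketch): how napi_enable()/napi_disable() are
 * usually paired across the device's open/stop callbacks; foo_* names are
 * placeholders.
 *
 *	static int foo_open(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		napi_enable(&priv->napi);
 *		foo_unmask_irq(priv);
 *		return 0;
 *	}
 *
 *	static int foo_stop(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		foo_mask_irq(priv);
 *		napi_disable(&priv->napi);
 *		return 0;
 *	}
 *
 * napi_disable() may sleep while waiting for an in-flight poll to finish,
 * so it must only be called from process context (see the might_sleep()
 * above).
 */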
6848 
6849 static void flush_gro_hash(struct napi_struct *napi)
6850 {
6851 	int i;
6852 
6853 	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6854 		struct sk_buff *skb, *n;
6855 
6856 		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6857 			kfree_skb(skb);
6858 		napi->gro_hash[i].count = 0;
6859 	}
6860 }
6861 
6862 /* Must be called in process context */
6863 void __netif_napi_del(struct napi_struct *napi)
6864 {
6865 	if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
6866 		return;
6867 
6868 	napi_hash_del(napi);
6869 	list_del_rcu(&napi->dev_list);
6870 	napi_free_frags(napi);
6871 
6872 	flush_gro_hash(napi);
6873 	napi->gro_bitmask = 0;
6874 
6875 	if (napi->thread) {
6876 		kthread_stop(napi->thread);
6877 		napi->thread = NULL;
6878 	}
6879 }
6880 EXPORT_SYMBOL(__netif_napi_del);
6881 
6882 static int __napi_poll(struct napi_struct *n, bool *repoll)
6883 {
6884 	int work, weight;
6885 
6886 	weight = n->weight;
6887 
6888 	/* This NAPI_STATE_SCHED test is for avoiding a race
6889 	 * with netpoll's poll_napi().  Only the entity which
6890 	 * obtains the lock and sees NAPI_STATE_SCHED set will
6891 	 * actually make the ->poll() call.  Therefore we avoid
6892 	 * accidentally calling ->poll() when NAPI is not scheduled.
6893 	 */
6894 	work = 0;
6895 	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6896 		work = n->poll(n, weight);
6897 		trace_napi_poll(n, work, weight);
6898 	}
6899 
6900 	if (unlikely(work > weight))
6901 		pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
6902 			    n->poll, work, weight);
6903 
6904 	if (likely(work < weight))
6905 		return work;
6906 
6907 	/* Drivers must not modify the NAPI state if they
6908 	 * consume the entire weight.  In such cases this code
6909 	 * still "owns" the NAPI instance and therefore can
6910 	 * move the instance around on the list at-will.
6911 	 */
6912 	if (unlikely(napi_disable_pending(n))) {
6913 		napi_complete(n);
6914 		return work;
6915 	}
6916 
6917 	/* The NAPI context has more processing work, but busy-polling
6918 	 * is preferred. Exit early.
6919 	 */
6920 	if (napi_prefer_busy_poll(n)) {
6921 		if (napi_complete_done(n, work)) {
6922 			/* If timeout is not set, we need to make sure
6923 			 * that the NAPI is re-scheduled.
6924 			 */
6925 			napi_schedule(n);
6926 		}
6927 		return work;
6928 	}
6929 
6930 	if (n->gro_bitmask) {
6931 		/* flush too old packets
6932 		 * If HZ < 1000, flush all packets.
6933 		 */
6934 		napi_gro_flush(n, HZ >= 1000);
6935 	}
6936 
6937 	gro_normal_list(n);
6938 
6939 	/* Some drivers may have called napi_schedule
6940 	 * prior to exhausting their budget.
6941 	 */
6942 	if (unlikely(!list_empty(&n->poll_list))) {
6943 		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6944 			     n->dev ? n->dev->name : "backlog");
6945 		return work;
6946 	}
6947 
6948 	*repoll = true;
6949 
6950 	return work;
6951 }
6952 
6953 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6954 {
6955 	bool do_repoll = false;
6956 	void *have;
6957 	int work;
6958 
6959 	list_del_init(&n->poll_list);
6960 
6961 	have = netpoll_poll_lock(n);
6962 
6963 	work = __napi_poll(n, &do_repoll);
6964 
6965 	if (do_repoll)
6966 		list_add_tail(&n->poll_list, repoll);
6967 
6968 	netpoll_poll_unlock(have);
6969 
6970 	return work;
6971 }
6972 
6973 static int napi_thread_wait(struct napi_struct *napi)
6974 {
6975 	set_current_state(TASK_INTERRUPTIBLE);
6976 
6977 	while (!kthread_should_stop() && !napi_disable_pending(napi)) {
6978 		if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
6979 			WARN_ON(!list_empty(&napi->poll_list));
6980 			__set_current_state(TASK_RUNNING);
6981 			return 0;
6982 		}
6983 
6984 		schedule();
6985 		set_current_state(TASK_INTERRUPTIBLE);
6986 	}
6987 	__set_current_state(TASK_RUNNING);
6988 	return -1;
6989 }
6990 
6991 static int napi_threaded_poll(void *data)
6992 {
6993 	struct napi_struct *napi = data;
6994 	void *have;
6995 
6996 	while (!napi_thread_wait(napi)) {
6997 		for (;;) {
6998 			bool repoll = false;
6999 
7000 			local_bh_disable();
7001 
7002 			have = netpoll_poll_lock(napi);
7003 			__napi_poll(napi, &repoll);
7004 			netpoll_poll_unlock(have);
7005 
7006 			local_bh_enable();
7007 
7008 			if (!repoll)
7009 				break;
7010 
7011 			cond_resched();
7012 		}
7013 	}
7014 	return 0;
7015 }
7016 
7017 static __latent_entropy void net_rx_action(struct softirq_action *h)
7018 {
7019 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
7020 	unsigned long time_limit = jiffies +
7021 		usecs_to_jiffies(netdev_budget_usecs);
7022 	int budget = netdev_budget;
7023 	LIST_HEAD(list);
7024 	LIST_HEAD(repoll);
7025 
7026 	local_irq_disable();
7027 	list_splice_init(&sd->poll_list, &list);
7028 	local_irq_enable();
7029 
7030 	for (;;) {
7031 		struct napi_struct *n;
7032 
7033 		if (list_empty(&list)) {
7034 			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
7035 				return;
7036 			break;
7037 		}
7038 
7039 		n = list_first_entry(&list, struct napi_struct, poll_list);
7040 		budget -= napi_poll(n, &repoll);
7041 
7042 		/* If softirq window is exhausted then punt.
7043 		 * Allow this to run for 2 jiffies, which will allow
7044 		 * an average latency of 1.5/HZ.
7045 		 */
7046 		if (unlikely(budget <= 0 ||
7047 			     time_after_eq(jiffies, time_limit))) {
7048 			sd->time_squeeze++;
7049 			break;
7050 		}
7051 	}
7052 
7053 	local_irq_disable();
7054 
7055 	list_splice_tail_init(&sd->poll_list, &list);
7056 	list_splice_tail(&repoll, &list);
7057 	list_splice(&list, &sd->poll_list);
7058 	if (!list_empty(&sd->poll_list))
7059 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
7060 
7061 	net_rps_action_and_irq_enable(sd);
7062 }
7063 
7064 struct netdev_adjacent {
7065 	struct net_device *dev;
7066 
7067 	/* upper master flag, there can only be one master device per list */
7068 	bool master;
7069 
7070 	/* lookup ignore flag */
7071 	bool ignore;
7072 
7073 	/* counter for the number of times this device was added to us */
7074 	u16 ref_nr;
7075 
7076 	/* private field for the users */
7077 	void *private;
7078 
7079 	struct list_head list;
7080 	struct rcu_head rcu;
7081 };
7082 
7083 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
7084 						 struct list_head *adj_list)
7085 {
7086 	struct netdev_adjacent *adj;
7087 
7088 	list_for_each_entry(adj, adj_list, list) {
7089 		if (adj->dev == adj_dev)
7090 			return adj;
7091 	}
7092 	return NULL;
7093 }
7094 
7095 static int ____netdev_has_upper_dev(struct net_device *upper_dev,
7096 				    struct netdev_nested_priv *priv)
7097 {
7098 	struct net_device *dev = (struct net_device *)priv->data;
7099 
7100 	return upper_dev == dev;
7101 }
7102 
7103 /**
7104  * netdev_has_upper_dev - Check if device is linked to an upper device
7105  * @dev: device
7106  * @upper_dev: upper device to check
7107  *
7108  * Find out if a device is linked to specified upper device and return true
7109  * in case it is. Note that this checks only immediate upper device,
7110  * not through a complete stack of devices. The caller must hold the RTNL lock.
7111  */
7112 bool netdev_has_upper_dev(struct net_device *dev,
7113 			  struct net_device *upper_dev)
7114 {
7115 	struct netdev_nested_priv priv = {
7116 		.data = (void *)upper_dev,
7117 	};
7118 
7119 	ASSERT_RTNL();
7120 
7121 	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
7122 					     &priv);
7123 }
7124 EXPORT_SYMBOL(netdev_has_upper_dev);
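
/* Example (hypothetical sketch): checking whether a port is already linked
 * to the prospective upper device before creating a new link; the names and
 * the -EBUSY policy are assumptions.
 *
 *	ASSERT_RTNL();
 *	if (netdev_has_upper_dev(port_dev, upper_dev))
 *		return -EBUSY;
 */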
7125 
7126 /**
7127  * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
7128  * @dev: device
7129  * @upper_dev: upper device to check
7130  *
7131  * Find out if a device is linked to specified upper device and return true
7132  * in case it is. Note that this checks the entire upper device chain.
7133  * The caller must hold rcu lock.
7134  */
7135 
7136 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
7137 				  struct net_device *upper_dev)
7138 {
7139 	struct netdev_nested_priv priv = {
7140 		.data = (void *)upper_dev,
7141 	};
7142 
7143 	return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
7144 					       &priv);
7145 }
7146 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
7147 
7148 /**
7149  * netdev_has_any_upper_dev - Check if device is linked to some device
7150  * @dev: device
7151  *
7152  * Find out if a device is linked to an upper device and return true in case
7153  * it is. The caller must hold the RTNL lock.
7154  */
7155 bool netdev_has_any_upper_dev(struct net_device *dev)
7156 {
7157 	ASSERT_RTNL();
7158 
7159 	return !list_empty(&dev->adj_list.upper);
7160 }
7161 EXPORT_SYMBOL(netdev_has_any_upper_dev);
7162 
7163 /**
7164  * netdev_master_upper_dev_get - Get master upper device
7165  * @dev: device
7166  *
7167  * Find a master upper device and return pointer to it or NULL in case
7168  * it's not there. The caller must hold the RTNL lock.
7169  */
7170 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
7171 {
7172 	struct netdev_adjacent *upper;
7173 
7174 	ASSERT_RTNL();
7175 
7176 	if (list_empty(&dev->adj_list.upper))
7177 		return NULL;
7178 
7179 	upper = list_first_entry(&dev->adj_list.upper,
7180 				 struct netdev_adjacent, list);
7181 	if (likely(upper->master))
7182 		return upper->dev;
7183 	return NULL;
7184 }
7185 EXPORT_SYMBOL(netdev_master_upper_dev_get);
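
/* Example (hypothetical sketch): looking up the bonding/bridge/team master
 * of a port while holding RTNL:
 *
 *	struct net_device *master;
 *
 *	ASSERT_RTNL();
 *	master = netdev_master_upper_dev_get(port_dev);
 *	if (master)
 *		netdev_info(port_dev, "master is %s\n", master->name);
 */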
7186 
7187 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
7188 {
7189 	struct netdev_adjacent *upper;
7190 
7191 	ASSERT_RTNL();
7192 
7193 	if (list_empty(&dev->adj_list.upper))
7194 		return NULL;
7195 
7196 	upper = list_first_entry(&dev->adj_list.upper,
7197 				 struct netdev_adjacent, list);
7198 	if (likely(upper->master) && !upper->ignore)
7199 		return upper->dev;
7200 	return NULL;
7201 }
7202 
7203 /**
7204  * netdev_has_any_lower_dev - Check if device is linked to some device
7205  * @dev: device
7206  *
7207  * Find out if a device is linked to a lower device and return true in case
7208  * it is. The caller must hold the RTNL lock.
7209  */
7210 static bool netdev_has_any_lower_dev(struct net_device *dev)
7211 {
7212 	ASSERT_RTNL();
7213 
7214 	return !list_empty(&dev->adj_list.lower);
7215 }
7216 
7217 void *netdev_adjacent_get_private(struct list_head *adj_list)
7218 {
7219 	struct netdev_adjacent *adj;
7220 
7221 	adj = list_entry(adj_list, struct netdev_adjacent, list);
7222 
7223 	return adj->private;
7224 }
7225 EXPORT_SYMBOL(netdev_adjacent_get_private);
7226 
7227 /**
7228  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
7229  * @dev: device
7230  * @iter: list_head ** of the current position
7231  *
7232  * Gets the next device from the dev's upper list, starting from iter
7233  * position. The caller must hold RCU read lock.
7234  */
7235 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
7236 						 struct list_head **iter)
7237 {
7238 	struct netdev_adjacent *upper;
7239 
7240 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7241 
7242 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7243 
7244 	if (&upper->list == &dev->adj_list.upper)
7245 		return NULL;
7246 
7247 	*iter = &upper->list;
7248 
7249 	return upper->dev;
7250 }
7251 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
7252 
7253 static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
7254 						  struct list_head **iter,
7255 						  bool *ignore)
7256 {
7257 	struct netdev_adjacent *upper;
7258 
7259 	upper = list_entry((*iter)->next, struct netdev_adjacent, list);
7260 
7261 	if (&upper->list == &dev->adj_list.upper)
7262 		return NULL;
7263 
7264 	*iter = &upper->list;
7265 	*ignore = upper->ignore;
7266 
7267 	return upper->dev;
7268 }
7269 
7270 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
7271 						    struct list_head **iter)
7272 {
7273 	struct netdev_adjacent *upper;
7274 
7275 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7276 
7277 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7278 
7279 	if (&upper->list == &dev->adj_list.upper)
7280 		return NULL;
7281 
7282 	*iter = &upper->list;
7283 
7284 	return upper->dev;
7285 }
7286 
7287 static int __netdev_walk_all_upper_dev(struct net_device *dev,
7288 				       int (*fn)(struct net_device *dev,
7289 					 struct netdev_nested_priv *priv),
7290 				       struct netdev_nested_priv *priv)
7291 {
7292 	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7293 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7294 	int ret, cur = 0;
7295 	bool ignore;
7296 
7297 	now = dev;
7298 	iter = &dev->adj_list.upper;
7299 
7300 	while (1) {
7301 		if (now != dev) {
7302 			ret = fn(now, priv);
7303 			if (ret)
7304 				return ret;
7305 		}
7306 
7307 		next = NULL;
7308 		while (1) {
7309 			udev = __netdev_next_upper_dev(now, &iter, &ignore);
7310 			if (!udev)
7311 				break;
7312 			if (ignore)
7313 				continue;
7314 
7315 			next = udev;
7316 			niter = &udev->adj_list.upper;
7317 			dev_stack[cur] = now;
7318 			iter_stack[cur++] = iter;
7319 			break;
7320 		}
7321 
7322 		if (!next) {
7323 			if (!cur)
7324 				return 0;
7325 			next = dev_stack[--cur];
7326 			niter = iter_stack[cur];
7327 		}
7328 
7329 		now = next;
7330 		iter = niter;
7331 	}
7332 
7333 	return 0;
7334 }
7335 
7336 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
7337 				  int (*fn)(struct net_device *dev,
7338 					    struct netdev_nested_priv *priv),
7339 				  struct netdev_nested_priv *priv)
7340 {
7341 	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7342 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7343 	int ret, cur = 0;
7344 
7345 	now = dev;
7346 	iter = &dev->adj_list.upper;
7347 
7348 	while (1) {
7349 		if (now != dev) {
7350 			ret = fn(now, priv);
7351 			if (ret)
7352 				return ret;
7353 		}
7354 
7355 		next = NULL;
7356 		while (1) {
7357 			udev = netdev_next_upper_dev_rcu(now, &iter);
7358 			if (!udev)
7359 				break;
7360 
7361 			next = udev;
7362 			niter = &udev->adj_list.upper;
7363 			dev_stack[cur] = now;
7364 			iter_stack[cur++] = iter;
7365 			break;
7366 		}
7367 
7368 		if (!next) {
7369 			if (!cur)
7370 				return 0;
7371 			next = dev_stack[--cur];
7372 			niter = iter_stack[cur];
7373 		}
7374 
7375 		now = next;
7376 		iter = niter;
7377 	}
7378 
7379 	return 0;
7380 }
7381 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
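
/* Example (hypothetical sketch): counting every device stacked above @dev
 * with the walker.  The callback gets each upper device in turn and could
 * stop the walk by returning non-zero; the counter is passed through
 * netdev_nested_priv::data.
 *
 *	static int foo_count_upper(struct net_device *upper,
 *				   struct netdev_nested_priv *priv)
 *	{
 *		(*(unsigned int *)priv->data)++;
 *		return 0;
 *	}
 *
 *	unsigned int n = 0;
 *	struct netdev_nested_priv priv = { .data = &n };
 *
 *	rcu_read_lock();
 *	netdev_walk_all_upper_dev_rcu(dev, foo_count_upper, &priv);
 *	rcu_read_unlock();
 */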
7382 
7383 static bool __netdev_has_upper_dev(struct net_device *dev,
7384 				   struct net_device *upper_dev)
7385 {
7386 	struct netdev_nested_priv priv = {
7387 		.flags = 0,
7388 		.data = (void *)upper_dev,
7389 	};
7390 
7391 	ASSERT_RTNL();
7392 
7393 	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
7394 					   &priv);
7395 }
7396 
7397 /**
7398  * netdev_lower_get_next_private - Get the next ->private from the
7399  *				   lower neighbour list
7400  * @dev: device
7401  * @iter: list_head ** of the current position
7402  *
7403  * Gets the next netdev_adjacent->private from the dev's lower neighbour
7404  * list, starting from iter position. The caller must either hold the
7405  * RTNL lock or its own locking that guarantees that the neighbour lower
7406  * list will remain unchanged.
7407  */
7408 void *netdev_lower_get_next_private(struct net_device *dev,
7409 				    struct list_head **iter)
7410 {
7411 	struct netdev_adjacent *lower;
7412 
7413 	lower = list_entry(*iter, struct netdev_adjacent, list);
7414 
7415 	if (&lower->list == &dev->adj_list.lower)
7416 		return NULL;
7417 
7418 	*iter = lower->list.next;
7419 
7420 	return lower->private;
7421 }
7422 EXPORT_SYMBOL(netdev_lower_get_next_private);
7423 
7424 /**
7425  * netdev_lower_get_next_private_rcu - Get the next ->private from the
7426  *				       lower neighbour list, RCU
7427  *				       variant
7428  * @dev: device
7429  * @iter: list_head ** of the current position
7430  *
7431  * Gets the next netdev_adjacent->private from the dev's lower neighbour
7432  * list, starting from iter position. The caller must hold RCU read lock.
7433  */
7434 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
7435 					struct list_head **iter)
7436 {
7437 	struct netdev_adjacent *lower;
7438 
7439 	WARN_ON_ONCE(!rcu_read_lock_held());
7440 
7441 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7442 
7443 	if (&lower->list == &dev->adj_list.lower)
7444 		return NULL;
7445 
7446 	*iter = &lower->list;
7447 
7448 	return lower->private;
7449 }
7450 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
7451 
7452 /**
7453  * netdev_lower_get_next - Get the next device from the lower neighbour
7454  *                         list
7455  * @dev: device
7456  * @iter: list_head ** of the current position
7457  *
7458  * Gets the next netdev_adjacent from the dev's lower neighbour
7459  * list, starting from iter position. The caller must hold RTNL lock or
7460  * its own locking that guarantees that the neighbour lower
7461  * list will remain unchanged.
7462  */
7463 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7464 {
7465 	struct netdev_adjacent *lower;
7466 
7467 	lower = list_entry(*iter, struct netdev_adjacent, list);
7468 
7469 	if (&lower->list == &dev->adj_list.lower)
7470 		return NULL;
7471 
7472 	*iter = lower->list.next;
7473 
7474 	return lower->dev;
7475 }
7476 EXPORT_SYMBOL(netdev_lower_get_next);
7477 
7478 static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7479 						struct list_head **iter)
7480 {
7481 	struct netdev_adjacent *lower;
7482 
7483 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7484 
7485 	if (&lower->list == &dev->adj_list.lower)
7486 		return NULL;
7487 
7488 	*iter = &lower->list;
7489 
7490 	return lower->dev;
7491 }
7492 
7493 static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7494 						  struct list_head **iter,
7495 						  bool *ignore)
7496 {
7497 	struct netdev_adjacent *lower;
7498 
7499 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7500 
7501 	if (&lower->list == &dev->adj_list.lower)
7502 		return NULL;
7503 
7504 	*iter = &lower->list;
7505 	*ignore = lower->ignore;
7506 
7507 	return lower->dev;
7508 }
7509 
7510 int netdev_walk_all_lower_dev(struct net_device *dev,
7511 			      int (*fn)(struct net_device *dev,
7512 					struct netdev_nested_priv *priv),
7513 			      struct netdev_nested_priv *priv)
7514 {
7515 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7516 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7517 	int ret, cur = 0;
7518 
7519 	now = dev;
7520 	iter = &dev->adj_list.lower;
7521 
7522 	while (1) {
7523 		if (now != dev) {
7524 			ret = fn(now, priv);
7525 			if (ret)
7526 				return ret;
7527 		}
7528 
7529 		next = NULL;
7530 		while (1) {
7531 			ldev = netdev_next_lower_dev(now, &iter);
7532 			if (!ldev)
7533 				break;
7534 
7535 			next = ldev;
7536 			niter = &ldev->adj_list.lower;
7537 			dev_stack[cur] = now;
7538 			iter_stack[cur++] = iter;
7539 			break;
7540 		}
7541 
7542 		if (!next) {
7543 			if (!cur)
7544 				return 0;
7545 			next = dev_stack[--cur];
7546 			niter = iter_stack[cur];
7547 		}
7548 
7549 		now = next;
7550 		iter = niter;
7551 	}
7552 
7553 	return 0;
7554 }
7555 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
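
/* Illustrative sketch (hypothetical callback, not part of the kernel source):
 * counting every device below @dev, at any nesting depth, via the iterative
 * depth-first walk above.  The running count travels in the ->data member of
 * struct netdev_nested_priv; the caller holds RTNL.
 */
static inline int example_count_lower(struct net_device *ldev,
				      struct netdev_nested_priv *priv)
{
	(*(int *)priv->data)++;
	return 0;	/* a non-zero return would abort the walk */
}

static inline int example_lower_dev_count(struct net_device *dev)
{
	int count = 0;
	struct netdev_nested_priv priv = {
		.data = &count,
	};

	ASSERT_RTNL();
	netdev_walk_all_lower_dev(dev, example_count_lower, &priv);
	return count;
}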
7556 
7557 static int __netdev_walk_all_lower_dev(struct net_device *dev,
7558 				       int (*fn)(struct net_device *dev,
7559 					 struct netdev_nested_priv *priv),
7560 				       struct netdev_nested_priv *priv)
7561 {
7562 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7563 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7564 	int ret, cur = 0;
7565 	bool ignore;
7566 
7567 	now = dev;
7568 	iter = &dev->adj_list.lower;
7569 
7570 	while (1) {
7571 		if (now != dev) {
7572 			ret = fn(now, priv);
7573 			if (ret)
7574 				return ret;
7575 		}
7576 
7577 		next = NULL;
7578 		while (1) {
7579 			ldev = __netdev_next_lower_dev(now, &iter, &ignore);
7580 			if (!ldev)
7581 				break;
7582 			if (ignore)
7583 				continue;
7584 
7585 			next = ldev;
7586 			niter = &ldev->adj_list.lower;
7587 			dev_stack[cur] = now;
7588 			iter_stack[cur++] = iter;
7589 			break;
7590 		}
7591 
7592 		if (!next) {
7593 			if (!cur)
7594 				return 0;
7595 			next = dev_stack[--cur];
7596 			niter = iter_stack[cur];
7597 		}
7598 
7599 		now = next;
7600 		iter = niter;
7601 	}
7602 
7603 	return 0;
7604 }
7605 
7606 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
7607 					     struct list_head **iter)
7608 {
7609 	struct netdev_adjacent *lower;
7610 
7611 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7612 	if (&lower->list == &dev->adj_list.lower)
7613 		return NULL;
7614 
7615 	*iter = &lower->list;
7616 
7617 	return lower->dev;
7618 }
7619 EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
7620 
7621 static u8 __netdev_upper_depth(struct net_device *dev)
7622 {
7623 	struct net_device *udev;
7624 	struct list_head *iter;
7625 	u8 max_depth = 0;
7626 	bool ignore;
7627 
7628 	for (iter = &dev->adj_list.upper,
7629 	     udev = __netdev_next_upper_dev(dev, &iter, &ignore);
7630 	     udev;
7631 	     udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7632 		if (ignore)
7633 			continue;
7634 		if (max_depth < udev->upper_level)
7635 			max_depth = udev->upper_level;
7636 	}
7637 
7638 	return max_depth;
7639 }
7640 
7641 static u8 __netdev_lower_depth(struct net_device *dev)
7642 {
7643 	struct net_device *ldev;
7644 	struct list_head *iter;
7645 	u8 max_depth = 0;
7646 	bool ignore;
7647 
7648 	for (iter = &dev->adj_list.lower,
7649 	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
7650 	     ldev;
7651 	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7652 		if (ignore)
7653 			continue;
7654 		if (max_depth < ldev->lower_level)
7655 			max_depth = ldev->lower_level;
7656 	}
7657 
7658 	return max_depth;
7659 }
7660 
7661 static int __netdev_update_upper_level(struct net_device *dev,
7662 				       struct netdev_nested_priv *__unused)
7663 {
7664 	dev->upper_level = __netdev_upper_depth(dev) + 1;
7665 	return 0;
7666 }
7667 
7668 static int __netdev_update_lower_level(struct net_device *dev,
7669 				       struct netdev_nested_priv *priv)
7670 {
7671 	dev->lower_level = __netdev_lower_depth(dev) + 1;
7672 
7673 #ifdef CONFIG_LOCKDEP
7674 	if (!priv)
7675 		return 0;
7676 
7677 	if (priv->flags & NESTED_SYNC_IMM)
7678 		dev->nested_level = dev->lower_level - 1;
7679 	if (priv->flags & NESTED_SYNC_TODO)
7680 		net_unlink_todo(dev);
7681 #endif
7682 	return 0;
7683 }
7684 
7685 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
7686 				  int (*fn)(struct net_device *dev,
7687 					    struct netdev_nested_priv *priv),
7688 				  struct netdev_nested_priv *priv)
7689 {
7690 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7691 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7692 	int ret, cur = 0;
7693 
7694 	now = dev;
7695 	iter = &dev->adj_list.lower;
7696 
7697 	while (1) {
7698 		if (now != dev) {
7699 			ret = fn(now, priv);
7700 			if (ret)
7701 				return ret;
7702 		}
7703 
7704 		next = NULL;
7705 		while (1) {
7706 			ldev = netdev_next_lower_dev_rcu(now, &iter);
7707 			if (!ldev)
7708 				break;
7709 
7710 			next = ldev;
7711 			niter = &ldev->adj_list.lower;
7712 			dev_stack[cur] = now;
7713 			iter_stack[cur++] = iter;
7714 			break;
7715 		}
7716 
7717 		if (!next) {
7718 			if (!cur)
7719 				return 0;
7720 			next = dev_stack[--cur];
7721 			niter = iter_stack[cur];
7722 		}
7723 
7724 		now = next;
7725 		iter = niter;
7726 	}
7727 
7728 	return 0;
7729 }
7730 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
7731 
7732 /**
7733  * netdev_lower_get_first_private_rcu - Get the first ->private from the
7734  *				       lower neighbour list, RCU
7735  *				       variant
7736  * @dev: device
7737  *
7738  * Gets the first netdev_adjacent->private from the dev's lower neighbour
7739  * list. The caller must hold RCU read lock.
7740  */
7741 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7742 {
7743 	struct netdev_adjacent *lower;
7744 
7745 	lower = list_first_or_null_rcu(&dev->adj_list.lower,
7746 			struct netdev_adjacent, list);
7747 	if (lower)
7748 		return lower->private;
7749 	return NULL;
7750 }
7751 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
7752 
7753 /**
7754  * netdev_master_upper_dev_get_rcu - Get master upper device
7755  * @dev: device
7756  *
7757  * Find a master upper device and return pointer to it or NULL in case
7758  * it's not there. The caller must hold the RCU read lock.
7759  */
7760 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7761 {
7762 	struct netdev_adjacent *upper;
7763 
7764 	upper = list_first_or_null_rcu(&dev->adj_list.upper,
7765 				       struct netdev_adjacent, list);
7766 	if (upper && likely(upper->master))
7767 		return upper->dev;
7768 	return NULL;
7769 }
7770 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
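
/* Illustrative sketch (hypothetical helper, not part of the kernel source):
 * checking from an RCU-protected context whether a port device currently has
 * a master (e.g. a bond or bridge).  Only a boolean escapes the RCU section,
 * so no device pointer is used after rcu_read_unlock().
 */
static inline bool example_has_master_rcu(struct net_device *dev)
{
	bool ret;

	rcu_read_lock();
	ret = netdev_master_upper_dev_get_rcu(dev) != NULL;
	rcu_read_unlock();

	return ret;
}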
7771 
7772 static int netdev_adjacent_sysfs_add(struct net_device *dev,
7773 			      struct net_device *adj_dev,
7774 			      struct list_head *dev_list)
7775 {
7776 	char linkname[IFNAMSIZ+7];
7777 
7778 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
7779 		"upper_%s" : "lower_%s", adj_dev->name);
7780 	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7781 				 linkname);
7782 }
7783 static void netdev_adjacent_sysfs_del(struct net_device *dev,
7784 			       char *name,
7785 			       struct list_head *dev_list)
7786 {
7787 	char linkname[IFNAMSIZ+7];
7788 
7789 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
7790 		"upper_%s" : "lower_%s", name);
7791 	sysfs_remove_link(&(dev->dev.kobj), linkname);
7792 }
7793 
7794 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
7795 						 struct net_device *adj_dev,
7796 						 struct list_head *dev_list)
7797 {
7798 	return (dev_list == &dev->adj_list.upper ||
7799 		dev_list == &dev->adj_list.lower) &&
7800 		net_eq(dev_net(dev), dev_net(adj_dev));
7801 }
7802 
7803 static int __netdev_adjacent_dev_insert(struct net_device *dev,
7804 					struct net_device *adj_dev,
7805 					struct list_head *dev_list,
7806 					void *private, bool master)
7807 {
7808 	struct netdev_adjacent *adj;
7809 	int ret;
7810 
7811 	adj = __netdev_find_adj(adj_dev, dev_list);
7812 
7813 	if (adj) {
7814 		adj->ref_nr += 1;
7815 		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7816 			 dev->name, adj_dev->name, adj->ref_nr);
7817 
7818 		return 0;
7819 	}
7820 
7821 	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
7822 	if (!adj)
7823 		return -ENOMEM;
7824 
7825 	adj->dev = adj_dev;
7826 	adj->master = master;
7827 	adj->ref_nr = 1;
7828 	adj->private = private;
7829 	adj->ignore = false;
7830 	dev_hold(adj_dev);
7831 
7832 	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7833 		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
7834 
7835 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
7836 		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
7837 		if (ret)
7838 			goto free_adj;
7839 	}
7840 
7841 	/* Ensure that master link is always the first item in list. */
7842 	if (master) {
7843 		ret = sysfs_create_link(&(dev->dev.kobj),
7844 					&(adj_dev->dev.kobj), "master");
7845 		if (ret)
7846 			goto remove_symlinks;
7847 
7848 		list_add_rcu(&adj->list, dev_list);
7849 	} else {
7850 		list_add_tail_rcu(&adj->list, dev_list);
7851 	}
7852 
7853 	return 0;
7854 
7855 remove_symlinks:
7856 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7857 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7858 free_adj:
7859 	kfree(adj);
7860 	dev_put(adj_dev);
7861 
7862 	return ret;
7863 }
7864 
7865 static void __netdev_adjacent_dev_remove(struct net_device *dev,
7866 					 struct net_device *adj_dev,
7867 					 u16 ref_nr,
7868 					 struct list_head *dev_list)
7869 {
7870 	struct netdev_adjacent *adj;
7871 
7872 	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7873 		 dev->name, adj_dev->name, ref_nr);
7874 
7875 	adj = __netdev_find_adj(adj_dev, dev_list);
7876 
7877 	if (!adj) {
7878 		pr_err("Adjacency does not exist for device %s from %s\n",
7879 		       dev->name, adj_dev->name);
7880 		WARN_ON(1);
7881 		return;
7882 	}
7883 
7884 	if (adj->ref_nr > ref_nr) {
7885 		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7886 			 dev->name, adj_dev->name, ref_nr,
7887 			 adj->ref_nr - ref_nr);
7888 		adj->ref_nr -= ref_nr;
7889 		return;
7890 	}
7891 
7892 	if (adj->master)
7893 		sysfs_remove_link(&(dev->dev.kobj), "master");
7894 
7895 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7896 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7897 
7898 	list_del_rcu(&adj->list);
7899 	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
7900 		 adj_dev->name, dev->name, adj_dev->name);
7901 	dev_put(adj_dev);
7902 	kfree_rcu(adj, rcu);
7903 }
7904 
7905 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
7906 					    struct net_device *upper_dev,
7907 					    struct list_head *up_list,
7908 					    struct list_head *down_list,
7909 					    void *private, bool master)
7910 {
7911 	int ret;
7912 
7913 	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
7914 					   private, master);
7915 	if (ret)
7916 		return ret;
7917 
7918 	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
7919 					   private, false);
7920 	if (ret) {
7921 		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
7922 		return ret;
7923 	}
7924 
7925 	return 0;
7926 }
7927 
7928 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7929 					       struct net_device *upper_dev,
7930 					       u16 ref_nr,
7931 					       struct list_head *up_list,
7932 					       struct list_head *down_list)
7933 {
7934 	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7935 	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
7936 }
7937 
7938 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7939 						struct net_device *upper_dev,
7940 						void *private, bool master)
7941 {
7942 	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7943 						&dev->adj_list.upper,
7944 						&upper_dev->adj_list.lower,
7945 						private, master);
7946 }
7947 
7948 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7949 						   struct net_device *upper_dev)
7950 {
7951 	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
7952 					   &dev->adj_list.upper,
7953 					   &upper_dev->adj_list.lower);
7954 }
7955 
7956 static int __netdev_upper_dev_link(struct net_device *dev,
7957 				   struct net_device *upper_dev, bool master,
7958 				   void *upper_priv, void *upper_info,
7959 				   struct netdev_nested_priv *priv,
7960 				   struct netlink_ext_ack *extack)
7961 {
7962 	struct netdev_notifier_changeupper_info changeupper_info = {
7963 		.info = {
7964 			.dev = dev,
7965 			.extack = extack,
7966 		},
7967 		.upper_dev = upper_dev,
7968 		.master = master,
7969 		.linking = true,
7970 		.upper_info = upper_info,
7971 	};
7972 	struct net_device *master_dev;
7973 	int ret = 0;
7974 
7975 	ASSERT_RTNL();
7976 
7977 	if (dev == upper_dev)
7978 		return -EBUSY;
7979 
7980 	/* To prevent loops, check if dev is not upper device to upper_dev. */
7981 	if (__netdev_has_upper_dev(upper_dev, dev))
7982 		return -EBUSY;
7983 
7984 	if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7985 		return -EMLINK;
7986 
7987 	if (!master) {
7988 		if (__netdev_has_upper_dev(dev, upper_dev))
7989 			return -EEXIST;
7990 	} else {
7991 		master_dev = __netdev_master_upper_dev_get(dev);
7992 		if (master_dev)
7993 			return master_dev == upper_dev ? -EEXIST : -EBUSY;
7994 	}
7995 
7996 	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7997 					    &changeupper_info.info);
7998 	ret = notifier_to_errno(ret);
7999 	if (ret)
8000 		return ret;
8001 
8002 	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
8003 						   master);
8004 	if (ret)
8005 		return ret;
8006 
8007 	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
8008 					    &changeupper_info.info);
8009 	ret = notifier_to_errno(ret);
8010 	if (ret)
8011 		goto rollback;
8012 
8013 	__netdev_update_upper_level(dev, NULL);
8014 	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
8015 
8016 	__netdev_update_lower_level(upper_dev, priv);
8017 	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
8018 				    priv);
8019 
8020 	return 0;
8021 
8022 rollback:
8023 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
8024 
8025 	return ret;
8026 }
8027 
8028 /**
8029  * netdev_upper_dev_link - Add a link to the upper device
8030  * @dev: device
8031  * @upper_dev: new upper device
8032  * @extack: netlink extended ack
8033  *
8034  * Adds a link to a device which is upper to this one. The caller must hold
8035  * the RTNL lock. On a failure a negative errno code is returned.
8036  * On success the reference counts are adjusted and the function
8037  * returns zero.
8038  */
8039 int netdev_upper_dev_link(struct net_device *dev,
8040 			  struct net_device *upper_dev,
8041 			  struct netlink_ext_ack *extack)
8042 {
8043 	struct netdev_nested_priv priv = {
8044 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
8045 		.data = NULL,
8046 	};
8047 
8048 	return __netdev_upper_dev_link(dev, upper_dev, false,
8049 				       NULL, NULL, &priv, extack);
8050 }
8051 EXPORT_SYMBOL(netdev_upper_dev_link);
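
/* Illustrative sketch (hypothetical driver code, not part of the kernel
 * source): stacking one device on top of another under RTNL, and undoing the
 * link with netdev_upper_dev_unlink() if later setup fails.
 */
static inline int example_stack_devices(struct net_device *lower,
					struct net_device *upper,
					struct netlink_ext_ack *extack)
{
	int err;

	ASSERT_RTNL();

	err = netdev_upper_dev_link(lower, upper, extack);
	if (err)
		return err;

	/* ... further setup; on failure: netdev_upper_dev_unlink(lower, upper); */
	return 0;
}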
8052 
8053 /**
8054  * netdev_master_upper_dev_link - Add a master link to the upper device
8055  * @dev: device
8056  * @upper_dev: new upper device
8057  * @upper_priv: upper device private
8058  * @upper_info: upper info to be passed down via notifier
8059  * @extack: netlink extended ack
8060  *
8061  * Adds a link to a device which is upper to this one. In this case, only
8062  * one master upper device can be linked, although other non-master devices
8063  * might be linked as well. The caller must hold the RTNL lock.
8064  * On a failure a negative errno code is returned. On success the reference
8065  * counts are adjusted and the function returns zero.
8066  */
8067 int netdev_master_upper_dev_link(struct net_device *dev,
8068 				 struct net_device *upper_dev,
8069 				 void *upper_priv, void *upper_info,
8070 				 struct netlink_ext_ack *extack)
8071 {
8072 	struct netdev_nested_priv priv = {
8073 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
8074 		.data = NULL,
8075 	};
8076 
8077 	return __netdev_upper_dev_link(dev, upper_dev, true,
8078 				       upper_priv, upper_info, &priv, extack);
8079 }
8080 EXPORT_SYMBOL(netdev_master_upper_dev_link);
8081 
8082 static void __netdev_upper_dev_unlink(struct net_device *dev,
8083 				      struct net_device *upper_dev,
8084 				      struct netdev_nested_priv *priv)
8085 {
8086 	struct netdev_notifier_changeupper_info changeupper_info = {
8087 		.info = {
8088 			.dev = dev,
8089 		},
8090 		.upper_dev = upper_dev,
8091 		.linking = false,
8092 	};
8093 
8094 	ASSERT_RTNL();
8095 
8096 	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
8097 
8098 	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
8099 				      &changeupper_info.info);
8100 
8101 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
8102 
8103 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
8104 				      &changeupper_info.info);
8105 
8106 	__netdev_update_upper_level(dev, NULL);
8107 	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
8108 
8109 	__netdev_update_lower_level(upper_dev, priv);
8110 	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
8111 				    priv);
8112 }
8113 
8114 /**
8115  * netdev_upper_dev_unlink - Removes a link to upper device
8116  * @dev: device
8117  * @upper_dev: upper device to unlink
8118  *
8119  * Removes a link to a device which is upper to this one. The caller must hold
8120  * the RTNL lock.
8121  */
8122 void netdev_upper_dev_unlink(struct net_device *dev,
8123 			     struct net_device *upper_dev)
8124 {
8125 	struct netdev_nested_priv priv = {
8126 		.flags = NESTED_SYNC_TODO,
8127 		.data = NULL,
8128 	};
8129 
8130 	__netdev_upper_dev_unlink(dev, upper_dev, &priv);
8131 }
8132 EXPORT_SYMBOL(netdev_upper_dev_unlink);
8133 
8134 static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
8135 				      struct net_device *lower_dev,
8136 				      bool val)
8137 {
8138 	struct netdev_adjacent *adj;
8139 
8140 	adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
8141 	if (adj)
8142 		adj->ignore = val;
8143 
8144 	adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
8145 	if (adj)
8146 		adj->ignore = val;
8147 }
8148 
8149 static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
8150 					struct net_device *lower_dev)
8151 {
8152 	__netdev_adjacent_dev_set(upper_dev, lower_dev, true);
8153 }
8154 
8155 static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
8156 				       struct net_device *lower_dev)
8157 {
8158 	__netdev_adjacent_dev_set(upper_dev, lower_dev, false);
8159 }
8160 
8161 int netdev_adjacent_change_prepare(struct net_device *old_dev,
8162 				   struct net_device *new_dev,
8163 				   struct net_device *dev,
8164 				   struct netlink_ext_ack *extack)
8165 {
8166 	struct netdev_nested_priv priv = {
8167 		.flags = 0,
8168 		.data = NULL,
8169 	};
8170 	int err;
8171 
8172 	if (!new_dev)
8173 		return 0;
8174 
8175 	if (old_dev && new_dev != old_dev)
8176 		netdev_adjacent_dev_disable(dev, old_dev);
8177 	err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
8178 				      extack);
8179 	if (err) {
8180 		if (old_dev && new_dev != old_dev)
8181 			netdev_adjacent_dev_enable(dev, old_dev);
8182 		return err;
8183 	}
8184 
8185 	return 0;
8186 }
8187 EXPORT_SYMBOL(netdev_adjacent_change_prepare);
8188 
8189 void netdev_adjacent_change_commit(struct net_device *old_dev,
8190 				   struct net_device *new_dev,
8191 				   struct net_device *dev)
8192 {
8193 	struct netdev_nested_priv priv = {
8194 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
8195 		.data = NULL,
8196 	};
8197 
8198 	if (!new_dev || !old_dev)
8199 		return;
8200 
8201 	if (new_dev == old_dev)
8202 		return;
8203 
8204 	netdev_adjacent_dev_enable(dev, old_dev);
8205 	__netdev_upper_dev_unlink(old_dev, dev, &priv);
8206 }
8207 EXPORT_SYMBOL(netdev_adjacent_change_commit);
8208 
8209 void netdev_adjacent_change_abort(struct net_device *old_dev,
8210 				  struct net_device *new_dev,
8211 				  struct net_device *dev)
8212 {
8213 	struct netdev_nested_priv priv = {
8214 		.flags = 0,
8215 		.data = NULL,
8216 	};
8217 
8218 	if (!new_dev)
8219 		return;
8220 
8221 	if (old_dev && new_dev != old_dev)
8222 		netdev_adjacent_dev_enable(dev, old_dev);
8223 
8224 	__netdev_upper_dev_unlink(new_dev, dev, &priv);
8225 }
8226 EXPORT_SYMBOL(netdev_adjacent_change_abort);
8227 
8228 /**
8229  * netdev_bonding_info_change - Dispatch event about slave change
8230  * @dev: device
8231  * @bonding_info: info to dispatch
8232  *
8233  * Send NETDEV_BONDING_INFO to netdev notifiers with info.
8234  * The caller must hold the RTNL lock.
8235  */
8236 void netdev_bonding_info_change(struct net_device *dev,
8237 				struct netdev_bonding_info *bonding_info)
8238 {
8239 	struct netdev_notifier_bonding_info info = {
8240 		.info.dev = dev,
8241 	};
8242 
8243 	memcpy(&info.bonding_info, bonding_info,
8244 	       sizeof(struct netdev_bonding_info));
8245 	call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
8246 				      &info.info);
8247 }
8248 EXPORT_SYMBOL(netdev_bonding_info_change);
8249 
8250 /**
8251  * netdev_get_xmit_slave - Get the xmit slave of master device
8252  * @dev: device
8253  * @skb: The packet
8254  * @all_slaves: assume all the slaves are active
8255  *
8256  * The reference counters are not incremented so the caller must be
8257  * careful with locks. The caller must hold the RCU read lock.
8258  * %NULL is returned if no slave is found.
8259  */
8260 
8261 struct net_device *netdev_get_xmit_slave(struct net_device *dev,
8262 					 struct sk_buff *skb,
8263 					 bool all_slaves)
8264 {
8265 	const struct net_device_ops *ops = dev->netdev_ops;
8266 
8267 	if (!ops->ndo_get_xmit_slave)
8268 		return NULL;
8269 	return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
8270 }
8271 EXPORT_SYMBOL(netdev_get_xmit_slave);
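
/* Illustrative sketch (hypothetical caller, not part of the kernel source):
 * resolving the slave a bond/team master would transmit @skb on, under the
 * RCU read lock as required above.  No reference is taken, so the pointer is
 * only used inside the RCU section.
 */
static inline void example_print_xmit_slave(struct net_device *master,
					    struct sk_buff *skb)
{
	struct net_device *slave;

	rcu_read_lock();
	slave = netdev_get_xmit_slave(master, skb, false);
	if (slave)
		pr_debug("%s: would transmit via %s\n",
			 master->name, slave->name);
	rcu_read_unlock();
}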
8272 
8273 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
8274 						  struct sock *sk)
8275 {
8276 	const struct net_device_ops *ops = dev->netdev_ops;
8277 
8278 	if (!ops->ndo_sk_get_lower_dev)
8279 		return NULL;
8280 	return ops->ndo_sk_get_lower_dev(dev, sk);
8281 }
8282 
8283 /**
8284  * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
8285  * @dev: device
8286  * @sk: the socket
8287  *
8288  * If no lower device is found, @dev itself is returned.
8289  */
8290 
8291 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
8292 					    struct sock *sk)
8293 {
8294 	struct net_device *lower;
8295 
8296 	lower = netdev_sk_get_lower_dev(dev, sk);
8297 	while (lower) {
8298 		dev = lower;
8299 		lower = netdev_sk_get_lower_dev(dev, sk);
8300 	}
8301 
8302 	return dev;
8303 }
8304 EXPORT_SYMBOL(netdev_sk_get_lowest_dev);
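
/* Illustrative sketch (hypothetical caller, not part of the kernel source):
 * finding the bottom-most device that would carry traffic for @sk when the
 * starting point is a stacked device such as a bond.  The RCU read lock is
 * taken here as a conservative assumption, since the lower-device lookup
 * walks driver state that changes under RCU.
 */
static inline void example_print_lowest_dev(struct net_device *dev,
					    struct sock *sk)
{
	struct net_device *lowest;

	rcu_read_lock();
	lowest = netdev_sk_get_lowest_dev(dev, sk);
	pr_debug("%s: lowest device for socket is %s\n",
		 dev->name, lowest->name);
	rcu_read_unlock();
}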
8305 
8306 static void netdev_adjacent_add_links(struct net_device *dev)
8307 {
8308 	struct netdev_adjacent *iter;
8309 
8310 	struct net *net = dev_net(dev);
8311 
8312 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
8313 		if (!net_eq(net, dev_net(iter->dev)))
8314 			continue;
8315 		netdev_adjacent_sysfs_add(iter->dev, dev,
8316 					  &iter->dev->adj_list.lower);
8317 		netdev_adjacent_sysfs_add(dev, iter->dev,
8318 					  &dev->adj_list.upper);
8319 	}
8320 
8321 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
8322 		if (!net_eq(net, dev_net(iter->dev)))
8323 			continue;
8324 		netdev_adjacent_sysfs_add(iter->dev, dev,
8325 					  &iter->dev->adj_list.upper);
8326 		netdev_adjacent_sysfs_add(dev, iter->dev,
8327 					  &dev->adj_list.lower);
8328 	}
8329 }
8330 
8331 static void netdev_adjacent_del_links(struct net_device *dev)
8332 {
8333 	struct netdev_adjacent *iter;
8334 
8335 	struct net *net = dev_net(dev);
8336 
8337 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
8338 		if (!net_eq(net, dev_net(iter->dev)))
8339 			continue;
8340 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
8341 					  &iter->dev->adj_list.lower);
8342 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
8343 					  &dev->adj_list.upper);
8344 	}
8345 
8346 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
8347 		if (!net_eq(net, dev_net(iter->dev)))
8348 			continue;
8349 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
8350 					  &iter->dev->adj_list.upper);
8351 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
8352 					  &dev->adj_list.lower);
8353 	}
8354 }
8355 
8356 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
8357 {
8358 	struct netdev_adjacent *iter;
8359 
8360 	struct net *net = dev_net(dev);
8361 
8362 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
8363 		if (!net_eq(net, dev_net(iter->dev)))
8364 			continue;
8365 		netdev_adjacent_sysfs_del(iter->dev, oldname,
8366 					  &iter->dev->adj_list.lower);
8367 		netdev_adjacent_sysfs_add(iter->dev, dev,
8368 					  &iter->dev->adj_list.lower);
8369 	}
8370 
8371 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
8372 		if (!net_eq(net, dev_net(iter->dev)))
8373 			continue;
8374 		netdev_adjacent_sysfs_del(iter->dev, oldname,
8375 					  &iter->dev->adj_list.upper);
8376 		netdev_adjacent_sysfs_add(iter->dev, dev,
8377 					  &iter->dev->adj_list.upper);
8378 	}
8379 }
8380 
8381 void *netdev_lower_dev_get_private(struct net_device *dev,
8382 				   struct net_device *lower_dev)
8383 {
8384 	struct netdev_adjacent *lower;
8385 
8386 	if (!lower_dev)
8387 		return NULL;
8388 	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
8389 	if (!lower)
8390 		return NULL;
8391 
8392 	return lower->private;
8393 }
8394 EXPORT_SYMBOL(netdev_lower_dev_get_private);
8395 
8396 
8397 /**
8398  * netdev_lower_state_changed - Dispatch event about lower device state change
8399  * @lower_dev: device
8400  * @lower_state_info: state to dispatch
8401  *
8402  * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
8403  * The caller must hold the RTNL lock.
8404  */
8405 void netdev_lower_state_changed(struct net_device *lower_dev,
8406 				void *lower_state_info)
8407 {
8408 	struct netdev_notifier_changelowerstate_info changelowerstate_info = {
8409 		.info.dev = lower_dev,
8410 	};
8411 
8412 	ASSERT_RTNL();
8413 	changelowerstate_info.lower_state_info = lower_state_info;
8414 	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
8415 				      &changelowerstate_info.info);
8416 }
8417 EXPORT_SYMBOL(netdev_lower_state_changed);
8418 
8419 static void dev_change_rx_flags(struct net_device *dev, int flags)
8420 {
8421 	const struct net_device_ops *ops = dev->netdev_ops;
8422 
8423 	if (ops->ndo_change_rx_flags)
8424 		ops->ndo_change_rx_flags(dev, flags);
8425 }
8426 
8427 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
8428 {
8429 	unsigned int old_flags = dev->flags;
8430 	kuid_t uid;
8431 	kgid_t gid;
8432 
8433 	ASSERT_RTNL();
8434 
8435 	dev->flags |= IFF_PROMISC;
8436 	dev->promiscuity += inc;
8437 	if (dev->promiscuity == 0) {
8438 		/*
8439 		 * Avoid overflow.
8440 		 * If inc causes overflow, untouch promisc and return error.
8441 		 */
8442 		if (inc < 0)
8443 			dev->flags &= ~IFF_PROMISC;
8444 		else {
8445 			dev->promiscuity -= inc;
8446 			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
8447 				dev->name);
8448 			return -EOVERFLOW;
8449 		}
8450 	}
8451 	if (dev->flags != old_flags) {
8452 		pr_info("device %s %s promiscuous mode\n",
8453 			dev->name,
8454 			dev->flags & IFF_PROMISC ? "entered" : "left");
8455 		if (audit_enabled) {
8456 			current_uid_gid(&uid, &gid);
8457 			audit_log(audit_context(), GFP_ATOMIC,
8458 				  AUDIT_ANOM_PROMISCUOUS,
8459 				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
8460 				  dev->name, (dev->flags & IFF_PROMISC),
8461 				  (old_flags & IFF_PROMISC),
8462 				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
8463 				  from_kuid(&init_user_ns, uid),
8464 				  from_kgid(&init_user_ns, gid),
8465 				  audit_get_sessionid(current));
8466 		}
8467 
8468 		dev_change_rx_flags(dev, IFF_PROMISC);
8469 	}
8470 	if (notify)
8471 		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
8472 	return 0;
8473 }
8474 
8475 /**
8476  *	dev_set_promiscuity	- update promiscuity count on a device
8477  *	@dev: device
8478  *	@inc: modifier
8479  *
8480  *	Add or remove promiscuity from a device. While the count in the device
8481  *	remains above zero the interface remains promiscuous. Once it hits zero
8482  *	the device reverts to normal filtering operation. A negative @inc
8483  *	value is used to drop promiscuity on the device.
8484  *	Return 0 if successful or a negative errno code on error.
8485  */
8486 int dev_set_promiscuity(struct net_device *dev, int inc)
8487 {
8488 	unsigned int old_flags = dev->flags;
8489 	int err;
8490 
8491 	err = __dev_set_promiscuity(dev, inc, true);
8492 	if (err < 0)
8493 		return err;
8494 	if (dev->flags != old_flags)
8495 		dev_set_rx_mode(dev);
8496 	return err;
8497 }
8498 EXPORT_SYMBOL(dev_set_promiscuity);
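
/* Illustrative sketch (hypothetical driver code, not part of the kernel
 * source): a feature that needs to see all traffic bumps the promiscuity
 * count while enabled and drops it again when disabled, so it composes with
 * other users of the counter.
 */
static inline int example_toggle_promisc(struct net_device *dev, bool enable)
{
	ASSERT_RTNL();

	return dev_set_promiscuity(dev, enable ? 1 : -1);
}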
8499 
8500 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
8501 {
8502 	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
8503 
8504 	ASSERT_RTNL();
8505 
8506 	dev->flags |= IFF_ALLMULTI;
8507 	dev->allmulti += inc;
8508 	if (dev->allmulti == 0) {
8509 		/*
8510 		 * Avoid overflow.
8511 		 * If inc causes overflow, untouch allmulti and return error.
8512 		 */
8513 		if (inc < 0)
8514 			dev->flags &= ~IFF_ALLMULTI;
8515 		else {
8516 			dev->allmulti -= inc;
8517 			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
8518 				dev->name);
8519 			return -EOVERFLOW;
8520 		}
8521 	}
8522 	if (dev->flags ^ old_flags) {
8523 		dev_change_rx_flags(dev, IFF_ALLMULTI);
8524 		dev_set_rx_mode(dev);
8525 		if (notify)
8526 			__dev_notify_flags(dev, old_flags,
8527 					   dev->gflags ^ old_gflags);
8528 	}
8529 	return 0;
8530 }
8531 
8532 /**
8533  *	dev_set_allmulti	- update allmulti count on a device
8534  *	@dev: device
8535  *	@inc: modifier
8536  *
8537  *	Add or remove reception of all multicast frames to a device. While the
8538  *	count in the device remains above zero the interface remains listening
8539  *	to all multicast frames. Once it hits zero the device reverts to normal
8540  *	filtering operation. A negative @inc value is used to drop the counter
8541  *	when releasing a resource needing all multicasts.
8542  *	Return 0 if successful or a negative errno code on error.
8543  */
8544 
8545 int dev_set_allmulti(struct net_device *dev, int inc)
8546 {
8547 	return __dev_set_allmulti(dev, inc, true);
8548 }
8549 EXPORT_SYMBOL(dev_set_allmulti);
8550 
8551 /*
8552  *	Upload unicast and multicast address lists to device and
8553  *	configure RX filtering. When the device doesn't support unicast
8554  *	filtering it is put in promiscuous mode while unicast addresses
8555  *	are present.
8556  */
8557 void __dev_set_rx_mode(struct net_device *dev)
8558 {
8559 	const struct net_device_ops *ops = dev->netdev_ops;
8560 
8561 	/* dev_open will call this function so the list will stay sane. */
8562 	if (!(dev->flags&IFF_UP))
8563 		return;
8564 
8565 	if (!netif_device_present(dev))
8566 		return;
8567 
8568 	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
8569 		/* Unicast address changes may only happen under the rtnl,
8570 		 * therefore calling __dev_set_promiscuity here is safe.
8571 		 */
8572 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8573 			__dev_set_promiscuity(dev, 1, false);
8574 			dev->uc_promisc = true;
8575 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8576 			__dev_set_promiscuity(dev, -1, false);
8577 			dev->uc_promisc = false;
8578 		}
8579 	}
8580 
8581 	if (ops->ndo_set_rx_mode)
8582 		ops->ndo_set_rx_mode(dev);
8583 }
8584 
8585 void dev_set_rx_mode(struct net_device *dev)
8586 {
8587 	netif_addr_lock_bh(dev);
8588 	__dev_set_rx_mode(dev);
8589 	netif_addr_unlock_bh(dev);
8590 }
8591 
8592 /**
8593  *	dev_get_flags - get flags reported to userspace
8594  *	@dev: device
8595  *
8596  *	Get the combination of flag bits exported through APIs to userspace.
8597  */
8598 unsigned int dev_get_flags(const struct net_device *dev)
8599 {
8600 	unsigned int flags;
8601 
8602 	flags = (dev->flags & ~(IFF_PROMISC |
8603 				IFF_ALLMULTI |
8604 				IFF_RUNNING |
8605 				IFF_LOWER_UP |
8606 				IFF_DORMANT)) |
8607 		(dev->gflags & (IFF_PROMISC |
8608 				IFF_ALLMULTI));
8609 
8610 	if (netif_running(dev)) {
8611 		if (netif_oper_up(dev))
8612 			flags |= IFF_RUNNING;
8613 		if (netif_carrier_ok(dev))
8614 			flags |= IFF_LOWER_UP;
8615 		if (netif_dormant(dev))
8616 			flags |= IFF_DORMANT;
8617 	}
8618 
8619 	return flags;
8620 }
8621 EXPORT_SYMBOL(dev_get_flags);
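
/* Illustrative sketch (hypothetical helper, not part of the kernel source):
 * reading the userspace view of the flags, where IFF_RUNNING, IFF_LOWER_UP
 * and IFF_DORMANT are synthesised from the operational state as above.
 */
static inline bool example_iface_is_running(const struct net_device *dev)
{
	return dev_get_flags(dev) & IFF_RUNNING;
}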
8622 
8623 int __dev_change_flags(struct net_device *dev, unsigned int flags,
8624 		       struct netlink_ext_ack *extack)
8625 {
8626 	unsigned int old_flags = dev->flags;
8627 	int ret;
8628 
8629 	ASSERT_RTNL();
8630 
8631 	/*
8632 	 *	Set the flags on our device.
8633 	 */
8634 
8635 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8636 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8637 			       IFF_AUTOMEDIA)) |
8638 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8639 				    IFF_ALLMULTI));
8640 
8641 	/*
8642 	 *	Load in the correct multicast list now the flags have changed.
8643 	 */
8644 
8645 	if ((old_flags ^ flags) & IFF_MULTICAST)
8646 		dev_change_rx_flags(dev, IFF_MULTICAST);
8647 
8648 	dev_set_rx_mode(dev);
8649 
8650 	/*
8651 	 *	Have we downed the interface. We handle IFF_UP ourselves
8652 	 *	according to user attempts to set it, rather than blindly
8653 	 *	setting it.
8654 	 */
8655 
8656 	ret = 0;
8657 	if ((old_flags ^ flags) & IFF_UP) {
8658 		if (old_flags & IFF_UP)
8659 			__dev_close(dev);
8660 		else
8661 			ret = __dev_open(dev, extack);
8662 	}
8663 
8664 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
8665 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
8666 		unsigned int old_flags = dev->flags;
8667 
8668 		dev->gflags ^= IFF_PROMISC;
8669 
8670 		if (__dev_set_promiscuity(dev, inc, false) >= 0)
8671 			if (dev->flags != old_flags)
8672 				dev_set_rx_mode(dev);
8673 	}
8674 
8675 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
8676 	 * is important. Some (broken) drivers set IFF_PROMISC, when
8677 	 * IFF_ALLMULTI is requested, without asking us and without reporting it.
8678 	 */
8679 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8680 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8681 
8682 		dev->gflags ^= IFF_ALLMULTI;
8683 		__dev_set_allmulti(dev, inc, false);
8684 	}
8685 
8686 	return ret;
8687 }
8688 
8689 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8690 			unsigned int gchanges)
8691 {
8692 	unsigned int changes = dev->flags ^ old_flags;
8693 
8694 	if (gchanges)
8695 		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
8696 
8697 	if (changes & IFF_UP) {
8698 		if (dev->flags & IFF_UP)
8699 			call_netdevice_notifiers(NETDEV_UP, dev);
8700 		else
8701 			call_netdevice_notifiers(NETDEV_DOWN, dev);
8702 	}
8703 
8704 	if (dev->flags & IFF_UP &&
8705 	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
8706 		struct netdev_notifier_change_info change_info = {
8707 			.info = {
8708 				.dev = dev,
8709 			},
8710 			.flags_changed = changes,
8711 		};
8712 
8713 		call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
8714 	}
8715 }
8716 
8717 /**
8718  *	dev_change_flags - change device settings
8719  *	@dev: device
8720  *	@flags: device state flags
8721  *	@extack: netlink extended ack
8722  *
8723  *	Change settings on device based state flags. The flags are
8724  *	in the userspace exported format.
8725  */
8726 int dev_change_flags(struct net_device *dev, unsigned int flags,
8727 		     struct netlink_ext_ack *extack)
8728 {
8729 	int ret;
8730 	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8731 
8732 	ret = __dev_change_flags(dev, flags, extack);
8733 	if (ret < 0)
8734 		return ret;
8735 
8736 	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8737 	__dev_notify_flags(dev, old_flags, changes);
8738 	return ret;
8739 }
8740 EXPORT_SYMBOL(dev_change_flags);
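
/* Illustrative sketch (hypothetical caller, not part of the kernel source):
 * setting IFF_UP through the same path an SIOCSIFFLAGS request would take,
 * while preserving all other userspace-visible flags.
 */
static inline int example_bring_up(struct net_device *dev,
				   struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();

	return dev_change_flags(dev, dev_get_flags(dev) | IFF_UP, extack);
}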
8741 
8742 int __dev_set_mtu(struct net_device *dev, int new_mtu)
8743 {
8744 	const struct net_device_ops *ops = dev->netdev_ops;
8745 
8746 	if (ops->ndo_change_mtu)
8747 		return ops->ndo_change_mtu(dev, new_mtu);
8748 
8749 	/* Pairs with all the lockless reads of dev->mtu in the stack */
8750 	WRITE_ONCE(dev->mtu, new_mtu);
8751 	return 0;
8752 }
8753 EXPORT_SYMBOL(__dev_set_mtu);
8754 
8755 int dev_validate_mtu(struct net_device *dev, int new_mtu,
8756 		     struct netlink_ext_ack *extack)
8757 {
8758 	/* MTU must be positive, and in range */
8759 	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8760 		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
8761 		return -EINVAL;
8762 	}
8763 
8764 	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8765 		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
8766 		return -EINVAL;
8767 	}
8768 	return 0;
8769 }
8770 
8771 /**
8772  *	dev_set_mtu_ext - Change maximum transfer unit
8773  *	@dev: device
8774  *	@new_mtu: new transfer unit
8775  *	@extack: netlink extended ack
8776  *
8777  *	Change the maximum transfer size of the network device.
8778  */
8779 int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
8780 		    struct netlink_ext_ack *extack)
8781 {
8782 	int err, orig_mtu;
8783 
8784 	if (new_mtu == dev->mtu)
8785 		return 0;
8786 
8787 	err = dev_validate_mtu(dev, new_mtu, extack);
8788 	if (err)
8789 		return err;
8790 
8791 	if (!netif_device_present(dev))
8792 		return -ENODEV;
8793 
8794 	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8795 	err = notifier_to_errno(err);
8796 	if (err)
8797 		return err;
8798 
8799 	orig_mtu = dev->mtu;
8800 	err = __dev_set_mtu(dev, new_mtu);
8801 
8802 	if (!err) {
8803 		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8804 						   orig_mtu);
8805 		err = notifier_to_errno(err);
8806 		if (err) {
8807 			/* setting mtu back and notifying everyone again,
8808 			 * so that they have a chance to revert changes.
8809 			 */
8810 			__dev_set_mtu(dev, orig_mtu);
8811 			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8812 						     new_mtu);
8813 		}
8814 	}
8815 	return err;
8816 }
8817 
8818 int dev_set_mtu(struct net_device *dev, int new_mtu)
8819 {
8820 	struct netlink_ext_ack extack;
8821 	int err;
8822 
8823 	memset(&extack, 0, sizeof(extack));
8824 	err = dev_set_mtu_ext(dev, new_mtu, &extack);
8825 	if (err && extack._msg)
8826 		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
8827 	return err;
8828 }
8829 EXPORT_SYMBOL(dev_set_mtu);
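
/* Illustrative sketch (hypothetical driver code, not part of the kernel
 * source): an upper device clamping its own MTU to a lower device's MTU,
 * relying on dev_set_mtu() to validate the range and fire the notifiers.
 */
static inline int example_clamp_mtu(struct net_device *upper,
				    struct net_device *lower)
{
	ASSERT_RTNL();

	if (upper->mtu <= lower->mtu)
		return 0;

	return dev_set_mtu(upper, lower->mtu);
}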
8830 
8831 /**
8832  *	dev_change_tx_queue_len - Change TX queue length of a netdevice
8833  *	@dev: device
8834  *	@new_len: new tx queue length
8835  */
8836 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
8837 {
8838 	unsigned int orig_len = dev->tx_queue_len;
8839 	int res;
8840 
8841 	if (new_len != (unsigned int)new_len)
8842 		return -ERANGE;
8843 
8844 	if (new_len != orig_len) {
8845 		dev->tx_queue_len = new_len;
8846 		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
8847 		res = notifier_to_errno(res);
8848 		if (res)
8849 			goto err_rollback;
8850 		res = dev_qdisc_change_tx_queue_len(dev);
8851 		if (res)
8852 			goto err_rollback;
8853 	}
8854 
8855 	return 0;
8856 
8857 err_rollback:
8858 	netdev_err(dev, "refused to change device tx_queue_len\n");
8859 	dev->tx_queue_len = orig_len;
8860 	return res;
8861 }
8862 
8863 /**
8864  *	dev_set_group - Change group this device belongs to
8865  *	@dev: device
8866  *	@new_group: group this device should belong to
8867  */
8868 void dev_set_group(struct net_device *dev, int new_group)
8869 {
8870 	dev->group = new_group;
8871 }
8872 EXPORT_SYMBOL(dev_set_group);
8873 
8874 /**
8875  *	dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
8876  *	@dev: device
8877  *	@addr: new address
8878  *	@extack: netlink extended ack
8879  */
8880 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
8881 			      struct netlink_ext_ack *extack)
8882 {
8883 	struct netdev_notifier_pre_changeaddr_info info = {
8884 		.info.dev = dev,
8885 		.info.extack = extack,
8886 		.dev_addr = addr,
8887 	};
8888 	int rc;
8889 
8890 	rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
8891 	return notifier_to_errno(rc);
8892 }
8893 EXPORT_SYMBOL(dev_pre_changeaddr_notify);
8894 
8895 /**
8896  *	dev_set_mac_address - Change Media Access Control Address
8897  *	@dev: device
8898  *	@sa: new address
8899  *	@extack: netlink extended ack
8900  *
8901  *	Change the hardware (MAC) address of the device
8902  */
8903 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
8904 			struct netlink_ext_ack *extack)
8905 {
8906 	const struct net_device_ops *ops = dev->netdev_ops;
8907 	int err;
8908 
8909 	if (!ops->ndo_set_mac_address)
8910 		return -EOPNOTSUPP;
8911 	if (sa->sa_family != dev->type)
8912 		return -EINVAL;
8913 	if (!netif_device_present(dev))
8914 		return -ENODEV;
8915 	err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
8916 	if (err)
8917 		return err;
8918 	err = ops->ndo_set_mac_address(dev, sa);
8919 	if (err)
8920 		return err;
8921 	dev->addr_assign_type = NET_ADDR_SET;
8922 	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
8923 	add_device_randomness(dev->dev_addr, dev->addr_len);
8924 	return 0;
8925 }
8926 EXPORT_SYMBOL(dev_set_mac_address);
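
/* Illustrative sketch (hypothetical caller, not part of the kernel source):
 * programming a new hardware address under RTNL.  sa_family must match the
 * device type (e.g. ARPHRD_ETHER) or -EINVAL is returned above.
 */
static inline int example_set_mac(struct net_device *dev, const u8 *mac,
				  struct netlink_ext_ack *extack)
{
	struct sockaddr sa;

	ASSERT_RTNL();

	if (dev->addr_len > sizeof(sa.sa_data))
		return -EINVAL;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, dev->addr_len);

	return dev_set_mac_address(dev, &sa, extack);
}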
8927 
8928 static DECLARE_RWSEM(dev_addr_sem);
8929 
8930 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
8931 			     struct netlink_ext_ack *extack)
8932 {
8933 	int ret;
8934 
8935 	down_write(&dev_addr_sem);
8936 	ret = dev_set_mac_address(dev, sa, extack);
8937 	up_write(&dev_addr_sem);
8938 	return ret;
8939 }
8940 EXPORT_SYMBOL(dev_set_mac_address_user);
8941 
8942 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
8943 {
8944 	size_t size = sizeof(sa->sa_data);
8945 	struct net_device *dev;
8946 	int ret = 0;
8947 
8948 	down_read(&dev_addr_sem);
8949 	rcu_read_lock();
8950 
8951 	dev = dev_get_by_name_rcu(net, dev_name);
8952 	if (!dev) {
8953 		ret = -ENODEV;
8954 		goto unlock;
8955 	}
8956 	if (!dev->addr_len)
8957 		memset(sa->sa_data, 0, size);
8958 	else
8959 		memcpy(sa->sa_data, dev->dev_addr,
8960 		       min_t(size_t, size, dev->addr_len));
8961 	sa->sa_family = dev->type;
8962 
8963 unlock:
8964 	rcu_read_unlock();
8965 	up_read(&dev_addr_sem);
8966 	return ret;
8967 }
8968 EXPORT_SYMBOL(dev_get_mac_address);
8969 
8970 /**
8971  *	dev_change_carrier - Change device carrier
8972  *	@dev: device
8973  *	@new_carrier: new value
8974  *
8975  *	Change device carrier
8976  */
8977 int dev_change_carrier(struct net_device *dev, bool new_carrier)
8978 {
8979 	const struct net_device_ops *ops = dev->netdev_ops;
8980 
8981 	if (!ops->ndo_change_carrier)
8982 		return -EOPNOTSUPP;
8983 	if (!netif_device_present(dev))
8984 		return -ENODEV;
8985 	return ops->ndo_change_carrier(dev, new_carrier);
8986 }
8987 EXPORT_SYMBOL(dev_change_carrier);
8988 
8989 /**
8990  *	dev_get_phys_port_id - Get device physical port ID
8991  *	@dev: device
8992  *	@ppid: port ID
8993  *
8994  *	Get device physical port ID
8995  */
8996 int dev_get_phys_port_id(struct net_device *dev,
8997 			 struct netdev_phys_item_id *ppid)
8998 {
8999 	const struct net_device_ops *ops = dev->netdev_ops;
9000 
9001 	if (!ops->ndo_get_phys_port_id)
9002 		return -EOPNOTSUPP;
9003 	return ops->ndo_get_phys_port_id(dev, ppid);
9004 }
9005 EXPORT_SYMBOL(dev_get_phys_port_id);
9006 
9007 /**
9008  *	dev_get_phys_port_name - Get device physical port name
9009  *	@dev: device
9010  *	@name: port name
9011  *	@len: limit of bytes to copy to name
9012  *
9013  *	Get device physical port name
9014  */
9015 int dev_get_phys_port_name(struct net_device *dev,
9016 			   char *name, size_t len)
9017 {
9018 	const struct net_device_ops *ops = dev->netdev_ops;
9019 	int err;
9020 
9021 	if (ops->ndo_get_phys_port_name) {
9022 		err = ops->ndo_get_phys_port_name(dev, name, len);
9023 		if (err != -EOPNOTSUPP)
9024 			return err;
9025 	}
9026 	return devlink_compat_phys_port_name_get(dev, name, len);
9027 }
9028 EXPORT_SYMBOL(dev_get_phys_port_name);
9029 
9030 /**
9031  *	dev_get_port_parent_id - Get the device's port parent identifier
9032  *	@dev: network device
9033  *	@ppid: pointer to a storage for the port's parent identifier
9034  *	@recurse: allow/disallow recursion to lower devices
9035  *
9036  *	Get the device's port parent identifier
9037  */
9038 int dev_get_port_parent_id(struct net_device *dev,
9039 			   struct netdev_phys_item_id *ppid,
9040 			   bool recurse)
9041 {
9042 	const struct net_device_ops *ops = dev->netdev_ops;
9043 	struct netdev_phys_item_id first = { };
9044 	struct net_device *lower_dev;
9045 	struct list_head *iter;
9046 	int err;
9047 
9048 	if (ops->ndo_get_port_parent_id) {
9049 		err = ops->ndo_get_port_parent_id(dev, ppid);
9050 		if (err != -EOPNOTSUPP)
9051 			return err;
9052 	}
9053 
9054 	err = devlink_compat_switch_id_get(dev, ppid);
9055 	if (!err || err != -EOPNOTSUPP)
9056 		return err;
9057 
9058 	if (!recurse)
9059 		return -EOPNOTSUPP;
9060 
9061 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
9062 		err = dev_get_port_parent_id(lower_dev, ppid, recurse);
9063 		if (err)
9064 			break;
9065 		if (!first.id_len)
9066 			first = *ppid;
9067 		else if (memcmp(&first, ppid, sizeof(*ppid)))
9068 			return -EOPNOTSUPP;
9069 	}
9070 
9071 	return err;
9072 }
9073 EXPORT_SYMBOL(dev_get_port_parent_id);
9074 
9075 /**
9076  *	netdev_port_same_parent_id - Indicate if two network devices have
9077  *	the same port parent identifier
9078  *	@a: first network device
9079  *	@b: second network device
9080  */
9081 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
9082 {
9083 	struct netdev_phys_item_id a_id = { };
9084 	struct netdev_phys_item_id b_id = { };
9085 
9086 	if (dev_get_port_parent_id(a, &a_id, true) ||
9087 	    dev_get_port_parent_id(b, &b_id, true))
9088 		return false;
9089 
9090 	return netdev_phys_item_id_same(&a_id, &b_id);
9091 }
9092 EXPORT_SYMBOL(netdev_port_same_parent_id);
9093 
9094 /**
9095  *	dev_change_proto_down - update protocol port state information
9096  *	@dev: device
9097  *	@proto_down: new value
9098  *
9099  *	This info can be used by switch drivers to set the phys state of the
9100  *	port.
9101  */
9102 int dev_change_proto_down(struct net_device *dev, bool proto_down)
9103 {
9104 	const struct net_device_ops *ops = dev->netdev_ops;
9105 
9106 	if (!ops->ndo_change_proto_down)
9107 		return -EOPNOTSUPP;
9108 	if (!netif_device_present(dev))
9109 		return -ENODEV;
9110 	return ops->ndo_change_proto_down(dev, proto_down);
9111 }
9112 EXPORT_SYMBOL(dev_change_proto_down);
9113 
9114 /**
9115  *	dev_change_proto_down_generic - generic implementation for
9116  * 	ndo_change_proto_down that sets carrier according to
9117  * 	proto_down.
9118  *
9119  *	@dev: device
9120  *	@proto_down: new value
9121  */
9122 int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
9123 {
9124 	if (proto_down)
9125 		netif_carrier_off(dev);
9126 	else
9127 		netif_carrier_on(dev);
9128 	dev->proto_down = proto_down;
9129 	return 0;
9130 }
9131 EXPORT_SYMBOL(dev_change_proto_down_generic);
9132 
9133 /**
9134  *	dev_change_proto_down_reason - update proto_down reason bits
9135  *
9136  *	@dev: device
9137  *	@mask: proto down mask
9138  *	@value: proto down value
9139  */
9140 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
9141 				  u32 value)
9142 {
9143 	int b;
9144 
9145 	if (!mask) {
9146 		dev->proto_down_reason = value;
9147 	} else {
9148 		for_each_set_bit(b, &mask, 32) {
9149 			if (value & (1 << b))
9150 				dev->proto_down_reason |= BIT(b);
9151 			else
9152 				dev->proto_down_reason &= ~BIT(b);
9153 		}
9154 	}
9155 }
9156 EXPORT_SYMBOL(dev_change_proto_down_reason);
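
/* Illustrative sketch (hypothetical reason bit, not part of the kernel
 * source): setting or clearing a single proto_down reason bit.  With a
 * non-zero @mask only the masked bits are updated from @value; with a zero
 * mask the whole reason word is replaced.  @reason_bit must be below 32.
 */
static inline void example_mark_proto_down_reason(struct net_device *dev,
						  unsigned int reason_bit,
						  bool set)
{
	dev_change_proto_down_reason(dev, BIT(reason_bit),
				     set ? BIT(reason_bit) : 0);
}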
9157 
9158 struct bpf_xdp_link {
9159 	struct bpf_link link;
9160 	struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
9161 	int flags;
9162 };
9163 
9164 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
9165 {
9166 	if (flags & XDP_FLAGS_HW_MODE)
9167 		return XDP_MODE_HW;
9168 	if (flags & XDP_FLAGS_DRV_MODE)
9169 		return XDP_MODE_DRV;
9170 	if (flags & XDP_FLAGS_SKB_MODE)
9171 		return XDP_MODE_SKB;
9172 	return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
9173 }
9174 
9175 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
9176 {
9177 	switch (mode) {
9178 	case XDP_MODE_SKB:
9179 		return generic_xdp_install;
9180 	case XDP_MODE_DRV:
9181 	case XDP_MODE_HW:
9182 		return dev->netdev_ops->ndo_bpf;
9183 	default:
9184 		return NULL;
9185 	}
9186 }
9187 
9188 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
9189 					 enum bpf_xdp_mode mode)
9190 {
9191 	return dev->xdp_state[mode].link;
9192 }
9193 
9194 static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
9195 				     enum bpf_xdp_mode mode)
9196 {
9197 	struct bpf_xdp_link *link = dev_xdp_link(dev, mode);
9198 
9199 	if (link)
9200 		return link->link.prog;
9201 	return dev->xdp_state[mode].prog;
9202 }
9203 
9204 static u8 dev_xdp_prog_count(struct net_device *dev)
9205 {
9206 	u8 count = 0;
9207 	int i;
9208 
9209 	for (i = 0; i < __MAX_XDP_MODE; i++)
9210 		if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
9211 			count++;
9212 	return count;
9213 }
9214 
9215 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
9216 {
9217 	struct bpf_prog *prog = dev_xdp_prog(dev, mode);
9218 
9219 	return prog ? prog->aux->id : 0;
9220 }
9221 
9222 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
9223 			     struct bpf_xdp_link *link)
9224 {
9225 	dev->xdp_state[mode].link = link;
9226 	dev->xdp_state[mode].prog = NULL;
9227 }
9228 
9229 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
9230 			     struct bpf_prog *prog)
9231 {
9232 	dev->xdp_state[mode].link = NULL;
9233 	dev->xdp_state[mode].prog = prog;
9234 }
9235 
9236 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
9237 			   bpf_op_t bpf_op, struct netlink_ext_ack *extack,
9238 			   u32 flags, struct bpf_prog *prog)
9239 {
9240 	struct netdev_bpf xdp;
9241 	int err;
9242 
9243 	memset(&xdp, 0, sizeof(xdp));
9244 	xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
9245 	xdp.extack = extack;
9246 	xdp.flags = flags;
9247 	xdp.prog = prog;
9248 
9249 	/* Drivers assume refcnt is already incremented (i.e., prog pointer is
9250 	 * "moved" into driver), so they don't increment it on their own, but
9251 	 * they do decrement refcnt when program is detached or replaced.
9252 	 * Given net_device also owns link/prog, we need to bump refcnt here
9253 	 * to prevent drivers from underflowing it.
9254 	 */
9255 	if (prog)
9256 		bpf_prog_inc(prog);
9257 	err = bpf_op(dev, &xdp);
9258 	if (err) {
9259 		if (prog)
9260 			bpf_prog_put(prog);
9261 		return err;
9262 	}
9263 
9264 	if (mode != XDP_MODE_HW)
9265 		bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);
9266 
9267 	return 0;
9268 }
9269 
9270 static void dev_xdp_uninstall(struct net_device *dev)
9271 {
9272 	struct bpf_xdp_link *link;
9273 	struct bpf_prog *prog;
9274 	enum bpf_xdp_mode mode;
9275 	bpf_op_t bpf_op;
9276 
9277 	ASSERT_RTNL();
9278 
9279 	for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
9280 		prog = dev_xdp_prog(dev, mode);
9281 		if (!prog)
9282 			continue;
9283 
9284 		bpf_op = dev_xdp_bpf_op(dev, mode);
9285 		if (!bpf_op)
9286 			continue;
9287 
9288 		WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9289 
9290 		/* auto-detach link from net device */
9291 		link = dev_xdp_link(dev, mode);
9292 		if (link)
9293 			link->dev = NULL;
9294 		else
9295 			bpf_prog_put(prog);
9296 
9297 		dev_xdp_set_link(dev, mode, NULL);
9298 	}
9299 }
9300 
9301 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
9302 			  struct bpf_xdp_link *link, struct bpf_prog *new_prog,
9303 			  struct bpf_prog *old_prog, u32 flags)
9304 {
9305 	unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
9306 	struct bpf_prog *cur_prog;
9307 	enum bpf_xdp_mode mode;
9308 	bpf_op_t bpf_op;
9309 	int err;
9310 
9311 	ASSERT_RTNL();
9312 
9313 	/* either link or prog attachment, never both */
9314 	if (link && (new_prog || old_prog))
9315 		return -EINVAL;
9316 	/* link supports only XDP mode flags */
9317 	if (link && (flags & ~XDP_FLAGS_MODES)) {
9318 		NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
9319 		return -EINVAL;
9320 	}
9321 	/* just one XDP mode bit should be set, zero defaults to drv/skb mode */
9322 	if (num_modes > 1) {
9323 		NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
9324 		return -EINVAL;
9325 	}
9326 	/* avoid ambiguity if offload + drv/skb mode progs are both loaded */
9327 	if (!num_modes && dev_xdp_prog_count(dev) > 1) {
9328 		NL_SET_ERR_MSG(extack,
9329 			       "More than one program loaded, unset mode is ambiguous");
9330 		return -EINVAL;
9331 	}
9332 	/* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
9333 	if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
9334 		NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
9335 		return -EINVAL;
9336 	}
9337 
9338 	mode = dev_xdp_mode(dev, flags);
9339 	/* can't replace attached link */
9340 	if (dev_xdp_link(dev, mode)) {
9341 		NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
9342 		return -EBUSY;
9343 	}
9344 
9345 	cur_prog = dev_xdp_prog(dev, mode);
9346 	/* can't replace attached prog with link */
9347 	if (link && cur_prog) {
9348 		NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
9349 		return -EBUSY;
9350 	}
9351 	if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
9352 		NL_SET_ERR_MSG(extack, "Active program does not match expected");
9353 		return -EEXIST;
9354 	}
9355 
9356 	/* put effective new program into new_prog */
9357 	if (link)
9358 		new_prog = link->link.prog;
9359 
9360 	if (new_prog) {
9361 		bool offload = mode == XDP_MODE_HW;
9362 		enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
9363 					       ? XDP_MODE_DRV : XDP_MODE_SKB;
9364 
9365 		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
9366 			NL_SET_ERR_MSG(extack, "XDP program already attached");
9367 			return -EBUSY;
9368 		}
9369 		if (!offload && dev_xdp_prog(dev, other_mode)) {
9370 			NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
9371 			return -EEXIST;
9372 		}
9373 		if (!offload && bpf_prog_is_dev_bound(new_prog->aux)) {
9374 			NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported");
9375 			return -EINVAL;
9376 		}
9377 		if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
9378 			NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
9379 			return -EINVAL;
9380 		}
9381 		if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
9382 			NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
9383 			return -EINVAL;
9384 		}
9385 	}
9386 
9387 	/* don't call drivers if the effective program didn't change */
9388 	if (new_prog != cur_prog) {
9389 		bpf_op = dev_xdp_bpf_op(dev, mode);
9390 		if (!bpf_op) {
9391 			NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
9392 			return -EOPNOTSUPP;
9393 		}
9394 
9395 		err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
9396 		if (err)
9397 			return err;
9398 	}
9399 
9400 	if (link)
9401 		dev_xdp_set_link(dev, mode, link);
9402 	else
9403 		dev_xdp_set_prog(dev, mode, new_prog);
9404 	if (cur_prog)
9405 		bpf_prog_put(cur_prog);
9406 
9407 	return 0;
9408 }
9409 
9410 static int dev_xdp_attach_link(struct net_device *dev,
9411 			       struct netlink_ext_ack *extack,
9412 			       struct bpf_xdp_link *link)
9413 {
9414 	return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
9415 }
9416 
9417 static int dev_xdp_detach_link(struct net_device *dev,
9418 			       struct netlink_ext_ack *extack,
9419 			       struct bpf_xdp_link *link)
9420 {
9421 	enum bpf_xdp_mode mode;
9422 	bpf_op_t bpf_op;
9423 
9424 	ASSERT_RTNL();
9425 
9426 	mode = dev_xdp_mode(dev, link->flags);
9427 	if (dev_xdp_link(dev, mode) != link)
9428 		return -EINVAL;
9429 
9430 	bpf_op = dev_xdp_bpf_op(dev, mode);
9431 	WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9432 	dev_xdp_set_link(dev, mode, NULL);
9433 	return 0;
9434 }
9435 
9436 static void bpf_xdp_link_release(struct bpf_link *link)
9437 {
9438 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9439 
9440 	rtnl_lock();
9441 
9442 	/* if racing with net_device's teardown, xdp_link->dev might already
9443 	 * be NULL, in which case the link was already auto-detached
9444 	 */
9445 	if (xdp_link->dev) {
9446 		WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
9447 		xdp_link->dev = NULL;
9448 	}
9449 
9450 	rtnl_unlock();
9451 }
9452 
9453 static int bpf_xdp_link_detach(struct bpf_link *link)
9454 {
9455 	bpf_xdp_link_release(link);
9456 	return 0;
9457 }
9458 
9459 static void bpf_xdp_link_dealloc(struct bpf_link *link)
9460 {
9461 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9462 
9463 	kfree(xdp_link);
9464 }
9465 
9466 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
9467 				     struct seq_file *seq)
9468 {
9469 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9470 	u32 ifindex = 0;
9471 
9472 	rtnl_lock();
9473 	if (xdp_link->dev)
9474 		ifindex = xdp_link->dev->ifindex;
9475 	rtnl_unlock();
9476 
9477 	seq_printf(seq, "ifindex:\t%u\n", ifindex);
9478 }
9479 
9480 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
9481 				       struct bpf_link_info *info)
9482 {
9483 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9484 	u32 ifindex = 0;
9485 
9486 	rtnl_lock();
9487 	if (xdp_link->dev)
9488 		ifindex = xdp_link->dev->ifindex;
9489 	rtnl_unlock();
9490 
9491 	info->xdp.ifindex = ifindex;
9492 	return 0;
9493 }
9494 
9495 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
9496 			       struct bpf_prog *old_prog)
9497 {
9498 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9499 	enum bpf_xdp_mode mode;
9500 	bpf_op_t bpf_op;
9501 	int err = 0;
9502 
9503 	rtnl_lock();
9504 
9505 	/* link might have been auto-released already, so fail */
9506 	if (!xdp_link->dev) {
9507 		err = -ENOLINK;
9508 		goto out_unlock;
9509 	}
9510 
9511 	if (old_prog && link->prog != old_prog) {
9512 		err = -EPERM;
9513 		goto out_unlock;
9514 	}
9515 	old_prog = link->prog;
9516 	if (old_prog == new_prog) {
9517 		/* no-op, don't disturb drivers */
9518 		bpf_prog_put(new_prog);
9519 		goto out_unlock;
9520 	}
9521 
9522 	mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
9523 	bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
9524 	err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
9525 			      xdp_link->flags, new_prog);
9526 	if (err)
9527 		goto out_unlock;
9528 
9529 	old_prog = xchg(&link->prog, new_prog);
9530 	bpf_prog_put(old_prog);
9531 
9532 out_unlock:
9533 	rtnl_unlock();
9534 	return err;
9535 }
9536 
9537 static const struct bpf_link_ops bpf_xdp_link_lops = {
9538 	.release = bpf_xdp_link_release,
9539 	.dealloc = bpf_xdp_link_dealloc,
9540 	.detach = bpf_xdp_link_detach,
9541 	.show_fdinfo = bpf_xdp_link_show_fdinfo,
9542 	.fill_link_info = bpf_xdp_link_fill_link_info,
9543 	.update_prog = bpf_xdp_link_update,
9544 };
9545 
9546 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
9547 {
9548 	struct net *net = current->nsproxy->net_ns;
9549 	struct bpf_link_primer link_primer;
9550 	struct bpf_xdp_link *link;
9551 	struct net_device *dev;
9552 	int err, fd;
9553 
9554 	dev = dev_get_by_index(net, attr->link_create.target_ifindex);
9555 	if (!dev)
9556 		return -EINVAL;
9557 
9558 	link = kzalloc(sizeof(*link), GFP_USER);
9559 	if (!link) {
9560 		err = -ENOMEM;
9561 		goto out_put_dev;
9562 	}
9563 
9564 	bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
9565 	link->dev = dev;
9566 	link->flags = attr->link_create.flags;
9567 
9568 	err = bpf_link_prime(&link->link, &link_primer);
9569 	if (err) {
9570 		kfree(link);
9571 		goto out_put_dev;
9572 	}
9573 
9574 	rtnl_lock();
9575 	err = dev_xdp_attach_link(dev, NULL, link);
9576 	rtnl_unlock();
9577 
9578 	if (err) {
9579 		bpf_link_cleanup(&link_primer);
9580 		goto out_put_dev;
9581 	}
9582 
9583 	fd = bpf_link_settle(&link_primer);
9584 	/* link itself doesn't hold dev's refcnt to not complicate shutdown */
9585 	dev_put(dev);
9586 	return fd;
9587 
9588 out_put_dev:
9589 	dev_put(dev);
9590 	return err;
9591 }
9592 
9593 /**
9594  *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
9595  *	@dev: device
9596  *	@extack: netlink extended ack
9597  *	@fd: new program fd or negative value to clear
9598  *	@expected_fd: old program fd that userspace expects to replace or clear
9599  *	@flags: xdp-related flags
9600  *
9601  *	Set or clear a bpf program for a device
9602  */
9603 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
9604 		      int fd, int expected_fd, u32 flags)
9605 {
9606 	enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
9607 	struct bpf_prog *new_prog = NULL, *old_prog = NULL;
9608 	int err;
9609 
9610 	ASSERT_RTNL();
9611 
9612 	if (fd >= 0) {
9613 		new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
9614 						 mode != XDP_MODE_SKB);
9615 		if (IS_ERR(new_prog))
9616 			return PTR_ERR(new_prog);
9617 	}
9618 
9619 	if (expected_fd >= 0) {
9620 		old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
9621 						 mode != XDP_MODE_SKB);
9622 		if (IS_ERR(old_prog)) {
9623 			err = PTR_ERR(old_prog);
9624 			old_prog = NULL;
9625 			goto err_out;
9626 		}
9627 	}
9628 
9629 	err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);
9630 
9631 err_out:
9632 	if (err && new_prog)
9633 		bpf_prog_put(new_prog);
9634 	if (old_prog)
9635 		bpf_prog_put(old_prog);
9636 	return err;
9637 }
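
/*
 * Example: a minimal sketch of an rtnl-locked caller driving
 * dev_change_xdp_fd(); in-tree, the IFLA_XDP handling in rtnetlink's
 * do_setlink() is the typical caller.  The wrapper below and its flag
 * policy are illustrative assumptions, not part of this file.
 */
static int example_install_generic_xdp(struct net_device *dev,
				       struct netlink_ext_ack *extack,
				       int prog_fd)
{
	ASSERT_RTNL();

	/* attach in generic (skb) mode; refuse to replace an existing prog */
	return dev_change_xdp_fd(dev, extack, prog_fd, -1,
				 XDP_FLAGS_SKB_MODE |
				 XDP_FLAGS_UPDATE_IF_NOEXIST);
}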
9638 
9639 /**
9640  *	dev_new_index	-	allocate an ifindex
9641  *	@net: the applicable net namespace
9642  *
9643  *	Returns a suitable unique value for a new device interface
9644  *	number.  The caller must hold the rtnl semaphore or the
9645  *	dev_base_lock to be sure it remains unique.
9646  */
9647 static int dev_new_index(struct net *net)
9648 {
9649 	int ifindex = net->ifindex;
9650 
9651 	for (;;) {
9652 		if (++ifindex <= 0)
9653 			ifindex = 1;
9654 		if (!__dev_get_by_index(net, ifindex))
9655 			return net->ifindex = ifindex;
9656 	}
9657 }
9658 
9659 /* Delayed registration/unregistration */
9660 static LIST_HEAD(net_todo_list);
9661 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
9662 
9663 static void net_set_todo(struct net_device *dev)
9664 {
9665 	list_add_tail(&dev->todo_list, &net_todo_list);
9666 	dev_net(dev)->dev_unreg_count++;
9667 }
9668 
9669 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
9670 	struct net_device *upper, netdev_features_t features)
9671 {
9672 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9673 	netdev_features_t feature;
9674 	int feature_bit;
9675 
9676 	for_each_netdev_feature(upper_disables, feature_bit) {
9677 		feature = __NETIF_F_BIT(feature_bit);
9678 		if (!(upper->wanted_features & feature)
9679 		    && (features & feature)) {
9680 			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
9681 				   &feature, upper->name);
9682 			features &= ~feature;
9683 		}
9684 	}
9685 
9686 	return features;
9687 }
9688 
9689 static void netdev_sync_lower_features(struct net_device *upper,
9690 	struct net_device *lower, netdev_features_t features)
9691 {
9692 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9693 	netdev_features_t feature;
9694 	int feature_bit;
9695 
9696 	for_each_netdev_feature(upper_disables, feature_bit) {
9697 		feature = __NETIF_F_BIT(feature_bit);
9698 		if (!(features & feature) && (lower->features & feature)) {
9699 			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
9700 				   &feature, lower->name);
9701 			lower->wanted_features &= ~feature;
9702 			__netdev_update_features(lower);
9703 
9704 			if (unlikely(lower->features & feature))
9705 				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
9706 					    &feature, lower->name);
9707 			else
9708 				netdev_features_change(lower);
9709 		}
9710 	}
9711 }
9712 
9713 static netdev_features_t netdev_fix_features(struct net_device *dev,
9714 	netdev_features_t features)
9715 {
9716 	/* Fix illegal checksum combinations */
9717 	if ((features & NETIF_F_HW_CSUM) &&
9718 	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
9719 		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
9720 		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
9721 	}
9722 
9723 	/* TSO requires that SG is present as well. */
9724 	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
9725 		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
9726 		features &= ~NETIF_F_ALL_TSO;
9727 	}
9728 
9729 	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
9730 					!(features & NETIF_F_IP_CSUM)) {
9731 		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
9732 		features &= ~NETIF_F_TSO;
9733 		features &= ~NETIF_F_TSO_ECN;
9734 	}
9735 
9736 	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
9737 					 !(features & NETIF_F_IPV6_CSUM)) {
9738 		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
9739 		features &= ~NETIF_F_TSO6;
9740 	}
9741 
9742 	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
9743 	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
9744 		features &= ~NETIF_F_TSO_MANGLEID;
9745 
9746 	/* TSO ECN requires that TSO is present as well. */
9747 	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
9748 		features &= ~NETIF_F_TSO_ECN;
9749 
9750 	/* Software GSO depends on SG. */
9751 	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
9752 		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
9753 		features &= ~NETIF_F_GSO;
9754 	}
9755 
9756 	/* GSO partial features require GSO partial be set */
9757 	if ((features & dev->gso_partial_features) &&
9758 	    !(features & NETIF_F_GSO_PARTIAL)) {
9759 		netdev_dbg(dev,
9760 			   "Dropping partially supported GSO features since no GSO partial.\n");
9761 		features &= ~dev->gso_partial_features;
9762 	}
9763 
9764 	if (!(features & NETIF_F_RXCSUM)) {
9765 		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
9766 		 * successfully merged by hardware must also have the
9767 		 * checksum verified by hardware.  If the user does not
9768 		 * want to enable RXCSUM, logically, we should disable GRO_HW.
9769 		 */
9770 		if (features & NETIF_F_GRO_HW) {
9771 			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
9772 			features &= ~NETIF_F_GRO_HW;
9773 		}
9774 	}
9775 
9776 	/* LRO/HW-GRO features cannot be combined with RX-FCS */
9777 	if (features & NETIF_F_RXFCS) {
9778 		if (features & NETIF_F_LRO) {
9779 			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9780 			features &= ~NETIF_F_LRO;
9781 		}
9782 
9783 		if (features & NETIF_F_GRO_HW) {
9784 			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9785 			features &= ~NETIF_F_GRO_HW;
9786 		}
9787 	}
9788 
9789 	if (features & NETIF_F_HW_TLS_TX) {
9790 		bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
9791 			(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
9792 		bool hw_csum = features & NETIF_F_HW_CSUM;
9793 
9794 		if (!ip_csum && !hw_csum) {
9795 			netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
9796 			features &= ~NETIF_F_HW_TLS_TX;
9797 		}
9798 	}
9799 
9800 	if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
9801 		netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
9802 		features &= ~NETIF_F_HW_TLS_RX;
9803 	}
9804 
9805 	return features;
9806 }
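
/*
 * Example: a sketch of a driver-side .ndo_fix_features callback.  It runs
 * from __netdev_update_features() just before the generic
 * netdev_fix_features() pass above, so a driver only needs to express the
 * constraints the core does not already know about.  The "foo" hardware
 * quirk is an illustrative assumption.
 */
static netdev_features_t foo_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	/* assumed quirk: VLAN tag stripping needs RX checksumming enabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	return features;
}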
9807 
9808 int __netdev_update_features(struct net_device *dev)
9809 {
9810 	struct net_device *upper, *lower;
9811 	netdev_features_t features;
9812 	struct list_head *iter;
9813 	int err = -1;
9814 
9815 	ASSERT_RTNL();
9816 
9817 	features = netdev_get_wanted_features(dev);
9818 
9819 	if (dev->netdev_ops->ndo_fix_features)
9820 		features = dev->netdev_ops->ndo_fix_features(dev, features);
9821 
9822 	/* driver might be less strict about feature dependencies */
9823 	features = netdev_fix_features(dev, features);
9824 
9825 	/* some features can't be enabled if they're off on an upper device */
9826 	netdev_for_each_upper_dev_rcu(dev, upper, iter)
9827 		features = netdev_sync_upper_features(dev, upper, features);
9828 
9829 	if (dev->features == features)
9830 		goto sync_lower;
9831 
9832 	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
9833 		&dev->features, &features);
9834 
9835 	if (dev->netdev_ops->ndo_set_features)
9836 		err = dev->netdev_ops->ndo_set_features(dev, features);
9837 	else
9838 		err = 0;
9839 
9840 	if (unlikely(err < 0)) {
9841 		netdev_err(dev,
9842 			"set_features() failed (%d); wanted %pNF, left %pNF\n",
9843 			err, &features, &dev->features);
9844 		/* return non-0 since some features might have changed and
9845 		 * it's better to fire a spurious notification than miss it
9846 		 */
9847 		return -1;
9848 	}
9849 
9850 sync_lower:
9851 	/* some features must be disabled on lower devices when disabled
9852 	 * on an upper device (think: bonding master or bridge)
9853 	 */
9854 	netdev_for_each_lower_dev(dev, lower, iter)
9855 		netdev_sync_lower_features(dev, lower, features);
9856 
9857 	if (!err) {
9858 		netdev_features_t diff = features ^ dev->features;
9859 
9860 		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
9861 			/* udp_tunnel_{get,drop}_rx_info both need
9862 			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
9863 			 * device, or they won't do anything.
9864 			 * Thus we need to update dev->features
9865 			 * *before* calling udp_tunnel_get_rx_info,
9866 			 * but *after* calling udp_tunnel_drop_rx_info.
9867 			 */
9868 			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
9869 				dev->features = features;
9870 				udp_tunnel_get_rx_info(dev);
9871 			} else {
9872 				udp_tunnel_drop_rx_info(dev);
9873 			}
9874 		}
9875 
9876 		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
9877 			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
9878 				dev->features = features;
9879 				err |= vlan_get_rx_ctag_filter_info(dev);
9880 			} else {
9881 				vlan_drop_rx_ctag_filter_info(dev);
9882 			}
9883 		}
9884 
9885 		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
9886 			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
9887 				dev->features = features;
9888 				err |= vlan_get_rx_stag_filter_info(dev);
9889 			} else {
9890 				vlan_drop_rx_stag_filter_info(dev);
9891 			}
9892 		}
9893 
9894 		dev->features = features;
9895 	}
9896 
9897 	return err < 0 ? 0 : 1;
9898 }
9899 
9900 /**
9901  *	netdev_update_features - recalculate device features
9902  *	@dev: the device to check
9903  *
9904  *	Recalculate dev->features set and send notifications if it
9905  *	has changed. Should be called after driver or hardware dependent
9906  *	conditions might have changed that influence the features.
9907  */
9908 void netdev_update_features(struct net_device *dev)
9909 {
9910 	if (__netdev_update_features(dev))
9911 		netdev_features_change(dev);
9912 }
9913 EXPORT_SYMBOL(netdev_update_features);
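
/*
 * Example: a minimal sketch of when a driver calls
 * netdev_update_features().  The trigger is hypothetical; the only real
 * requirement is that rtnl is held, as __netdev_update_features() asserts.
 */
static void foo_hw_reconfigured(struct net_device *dev)
{
	ASSERT_RTNL();

	/* hardware-dependent conditions changed: recompute dev->features
	 * and emit a feature-change notification only if they differ
	 */
	netdev_update_features(dev);
}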
9914 
9915 /**
9916  *	netdev_change_features - recalculate device features
9917  *	@dev: the device to check
9918  *
9919  *	Recalculate dev->features set and send notifications even
9920  *	if they have not changed. Should be called instead of
9921  *	netdev_update_features() if also dev->vlan_features might
9922  *	have changed to allow the changes to be propagated to stacked
9923  *	VLAN devices.
9924  */
9925 void netdev_change_features(struct net_device *dev)
9926 {
9927 	__netdev_update_features(dev);
9928 	netdev_features_change(dev);
9929 }
9930 EXPORT_SYMBOL(netdev_change_features);
9931 
9932 /**
9933  *	netif_stacked_transfer_operstate -	transfer operstate
9934  *	@rootdev: the root or lower level device to transfer state from
9935  *	@dev: the device to transfer operstate to
9936  *
9937  *	Transfer operational state from root to device. This is normally
9938  *	called when a stacking relationship exists between the root
9939  *	device and the device (a leaf device).
9940  */
9941 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
9942 					struct net_device *dev)
9943 {
9944 	if (rootdev->operstate == IF_OPER_DORMANT)
9945 		netif_dormant_on(dev);
9946 	else
9947 		netif_dormant_off(dev);
9948 
9949 	if (rootdev->operstate == IF_OPER_TESTING)
9950 		netif_testing_on(dev);
9951 	else
9952 		netif_testing_off(dev);
9953 
9954 	if (netif_carrier_ok(rootdev))
9955 		netif_carrier_on(dev);
9956 	else
9957 		netif_carrier_off(dev);
9958 }
9959 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
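
/*
 * Example: a sketch of how a stacking driver (vlan/macvlan style) keeps
 * its upper devices' operstate in sync with the lower device from a
 * netdevice notifier.  The notifier plumbing is an illustrative
 * assumption, not taken from a specific driver.
 */
static int foo_lower_event(struct notifier_block *nb, unsigned long event,
			   void *ptr)
{
	struct net_device *lower = netdev_notifier_info_to_dev(ptr);
	struct net_device *upper;
	struct list_head *iter;

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	/* rtnl is held for these events, so the adjacency lists are stable */
	netdev_for_each_upper_dev_rcu(lower, upper, iter)
		netif_stacked_transfer_operstate(lower, upper);

	return NOTIFY_DONE;
}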
9960 
9961 static int netif_alloc_rx_queues(struct net_device *dev)
9962 {
9963 	unsigned int i, count = dev->num_rx_queues;
9964 	struct netdev_rx_queue *rx;
9965 	size_t sz = count * sizeof(*rx);
9966 	int err = 0;
9967 
9968 	BUG_ON(count < 1);
9969 
9970 	rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
9971 	if (!rx)
9972 		return -ENOMEM;
9973 
9974 	dev->_rx = rx;
9975 
9976 	for (i = 0; i < count; i++) {
9977 		rx[i].dev = dev;
9978 
9979 		/* XDP RX-queue setup */
9980 		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
9981 		if (err < 0)
9982 			goto err_rxq_info;
9983 	}
9984 	return 0;
9985 
9986 err_rxq_info:
9987 	/* Rollback successful reg's and free other resources */
9988 	while (i--)
9989 		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
9990 	kvfree(dev->_rx);
9991 	dev->_rx = NULL;
9992 	return err;
9993 }
9994 
9995 static void netif_free_rx_queues(struct net_device *dev)
9996 {
9997 	unsigned int i, count = dev->num_rx_queues;
9998 
9999 	/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
10000 	if (!dev->_rx)
10001 		return;
10002 
10003 	for (i = 0; i < count; i++)
10004 		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
10005 
10006 	kvfree(dev->_rx);
10007 }
10008 
10009 static void netdev_init_one_queue(struct net_device *dev,
10010 				  struct netdev_queue *queue, void *_unused)
10011 {
10012 	/* Initialize queue lock */
10013 	spin_lock_init(&queue->_xmit_lock);
10014 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
10015 	queue->xmit_lock_owner = -1;
10016 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
10017 	queue->dev = dev;
10018 #ifdef CONFIG_BQL
10019 	dql_init(&queue->dql, HZ);
10020 #endif
10021 }
10022 
10023 static void netif_free_tx_queues(struct net_device *dev)
10024 {
10025 	kvfree(dev->_tx);
10026 }
10027 
10028 static int netif_alloc_netdev_queues(struct net_device *dev)
10029 {
10030 	unsigned int count = dev->num_tx_queues;
10031 	struct netdev_queue *tx;
10032 	size_t sz = count * sizeof(*tx);
10033 
10034 	if (count < 1 || count > 0xffff)
10035 		return -EINVAL;
10036 
10037 	tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
10038 	if (!tx)
10039 		return -ENOMEM;
10040 
10041 	dev->_tx = tx;
10042 
10043 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
10044 	spin_lock_init(&dev->tx_global_lock);
10045 
10046 	return 0;
10047 }
10048 
10049 void netif_tx_stop_all_queues(struct net_device *dev)
10050 {
10051 	unsigned int i;
10052 
10053 	for (i = 0; i < dev->num_tx_queues; i++) {
10054 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
10055 
10056 		netif_tx_stop_queue(txq);
10057 	}
10058 }
10059 EXPORT_SYMBOL(netif_tx_stop_all_queues);
10060 
10061 /**
10062  *	register_netdevice	- register a network device
10063  *	@dev: device to register
10064  *
10065  *	Take a completed network device structure and add it to the kernel
10066  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
10067  *	chain. 0 is returned on success. A negative errno code is returned
10068  *	on a failure to set up the device, or if the name is a duplicate.
10069  *
10070  *	Callers must hold the rtnl semaphore. You may want
10071  *	register_netdev() instead of this.
10072  *
10073  *	BUGS:
10074  *	The locking appears insufficient to guarantee two parallel registers
10075  *	will not get the same name.
10076  */
10077 
10078 int register_netdevice(struct net_device *dev)
10079 {
10080 	int ret;
10081 	struct net *net = dev_net(dev);
10082 
10083 	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
10084 		     NETDEV_FEATURE_COUNT);
10085 	BUG_ON(dev_boot_phase);
10086 	ASSERT_RTNL();
10087 
10088 	might_sleep();
10089 
10090 	/* When net_devices are persistent, this will be fatal. */
10091 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
10092 	BUG_ON(!net);
10093 
10094 	ret = ethtool_check_ops(dev->ethtool_ops);
10095 	if (ret)
10096 		return ret;
10097 
10098 	spin_lock_init(&dev->addr_list_lock);
10099 	netdev_set_addr_lockdep_class(dev);
10100 
10101 	ret = dev_get_valid_name(net, dev, dev->name);
10102 	if (ret < 0)
10103 		goto out;
10104 
10105 	ret = -ENOMEM;
10106 	dev->name_node = netdev_name_node_head_alloc(dev);
10107 	if (!dev->name_node)
10108 		goto out;
10109 
10110 	/* Init, if this function is available */
10111 	if (dev->netdev_ops->ndo_init) {
10112 		ret = dev->netdev_ops->ndo_init(dev);
10113 		if (ret) {
10114 			if (ret > 0)
10115 				ret = -EIO;
10116 			goto err_free_name;
10117 		}
10118 	}
10119 
10120 	if (((dev->hw_features | dev->features) &
10121 	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
10122 	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
10123 	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
10124 		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
10125 		ret = -EINVAL;
10126 		goto err_uninit;
10127 	}
10128 
10129 	ret = -EBUSY;
10130 	if (!dev->ifindex)
10131 		dev->ifindex = dev_new_index(net);
10132 	else if (__dev_get_by_index(net, dev->ifindex))
10133 		goto err_uninit;
10134 
10135 	/* Transfer changeable features to wanted_features and enable
10136 	 * software offloads (GSO and GRO).
10137 	 */
10138 	dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
10139 	dev->features |= NETIF_F_SOFT_FEATURES;
10140 
10141 	if (dev->udp_tunnel_nic_info) {
10142 		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10143 		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10144 	}
10145 
10146 	dev->wanted_features = dev->features & dev->hw_features;
10147 
10148 	if (!(dev->flags & IFF_LOOPBACK))
10149 		dev->hw_features |= NETIF_F_NOCACHE_COPY;
10150 
10151 	/* If IPv4 TCP segmentation offload is supported we should also
10152 	 * allow the device to enable segmenting the frame with the option
10153 	 * of ignoring a static IP ID value.  This doesn't enable the
10154 	 * feature itself but allows the user to enable it later.
10155 	 */
10156 	if (dev->hw_features & NETIF_F_TSO)
10157 		dev->hw_features |= NETIF_F_TSO_MANGLEID;
10158 	if (dev->vlan_features & NETIF_F_TSO)
10159 		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
10160 	if (dev->mpls_features & NETIF_F_TSO)
10161 		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
10162 	if (dev->hw_enc_features & NETIF_F_TSO)
10163 		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
10164 
10165 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
10166 	 */
10167 	dev->vlan_features |= NETIF_F_HIGHDMA;
10168 
10169 	/* Make NETIF_F_SG inheritable to tunnel devices.
10170 	 */
10171 	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
10172 
10173 	/* Make NETIF_F_SG inheritable to MPLS.
10174 	 */
10175 	dev->mpls_features |= NETIF_F_SG;
10176 
10177 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
10178 	ret = notifier_to_errno(ret);
10179 	if (ret)
10180 		goto err_uninit;
10181 
10182 	ret = netdev_register_kobject(dev);
10183 	if (ret) {
10184 		dev->reg_state = NETREG_UNREGISTERED;
10185 		goto err_uninit;
10186 	}
10187 	dev->reg_state = NETREG_REGISTERED;
10188 
10189 	__netdev_update_features(dev);
10190 
10191 	/*
10192 	 *	Default initial state at registration is that the
10193 	 *	device is present.
10194 	 */
10195 
10196 	set_bit(__LINK_STATE_PRESENT, &dev->state);
10197 
10198 	linkwatch_init_dev(dev);
10199 
10200 	dev_init_scheduler(dev);
10201 	dev_hold(dev);
10202 	list_netdevice(dev);
10203 	add_device_randomness(dev->dev_addr, dev->addr_len);
10204 
10205 	/* If the device has a permanent device address, the driver should
10206 	 * set dev_addr, and addr_assign_type should be set to
10207 	 * NET_ADDR_PERM (default value).
10208 	 */
10209 	if (dev->addr_assign_type == NET_ADDR_PERM)
10210 		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10211 
10212 	/* Notify protocols, that a new device appeared. */
10213 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
10214 	ret = notifier_to_errno(ret);
10215 	if (ret) {
10216 		/* Expect explicit free_netdev() on failure */
10217 		dev->needs_free_netdev = false;
10218 		unregister_netdevice_queue(dev, NULL);
10219 		goto out;
10220 	}
10221 	/*
10222 	 *	Prevent userspace races by waiting until the network
10223 	 *	device is fully set up before sending notifications.
10224 	 */
10225 	if (!dev->rtnl_link_ops ||
10226 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10227 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
10228 
10229 out:
10230 	return ret;
10231 
10232 err_uninit:
10233 	if (dev->netdev_ops->ndo_uninit)
10234 		dev->netdev_ops->ndo_uninit(dev);
10235 	if (dev->priv_destructor)
10236 		dev->priv_destructor(dev);
10237 err_free_name:
10238 	netdev_name_node_free(dev->name_node);
10239 	goto out;
10240 }
10241 EXPORT_SYMBOL(register_netdevice);
10242 
10243 /**
10244  *	init_dummy_netdev	- init a dummy network device for NAPI
10245  *	@dev: device to init
10246  *
10247  *	This takes a network device structure and initializes the minimum
10248  *	number of fields so it can be used to schedule NAPI polls without
10249  *	registering a full-blown interface. This is to be used by drivers
10250  *	that need to tie several hardware interfaces to a single NAPI
10251  *	poll scheduler due to HW limitations.
10252  */
10253 int init_dummy_netdev(struct net_device *dev)
10254 {
10255 	/* Clear everything. Note we don't initialize spinlocks
10256 	 * as they aren't supposed to be taken by any of the
10257 	 * NAPI code and this dummy netdev is supposed to be
10258 	 * only ever used for NAPI polls
10259 	 */
10260 	memset(dev, 0, sizeof(struct net_device));
10261 
10262 	/* make sure we BUG if trying to hit standard
10263 	 * register/unregister code path
10264 	 */
10265 	dev->reg_state = NETREG_DUMMY;
10266 
10267 	/* NAPI wants this */
10268 	INIT_LIST_HEAD(&dev->napi_list);
10269 
10270 	/* a dummy interface is started by default */
10271 	set_bit(__LINK_STATE_PRESENT, &dev->state);
10272 	set_bit(__LINK_STATE_START, &dev->state);
10273 
10274 	/* napi_busy_loop stats accounting wants this */
10275 	dev_net_set(dev, &init_net);
10276 
10277 	/* Note: We don't allocate pcpu_refcnt for dummy devices,
10278 	 * because users of this 'device' don't need to change
10279 	 * its refcount.
10280 	 */
10281 
10282 	return 0;
10283 }
10284 EXPORT_SYMBOL_GPL(init_dummy_netdev);
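
/*
 * Example: a minimal sketch of the intended use of init_dummy_netdev() by
 * a driver that funnels several hardware interfaces into one NAPI context.
 * The "foo" structures and the poll callback are illustrative assumptions.
 */
struct foo_adapter {
	struct net_device napi_dev;	/* dummy, never registered */
	struct napi_struct napi;
};

static void foo_setup_napi(struct foo_adapter *adapter,
			   int (*poll)(struct napi_struct *, int))
{
	init_dummy_netdev(&adapter->napi_dev);
	netif_napi_add(&adapter->napi_dev, &adapter->napi, poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&adapter->napi);
}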
10285 
10286 
10287 /**
10288  *	register_netdev	- register a network device
10289  *	@dev: device to register
10290  *
10291  *	Take a completed network device structure and add it to the kernel
10292  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
10293  *	chain. 0 is returned on success. A negative errno code is returned
10294  *	on a failure to set up the device, or if the name is a duplicate.
10295  *
10296  *	This is a wrapper around register_netdevice that takes the rtnl semaphore
10297  *	and expands the device name if you passed a format string to
10298  *	alloc_netdev.
10299  */
10300 int register_netdev(struct net_device *dev)
10301 {
10302 	int err;
10303 
10304 	if (rtnl_lock_killable())
10305 		return -EINTR;
10306 	err = register_netdevice(dev);
10307 	rtnl_unlock();
10308 	return err;
10309 }
10310 EXPORT_SYMBOL(register_netdev);
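
/*
 * Example: a condensed probe path showing the
 * alloc_etherdev()/register_netdev()/free_netdev() life cycle described
 * above.  struct foo_priv and foo_netdev_ops are illustrative stand-ins
 * for a real driver's private data and ops.
 */
struct foo_priv { int id; };
static const struct net_device_ops foo_netdev_ops; /* real drivers fill this in */

static int foo_probe(struct device *parent)
{
	struct net_device *dev;
	int err;

	/* register_netdev() expands the "eth%d" template used by alloc_etherdev() */
	dev = alloc_etherdev(sizeof(struct foo_priv));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, parent);
	dev->netdev_ops = &foo_netdev_ops;

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}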
10311 
10312 int netdev_refcnt_read(const struct net_device *dev)
10313 {
10314 	int i, refcnt = 0;
10315 
10316 	for_each_possible_cpu(i)
10317 		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
10318 	return refcnt;
10319 }
10320 EXPORT_SYMBOL(netdev_refcnt_read);
10321 
10322 #define WAIT_REFS_MIN_MSECS 1
10323 #define WAIT_REFS_MAX_MSECS 250
10324 /**
10325  * netdev_wait_allrefs - wait until all references are gone.
10326  * @dev: target net_device
10327  *
10328  * This is called when unregistering network devices.
10329  *
10330  * Any protocol or device that holds a reference should register
10331  * for netdevice notification, and clean up and put back the
10332  * reference if they receive an UNREGISTER event.
10333  * We can get stuck here if buggy protocols don't correctly
10334  * call dev_put.
10335  */
10336 static void netdev_wait_allrefs(struct net_device *dev)
10337 {
10338 	unsigned long rebroadcast_time, warning_time;
10339 	int wait = 0, refcnt;
10340 
10341 	linkwatch_forget_dev(dev);
10342 
10343 	rebroadcast_time = warning_time = jiffies;
10344 	refcnt = netdev_refcnt_read(dev);
10345 
10346 	while (refcnt != 0) {
10347 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
10348 			rtnl_lock();
10349 
10350 			/* Rebroadcast unregister notification */
10351 			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10352 
10353 			__rtnl_unlock();
10354 			rcu_barrier();
10355 			rtnl_lock();
10356 
10357 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
10358 				     &dev->state)) {
10359 				/* We must not have linkwatch events
10360 				 * pending on unregister. If this
10361 				 * happens, we simply run the queue
10362 				 * unscheduled, resulting in a no-op
10363 				 * for this device.
10364 				 */
10365 				linkwatch_run_queue();
10366 			}
10367 
10368 			__rtnl_unlock();
10369 
10370 			rebroadcast_time = jiffies;
10371 		}
10372 
10373 		if (!wait) {
10374 			rcu_barrier();
10375 			wait = WAIT_REFS_MIN_MSECS;
10376 		} else {
10377 			msleep(wait);
10378 			wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
10379 		}
10380 
10381 		refcnt = netdev_refcnt_read(dev);
10382 
10383 		if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
10384 			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
10385 				 dev->name, refcnt);
10386 			warning_time = jiffies;
10387 		}
10388 	}
10389 }
10390 
10391 /* The sequence is:
10392  *
10393  *	rtnl_lock();
10394  *	...
10395  *	register_netdevice(x1);
10396  *	register_netdevice(x2);
10397  *	...
10398  *	unregister_netdevice(y1);
10399  *	unregister_netdevice(y2);
10400  *      ...
10401  *	rtnl_unlock();
10402  *	free_netdev(y1);
10403  *	free_netdev(y2);
10404  *
10405  * We are invoked by rtnl_unlock().
10406  * This allows us to deal with problems:
10407  * 1) We can delete sysfs objects which invoke hotplug
10408  *    without deadlocking with linkwatch via keventd.
10409  * 2) Since we run with the RTNL semaphore not held, we can sleep
10410  *    safely in order to wait for the netdev refcnt to drop to zero.
10411  *
10412  * We must not return until all unregister events added during
10413  * the interval the lock was held have been completed.
10414  */
10415 void netdev_run_todo(void)
10416 {
10417 	struct list_head list;
10418 #ifdef CONFIG_LOCKDEP
10419 	struct list_head unlink_list;
10420 
10421 	list_replace_init(&net_unlink_list, &unlink_list);
10422 
10423 	while (!list_empty(&unlink_list)) {
10424 		struct net_device *dev = list_first_entry(&unlink_list,
10425 							  struct net_device,
10426 							  unlink_list);
10427 		list_del_init(&dev->unlink_list);
10428 		dev->nested_level = dev->lower_level - 1;
10429 	}
10430 #endif
10431 
10432 	/* Snapshot list, allow later requests */
10433 	list_replace_init(&net_todo_list, &list);
10434 
10435 	__rtnl_unlock();
10436 
10437 
10438 	/* Wait for rcu callbacks to finish before next phase */
10439 	if (!list_empty(&list))
10440 		rcu_barrier();
10441 
10442 	while (!list_empty(&list)) {
10443 		struct net_device *dev
10444 			= list_first_entry(&list, struct net_device, todo_list);
10445 		list_del(&dev->todo_list);
10446 
10447 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
10448 			pr_err("network todo '%s' but state %d\n",
10449 			       dev->name, dev->reg_state);
10450 			dump_stack();
10451 			continue;
10452 		}
10453 
10454 		dev->reg_state = NETREG_UNREGISTERED;
10455 
10456 		netdev_wait_allrefs(dev);
10457 
10458 		/* paranoia */
10459 		BUG_ON(netdev_refcnt_read(dev));
10460 		BUG_ON(!list_empty(&dev->ptype_all));
10461 		BUG_ON(!list_empty(&dev->ptype_specific));
10462 		WARN_ON(rcu_access_pointer(dev->ip_ptr));
10463 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
10464 #if IS_ENABLED(CONFIG_DECNET)
10465 		WARN_ON(dev->dn_ptr);
10466 #endif
10467 		if (dev->priv_destructor)
10468 			dev->priv_destructor(dev);
10469 		if (dev->needs_free_netdev)
10470 			free_netdev(dev);
10471 
10472 		/* Report a network device has been unregistered */
10473 		rtnl_lock();
10474 		dev_net(dev)->dev_unreg_count--;
10475 		__rtnl_unlock();
10476 		wake_up(&netdev_unregistering_wq);
10477 
10478 		/* Free network device */
10479 		kobject_put(&dev->dev.kobj);
10480 	}
10481 }
10482 
10483 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
10484  * all the same fields in the same order as net_device_stats, with only
10485  * the type differing, but rtnl_link_stats64 may have additional fields
10486  * at the end for newer counters.
10487  */
10488 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
10489 			     const struct net_device_stats *netdev_stats)
10490 {
10491 #if BITS_PER_LONG == 64
10492 	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
10493 	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
10494 	/* zero out counters that only exist in rtnl_link_stats64 */
10495 	memset((char *)stats64 + sizeof(*netdev_stats), 0,
10496 	       sizeof(*stats64) - sizeof(*netdev_stats));
10497 #else
10498 	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
10499 	const unsigned long *src = (const unsigned long *)netdev_stats;
10500 	u64 *dst = (u64 *)stats64;
10501 
10502 	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
10503 	for (i = 0; i < n; i++)
10504 		dst[i] = src[i];
10505 	/* zero out counters that only exist in rtnl_link_stats64 */
10506 	memset((char *)stats64 + n * sizeof(u64), 0,
10507 	       sizeof(*stats64) - n * sizeof(u64));
10508 #endif
10509 }
10510 EXPORT_SYMBOL(netdev_stats_to_stats64);
10511 
10512 /**
10513  *	dev_get_stats	- get network device statistics
10514  *	@dev: device to get statistics from
10515  *	@storage: place to store stats
10516  *
10517  *	Get network statistics from device. Return @storage.
10518  *	The device driver may provide its own method by setting
10519  *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
10520  *	otherwise the internal statistics structure is used.
10521  */
10522 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
10523 					struct rtnl_link_stats64 *storage)
10524 {
10525 	const struct net_device_ops *ops = dev->netdev_ops;
10526 
10527 	if (ops->ndo_get_stats64) {
10528 		memset(storage, 0, sizeof(*storage));
10529 		ops->ndo_get_stats64(dev, storage);
10530 	} else if (ops->ndo_get_stats) {
10531 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
10532 	} else {
10533 		netdev_stats_to_stats64(storage, &dev->stats);
10534 	}
10535 	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
10536 	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
10537 	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
10538 	return storage;
10539 }
10540 EXPORT_SYMBOL(dev_get_stats);
10541 
10542 /**
10543  *	dev_fetch_sw_netstats - get per-cpu network device statistics
10544  *	@s: place to store stats
10545  *	@netstats: per-cpu network stats to read from
10546  *
10547  *	Read per-cpu network statistics and populate the related fields in @s.
10548  */
10549 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
10550 			   const struct pcpu_sw_netstats __percpu *netstats)
10551 {
10552 	int cpu;
10553 
10554 	for_each_possible_cpu(cpu) {
10555 		const struct pcpu_sw_netstats *stats;
10556 		struct pcpu_sw_netstats tmp;
10557 		unsigned int start;
10558 
10559 		stats = per_cpu_ptr(netstats, cpu);
10560 		do {
10561 			start = u64_stats_fetch_begin_irq(&stats->syncp);
10562 			tmp.rx_packets = stats->rx_packets;
10563 			tmp.rx_bytes   = stats->rx_bytes;
10564 			tmp.tx_packets = stats->tx_packets;
10565 			tmp.tx_bytes   = stats->tx_bytes;
10566 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
10567 
10568 		s->rx_packets += tmp.rx_packets;
10569 		s->rx_bytes   += tmp.rx_bytes;
10570 		s->tx_packets += tmp.tx_packets;
10571 		s->tx_bytes   += tmp.tx_bytes;
10572 	}
10573 }
10574 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
10575 
10576 /**
10577  *	dev_get_tstats64 - ndo_get_stats64 implementation
10578  *	@dev: device to get statistics from
10579  *	@s: place to store stats
10580  *
10581  *	Populate @s from dev->stats and dev->tstats. Can be used as
10582  *	ndo_get_stats64() callback.
10583  */
10584 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
10585 {
10586 	netdev_stats_to_stats64(s, &dev->stats);
10587 	dev_fetch_sw_netstats(s, dev->tstats);
10588 }
10589 EXPORT_SYMBOL_GPL(dev_get_tstats64);
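
/*
 * Example: how a tunnel-style driver wires up the per-cpu counters that
 * dev_get_tstats64() consumes.  The "foo" ops structure is illustrative;
 * the callbacks follow the standard pattern.
 */
static int foo_dev_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void foo_dev_uninit(struct net_device *dev)
{
	free_percpu(dev->tstats);
}

static const struct net_device_ops foo_tunnel_ops = {
	.ndo_init	 = foo_dev_init,
	.ndo_uninit	 = foo_dev_uninit,
	.ndo_get_stats64 = dev_get_tstats64,
};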
10590 
10591 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
10592 {
10593 	struct netdev_queue *queue = dev_ingress_queue(dev);
10594 
10595 #ifdef CONFIG_NET_CLS_ACT
10596 	if (queue)
10597 		return queue;
10598 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
10599 	if (!queue)
10600 		return NULL;
10601 	netdev_init_one_queue(dev, queue, NULL);
10602 	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
10603 	queue->qdisc_sleeping = &noop_qdisc;
10604 	rcu_assign_pointer(dev->ingress_queue, queue);
10605 #endif
10606 	return queue;
10607 }
10608 
10609 static const struct ethtool_ops default_ethtool_ops;
10610 
10611 void netdev_set_default_ethtool_ops(struct net_device *dev,
10612 				    const struct ethtool_ops *ops)
10613 {
10614 	if (dev->ethtool_ops == &default_ethtool_ops)
10615 		dev->ethtool_ops = ops;
10616 }
10617 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
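
/*
 * Example: a sketch of a subsystem core installing fallback ethtool ops on
 * the devices it creates; a driver that already set dev->ethtool_ops is
 * left untouched.  The "foo" subsystem is an illustrative assumption.
 */
static const struct ethtool_ops foo_subsys_ethtool_ops = {
	.get_link = ethtool_op_get_link,
};

static void foo_subsys_finish_setup(struct net_device *dev)
{
	netdev_set_default_ethtool_ops(dev, &foo_subsys_ethtool_ops);
}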
10618 
10619 void netdev_freemem(struct net_device *dev)
10620 {
10621 	char *addr = (char *)dev - dev->padded;
10622 
10623 	kvfree(addr);
10624 }
10625 
10626 /**
10627  * alloc_netdev_mqs - allocate network device
10628  * @sizeof_priv: size of private data to allocate space for
10629  * @name: device name format string
10630  * @name_assign_type: origin of device name
10631  * @setup: callback to initialize device
10632  * @txqs: the number of TX subqueues to allocate
10633  * @rxqs: the number of RX subqueues to allocate
10634  *
10635  * Allocates a struct net_device with private data area for driver use
10636  * and performs basic initialization.  Also allocates subqueue structs
10637  * for each queue on the device.
10638  */
10639 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
10640 		unsigned char name_assign_type,
10641 		void (*setup)(struct net_device *),
10642 		unsigned int txqs, unsigned int rxqs)
10643 {
10644 	struct net_device *dev;
10645 	unsigned int alloc_size;
10646 	struct net_device *p;
10647 
10648 	BUG_ON(strlen(name) >= sizeof(dev->name));
10649 
10650 	if (txqs < 1) {
10651 		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
10652 		return NULL;
10653 	}
10654 
10655 	if (rxqs < 1) {
10656 		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
10657 		return NULL;
10658 	}
10659 
10660 	alloc_size = sizeof(struct net_device);
10661 	if (sizeof_priv) {
10662 		/* ensure 32-byte alignment of private area */
10663 		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
10664 		alloc_size += sizeof_priv;
10665 	}
10666 	/* ensure 32-byte alignment of whole construct */
10667 	alloc_size += NETDEV_ALIGN - 1;
10668 
10669 	p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
10670 	if (!p)
10671 		return NULL;
10672 
10673 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
10674 	dev->padded = (char *)dev - (char *)p;
10675 
10676 	dev->pcpu_refcnt = alloc_percpu(int);
10677 	if (!dev->pcpu_refcnt)
10678 		goto free_dev;
10679 
10680 	if (dev_addr_init(dev))
10681 		goto free_pcpu;
10682 
10683 	dev_mc_init(dev);
10684 	dev_uc_init(dev);
10685 
10686 	dev_net_set(dev, &init_net);
10687 
10688 	dev->gso_max_size = GSO_MAX_SIZE;
10689 	dev->gso_max_segs = GSO_MAX_SEGS;
10690 	dev->upper_level = 1;
10691 	dev->lower_level = 1;
10692 #ifdef CONFIG_LOCKDEP
10693 	dev->nested_level = 0;
10694 	INIT_LIST_HEAD(&dev->unlink_list);
10695 #endif
10696 
10697 	INIT_LIST_HEAD(&dev->napi_list);
10698 	INIT_LIST_HEAD(&dev->unreg_list);
10699 	INIT_LIST_HEAD(&dev->close_list);
10700 	INIT_LIST_HEAD(&dev->link_watch_list);
10701 	INIT_LIST_HEAD(&dev->adj_list.upper);
10702 	INIT_LIST_HEAD(&dev->adj_list.lower);
10703 	INIT_LIST_HEAD(&dev->ptype_all);
10704 	INIT_LIST_HEAD(&dev->ptype_specific);
10705 	INIT_LIST_HEAD(&dev->net_notifier_list);
10706 #ifdef CONFIG_NET_SCHED
10707 	hash_init(dev->qdisc_hash);
10708 #endif
10709 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
10710 	setup(dev);
10711 
10712 	if (!dev->tx_queue_len) {
10713 		dev->priv_flags |= IFF_NO_QUEUE;
10714 		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
10715 	}
10716 
10717 	dev->num_tx_queues = txqs;
10718 	dev->real_num_tx_queues = txqs;
10719 	if (netif_alloc_netdev_queues(dev))
10720 		goto free_all;
10721 
10722 	dev->num_rx_queues = rxqs;
10723 	dev->real_num_rx_queues = rxqs;
10724 	if (netif_alloc_rx_queues(dev))
10725 		goto free_all;
10726 
10727 	strcpy(dev->name, name);
10728 	dev->name_assign_type = name_assign_type;
10729 	dev->group = INIT_NETDEV_GROUP;
10730 	if (!dev->ethtool_ops)
10731 		dev->ethtool_ops = &default_ethtool_ops;
10732 
10733 	nf_hook_ingress_init(dev);
10734 
10735 	return dev;
10736 
10737 free_all:
10738 	free_netdev(dev);
10739 	return NULL;
10740 
10741 free_pcpu:
10742 	free_percpu(dev->pcpu_refcnt);
10743 free_dev:
10744 	netdev_freemem(dev);
10745 	return NULL;
10746 }
10747 EXPORT_SYMBOL(alloc_netdev_mqs);
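
/*
 * Example: direct use of alloc_netdev_mqs() for a symmetric multi-queue
 * device; most drivers reach it through the alloc_netdev()/alloc_etherdev_mq()
 * wrappers.  struct foo_mq_priv and the "foo%d" template are illustrative.
 */
struct foo_mq_priv { int id; };

static struct net_device *foo_alloc(unsigned int nqueues)
{
	return alloc_netdev_mqs(sizeof(struct foo_mq_priv), "foo%d",
				NET_NAME_ENUM, ether_setup,
				nqueues, nqueues);
}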
10748 
10749 /**
10750  * free_netdev - free network device
10751  * @dev: device
10752  *
10753  * This function does the last stage of destroying an allocated device
10754  * interface. The reference to the device object is released. If this
10755  * is the last reference then it will be freed. Must be called in process
10756  * context.
10757  */
10758 void free_netdev(struct net_device *dev)
10759 {
10760 	struct napi_struct *p, *n;
10761 
10762 	might_sleep();
10763 
10764 	/* When called immediately after register_netdevice() failed, the unwind
10765 	 * handling may still be dismantling the device. Handle that case by
10766 	 * deferring the free.
10767 	 */
10768 	if (dev->reg_state == NETREG_UNREGISTERING) {
10769 		ASSERT_RTNL();
10770 		dev->needs_free_netdev = true;
10771 		return;
10772 	}
10773 
10774 	netif_free_tx_queues(dev);
10775 	netif_free_rx_queues(dev);
10776 
10777 	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
10778 
10779 	/* Flush device addresses */
10780 	dev_addr_flush(dev);
10781 
10782 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10783 		netif_napi_del(p);
10784 
10785 	free_percpu(dev->pcpu_refcnt);
10786 	dev->pcpu_refcnt = NULL;
10787 	free_percpu(dev->xdp_bulkq);
10788 	dev->xdp_bulkq = NULL;
10789 
10790 	/*  Compatibility with error handling in drivers */
10791 	if (dev->reg_state == NETREG_UNINITIALIZED) {
10792 		netdev_freemem(dev);
10793 		return;
10794 	}
10795 
10796 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10797 	dev->reg_state = NETREG_RELEASED;
10798 
10799 	/* will free via device release */
10800 	put_device(&dev->dev);
10801 }
10802 EXPORT_SYMBOL(free_netdev);
10803 
10804 /**
10805  *	synchronize_net -  Synchronize with packet receive processing
10806  *
10807  *	Wait for packets currently being received to be done.
10808  *	Does not block later packets from starting.
10809  */
10810 void synchronize_net(void)
10811 {
10812 	might_sleep();
10813 	if (rtnl_is_locked())
10814 		synchronize_rcu_expedited();
10815 	else
10816 		synchronize_rcu();
10817 }
10818 EXPORT_SYMBOL(synchronize_net);
10819 
10820 /**
10821  *	unregister_netdevice_queue - remove device from the kernel
10822  *	@dev: device
10823  *	@head: list
10824  *
10825  *	This function shuts down a device interface and removes it
10826  *	from the kernel tables.
10827  *	If head not NULL, device is queued to be unregistered later.
10828  *	If @head is not NULL, the device is queued to be unregistered later.
10829  *	Callers must hold the rtnl semaphore.  You may want
10830  *	unregister_netdev() instead of this.
10831  */
10832 
10833 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
10834 {
10835 	ASSERT_RTNL();
10836 
10837 	if (head) {
10838 		list_move_tail(&dev->unreg_list, head);
10839 	} else {
10840 		LIST_HEAD(single);
10841 
10842 		list_add(&dev->unreg_list, &single);
10843 		unregister_netdevice_many(&single);
10844 	}
10845 }
10846 EXPORT_SYMBOL(unregister_netdevice_queue);
10847 
10848 /**
10849  *	unregister_netdevice_many - unregister many devices
10850  *	@head: list of devices
10851  *
10852  *  Note: As most callers use a stack-allocated list_head,
10853  *  we force a list_del() to make sure the stack won't be corrupted later.
10854  */
10855 void unregister_netdevice_many(struct list_head *head)
10856 {
10857 	struct net_device *dev, *tmp;
10858 	LIST_HEAD(close_head);
10859 
10860 	BUG_ON(dev_boot_phase);
10861 	ASSERT_RTNL();
10862 
10863 	if (list_empty(head))
10864 		return;
10865 
10866 	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
10867 		/* Some devices get here without ever having been
10868 		 * registered, as part of initialization unwind.
10869 		 * Remove those devices and proceed with the remaining.
10870 		 */
10871 		if (dev->reg_state == NETREG_UNINITIALIZED) {
10872 			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
10873 				 dev->name, dev);
10874 
10875 			WARN_ON(1);
10876 			list_del(&dev->unreg_list);
10877 			continue;
10878 		}
10879 		dev->dismantle = true;
10880 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
10881 	}
10882 
10883 	/* If device is running, close it first. */
10884 	list_for_each_entry(dev, head, unreg_list)
10885 		list_add_tail(&dev->close_list, &close_head);
10886 	dev_close_many(&close_head, true);
10887 
10888 	list_for_each_entry(dev, head, unreg_list) {
10889 		/* And unlink it from device chain. */
10890 		unlist_netdevice(dev);
10891 
10892 		dev->reg_state = NETREG_UNREGISTERING;
10893 	}
10894 	flush_all_backlogs();
10895 
10896 	synchronize_net();
10897 
10898 	list_for_each_entry(dev, head, unreg_list) {
10899 		struct sk_buff *skb = NULL;
10900 
10901 		/* Shutdown queueing discipline. */
10902 		dev_shutdown(dev);
10903 
10904 		dev_xdp_uninstall(dev);
10905 
10906 		/* Notify protocols, that we are about to destroy
10907 		 * this device. They should clean all the things.
10908 		 */
10909 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10910 
10911 		if (!dev->rtnl_link_ops ||
10912 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10913 			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
10914 						     GFP_KERNEL, NULL, 0);
10915 
10916 		/*
10917 		 *	Flush the unicast and multicast chains
10918 		 */
10919 		dev_uc_flush(dev);
10920 		dev_mc_flush(dev);
10921 
10922 		netdev_name_node_alt_flush(dev);
10923 		netdev_name_node_free(dev->name_node);
10924 
10925 		if (dev->netdev_ops->ndo_uninit)
10926 			dev->netdev_ops->ndo_uninit(dev);
10927 
10928 		if (skb)
10929 			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
10930 
10931 		/* Notifier chain MUST detach us all upper devices. */
10932 		WARN_ON(netdev_has_any_upper_dev(dev));
10933 		WARN_ON(netdev_has_any_lower_dev(dev));
10934 
10935 		/* Remove entries from kobject tree */
10936 		netdev_unregister_kobject(dev);
10937 #ifdef CONFIG_XPS
10938 		/* Remove XPS queueing entries */
10939 		netif_reset_xps_queues_gt(dev, 0);
10940 #endif
10941 	}
10942 
10943 	synchronize_net();
10944 
10945 	list_for_each_entry(dev, head, unreg_list) {
10946 		dev_put(dev);
10947 		net_set_todo(dev);
10948 	}
10949 
10950 	list_del(head);
10951 }
10952 EXPORT_SYMBOL(unregister_netdevice_many);
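
/*
 * Example: batching the teardown of several devices so the expensive
 * synchronization in unregister_netdevice_many() is paid once.  The array
 * of devices is an illustrative stand-in for a driver's own bookkeeping.
 */
static void foo_destroy_all(struct net_device *devs[], int n)
{
	LIST_HEAD(unreg_list);
	int i;

	ASSERT_RTNL();

	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &unreg_list);

	unregister_netdevice_many(&unreg_list);
}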
10953 
10954 /**
10955  *	unregister_netdev - remove device from the kernel
10956  *	@dev: device
10957  *
10958  *	This function shuts down a device interface and removes it
10959  *	from the kernel tables.
10960  *
10961  *	This is just a wrapper for unregister_netdevice that takes
10962  *	the rtnl semaphore.  In general you want to use this and not
10963  *	unregister_netdevice.
10964  */
10965 void unregister_netdev(struct net_device *dev)
10966 {
10967 	rtnl_lock();
10968 	unregister_netdevice(dev);
10969 	rtnl_unlock();
10970 }
10971 EXPORT_SYMBOL(unregister_netdev);
10972 
10973 /**
10974  *	dev_change_net_namespace - move device to a different network namespace
10975  *	@dev: device
10976  *	@net: network namespace
10977  *	@pat: If not NULL, name pattern to try if the current device name
10978  *	      is already taken in the destination network namespace.
10979  *
10980  *	This function shuts down a device interface and moves it
10981  *	to a new network namespace. On success 0 is returned, on
10982  *	a failure a negative errno code is returned.
10983  *
10984  *	Callers must hold the rtnl semaphore.
10985  */
10986 
10987 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
10988 {
10989 	struct net *net_old = dev_net(dev);
10990 	int err, new_nsid, new_ifindex;
10991 
10992 	ASSERT_RTNL();
10993 
10994 	/* Don't allow namespace local devices to be moved. */
10995 	err = -EINVAL;
10996 	if (dev->features & NETIF_F_NETNS_LOCAL)
10997 		goto out;
10998 
10999 	/* Ensure the device has been registered */
11000 	if (dev->reg_state != NETREG_REGISTERED)
11001 		goto out;
11002 
11003 	/* Get out if there is nothing to do */
11004 	err = 0;
11005 	if (net_eq(net_old, net))
11006 		goto out;
11007 
11008 	/* Pick the destination device name, and ensure
11009 	 * we can use it in the destination network namespace.
11010 	 */
11011 	err = -EEXIST;
11012 	if (__dev_get_by_name(net, dev->name)) {
11013 		/* We get here if we can't use the current device name */
11014 		if (!pat)
11015 			goto out;
11016 		err = dev_get_valid_name(net, dev, pat);
11017 		if (err < 0)
11018 			goto out;
11019 	}
11020 
11021 	/*
11022 	 * And now a mini version of register_netdevice and unregister_netdevice.
11023 	 */
11024 
11025 	/* If device is running close it first. */
11026 	dev_close(dev);
11027 
11028 	/* And unlink it from device chain */
11029 	unlist_netdevice(dev);
11030 
11031 	synchronize_net();
11032 
11033 	/* Shutdown queueing discipline. */
11034 	dev_shutdown(dev);
11035 
11036 	/* Notify protocols, that we are about to destroy
11037 	 * this device. They should clean all the things.
11038 	 *
11039 	 * Note that dev->reg_state stays at NETREG_REGISTERED.
11040 	 * This is wanted because this way 8021q and macvlan know
11041 	 * the device is just moving and can keep their slaves up.
11042 	 */
11043 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11044 	rcu_barrier();
11045 
11046 	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
11047 	/* If there is an ifindex conflict assign a new one */
11048 	if (__dev_get_by_index(net, dev->ifindex))
11049 		new_ifindex = dev_new_index(net);
11050 	else
11051 		new_ifindex = dev->ifindex;
11052 
11053 	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
11054 			    new_ifindex);
11055 
11056 	/*
11057 	 *	Flush the unicast and multicast chains
11058 	 */
11059 	dev_uc_flush(dev);
11060 	dev_mc_flush(dev);
11061 
11062 	/* Send a netdev-removed uevent to the old namespace */
11063 	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
11064 	netdev_adjacent_del_links(dev);
11065 
11066 	/* Move per-net netdevice notifiers that are following the netdevice */
11067 	move_netdevice_notifiers_dev_net(dev, net);
11068 
11069 	/* Actually switch the network namespace */
11070 	dev_net_set(dev, net);
11071 	dev->ifindex = new_ifindex;
11072 
11073 	/* Send a netdev-add uevent to the new namespace */
11074 	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
11075 	netdev_adjacent_add_links(dev);
11076 
11077 	/* Fixup kobjects */
11078 	err = device_rename(&dev->dev, dev->name);
11079 	WARN_ON(err);
11080 
11081 	/* Adapt owner in case owning user namespace of target network
11082 	 * namespace is different from the original one.
11083 	 */
11084 	err = netdev_change_owner(dev, net_old, net);
11085 	WARN_ON(err);
11086 
11087 	/* Add the device back in the hashes */
11088 	list_netdevice(dev);
11089 
11090 	/* Notify protocols, that a new device appeared. */
11091 	call_netdevice_notifiers(NETDEV_REGISTER, dev);
11092 
11093 	/*
11094 	 *	Prevent userspace races by waiting until the network
11095 	 *	device is fully set up before sending notifications.
11096 	 */
11097 	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
11098 
11099 	synchronize_net();
11100 	err = 0;
11101 out:
11102 	return err;
11103 }
11104 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
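
/*
 * Example: moving a device into another network namespace from rtnl-locked
 * code, falling back to an "eth%d" pattern if the current name is taken in
 * the target namespace, as described above.  The wrapper is illustrative.
 */
static int foo_move_to_netns(struct net_device *dev, struct net *target)
{
	ASSERT_RTNL();

	return dev_change_net_namespace(dev, target, "eth%d");
}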
11105 
11106 static int dev_cpu_dead(unsigned int oldcpu)
11107 {
11108 	struct sk_buff **list_skb;
11109 	struct sk_buff *skb;
11110 	unsigned int cpu;
11111 	struct softnet_data *sd, *oldsd, *remsd = NULL;
11112 
11113 	local_irq_disable();
11114 	cpu = smp_processor_id();
11115 	sd = &per_cpu(softnet_data, cpu);
11116 	oldsd = &per_cpu(softnet_data, oldcpu);
11117 
11118 	/* Find end of our completion_queue. */
11119 	list_skb = &sd->completion_queue;
11120 	while (*list_skb)
11121 		list_skb = &(*list_skb)->next;
11122 	/* Append completion queue from offline CPU. */
11123 	*list_skb = oldsd->completion_queue;
11124 	oldsd->completion_queue = NULL;
11125 
11126 	/* Append output queue from offline CPU. */
11127 	if (oldsd->output_queue) {
11128 		*sd->output_queue_tailp = oldsd->output_queue;
11129 		sd->output_queue_tailp = oldsd->output_queue_tailp;
11130 		oldsd->output_queue = NULL;
11131 		oldsd->output_queue_tailp = &oldsd->output_queue;
11132 	}
11133 	/* Append the NAPI poll list from the offline CPU, with one exception:
11134 	 * process_backlog() must be called by the CPU owning the per-cpu backlog.
11135 	 * We properly handle process_queue & input_pkt_queue later.
11136 	 */
11137 	while (!list_empty(&oldsd->poll_list)) {
11138 		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
11139 							    struct napi_struct,
11140 							    poll_list);
11141 
11142 		list_del_init(&napi->poll_list);
11143 		if (napi->poll == process_backlog)
11144 			napi->state = 0;
11145 		else
11146 			____napi_schedule(sd, napi);
11147 	}
11148 
11149 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
11150 	local_irq_enable();
11151 
11152 #ifdef CONFIG_RPS
11153 	remsd = oldsd->rps_ipi_list;
11154 	oldsd->rps_ipi_list = NULL;
11155 #endif
11156 	/* send out pending IPI's on offline CPU */
11157 	net_rps_send_ipi(remsd);
11158 
11159 	/* Process offline CPU's input_pkt_queue */
11160 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
11161 		netif_rx_ni(skb);
11162 		input_queue_head_incr(oldsd);
11163 	}
11164 	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
11165 		netif_rx_ni(skb);
11166 		input_queue_head_incr(oldsd);
11167 	}
11168 
11169 	return 0;
11170 }
11171 
11172 /**
11173  *	netdev_increment_features - increment feature set by one
11174  *	@all: current feature set
11175  *	@one: new feature set
11176  *	@mask: mask feature set
11177  *
11178  *	Computes a new feature set after adding a device with feature set
11179  *	@one to the master device with current feature set @all.  Will not
11180  *	enable anything that is off in @mask. Returns the new feature set.
11181  */
11182 netdev_features_t netdev_increment_features(netdev_features_t all,
11183 	netdev_features_t one, netdev_features_t mask)
11184 {
11185 	if (mask & NETIF_F_HW_CSUM)
11186 		mask |= NETIF_F_CSUM_MASK;
11187 	mask |= NETIF_F_VLAN_CHALLENGED;
11188 
11189 	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
11190 	all &= one | ~NETIF_F_ALL_FOR_ALL;
11191 
11192 	/* If one device supports hw checksumming, set for all. */
11193 	if (all & NETIF_F_HW_CSUM)
11194 		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
11195 
11196 	return all;
11197 }
11198 EXPORT_SYMBOL(netdev_increment_features);
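
/*
 * Example: the pattern used by aggregating drivers (bond/team style) to
 * fold their slaves' feature sets together with
 * netdev_increment_features(); the lower-device walk below is a simplified
 * illustration, not copied from a specific driver.
 */
static netdev_features_t foo_master_fix_features(struct net_device *master,
						 netdev_features_t features)
{
	netdev_features_t mask = features;
	struct net_device *slave;
	struct list_head *iter;

	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	netdev_for_each_lower_dev(master, slave, iter)
		features = netdev_increment_features(features,
						     slave->features, mask);

	return features;
}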
11199 
11200 static struct hlist_head * __net_init netdev_create_hash(void)
11201 {
11202 	int i;
11203 	struct hlist_head *hash;
11204 
11205 	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
11206 	if (hash != NULL)
11207 		for (i = 0; i < NETDEV_HASHENTRIES; i++)
11208 			INIT_HLIST_HEAD(&hash[i]);
11209 
11210 	return hash;
11211 }
11212 
11213 /* Initialize per network namespace state */
11214 static int __net_init netdev_init(struct net *net)
11215 {
11216 	BUILD_BUG_ON(GRO_HASH_BUCKETS >
11217 		     8 * sizeof_field(struct napi_struct, gro_bitmask));
11218 
11219 	if (net != &init_net)
11220 		INIT_LIST_HEAD(&net->dev_base_head);
11221 
11222 	net->dev_name_head = netdev_create_hash();
11223 	if (net->dev_name_head == NULL)
11224 		goto err_name;
11225 
11226 	net->dev_index_head = netdev_create_hash();
11227 	if (net->dev_index_head == NULL)
11228 		goto err_idx;
11229 
11230 	RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
11231 
11232 	return 0;
11233 
11234 err_idx:
11235 	kfree(net->dev_name_head);
11236 err_name:
11237 	return -ENOMEM;
11238 }
11239 
11240 /**
11241  *	netdev_drivername - network driver for the device
11242  *	@dev: network device
11243  *
11244  *	Determine network driver for device.
11245  */
11246 const char *netdev_drivername(const struct net_device *dev)
11247 {
11248 	const struct device_driver *driver;
11249 	const struct device *parent;
11250 	const char *empty = "";
11251 
11252 	parent = dev->dev.parent;
11253 	if (!parent)
11254 		return empty;
11255 
11256 	driver = parent->driver;
11257 	if (driver && driver->name)
11258 		return driver->name;
11259 	return empty;
11260 }
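/* Illustrative usage (not in the original source): callers typically use
 * netdev_drivername() to name the offending driver in diagnostics, roughly
 * in the style of the transmit-watchdog report; 'dev' is a hypothetical
 * net_device pointer:
 *
 *	pr_err("%s (%s): transmit queue timed out\n",
 *	       dev->name, netdev_drivername(dev));
 */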
11261 
11262 static void __netdev_printk(const char *level, const struct net_device *dev,
11263 			    struct va_format *vaf)
11264 {
11265 	if (dev && dev->dev.parent) {
11266 		dev_printk_emit(level[1] - '0',
11267 				dev->dev.parent,
11268 				"%s %s %s%s: %pV",
11269 				dev_driver_string(dev->dev.parent),
11270 				dev_name(dev->dev.parent),
11271 				netdev_name(dev), netdev_reg_state(dev),
11272 				vaf);
11273 	} else if (dev) {
11274 		printk("%s%s%s: %pV",
11275 		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
11276 	} else {
11277 		printk("%s(NULL net_device): %pV", level, vaf);
11278 	}
11279 }
11280 
11281 void netdev_printk(const char *level, const struct net_device *dev,
11282 		   const char *format, ...)
11283 {
11284 	struct va_format vaf;
11285 	va_list args;
11286 
11287 	va_start(args, format);
11288 
11289 	vaf.fmt = format;
11290 	vaf.va = &args;
11291 
11292 	__netdev_printk(level, dev, &vaf);
11293 
11294 	va_end(args);
11295 }
11296 EXPORT_SYMBOL(netdev_printk);
11297 
11298 #define define_netdev_printk_level(func, level)			\
11299 void func(const struct net_device *dev, const char *fmt, ...)	\
11300 {								\
11301 	struct va_format vaf;					\
11302 	va_list args;						\
11303 								\
11304 	va_start(args, fmt);					\
11305 								\
11306 	vaf.fmt = fmt;						\
11307 	vaf.va = &args;						\
11308 								\
11309 	__netdev_printk(level, dev, &vaf);			\
11310 								\
11311 	va_end(args);						\
11312 }								\
11313 EXPORT_SYMBOL(func);
11314 
11315 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
11316 define_netdev_printk_level(netdev_alert, KERN_ALERT);
11317 define_netdev_printk_level(netdev_crit, KERN_CRIT);
11318 define_netdev_printk_level(netdev_err, KERN_ERR);
11319 define_netdev_printk_level(netdev_warn, KERN_WARNING);
11320 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
11321 define_netdev_printk_level(netdev_info, KERN_INFO);
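/* Illustrative usage (not in the original source): drivers normally call
 * the helpers generated above rather than netdev_printk() directly; the
 * output is prefixed with the driver, bus device and interface names when
 * a parent device exists.  'dev' and 'status' are hypothetical:
 *
 *	netdev_warn(dev, "link flapped, status %#x\n", status);
 */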
11322 
11323 static void __net_exit netdev_exit(struct net *net)
11324 {
11325 	kfree(net->dev_name_head);
11326 	kfree(net->dev_index_head);
11327 	if (net != &init_net)
11328 		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
11329 }
11330 
11331 static struct pernet_operations __net_initdata netdev_net_ops = {
11332 	.init = netdev_init,
11333 	.exit = netdev_exit,
11334 };
11335 
11336 static void __net_exit default_device_exit(struct net *net)
11337 {
11338 	struct net_device *dev, *aux;
11339 	/*
11340 	 * Push all migratable network devices back to the
11341 	 * initial network namespace
11342 	 */
11343 	rtnl_lock();
11344 	for_each_netdev_safe(net, dev, aux) {
11345 		int err;
11346 		char fb_name[IFNAMSIZ];
11347 
11348 		/* Ignore unmoveable devices (i.e. loopback) */
11349 		/* Ignore unmovable devices (e.g. loopback) */
11350 			continue;
11351 
11352 		/* Leave virtual devices for the generic cleanup */
11353 		if (dev->rtnl_link_ops)
11354 			continue;
11355 
11356 		/* Push remaining network devices to init_net */
11357 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
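		/* Editorial note (not in the original source): if the literal
		 * "dev<ifindex>" name is already taken in init_net, fall back
		 * to the "dev%d" pattern and let dev_change_net_namespace()
		 * pick a free index.
		 */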
11358 		if (__dev_get_by_name(&init_net, fb_name))
11359 			snprintf(fb_name, IFNAMSIZ, "dev%%d");
11360 		err = dev_change_net_namespace(dev, &init_net, fb_name);
11361 		if (err) {
11362 			pr_emerg("%s: failed to move %s to init_net: %d\n",
11363 				 __func__, dev->name, err);
11364 			BUG();
11365 		}
11366 	}
11367 	rtnl_unlock();
11368 }
11369 
11370 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
11371 {
11372 	/* Return with the rtnl_lock held when there are no network
11373 	 * devices unregistering in any network namespace in net_list.
11374 	 */
11375 	struct net *net;
11376 	bool unregistering;
11377 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
11378 
11379 	add_wait_queue(&netdev_unregistering_wq, &wait);
11380 	for (;;) {
11381 		unregistering = false;
11382 		rtnl_lock();
11383 		list_for_each_entry(net, net_list, exit_list) {
11384 			if (net->dev_unreg_count > 0) {
11385 				unregistering = true;
11386 				break;
11387 			}
11388 		}
11389 		if (!unregistering)
11390 			break;
11391 		__rtnl_unlock();
11392 
11393 		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
11394 	}
11395 	remove_wait_queue(&netdev_unregistering_wq, &wait);
11396 }
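/* Editorial note (not in the original source): the loop above takes
 * rtnl_lock, checks every namespace's dev_unreg_count, and if any
 * unregistrations are still pending drops the lock and sleeps on
 * netdev_unregistering_wq, so the caller ends up holding rtnl_lock only
 * once no device in net_list is mid-unregistration.
 */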
11397 
11398 static void __net_exit default_device_exit_batch(struct list_head *net_list)
11399 {
11400 	/* At exit, all network devices must be removed from a network
11401 	 * namespace.  Do this in the reverse order of registration.
11402 	 * Do this across as many network namespaces as possible to
11403 	 * improve batching efficiency.
11404 	 */
11405 	struct net_device *dev;
11406 	struct net *net;
11407 	LIST_HEAD(dev_kill_list);
11408 
11409 	/* To prevent network device cleanup code from dereferencing
11410 	 * loopback devices or network devices that have been freed,
11411 	 * wait here for all pending unregistrations to complete
11412 	 * before unregistering the loopback device and allowing the
11413 	 * network namespace to be freed.
11414 	 *
11415 	 * The netdev todo list containing all network device
11416 	 * unregistrations that happen in default_device_exit_batch
11417 	 * will run in the rtnl_unlock() at the end of
11418 	 * default_device_exit_batch.
11419 	 */
11420 	rtnl_lock_unregistering(net_list);
11421 	list_for_each_entry(net, net_list, exit_list) {
11422 		for_each_netdev_reverse(net, dev) {
11423 			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
11424 				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
11425 			else
11426 				unregister_netdevice_queue(dev, &dev_kill_list);
11427 		}
11428 	}
11429 	unregister_netdevice_many(&dev_kill_list);
11430 	rtnl_unlock();
11431 }
11432 
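/* Editorial note (not in the original source): default_device_ops is
 * registered with register_pernet_device() in net_dev_init() below.  When a
 * network namespace is torn down, default_device_exit() first moves movable
 * devices back to init_net, then default_device_exit_batch() unregisters the
 * remaining (typically virtual) devices in one batch.
 */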
11433 static struct pernet_operations __net_initdata default_device_ops = {
11434 	.exit = default_device_exit,
11435 	.exit_batch = default_device_exit_batch,
11436 };
11437 
11438 /*
11439  *	Initialize the DEV module. At boot time this walks the device list and
11440  *	unhooks any devices that fail to initialise (normally hardware not
11441  *	present) and leaves us with a valid list of present and active devices.
11442  *
11443  */
11444 
11445 /*
11446  *       This is called single threaded during boot, so no need
11447  *       to take the rtnl semaphore.
11448  */
11449 static int __init net_dev_init(void)
11450 {
11451 	int i, rc = -ENOMEM;
11452 
11453 	BUG_ON(!dev_boot_phase);
11454 
11455 	if (dev_proc_init())
11456 		goto out;
11457 
11458 	if (netdev_kobject_init())
11459 		goto out;
11460 
11461 	INIT_LIST_HEAD(&ptype_all);
11462 	for (i = 0; i < PTYPE_HASH_SIZE; i++)
11463 		INIT_LIST_HEAD(&ptype_base[i]);
11464 
11465 	INIT_LIST_HEAD(&offload_base);
11466 
11467 	if (register_pernet_subsys(&netdev_net_ops))
11468 		goto out;
11469 
11470 	/*
11471 	 *	Initialise the packet receive queues.
11472 	 */
11473 
11474 	for_each_possible_cpu(i) {
11475 		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
11476 		struct softnet_data *sd = &per_cpu(softnet_data, i);
11477 
11478 		INIT_WORK(flush, flush_backlog);
11479 
11480 		skb_queue_head_init(&sd->input_pkt_queue);
11481 		skb_queue_head_init(&sd->process_queue);
11482 #ifdef CONFIG_XFRM_OFFLOAD
11483 		skb_queue_head_init(&sd->xfrm_backlog);
11484 #endif
11485 		INIT_LIST_HEAD(&sd->poll_list);
11486 		sd->output_queue_tailp = &sd->output_queue;
11487 #ifdef CONFIG_RPS
11488 		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11489 		sd->cpu = i;
11490 #endif
11491 
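		/* Editorial note (not in the original source): the per-cpu
		 * backlog is itself a NAPI instance; its poll callback,
		 * process_backlog(), drains input_pkt_queue and process_queue
		 * from net_rx_action() like any other NAPI poller.
		 */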
11492 		init_gro_hash(&sd->backlog);
11493 		sd->backlog.poll = process_backlog;
11494 		sd->backlog.weight = weight_p;
11495 	}
11496 
11497 	dev_boot_phase = 0;
11498 
11499 	/* The loopback device is special: if any other network device
11500 	 * is present in a network namespace, the loopback device must
11501 	 * be present. Since we now dynamically allocate and free the
11502 	 * loopback device, ensure this invariant is maintained by
11503 	 * keeping the loopback device as the first device on the
11504 	 * list of network devices.  This guarantees that the loopback
11505 	 * device is the first device that appears and the last network
11506 	 * device that disappears.
11507 	 */
11508 	if (register_pernet_device(&loopback_net_ops))
11509 		goto out;
11510 
11511 	if (register_pernet_device(&default_device_ops))
11512 		goto out;
11513 
11514 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
11515 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
11516 
11517 	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
11518 				       NULL, dev_cpu_dead);
11519 	WARN_ON(rc < 0);
11520 	rc = 0;
11521 out:
11522 	return rc;
11523 }
11524 
11525 subsys_initcall(net_dev_init);
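/* Editorial note (not in the original source): registering net_dev_init()
 * as a subsys_initcall runs it early in boot, before the device and module
 * initcalls that register network drivers, which is what the
 * BUG_ON(!dev_boot_phase) check above relies on.
 */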
11526