xref: /linux-6.15/net/core/dev.c (revision 36ed81bc)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *      NET3    Protocol independent device support routines.
4  *
5  *	Derived from the non IP parts of dev.c 1.0.19
6  *              Authors:	Ross Biro
7  *				Fred N. van Kempen, <[email protected]>
8  *				Mark Evans, <[email protected]>
9  *
10  *	Additional Authors:
11  *		Florian la Roche <[email protected]>
12  *		Alan Cox <[email protected]>
13  *		David Hinds <[email protected]>
14  *		Alexey Kuznetsov <[email protected]>
15  *		Adam Sulmicki <[email protected]>
16  *              Pekka Riikonen <[email protected]>
17  *
18  *	Changes:
19  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
20  *                                      to 2 if register_netdev gets called
21  *                                      before net_dev_init & also removed a
22  *                                      few lines of code in the process.
23  *		Alan Cox	:	device private ioctl copies fields back.
24  *		Alan Cox	:	Transmit queue code does relevant
25  *					stunts to keep the queue safe.
26  *		Alan Cox	:	Fixed double lock.
27  *		Alan Cox	:	Fixed promisc NULL pointer trap
28  *		????????	:	Support the full private ioctl range
29  *		Alan Cox	:	Moved ioctl permission check into
30  *					drivers
31  *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
32  *		Alan Cox	:	100 backlog just doesn't cut it when
33  *					you start doing multicast video 8)
34  *		Alan Cox	:	Rewrote net_bh and list manager.
35  *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
36  *		Alan Cox	:	Took out transmit every packet pass
37  *					Saved a few bytes in the ioctl handler
38  *		Alan Cox	:	Network driver sets packet type before
39  *					calling netif_rx. Saves a function
40  *					call a packet.
41  *		Alan Cox	:	Hashed net_bh()
42  *		Richard Kooijman:	Timestamp fixes.
43  *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
44  *		Alan Cox	:	Device lock protection.
45  *              Alan Cox        :       Fixed nasty side effect of device close
46  *					changes.
47  *		Rudi Cilibrasi	:	Pass the right thing to
48  *					set_mac_address()
49  *		Dave Miller	:	32bit quantity for the device lock to
50  *					make it work out on a Sparc.
51  *		Bjorn Ekwall	:	Added KERNELD hack.
52  *		Alan Cox	:	Cleaned up the backlog initialise.
53  *		Craig Metz	:	SIOCGIFCONF fix if space for under
54  *					1 device.
55  *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
56  *					is no device open function.
57  *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
58  *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
59  *		Cyrus Durgin	:	Cleaned for KMOD
60  *		Adam Sulmicki   :	Bug Fix : Network Device Unload
61  *					A network device unload needs to purge
62  *					the backlog queue.
63  *	Paul Rusty Russell	:	SIOCSIFNAME
64  *              Pekka Riikonen  :	Netdev boot-time settings code
65  *              Andrew Morton   :       Make unregister_netdevice wait
66  *                                      indefinitely on dev->refcnt
67  *              J Hadi Salim    :       - Backlog queue sampling
68  *				        - netif_rx() feedback
69  */
70 
71 #include <linux/uaccess.h>
72 #include <linux/bitmap.h>
73 #include <linux/capability.h>
74 #include <linux/cpu.h>
75 #include <linux/types.h>
76 #include <linux/kernel.h>
77 #include <linux/hash.h>
78 #include <linux/slab.h>
79 #include <linux/sched.h>
80 #include <linux/sched/isolation.h>
81 #include <linux/sched/mm.h>
82 #include <linux/smpboot.h>
83 #include <linux/mutex.h>
84 #include <linux/rwsem.h>
85 #include <linux/string.h>
86 #include <linux/mm.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/ethtool_netlink.h>
96 #include <linux/skbuff.h>
97 #include <linux/kthread.h>
98 #include <linux/bpf.h>
99 #include <linux/bpf_trace.h>
100 #include <net/net_namespace.h>
101 #include <net/sock.h>
102 #include <net/busy_poll.h>
103 #include <linux/rtnetlink.h>
104 #include <linux/stat.h>
105 #include <net/dsa.h>
106 #include <net/dst.h>
107 #include <net/dst_metadata.h>
108 #include <net/gro.h>
109 #include <net/netdev_queues.h>
110 #include <net/pkt_sched.h>
111 #include <net/pkt_cls.h>
112 #include <net/checksum.h>
113 #include <net/xfrm.h>
114 #include <net/tcx.h>
115 #include <linux/highmem.h>
116 #include <linux/init.h>
117 #include <linux/module.h>
118 #include <linux/netpoll.h>
119 #include <linux/rcupdate.h>
120 #include <linux/delay.h>
121 #include <net/iw_handler.h>
122 #include <asm/current.h>
123 #include <linux/audit.h>
124 #include <linux/dmaengine.h>
125 #include <linux/err.h>
126 #include <linux/ctype.h>
127 #include <linux/if_arp.h>
128 #include <linux/if_vlan.h>
129 #include <linux/ip.h>
130 #include <net/ip.h>
131 #include <net/mpls.h>
132 #include <linux/ipv6.h>
133 #include <linux/in.h>
134 #include <linux/jhash.h>
135 #include <linux/random.h>
136 #include <trace/events/napi.h>
137 #include <trace/events/net.h>
138 #include <trace/events/skb.h>
139 #include <trace/events/qdisc.h>
140 #include <trace/events/xdp.h>
141 #include <linux/inetdevice.h>
142 #include <linux/cpu_rmap.h>
143 #include <linux/static_key.h>
144 #include <linux/hashtable.h>
145 #include <linux/vmalloc.h>
146 #include <linux/if_macvlan.h>
147 #include <linux/errqueue.h>
148 #include <linux/hrtimer.h>
149 #include <linux/netfilter_netdev.h>
150 #include <linux/crash_dump.h>
151 #include <linux/sctp.h>
152 #include <net/udp_tunnel.h>
153 #include <linux/net_namespace.h>
154 #include <linux/indirect_call_wrapper.h>
155 #include <net/devlink.h>
156 #include <linux/pm_runtime.h>
157 #include <linux/prandom.h>
158 #include <linux/once_lite.h>
159 #include <net/netdev_lock.h>
160 #include <net/netdev_rx_queue.h>
161 #include <net/page_pool/types.h>
162 #include <net/page_pool/helpers.h>
163 #include <net/page_pool/memory_provider.h>
164 #include <net/rps.h>
165 #include <linux/phy_link_topology.h>
166 
167 #include "dev.h"
168 #include "devmem.h"
169 #include "net-sysfs.h"
170 
171 static DEFINE_SPINLOCK(ptype_lock);
172 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
173 
174 static int netif_rx_internal(struct sk_buff *skb);
175 static int call_netdevice_notifiers_extack(unsigned long val,
176 					   struct net_device *dev,
177 					   struct netlink_ext_ack *extack);
178 
179 static DEFINE_MUTEX(ifalias_mutex);
180 
181 /* protects napi_hash addition/deletion and napi_gen_id */
182 static DEFINE_SPINLOCK(napi_hash_lock);
183 
184 static unsigned int napi_gen_id = NR_CPUS;
185 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
186 
187 static inline void dev_base_seq_inc(struct net *net)
188 {
189 	unsigned int val = net->dev_base_seq + 1;
190 
191 	WRITE_ONCE(net->dev_base_seq, val ?: 1);
192 }
193 
194 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
195 {
196 	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
197 
198 	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
199 }
200 
201 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
202 {
203 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
204 }
205 
206 #ifndef CONFIG_PREEMPT_RT
207 
208 static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);
209 
210 static int __init setup_backlog_napi_threads(char *arg)
211 {
212 	static_branch_enable(&use_backlog_threads_key);
213 	return 0;
214 }
215 early_param("thread_backlog_napi", setup_backlog_napi_threads);
216 
217 static bool use_backlog_threads(void)
218 {
219 	return static_branch_unlikely(&use_backlog_threads_key);
220 }
221 
222 #else
223 
224 static bool use_backlog_threads(void)
225 {
226 	return true;
227 }
228 
229 #endif
230 
231 static inline void backlog_lock_irq_save(struct softnet_data *sd,
232 					 unsigned long *flags)
233 {
234 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
235 		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
236 	else
237 		local_irq_save(*flags);
238 }
239 
240 static inline void backlog_lock_irq_disable(struct softnet_data *sd)
241 {
242 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
243 		spin_lock_irq(&sd->input_pkt_queue.lock);
244 	else
245 		local_irq_disable();
246 }
247 
248 static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
249 					      unsigned long *flags)
250 {
251 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
252 		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
253 	else
254 		local_irq_restore(*flags);
255 }
256 
257 static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
258 {
259 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
260 		spin_unlock_irq(&sd->input_pkt_queue.lock);
261 	else
262 		local_irq_enable();
263 }
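
/* Hedged usage sketch (not part of this file): the lock/unlock helpers above
 * are meant to be used in matched pairs around input_pkt_queue accesses.
 * example_enqueue() is a hypothetical caller, shown only for illustration.
 */
static inline void example_enqueue(struct softnet_data *sd,
				   struct sk_buff *skb)
{
	unsigned long flags;

	backlog_lock_irq_save(sd, &flags);
	__skb_queue_tail(&sd->input_pkt_queue, skb);
	backlog_unlock_irq_restore(sd, &flags);
}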
264 
265 static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
266 						       const char *name)
267 {
268 	struct netdev_name_node *name_node;
269 
270 	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
271 	if (!name_node)
272 		return NULL;
273 	INIT_HLIST_NODE(&name_node->hlist);
274 	name_node->dev = dev;
275 	name_node->name = name;
276 	return name_node;
277 }
278 
279 static struct netdev_name_node *
280 netdev_name_node_head_alloc(struct net_device *dev)
281 {
282 	struct netdev_name_node *name_node;
283 
284 	name_node = netdev_name_node_alloc(dev, dev->name);
285 	if (!name_node)
286 		return NULL;
287 	INIT_LIST_HEAD(&name_node->list);
288 	return name_node;
289 }
290 
291 static void netdev_name_node_free(struct netdev_name_node *name_node)
292 {
293 	kfree(name_node);
294 }
295 
296 static void netdev_name_node_add(struct net *net,
297 				 struct netdev_name_node *name_node)
298 {
299 	hlist_add_head_rcu(&name_node->hlist,
300 			   dev_name_hash(net, name_node->name));
301 }
302 
303 static void netdev_name_node_del(struct netdev_name_node *name_node)
304 {
305 	hlist_del_rcu(&name_node->hlist);
306 }
307 
308 static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
309 							const char *name)
310 {
311 	struct hlist_head *head = dev_name_hash(net, name);
312 	struct netdev_name_node *name_node;
313 
314 	hlist_for_each_entry(name_node, head, hlist)
315 		if (!strcmp(name_node->name, name))
316 			return name_node;
317 	return NULL;
318 }
319 
320 static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
321 							    const char *name)
322 {
323 	struct hlist_head *head = dev_name_hash(net, name);
324 	struct netdev_name_node *name_node;
325 
326 	hlist_for_each_entry_rcu(name_node, head, hlist)
327 		if (!strcmp(name_node->name, name))
328 			return name_node;
329 	return NULL;
330 }
331 
332 bool netdev_name_in_use(struct net *net, const char *name)
333 {
334 	return netdev_name_node_lookup(net, name);
335 }
336 EXPORT_SYMBOL(netdev_name_in_use);
337 
338 int netdev_name_node_alt_create(struct net_device *dev, const char *name)
339 {
340 	struct netdev_name_node *name_node;
341 	struct net *net = dev_net(dev);
342 
343 	name_node = netdev_name_node_lookup(net, name);
344 	if (name_node)
345 		return -EEXIST;
346 	name_node = netdev_name_node_alloc(dev, name);
347 	if (!name_node)
348 		return -ENOMEM;
349 	netdev_name_node_add(net, name_node);
350 	/* The node that holds dev->name acts as the head of the per-device list. */
351 	list_add_tail_rcu(&name_node->list, &dev->name_node->list);
352 
353 	return 0;
354 }
355 
356 static void netdev_name_node_alt_free(struct rcu_head *head)
357 {
358 	struct netdev_name_node *name_node =
359 		container_of(head, struct netdev_name_node, rcu);
360 
361 	kfree(name_node->name);
362 	netdev_name_node_free(name_node);
363 }
364 
365 static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
366 {
367 	netdev_name_node_del(name_node);
368 	list_del(&name_node->list);
369 	call_rcu(&name_node->rcu, netdev_name_node_alt_free);
370 }
371 
372 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
373 {
374 	struct netdev_name_node *name_node;
375 	struct net *net = dev_net(dev);
376 
377 	name_node = netdev_name_node_lookup(net, name);
378 	if (!name_node)
379 		return -ENOENT;
380 	/* lookup might have found our primary name or a name belonging
381 	 * to another device.
382 	 */
383 	if (name_node == dev->name_node || name_node->dev != dev)
384 		return -EINVAL;
385 
386 	__netdev_name_node_alt_destroy(name_node);
387 	return 0;
388 }
389 
390 static void netdev_name_node_alt_flush(struct net_device *dev)
391 {
392 	struct netdev_name_node *name_node, *tmp;
393 
394 	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) {
395 		list_del(&name_node->list);
396 		netdev_name_node_alt_free(&name_node->rcu);
397 	}
398 }
399 
400 /* Device list insertion */
401 static void list_netdevice(struct net_device *dev)
402 {
403 	struct netdev_name_node *name_node;
404 	struct net *net = dev_net(dev);
405 
406 	ASSERT_RTNL();
407 
408 	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
409 	netdev_name_node_add(net, dev->name_node);
410 	hlist_add_head_rcu(&dev->index_hlist,
411 			   dev_index_hash(net, dev->ifindex));
412 
413 	netdev_for_each_altname(dev, name_node)
414 		netdev_name_node_add(net, name_node);
415 
416 	/* We reserved the ifindex; this can't fail. */
417 	WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));
418 
419 	dev_base_seq_inc(net);
420 }
421 
422 /* Device list removal
423  * The caller must respect an RCU grace period before freeing/reusing dev.
424  */
425 static void unlist_netdevice(struct net_device *dev)
426 {
427 	struct netdev_name_node *name_node;
428 	struct net *net = dev_net(dev);
429 
430 	ASSERT_RTNL();
431 
432 	xa_erase(&net->dev_by_index, dev->ifindex);
433 
434 	netdev_for_each_altname(dev, name_node)
435 		netdev_name_node_del(name_node);
436 
437 	/* Unlink dev from the device chain */
438 	list_del_rcu(&dev->dev_list);
439 	netdev_name_node_del(dev->name_node);
440 	hlist_del_rcu(&dev->index_hlist);
441 
442 	dev_base_seq_inc(dev_net(dev));
443 }
444 
445 /*
446  *	Our notifier list
447  */
448 
449 static RAW_NOTIFIER_HEAD(netdev_chain);
450 
451 /*
452  *	Device drivers call our routines to queue packets here. We empty the
453  *	queue in the local softnet handler.
454  */
455 
456 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = {
457 	.process_queue_bh_lock = INIT_LOCAL_LOCK(process_queue_bh_lock),
458 };
459 EXPORT_PER_CPU_SYMBOL(softnet_data);
460 
461 /* Page_pool has a lockless array/stack to alloc/recycle pages.
462  * PP consumers must take care to call these APIs from the appropriate
463  * context (e.g. NAPI context).
464  */
465 DEFINE_PER_CPU(struct page_pool *, system_page_pool);
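
/* Hedged sketch (illustration only): consuming the per-CPU system page pool.
 * example_pp_alloc() is hypothetical; per the comment above, it must run in
 * an appropriate context (e.g. NAPI/softirq) where the pool pointer is stable.
 */
static inline struct page *example_pp_alloc(void)
{
	struct page_pool *pool = this_cpu_read(system_page_pool);

	return pool ? page_pool_dev_alloc_pages(pool) : NULL;
}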
466 
467 #ifdef CONFIG_LOCKDEP
468 /*
469  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
470  * according to dev->type
471  */
472 static const unsigned short netdev_lock_type[] = {
473 	 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
474 	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
475 	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
476 	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
477 	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
478 	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
479 	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
480 	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
481 	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
482 	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
483 	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
484 	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
485 	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
486 	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
487 	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
488 
489 static const char *const netdev_lock_name[] = {
490 	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
491 	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
492 	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
493 	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
494 	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
495 	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
496 	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
497 	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
498 	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
499 	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
500 	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
501 	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
502 	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
503 	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
504 	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
505 
506 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
507 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
508 
509 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
510 {
511 	int i;
512 
513 	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
514 		if (netdev_lock_type[i] == dev_type)
515 			return i;
516 	/* the last key is used by default */
517 	return ARRAY_SIZE(netdev_lock_type) - 1;
518 }
519 
520 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
521 						 unsigned short dev_type)
522 {
523 	int i;
524 
525 	i = netdev_lock_pos(dev_type);
526 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
527 				   netdev_lock_name[i]);
528 }
529 
530 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
531 {
532 	int i;
533 
534 	i = netdev_lock_pos(dev->type);
535 	lockdep_set_class_and_name(&dev->addr_list_lock,
536 				   &netdev_addr_lock_key[i],
537 				   netdev_lock_name[i]);
538 }
539 #else
540 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
541 						 unsigned short dev_type)
542 {
543 }
544 
545 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
546 {
547 }
548 #endif
549 
550 /*******************************************************************************
551  *
552  *		Protocol management and registration routines
553  *
554  *******************************************************************************/
555 
556 
557 /*
558  *	Add a protocol ID to the list. Now that the input handler is
559  *	smarter we can dispense with all the messy stuff that used to be
560  *	here.
561  *
562  *	BEWARE!!! Protocol handlers that mangle input packets
563  *	MUST BE last in the hash buckets, and the walk over protocol
564  *	handlers MUST start from the promiscuous ptype_all chain in net_bh.
565  *	This is true now; do not change it.
566  *	Explanation: if a handler that mangles packets were first on the
567  *	list, it could not sense that the packet is cloned and should be
568  *	copied-on-write, so it would change it in place and subsequent
569  *	readers would get a broken packet.
570  *							--ANK (980803)
571  */
572 
573 static inline struct list_head *ptype_head(const struct packet_type *pt)
574 {
575 	if (pt->type == htons(ETH_P_ALL)) {
576 		if (!pt->af_packet_net && !pt->dev)
577 			return NULL;
578 
579 		return pt->dev ? &pt->dev->ptype_all :
580 				 &pt->af_packet_net->ptype_all;
581 	}
582 
583 	if (pt->dev)
584 		return &pt->dev->ptype_specific;
585 
586 	return pt->af_packet_net ? &pt->af_packet_net->ptype_specific :
587 				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
588 }
589 
590 /**
591  *	dev_add_pack - add packet handler
592  *	@pt: packet type declaration
593  *
594  *	Add a protocol handler to the networking stack. The passed &packet_type
595  *	is linked into kernel lists and may not be freed until it has been
596  *	removed from the kernel lists.
597  *
598  *	This call does not sleep, therefore it cannot
599  *	guarantee that all CPUs in the middle of receiving packets
600  *	will see the new packet type (until the next received packet).
601  */
602 
603 void dev_add_pack(struct packet_type *pt)
604 {
605 	struct list_head *head = ptype_head(pt);
606 
607 	if (WARN_ON_ONCE(!head))
608 		return;
609 
610 	spin_lock(&ptype_lock);
611 	list_add_rcu(&pt->list, head);
612 	spin_unlock(&ptype_lock);
613 }
614 EXPORT_SYMBOL(dev_add_pack);
615 
616 /**
617  *	__dev_remove_pack	 - remove packet handler
618  *	@pt: packet type declaration
619  *
620  *	Remove a protocol handler that was previously added to the kernel
621  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
622  *	from the kernel lists and can be freed or reused once this function
623  *	returns.
624  *
625  *      The packet type might still be in use by receivers
626  *	and must not be freed until after all the CPUs have gone
627  *	through a quiescent state.
628  */
629 void __dev_remove_pack(struct packet_type *pt)
630 {
631 	struct list_head *head = ptype_head(pt);
632 	struct packet_type *pt1;
633 
634 	if (!head)
635 		return;
636 
637 	spin_lock(&ptype_lock);
638 
639 	list_for_each_entry(pt1, head, list) {
640 		if (pt == pt1) {
641 			list_del_rcu(&pt->list);
642 			goto out;
643 		}
644 	}
645 
646 	pr_warn("dev_remove_pack: %p not found\n", pt);
647 out:
648 	spin_unlock(&ptype_lock);
649 }
650 EXPORT_SYMBOL(__dev_remove_pack);
651 
652 /**
653  *	dev_remove_pack	 - remove packet handler
654  *	@pt: packet type declaration
655  *
656  *	Remove a protocol handler that was previously added to the kernel
657  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
658  *	from the kernel lists and can be freed or reused once this function
659  *	returns.
660  *
661  *	This call sleeps to guarantee that no CPU is looking at the packet
662  *	type after return.
663  */
664 void dev_remove_pack(struct packet_type *pt)
665 {
666 	__dev_remove_pack(pt);
667 
668 	synchronize_net();
669 }
670 EXPORT_SYMBOL(dev_remove_pack);
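
/* Hedged usage sketch (illustration only): registering and unregistering a
 * packet handler. example_rcv and example_pt are hypothetical. Note that,
 * per ptype_head() above, an ETH_P_ALL handler must set either .dev or
 * .af_packet_net before calling dev_add_pack().
 */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* Taps see shared skbs; consume our reference and move on. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_pt __read_mostly = {
	.type = cpu_to_be16(ETH_P_ALL),
	.func = example_rcv,
	/* .dev must be set to a real net_device before dev_add_pack() */
};

/* dev_add_pack(&example_pt);  ...  dev_remove_pack(&example_pt); */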
671 
672 
673 /*******************************************************************************
674  *
675  *			    Device Interface Subroutines
676  *
677  *******************************************************************************/
678 
679 /**
680  *	dev_get_iflink	- get 'iflink' value of an interface
681  *	@dev: targeted interface
682  *
683  *	Indicates the ifindex the interface is linked to.
684  *	Physical interfaces have the same 'ifindex' and 'iflink' values.
685  */
686 
687 int dev_get_iflink(const struct net_device *dev)
688 {
689 	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
690 		return dev->netdev_ops->ndo_get_iflink(dev);
691 
692 	return READ_ONCE(dev->ifindex);
693 }
694 EXPORT_SYMBOL(dev_get_iflink);
695 
696 /**
697  *	dev_fill_metadata_dst - Retrieve tunnel egress information.
698  *	@dev: targeted interface
699  *	@skb: The packet.
700  *
701  *	For better visibility of tunnel traffic, OVS needs to retrieve
702  *	egress tunnel information for a packet. The following API allows
703  *	the user to get this info.
704  */
705 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
706 {
707 	struct ip_tunnel_info *info;
708 
709 	if (!dev->netdev_ops  || !dev->netdev_ops->ndo_fill_metadata_dst)
710 		return -EINVAL;
711 
712 	info = skb_tunnel_info_unclone(skb);
713 	if (!info)
714 		return -ENOMEM;
715 	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
716 		return -EINVAL;
717 
718 	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
719 }
720 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
721 
722 static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
723 {
724 	int k = stack->num_paths++;
725 
726 	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
727 		return NULL;
728 
729 	return &stack->path[k];
730 }
731 
732 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
733 			  struct net_device_path_stack *stack)
734 {
735 	const struct net_device *last_dev;
736 	struct net_device_path_ctx ctx = {
737 		.dev	= dev,
738 	};
739 	struct net_device_path *path;
740 	int ret = 0;
741 
742 	memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
743 	stack->num_paths = 0;
744 	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
745 		last_dev = ctx.dev;
746 		path = dev_fwd_path(stack);
747 		if (!path)
748 			return -1;
749 
750 		memset(path, 0, sizeof(struct net_device_path));
751 		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
752 		if (ret < 0)
753 			return -1;
754 
755 		if (WARN_ON_ONCE(last_dev == ctx.dev))
756 			return -1;
757 	}
758 
759 	if (!ctx.dev)
760 		return ret;
761 
762 	path = dev_fwd_path(stack);
763 	if (!path)
764 		return -1;
765 	path->type = DEV_PATH_ETHERNET;
766 	path->dev = ctx.dev;
767 
768 	return ret;
769 }
770 EXPORT_SYMBOL_GPL(dev_fill_forward_path);
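
/* Hedged usage sketch (illustration only): walking the path stack filled in
 * by dev_fill_forward_path(). example_walk_paths() is hypothetical.
 */
static void example_walk_paths(const struct net_device *dev, const u8 *daddr)
{
	struct net_device_path_stack stack;
	int i;

	if (dev_fill_forward_path(dev, daddr, &stack) < 0)
		return;

	for (i = 0; i < stack.num_paths; i++)
		pr_debug("path[%d] type=%d dev=%s\n", i, stack.path[i].type,
			 stack.path[i].dev ? stack.path[i].dev->name : "none");
}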
771 
772 /* must be called under rcu_read_lock(), as we don't take a reference */
773 static struct napi_struct *napi_by_id(unsigned int napi_id)
774 {
775 	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
776 	struct napi_struct *napi;
777 
778 	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
779 		if (napi->napi_id == napi_id)
780 			return napi;
781 
782 	return NULL;
783 }
784 
785 /* must be called under rcu_read_lock(), as we don't take a reference */
786 static struct napi_struct *
787 netdev_napi_by_id(struct net *net, unsigned int napi_id)
788 {
789 	struct napi_struct *napi;
790 
791 	napi = napi_by_id(napi_id);
792 	if (!napi)
793 		return NULL;
794 
795 	if (WARN_ON_ONCE(!napi->dev))
796 		return NULL;
797 	if (!net_eq(net, dev_net(napi->dev)))
798 		return NULL;
799 
800 	return napi;
801 }
802 
803 /**
804  *	netdev_napi_by_id_lock() - find a device by NAPI ID and lock it
805  *	@net: the applicable net namespace
806  *	@napi_id: ID of a NAPI of a target device
807  *
808  *	Find a NAPI instance with @napi_id. Lock its device.
809  *	The device must be in %NETREG_REGISTERED state for lookup to succeed.
810  *	netdev_unlock() must be called to release it.
811  *
812  *	Return: pointer to the NAPI instance with its device's lock held, or NULL if not found.
813  */
814 struct napi_struct *
815 netdev_napi_by_id_lock(struct net *net, unsigned int napi_id)
816 {
817 	struct napi_struct *napi;
818 	struct net_device *dev;
819 
820 	rcu_read_lock();
821 	napi = netdev_napi_by_id(net, napi_id);
822 	if (!napi || READ_ONCE(napi->dev->reg_state) != NETREG_REGISTERED) {
823 		rcu_read_unlock();
824 		return NULL;
825 	}
826 
827 	dev = napi->dev;
828 	dev_hold(dev);
829 	rcu_read_unlock();
830 
831 	dev = __netdev_put_lock(dev);
832 	if (!dev)
833 		return NULL;
834 
835 	rcu_read_lock();
836 	napi = netdev_napi_by_id(net, napi_id);
837 	if (napi && napi->dev != dev)
838 		napi = NULL;
839 	rcu_read_unlock();
840 
841 	if (!napi)
842 		netdev_unlock(dev);
843 	return napi;
844 }
845 
846 /**
847  *	__dev_get_by_name	- find a device by its name
848  *	@net: the applicable net namespace
849  *	@name: name to find
850  *
851  *	Find an interface by name. Must be called under the RTNL semaphore.
852  *	If the name is found a pointer to the device is returned.
853  *	If the name is not found then %NULL is returned. The
854  *	reference counters are not incremented so the caller must be
855  *	careful with locks.
856  */
857 
858 struct net_device *__dev_get_by_name(struct net *net, const char *name)
859 {
860 	struct netdev_name_node *node_name;
861 
862 	node_name = netdev_name_node_lookup(net, name);
863 	return node_name ? node_name->dev : NULL;
864 }
865 EXPORT_SYMBOL(__dev_get_by_name);
866 
867 /**
868  * dev_get_by_name_rcu	- find a device by its name
869  * @net: the applicable net namespace
870  * @name: name to find
871  *
872  * Find an interface by name.
873  * If the name is found a pointer to the device is returned.
874  * If the name is not found then %NULL is returned.
875  * The reference counters are not incremented so the caller must be
876  * careful with locks. The caller must hold RCU lock.
877  */
878 
879 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
880 {
881 	struct netdev_name_node *node_name;
882 
883 	node_name = netdev_name_node_lookup_rcu(net, name);
884 	return node_name ? node_name->dev : NULL;
885 }
886 EXPORT_SYMBOL(dev_get_by_name_rcu);
887 
888 /* Deprecated for new users, call netdev_get_by_name() instead */
889 struct net_device *dev_get_by_name(struct net *net, const char *name)
890 {
891 	struct net_device *dev;
892 
893 	rcu_read_lock();
894 	dev = dev_get_by_name_rcu(net, name);
895 	dev_hold(dev);
896 	rcu_read_unlock();
897 	return dev;
898 }
899 EXPORT_SYMBOL(dev_get_by_name);
900 
901 /**
902  *	netdev_get_by_name() - find a device by its name
903  *	@net: the applicable net namespace
904  *	@name: name to find
905  *	@tracker: tracking object for the acquired reference
906  *	@gfp: allocation flags for the tracker
907  *
908  *	Find an interface by name. This can be called from any
909  *	context and does its own locking. The returned handle has
910  *	the usage count incremented and the caller must use netdev_put() to
911  *	release it when it is no longer needed. %NULL is returned if no
912  *	matching device is found.
913  */
914 struct net_device *netdev_get_by_name(struct net *net, const char *name,
915 				      netdevice_tracker *tracker, gfp_t gfp)
916 {
917 	struct net_device *dev;
918 
919 	dev = dev_get_by_name(net, name);
920 	if (dev)
921 		netdev_tracker_alloc(dev, tracker, gfp);
922 	return dev;
923 }
924 EXPORT_SYMBOL(netdev_get_by_name);
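
/* Hedged usage sketch (illustration only): a tracked lookup must be paired
 * with netdev_put() on the same tracker. example_use_by_name() and the
 * "eth0" name are hypothetical.
 */
static int example_use_by_name(struct net *net)
{
	netdevice_tracker tracker;
	struct net_device *dev;

	dev = netdev_get_by_name(net, "eth0", &tracker, GFP_KERNEL);
	if (!dev)
		return -ENODEV;
	/* ... use dev; the tracked reference keeps it alive ... */
	netdev_put(dev, &tracker);
	return 0;
}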
925 
926 /**
927  *	__dev_get_by_index - find a device by its ifindex
928  *	@net: the applicable net namespace
929  *	@ifindex: index of device
930  *
931  *	Search for an interface by index. Returns a pointer to the device,
932  *	or %NULL if the device is not found. The device has not
933  *	had its reference counter increased so the caller must be careful
934  *	about locking. The caller must hold the RTNL semaphore.
935  */
936 
937 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
938 {
939 	struct net_device *dev;
940 	struct hlist_head *head = dev_index_hash(net, ifindex);
941 
942 	hlist_for_each_entry(dev, head, index_hlist)
943 		if (dev->ifindex == ifindex)
944 			return dev;
945 
946 	return NULL;
947 }
948 EXPORT_SYMBOL(__dev_get_by_index);
949 
950 /**
951  *	dev_get_by_index_rcu - find a device by its ifindex
952  *	@net: the applicable net namespace
953  *	@ifindex: index of device
954  *
955  *	Search for an interface by index. Returns a pointer to the device,
956  *	or %NULL if the device is not found. The device has not
957  *	had its reference counter increased so the caller must be careful
958  *	about locking. The caller must hold RCU lock.
959  */
960 
961 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
962 {
963 	struct net_device *dev;
964 	struct hlist_head *head = dev_index_hash(net, ifindex);
965 
966 	hlist_for_each_entry_rcu(dev, head, index_hlist)
967 		if (dev->ifindex == ifindex)
968 			return dev;
969 
970 	return NULL;
971 }
972 EXPORT_SYMBOL(dev_get_by_index_rcu);
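
/* Hedged usage sketch (illustration only): an RCU lookup is only valid
 * inside the read-side critical section; take a reference before leaving
 * it, as dev_get_by_index() below does. example_peek_mtu() is hypothetical.
 */
static int example_peek_mtu(struct net *net, int ifindex)
{
	struct net_device *dev;
	int mtu = -ENODEV;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		mtu = READ_ONCE(dev->mtu);
	rcu_read_unlock();

	return mtu;
}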
973 
974 /* Deprecated for new users, call netdev_get_by_index() instead */
975 struct net_device *dev_get_by_index(struct net *net, int ifindex)
976 {
977 	struct net_device *dev;
978 
979 	rcu_read_lock();
980 	dev = dev_get_by_index_rcu(net, ifindex);
981 	dev_hold(dev);
982 	rcu_read_unlock();
983 	return dev;
984 }
985 EXPORT_SYMBOL(dev_get_by_index);
986 
987 /**
988  *	netdev_get_by_index() - find a device by its ifindex
989  *	@net: the applicable net namespace
990  *	@ifindex: index of device
991  *	@tracker: tracking object for the acquired reference
992  *	@gfp: allocation flags for the tracker
993  *
994  *	Search for an interface by index. Returns a pointer to the device,
995  *	or NULL if the device is not found. The device returned has
996  *	had a reference added and the pointer is safe until the user calls
997  *	netdev_put() to indicate they have finished with it.
998  */
999 struct net_device *netdev_get_by_index(struct net *net, int ifindex,
1000 				       netdevice_tracker *tracker, gfp_t gfp)
1001 {
1002 	struct net_device *dev;
1003 
1004 	dev = dev_get_by_index(net, ifindex);
1005 	if (dev)
1006 		netdev_tracker_alloc(dev, tracker, gfp);
1007 	return dev;
1008 }
1009 EXPORT_SYMBOL(netdev_get_by_index);
1010 
1011 /**
1012  *	dev_get_by_napi_id - find a device by napi_id
1013  *	@napi_id: ID of the NAPI struct
1014  *
1015  *	Search for an interface by NAPI ID. Returns a pointer to the device,
1016  *	or %NULL if the device is not found. The device has not had
1017  *	its reference counter increased so the caller must be careful
1018  *	about locking. The caller must hold RCU lock.
1019  */
1020 struct net_device *dev_get_by_napi_id(unsigned int napi_id)
1021 {
1022 	struct napi_struct *napi;
1023 
1024 	WARN_ON_ONCE(!rcu_read_lock_held());
1025 
1026 	if (!napi_id_valid(napi_id))
1027 		return NULL;
1028 
1029 	napi = napi_by_id(napi_id);
1030 
1031 	return napi ? napi->dev : NULL;
1032 }
1033 
1034 /* Release the held reference on the net_device, and if the net_device
1035  * is still registered, try to take the instance lock. If the device is being
1036  * unregistered, NULL will be returned (but the reference has been released
1037  * either way!).
1038  *
1039  * This helper is intended for locking a net_device after it has been looked up
1040  * using a lockless lookup helper. The lock prevents the instance from going away.
1041  */
1042 struct net_device *__netdev_put_lock(struct net_device *dev)
1043 {
1044 	netdev_lock(dev);
1045 	if (dev->reg_state > NETREG_REGISTERED) {
1046 		netdev_unlock(dev);
1047 		dev_put(dev);
1048 		return NULL;
1049 	}
1050 	dev_put(dev);
1051 	return dev;
1052 }
1053 
1054 /**
1055  *	netdev_get_by_index_lock() - find a device by its ifindex
1056  *	@net: the applicable net namespace
1057  *	@ifindex: index of device
1058  *
1059  *	Search for an interface by index. If a valid device
1060  *	with @ifindex is found it will be returned with netdev->lock held.
1061  *	netdev_unlock() must be called to release it.
1062  *
1063  *	Return: pointer to a device with lock held, NULL if not found.
1064  */
1065 struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex)
1066 {
1067 	struct net_device *dev;
1068 
1069 	dev = dev_get_by_index(net, ifindex);
1070 	if (!dev)
1071 		return NULL;
1072 
1073 	return __netdev_put_lock(dev);
1074 }
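
/* Hedged usage sketch (illustration only): a locked lookup must be paired
 * with netdev_unlock(). example_with_locked_dev() is hypothetical.
 */
static int example_with_locked_dev(struct net *net, int ifindex)
{
	struct net_device *dev;

	dev = netdev_get_by_index_lock(net, ifindex);
	if (!dev)
		return -ENODEV;
	/* ... dev->lock is held; the instance cannot go away ... */
	netdev_unlock(dev);
	return 0;
}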
1075 
1076 struct net_device *
1077 netdev_xa_find_lock(struct net *net, struct net_device *dev,
1078 		    unsigned long *index)
1079 {
1080 	if (dev)
1081 		netdev_unlock(dev);
1082 
1083 	do {
1084 		rcu_read_lock();
1085 		dev = xa_find(&net->dev_by_index, index, ULONG_MAX, XA_PRESENT);
1086 		if (!dev) {
1087 			rcu_read_unlock();
1088 			return NULL;
1089 		}
1090 		dev_hold(dev);
1091 		rcu_read_unlock();
1092 
1093 		dev = __netdev_put_lock(dev);
1094 		if (dev)
1095 			return dev;
1096 
1097 		(*index)++;
1098 	} while (true);
1099 }
1100 
1101 static DEFINE_SEQLOCK(netdev_rename_lock);
1102 
1103 void netdev_copy_name(struct net_device *dev, char *name)
1104 {
1105 	unsigned int seq;
1106 
1107 	do {
1108 		seq = read_seqbegin(&netdev_rename_lock);
1109 		strscpy(name, dev->name, IFNAMSIZ);
1110 	} while (read_seqretry(&netdev_rename_lock, seq));
1111 }
1112 
1113 /**
1114  *	netdev_get_name - get a netdevice name, knowing its ifindex.
1115  *	@net: network namespace
1116  *	@name: a pointer to the buffer where the name will be stored.
1117  *	@ifindex: the ifindex of the interface to get the name from.
1118  */
1119 int netdev_get_name(struct net *net, char *name, int ifindex)
1120 {
1121 	struct net_device *dev;
1122 	int ret;
1123 
1124 	rcu_read_lock();
1125 
1126 	dev = dev_get_by_index_rcu(net, ifindex);
1127 	if (!dev) {
1128 		ret = -ENODEV;
1129 		goto out;
1130 	}
1131 
1132 	netdev_copy_name(dev, name);
1133 
1134 	ret = 0;
1135 out:
1136 	rcu_read_unlock();
1137 	return ret;
1138 }
1139 
1140 static bool dev_addr_cmp(struct net_device *dev, unsigned short type,
1141 			 const char *ha)
1142 {
1143 	return dev->type == type && !memcmp(dev->dev_addr, ha, dev->addr_len);
1144 }
1145 
1146 /**
1147  *	dev_getbyhwaddr_rcu - find a device by its hardware address
1148  *	@net: the applicable net namespace
1149  *	@type: media type of device
1150  *	@ha: hardware address
1151  *
1152  *	Search for an interface by MAC address. Returns a pointer to the
1153  *	device, or NULL if it is not found.
1154  *	The caller must hold RCU.
1155  *	The returned device has not had its ref count increased
1156  *	and the caller must therefore be careful about locking.
1157  *
1158  */
1159 
1160 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1161 				       const char *ha)
1162 {
1163 	struct net_device *dev;
1164 
1165 	for_each_netdev_rcu(net, dev)
1166 		if (dev_addr_cmp(dev, type, ha))
1167 			return dev;
1168 
1169 	return NULL;
1170 }
1171 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
1172 
1173 /**
1174  * dev_getbyhwaddr() - find a device by its hardware address
1175  * @net: the applicable net namespace
1176  * @type: media type of device
1177  * @ha: hardware address
1178  *
1179  * Similar to dev_getbyhwaddr_rcu(), but the owner needs to hold
1180  * rtnl_lock.
1181  *
1182  * Context: rtnl_lock() must be held.
1183  * Return: pointer to the net_device, or NULL if not found
1184  */
1185 struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
1186 				   const char *ha)
1187 {
1188 	struct net_device *dev;
1189 
1190 	ASSERT_RTNL();
1191 	for_each_netdev(net, dev)
1192 		if (dev_addr_cmp(dev, type, ha))
1193 			return dev;
1194 
1195 	return NULL;
1196 }
1197 EXPORT_SYMBOL(dev_getbyhwaddr);
1198 
1199 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
1200 {
1201 	struct net_device *dev, *ret = NULL;
1202 
1203 	rcu_read_lock();
1204 	for_each_netdev_rcu(net, dev)
1205 		if (dev->type == type) {
1206 			dev_hold(dev);
1207 			ret = dev;
1208 			break;
1209 		}
1210 	rcu_read_unlock();
1211 	return ret;
1212 }
1213 EXPORT_SYMBOL(dev_getfirstbyhwtype);
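
/* Hedged usage sketch (illustration only): dev_getfirstbyhwtype() returns a
 * held reference, so it must be balanced with dev_put(). ARPHRD_ETHER here
 * is just an example type.
 */
static void example_first_ether(struct net *net)
{
	struct net_device *dev = dev_getfirstbyhwtype(net, ARPHRD_ETHER);

	if (dev) {
		pr_debug("first ethernet device: %s\n", dev->name);
		dev_put(dev);
	}
}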
1214 
1215 /**
1216  *	__dev_get_by_flags - find any device with given flags
1217  *	@net: the applicable net namespace
1218  *	@if_flags: IFF_* values
1219  *	@mask: bitmask of bits in if_flags to check
1220  *
1221  *	Search for any interface with the given flags. Returns a pointer to
1222  *	the device, or NULL if none is found. Must be called inside
1223  *	rtnl_lock(), and result refcount is unchanged.
1224  */
1225 
1226 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
1227 				      unsigned short mask)
1228 {
1229 	struct net_device *dev, *ret;
1230 
1231 	ASSERT_RTNL();
1232 
1233 	ret = NULL;
1234 	for_each_netdev(net, dev) {
1235 		if (((dev->flags ^ if_flags) & mask) == 0) {
1236 			ret = dev;
1237 			break;
1238 		}
1239 	}
1240 	return ret;
1241 }
1242 EXPORT_SYMBOL(__dev_get_by_flags);
1243 
1244 /**
1245  *	dev_valid_name - check if name is okay for network device
1246  *	@name: name string
1247  *
1248  *	Network device names need to be valid file names to
1249  *	allow sysfs to work.  We also disallow any kind of
1250  *	whitespace.
1251  */
1252 bool dev_valid_name(const char *name)
1253 {
1254 	if (*name == '\0')
1255 		return false;
1256 	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
1257 		return false;
1258 	if (!strcmp(name, ".") || !strcmp(name, ".."))
1259 		return false;
1260 
1261 	while (*name) {
1262 		if (*name == '/' || *name == ':' || isspace(*name))
1263 			return false;
1264 		name++;
1265 	}
1266 	return true;
1267 }
1268 EXPORT_SYMBOL(dev_valid_name);
1269 
1270 /**
1271  *	__dev_alloc_name - allocate a name for a device
1272  *	@net: network namespace to allocate the device name in
1273  *	@name: name format string
1274  *	@res: result name string
1275  *
1276  *	Passed a format string - eg "lt%d" - it will try to find a suitable
1277  *	id. It scans the list of devices to build up a free map, then chooses
1278  *	the first empty slot. The caller must hold the dev_base or rtnl lock
1279  *	while allocating the name and adding the device in order to avoid
1280  *	duplicates.
1281  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1282  *	Returns the number of the unit assigned or a negative errno code.
1283  */
1284 
1285 static int __dev_alloc_name(struct net *net, const char *name, char *res)
1286 {
1287 	int i = 0;
1288 	const char *p;
1289 	const int max_netdevices = 8*PAGE_SIZE;
1290 	unsigned long *inuse;
1291 	struct net_device *d;
1292 	char buf[IFNAMSIZ];
1293 
1294 	/* Verify the string as this thing may have come from the user.
1295 	 * There must be one "%d" and no other "%" characters.
1296 	 */
1297 	p = strchr(name, '%');
1298 	if (!p || p[1] != 'd' || strchr(p + 2, '%'))
1299 		return -EINVAL;
1300 
1301 	/* Use one page as a bit array of possible slots */
1302 	inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC);
1303 	if (!inuse)
1304 		return -ENOMEM;
1305 
1306 	for_each_netdev(net, d) {
1307 		struct netdev_name_node *name_node;
1308 
1309 		netdev_for_each_altname(d, name_node) {
1310 			if (!sscanf(name_node->name, name, &i))
1311 				continue;
1312 			if (i < 0 || i >= max_netdevices)
1313 				continue;
1314 
1315 			/* avoid cases where sscanf is not exact inverse of printf */
1316 			snprintf(buf, IFNAMSIZ, name, i);
1317 			if (!strncmp(buf, name_node->name, IFNAMSIZ))
1318 				__set_bit(i, inuse);
1319 		}
1320 		if (!sscanf(d->name, name, &i))
1321 			continue;
1322 		if (i < 0 || i >= max_netdevices)
1323 			continue;
1324 
1325 		/* avoid cases where sscanf is not exact inverse of printf */
1326 		snprintf(buf, IFNAMSIZ, name, i);
1327 		if (!strncmp(buf, d->name, IFNAMSIZ))
1328 			__set_bit(i, inuse);
1329 	}
1330 
1331 	i = find_first_zero_bit(inuse, max_netdevices);
1332 	bitmap_free(inuse);
1333 	if (i == max_netdevices)
1334 		return -ENFILE;
1335 
1336 	/* 'res' and 'name' could overlap, use 'buf' as an intermediate buffer */
1337 	strscpy(buf, name, IFNAMSIZ);
1338 	snprintf(res, IFNAMSIZ, buf, i);
1339 	return i;
1340 }
1341 
1342 /* Returns negative errno or allocated unit id (see __dev_alloc_name()) */
1343 static int dev_prep_valid_name(struct net *net, struct net_device *dev,
1344 			       const char *want_name, char *out_name,
1345 			       int dup_errno)
1346 {
1347 	if (!dev_valid_name(want_name))
1348 		return -EINVAL;
1349 
1350 	if (strchr(want_name, '%'))
1351 		return __dev_alloc_name(net, want_name, out_name);
1352 
1353 	if (netdev_name_in_use(net, want_name))
1354 		return -dup_errno;
1355 	if (out_name != want_name)
1356 		strscpy(out_name, want_name, IFNAMSIZ);
1357 	return 0;
1358 }
1359 
1360 /**
1361  *	dev_alloc_name - allocate a name for a device
1362  *	@dev: device
1363  *	@name: name format string
1364  *
1365  *	Passed a format string - eg "lt%d" - it will try to find a suitable
1366  *	id. It scans the list of devices to build up a free map, then chooses
1367  *	the first empty slot. The caller must hold the dev_base or rtnl lock
1368  *	while allocating the name and adding the device in order to avoid
1369  *	duplicates.
1370  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1371  *	Returns the number of the unit assigned or a negative errno code.
1372  */
1373 
1374 int dev_alloc_name(struct net_device *dev, const char *name)
1375 {
1376 	return dev_prep_valid_name(dev_net(dev), dev, name, dev->name, ENFILE);
1377 }
1378 EXPORT_SYMBOL(dev_alloc_name);
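
/* Hedged usage sketch (illustration only): drivers typically pass a "%d"
 * pattern at allocation/registration time; "example%d" is hypothetical.
 */
static int example_name_device(struct net_device *dev)
{
	int unit = dev_alloc_name(dev, "example%d");

	if (unit < 0)
		return unit;	/* -EINVAL, -ENFILE or -ENOMEM */
	/* dev->name is now e.g. "example0"; unit holds the chosen id */
	return 0;
}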
1379 
1380 static int dev_get_valid_name(struct net *net, struct net_device *dev,
1381 			      const char *name)
1382 {
1383 	int ret;
1384 
1385 	ret = dev_prep_valid_name(net, dev, name, dev->name, EEXIST);
1386 	return ret < 0 ? ret : 0;
1387 }
1388 
1389 int netif_change_name(struct net_device *dev, const char *newname)
1390 {
1391 	struct net *net = dev_net(dev);
1392 	unsigned char old_assign_type;
1393 	char oldname[IFNAMSIZ];
1394 	int err = 0;
1395 	int ret;
1396 
1397 	ASSERT_RTNL_NET(net);
1398 
1399 	if (!strncmp(newname, dev->name, IFNAMSIZ))
1400 		return 0;
1401 
1402 	memcpy(oldname, dev->name, IFNAMSIZ);
1403 
1404 	write_seqlock_bh(&netdev_rename_lock);
1405 	err = dev_get_valid_name(net, dev, newname);
1406 	write_sequnlock_bh(&netdev_rename_lock);
1407 
1408 	if (err < 0)
1409 		return err;
1410 
1411 	if (oldname[0] && !strchr(oldname, '%'))
1412 		netdev_info(dev, "renamed from %s%s\n", oldname,
1413 			    dev->flags & IFF_UP ? " (while UP)" : "");
1414 
1415 	old_assign_type = dev->name_assign_type;
1416 	WRITE_ONCE(dev->name_assign_type, NET_NAME_RENAMED);
1417 
1418 rollback:
1419 	ret = device_rename(&dev->dev, dev->name);
1420 	if (ret) {
1421 		write_seqlock_bh(&netdev_rename_lock);
1422 		memcpy(dev->name, oldname, IFNAMSIZ);
1423 		write_sequnlock_bh(&netdev_rename_lock);
1424 		WRITE_ONCE(dev->name_assign_type, old_assign_type);
1425 		return ret;
1426 	}
1427 
1428 	netdev_adjacent_rename_links(dev, oldname);
1429 
1430 	netdev_name_node_del(dev->name_node);
1431 
1432 	synchronize_net();
1433 
1434 	netdev_name_node_add(net, dev->name_node);
1435 
1436 	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1437 	ret = notifier_to_errno(ret);
1438 
1439 	if (ret) {
1440 		/* err >= 0 after dev_alloc_name() or stores the first errno */
1441 		if (err >= 0) {
1442 			err = ret;
1443 			write_seqlock_bh(&netdev_rename_lock);
1444 			memcpy(dev->name, oldname, IFNAMSIZ);
1445 			write_sequnlock_bh(&netdev_rename_lock);
1446 			memcpy(oldname, newname, IFNAMSIZ);
1447 			WRITE_ONCE(dev->name_assign_type, old_assign_type);
1448 			old_assign_type = NET_NAME_RENAMED;
1449 			goto rollback;
1450 		} else {
1451 			netdev_err(dev, "name change rollback failed: %d\n",
1452 				   ret);
1453 		}
1454 	}
1455 
1456 	return err;
1457 }
1458 
1459 int netif_set_alias(struct net_device *dev, const char *alias, size_t len)
1460 {
1461 	struct dev_ifalias *new_alias = NULL;
1462 
1463 	if (len >= IFALIASZ)
1464 		return -EINVAL;
1465 
1466 	if (len) {
1467 		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1468 		if (!new_alias)
1469 			return -ENOMEM;
1470 
1471 		memcpy(new_alias->ifalias, alias, len);
1472 		new_alias->ifalias[len] = 0;
1473 	}
1474 
1475 	mutex_lock(&ifalias_mutex);
1476 	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
1477 					mutex_is_locked(&ifalias_mutex));
1478 	mutex_unlock(&ifalias_mutex);
1479 
1480 	if (new_alias)
1481 		kfree_rcu(new_alias, rcuhead);
1482 
1483 	return len;
1484 }
1485 
1486 /**
1487  *	dev_get_alias - get ifalias of a device
1488  *	@dev: device
1489  *	@name: buffer to store name of ifalias
1490  *	@len: size of buffer
1491  *
1492  *	Get the ifalias for a device. The caller must make sure dev cannot go
1493  *	away, e.g. by holding the RCU read lock or a reference on the device.
1494  */
1495 int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1496 {
1497 	const struct dev_ifalias *alias;
1498 	int ret = 0;
1499 
1500 	rcu_read_lock();
1501 	alias = rcu_dereference(dev->ifalias);
1502 	if (alias)
1503 		ret = snprintf(name, len, "%s", alias->ifalias);
1504 	rcu_read_unlock();
1505 
1506 	return ret;
1507 }
1508 
1509 /**
1510  *	netdev_features_change - device changes features
1511  *	@dev: device to cause notification
1512  *
1513  *	Called to indicate a device has changed features.
1514  */
1515 void netdev_features_change(struct net_device *dev)
1516 {
1517 	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1518 }
1519 EXPORT_SYMBOL(netdev_features_change);
1520 
1521 /**
1522  *	netdev_state_change - device changes state
1523  *	@dev: device to cause notification
1524  *
1525  *	Called to indicate a device has changed state. This function calls
1526  *	the notifier chains for netdev_chain and sends a NEWLINK message
1527  *	to the routing socket.
1528  */
1529 void netdev_state_change(struct net_device *dev)
1530 {
1531 	if (dev->flags & IFF_UP) {
1532 		struct netdev_notifier_change_info change_info = {
1533 			.info.dev = dev,
1534 		};
1535 
1536 		call_netdevice_notifiers_info(NETDEV_CHANGE,
1537 					      &change_info.info);
1538 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
1539 	}
1540 }
1541 EXPORT_SYMBOL(netdev_state_change);
1542 
1543 /**
1544  * __netdev_notify_peers - notify network peers about existence of @dev,
1545  * to be called when rtnl lock is already held.
1546  * @dev: network device
1547  *
1548  * Generate traffic such that interested network peers are aware of
1549  * @dev, such as by generating a gratuitous ARP. This may be used when
1550  * a device wants to inform the rest of the network about some sort of
1551  * reconfiguration such as a failover event or virtual machine
1552  * migration.
1553  */
1554 void __netdev_notify_peers(struct net_device *dev)
1555 {
1556 	ASSERT_RTNL();
1557 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1558 	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1559 }
1560 EXPORT_SYMBOL(__netdev_notify_peers);
1561 
1562 /**
1563  * netdev_notify_peers - notify network peers about existence of @dev
1564  * @dev: network device
1565  *
1566  * Generate traffic such that interested network peers are aware of
1567  * @dev, such as by generating a gratuitous ARP. This may be used when
1568  * a device wants to inform the rest of the network about some sort of
1569  * reconfiguration such as a failover event or virtual machine
1570  * migration.
1571  */
1572 void netdev_notify_peers(struct net_device *dev)
1573 {
1574 	rtnl_lock();
1575 	__netdev_notify_peers(dev);
1576 	rtnl_unlock();
1577 }
1578 EXPORT_SYMBOL(netdev_notify_peers);
1579 
1580 static int napi_threaded_poll(void *data);
1581 
1582 static int napi_kthread_create(struct napi_struct *n)
1583 {
1584 	int err = 0;
1585 
1586 	/* Create and wake up the kthread once to put it in
1587 	 * TASK_INTERRUPTIBLE mode to avoid the blocked-task
1588 	 * warning and to interact correctly with loadavg.
1589 	 */
1590 	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
1591 				n->dev->name, n->napi_id);
1592 	if (IS_ERR(n->thread)) {
1593 		err = PTR_ERR(n->thread);
1594 		pr_err("kthread_run failed with err %d\n", err);
1595 		n->thread = NULL;
1596 	}
1597 
1598 	return err;
1599 }
1600 
1601 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1602 {
1603 	const struct net_device_ops *ops = dev->netdev_ops;
1604 	int ret;
1605 
1606 	ASSERT_RTNL();
1607 	dev_addr_check(dev);
1608 
1609 	if (!netif_device_present(dev)) {
1610 		/* may be detached because parent is runtime-suspended */
1611 		if (dev->dev.parent)
1612 			pm_runtime_resume(dev->dev.parent);
1613 		if (!netif_device_present(dev))
1614 			return -ENODEV;
1615 	}
1616 
1617 	/* Block netpoll from trying to do any rx path servicing.
1618 	 * If we don't do this, there is a chance that ndo_poll_controller
1619 	 * or ndo_poll may be running while we open the device.
1620 	 */
1621 	netpoll_poll_disable(dev);
1622 
1623 	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
1624 	ret = notifier_to_errno(ret);
1625 	if (ret)
1626 		return ret;
1627 
1628 	set_bit(__LINK_STATE_START, &dev->state);
1629 
1630 	netdev_ops_assert_locked(dev);
1631 
1632 	if (ops->ndo_validate_addr)
1633 		ret = ops->ndo_validate_addr(dev);
1634 
1635 	if (!ret && ops->ndo_open)
1636 		ret = ops->ndo_open(dev);
1637 
1638 	netpoll_poll_enable(dev);
1639 
1640 	if (ret)
1641 		clear_bit(__LINK_STATE_START, &dev->state);
1642 	else {
1643 		netif_set_up(dev, true);
1644 		dev_set_rx_mode(dev);
1645 		dev_activate(dev);
1646 		add_device_randomness(dev->dev_addr, dev->addr_len);
1647 	}
1648 
1649 	return ret;
1650 }
1651 
1652 int netif_open(struct net_device *dev, struct netlink_ext_ack *extack)
1653 {
1654 	int ret;
1655 
1656 	if (dev->flags & IFF_UP)
1657 		return 0;
1658 
1659 	ret = __dev_open(dev, extack);
1660 	if (ret < 0)
1661 		return ret;
1662 
1663 	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
1664 	call_netdevice_notifiers(NETDEV_UP, dev);
1665 
1666 	return ret;
1667 }
1668 
1669 static void __dev_close_many(struct list_head *head)
1670 {
1671 	struct net_device *dev;
1672 
1673 	ASSERT_RTNL();
1674 	might_sleep();
1675 
1676 	list_for_each_entry(dev, head, close_list) {
1677 		/* Temporarily disable netpoll until the interface is down */
1678 		netpoll_poll_disable(dev);
1679 
1680 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1681 
1682 		clear_bit(__LINK_STATE_START, &dev->state);
1683 
1684 		/* Synchronize to scheduled poll. We cannot touch the poll list; it
1685 		 * can even be on a different cpu. So just clear netif_running().
1686 		 *
1687 		 * dev->stop() will invoke napi_disable() on all of its
1688 		 * napi_struct instances on this device.
1689 		 */
1690 		smp_mb__after_atomic(); /* Commit netif_running(). */
1691 	}
1692 
1693 	dev_deactivate_many(head);
1694 
1695 	list_for_each_entry(dev, head, close_list) {
1696 		const struct net_device_ops *ops = dev->netdev_ops;
1697 
1698 		/*
1699 		 *	Call the device-specific close. This cannot fail.
1700 		 *	It is only called if the device is UP.
1701 		 *
1702 		 *	We allow it to be called even after a DETACH hot-plug
1703 		 *	event.
1704 		 */
1705 
1706 		netdev_ops_assert_locked(dev);
1707 
1708 		if (ops->ndo_stop)
1709 			ops->ndo_stop(dev);
1710 
1711 		netif_set_up(dev, false);
1712 		netpoll_poll_enable(dev);
1713 	}
1714 }
1715 
1716 static void __dev_close(struct net_device *dev)
1717 {
1718 	LIST_HEAD(single);
1719 
1720 	list_add(&dev->close_list, &single);
1721 	__dev_close_many(&single);
1722 	list_del(&single);
1723 }
1724 
1725 void dev_close_many(struct list_head *head, bool unlink)
1726 {
1727 	struct net_device *dev, *tmp;
1728 
1729 	/* Remove the devices that don't need to be closed */
1730 	list_for_each_entry_safe(dev, tmp, head, close_list)
1731 		if (!(dev->flags & IFF_UP))
1732 			list_del_init(&dev->close_list);
1733 
1734 	__dev_close_many(head);
1735 
1736 	list_for_each_entry_safe(dev, tmp, head, close_list) {
1737 		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
1738 		call_netdevice_notifiers(NETDEV_DOWN, dev);
1739 		if (unlink)
1740 			list_del_init(&dev->close_list);
1741 	}
1742 }
1743 EXPORT_SYMBOL(dev_close_many);
1744 
1745 void netif_close(struct net_device *dev)
1746 {
1747 	if (dev->flags & IFF_UP) {
1748 		LIST_HEAD(single);
1749 
1750 		list_add(&dev->close_list, &single);
1751 		dev_close_many(&single, true);
1752 		list_del(&single);
1753 	}
1754 }
1755 EXPORT_SYMBOL(netif_close);
1756 
1757 void netif_disable_lro(struct net_device *dev)
1758 {
1759 	struct net_device *lower_dev;
1760 	struct list_head *iter;
1761 
1762 	dev->wanted_features &= ~NETIF_F_LRO;
1763 	netdev_update_features(dev);
1764 
1765 	if (unlikely(dev->features & NETIF_F_LRO))
1766 		netdev_WARN(dev, "failed to disable LRO!\n");
1767 
1768 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
1769 		netdev_lock_ops(lower_dev);
1770 		netif_disable_lro(lower_dev);
1771 		netdev_unlock_ops(lower_dev);
1772 	}
1773 }
1774 
1775 /**
1776  *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1777  *	@dev: device
1778  *
1779  *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
1780  *	called under RTNL.  This is needed if Generic XDP is installed on
1781  *	the device.
1782  */
1783 static void dev_disable_gro_hw(struct net_device *dev)
1784 {
1785 	dev->wanted_features &= ~NETIF_F_GRO_HW;
1786 	netdev_update_features(dev);
1787 
1788 	if (unlikely(dev->features & NETIF_F_GRO_HW))
1789 		netdev_WARN(dev, "failed to disable GRO_HW!\n");
1790 }
1791 
1792 const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1793 {
1794 #define N(val) 						\
1795 	case NETDEV_##val:				\
1796 		return "NETDEV_" __stringify(val);
1797 	switch (cmd) {
1798 	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1799 	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1800 	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1801 	N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
1802 	N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
1803 	N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
1804 	N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1805 	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1806 	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1807 	N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
1808 	N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
1809 	N(XDP_FEAT_CHANGE)
1810 	}
1811 #undef N
1812 	return "UNKNOWN_NETDEV_EVENT";
1813 }
1814 EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1815 
1816 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1817 				   struct net_device *dev)
1818 {
1819 	struct netdev_notifier_info info = {
1820 		.dev = dev,
1821 	};
1822 
1823 	return nb->notifier_call(nb, val, &info);
1824 }
1825 
1826 static int call_netdevice_register_notifiers(struct notifier_block *nb,
1827 					     struct net_device *dev)
1828 {
1829 	int err;
1830 
1831 	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1832 	err = notifier_to_errno(err);
1833 	if (err)
1834 		return err;
1835 
1836 	if (!(dev->flags & IFF_UP))
1837 		return 0;
1838 
1839 	call_netdevice_notifier(nb, NETDEV_UP, dev);
1840 	return 0;
1841 }
1842 
1843 static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1844 						struct net_device *dev)
1845 {
1846 	if (dev->flags & IFF_UP) {
1847 		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1848 					dev);
1849 		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1850 	}
1851 	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1852 }
1853 
1854 static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1855 						 struct net *net)
1856 {
1857 	struct net_device *dev;
1858 	int err;
1859 
1860 	for_each_netdev(net, dev) {
1861 		err = call_netdevice_register_notifiers(nb, dev);
1862 		if (err)
1863 			goto rollback;
1864 	}
1865 	return 0;
1866 
1867 rollback:
1868 	for_each_netdev_continue_reverse(net, dev)
1869 		call_netdevice_unregister_notifiers(nb, dev);
1870 	return err;
1871 }
1872 
1873 static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1874 						    struct net *net)
1875 {
1876 	struct net_device *dev;
1877 
1878 	for_each_netdev(net, dev)
1879 		call_netdevice_unregister_notifiers(nb, dev);
1880 }
1881 
1882 static int dev_boot_phase = 1;
1883 
1884 /**
1885  * register_netdevice_notifier - register a network notifier block
1886  * @nb: notifier
1887  *
1888  * Register a notifier to be called when network device events occur.
1889  * The notifier passed is linked into the kernel structures and must
1890  * not be reused until it has been unregistered. A negative errno code
1891  * is returned on a failure.
1892  *
1893  * When registered, all registration and up events are replayed
1894  * to the new notifier to allow the device to have a race-free
1895  * view of the network device list.
1896  */
1897 
1898 int register_netdevice_notifier(struct notifier_block *nb)
1899 {
1900 	struct net *net;
1901 	int err;
1902 
1903 	/* Close race with setup_net() and cleanup_net() */
1904 	down_write(&pernet_ops_rwsem);
1905 
1906 	/* When RTNL is removed, we need protection for netdev_chain. */
1907 	rtnl_lock();
1908 
1909 	err = raw_notifier_chain_register(&netdev_chain, nb);
1910 	if (err)
1911 		goto unlock;
1912 	if (dev_boot_phase)
1913 		goto unlock;
1914 	for_each_net(net) {
1915 		__rtnl_net_lock(net);
1916 		err = call_netdevice_register_net_notifiers(nb, net);
1917 		__rtnl_net_unlock(net);
1918 		if (err)
1919 			goto rollback;
1920 	}
1921 
1922 unlock:
1923 	rtnl_unlock();
1924 	up_write(&pernet_ops_rwsem);
1925 	return err;
1926 
1927 rollback:
1928 	for_each_net_continue_reverse(net) {
1929 		__rtnl_net_lock(net);
1930 		call_netdevice_unregister_net_notifiers(nb, net);
1931 		__rtnl_net_unlock(net);
1932 	}
1933 
1934 	raw_notifier_chain_unregister(&netdev_chain, nb);
1935 	goto unlock;
1936 }
1937 EXPORT_SYMBOL(register_netdevice_notifier);
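
/*
 * Editor's note: a minimal usage sketch (illustrative, not part of dev.c).
 * A subsystem watching link events might register a notifier like the
 * hypothetical one below; the example_* names are assumptions.
 *
 *	static int example_event(struct notifier_block *nb,
 *				 unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			netdev_info(dev, "interface is up\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_event,
 *	};
 *
 *	err = register_netdevice_notifier(&example_nb);
 *
 * Because REGISTER and UP events are replayed at registration time, the
 * callback sees every existing device as well as future ones.
 */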
1938 
1939 /**
1940  * unregister_netdevice_notifier - unregister a network notifier block
1941  * @nb: notifier
1942  *
1943  * Unregister a notifier previously registered by
1944  * register_netdevice_notifier(). The notifier is unlinked from the
1945  * kernel structures and may then be reused. A negative errno code
1946  * is returned on a failure.
1947  *
1948  * After unregistering, unregister and down device events are synthesized
1949  * for all devices on the device list and sent to the removed notifier,
1950  * removing the need for special-case cleanup code.
1951  */
1952 
1953 int unregister_netdevice_notifier(struct notifier_block *nb)
1954 {
1955 	struct net *net;
1956 	int err;
1957 
1958 	/* Close race with setup_net() and cleanup_net() */
1959 	down_write(&pernet_ops_rwsem);
1960 	rtnl_lock();
1961 	err = raw_notifier_chain_unregister(&netdev_chain, nb);
1962 	if (err)
1963 		goto unlock;
1964 
1965 	for_each_net(net) {
1966 		__rtnl_net_lock(net);
1967 		call_netdevice_unregister_net_notifiers(nb, net);
1968 		__rtnl_net_unlock(net);
1969 	}
1970 
1971 unlock:
1972 	rtnl_unlock();
1973 	up_write(&pernet_ops_rwsem);
1974 	return err;
1975 }
1976 EXPORT_SYMBOL(unregister_netdevice_notifier);
1977 
1978 static int __register_netdevice_notifier_net(struct net *net,
1979 					     struct notifier_block *nb,
1980 					     bool ignore_call_fail)
1981 {
1982 	int err;
1983 
1984 	err = raw_notifier_chain_register(&net->netdev_chain, nb);
1985 	if (err)
1986 		return err;
1987 	if (dev_boot_phase)
1988 		return 0;
1989 
1990 	err = call_netdevice_register_net_notifiers(nb, net);
1991 	if (err && !ignore_call_fail)
1992 		goto chain_unregister;
1993 
1994 	return 0;
1995 
1996 chain_unregister:
1997 	raw_notifier_chain_unregister(&net->netdev_chain, nb);
1998 	return err;
1999 }
2000 
2001 static int __unregister_netdevice_notifier_net(struct net *net,
2002 					       struct notifier_block *nb)
2003 {
2004 	int err;
2005 
2006 	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
2007 	if (err)
2008 		return err;
2009 
2010 	call_netdevice_unregister_net_notifiers(nb, net);
2011 	return 0;
2012 }
2013 
2014 /**
2015  * register_netdevice_notifier_net - register a per-netns network notifier block
2016  * @net: network namespace
2017  * @nb: notifier
2018  *
2019  * Register a notifier to be called when network device events occur.
2020  * The notifier passed is linked into the kernel structures and must
2021  * not be reused until it has been unregistered. A negative errno code
2022  * is returned on a failure.
2023  *
2024  * When registered, all registration and up events are replayed
2025  * to the new notifier to allow the device to have a race-free
2026  * view of the network device list.
2027  */
2028 
2029 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
2030 {
2031 	int err;
2032 
2033 	rtnl_net_lock(net);
2034 	err = __register_netdevice_notifier_net(net, nb, false);
2035 	rtnl_net_unlock(net);
2036 
2037 	return err;
2038 }
2039 EXPORT_SYMBOL(register_netdevice_notifier_net);
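
/*
 * Editor's note: an illustrative sketch (not part of dev.c). The per-netns
 * variant scopes the same callback to a single namespace, e.g. init_net:
 *
 *	err = register_netdevice_notifier_net(&init_net, &example_nb);
 *	...
 *	unregister_netdevice_notifier_net(&init_net, &example_nb);
 *
 * example_nb here is the hypothetical notifier block from the sketch
 * further above.
 */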
2040 
2041 /**
2042  * unregister_netdevice_notifier_net - unregister a per-netns
2043  *                                     network notifier block
2044  * @net: network namespace
2045  * @nb: notifier
2046  *
2047  * Unregister a notifier previously registered by
2048  * register_netdevice_notifier_net(). The notifier is unlinked from the
2049  * kernel structures and may then be reused. A negative errno code
2050  * is returned on a failure.
2051  *
2052  * After unregistering, unregister and down device events are synthesized
2053  * for all devices on the device list and sent to the removed notifier,
2054  * removing the need for special-case cleanup code.
2055  */
2056 
2057 int unregister_netdevice_notifier_net(struct net *net,
2058 				      struct notifier_block *nb)
2059 {
2060 	int err;
2061 
2062 	rtnl_net_lock(net);
2063 	err = __unregister_netdevice_notifier_net(net, nb);
2064 	rtnl_net_unlock(net);
2065 
2066 	return err;
2067 }
2068 EXPORT_SYMBOL(unregister_netdevice_notifier_net);
2069 
2070 static void __move_netdevice_notifier_net(struct net *src_net,
2071 					  struct net *dst_net,
2072 					  struct notifier_block *nb)
2073 {
2074 	__unregister_netdevice_notifier_net(src_net, nb);
2075 	__register_netdevice_notifier_net(dst_net, nb, true);
2076 }
2077 
2078 static void rtnl_net_dev_lock(struct net_device *dev)
2079 {
2080 	bool again;
2081 
2082 	do {
2083 		struct net *net;
2084 
2085 		again = false;
2086 
2087 		/* netns might be being dismantled. */
2088 		rcu_read_lock();
2089 		net = dev_net_rcu(dev);
2090 		net_passive_inc(net);
2091 		rcu_read_unlock();
2092 
2093 		rtnl_net_lock(net);
2094 
2095 #ifdef CONFIG_NET_NS
2096 		/* dev might have been moved to another netns. */
2097 		if (!net_eq(net, rcu_access_pointer(dev->nd_net.net))) {
2098 			rtnl_net_unlock(net);
2099 			net_passive_dec(net);
2100 			again = true;
2101 		}
2102 #endif
2103 	} while (again);
2104 }
2105 
2106 static void rtnl_net_dev_unlock(struct net_device *dev)
2107 {
2108 	struct net *net = dev_net(dev);
2109 
2110 	rtnl_net_unlock(net);
2111 	net_passive_dec(net);
2112 }
2113 
2114 int register_netdevice_notifier_dev_net(struct net_device *dev,
2115 					struct notifier_block *nb,
2116 					struct netdev_net_notifier *nn)
2117 {
2118 	int err;
2119 
2120 	rtnl_net_dev_lock(dev);
2121 	err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
2122 	if (!err) {
2123 		nn->nb = nb;
2124 		list_add(&nn->list, &dev->net_notifier_list);
2125 	}
2126 	rtnl_net_dev_unlock(dev);
2127 
2128 	return err;
2129 }
2130 EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
2131 
2132 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
2133 					  struct notifier_block *nb,
2134 					  struct netdev_net_notifier *nn)
2135 {
2136 	int err;
2137 
2138 	rtnl_net_dev_lock(dev);
2139 	list_del(&nn->list);
2140 	err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
2141 	rtnl_net_dev_unlock(dev);
2142 
2143 	return err;
2144 }
2145 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
2146 
2147 static void move_netdevice_notifiers_dev_net(struct net_device *dev,
2148 					     struct net *net)
2149 {
2150 	struct netdev_net_notifier *nn;
2151 
2152 	list_for_each_entry(nn, &dev->net_notifier_list, list)
2153 		__move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
2154 }
2155 
2156 /**
2157  *	call_netdevice_notifiers_info - call all network notifier blocks
2158  *	@val: value passed unmodified to notifier function
2159  *	@info: notifier information data
2160  *
2161  *	Call all network notifier blocks.  Parameters and return value
2162  *	are as for raw_notifier_call_chain().
2163  */
2164 
2165 int call_netdevice_notifiers_info(unsigned long val,
2166 				  struct netdev_notifier_info *info)
2167 {
2168 	struct net *net = dev_net(info->dev);
2169 	int ret;
2170 
2171 	ASSERT_RTNL();
2172 
2173 	/* Run per-netns notifier block chain first, then run the global one.
2174 	 * Hopefully, one day, the global one is going to be removed after
2175 	 * all notifier block registrants are converted to be per-netns.
2176 	 */
2177 	ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
2178 	if (ret & NOTIFY_STOP_MASK)
2179 		return ret;
2180 	return raw_notifier_call_chain(&netdev_chain, val, info);
2181 }
2182 
2183 /**
2184  *	call_netdevice_notifiers_info_robust - call per-netns notifier blocks
2185  *	                                       and roll back on error
2186  *	@val_up: value passed unmodified to notifier function
2187  *	@val_down: value passed unmodified to the notifier function when
2188  *	           recovering from an error on @val_up
2189  *	@info: notifier information data
2190  *
2191  *	Call all per-netns network notifier blocks, but not notifier blocks on
2192  *	the global notifier chain. Parameters and return value are as for
2193  *	raw_notifier_call_chain_robust().
2194  */
2195 
2196 static int
2197 call_netdevice_notifiers_info_robust(unsigned long val_up,
2198 				     unsigned long val_down,
2199 				     struct netdev_notifier_info *info)
2200 {
2201 	struct net *net = dev_net(info->dev);
2202 
2203 	ASSERT_RTNL();
2204 
2205 	return raw_notifier_call_chain_robust(&net->netdev_chain,
2206 					      val_up, val_down, info);
2207 }
2208 
2209 static int call_netdevice_notifiers_extack(unsigned long val,
2210 					   struct net_device *dev,
2211 					   struct netlink_ext_ack *extack)
2212 {
2213 	struct netdev_notifier_info info = {
2214 		.dev = dev,
2215 		.extack = extack,
2216 	};
2217 
2218 	return call_netdevice_notifiers_info(val, &info);
2219 }
2220 
2221 /**
2222  *	call_netdevice_notifiers - call all network notifier blocks
2223  *      @val: value passed unmodified to notifier function
2224  *      @dev: net_device pointer passed unmodified to notifier function
2225  *
2226  *	Call all network notifier blocks.  Parameters and return value
2227  *	are as for raw_notifier_call_chain().
2228  */
2229 
2230 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
2231 {
2232 	return call_netdevice_notifiers_extack(val, dev, NULL);
2233 }
2234 EXPORT_SYMBOL(call_netdevice_notifiers);
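
/*
 * Editor's note: an illustrative sketch (not part of dev.c). A caller that
 * needs peers to relearn a device's address can emit an event through this
 * helper while holding RTNL:
 *
 *	rtnl_lock();
 *	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
 *	rtnl_unlock();
 *
 * This is essentially what netdev_notify_peers() does.
 */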
2235 
2236 /**
2237  *	call_netdevice_notifiers_mtu - call all network notifier blocks
2238  *	@val: value passed unmodified to notifier function
2239  *	@dev: net_device pointer passed unmodified to notifier function
2240  *	@arg: additional u32 argument passed to the notifier function
2241  *
2242  *	Call all network notifier blocks.  Parameters and return value
2243  *	are as for raw_notifier_call_chain().
2244  */
2245 static int call_netdevice_notifiers_mtu(unsigned long val,
2246 					struct net_device *dev, u32 arg)
2247 {
2248 	struct netdev_notifier_info_ext info = {
2249 		.info.dev = dev,
2250 		.ext.mtu = arg,
2251 	};
2252 
2253 	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2254 
2255 	return call_netdevice_notifiers_info(val, &info.info);
2256 }
2257 
2258 #ifdef CONFIG_NET_INGRESS
2259 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
2260 
2261 void net_inc_ingress_queue(void)
2262 {
2263 	static_branch_inc(&ingress_needed_key);
2264 }
2265 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2266 
2267 void net_dec_ingress_queue(void)
2268 {
2269 	static_branch_dec(&ingress_needed_key);
2270 }
2271 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2272 #endif
2273 
2274 #ifdef CONFIG_NET_EGRESS
2275 static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
2276 
2277 void net_inc_egress_queue(void)
2278 {
2279 	static_branch_inc(&egress_needed_key);
2280 }
2281 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2282 
2283 void net_dec_egress_queue(void)
2284 {
2285 	static_branch_dec(&egress_needed_key);
2286 }
2287 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2288 #endif
2289 
2290 #ifdef CONFIG_NET_CLS_ACT
2291 DEFINE_STATIC_KEY_FALSE(tcf_sw_enabled_key);
2292 EXPORT_SYMBOL(tcf_sw_enabled_key);
2293 #endif
2294 
2295 DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2296 EXPORT_SYMBOL(netstamp_needed_key);
2297 #ifdef CONFIG_JUMP_LABEL
2298 static atomic_t netstamp_needed_deferred;
2299 static atomic_t netstamp_wanted;
2300 static void netstamp_clear(struct work_struct *work)
2301 {
2302 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2303 	int wanted;
2304 
2305 	wanted = atomic_add_return(deferred, &netstamp_wanted);
2306 	if (wanted > 0)
2307 		static_branch_enable(&netstamp_needed_key);
2308 	else
2309 		static_branch_disable(&netstamp_needed_key);
2310 }
2311 static DECLARE_WORK(netstamp_work, netstamp_clear);
2312 #endif
2313 
2314 void net_enable_timestamp(void)
2315 {
2316 #ifdef CONFIG_JUMP_LABEL
2317 	int wanted = atomic_read(&netstamp_wanted);
2318 
2319 	while (wanted > 0) {
2320 		if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1))
2321 			return;
2322 	}
2323 	atomic_inc(&netstamp_needed_deferred);
2324 	schedule_work(&netstamp_work);
2325 #else
2326 	static_branch_inc(&netstamp_needed_key);
2327 #endif
2328 }
2329 EXPORT_SYMBOL(net_enable_timestamp);
2330 
2331 void net_disable_timestamp(void)
2332 {
2333 #ifdef CONFIG_JUMP_LABEL
2334 	int wanted = atomic_read(&netstamp_wanted);
2335 
2336 	while (wanted > 1) {
2337 		if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1))
2338 			return;
2339 	}
2340 	atomic_dec(&netstamp_needed_deferred);
2341 	schedule_work(&netstamp_work);
2342 #else
2343 	static_branch_dec(&netstamp_needed_key);
2344 #endif
2345 }
2346 EXPORT_SYMBOL(net_disable_timestamp);
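
/*
 * Editor's note: an illustrative sketch (not part of dev.c). The two calls
 * form a refcount over the netstamp_needed_key static branch, so a consumer
 * such as a packet socket pairs them across its lifetime:
 *
 *	net_enable_timestamp();		at socket creation
 *	...
 *	net_disable_timestamp();	at socket teardown
 *
 * Under CONFIG_JUMP_LABEL the static branch is flipped from a workqueue,
 * which keeps these helpers safe to call from atomic context.
 */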
2347 
2348 static inline void net_timestamp_set(struct sk_buff *skb)
2349 {
2350 	skb->tstamp = 0;
2351 	skb->tstamp_type = SKB_CLOCK_REALTIME;
2352 	if (static_branch_unlikely(&netstamp_needed_key))
2353 		skb->tstamp = ktime_get_real();
2354 }
2355 
2356 #define net_timestamp_check(COND, SKB)				\
2357 	if (static_branch_unlikely(&netstamp_needed_key)) {	\
2358 		if ((COND) && !(SKB)->tstamp)			\
2359 			(SKB)->tstamp = ktime_get_real();	\
2360 	}
2361 
2362 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2363 {
2364 	return __is_skb_forwardable(dev, skb, true);
2365 }
2366 EXPORT_SYMBOL_GPL(is_skb_forwardable);
2367 
2368 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2369 			      bool check_mtu)
2370 {
2371 	int ret = ____dev_forward_skb(dev, skb, check_mtu);
2372 
2373 	if (likely(!ret)) {
2374 		skb->protocol = eth_type_trans(skb, dev);
2375 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2376 	}
2377 
2378 	return ret;
2379 }
2380 
2381 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2382 {
2383 	return __dev_forward_skb2(dev, skb, true);
2384 }
2385 EXPORT_SYMBOL_GPL(__dev_forward_skb);
2386 
2387 /**
2388  * dev_forward_skb - loopback an skb to another netif
2390  * @dev: destination network device
2391  * @skb: buffer to forward
2392  *
2393  * return values:
2394  *	NET_RX_SUCCESS	(no congestion)
2395  *	NET_RX_DROP     (packet was dropped, but freed)
2396  *
2397  * dev_forward_skb can be used for injecting an skb from the
2398  * start_xmit function of one device into the receive queue
2399  * of another device.
2400  *
2401  * The receiving device may be in another namespace, so
2402  * we have to clear all information in the skb that could
2403  * impact namespace isolation.
2404  */
2405 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2406 {
2407 	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2408 }
2409 EXPORT_SYMBOL_GPL(dev_forward_skb);
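
/*
 * Editor's note: an illustrative sketch (not part of dev.c). A veth-style
 * driver could loop frames to its peer from ndo_start_xmit; example_* names
 * are assumptions:
 *
 *	static netdev_tx_t example_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct net_device *peer = example_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) == NET_RX_DROP)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;	the skb is consumed either way
 *	}
 */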
2410 
2411 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2412 {
2413 	return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
2414 }
2415 
2416 static inline int deliver_skb(struct sk_buff *skb,
2417 			      struct packet_type *pt_prev,
2418 			      struct net_device *orig_dev)
2419 {
2420 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2421 		return -ENOMEM;
2422 	refcount_inc(&skb->users);
2423 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2424 }
2425 
2426 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2427 					  struct packet_type **pt,
2428 					  struct net_device *orig_dev,
2429 					  __be16 type,
2430 					  struct list_head *ptype_list)
2431 {
2432 	struct packet_type *ptype, *pt_prev = *pt;
2433 
2434 	list_for_each_entry_rcu(ptype, ptype_list, list) {
2435 		if (ptype->type != type)
2436 			continue;
2437 		if (pt_prev)
2438 			deliver_skb(skb, pt_prev, orig_dev);
2439 		pt_prev = ptype;
2440 	}
2441 	*pt = pt_prev;
2442 }
2443 
2444 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2445 {
2446 	if (!ptype->af_packet_priv || !skb->sk)
2447 		return false;
2448 
2449 	if (ptype->id_match)
2450 		return ptype->id_match(ptype, skb->sk);
2451 	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2452 		return true;
2453 
2454 	return false;
2455 }
2456 
2457 /**
2458  * dev_nit_active_rcu - return true if any network interface taps are in use
2459  * @dev: network device to check for the presence of taps
2460  *
2461  * The caller must hold the RCU lock.
2463  */
2464 bool dev_nit_active_rcu(const struct net_device *dev)
2465 {
2466 	/* Callers may hold either RCU or RCU BH lock */
2467 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
2468 
2469 	return !list_empty(&dev_net(dev)->ptype_all) ||
2470 	       !list_empty(&dev->ptype_all);
2471 }
2472 EXPORT_SYMBOL_GPL(dev_nit_active_rcu);
2473 
2474 /*
2475  *	Support routine. Sends outgoing frames to any network
2476  *	taps currently in use.
2477  */
2478 
2479 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2480 {
2481 	struct packet_type *ptype, *pt_prev = NULL;
2482 	struct list_head *ptype_list;
2483 	struct sk_buff *skb2 = NULL;
2484 
2485 	rcu_read_lock();
2486 	ptype_list = &dev_net_rcu(dev)->ptype_all;
2487 again:
2488 	list_for_each_entry_rcu(ptype, ptype_list, list) {
2489 		if (READ_ONCE(ptype->ignore_outgoing))
2490 			continue;
2491 
2492 		/* Never send packets back to the socket
2493 		 * they originated from - MvS ([email protected])
2494 		 */
2495 		if (skb_loop_sk(ptype, skb))
2496 			continue;
2497 
2498 		if (pt_prev) {
2499 			deliver_skb(skb2, pt_prev, skb->dev);
2500 			pt_prev = ptype;
2501 			continue;
2502 		}
2503 
2504 		/* need to clone skb, done only once */
2505 		skb2 = skb_clone(skb, GFP_ATOMIC);
2506 		if (!skb2)
2507 			goto out_unlock;
2508 
2509 		net_timestamp_set(skb2);
2510 
2511 		/* skb->nh should be correctly
2512 		 * set by sender, so that the second statement is
2513 		 * just protection against buggy protocols.
2514 		 */
2515 		skb_reset_mac_header(skb2);
2516 
2517 		if (skb_network_header(skb2) < skb2->data ||
2518 		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2519 			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2520 					     ntohs(skb2->protocol),
2521 					     dev->name);
2522 			skb_reset_network_header(skb2);
2523 		}
2524 
2525 		skb2->transport_header = skb2->network_header;
2526 		skb2->pkt_type = PACKET_OUTGOING;
2527 		pt_prev = ptype;
2528 	}
2529 
2530 	if (ptype_list != &dev->ptype_all) {
2531 		ptype_list = &dev->ptype_all;
2532 		goto again;
2533 	}
2534 out_unlock:
2535 	if (pt_prev) {
2536 		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2537 			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2538 		else
2539 			kfree_skb(skb2);
2540 	}
2541 	rcu_read_unlock();
2542 }
2543 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
2544 
2545 /**
2546  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2547  * @dev: Network device
2548  * @txq: number of queues available
2549  *
2550  * If real_num_tx_queues is changed the tc mappings may no longer be
2551  * If real_num_tx_queues is changed the tc mappings may no longer be
2552  * valid. To resolve this, verify that each tc mapping remains valid,
2553  * and if not, NULL the mapping. Once no priorities map to an
2554  * offset/count pair, it will no longer be used. In the worst case, if
2555  * TC0 is invalid, nothing can be done, so priority mappings are
2556  * disabled entirely. It is expected that drivers will fix this mapping
2557  * if they can before calling netif_set_real_num_tx_queues.
2558 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2559 {
2560 	int i;
2561 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2562 
2563 	/* If TC0 is invalidated disable TC mapping */
2564 	if (tc->offset + tc->count > txq) {
2565 		netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2566 		dev->num_tc = 0;
2567 		return;
2568 	}
2569 
2570 	/* Invalidated prio to tc mappings set to TC0 */
2571 	for (i = 1; i < TC_BITMASK + 1; i++) {
2572 		int q = netdev_get_prio_tc_map(dev, i);
2573 
2574 		tc = &dev->tc_to_txq[q];
2575 		if (tc->offset + tc->count > txq) {
2576 			netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2577 				    i, q);
2578 			netdev_set_prio_tc_map(dev, i, 0);
2579 		}
2580 	}
2581 }
2582 
2583 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2584 {
2585 	if (dev->num_tc) {
2586 		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2587 		int i;
2588 
2589 		/* walk through the TCs and see if it falls into any of them */
2590 		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2591 			if ((txq - tc->offset) < tc->count)
2592 				return i;
2593 		}
2594 
2595 		/* didn't find it, just return -1 to indicate no match */
2596 		return -1;
2597 	}
2598 
2599 	return 0;
2600 }
2601 EXPORT_SYMBOL(netdev_txq_to_tc);
2602 
2603 #ifdef CONFIG_XPS
2604 static struct static_key xps_needed __read_mostly;
2605 static struct static_key xps_rxqs_needed __read_mostly;
2606 static DEFINE_MUTEX(xps_map_mutex);
2607 #define xmap_dereference(P)		\
2608 	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2609 
2610 static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2611 			     struct xps_dev_maps *old_maps, int tci, u16 index)
2612 {
2613 	struct xps_map *map = NULL;
2614 	int pos;
2615 
2616 	map = xmap_dereference(dev_maps->attr_map[tci]);
2617 	if (!map)
2618 		return false;
2619 
2620 	for (pos = map->len; pos--;) {
2621 		if (map->queues[pos] != index)
2622 			continue;
2623 
2624 		if (map->len > 1) {
2625 			map->queues[pos] = map->queues[--map->len];
2626 			break;
2627 		}
2628 
2629 		if (old_maps)
2630 			RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
2631 		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2632 		kfree_rcu(map, rcu);
2633 		return false;
2634 	}
2635 
2636 	return true;
2637 }
2638 
2639 static bool remove_xps_queue_cpu(struct net_device *dev,
2640 				 struct xps_dev_maps *dev_maps,
2641 				 int cpu, u16 offset, u16 count)
2642 {
2643 	int num_tc = dev_maps->num_tc;
2644 	bool active = false;
2645 	int tci;
2646 
2647 	for (tci = cpu * num_tc; num_tc--; tci++) {
2648 		int i, j;
2649 
2650 		for (i = count, j = offset; i--; j++) {
2651 			if (!remove_xps_queue(dev_maps, NULL, tci, j))
2652 				break;
2653 		}
2654 
2655 		active |= i < 0;
2656 	}
2657 
2658 	return active;
2659 }
2660 
2661 static void reset_xps_maps(struct net_device *dev,
2662 			   struct xps_dev_maps *dev_maps,
2663 			   enum xps_map_type type)
2664 {
2665 	static_key_slow_dec_cpuslocked(&xps_needed);
2666 	if (type == XPS_RXQS)
2667 		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2668 
2669 	RCU_INIT_POINTER(dev->xps_maps[type], NULL);
2670 
2671 	kfree_rcu(dev_maps, rcu);
2672 }
2673 
2674 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
2675 			   u16 offset, u16 count)
2676 {
2677 	struct xps_dev_maps *dev_maps;
2678 	bool active = false;
2679 	int i, j;
2680 
2681 	dev_maps = xmap_dereference(dev->xps_maps[type]);
2682 	if (!dev_maps)
2683 		return;
2684 
2685 	for (j = 0; j < dev_maps->nr_ids; j++)
2686 		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
2687 	if (!active)
2688 		reset_xps_maps(dev, dev_maps, type);
2689 
2690 	if (type == XPS_CPUS) {
2691 		for (i = offset + (count - 1); count--; i--)
2692 			netdev_queue_numa_node_write(
2693 				netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
2694 	}
2695 }
2696 
2697 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2698 				   u16 count)
2699 {
2700 	if (!static_key_false(&xps_needed))
2701 		return;
2702 
2703 	cpus_read_lock();
2704 	mutex_lock(&xps_map_mutex);
2705 
2706 	if (static_key_false(&xps_rxqs_needed))
2707 		clean_xps_maps(dev, XPS_RXQS, offset, count);
2708 
2709 	clean_xps_maps(dev, XPS_CPUS, offset, count);
2710 
2711 	mutex_unlock(&xps_map_mutex);
2712 	cpus_read_unlock();
2713 }
2714 
2715 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2716 {
2717 	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2718 }
2719 
2720 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2721 				      u16 index, bool is_rxqs_map)
2722 {
2723 	struct xps_map *new_map;
2724 	int alloc_len = XPS_MIN_MAP_ALLOC;
2725 	int i, pos;
2726 
2727 	for (pos = 0; map && pos < map->len; pos++) {
2728 		if (map->queues[pos] != index)
2729 			continue;
2730 		return map;
2731 	}
2732 
2733 	/* Need to add tx-queue to this CPU's/rx-queue's existing map */
2734 	if (map) {
2735 		if (pos < map->alloc_len)
2736 			return map;
2737 
2738 		alloc_len = map->alloc_len * 2;
2739 	}
2740 
2741 	/* Need to allocate a new map to store the tx-queue on this
2742 	 * CPU's/rx-queue's map.
2743 	 */
2744 	if (is_rxqs_map)
2745 		new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2746 	else
2747 		new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2748 				       cpu_to_node(attr_index));
2749 	if (!new_map)
2750 		return NULL;
2751 
2752 	for (i = 0; i < pos; i++)
2753 		new_map->queues[i] = map->queues[i];
2754 	new_map->alloc_len = alloc_len;
2755 	new_map->len = pos;
2756 
2757 	return new_map;
2758 }
2759 
2760 /* Copy xps maps at a given index */
2761 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
2762 			      struct xps_dev_maps *new_dev_maps, int index,
2763 			      int tc, bool skip_tc)
2764 {
2765 	int i, tci = index * dev_maps->num_tc;
2766 	struct xps_map *map;
2767 
2768 	/* copy maps belonging to foreign traffic classes */
2769 	for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2770 		if (i == tc && skip_tc)
2771 			continue;
2772 
2773 		/* fill in the new device map from the old device map */
2774 		map = xmap_dereference(dev_maps->attr_map[tci]);
2775 		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2776 	}
2777 }
2778 
2779 /* Must be called under cpus_read_lock */
2780 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2781 			  u16 index, enum xps_map_type type)
2782 {
2783 	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
2784 	const unsigned long *online_mask = NULL;
2785 	bool active = false, copy = false;
2786 	int i, j, tci, numa_node_id = -2;
2787 	int maps_sz, num_tc = 1, tc = 0;
2788 	struct xps_map *map, *new_map;
2789 	unsigned int nr_ids;
2790 
2791 	WARN_ON_ONCE(index >= dev->num_tx_queues);
2792 
2793 	if (dev->num_tc) {
2794 		/* Do not allow XPS on subordinate device directly */
2795 		num_tc = dev->num_tc;
2796 		if (num_tc < 0)
2797 			return -EINVAL;
2798 
2799 		/* If queue belongs to subordinate dev use its map */
2800 		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2801 
2802 		tc = netdev_txq_to_tc(dev, index);
2803 		if (tc < 0)
2804 			return -EINVAL;
2805 	}
2806 
2807 	mutex_lock(&xps_map_mutex);
2808 
2809 	dev_maps = xmap_dereference(dev->xps_maps[type]);
2810 	if (type == XPS_RXQS) {
2811 		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2812 		nr_ids = dev->num_rx_queues;
2813 	} else {
2814 		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2815 		if (num_possible_cpus() > 1)
2816 			online_mask = cpumask_bits(cpu_online_mask);
2817 		nr_ids = nr_cpu_ids;
2818 	}
2819 
2820 	if (maps_sz < L1_CACHE_BYTES)
2821 		maps_sz = L1_CACHE_BYTES;
2822 
2823 	/* The old dev_maps could be larger or smaller than the one we're
2824 	 * setting up now, as dev->num_tc or nr_ids could have been updated in
2825 	 * between. We could try to be smart, but let's be safe instead and only
2826 	 * copy foreign traffic classes if the two map sizes match.
2827 	 */
2828 	if (dev_maps &&
2829 	    dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
2830 		copy = true;
2831 
2832 	/* allocate memory for queue storage */
2833 	for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2834 	     j < nr_ids;) {
2835 		if (!new_dev_maps) {
2836 			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2837 			if (!new_dev_maps) {
2838 				mutex_unlock(&xps_map_mutex);
2839 				return -ENOMEM;
2840 			}
2841 
2842 			new_dev_maps->nr_ids = nr_ids;
2843 			new_dev_maps->num_tc = num_tc;
2844 		}
2845 
2846 		tci = j * num_tc + tc;
2847 		map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
2848 
2849 		map = expand_xps_map(map, j, index, type == XPS_RXQS);
2850 		if (!map)
2851 			goto error;
2852 
2853 		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2854 	}
2855 
2856 	if (!new_dev_maps)
2857 		goto out_no_new_maps;
2858 
2859 	if (!dev_maps) {
2860 		/* Increment static keys at most once per type */
2861 		static_key_slow_inc_cpuslocked(&xps_needed);
2862 		if (type == XPS_RXQS)
2863 			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2864 	}
2865 
2866 	for (j = 0; j < nr_ids; j++) {
2867 		bool skip_tc = false;
2868 
2869 		tci = j * num_tc + tc;
2870 		if (netif_attr_test_mask(j, mask, nr_ids) &&
2871 		    netif_attr_test_online(j, online_mask, nr_ids)) {
2872 			/* add tx-queue to CPU/rx-queue maps */
2873 			int pos = 0;
2874 
2875 			skip_tc = true;
2876 
2877 			map = xmap_dereference(new_dev_maps->attr_map[tci]);
2878 			while ((pos < map->len) && (map->queues[pos] != index))
2879 				pos++;
2880 
2881 			if (pos == map->len)
2882 				map->queues[map->len++] = index;
2883 #ifdef CONFIG_NUMA
2884 			if (type == XPS_CPUS) {
2885 				if (numa_node_id == -2)
2886 					numa_node_id = cpu_to_node(j);
2887 				else if (numa_node_id != cpu_to_node(j))
2888 					numa_node_id = -1;
2889 			}
2890 #endif
2891 		}
2892 
2893 		if (copy)
2894 			xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
2895 					  skip_tc);
2896 	}
2897 
2898 	rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
2899 
2900 	/* Cleanup old maps */
2901 	if (!dev_maps)
2902 		goto out_no_old_maps;
2903 
2904 	for (j = 0; j < dev_maps->nr_ids; j++) {
2905 		for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
2906 			map = xmap_dereference(dev_maps->attr_map[tci]);
2907 			if (!map)
2908 				continue;
2909 
2910 			if (copy) {
2911 				new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2912 				if (map == new_map)
2913 					continue;
2914 			}
2915 
2916 			RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2917 			kfree_rcu(map, rcu);
2918 		}
2919 	}
2920 
2921 	old_dev_maps = dev_maps;
2922 
2923 out_no_old_maps:
2924 	dev_maps = new_dev_maps;
2925 	active = true;
2926 
2927 out_no_new_maps:
2928 	if (type == XPS_CPUS)
2929 		/* update Tx queue numa node */
2930 		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2931 					     (numa_node_id >= 0) ?
2932 					     numa_node_id : NUMA_NO_NODE);
2933 
2934 	if (!dev_maps)
2935 		goto out_no_maps;
2936 
2937 	/* removes tx-queue from unused CPUs/rx-queues */
2938 	for (j = 0; j < dev_maps->nr_ids; j++) {
2939 		tci = j * dev_maps->num_tc;
2940 
2941 		for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2942 			if (i == tc &&
2943 			    netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
2944 			    netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
2945 				continue;
2946 
2947 			active |= remove_xps_queue(dev_maps,
2948 						   copy ? old_dev_maps : NULL,
2949 						   tci, index);
2950 		}
2951 	}
2952 
2953 	if (old_dev_maps)
2954 		kfree_rcu(old_dev_maps, rcu);
2955 
2956 	/* free map if not active */
2957 	if (!active)
2958 		reset_xps_maps(dev, dev_maps, type);
2959 
2960 out_no_maps:
2961 	mutex_unlock(&xps_map_mutex);
2962 
2963 	return 0;
2964 error:
2965 	/* remove any maps that we added */
2966 	for (j = 0; j < nr_ids; j++) {
2967 		for (i = num_tc, tci = j * num_tc; i--; tci++) {
2968 			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2969 			map = copy ?
2970 			      xmap_dereference(dev_maps->attr_map[tci]) :
2971 			      NULL;
2972 			if (new_map && new_map != map)
2973 				kfree(new_map);
2974 		}
2975 	}
2976 
2977 	mutex_unlock(&xps_map_mutex);
2978 
2979 	kfree(new_dev_maps);
2980 	return -ENOMEM;
2981 }
2982 EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2983 
2984 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2985 			u16 index)
2986 {
2987 	int ret;
2988 
2989 	cpus_read_lock();
2990 	ret =  __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
2991 	cpus_read_unlock();
2992 
2993 	return ret;
2994 }
2995 EXPORT_SYMBOL(netif_set_xps_queue);
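
/*
 * Editor's note: an illustrative sketch (not part of dev.c). A driver can
 * pin each Tx queue to one CPU, a common XPS affinity setup:
 *
 *	cpumask_var_t mask;
 *	int i;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	for (i = 0; i < dev->real_num_tx_queues; i++) {
 *		cpumask_clear(mask);
 *		cpumask_set_cpu(cpumask_local_spread(i, NUMA_NO_NODE), mask);
 *		netif_set_xps_queue(dev, mask, i);
 *	}
 *	free_cpumask_var(mask);
 */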
2996 
2997 #endif
2998 static void netdev_unbind_all_sb_channels(struct net_device *dev)
2999 {
3000 	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
3001 
3002 	/* Unbind any subordinate channels */
3003 	while (txq-- != &dev->_tx[0]) {
3004 		if (txq->sb_dev)
3005 			netdev_unbind_sb_channel(dev, txq->sb_dev);
3006 	}
3007 }
3008 
3009 void netdev_reset_tc(struct net_device *dev)
3010 {
3011 #ifdef CONFIG_XPS
3012 	netif_reset_xps_queues_gt(dev, 0);
3013 #endif
3014 	netdev_unbind_all_sb_channels(dev);
3015 
3016 	/* Reset TC configuration of device */
3017 	dev->num_tc = 0;
3018 	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
3019 	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
3020 }
3021 EXPORT_SYMBOL(netdev_reset_tc);
3022 
3023 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
3024 {
3025 	if (tc >= dev->num_tc)
3026 		return -EINVAL;
3027 
3028 #ifdef CONFIG_XPS
3029 	netif_reset_xps_queues(dev, offset, count);
3030 #endif
3031 	dev->tc_to_txq[tc].count = count;
3032 	dev->tc_to_txq[tc].offset = offset;
3033 	return 0;
3034 }
3035 EXPORT_SYMBOL(netdev_set_tc_queue);
3036 
3037 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
3038 {
3039 	if (num_tc > TC_MAX_QUEUE)
3040 		return -EINVAL;
3041 
3042 #ifdef CONFIG_XPS
3043 	netif_reset_xps_queues_gt(dev, 0);
3044 #endif
3045 	netdev_unbind_all_sb_channels(dev);
3046 
3047 	dev->num_tc = num_tc;
3048 	return 0;
3049 }
3050 EXPORT_SYMBOL(netdev_set_num_tc);
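
/*
 * Editor's note: an illustrative sketch (not part of dev.c). A driver with
 * eight Tx queues might split them into two traffic classes and map skb
 * priorities onto them:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	TC0: queues 0-3
 *	netdev_set_tc_queue(dev, 1, 4, 4);	TC1: queues 4-7
 *	netdev_set_prio_tc_map(dev, 0, 0);	priority 0 -> TC0
 *	netdev_set_prio_tc_map(dev, 1, 1);	priority 1 -> TC1
 */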
3051 
3052 void netdev_unbind_sb_channel(struct net_device *dev,
3053 			      struct net_device *sb_dev)
3054 {
3055 	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
3056 
3057 #ifdef CONFIG_XPS
3058 	netif_reset_xps_queues_gt(sb_dev, 0);
3059 #endif
3060 	memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
3061 	memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
3062 
3063 	while (txq-- != &dev->_tx[0]) {
3064 		if (txq->sb_dev == sb_dev)
3065 			txq->sb_dev = NULL;
3066 	}
3067 }
3068 EXPORT_SYMBOL(netdev_unbind_sb_channel);
3069 
3070 int netdev_bind_sb_channel_queue(struct net_device *dev,
3071 				 struct net_device *sb_dev,
3072 				 u8 tc, u16 count, u16 offset)
3073 {
3074 	/* Make certain the sb_dev and dev are already configured */
3075 	if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
3076 		return -EINVAL;
3077 
3078 	/* We cannot hand out queues we don't have */
3079 	if ((offset + count) > dev->real_num_tx_queues)
3080 		return -EINVAL;
3081 
3082 	/* Record the mapping */
3083 	sb_dev->tc_to_txq[tc].count = count;
3084 	sb_dev->tc_to_txq[tc].offset = offset;
3085 
3086 	/* Provide a way for Tx queue to find the tc_to_txq map or
3087 	 * XPS map for itself.
3088 	 */
3089 	while (count--)
3090 		netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
3091 
3092 	return 0;
3093 }
3094 EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
3095 
3096 int netdev_set_sb_channel(struct net_device *dev, u16 channel)
3097 {
3098 	/* Do not use a multiqueue device to represent a subordinate channel */
3099 	if (netif_is_multiqueue(dev))
3100 		return -ENODEV;
3101 
3102 	/* We allow channels 1 - 32767 to be used for subordinate channels.
3103 	 * Channel 0 is meant to be "native" mode and used only to represent
3104 	 * the main root device. We allow writing 0 to reset the device back
3105 	 * to normal mode after being used as a subordinate channel.
3106 	 */
3107 	if (channel > S16_MAX)
3108 		return -EINVAL;
3109 
3110 	dev->num_tc = -channel;
3111 
3112 	return 0;
3113 }
3114 EXPORT_SYMBOL(netdev_set_sb_channel);
3115 
3116 /*
3117  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
3118  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
3119  */
3120 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
3121 {
3122 	bool disabling;
3123 	int rc;
3124 
3125 	disabling = txq < dev->real_num_tx_queues;
3126 
3127 	if (txq < 1 || txq > dev->num_tx_queues)
3128 		return -EINVAL;
3129 
3130 	if (dev->reg_state == NETREG_REGISTERED ||
3131 	    dev->reg_state == NETREG_UNREGISTERING) {
3132 		ASSERT_RTNL();
3133 
3134 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
3135 						  txq);
3136 		if (rc)
3137 			return rc;
3138 
3139 		if (dev->num_tc)
3140 			netif_setup_tc(dev, txq);
3141 
3142 		net_shaper_set_real_num_tx_queues(dev, txq);
3143 
3144 		dev_qdisc_change_real_num_tx(dev, txq);
3145 
3146 		dev->real_num_tx_queues = txq;
3147 
3148 		if (disabling) {
3149 			synchronize_net();
3150 			qdisc_reset_all_tx_gt(dev, txq);
3151 #ifdef CONFIG_XPS
3152 			netif_reset_xps_queues_gt(dev, txq);
3153 #endif
3154 		}
3155 	} else {
3156 		dev->real_num_tx_queues = txq;
3157 	}
3158 
3159 	return 0;
3160 }
3161 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
3162 
3163 #ifdef CONFIG_SYSFS
3164 /**
3165  *	netif_set_real_num_rx_queues - set actual number of RX queues used
3166  *	@dev: Network device
3167  *	@rxq: Actual number of RX queues
3168  *
3169  *	This must be called either with the rtnl_lock held or before
3170  *	registration of the net device.  Returns 0 on success, or a
3171  *	negative error code.  If called before registration, it always
3172  *	succeeds.
3173  */
3174 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
3175 {
3176 	int rc;
3177 
3178 	if (rxq < 1 || rxq > dev->num_rx_queues)
3179 		return -EINVAL;
3180 
3181 	if (dev->reg_state == NETREG_REGISTERED) {
3182 		ASSERT_RTNL();
3183 
3184 		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
3185 						  rxq);
3186 		if (rc)
3187 			return rc;
3188 	}
3189 
3190 	dev->real_num_rx_queues = rxq;
3191 	return 0;
3192 }
3193 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
3194 #endif
3195 
3196 /**
3197  *	netif_set_real_num_queues - set actual number of RX and TX queues used
3198  *	@dev: Network device
3199  *	@txq: Actual number of TX queues
3200  *	@rxq: Actual number of RX queues
3201  *
3202  *	Set the real number of both TX and RX queues.
3203  *	Does nothing if the number of queues is already correct.
3204  */
3205 int netif_set_real_num_queues(struct net_device *dev,
3206 			      unsigned int txq, unsigned int rxq)
3207 {
3208 	unsigned int old_rxq = dev->real_num_rx_queues;
3209 	int err;
3210 
3211 	if (txq < 1 || txq > dev->num_tx_queues ||
3212 	    rxq < 1 || rxq > dev->num_rx_queues)
3213 		return -EINVAL;
3214 
3215 	/* Start from increases, so the error path only does decreases -
3216 	 * decreases can't fail.
3217 	 */
3218 	if (rxq > dev->real_num_rx_queues) {
3219 		err = netif_set_real_num_rx_queues(dev, rxq);
3220 		if (err)
3221 			return err;
3222 	}
3223 	if (txq > dev->real_num_tx_queues) {
3224 		err = netif_set_real_num_tx_queues(dev, txq);
3225 		if (err)
3226 			goto undo_rx;
3227 	}
3228 	if (rxq < dev->real_num_rx_queues)
3229 		WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
3230 	if (txq < dev->real_num_tx_queues)
3231 		WARN_ON(netif_set_real_num_tx_queues(dev, txq));
3232 
3233 	return 0;
3234 undo_rx:
3235 	WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
3236 	return err;
3237 }
3238 EXPORT_SYMBOL(netif_set_real_num_queues);
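
/*
 * Editor's note: an illustrative sketch (not part of dev.c). An ethtool
 * .set_channels handler typically lands here after reprogramming the
 * hardware; the example_* names are assumptions:
 *
 *	static int example_set_channels(struct net_device *dev,
 *					struct ethtool_channels *ch)
 *	{
 *		int err = example_hw_reconfig(dev, ch->combined_count);
 *
 *		if (err)
 *			return err;
 *		return netif_set_real_num_queues(dev, ch->combined_count,
 *						 ch->combined_count);
 *	}
 *
 * Increases are applied first so that the error path only ever shrinks
 * queue counts, which cannot fail.
 */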
3239 
3240 /**
3241  * netif_set_tso_max_size() - set the max size of TSO frames supported
3242  * @dev:	netdev to update
3243  * @size:	max skb->len of a TSO frame
3244  *
3245  * Set the limit on the size of TSO super-frames the device can handle.
3246  * Unless explicitly set the stack will assume the value of
3247  * %GSO_LEGACY_MAX_SIZE.
3248  */
3249 void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
3250 {
3251 	dev->tso_max_size = min(GSO_MAX_SIZE, size);
3252 	if (size < READ_ONCE(dev->gso_max_size))
3253 		netif_set_gso_max_size(dev, size);
3254 	if (size < READ_ONCE(dev->gso_ipv4_max_size))
3255 		netif_set_gso_ipv4_max_size(dev, size);
3256 }
3257 EXPORT_SYMBOL(netif_set_tso_max_size);
3258 
3259 /**
3260  * netif_set_tso_max_segs() - set the max number of segs supported for TSO
3261  * @dev:	netdev to update
3262  * @segs:	max number of TCP segments
3263  *
3264  * Set the limit on the number of TCP segments the device can generate from
3265  * a single TSO super-frame.
3266  * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS.
3267  */
3268 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs)
3269 {
3270 	dev->tso_max_segs = segs;
3271 	if (segs < READ_ONCE(dev->gso_max_segs))
3272 		netif_set_gso_max_segs(dev, segs);
3273 }
3274 EXPORT_SYMBOL(netif_set_tso_max_segs);
3275 
3276 /**
3277  * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper
3278  * @to:		netdev to update
3279  * @from:	netdev from which to copy the limits
3280  */
3281 void netif_inherit_tso_max(struct net_device *to, const struct net_device *from)
3282 {
3283 	netif_set_tso_max_size(to, from->tso_max_size);
3284 	netif_set_tso_max_segs(to, from->tso_max_segs);
3285 }
3286 EXPORT_SYMBOL(netif_inherit_tso_max);
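
/*
 * Editor's note: an illustrative sketch (not part of dev.c). A driver caps
 * the hardware limits at probe time, and a stacked device adopts the limits
 * of its lower device:
 *
 *	netif_set_tso_max_size(dev, 65536);	HW tops out at 64 KB frames
 *	netif_set_tso_max_segs(dev, 64);
 *
 *	netif_inherit_tso_max(upper_dev, lower_dev);
 */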
3287 
3288 /**
3289  * netif_get_num_default_rss_queues - default number of RSS queues
3290  *
3291  * Default value is the number of physical cores if there are only 1 or 2, or
3292  * The default is the number of physical cores if there are only 1 or 2, or
3293  * half the number of physical cores if there are more.
3294 int netif_get_num_default_rss_queues(void)
3295 {
3296 	cpumask_var_t cpus;
3297 	int cpu, count = 0;
3298 
3299 	if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL)))
3300 		return 1;
3301 
3302 	cpumask_copy(cpus, cpu_online_mask);
3303 	for_each_cpu(cpu, cpus) {
3304 		++count;
3305 		cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
3306 	}
3307 	free_cpumask_var(cpus);
3308 
3309 	return count > 2 ? DIV_ROUND_UP(count, 2) : count;
3310 }
3311 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
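
/*
 * Editor's note: an illustrative sketch (not part of dev.c). Drivers
 * usually clamp this default against what the hardware supports:
 *
 *	num_queues = min_t(int, netif_get_num_default_rss_queues(),
 *			   example_hw_max_queues);	hypothetical limit
 */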
3312 
3313 static void __netif_reschedule(struct Qdisc *q)
3314 {
3315 	struct softnet_data *sd;
3316 	unsigned long flags;
3317 
3318 	local_irq_save(flags);
3319 	sd = this_cpu_ptr(&softnet_data);
3320 	q->next_sched = NULL;
3321 	*sd->output_queue_tailp = q;
3322 	sd->output_queue_tailp = &q->next_sched;
3323 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3324 	local_irq_restore(flags);
3325 }
3326 
3327 void __netif_schedule(struct Qdisc *q)
3328 {
3329 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3330 		__netif_reschedule(q);
3331 }
3332 EXPORT_SYMBOL(__netif_schedule);
3333 
3334 struct dev_kfree_skb_cb {
3335 	enum skb_drop_reason reason;
3336 };
3337 
3338 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
3339 {
3340 	return (struct dev_kfree_skb_cb *)skb->cb;
3341 }
3342 
3343 void netif_schedule_queue(struct netdev_queue *txq)
3344 {
3345 	rcu_read_lock();
3346 	if (!netif_xmit_stopped(txq)) {
3347 		struct Qdisc *q = rcu_dereference(txq->qdisc);
3348 
3349 		__netif_schedule(q);
3350 	}
3351 	rcu_read_unlock();
3352 }
3353 EXPORT_SYMBOL(netif_schedule_queue);
3354 
3355 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3356 {
3357 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3358 		struct Qdisc *q;
3359 
3360 		rcu_read_lock();
3361 		q = rcu_dereference(dev_queue->qdisc);
3362 		__netif_schedule(q);
3363 		rcu_read_unlock();
3364 	}
3365 }
3366 EXPORT_SYMBOL(netif_tx_wake_queue);
3367 
3368 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3369 {
3370 	unsigned long flags;
3371 
3372 	if (unlikely(!skb))
3373 		return;
3374 
3375 	if (likely(refcount_read(&skb->users) == 1)) {
3376 		smp_rmb();
3377 		refcount_set(&skb->users, 0);
3378 	} else if (likely(!refcount_dec_and_test(&skb->users))) {
3379 		return;
3380 	}
3381 	get_kfree_skb_cb(skb)->reason = reason;
3382 	local_irq_save(flags);
3383 	skb->next = __this_cpu_read(softnet_data.completion_queue);
3384 	__this_cpu_write(softnet_data.completion_queue, skb);
3385 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3386 	local_irq_restore(flags);
3387 }
3388 EXPORT_SYMBOL(dev_kfree_skb_irq_reason);
3389 
3390 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3391 {
3392 	if (in_hardirq() || irqs_disabled())
3393 		dev_kfree_skb_irq_reason(skb, reason);
3394 	else
3395 		kfree_skb_reason(skb, reason);
3396 }
3397 EXPORT_SYMBOL(dev_kfree_skb_any_reason);
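
/*
 * Editor's note: an illustrative sketch (not part of dev.c). Drivers call
 * the dev_kfree_skb_any() / dev_consume_skb_any() wrappers from paths that
 * may run in either hard-IRQ or process context, e.g. a Tx completion
 * handler; example_tx_ok is a hypothetical status flag:
 *
 *	if (example_tx_ok)
 *		dev_consume_skb_any(skb);	transmitted, not a drop
 *	else
 *		dev_kfree_skb_any(skb);		counted as a drop
 */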
3398 
3400 /**
3401  * netif_device_detach - mark device as removed
3402  * @dev: network device
3403  *
3404  * Mark device as removed from the system and therefore no longer available.
3405  */
3406 void netif_device_detach(struct net_device *dev)
3407 {
3408 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3409 	    netif_running(dev)) {
3410 		netif_tx_stop_all_queues(dev);
3411 	}
3412 }
3413 EXPORT_SYMBOL(netif_device_detach);
3414 
3415 /**
3416  * netif_device_attach - mark device as attached
3417  * @dev: network device
3418  *
3419  * Mark device as attached to the system and restart it if needed.
3420  */
3421 void netif_device_attach(struct net_device *dev)
3422 {
3423 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3424 	    netif_running(dev)) {
3425 		netif_tx_wake_all_queues(dev);
3426 		netdev_watchdog_up(dev);
3427 	}
3428 }
3429 EXPORT_SYMBOL(netif_device_attach);
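
/*
 * Editor's note: an illustrative sketch (not part of dev.c). The classic
 * pairing is a driver's suspend/resume path; the example_* names are
 * assumptions:
 *
 *	static int example_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);
 *		example_hw_power_down(dev);
 *		return 0;
 *	}
 *
 *	static int example_resume(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		example_hw_power_up(dev);
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */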
3430 
3431 /*
3432  * Returns a Tx hash based on the given packet descriptor and the number of
3433  * Tx queues to be used as a distribution range.
3434  */
3435 static u16 skb_tx_hash(const struct net_device *dev,
3436 		       const struct net_device *sb_dev,
3437 		       struct sk_buff *skb)
3438 {
3439 	u32 hash;
3440 	u16 qoffset = 0;
3441 	u16 qcount = dev->real_num_tx_queues;
3442 
3443 	if (dev->num_tc) {
3444 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3445 
3446 		qoffset = sb_dev->tc_to_txq[tc].offset;
3447 		qcount = sb_dev->tc_to_txq[tc].count;
3448 		if (unlikely(!qcount)) {
3449 			net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
3450 					     sb_dev->name, qoffset, tc);
3451 			qoffset = 0;
3452 			qcount = dev->real_num_tx_queues;
3453 		}
3454 	}
3455 
3456 	if (skb_rx_queue_recorded(skb)) {
3457 		DEBUG_NET_WARN_ON_ONCE(qcount == 0);
3458 		hash = skb_get_rx_queue(skb);
3459 		if (hash >= qoffset)
3460 			hash -= qoffset;
3461 		while (unlikely(hash >= qcount))
3462 			hash -= qcount;
3463 		return hash + qoffset;
3464 	}
3465 
3466 	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3467 }
3468 
3469 void skb_warn_bad_offload(const struct sk_buff *skb)
3470 {
3471 	static const netdev_features_t null_features;
3472 	struct net_device *dev = skb->dev;
3473 	const char *name = "";
3474 
3475 	if (!net_ratelimit())
3476 		return;
3477 
3478 	if (dev) {
3479 		if (dev->dev.parent)
3480 			name = dev_driver_string(dev->dev.parent);
3481 		else
3482 			name = netdev_name(dev);
3483 	}
3484 	skb_dump(KERN_WARNING, skb, false);
3485 	WARN(1, "%s: caps=(%pNF, %pNF)\n",
3486 	     name, dev ? &dev->features : &null_features,
3487 	     skb->sk ? &skb->sk->sk_route_caps : &null_features);
3488 }
3489 
3490 /*
3491  * Invalidate hardware checksum when packet is to be mangled, and
3492  * complete checksum manually on outgoing path.
3493  */
3494 int skb_checksum_help(struct sk_buff *skb)
3495 {
3496 	__wsum csum;
3497 	int ret = 0, offset;
3498 
3499 	if (skb->ip_summed == CHECKSUM_COMPLETE)
3500 		goto out_set_summed;
3501 
3502 	if (unlikely(skb_is_gso(skb))) {
3503 		skb_warn_bad_offload(skb);
3504 		return -EINVAL;
3505 	}
3506 
3507 	if (!skb_frags_readable(skb))
3508 		return -EFAULT;
3510 
3511 	/* Before computing a checksum, we should make sure no frag could
3512 	 * be modified by an external entity : checksum could be wrong.
3513 	 */
3514 	if (skb_has_shared_frag(skb)) {
3515 		ret = __skb_linearize(skb);
3516 		if (ret)
3517 			goto out;
3518 	}
3519 
3520 	offset = skb_checksum_start_offset(skb);
3521 	ret = -EINVAL;
3522 	if (unlikely(offset >= skb_headlen(skb))) {
3523 		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3524 		WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
3525 			  offset, skb_headlen(skb));
3526 		goto out;
3527 	}
3528 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
3529 
3530 	offset += skb->csum_offset;
3531 	if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
3532 		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3533 		WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
3534 			  offset + sizeof(__sum16), skb_headlen(skb));
3535 		goto out;
3536 	}
3537 	ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3538 	if (ret)
3539 		goto out;
3540 
3541 	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3542 out_set_summed:
3543 	skb->ip_summed = CHECKSUM_NONE;
3544 out:
3545 	return ret;
3546 }
3547 EXPORT_SYMBOL(skb_checksum_help);
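
/*
 * Editor's note: an illustrative sketch (not part of dev.c). A driver whose
 * hardware cannot checksum a particular packet falls back to this helper in
 * its xmit path; example_hw_can_csum() is a hypothetical capability test:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !example_hw_can_csum(skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */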
3548 
3549 int skb_crc32c_csum_help(struct sk_buff *skb)
3550 {
3551 	__le32 crc32c_csum;
3552 	int ret = 0, offset, start;
3553 
3554 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3555 		goto out;
3556 
3557 	if (unlikely(skb_is_gso(skb)))
3558 		goto out;
3559 
3560 	/* Before computing a checksum, we should make sure no frag could
3561 	 * be modified by an external entity : checksum could be wrong.
3562 	 */
3563 	if (unlikely(skb_has_shared_frag(skb))) {
3564 		ret = __skb_linearize(skb);
3565 		if (ret)
3566 			goto out;
3567 	}
3568 	start = skb_checksum_start_offset(skb);
3569 	offset = start + offsetof(struct sctphdr, checksum);
3570 	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3571 		ret = -EINVAL;
3572 		goto out;
3573 	}
3574 
3575 	ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3576 	if (ret)
3577 		goto out;
3578 
3579 	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3580 						  skb->len - start, ~(__u32)0,
3581 						  crc32c_csum_stub));
3582 	*(__le32 *)(skb->data + offset) = crc32c_csum;
3583 	skb_reset_csum_not_inet(skb);
3584 out:
3585 	return ret;
3586 }
3587 EXPORT_SYMBOL(skb_crc32c_csum_help);
3588 
3589 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3590 {
3591 	__be16 type = skb->protocol;
3592 
3593 	/* Tunnel gso handlers can set protocol to ethernet. */
3594 	if (type == htons(ETH_P_TEB)) {
3595 		struct ethhdr *eth;
3596 
3597 		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3598 			return 0;
3599 
3600 		eth = (struct ethhdr *)skb->data;
3601 		type = eth->h_proto;
3602 	}
3603 
3604 	return vlan_get_protocol_and_depth(skb, type, depth);
3605 }
3606 
3607 
3608 /* Take action when hardware reception checksum errors are detected. */
3609 #ifdef CONFIG_BUG
3610 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3611 {
3612 	netdev_err(dev, "hw csum failure\n");
3613 	skb_dump(KERN_ERR, skb, true);
3614 	dump_stack();
3615 }
3616 
3617 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3618 {
3619 	DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
3620 }
3621 EXPORT_SYMBOL(netdev_rx_csum_fault);
3622 #endif
3623 
3624 /* XXX: check that highmem exists at all on the given machine. */
3625 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3626 {
3627 #ifdef CONFIG_HIGHMEM
3628 	int i;
3629 
3630 	if (!(dev->features & NETIF_F_HIGHDMA)) {
3631 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3632 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3633 			struct page *page = skb_frag_page(frag);
3634 
3635 			if (page && PageHighMem(page))
3636 				return 1;
3637 		}
3638 	}
3639 #endif
3640 	return 0;
3641 }
3642 
3643 /* For an MPLS offload request, verify we are testing hardware MPLS features
3644  * instead of the standard features for the netdev.
3645  */
3646 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3647 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3648 					   netdev_features_t features,
3649 					   __be16 type)
3650 {
3651 	if (eth_p_mpls(type))
3652 		features &= skb->dev->mpls_features;
3653 
3654 	return features;
3655 }
3656 #else
3657 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3658 					   netdev_features_t features,
3659 					   __be16 type)
3660 {
3661 	return features;
3662 }
3663 #endif
3664 
3665 static netdev_features_t harmonize_features(struct sk_buff *skb,
3666 	netdev_features_t features)
3667 {
3668 	__be16 type;
3669 
3670 	type = skb_network_protocol(skb, NULL);
3671 	features = net_mpls_features(skb, features, type);
3672 
3673 	if (skb->ip_summed != CHECKSUM_NONE &&
3674 	    !can_checksum_protocol(features, type)) {
3675 		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3676 	}
3677 	if (illegal_highdma(skb->dev, skb))
3678 		features &= ~NETIF_F_SG;
3679 
3680 	return features;
3681 }
3682 
3683 netdev_features_t passthru_features_check(struct sk_buff *skb,
3684 					  struct net_device *dev,
3685 					  netdev_features_t features)
3686 {
3687 	return features;
3688 }
3689 EXPORT_SYMBOL(passthru_features_check);
3690 
3691 static netdev_features_t dflt_features_check(struct sk_buff *skb,
3692 					     struct net_device *dev,
3693 					     netdev_features_t features)
3694 {
3695 	return vlan_features_check(skb, features);
3696 }
3697 
3698 static netdev_features_t gso_features_check(const struct sk_buff *skb,
3699 					    struct net_device *dev,
3700 					    netdev_features_t features)
3701 {
3702 	u16 gso_segs = skb_shinfo(skb)->gso_segs;
3703 
3704 	if (gso_segs > READ_ONCE(dev->gso_max_segs))
3705 		return features & ~NETIF_F_GSO_MASK;
3706 
3707 	if (unlikely(skb->len >= netif_get_gso_max_size(dev, skb)))
3708 		return features & ~NETIF_F_GSO_MASK;
3709 
3710 	if (!skb_shinfo(skb)->gso_type) {
3711 		skb_warn_bad_offload(skb);
3712 		return features & ~NETIF_F_GSO_MASK;
3713 	}
3714 
3715 	/* Support for GSO partial features requires software
3716 	 * intervention before we can actually process the packets,
3717 	 * so we need to strip support for any partial features now;
3718 	 * we can pull them back in after we have partially
3719 	 * segmented the frame.
3720 	 */
3721 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3722 		features &= ~dev->gso_partial_features;
3723 
3724 	/* Make sure to clear the IPv4 ID mangling feature if the
3725 	 * IPv4 header has the potential to be fragmented.
3726 	 */
3727 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3728 		struct iphdr *iph = skb->encapsulation ?
3729 				    inner_ip_hdr(skb) : ip_hdr(skb);
3730 
3731 		if (!(iph->frag_off & htons(IP_DF)))
3732 			features &= ~NETIF_F_TSO_MANGLEID;
3733 	}
3734 
3735 	return features;
3736 }
3737 
3738 netdev_features_t netif_skb_features(struct sk_buff *skb)
3739 {
3740 	struct net_device *dev = skb->dev;
3741 	netdev_features_t features = dev->features;
3742 
3743 	if (skb_is_gso(skb))
3744 		features = gso_features_check(skb, dev, features);
3745 
3746 	/* If encapsulation offload request, verify we are testing
3747 	 * hardware encapsulation features instead of standard
3748 	 * features for the netdev
3749 	 */
3750 	if (skb->encapsulation)
3751 		features &= dev->hw_enc_features;
3752 
3753 	if (skb_vlan_tagged(skb))
3754 		features = netdev_intersect_features(features,
3755 						     dev->vlan_features |
3756 						     NETIF_F_HW_VLAN_CTAG_TX |
3757 						     NETIF_F_HW_VLAN_STAG_TX);
3758 
3759 	if (dev->netdev_ops->ndo_features_check)
3760 		features &= dev->netdev_ops->ndo_features_check(skb, dev,
3761 								features);
3762 	else
3763 		features &= dflt_features_check(skb, dev, features);
3764 
3765 	return harmonize_features(skb, features);
3766 }
3767 EXPORT_SYMBOL(netif_skb_features);
3768 
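/* Editor's sketch (hypothetical "foo" driver): an ndo_features_check
 * implementation of the kind invoked from netif_skb_features() above.
 * A driver whose hardware cannot checksum or segment encapsulated
 * packets could mask the offload bits per skb like this.
 */
#if 0
static netdev_features_t foo_features_check(struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	/* Assume the NIC cannot checksum/segment tunnelled traffic. */
	if (skb->encapsulation)
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	return vlan_features_check(skb, features);
}
#endif
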
3769 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3770 		    struct netdev_queue *txq, bool more)
3771 {
3772 	unsigned int len;
3773 	int rc;
3774 
3775 	if (dev_nit_active_rcu(dev))
3776 		dev_queue_xmit_nit(skb, dev);
3777 
3778 	len = skb->len;
3779 	trace_net_dev_start_xmit(skb, dev);
3780 	rc = netdev_start_xmit(skb, dev, txq, more);
3781 	trace_net_dev_xmit(skb, rc, dev, len);
3782 
3783 	return rc;
3784 }
3785 
3786 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3787 				    struct netdev_queue *txq, int *ret)
3788 {
3789 	struct sk_buff *skb = first;
3790 	int rc = NETDEV_TX_OK;
3791 
3792 	while (skb) {
3793 		struct sk_buff *next = skb->next;
3794 
3795 		skb_mark_not_on_list(skb);
3796 		rc = xmit_one(skb, dev, txq, next != NULL);
3797 		if (unlikely(!dev_xmit_complete(rc))) {
3798 			skb->next = next;
3799 			goto out;
3800 		}
3801 
3802 		skb = next;
3803 		if (netif_tx_queue_stopped(txq) && skb) {
3804 			rc = NETDEV_TX_BUSY;
3805 			break;
3806 		}
3807 	}
3808 
3809 out:
3810 	*ret = rc;
3811 	return skb;
3812 }
3813 
3814 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3815 					  netdev_features_t features)
3816 {
3817 	if (skb_vlan_tag_present(skb) &&
3818 	    !vlan_hw_offload_capable(features, skb->vlan_proto))
3819 		skb = __vlan_hwaccel_push_inside(skb);
3820 	return skb;
3821 }
3822 
3823 int skb_csum_hwoffload_help(struct sk_buff *skb,
3824 			    const netdev_features_t features)
3825 {
3826 	if (unlikely(skb_csum_is_sctp(skb)))
3827 		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3828 			skb_crc32c_csum_help(skb);
3829 
3830 	if (features & NETIF_F_HW_CSUM)
3831 		return 0;
3832 
3833 	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3834 		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6) &&
3835 		    skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
3836 		    !ipv6_has_hopopt_jumbo(skb))
3837 			goto sw_checksum;
3838 
3839 		switch (skb->csum_offset) {
3840 		case offsetof(struct tcphdr, check):
3841 		case offsetof(struct udphdr, check):
3842 			return 0;
3843 		}
3844 	}
3845 
3846 sw_checksum:
3847 	return skb_checksum_help(skb);
3848 }
3849 EXPORT_SYMBOL(skb_csum_hwoffload_help);
3850 
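/* Editor's sketch (hypothetical "foo" driver): the typical caller
 * pattern for skb_csum_hwoffload_help() in a driver TX path, falling
 * back to a software checksum when the device cannot offload this
 * particular packet. Error handling is reduced to the essentials.
 */
#if 0
static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    skb_csum_hwoffload_help(skb, dev->features)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/* ... hand the now-checksummed or offloadable skb to HW ... */
	return NETDEV_TX_OK;
}
#endif
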
3851 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3852 {
3853 	netdev_features_t features;
3854 
3855 	if (!skb_frags_readable(skb))
3856 		goto out_kfree_skb;
3857 
3858 	features = netif_skb_features(skb);
3859 	skb = validate_xmit_vlan(skb, features);
3860 	if (unlikely(!skb))
3861 		goto out_null;
3862 
3863 	skb = sk_validate_xmit_skb(skb, dev);
3864 	if (unlikely(!skb))
3865 		goto out_null;
3866 
3867 	if (netif_needs_gso(skb, features)) {
3868 		struct sk_buff *segs;
3869 
3870 		segs = skb_gso_segment(skb, features);
3871 		if (IS_ERR(segs)) {
3872 			goto out_kfree_skb;
3873 		} else if (segs) {
3874 			consume_skb(skb);
3875 			skb = segs;
3876 		}
3877 	} else {
3878 		if (skb_needs_linearize(skb, features) &&
3879 		    __skb_linearize(skb))
3880 			goto out_kfree_skb;
3881 
3882 		/* If packet is not checksummed and device does not
3883 		 * support checksumming for this protocol, complete
3884 		 * checksumming here.
3885 		 */
3886 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
3887 			if (skb->encapsulation)
3888 				skb_set_inner_transport_header(skb,
3889 							       skb_checksum_start_offset(skb));
3890 			else
3891 				skb_set_transport_header(skb,
3892 							 skb_checksum_start_offset(skb));
3893 			if (skb_csum_hwoffload_help(skb, features))
3894 				goto out_kfree_skb;
3895 		}
3896 	}
3897 
3898 	skb = validate_xmit_xfrm(skb, features, again);
3899 
3900 	return skb;
3901 
3902 out_kfree_skb:
3903 	kfree_skb(skb);
3904 out_null:
3905 	dev_core_stats_tx_dropped_inc(dev);
3906 	return NULL;
3907 }
3908 
3909 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3910 {
3911 	struct sk_buff *next, *head = NULL, *tail;
3912 
3913 	for (; skb != NULL; skb = next) {
3914 		next = skb->next;
3915 		skb_mark_not_on_list(skb);
3916 
3917 		/* in case skb won't be segmented, point to itself */
3918 		skb->prev = skb;
3919 
3920 		skb = validate_xmit_skb(skb, dev, again);
3921 		if (!skb)
3922 			continue;
3923 
3924 		if (!head)
3925 			head = skb;
3926 		else
3927 			tail->next = skb;
3928 		/* If skb was segmented, skb->prev points to
3929 		 * the last segment. If not, it still contains skb.
3930 		 */
3931 		tail = skb->prev;
3932 	}
3933 	return head;
3934 }
3935 EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3936 
3937 static void qdisc_pkt_len_init(struct sk_buff *skb)
3938 {
3939 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
3940 
3941 	qdisc_skb_cb(skb)->pkt_len = skb->len;
3942 
3943 	/* To get a more precise estimate of the bytes sent on the wire,
3944 	 * we add to pkt_len the header size of all segments.
3945 	 */
3946 	if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3947 		u16 gso_segs = shinfo->gso_segs;
3948 		unsigned int hdr_len;
3949 
3950 		/* mac layer + network layer */
3951 		hdr_len = skb_transport_offset(skb);
3952 
3953 		/* + transport layer */
3954 		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3955 			const struct tcphdr *th;
3956 			struct tcphdr _tcphdr;
3957 
3958 			th = skb_header_pointer(skb, hdr_len,
3959 						sizeof(_tcphdr), &_tcphdr);
3960 			if (likely(th))
3961 				hdr_len += __tcp_hdrlen(th);
3962 		} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
3963 			struct udphdr _udphdr;
3964 
3965 			if (skb_header_pointer(skb, hdr_len,
3966 					       sizeof(_udphdr), &_udphdr))
3967 				hdr_len += sizeof(struct udphdr);
3968 		}
3969 
3970 		if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) {
3971 			int payload = skb->len - hdr_len;
3972 
3973 			/* Malicious packet. */
3974 			if (payload <= 0)
3975 				return;
3976 			gso_segs = DIV_ROUND_UP(payload, shinfo->gso_size);
3977 		}
3978 		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3979 	}
3980 }
3981 
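/* Editor's worked example for qdisc_pkt_len_init() above (illustrative
 * numbers only): a TCPv4 GSO skb carrying 3 * 1448 bytes of payload
 * behind 14 + 20 + 32 = 66 bytes of Ethernet/IPv4/TCP headers has
 * skb->len = 4410 and gso_segs = 3, so
 * pkt_len = 4410 + (3 - 1) * 66 = 4542, i.e. exactly the 3 * 1514
 * bytes that will appear on the wire after segmentation.
 */
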
3982 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
3983 			     struct sk_buff **to_free,
3984 			     struct netdev_queue *txq)
3985 {
3986 	int rc;
3987 
3988 	rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
3989 	if (rc == NET_XMIT_SUCCESS)
3990 		trace_qdisc_enqueue(q, txq, skb);
3991 	return rc;
3992 }
3993 
3994 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3995 				 struct net_device *dev,
3996 				 struct netdev_queue *txq)
3997 {
3998 	spinlock_t *root_lock = qdisc_lock(q);
3999 	struct sk_buff *to_free = NULL;
4000 	bool contended;
4001 	int rc;
4002 
4003 	qdisc_calculate_pkt_len(skb, q);
4004 
4005 	tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_DROP);
4006 
4007 	if (q->flags & TCQ_F_NOLOCK) {
4008 		if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
4009 		    qdisc_run_begin(q)) {
4010 			/* Retest nolock_qdisc_is_empty() within the protection
4011 			 * of q->seqlock to protect from racing with requeuing.
4012 			 */
4013 			if (unlikely(!nolock_qdisc_is_empty(q))) {
4014 				rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
4015 				__qdisc_run(q);
4016 				qdisc_run_end(q);
4017 
4018 				goto no_lock_out;
4019 			}
4020 
4021 			qdisc_bstats_cpu_update(q, skb);
4022 			if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
4023 			    !nolock_qdisc_is_empty(q))
4024 				__qdisc_run(q);
4025 
4026 			qdisc_run_end(q);
4027 			return NET_XMIT_SUCCESS;
4028 		}
4029 
4030 		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
4031 		qdisc_run(q);
4032 
4033 no_lock_out:
4034 		if (unlikely(to_free))
4035 			kfree_skb_list_reason(to_free,
4036 					      tcf_get_drop_reason(to_free));
4037 		return rc;
4038 	}
4039 
4040 	if (unlikely(READ_ONCE(q->owner) == smp_processor_id())) {
4041 		kfree_skb_reason(skb, SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
4042 		return NET_XMIT_DROP;
4043 	}
4044 	/*
4045 	 * Heuristic to force contended enqueues to serialize on a
4046 	 * separate lock before trying to get the qdisc main lock.
4047 	 * This permits the qdisc->running owner to get the lock more
4048 	 * often and dequeue packets faster.
4049 	 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit,
4050 	 * and then other tasks will only enqueue packets. The packets will be
4051 	 * sent after the qdisc owner is scheduled again. To prevent this
4052 	 * scenario the task always serializes on the lock.
4053 	 */
4054 	contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT);
4055 	if (unlikely(contended))
4056 		spin_lock(&q->busylock);
4057 
4058 	spin_lock(root_lock);
4059 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
4060 		__qdisc_drop(skb, &to_free);
4061 		rc = NET_XMIT_DROP;
4062 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
4063 		   qdisc_run_begin(q)) {
4064 		/*
4065 		 * This is a work-conserving queue; there are no old skbs
4066 		 * waiting to be sent out; and the qdisc is not running -
4067 		 * xmit the skb directly.
4068 		 */
4069 
4070 		qdisc_bstats_update(q, skb);
4071 
4072 		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
4073 			if (unlikely(contended)) {
4074 				spin_unlock(&q->busylock);
4075 				contended = false;
4076 			}
4077 			__qdisc_run(q);
4078 		}
4079 
4080 		qdisc_run_end(q);
4081 		rc = NET_XMIT_SUCCESS;
4082 	} else {
4083 		WRITE_ONCE(q->owner, smp_processor_id());
4084 		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
4085 		WRITE_ONCE(q->owner, -1);
4086 		if (qdisc_run_begin(q)) {
4087 			if (unlikely(contended)) {
4088 				spin_unlock(&q->busylock);
4089 				contended = false;
4090 			}
4091 			__qdisc_run(q);
4092 			qdisc_run_end(q);
4093 		}
4094 	}
4095 	spin_unlock(root_lock);
4096 	if (unlikely(to_free))
4097 		kfree_skb_list_reason(to_free,
4098 				      tcf_get_drop_reason(to_free));
4099 	if (unlikely(contended))
4100 		spin_unlock(&q->busylock);
4101 	return rc;
4102 }
4103 
4104 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
4105 static void skb_update_prio(struct sk_buff *skb)
4106 {
4107 	const struct netprio_map *map;
4108 	const struct sock *sk;
4109 	unsigned int prioidx;
4110 
4111 	if (skb->priority)
4112 		return;
4113 	map = rcu_dereference_bh(skb->dev->priomap);
4114 	if (!map)
4115 		return;
4116 	sk = skb_to_full_sk(skb);
4117 	if (!sk)
4118 		return;
4119 
4120 	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
4121 
4122 	if (prioidx < map->priomap_len)
4123 		skb->priority = map->priomap[prioidx];
4124 }
4125 #else
4126 #define skb_update_prio(skb)
4127 #endif
4128 
4129 /**
4130  *	dev_loopback_xmit - loop back @skb
4131  *	@net: network namespace this loopback is happening in
4132  *	@sk:  socket; needed so this function can be used as a netfilter okfn
4133  *	@skb: buffer to transmit
4134  */
4135 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
4136 {
4137 	skb_reset_mac_header(skb);
4138 	__skb_pull(skb, skb_network_offset(skb));
4139 	skb->pkt_type = PACKET_LOOPBACK;
4140 	if (skb->ip_summed == CHECKSUM_NONE)
4141 		skb->ip_summed = CHECKSUM_UNNECESSARY;
4142 	DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb));
4143 	skb_dst_force(skb);
4144 	netif_rx(skb);
4145 	return 0;
4146 }
4147 EXPORT_SYMBOL(dev_loopback_xmit);
4148 
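/* Editor's sketch of the typical call pattern (cf. IPv4 multicast
 * output): the (net, sk, skb) signature above exists so
 * dev_loopback_xmit() can be passed as the okfn of an NF_HOOK
 * invocation. "newskb" is assumed to be a clone destined for local
 * delivery.
 */
#if 0
	NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
		net, sk, newskb, NULL, newskb->dev,
		dev_loopback_xmit);
#endif
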
4149 #ifdef CONFIG_NET_EGRESS
4150 static struct netdev_queue *
4151 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
4152 {
4153 	int qm = skb_get_queue_mapping(skb);
4154 
4155 	return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
4156 }
4157 
4158 #ifndef CONFIG_PREEMPT_RT
4159 static bool netdev_xmit_txqueue_skipped(void)
4160 {
4161 	return __this_cpu_read(softnet_data.xmit.skip_txqueue);
4162 }
4163 
4164 void netdev_xmit_skip_txqueue(bool skip)
4165 {
4166 	__this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
4167 }
4168 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
4169 
4170 #else
4171 static bool netdev_xmit_txqueue_skipped(void)
4172 {
4173 	return current->net_xmit.skip_txqueue;
4174 }
4175 
4176 void netdev_xmit_skip_txqueue(bool skip)
4177 {
4178 	current->net_xmit.skip_txqueue = skip;
4179 }
4180 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
4181 #endif
4182 #endif /* CONFIG_NET_EGRESS */
4183 
4184 #ifdef CONFIG_NET_XGRESS
4185 static int tc_run(struct tcx_entry *entry, struct sk_buff *skb,
4186 		  enum skb_drop_reason *drop_reason)
4187 {
4188 	int ret = TC_ACT_UNSPEC;
4189 #ifdef CONFIG_NET_CLS_ACT
4190 	struct mini_Qdisc *miniq = rcu_dereference_bh(entry->miniq);
4191 	struct tcf_result res;
4192 
4193 	if (!miniq)
4194 		return ret;
4195 
4196 	/* Global bypass */
4197 	if (!static_branch_likely(&tcf_sw_enabled_key))
4198 		return ret;
4199 
4200 	/* Block-wise bypass */
4201 	if (tcf_block_bypass_sw(miniq->block))
4202 		return ret;
4203 
4204 	tc_skb_cb(skb)->mru = 0;
4205 	tc_skb_cb(skb)->post_ct = false;
4206 	tcf_set_drop_reason(skb, *drop_reason);
4207 
4208 	mini_qdisc_bstats_cpu_update(miniq, skb);
4209 	ret = tcf_classify(skb, miniq->block, miniq->filter_list, &res, false);
4210 	/* Only tcf related quirks below. */
4211 	switch (ret) {
4212 	case TC_ACT_SHOT:
4213 		*drop_reason = tcf_get_drop_reason(skb);
4214 		mini_qdisc_qstats_cpu_drop(miniq);
4215 		break;
4216 	case TC_ACT_OK:
4217 	case TC_ACT_RECLASSIFY:
4218 		skb->tc_index = TC_H_MIN(res.classid);
4219 		break;
4220 	}
4221 #endif /* CONFIG_NET_CLS_ACT */
4222 	return ret;
4223 }
4224 
4225 static DEFINE_STATIC_KEY_FALSE(tcx_needed_key);
4226 
4227 void tcx_inc(void)
4228 {
4229 	static_branch_inc(&tcx_needed_key);
4230 }
4231 
4232 void tcx_dec(void)
4233 {
4234 	static_branch_dec(&tcx_needed_key);
4235 }
4236 
4237 static __always_inline enum tcx_action_base
4238 tcx_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
4239 	const bool needs_mac)
4240 {
4241 	const struct bpf_mprog_fp *fp;
4242 	const struct bpf_prog *prog;
4243 	int ret = TCX_NEXT;
4244 
4245 	if (needs_mac)
4246 		__skb_push(skb, skb->mac_len);
4247 	bpf_mprog_foreach_prog(entry, fp, prog) {
4248 		bpf_compute_data_pointers(skb);
4249 		ret = bpf_prog_run(prog, skb);
4250 		if (ret != TCX_NEXT)
4251 			break;
4252 	}
4253 	if (needs_mac)
4254 		__skb_pull(skb, skb->mac_len);
4255 	return tcx_action_code(skb, ret);
4256 }
4257 
4258 static __always_inline struct sk_buff *
4259 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4260 		   struct net_device *orig_dev, bool *another)
4261 {
4262 	struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
4263 	enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_INGRESS;
4264 	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
4265 	int sch_ret;
4266 
4267 	if (!entry)
4268 		return skb;
4269 
4270 	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
4271 	if (*pt_prev) {
4272 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
4273 		*pt_prev = NULL;
4274 	}
4275 
4276 	qdisc_skb_cb(skb)->pkt_len = skb->len;
4277 	tcx_set_ingress(skb, true);
4278 
4279 	if (static_branch_unlikely(&tcx_needed_key)) {
4280 		sch_ret = tcx_run(entry, skb, true);
4281 		if (sch_ret != TC_ACT_UNSPEC)
4282 			goto ingress_verdict;
4283 	}
4284 	sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason);
4285 ingress_verdict:
4286 	switch (sch_ret) {
4287 	case TC_ACT_REDIRECT:
4288 		/* skb_mac_header check was done by BPF, so we can safely
4289 		 * push the L2 header back before redirecting to another
4290 		 * netdev.
4291 		 */
4292 		__skb_push(skb, skb->mac_len);
4293 		if (skb_do_redirect(skb) == -EAGAIN) {
4294 			__skb_pull(skb, skb->mac_len);
4295 			*another = true;
4296 			break;
4297 		}
4298 		*ret = NET_RX_SUCCESS;
4299 		bpf_net_ctx_clear(bpf_net_ctx);
4300 		return NULL;
4301 	case TC_ACT_SHOT:
4302 		kfree_skb_reason(skb, drop_reason);
4303 		*ret = NET_RX_DROP;
4304 		bpf_net_ctx_clear(bpf_net_ctx);
4305 		return NULL;
4306 	/* used by tc_run */
4307 	case TC_ACT_STOLEN:
4308 	case TC_ACT_QUEUED:
4309 	case TC_ACT_TRAP:
4310 		consume_skb(skb);
4311 		fallthrough;
4312 	case TC_ACT_CONSUMED:
4313 		*ret = NET_RX_SUCCESS;
4314 		bpf_net_ctx_clear(bpf_net_ctx);
4315 		return NULL;
4316 	}
4317 	bpf_net_ctx_clear(bpf_net_ctx);
4318 
4319 	return skb;
4320 }
4321 
4322 static __always_inline struct sk_buff *
4323 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4324 {
4325 	struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
4326 	enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_EGRESS;
4327 	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
4328 	int sch_ret;
4329 
4330 	if (!entry)
4331 		return skb;
4332 
4333 	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
4334 
4335 	/* qdisc_skb_cb(skb)->pkt_len and tcx_set_ingress() were
4336 	 * already handled by the caller.
4337 	 */
4338 	if (static_branch_unlikely(&tcx_needed_key)) {
4339 		sch_ret = tcx_run(entry, skb, false);
4340 		if (sch_ret != TC_ACT_UNSPEC)
4341 			goto egress_verdict;
4342 	}
4343 	sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason);
4344 egress_verdict:
4345 	switch (sch_ret) {
4346 	case TC_ACT_REDIRECT:
4347 		/* No need to push/pop skb's mac_header here on egress! */
4348 		skb_do_redirect(skb);
4349 		*ret = NET_XMIT_SUCCESS;
4350 		bpf_net_ctx_clear(bpf_net_ctx);
4351 		return NULL;
4352 	case TC_ACT_SHOT:
4353 		kfree_skb_reason(skb, drop_reason);
4354 		*ret = NET_XMIT_DROP;
4355 		bpf_net_ctx_clear(bpf_net_ctx);
4356 		return NULL;
4357 	/* used by tc_run */
4358 	case TC_ACT_STOLEN:
4359 	case TC_ACT_QUEUED:
4360 	case TC_ACT_TRAP:
4361 		consume_skb(skb);
4362 		fallthrough;
4363 	case TC_ACT_CONSUMED:
4364 		*ret = NET_XMIT_SUCCESS;
4365 		bpf_net_ctx_clear(bpf_net_ctx);
4366 		return NULL;
4367 	}
4368 	bpf_net_ctx_clear(bpf_net_ctx);
4369 
4370 	return skb;
4371 }
4372 #else
4373 static __always_inline struct sk_buff *
4374 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4375 		   struct net_device *orig_dev, bool *another)
4376 {
4377 	return skb;
4378 }
4379 
4380 static __always_inline struct sk_buff *
4381 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4382 {
4383 	return skb;
4384 }
4385 #endif /* CONFIG_NET_XGRESS */
4386 
4387 #ifdef CONFIG_XPS
4388 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
4389 			       struct xps_dev_maps *dev_maps, unsigned int tci)
4390 {
4391 	int tc = netdev_get_prio_tc_map(dev, skb->priority);
4392 	struct xps_map *map;
4393 	int queue_index = -1;
4394 
4395 	if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
4396 		return queue_index;
4397 
4398 	tci *= dev_maps->num_tc;
4399 	tci += tc;
4400 
4401 	map = rcu_dereference(dev_maps->attr_map[tci]);
4402 	if (map) {
4403 		if (map->len == 1)
4404 			queue_index = map->queues[0];
4405 		else
4406 			queue_index = map->queues[reciprocal_scale(
4407 						skb_get_hash(skb), map->len)];
4408 		if (unlikely(queue_index >= dev->real_num_tx_queues))
4409 			queue_index = -1;
4410 	}
4411 	return queue_index;
4412 }
4413 #endif
4414 
4415 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
4416 			 struct sk_buff *skb)
4417 {
4418 #ifdef CONFIG_XPS
4419 	struct xps_dev_maps *dev_maps;
4420 	struct sock *sk = skb->sk;
4421 	int queue_index = -1;
4422 
4423 	if (!static_key_false(&xps_needed))
4424 		return -1;
4425 
4426 	rcu_read_lock();
4427 	if (!static_key_false(&xps_rxqs_needed))
4428 		goto get_cpus_map;
4429 
4430 	dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
4431 	if (dev_maps) {
4432 		int tci = sk_rx_queue_get(sk);
4433 
4434 		if (tci >= 0)
4435 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4436 							  tci);
4437 	}
4438 
4439 get_cpus_map:
4440 	if (queue_index < 0) {
4441 		dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
4442 		if (dev_maps) {
4443 			unsigned int tci = skb->sender_cpu - 1;
4444 
4445 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4446 							  tci);
4447 		}
4448 	}
4449 	rcu_read_unlock();
4450 
4451 	return queue_index;
4452 #else
4453 	return -1;
4454 #endif
4455 }
4456 
4457 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
4458 		     struct net_device *sb_dev)
4459 {
4460 	return 0;
4461 }
4462 EXPORT_SYMBOL(dev_pick_tx_zero);
4463 
4464 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4465 		     struct net_device *sb_dev)
4466 {
4467 	struct sock *sk = skb->sk;
4468 	int queue_index = sk_tx_queue_get(sk);
4469 
4470 	sb_dev = sb_dev ? : dev;
4471 
4472 	if (queue_index < 0 || skb->ooo_okay ||
4473 	    queue_index >= dev->real_num_tx_queues) {
4474 		int new_index = get_xps_queue(dev, sb_dev, skb);
4475 
4476 		if (new_index < 0)
4477 			new_index = skb_tx_hash(dev, sb_dev, skb);
4478 
4479 		if (queue_index != new_index && sk &&
4480 		    sk_fullsock(sk) &&
4481 		    rcu_access_pointer(sk->sk_dst_cache))
4482 			sk_tx_queue_set(sk, new_index);
4483 
4484 		queue_index = new_index;
4485 	}
4486 
4487 	return queue_index;
4488 }
4489 EXPORT_SYMBOL(netdev_pick_tx);
4490 
4491 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4492 					 struct sk_buff *skb,
4493 					 struct net_device *sb_dev)
4494 {
4495 	int queue_index = 0;
4496 
4497 #ifdef CONFIG_XPS
4498 	u32 sender_cpu = skb->sender_cpu - 1;
4499 
4500 	if (sender_cpu >= (u32)NR_CPUS)
4501 		skb->sender_cpu = raw_smp_processor_id() + 1;
4502 #endif
4503 
4504 	if (dev->real_num_tx_queues != 1) {
4505 		const struct net_device_ops *ops = dev->netdev_ops;
4506 
4507 		if (ops->ndo_select_queue)
4508 			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4509 		else
4510 			queue_index = netdev_pick_tx(dev, skb, sb_dev);
4511 
4512 		queue_index = netdev_cap_txqueue(dev, queue_index);
4513 	}
4514 
4515 	skb_set_queue_mapping(skb, queue_index);
4516 	return netdev_get_tx_queue(dev, queue_index);
4517 }
4518 
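/* Editor's sketch (hypothetical "foo" driver): a minimal
 * ndo_select_queue hook. When a driver provides one,
 * netdev_core_pick_tx() above prefers it over netdev_pick_tx() and
 * then clamps the result with netdev_cap_txqueue().
 */
#if 0
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	/* Pin control traffic to queue 0, hash everything else. */
	if (skb->priority == TC_PRIO_CONTROL)
		return 0;
	return netdev_pick_tx(dev, skb, sb_dev);
}
#endif
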
4519 /**
4520  * __dev_queue_xmit() - transmit a buffer
4521  * @skb:	buffer to transmit
4522  * @sb_dev:	subordinate device used for L2 forwarding offload
4523  *
4524  * Queue a buffer for transmission to a network device. The caller must
4525  * have set the device and priority and built the buffer before calling
4526  * this function. The function can be called from an interrupt.
4527  *
4528  * When calling this method, interrupts MUST be enabled. This is because
4529  * the BH enable code must have IRQs enabled so that it will not deadlock.
4530  *
4531  * Regardless of the return value, the skb is consumed, so it is currently
4532  * difficult to retry a send to this method. (You can bump the ref count
4533  * difficult to retry a failed send with this method. (You can bump the ref count
4534  *
4535  * Return:
4536  * * 0				- buffer successfully transmitted
4537  * * positive qdisc return code	- NET_XMIT_DROP etc.
4538  * * negative errno		- other errors
4539  */
4540 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4541 {
4542 	struct net_device *dev = skb->dev;
4543 	struct netdev_queue *txq = NULL;
4544 	struct Qdisc *q;
4545 	int rc = -ENOMEM;
4546 	bool again = false;
4547 
4548 	skb_reset_mac_header(skb);
4549 	skb_assert_len(skb);
4550 
4551 	if (unlikely(skb_shinfo(skb)->tx_flags &
4552 		     (SKBTX_SCHED_TSTAMP | SKBTX_BPF)))
4553 		__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4554 
4555 	/* Disable soft irqs for various locks below. Also
4556 	 * stops preemption for RCU.
4557 	 */
4558 	rcu_read_lock_bh();
4559 
4560 	skb_update_prio(skb);
4561 
4562 	qdisc_pkt_len_init(skb);
4563 	tcx_set_ingress(skb, false);
4564 #ifdef CONFIG_NET_EGRESS
4565 	if (static_branch_unlikely(&egress_needed_key)) {
4566 		if (nf_hook_egress_active()) {
4567 			skb = nf_hook_egress(skb, &rc, dev);
4568 			if (!skb)
4569 				goto out;
4570 		}
4571 
4572 		netdev_xmit_skip_txqueue(false);
4573 
4574 		nf_skip_egress(skb, true);
4575 		skb = sch_handle_egress(skb, &rc, dev);
4576 		if (!skb)
4577 			goto out;
4578 		nf_skip_egress(skb, false);
4579 
4580 		if (netdev_xmit_txqueue_skipped())
4581 			txq = netdev_tx_queue_mapping(dev, skb);
4582 	}
4583 #endif
4584 	/* If the device/qdisc doesn't need skb->dst, release it right now while
4585 	 * it's hot in this CPU's cache.
4586 	 */
4587 	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4588 		skb_dst_drop(skb);
4589 	else
4590 		skb_dst_force(skb);
4591 
4592 	if (!txq)
4593 		txq = netdev_core_pick_tx(dev, skb, sb_dev);
4594 
4595 	q = rcu_dereference_bh(txq->qdisc);
4596 
4597 	trace_net_dev_queue(skb);
4598 	if (q->enqueue) {
4599 		rc = __dev_xmit_skb(skb, q, dev, txq);
4600 		goto out;
4601 	}
4602 
4603 	/* The device has no queue. Common case for software devices:
4604 	 * loopback, all sorts of tunnels...
4605 	 *
4606 	 * Really, it is unlikely that netif_tx_lock protection is necessary
4607 	 * here.  (e.g. loopback and IP tunnels are clean, ignoring statistics
4608 	 * counters.)
4609 	 * However, it is possible that they rely on the protection
4610 	 * made by us here.
4611 	 *
4612 	 * Check this and take the lock; it is not prone to deadlocks.
4613 	 * Alternatively, use the noqueue qdisc; it is even simpler 8)
4614 	 */
4615 	if (dev->flags & IFF_UP) {
4616 		int cpu = smp_processor_id(); /* ok because BHs are off */
4617 
4618 		/* Other cpus might concurrently change txq->xmit_lock_owner
4619 		 * to -1 or to their cpu id, but not to our id.
4620 		 */
4621 		if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
4622 			if (dev_xmit_recursion())
4623 				goto recursion_alert;
4624 
4625 			skb = validate_xmit_skb(skb, dev, &again);
4626 			if (!skb)
4627 				goto out;
4628 
4629 			HARD_TX_LOCK(dev, txq, cpu);
4630 
4631 			if (!netif_xmit_stopped(txq)) {
4632 				dev_xmit_recursion_inc();
4633 				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4634 				dev_xmit_recursion_dec();
4635 				if (dev_xmit_complete(rc)) {
4636 					HARD_TX_UNLOCK(dev, txq);
4637 					goto out;
4638 				}
4639 			}
4640 			HARD_TX_UNLOCK(dev, txq);
4641 			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4642 					     dev->name);
4643 		} else {
4644 			/* Recursion has been detected! It is possible,
4645 			 * unfortunately.
4646 			 */
4647 recursion_alert:
4648 			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4649 					     dev->name);
4650 		}
4651 	}
4652 
4653 	rc = -ENETDOWN;
4654 	rcu_read_unlock_bh();
4655 
4656 	dev_core_stats_tx_dropped_inc(dev);
4657 	kfree_skb_list(skb);
4658 	return rc;
4659 out:
4660 	rcu_read_unlock_bh();
4661 	return rc;
4662 }
4663 EXPORT_SYMBOL(__dev_queue_xmit);
4664 
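/* Editor's sketch of the "bump the ref count" note in the kernel-doc
 * above: a caller that wants to keep the buffer around can take its
 * own reference, since dev_queue_xmit() consumes the skb on both
 * success and failure. The "foo" wrapper is hypothetical.
 */
#if 0
static void foo_send(struct sk_buff *skb)
{
	int rc;

	skb_get(skb);			/* our extra reference */
	rc = dev_queue_xmit(skb);	/* consumes one reference */
	if (rc != NET_XMIT_SUCCESS)
		net_dbg_ratelimited("foo: xmit returned %d\n", rc);
	kfree_skb(skb);			/* drop our extra reference */
}
#endif
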
4665 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4666 {
4667 	struct net_device *dev = skb->dev;
4668 	struct sk_buff *orig_skb = skb;
4669 	struct netdev_queue *txq;
4670 	int ret = NETDEV_TX_BUSY;
4671 	bool again = false;
4672 
4673 	if (unlikely(!netif_running(dev) ||
4674 		     !netif_carrier_ok(dev)))
4675 		goto drop;
4676 
4677 	skb = validate_xmit_skb_list(skb, dev, &again);
4678 	if (skb != orig_skb)
4679 		goto drop;
4680 
4681 	skb_set_queue_mapping(skb, queue_id);
4682 	txq = skb_get_tx_queue(dev, skb);
4683 
4684 	local_bh_disable();
4685 
4686 	dev_xmit_recursion_inc();
4687 	HARD_TX_LOCK(dev, txq, smp_processor_id());
4688 	if (!netif_xmit_frozen_or_drv_stopped(txq))
4689 		ret = netdev_start_xmit(skb, dev, txq, false);
4690 	HARD_TX_UNLOCK(dev, txq);
4691 	dev_xmit_recursion_dec();
4692 
4693 	local_bh_enable();
4694 	return ret;
4695 drop:
4696 	dev_core_stats_tx_dropped_inc(dev);
4697 	kfree_skb_list(skb);
4698 	return NET_XMIT_DROP;
4699 }
4700 EXPORT_SYMBOL(__dev_direct_xmit);
4701 
4702 /*************************************************************************
4703  *			Receiver routines
4704  *************************************************************************/
4705 static DEFINE_PER_CPU(struct task_struct *, backlog_napi);
4706 
4707 int weight_p __read_mostly = 64;           /* old backlog weight */
4708 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
4709 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
4710 
4711 /* Called with irq disabled */
4712 static inline void ____napi_schedule(struct softnet_data *sd,
4713 				     struct napi_struct *napi)
4714 {
4715 	struct task_struct *thread;
4716 
4717 	lockdep_assert_irqs_disabled();
4718 
4719 	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
4720 		/* Paired with smp_mb__before_atomic() in
4721 		 * napi_enable()/dev_set_threaded().
4722 		 * Use READ_ONCE() to guarantee a complete
4723 		 * read on napi->thread. Only call
4724 		 * wake_up_process() when it's not NULL.
4725 		 */
4726 		thread = READ_ONCE(napi->thread);
4727 		if (thread) {
4728 			if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi))
4729 				goto use_local_napi;
4730 
4731 			set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
4732 			wake_up_process(thread);
4733 			return;
4734 		}
4735 	}
4736 
4737 use_local_napi:
4738 	list_add_tail(&napi->poll_list, &sd->poll_list);
4739 	WRITE_ONCE(napi->list_owner, smp_processor_id());
4740 	/* If not called from net_rx_action()
4741 	 * we have to raise NET_RX_SOFTIRQ.
4742 	 */
4743 	if (!sd->in_net_rx_action)
4744 		raise_softirq_irqoff(NET_RX_SOFTIRQ);
4745 }
4746 
4747 #ifdef CONFIG_RPS
4748 
4749 struct static_key_false rps_needed __read_mostly;
4750 EXPORT_SYMBOL(rps_needed);
4751 struct static_key_false rfs_needed __read_mostly;
4752 EXPORT_SYMBOL(rfs_needed);
4753 
4754 static struct rps_dev_flow *
4755 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4756 	    struct rps_dev_flow *rflow, u16 next_cpu)
4757 {
4758 	if (next_cpu < nr_cpu_ids) {
4759 		u32 head;
4760 #ifdef CONFIG_RFS_ACCEL
4761 		struct netdev_rx_queue *rxqueue;
4762 		struct rps_dev_flow_table *flow_table;
4763 		struct rps_dev_flow *old_rflow;
4764 		u16 rxq_index;
4765 		u32 flow_id;
4766 		int rc;
4767 
4768 		/* Should we steer this flow to a different hardware queue? */
4769 		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4770 		    !(dev->features & NETIF_F_NTUPLE))
4771 			goto out;
4772 		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4773 		if (rxq_index == skb_get_rx_queue(skb))
4774 			goto out;
4775 
4776 		rxqueue = dev->_rx + rxq_index;
4777 		flow_table = rcu_dereference(rxqueue->rps_flow_table);
4778 		if (!flow_table)
4779 			goto out;
4780 		flow_id = skb_get_hash(skb) & flow_table->mask;
4781 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4782 							rxq_index, flow_id);
4783 		if (rc < 0)
4784 			goto out;
4785 		old_rflow = rflow;
4786 		rflow = &flow_table->flows[flow_id];
4787 		WRITE_ONCE(rflow->filter, rc);
4788 		if (old_rflow->filter == rc)
4789 			WRITE_ONCE(old_rflow->filter, RPS_NO_FILTER);
4790 	out:
4791 #endif
4792 		head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head);
4793 		rps_input_queue_tail_save(&rflow->last_qtail, head);
4794 	}
4795 
4796 	WRITE_ONCE(rflow->cpu, next_cpu);
4797 	return rflow;
4798 }
4799 
4800 /*
4801  * get_rps_cpu is called from netif_receive_skb and returns the target
4802  * CPU from the RPS map of the receiving queue for a given skb.
4803  * rcu_read_lock must be held on entry.
4804  */
4805 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4806 		       struct rps_dev_flow **rflowp)
4807 {
4808 	const struct rps_sock_flow_table *sock_flow_table;
4809 	struct netdev_rx_queue *rxqueue = dev->_rx;
4810 	struct rps_dev_flow_table *flow_table;
4811 	struct rps_map *map;
4812 	int cpu = -1;
4813 	u32 tcpu;
4814 	u32 hash;
4815 
4816 	if (skb_rx_queue_recorded(skb)) {
4817 		u16 index = skb_get_rx_queue(skb);
4818 
4819 		if (unlikely(index >= dev->real_num_rx_queues)) {
4820 			WARN_ONCE(dev->real_num_rx_queues > 1,
4821 				  "%s received packet on queue %u, but number "
4822 				  "of RX queues is %u\n",
4823 				  dev->name, index, dev->real_num_rx_queues);
4824 			goto done;
4825 		}
4826 		rxqueue += index;
4827 	}
4828 
4829 	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4830 
4831 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4832 	map = rcu_dereference(rxqueue->rps_map);
4833 	if (!flow_table && !map)
4834 		goto done;
4835 
4836 	skb_reset_network_header(skb);
4837 	hash = skb_get_hash(skb);
4838 	if (!hash)
4839 		goto done;
4840 
4841 	sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
4842 	if (flow_table && sock_flow_table) {
4843 		struct rps_dev_flow *rflow;
4844 		u32 next_cpu;
4845 		u32 ident;
4846 
4847 		/* First check the global flow table for a match.
4848 		 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
4849 		 */
4850 		ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
4851 		if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
4852 			goto try_rps;
4853 
4854 		next_cpu = ident & net_hotdata.rps_cpu_mask;
4855 
4856 		/* OK, now we know there is a match;
4857 		 * we can look at the local (per-receive-queue) flow table.
4858 		 */
4859 		rflow = &flow_table->flows[hash & flow_table->mask];
4860 		tcpu = rflow->cpu;
4861 
4862 		/*
4863 		 * If the desired CPU (where last recvmsg was done) is
4864 		 * different from current CPU (one in the rx-queue flow
4865 		 * table entry), switch if one of the following holds:
4866 		 *   - Current CPU is unset (>= nr_cpu_ids).
4867 		 *   - Current CPU is offline.
4868 		 *   - The current CPU's queue tail has advanced beyond the
4869 		 *     last packet that was enqueued using this table entry.
4870 		 *     This guarantees that all previous packets for the flow
4871 		 *     have been dequeued, thus preserving in-order delivery.
4872 		 */
4873 		if (unlikely(tcpu != next_cpu) &&
4874 		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4875 		     ((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
4876 		      rflow->last_qtail)) >= 0)) {
4877 			tcpu = next_cpu;
4878 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4879 		}
4880 
4881 		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4882 			*rflowp = rflow;
4883 			cpu = tcpu;
4884 			goto done;
4885 		}
4886 	}
4887 
4888 try_rps:
4889 
4890 	if (map) {
4891 		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4892 		if (cpu_online(tcpu)) {
4893 			cpu = tcpu;
4894 			goto done;
4895 		}
4896 	}
4897 
4898 done:
4899 	return cpu;
4900 }
4901 
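/* Editor's worked example for the identity check in get_rps_cpu()
 * above (illustrative numbers): with rps_cpu_mask = 0x3f (64 CPUs), a
 * flow with hash = 0x12345678 recorded by recvmsg() on CPU 5 stores
 * ident = (0x12345678 & ~0x3f) | 5 = 0x12345645. Then
 * (ident ^ hash) & ~rps_cpu_mask = 0x3d & ~0x3f = 0, so the entry
 * matches, and next_cpu = ident & rps_cpu_mask = 5.
 */
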
4902 #ifdef CONFIG_RFS_ACCEL
4903 
4904 /**
4905  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4906  * @dev: Device on which the filter was set
4907  * @rxq_index: RX queue index
4908  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4909  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4910  *
4911  * Drivers that implement ndo_rx_flow_steer() should periodically call
4912  * this function for each installed filter and remove the filters for
4913  * which it returns %true.
4914  */
4915 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4916 			 u32 flow_id, u16 filter_id)
4917 {
4918 	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4919 	struct rps_dev_flow_table *flow_table;
4920 	struct rps_dev_flow *rflow;
4921 	bool expire = true;
4922 	unsigned int cpu;
4923 
4924 	rcu_read_lock();
4925 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4926 	if (flow_table && flow_id <= flow_table->mask) {
4927 		rflow = &flow_table->flows[flow_id];
4928 		cpu = READ_ONCE(rflow->cpu);
4929 		if (READ_ONCE(rflow->filter) == filter_id && cpu < nr_cpu_ids &&
4930 		    ((int)(READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head) -
4931 			   READ_ONCE(rflow->last_qtail)) <
4932 		     (int)(10 * flow_table->mask)))
4933 			expire = false;
4934 	}
4935 	rcu_read_unlock();
4936 	return expire;
4937 }
4938 EXPORT_SYMBOL(rps_may_expire_flow);
4939 
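/* Editor's sketch of the polling pattern described in the kernel-doc
 * above, loosely modelled on ARFS-capable drivers; the "foo_*"
 * structures and the foo_remove_hw_filter() helper are hypothetical.
 */
#if 0
static void foo_expire_filters(struct foo_priv *priv, u16 rxq_index)
{
	u32 flow_id;

	for (flow_id = 0; flow_id < priv->n_filters; flow_id++) {
		u16 filter_id = priv->filter_ids[rxq_index][flow_id];

		if (rps_may_expire_flow(priv->netdev, rxq_index,
					flow_id, filter_id))
			foo_remove_hw_filter(priv, rxq_index, filter_id);
	}
}
#endif
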
4940 #endif /* CONFIG_RFS_ACCEL */
4941 
4942 /* Called from hardirq (IPI) context */
4943 static void rps_trigger_softirq(void *data)
4944 {
4945 	struct softnet_data *sd = data;
4946 
4947 	____napi_schedule(sd, &sd->backlog);
4948 	sd->received_rps++;
4949 }
4950 
4951 #endif /* CONFIG_RPS */
4952 
4953 /* Called from hardirq (IPI) context */
4954 static void trigger_rx_softirq(void *data)
4955 {
4956 	struct softnet_data *sd = data;
4957 
4958 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4959 	smp_store_release(&sd->defer_ipi_scheduled, 0);
4960 }
4961 
4962 /*
4963  * After we queued a packet into sd->input_pkt_queue,
4964  * we need to make sure this queue is serviced soon.
4965  *
4966  * - If this is another cpu queue, link it to our rps_ipi_list,
4967  *   and make sure we will process rps_ipi_list from net_rx_action().
4968  *
4969  * - If this is our own queue, NAPI schedule our backlog.
4970  *   Note that this also raises NET_RX_SOFTIRQ.
4971  */
4972 static void napi_schedule_rps(struct softnet_data *sd)
4973 {
4974 	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4975 
4976 #ifdef CONFIG_RPS
4977 	if (sd != mysd) {
4978 		if (use_backlog_threads()) {
4979 			__napi_schedule_irqoff(&sd->backlog);
4980 			return;
4981 		}
4982 
4983 		sd->rps_ipi_next = mysd->rps_ipi_list;
4984 		mysd->rps_ipi_list = sd;
4985 
4986 		/* If not called from net_rx_action() or napi_threaded_poll()
4987 		 * we have to raise NET_RX_SOFTIRQ.
4988 		 */
4989 		if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
4990 			__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4991 		return;
4992 	}
4993 #endif /* CONFIG_RPS */
4994 	__napi_schedule_irqoff(&mysd->backlog);
4995 }
4996 
4997 void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
4998 {
4999 	unsigned long flags;
5000 
5001 	if (use_backlog_threads()) {
5002 		backlog_lock_irq_save(sd, &flags);
5003 
5004 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
5005 			__napi_schedule_irqoff(&sd->backlog);
5006 
5007 		backlog_unlock_irq_restore(sd, &flags);
5008 
5009 	} else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
5010 		smp_call_function_single_async(cpu, &sd->defer_csd);
5011 	}
5012 }
5013 
5014 #ifdef CONFIG_NET_FLOW_LIMIT
5015 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
5016 #endif
5017 
5018 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
5019 {
5020 #ifdef CONFIG_NET_FLOW_LIMIT
5021 	struct sd_flow_limit *fl;
5022 	struct softnet_data *sd;
5023 	unsigned int old_flow, new_flow;
5024 
5025 	if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
5026 		return false;
5027 
5028 	sd = this_cpu_ptr(&softnet_data);
5029 
5030 	rcu_read_lock();
5031 	fl = rcu_dereference(sd->flow_limit);
5032 	if (fl) {
5033 		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
5034 		old_flow = fl->history[fl->history_head];
5035 		fl->history[fl->history_head] = new_flow;
5036 
5037 		fl->history_head++;
5038 		fl->history_head &= FLOW_LIMIT_HISTORY - 1;
5039 
5040 		if (likely(fl->buckets[old_flow]))
5041 			fl->buckets[old_flow]--;
5042 
5043 		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
5044 			fl->count++;
5045 			rcu_read_unlock();
5046 			return true;
5047 		}
5048 	}
5049 	rcu_read_unlock();
5050 #endif
5051 	return false;
5052 }
5053 
5054 /*
5055  * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
5056  * queue (which may be a remote CPU's queue).
5057  */
5058 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
5059 			      unsigned int *qtail)
5060 {
5061 	enum skb_drop_reason reason;
5062 	struct softnet_data *sd;
5063 	unsigned long flags;
5064 	unsigned int qlen;
5065 	int max_backlog;
5066 	u32 tail;
5067 
5068 	reason = SKB_DROP_REASON_DEV_READY;
5069 	if (!netif_running(skb->dev))
5070 		goto bad_dev;
5071 
5072 	reason = SKB_DROP_REASON_CPU_BACKLOG;
5073 	sd = &per_cpu(softnet_data, cpu);
5074 
5075 	qlen = skb_queue_len_lockless(&sd->input_pkt_queue);
5076 	max_backlog = READ_ONCE(net_hotdata.max_backlog);
5077 	if (unlikely(qlen > max_backlog))
5078 		goto cpu_backlog_drop;
5079 	backlog_lock_irq_save(sd, &flags);
5080 	qlen = skb_queue_len(&sd->input_pkt_queue);
5081 	if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) {
5082 		if (!qlen) {
5083 			/* Schedule NAPI for the backlog device. We can use
5084 			 * a non-atomic operation as we own the queue lock.
5085 			 */
5086 			if (!__test_and_set_bit(NAPI_STATE_SCHED,
5087 						&sd->backlog.state))
5088 				napi_schedule_rps(sd);
5089 		}
5090 		__skb_queue_tail(&sd->input_pkt_queue, skb);
5091 		tail = rps_input_queue_tail_incr(sd);
5092 		backlog_unlock_irq_restore(sd, &flags);
5093 
5094 		/* save the tail outside of the critical section */
5095 		rps_input_queue_tail_save(qtail, tail);
5096 		return NET_RX_SUCCESS;
5097 	}
5098 
5099 	backlog_unlock_irq_restore(sd, &flags);
5100 
5101 cpu_backlog_drop:
5102 	atomic_inc(&sd->dropped);
5103 bad_dev:
5104 	dev_core_stats_rx_dropped_inc(skb->dev);
5105 	kfree_skb_reason(skb, reason);
5106 	return NET_RX_DROP;
5107 }
5108 
5109 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
5110 {
5111 	struct net_device *dev = skb->dev;
5112 	struct netdev_rx_queue *rxqueue;
5113 
5114 	rxqueue = dev->_rx;
5115 
5116 	if (skb_rx_queue_recorded(skb)) {
5117 		u16 index = skb_get_rx_queue(skb);
5118 
5119 		if (unlikely(index >= dev->real_num_rx_queues)) {
5120 			WARN_ONCE(dev->real_num_rx_queues > 1,
5121 				  "%s received packet on queue %u, but number "
5122 				  "of RX queues is %u\n",
5123 				  dev->name, index, dev->real_num_rx_queues);
5124 
5125 			return rxqueue; /* Return first rxqueue */
5126 		}
5127 		rxqueue += index;
5128 	}
5129 	return rxqueue;
5130 }
5131 
5132 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
5133 			     const struct bpf_prog *xdp_prog)
5134 {
5135 	void *orig_data, *orig_data_end, *hard_start;
5136 	struct netdev_rx_queue *rxqueue;
5137 	bool orig_bcast, orig_host;
5138 	u32 mac_len, frame_sz;
5139 	__be16 orig_eth_type;
5140 	struct ethhdr *eth;
5141 	u32 metalen, act;
5142 	int off;
5143 
5144 	/* The XDP program wants to see the packet starting at the MAC
5145 	 * header.
5146 	 */
5147 	mac_len = skb->data - skb_mac_header(skb);
5148 	hard_start = skb->data - skb_headroom(skb);
5149 
5150 	/* The SKB "head" area always has tailroom for skb_shared_info */
5151 	frame_sz = (void *)skb_end_pointer(skb) - hard_start;
5152 	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5153 
5154 	rxqueue = netif_get_rxqueue(skb);
5155 	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
5156 	xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
5157 			 skb_headlen(skb) + mac_len, true);
5158 	if (skb_is_nonlinear(skb)) {
5159 		skb_shinfo(skb)->xdp_frags_size = skb->data_len;
5160 		xdp_buff_set_frags_flag(xdp);
5161 	} else {
5162 		xdp_buff_clear_frags_flag(xdp);
5163 	}
5164 
5165 	orig_data_end = xdp->data_end;
5166 	orig_data = xdp->data;
5167 	eth = (struct ethhdr *)xdp->data;
5168 	orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
5169 	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
5170 	orig_eth_type = eth->h_proto;
5171 
5172 	act = bpf_prog_run_xdp(xdp_prog, xdp);
5173 
5174 	/* check if bpf_xdp_adjust_head was used */
5175 	off = xdp->data - orig_data;
5176 	if (off) {
5177 		if (off > 0)
5178 			__skb_pull(skb, off);
5179 		else if (off < 0)
5180 			__skb_push(skb, -off);
5181 
5182 		skb->mac_header += off;
5183 		skb_reset_network_header(skb);
5184 	}
5185 
5186 	/* check if bpf_xdp_adjust_tail was used */
5187 	off = xdp->data_end - orig_data_end;
5188 	if (off != 0) {
5189 		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
5190 		skb->len += off; /* positive on grow, negative on shrink */
5191 	}
5192 
5193 	/* XDP frag metadata (e.g. nr_frags) is updated in eBPF helpers
5194 	 * (e.g. bpf_xdp_adjust_tail), so we need to update data_len here.
5195 	 */
5196 	if (xdp_buff_has_frags(xdp))
5197 		skb->data_len = skb_shinfo(skb)->xdp_frags_size;
5198 	else
5199 		skb->data_len = 0;
5200 
5201 	/* check if XDP changed the eth hdr such that the SKB needs an update */
5202 	eth = (struct ethhdr *)xdp->data;
5203 	if ((orig_eth_type != eth->h_proto) ||
5204 	    (orig_host != ether_addr_equal_64bits(eth->h_dest,
5205 						  skb->dev->dev_addr)) ||
5206 	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
5207 		__skb_push(skb, ETH_HLEN);
5208 		skb->pkt_type = PACKET_HOST;
5209 		skb->protocol = eth_type_trans(skb, skb->dev);
5210 	}
5211 
5212 	/* Redirect/Tx gives an L2 packet; code that will reuse the skb must
5213 	 * __skb_pull it before calling us again on the redirect path. We do not
5214 	 * call do_redirect, as we leave that up to the caller.
5215 	 *
5216 	 * Caller is responsible for managing lifetime of skb (i.e. calling
5217 	 * kfree_skb in response to actions it cannot handle/XDP_DROP).
5218 	 */
5219 	switch (act) {
5220 	case XDP_REDIRECT:
5221 	case XDP_TX:
5222 		__skb_push(skb, mac_len);
5223 		break;
5224 	case XDP_PASS:
5225 		metalen = xdp->data - xdp->data_meta;
5226 		if (metalen)
5227 			skb_metadata_set(skb, metalen);
5228 		break;
5229 	}
5230 
5231 	return act;
5232 }
5233 
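/* Editor's illustration of what runs inside bpf_prog_run_xdp() above:
 * a minimal XDP program (compiled separately with clang -target bpf,
 * using the usual libbpf headers for SEC() and bpf_htons()) that drops
 * IPv6 and passes all other traffic. Shown only to make the
 * skb <-> xdp_buff translation above concrete; not part of this file.
 */
#if 0
SEC("xdp")
int xdp_drop_ipv6(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;

	if (data + sizeof(*eth) > data_end)
		return XDP_ABORTED;
	if (eth->h_proto == bpf_htons(ETH_P_IPV6))
		return XDP_DROP;
	return XDP_PASS;
}
#endif
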
5234 static int
5235 netif_skb_check_for_xdp(struct sk_buff **pskb, const struct bpf_prog *prog)
5236 {
5237 	struct sk_buff *skb = *pskb;
5238 	int err, hroom, troom;
5239 
5240 	if (!skb_cow_data_for_xdp(this_cpu_read(system_page_pool), pskb, prog))
5241 		return 0;
5242 
5243 	/* In case we have to go down this path and also linearize,
5244 	 * let's do the pskb_expand_head() work just once here.
5245 	 */
5246 	hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
5247 	troom = skb->tail + skb->data_len - skb->end;
5248 	err = pskb_expand_head(skb,
5249 			       hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
5250 			       troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
5251 	if (err)
5252 		return err;
5253 
5254 	return skb_linearize(skb);
5255 }
5256 
5257 static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
5258 				     struct xdp_buff *xdp,
5259 				     const struct bpf_prog *xdp_prog)
5260 {
5261 	struct sk_buff *skb = *pskb;
5262 	u32 mac_len, act = XDP_DROP;
5263 
5264 	/* Reinjected packets coming from act_mirred or similar should
5265 	 * not get XDP generic processing.
5266 	 */
5267 	if (skb_is_redirected(skb))
5268 		return XDP_PASS;
5269 
5270 	/* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
5271 	 * bytes. This is the guarantee that native XDP also provides,
5272 	 * so we need to provide it here as well.
5273 	 */
5274 	mac_len = skb->data - skb_mac_header(skb);
5275 	__skb_push(skb, mac_len);
5276 
5277 	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
5278 	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
5279 		if (netif_skb_check_for_xdp(pskb, xdp_prog))
5280 			goto do_drop;
5281 	}
5282 
5283 	__skb_pull(*pskb, mac_len);
5284 
5285 	act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
5286 	switch (act) {
5287 	case XDP_REDIRECT:
5288 	case XDP_TX:
5289 	case XDP_PASS:
5290 		break;
5291 	default:
5292 		bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
5293 		fallthrough;
5294 	case XDP_ABORTED:
5295 		trace_xdp_exception((*pskb)->dev, xdp_prog, act);
5296 		fallthrough;
5297 	case XDP_DROP:
5298 	do_drop:
5299 		kfree_skb(*pskb);
5300 		break;
5301 	}
5302 
5303 	return act;
5304 }
5305 
5306 /* When doing generic XDP we have to bypass the qdisc layer and the
5307  * network taps in order to match in-driver XDP behavior. This also means
5308  * that XDP packets are able to starve other packets going through a qdisc,
5309  * and DDoS attacks will be more effective. In-driver XDP uses dedicated TX
5310  * queues, so it does not have this starvation issue.
5311  */
5312 void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog)
5313 {
5314 	struct net_device *dev = skb->dev;
5315 	struct netdev_queue *txq;
5316 	bool free_skb = true;
5317 	int cpu, rc;
5318 
5319 	txq = netdev_core_pick_tx(dev, skb, NULL);
5320 	cpu = smp_processor_id();
5321 	HARD_TX_LOCK(dev, txq, cpu);
5322 	if (!netif_xmit_frozen_or_drv_stopped(txq)) {
5323 		rc = netdev_start_xmit(skb, dev, txq, false);
5324 		if (dev_xmit_complete(rc))
5325 			free_skb = false;
5326 	}
5327 	HARD_TX_UNLOCK(dev, txq);
5328 	if (free_skb) {
5329 		trace_xdp_exception(dev, xdp_prog, XDP_TX);
5330 		dev_core_stats_tx_dropped_inc(dev);
5331 		kfree_skb(skb);
5332 	}
5333 }
5334 
5335 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
5336 
5337 int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb)
5338 {
5339 	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
5340 
5341 	if (xdp_prog) {
5342 		struct xdp_buff xdp;
5343 		u32 act;
5344 		int err;
5345 
5346 		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
5347 		act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
5348 		if (act != XDP_PASS) {
5349 			switch (act) {
5350 			case XDP_REDIRECT:
5351 				err = xdp_do_generic_redirect((*pskb)->dev, *pskb,
5352 							      &xdp, xdp_prog);
5353 				if (err)
5354 					goto out_redir;
5355 				break;
5356 			case XDP_TX:
5357 				generic_xdp_tx(*pskb, xdp_prog);
5358 				break;
5359 			}
5360 			bpf_net_ctx_clear(bpf_net_ctx);
5361 			return XDP_DROP;
5362 		}
5363 		bpf_net_ctx_clear(bpf_net_ctx);
5364 	}
5365 	return XDP_PASS;
5366 out_redir:
5367 	bpf_net_ctx_clear(bpf_net_ctx);
5368 	kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
5369 	return XDP_DROP;
5370 }
5371 EXPORT_SYMBOL_GPL(do_xdp_generic);
5372 
5373 static int netif_rx_internal(struct sk_buff *skb)
5374 {
5375 	int ret;
5376 
5377 	net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
5378 
5379 	trace_netif_rx(skb);
5380 
5381 #ifdef CONFIG_RPS
5382 	if (static_branch_unlikely(&rps_needed)) {
5383 		struct rps_dev_flow voidflow, *rflow = &voidflow;
5384 		int cpu;
5385 
5386 		rcu_read_lock();
5387 
5388 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
5389 		if (cpu < 0)
5390 			cpu = smp_processor_id();
5391 
5392 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5393 
5394 		rcu_read_unlock();
5395 	} else
5396 #endif
5397 	{
5398 		unsigned int qtail;
5399 
5400 		ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
5401 	}
5402 	return ret;
5403 }
5404 
5405 /**
5406  *	__netif_rx	-	Slightly optimized version of netif_rx
5407  *	@skb: buffer to post
5408  *
5409  *	This behaves as netif_rx except that it does not disable bottom halves.
5410  *	As a result this function may only be invoked from the interrupt context
5411  *	(either hard or soft interrupt).
5412  */
5413 int __netif_rx(struct sk_buff *skb)
5414 {
5415 	int ret;
5416 
5417 	lockdep_assert_once(hardirq_count() | softirq_count());
5418 
5419 	trace_netif_rx_entry(skb);
5420 	ret = netif_rx_internal(skb);
5421 	trace_netif_rx_exit(ret);
5422 	return ret;
5423 }
5424 EXPORT_SYMBOL(__netif_rx);
5425 
5426 /**
5427  *	netif_rx	-	post buffer to the network code
5428  *	@skb: buffer to post
5429  *
5430  *	This function receives a packet from a device driver and queues it for
5431  *	the upper (protocol) levels to process via the backlog NAPI device. It
5432  *	always succeeds. The buffer may be dropped during processing for
5433  *	congestion control or by the protocol layers.
5434  *	The network buffer is passed via the backlog NAPI device. Modern NIC
5435  *	drivers should use NAPI and GRO.
5436  *	This function can be used from interrupt and from process context. A
5437  *	caller from process context must not disable interrupts before invoking
5438  *	this function.
5439  *
5440  *	return values:
5441  *	NET_RX_SUCCESS	(no congestion)
5442  *	NET_RX_DROP     (packet was dropped)
5443  *
5444  */
5445 int netif_rx(struct sk_buff *skb)
5446 {
5447 	bool need_bh_off = !(hardirq_count() | softirq_count());
5448 	int ret;
5449 
5450 	if (need_bh_off)
5451 		local_bh_disable();
5452 	trace_netif_rx_entry(skb);
5453 	ret = netif_rx_internal(skb);
5454 	trace_netif_rx_exit(ret);
5455 	if (need_bh_off)
5456 		local_bh_enable();
5457 	return ret;
5458 }
5459 EXPORT_SYMBOL(netif_rx);
5460 
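/* Editor's sketch (hypothetical "foo" driver): the classic non-NAPI
 * receive pattern the netif_rx() kernel-doc above refers to.
 */
#if 0
static void foo_rx_packet(struct net_device *dev, const void *buf,
			  unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_put_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);		/* queue to the backlog NAPI device */
}
#endif
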
5461 static __latent_entropy void net_tx_action(void)
5462 {
5463 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5464 
5465 	if (sd->completion_queue) {
5466 		struct sk_buff *clist;
5467 
5468 		local_irq_disable();
5469 		clist = sd->completion_queue;
5470 		sd->completion_queue = NULL;
5471 		local_irq_enable();
5472 
5473 		while (clist) {
5474 			struct sk_buff *skb = clist;
5475 
5476 			clist = clist->next;
5477 
5478 			WARN_ON(refcount_read(&skb->users));
5479 			if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED))
5480 				trace_consume_skb(skb, net_tx_action);
5481 			else
5482 				trace_kfree_skb(skb, net_tx_action,
5483 						get_kfree_skb_cb(skb)->reason, NULL);
5484 
5485 			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
5486 				__kfree_skb(skb);
5487 			else
5488 				__napi_kfree_skb(skb,
5489 						 get_kfree_skb_cb(skb)->reason);
5490 		}
5491 	}
5492 
5493 	if (sd->output_queue) {
5494 		struct Qdisc *head;
5495 
5496 		local_irq_disable();
5497 		head = sd->output_queue;
5498 		sd->output_queue = NULL;
5499 		sd->output_queue_tailp = &sd->output_queue;
5500 		local_irq_enable();
5501 
5502 		rcu_read_lock();
5503 
5504 		while (head) {
5505 			struct Qdisc *q = head;
5506 			spinlock_t *root_lock = NULL;
5507 
5508 			head = head->next_sched;
5509 
5510 			/* We need to make sure head->next_sched is read
5511 			 * before clearing __QDISC_STATE_SCHED
5512 			 */
5513 			smp_mb__before_atomic();
5514 
5515 			if (!(q->flags & TCQ_F_NOLOCK)) {
5516 				root_lock = qdisc_lock(q);
5517 				spin_lock(root_lock);
5518 			} else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
5519 						     &q->state))) {
5520 				/* There is a synchronize_net() between
5521 				 * STATE_DEACTIVATED flag being set and
5522 				 * qdisc_reset()/some_qdisc_is_busy() in
5523 				 * dev_deactivate(), so we can safely bail out
5524 				 * early here to avoid data race between
5525 				 * qdisc_deactivate() and some_qdisc_is_busy()
5526 				 * for lockless qdisc.
5527 				 */
5528 				clear_bit(__QDISC_STATE_SCHED, &q->state);
5529 				continue;
5530 			}
5531 
5532 			clear_bit(__QDISC_STATE_SCHED, &q->state);
5533 			qdisc_run(q);
5534 			if (root_lock)
5535 				spin_unlock(root_lock);
5536 		}
5537 
5538 		rcu_read_unlock();
5539 	}
5540 
5541 	xfrm_dev_backlog(sd);
5542 }
5543 
5544 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
5545 /* This hook is defined here for ATM LANE */
5546 int (*br_fdb_test_addr_hook)(struct net_device *dev,
5547 			     unsigned char *addr) __read_mostly;
5548 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
5549 #endif
5550 
5551 /**
5552  *	netdev_is_rx_handler_busy - check if receive handler is registered
5553  *	@dev: device to check
5554  *
5555  *	Check if a receive handler is already registered for a given device.
5556  *	Return true if there is one.
5557  *
5558  *	The caller must hold the rtnl_mutex.
5559  */
5560 bool netdev_is_rx_handler_busy(struct net_device *dev)
5561 {
5562 	ASSERT_RTNL();
5563 	return dev && rtnl_dereference(dev->rx_handler);
5564 }
5565 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
5566 
5567 /**
5568  *	netdev_rx_handler_register - register receive handler
5569  *	@dev: device to register a handler for
5570  *	@rx_handler: receive handler to register
5571  *	@rx_handler_data: data pointer that is used by rx handler
5572  *
5573  *	Register a receive handler for a device. This handler will then be
5574  *	called from __netif_receive_skb. A negative errno code is returned
5575  *	on a failure.
5576  *
5577  *	The caller must hold the rtnl_mutex.
5578  *
5579  *	For a general description of rx_handler, see enum rx_handler_result.
5580  */
5581 int netdev_rx_handler_register(struct net_device *dev,
5582 			       rx_handler_func_t *rx_handler,
5583 			       void *rx_handler_data)
5584 {
5585 	if (netdev_is_rx_handler_busy(dev))
5586 		return -EBUSY;
5587 
5588 	if (dev->priv_flags & IFF_NO_RX_HANDLER)
5589 		return -EINVAL;
5590 
5591 	/* Note: rx_handler_data must be set before rx_handler */
5592 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5593 	rcu_assign_pointer(dev->rx_handler, rx_handler);
5594 
5595 	return 0;
5596 }
5597 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
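
/* Example (illustrative sketch; foo_* names are hypothetical): this is
 * roughly how bridge- or bond-like code claims a port device. The
 * handler runs under rcu_read_lock() from __netif_receive_skb_core(),
 * so rx_handler_data may be rcu_dereference()d:
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct foo_port *port =
 *			rcu_dereference((*pskb)->dev->rx_handler_data);
 *
 *		if (foo_steal_frame(port, *pskb))
 *			return RX_HANDLER_CONSUMED;
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(port_dev, foo_handle_frame, port);
 *	rtnl_unlock();
 */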
5598 
5599 /**
5600  *	netdev_rx_handler_unregister - unregister receive handler
5601  *	@dev: device to unregister a handler from
5602  *
5603  *	Unregister a receive handler from a device.
5604  *
5605  *	The caller must hold the rtnl_mutex.
5606  */
5607 void netdev_rx_handler_unregister(struct net_device *dev)
5608 {
5610 	ASSERT_RTNL();
5611 	RCU_INIT_POINTER(dev->rx_handler, NULL);
5612 	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
5613 	 * section has a guarantee to see a non NULL rx_handler_data
5614 	 * as well.
5615 	 */
5616 	synchronize_net();
5617 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
5618 }
5619 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
5620 
5621 /*
5622  * Limit the use of PFMEMALLOC reserves to those protocols that implement
5623  * the special handling of PFMEMALLOC skbs.
5624  */
5625 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5626 {
5627 	switch (skb->protocol) {
5628 	case htons(ETH_P_ARP):
5629 	case htons(ETH_P_IP):
5630 	case htons(ETH_P_IPV6):
5631 	case htons(ETH_P_8021Q):
5632 	case htons(ETH_P_8021AD):
5633 		return true;
5634 	default:
5635 		return false;
5636 	}
5637 }
5638 
5639 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5640 			     int *ret, struct net_device *orig_dev)
5641 {
5642 	if (nf_hook_ingress_active(skb)) {
5643 		int ingress_retval;
5644 
5645 		if (*pt_prev) {
5646 			*ret = deliver_skb(skb, *pt_prev, orig_dev);
5647 			*pt_prev = NULL;
5648 		}
5649 
5650 		rcu_read_lock();
5651 		ingress_retval = nf_hook_ingress(skb);
5652 		rcu_read_unlock();
5653 		return ingress_retval;
5654 	}
5655 	return 0;
5656 }
5657 
5658 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5659 				    struct packet_type **ppt_prev)
5660 {
5661 	struct packet_type *ptype, *pt_prev;
5662 	rx_handler_func_t *rx_handler;
5663 	struct sk_buff *skb = *pskb;
5664 	struct net_device *orig_dev;
5665 	bool deliver_exact = false;
5666 	int ret = NET_RX_DROP;
5667 	__be16 type;
5668 
5669 	net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb);
5670 
5671 	trace_netif_receive_skb(skb);
5672 
5673 	orig_dev = skb->dev;
5674 
5675 	skb_reset_network_header(skb);
5676 #if !defined(CONFIG_DEBUG_NET)
5677 	/* We plan to no longer reset the transport header here.
5678 	 * Give some time to fuzzers and dev builds to catch bugs
5679 	 * in network stacks.
5680 	 */
5681 	if (!skb_transport_header_was_set(skb))
5682 		skb_reset_transport_header(skb);
5683 #endif
5684 	skb_reset_mac_len(skb);
5685 
5686 	pt_prev = NULL;
5687 
5688 another_round:
5689 	skb->skb_iif = skb->dev->ifindex;
5690 
5691 	__this_cpu_inc(softnet_data.processed);
5692 
5693 	if (static_branch_unlikely(&generic_xdp_needed_key)) {
5694 		int ret2;
5695 
5696 		migrate_disable();
5697 		ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
5698 				      &skb);
5699 		migrate_enable();
5700 
5701 		if (ret2 != XDP_PASS) {
5702 			ret = NET_RX_DROP;
5703 			goto out;
5704 		}
5705 	}
5706 
5707 	if (eth_type_vlan(skb->protocol)) {
5708 		skb = skb_vlan_untag(skb);
5709 		if (unlikely(!skb))
5710 			goto out;
5711 	}
5712 
5713 	if (skb_skip_tc_classify(skb))
5714 		goto skip_classify;
5715 
5716 	if (pfmemalloc)
5717 		goto skip_taps;
5718 
5719 	list_for_each_entry_rcu(ptype, &dev_net_rcu(skb->dev)->ptype_all,
5720 				list) {
5721 		if (pt_prev)
5722 			ret = deliver_skb(skb, pt_prev, orig_dev);
5723 		pt_prev = ptype;
5724 	}
5725 
5726 	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5727 		if (pt_prev)
5728 			ret = deliver_skb(skb, pt_prev, orig_dev);
5729 		pt_prev = ptype;
5730 	}
5731 
5732 skip_taps:
5733 #ifdef CONFIG_NET_INGRESS
5734 	if (static_branch_unlikely(&ingress_needed_key)) {
5735 		bool another = false;
5736 
5737 		nf_skip_egress(skb, true);
5738 		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
5739 					 &another);
5740 		if (another)
5741 			goto another_round;
5742 		if (!skb)
5743 			goto out;
5744 
5745 		nf_skip_egress(skb, false);
5746 		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5747 			goto out;
5748 	}
5749 #endif
5750 	skb_reset_redirect(skb);
5751 skip_classify:
5752 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5753 		goto drop;
5754 
5755 	if (skb_vlan_tag_present(skb)) {
5756 		if (pt_prev) {
5757 			ret = deliver_skb(skb, pt_prev, orig_dev);
5758 			pt_prev = NULL;
5759 		}
5760 		if (vlan_do_receive(&skb))
5761 			goto another_round;
5762 		else if (unlikely(!skb))
5763 			goto out;
5764 	}
5765 
5766 	rx_handler = rcu_dereference(skb->dev->rx_handler);
5767 	if (rx_handler) {
5768 		if (pt_prev) {
5769 			ret = deliver_skb(skb, pt_prev, orig_dev);
5770 			pt_prev = NULL;
5771 		}
5772 		switch (rx_handler(&skb)) {
5773 		case RX_HANDLER_CONSUMED:
5774 			ret = NET_RX_SUCCESS;
5775 			goto out;
5776 		case RX_HANDLER_ANOTHER:
5777 			goto another_round;
5778 		case RX_HANDLER_EXACT:
5779 			deliver_exact = true;
5780 			break;
5781 		case RX_HANDLER_PASS:
5782 			break;
5783 		default:
5784 			BUG();
5785 		}
5786 	}
5787 
5788 	if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5789 check_vlan_id:
5790 		if (skb_vlan_tag_get_id(skb)) {
5791 			/* VLAN id is non-zero and vlan_do_receive() above
5792 			 * couldn't find the vlan device.
5793 			 */
5794 			skb->pkt_type = PACKET_OTHERHOST;
5795 		} else if (eth_type_vlan(skb->protocol)) {
5796 			/* The outer header is 802.1P with vlan 0; the inner
5797 			 * header is 802.1Q or 802.1AD and vlan_do_receive()
5798 			 * above could not find a vlan dev for vlan id 0.
5799 			 */
5800 			__vlan_hwaccel_clear_tag(skb);
5801 			skb = skb_vlan_untag(skb);
5802 			if (unlikely(!skb))
5803 				goto out;
5804 			if (vlan_do_receive(&skb))
5805 				/* After stripping the 802.1P vlan 0 header,
5806 				 * a vlan dev was found for the inner header.
5807 				 */
5808 				goto another_round;
5809 			else if (unlikely(!skb))
5810 				goto out;
5811 			else
5812 				/* We have stripped the outer 802.1P vlan 0
5813 				 * header but could not find a vlan dev.
5814 				 * Check the vlan id again to set OTHERHOST.
5815 				 */
5816 				goto check_vlan_id;
5817 		}
5818 		/* Note: we might in the future use prio bits
5819 		 * and set skb->priority like in vlan_do_receive().
5820 		 * For the time being, just ignore the Priority Code Point.
5821 		 */
5822 		__vlan_hwaccel_clear_tag(skb);
5823 	}
5824 
5825 	type = skb->protocol;
5826 
5827 	/* deliver only exact match when indicated */
5828 	if (likely(!deliver_exact)) {
5829 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5830 				       &ptype_base[ntohs(type) &
5831 						   PTYPE_HASH_MASK]);
5832 
5833 		/* orig_dev and skb->dev could belong to different netns;
5834 		 * Even in such case we need to traverse only the list
5835 		 * coming from skb->dev, as the ptype owner (packet socket)
5836 		 * will use dev_net(skb->dev) to do namespace filtering.
5837 		 */
5838 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5839 				       &dev_net_rcu(skb->dev)->ptype_specific);
5840 	}
5841 
5842 	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5843 			       &orig_dev->ptype_specific);
5844 
5845 	if (unlikely(skb->dev != orig_dev)) {
5846 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5847 				       &skb->dev->ptype_specific);
5848 	}
5849 
5850 	if (pt_prev) {
5851 		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5852 			goto drop;
5853 		*ppt_prev = pt_prev;
5854 	} else {
5855 drop:
5856 		if (!deliver_exact)
5857 			dev_core_stats_rx_dropped_inc(skb->dev);
5858 		else
5859 			dev_core_stats_rx_nohandler_inc(skb->dev);
5860 		kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
5861 		/* Jamal, now you will not be able to escape explaining
5862 		 * to me how you were going to use this. :-)
5863 		 */
5864 		ret = NET_RX_DROP;
5865 	}
5866 
5867 out:
5868 	/* The invariant here is that if *ppt_prev is not NULL
5869 	 * then skb should also be non-NULL.
5870 	 *
5871 	 * The *ppt_prev assignment above upholds this invariant because
5872 	 * skb is dereferenced right before it.
5873 	 */
5874 	*pskb = skb;
5875 	return ret;
5876 }
5877 
5878 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5879 {
5880 	struct net_device *orig_dev = skb->dev;
5881 	struct packet_type *pt_prev = NULL;
5882 	int ret;
5883 
5884 	ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5885 	if (pt_prev)
5886 		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5887 					 skb->dev, pt_prev, orig_dev);
5888 	return ret;
5889 }
5890 
5891 /**
5892  *	netif_receive_skb_core - special purpose version of netif_receive_skb
5893  *	@skb: buffer to process
5894  *
5895  *	More direct receive version of netif_receive_skb().  It should
5896  *	only be used by callers that have a need to skip RPS and Generic XDP.
5897  *	Caller must also take care of handling if ``(page_is_)pfmemalloc``.
5898  *
5899  *	This function may only be called from softirq context and interrupts
5900  *	should be enabled.
5901  *
5902  *	Return values (usually ignored):
5903  *	NET_RX_SUCCESS: no congestion
5904  *	NET_RX_DROP: packet was dropped
5905  */
5906 int netif_receive_skb_core(struct sk_buff *skb)
5907 {
5908 	int ret;
5909 
5910 	rcu_read_lock();
5911 	ret = __netif_receive_skb_one_core(skb, false);
5912 	rcu_read_unlock();
5913 
5914 	return ret;
5915 }
5916 EXPORT_SYMBOL(netif_receive_skb_core);
5917 
5918 static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5919 						  struct packet_type *pt_prev,
5920 						  struct net_device *orig_dev)
5921 {
5922 	struct sk_buff *skb, *next;
5923 
5924 	if (!pt_prev)
5925 		return;
5926 	if (list_empty(head))
5927 		return;
5928 	if (pt_prev->list_func != NULL)
5929 		INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5930 				   ip_list_rcv, head, pt_prev, orig_dev);
5931 	else
5932 		list_for_each_entry_safe(skb, next, head, list) {
5933 			skb_list_del_init(skb);
5934 			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5935 		}
5936 }
5937 
5938 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5939 {
5940 	/* Fast-path assumptions:
5941 	 * - There is no RX handler.
5942 	 * - Only one packet_type matches.
5943 	 * If either of these fails, we will end up doing some per-packet
5944 	 * processing in-line, then handling the 'last ptype' for the whole
5945 	 * sublist.  This can't cause out-of-order delivery to any single ptype,
5946 	 * because the 'last ptype' must be constant across the sublist, and all
5947 	 * other ptypes are handled per-packet.
5948 	 */
5949 	/* Current (common) ptype of sublist */
5950 	struct packet_type *pt_curr = NULL;
5951 	/* Current (common) orig_dev of sublist */
5952 	struct net_device *od_curr = NULL;
5953 	struct sk_buff *skb, *next;
5954 	LIST_HEAD(sublist);
5955 
5956 	list_for_each_entry_safe(skb, next, head, list) {
5957 		struct net_device *orig_dev = skb->dev;
5958 		struct packet_type *pt_prev = NULL;
5959 
5960 		skb_list_del_init(skb);
5961 		__netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5962 		if (!pt_prev)
5963 			continue;
5964 		if (pt_curr != pt_prev || od_curr != orig_dev) {
5965 			/* dispatch old sublist */
5966 			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5967 			/* start new sublist */
5968 			INIT_LIST_HEAD(&sublist);
5969 			pt_curr = pt_prev;
5970 			od_curr = orig_dev;
5971 		}
5972 		list_add_tail(&skb->list, &sublist);
5973 	}
5974 
5975 	/* dispatch final sublist */
5976 	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5977 }
5978 
5979 static int __netif_receive_skb(struct sk_buff *skb)
5980 {
5981 	int ret;
5982 
5983 	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5984 		unsigned int noreclaim_flag;
5985 
5986 		/*
5987 		 * PFMEMALLOC skbs are special, they should
5988 		 * - be delivered to SOCK_MEMALLOC sockets only
5989 		 * - stay away from userspace
5990 		 * - have bounded memory usage
5991 		 *
5992 		 * Use PF_MEMALLOC as this saves us from propagating the allocation
5993 		 * context down to all allocation sites.
5994 		 */
5995 		noreclaim_flag = memalloc_noreclaim_save();
5996 		ret = __netif_receive_skb_one_core(skb, true);
5997 		memalloc_noreclaim_restore(noreclaim_flag);
5998 	} else
5999 		ret = __netif_receive_skb_one_core(skb, false);
6000 
6001 	return ret;
6002 }
6003 
6004 static void __netif_receive_skb_list(struct list_head *head)
6005 {
6006 	unsigned long noreclaim_flag = 0;
6007 	struct sk_buff *skb, *next;
6008 	bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
6009 
6010 	list_for_each_entry_safe(skb, next, head, list) {
6011 		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
6012 			struct list_head sublist;
6013 
6014 			/* Handle the previous sublist */
6015 			list_cut_before(&sublist, head, &skb->list);
6016 			if (!list_empty(&sublist))
6017 				__netif_receive_skb_list_core(&sublist, pfmemalloc);
6018 			pfmemalloc = !pfmemalloc;
6019 			/* See comments in __netif_receive_skb */
6020 			if (pfmemalloc)
6021 				noreclaim_flag = memalloc_noreclaim_save();
6022 			else
6023 				memalloc_noreclaim_restore(noreclaim_flag);
6024 		}
6025 	}
6026 	/* Handle the remaining sublist */
6027 	if (!list_empty(head))
6028 		__netif_receive_skb_list_core(head, pfmemalloc);
6029 	/* Restore pflags */
6030 	if (pfmemalloc)
6031 		memalloc_noreclaim_restore(noreclaim_flag);
6032 }
6033 
6034 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
6035 {
6036 	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
6037 	struct bpf_prog *new = xdp->prog;
6038 	int ret = 0;
6039 
6040 	switch (xdp->command) {
6041 	case XDP_SETUP_PROG:
6042 		rcu_assign_pointer(dev->xdp_prog, new);
6043 		if (old)
6044 			bpf_prog_put(old);
6045 
6046 		if (old && !new) {
6047 			static_branch_dec(&generic_xdp_needed_key);
6048 		} else if (new && !old) {
6049 			static_branch_inc(&generic_xdp_needed_key);
6050 			netif_disable_lro(dev);
6051 			dev_disable_gro_hw(dev);
6052 		}
6053 		break;
6054 
6055 	default:
6056 		ret = -EINVAL;
6057 		break;
6058 	}
6059 
6060 	return ret;
6061 }
6062 
6063 static int netif_receive_skb_internal(struct sk_buff *skb)
6064 {
6065 	int ret;
6066 
6067 	net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
6068 
6069 	if (skb_defer_rx_timestamp(skb))
6070 		return NET_RX_SUCCESS;
6071 
6072 	rcu_read_lock();
6073 #ifdef CONFIG_RPS
6074 	if (static_branch_unlikely(&rps_needed)) {
6075 		struct rps_dev_flow voidflow, *rflow = &voidflow;
6076 		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
6077 
6078 		if (cpu >= 0) {
6079 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
6080 			rcu_read_unlock();
6081 			return ret;
6082 		}
6083 	}
6084 #endif
6085 	ret = __netif_receive_skb(skb);
6086 	rcu_read_unlock();
6087 	return ret;
6088 }
6089 
6090 void netif_receive_skb_list_internal(struct list_head *head)
6091 {
6092 	struct sk_buff *skb, *next;
6093 	LIST_HEAD(sublist);
6094 
6095 	list_for_each_entry_safe(skb, next, head, list) {
6096 		net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),
6097 				    skb);
6098 		skb_list_del_init(skb);
6099 		if (!skb_defer_rx_timestamp(skb))
6100 			list_add_tail(&skb->list, &sublist);
6101 	}
6102 	list_splice_init(&sublist, head);
6103 
6104 	rcu_read_lock();
6105 #ifdef CONFIG_RPS
6106 	if (static_branch_unlikely(&rps_needed)) {
6107 		list_for_each_entry_safe(skb, next, head, list) {
6108 			struct rps_dev_flow voidflow, *rflow = &voidflow;
6109 			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
6110 
6111 			if (cpu >= 0) {
6112 				/* Will be handled, remove from list */
6113 				skb_list_del_init(skb);
6114 				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
6115 			}
6116 		}
6117 	}
6118 #endif
6119 	__netif_receive_skb_list(head);
6120 	rcu_read_unlock();
6121 }
6122 
6123 /**
6124  *	netif_receive_skb - process receive buffer from network
6125  *	@skb: buffer to process
6126  *
6127  *	netif_receive_skb() is the main receive data processing function.
6128  *	It always succeeds. The buffer may be dropped during processing
6129  *	for congestion control or by the protocol layers.
6130  *
6131  *	This function may only be called from softirq context and interrupts
6132  *	should be enabled.
6133  *
6134  *	Return values (usually ignored):
6135  *	NET_RX_SUCCESS: no congestion
6136  *	NET_RX_DROP: packet was dropped
6137  */
6138 int netif_receive_skb(struct sk_buff *skb)
6139 {
6140 	int ret;
6141 
6142 	trace_netif_receive_skb_entry(skb);
6143 
6144 	ret = netif_receive_skb_internal(skb);
6145 	trace_netif_receive_skb_exit(ret);
6146 
6147 	return ret;
6148 }
6149 EXPORT_SYMBOL(netif_receive_skb);
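
/* Example (illustrative sketch; foo_*/ring names are hypothetical): a
 * NAPI driver's poll callback typically delivers each completed frame
 * with netif_receive_skb() (or napi_gro_receive() when GRO is wanted):
 *
 *	while (work < budget && (skb = foo_next_rx_skb(ring))) {
 *		skb->protocol = eth_type_trans(skb, napi->dev);
 *		netif_receive_skb(skb);
 *		work++;
 *	}
 */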
6150 
6151 /**
6152  *	netif_receive_skb_list - process many receive buffers from network
6153  *	@head: list of skbs to process.
6154  *
6155  *	Since the return value of netif_receive_skb() is normally ignored
6156  *	and wouldn't be meaningful for a list, this function returns void.
6157  *
6158  *	This function may only be called from softirq context and interrupts
6159  *	should be enabled.
6160  */
6161 void netif_receive_skb_list(struct list_head *head)
6162 {
6163 	struct sk_buff *skb;
6164 
6165 	if (list_empty(head))
6166 		return;
6167 	if (trace_netif_receive_skb_list_entry_enabled()) {
6168 		list_for_each_entry(skb, head, list)
6169 			trace_netif_receive_skb_list_entry(skb);
6170 	}
6171 	netif_receive_skb_list_internal(head);
6172 	trace_netif_receive_skb_list_exit(0);
6173 }
6174 EXPORT_SYMBOL(netif_receive_skb_list);
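
/* Example (illustrative sketch; foo_*/ring names are hypothetical):
 * batching completed frames into a list and making a single
 * netif_receive_skb_list() call amortizes the per-packet cost of
 * entering the stack:
 *
 *	LIST_HEAD(rx_list);
 *
 *	while (work < budget && (skb = foo_next_rx_skb(ring))) {
 *		list_add_tail(&skb->list, &rx_list);
 *		work++;
 *	}
 *	netif_receive_skb_list(&rx_list);
 */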
6175 
6176 /* Network device is going away, flush any packets still pending */
6177 static void flush_backlog(struct work_struct *work)
6178 {
6179 	struct sk_buff *skb, *tmp;
6180 	struct sk_buff_head list;
6181 	struct softnet_data *sd;
6182 
6183 	__skb_queue_head_init(&list);
6184 	local_bh_disable();
6185 	sd = this_cpu_ptr(&softnet_data);
6186 
6187 	backlog_lock_irq_disable(sd);
6188 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
6189 		if (READ_ONCE(skb->dev->reg_state) == NETREG_UNREGISTERING) {
6190 			__skb_unlink(skb, &sd->input_pkt_queue);
6191 			__skb_queue_tail(&list, skb);
6192 			rps_input_queue_head_incr(sd);
6193 		}
6194 	}
6195 	backlog_unlock_irq_enable(sd);
6196 
6197 	local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
6198 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
6199 		if (READ_ONCE(skb->dev->reg_state) == NETREG_UNREGISTERING) {
6200 			__skb_unlink(skb, &sd->process_queue);
6201 			__skb_queue_tail(&list, skb);
6202 			rps_input_queue_head_incr(sd);
6203 		}
6204 	}
6205 	local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
6206 	local_bh_enable();
6207 
6208 	__skb_queue_purge_reason(&list, SKB_DROP_REASON_DEV_READY);
6209 }
6210 
6211 static bool flush_required(int cpu)
6212 {
6213 #if IS_ENABLED(CONFIG_RPS)
6214 	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
6215 	bool do_flush;
6216 
6217 	backlog_lock_irq_disable(sd);
6218 
6219 	/* As insertion into process_queue happens with the rps lock held,
6220 	 * process_queue access may race only with dequeue.
6221 	 */
6222 	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
6223 		   !skb_queue_empty_lockless(&sd->process_queue);
6224 	backlog_unlock_irq_enable(sd);
6225 
6226 	return do_flush;
6227 #endif
6228 	/* without RPS we can't safely check input_pkt_queue: during a
6229 	 * concurrent remote skb_queue_splice() we can detect as empty both
6230 	 * input_pkt_queue and process_queue even if the latter could end-up
6231 	 * containing a lot of packets.
6232 	 */
6233 	return true;
6234 }
6235 
6236 struct flush_backlogs {
6237 	cpumask_t		flush_cpus;
6238 	struct work_struct	w[];
6239 };
6240 
6241 static struct flush_backlogs *flush_backlogs_alloc(void)
6242 {
6243 	return kmalloc(struct_size_t(struct flush_backlogs, w, nr_cpu_ids),
6244 		       GFP_KERNEL);
6245 }
6246 
6247 static struct flush_backlogs *flush_backlogs_fallback;
6248 static DEFINE_MUTEX(flush_backlogs_mutex);
6249 
6250 static void flush_all_backlogs(void)
6251 {
6252 	struct flush_backlogs *ptr = flush_backlogs_alloc();
6253 	unsigned int cpu;
6254 
6255 	if (!ptr) {
6256 		mutex_lock(&flush_backlogs_mutex);
6257 		ptr = flush_backlogs_fallback;
6258 	}
6259 	cpumask_clear(&ptr->flush_cpus);
6260 
6261 	cpus_read_lock();
6262 
6263 	for_each_online_cpu(cpu) {
6264 		if (flush_required(cpu)) {
6265 			INIT_WORK(&ptr->w[cpu], flush_backlog);
6266 			queue_work_on(cpu, system_highpri_wq, &ptr->w[cpu]);
6267 			__cpumask_set_cpu(cpu, &ptr->flush_cpus);
6268 		}
6269 	}
6270 
6271 	/* We can have in-flight packets on the CPUs we are not flushing;
6272 	 * synchronize_net() in unregister_netdevice_many() will take care of
6273 	 * them.
6274 	 */
6275 	for_each_cpu(cpu, &ptr->flush_cpus)
6276 		flush_work(&ptr->w[cpu]);
6277 
6278 	cpus_read_unlock();
6279 
6280 	if (ptr != flush_backlogs_fallback)
6281 		kfree(ptr);
6282 	else
6283 		mutex_unlock(&flush_backlogs_mutex);
6284 }
6285 
6286 static void net_rps_send_ipi(struct softnet_data *remsd)
6287 {
6288 #ifdef CONFIG_RPS
6289 	while (remsd) {
6290 		struct softnet_data *next = remsd->rps_ipi_next;
6291 
6292 		if (cpu_online(remsd->cpu))
6293 			smp_call_function_single_async(remsd->cpu, &remsd->csd);
6294 		remsd = next;
6295 	}
6296 #endif
6297 }
6298 
6299 /*
6300  * net_rps_action_and_irq_enable() sends any pending IPIs for RPS.
6301  * Note: called with local irq disabled, but exits with local irq enabled.
6302  */
6303 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
6304 {
6305 #ifdef CONFIG_RPS
6306 	struct softnet_data *remsd = sd->rps_ipi_list;
6307 
6308 	if (!use_backlog_threads() && remsd) {
6309 		sd->rps_ipi_list = NULL;
6310 
6311 		local_irq_enable();
6312 
6313 		/* Send pending IPIs to kick RPS processing on remote CPUs. */
6314 		net_rps_send_ipi(remsd);
6315 	} else
6316 #endif
6317 		local_irq_enable();
6318 }
6319 
6320 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
6321 {
6322 #ifdef CONFIG_RPS
6323 	return !use_backlog_threads() && sd->rps_ipi_list;
6324 #else
6325 	return false;
6326 #endif
6327 }
6328 
6329 static int process_backlog(struct napi_struct *napi, int quota)
6330 {
6331 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
6332 	bool again = true;
6333 	int work = 0;
6334 
6335 	/* Check if we have pending IPIs; it's better to send them now
6336 	 * than to wait for net_rx_action() to end.
6337 	 */
6338 	if (sd_has_rps_ipi_waiting(sd)) {
6339 		local_irq_disable();
6340 		net_rps_action_and_irq_enable(sd);
6341 	}
6342 
6343 	napi->weight = READ_ONCE(net_hotdata.dev_rx_weight);
6344 	while (again) {
6345 		struct sk_buff *skb;
6346 
6347 		local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
6348 		while ((skb = __skb_dequeue(&sd->process_queue))) {
6349 			local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
6350 			rcu_read_lock();
6351 			__netif_receive_skb(skb);
6352 			rcu_read_unlock();
6353 			if (++work >= quota) {
6354 				rps_input_queue_head_add(sd, work);
6355 				return work;
6356 			}
6357 
6358 			local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
6359 		}
6360 		local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
6361 
6362 		backlog_lock_irq_disable(sd);
6363 		if (skb_queue_empty(&sd->input_pkt_queue)) {
6364 			/*
6365 			 * Inline a custom version of __napi_complete().
6366 			 * Only the current CPU owns and manipulates this napi,
6367 			 * and NAPI_STATE_SCHED is the only possible flag set
6368 			 * on backlog.
6369 			 * We can use a plain write instead of clear_bit(),
6370 			 * and we don't need an smp_mb() memory barrier.
6371 			 */
6372 			napi->state &= NAPIF_STATE_THREADED;
6373 			again = false;
6374 		} else {
6375 			local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
6376 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
6377 						   &sd->process_queue);
6378 			local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
6379 		}
6380 		backlog_unlock_irq_enable(sd);
6381 	}
6382 
6383 	if (work)
6384 		rps_input_queue_head_add(sd, work);
6385 	return work;
6386 }
6387 
6388 /**
6389  * __napi_schedule - schedule for receive
6390  * @n: entry to schedule
6391  *
6392  * The entry's receive function will be scheduled to run.
6393  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
6394  */
6395 void __napi_schedule(struct napi_struct *n)
6396 {
6397 	unsigned long flags;
6398 
6399 	local_irq_save(flags);
6400 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
6401 	local_irq_restore(flags);
6402 }
6403 EXPORT_SYMBOL(__napi_schedule);
6404 
6405 /**
6406  *	napi_schedule_prep - check if napi can be scheduled
6407  *	@n: napi context
6408  *
6409  * Test if NAPI routine is already running, and if not mark
6410  * it as running.  This is used as a condition variable to
6411  * ensure only one NAPI poll instance runs.  We also make
6412  * sure there is no pending NAPI disable.
6413  */
6414 bool napi_schedule_prep(struct napi_struct *n)
6415 {
6416 	unsigned long new, val = READ_ONCE(n->state);
6417 
6418 	do {
6419 		if (unlikely(val & NAPIF_STATE_DISABLE))
6420 			return false;
6421 		new = val | NAPIF_STATE_SCHED;
6422 
6423 		/* Sets the STATE_MISSED bit if STATE_SCHED was already set.
6424 		 * This was suggested by Alexander Duyck, as the compiler
6425 		 * emits better code than:
6426 		 * if (val & NAPIF_STATE_SCHED)
6427 		 *     new |= NAPIF_STATE_MISSED;
6428 		 */
6429 		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
6430 						   NAPIF_STATE_MISSED;
6431 	} while (!try_cmpxchg(&n->state, &val, new));
6432 
6433 	return !(val & NAPIF_STATE_SCHED);
6434 }
6435 EXPORT_SYMBOL(napi_schedule_prep);
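
/* Example (illustrative sketch; priv/foo_* are hypothetical): the usual
 * interrupt-side pattern pairs napi_schedule_prep() with
 * __napi_schedule(), masking device interrupts only when this context
 * actually won the right to poll (napi_schedule() wraps exactly this):
 *
 *	if (napi_schedule_prep(&priv->napi)) {
 *		foo_mask_rx_irq(priv);
 *		__napi_schedule(&priv->napi);
 *	}
 */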
6436 
6437 /**
6438  * __napi_schedule_irqoff - schedule for receive
6439  * @n: entry to schedule
6440  *
6441  * Variant of __napi_schedule() assuming hard irqs are masked.
6442  *
6443  * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
6444  * because the interrupt disabled assumption might not be true
6445  * due to force-threaded interrupts and spinlock substitution.
6446  */
6447 void __napi_schedule_irqoff(struct napi_struct *n)
6448 {
6449 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6450 		____napi_schedule(this_cpu_ptr(&softnet_data), n);
6451 	else
6452 		__napi_schedule(n);
6453 }
6454 EXPORT_SYMBOL(__napi_schedule_irqoff);
6455 
6456 bool napi_complete_done(struct napi_struct *n, int work_done)
6457 {
6458 	unsigned long flags, val, new, timeout = 0;
6459 	bool ret = true;
6460 
6461 	/*
6462 	 * 1) Don't let napi dequeue from the CPU poll list
6463 	 *    just in case it's running on a different CPU.
6464 	 * 2) If we are busy polling, do nothing here; we have
6465 	 *    the guarantee we will be called later.
6466 	 */
6467 	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6468 				 NAPIF_STATE_IN_BUSY_POLL)))
6469 		return false;
6470 
6471 	if (work_done) {
6472 		if (n->gro.bitmask)
6473 			timeout = napi_get_gro_flush_timeout(n);
6474 		n->defer_hard_irqs_count = napi_get_defer_hard_irqs(n);
6475 	}
6476 	if (n->defer_hard_irqs_count > 0) {
6477 		n->defer_hard_irqs_count--;
6478 		timeout = napi_get_gro_flush_timeout(n);
6479 		if (timeout)
6480 			ret = false;
6481 	}
6482 
6483 	/*
6484 	 * When the NAPI instance uses a timeout and keeps postponing
6485 	 * it, we need to bound somehow the time packets are kept in
6486 	 * the GRO layer.
6487 	 */
6488 	gro_flush(&n->gro, !!timeout);
6489 	gro_normal_list(&n->gro);
6490 
6491 	if (unlikely(!list_empty(&n->poll_list))) {
6492 		/* If n->poll_list is not empty, we need to mask irqs */
6493 		local_irq_save(flags);
6494 		list_del_init(&n->poll_list);
6495 		local_irq_restore(flags);
6496 	}
6497 	WRITE_ONCE(n->list_owner, -1);
6498 
6499 	val = READ_ONCE(n->state);
6500 	do {
6501 		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6502 
6503 		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
6504 			      NAPIF_STATE_SCHED_THREADED |
6505 			      NAPIF_STATE_PREFER_BUSY_POLL);
6506 
6507 		/* If STATE_MISSED was set, leave STATE_SCHED set,
6508 		 * because we will call napi->poll() one more time.
6509 		 * This C code was suggested by Alexander Duyck to help gcc.
6510 		 */
6511 		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6512 						    NAPIF_STATE_SCHED;
6513 	} while (!try_cmpxchg(&n->state, &val, new));
6514 
6515 	if (unlikely(val & NAPIF_STATE_MISSED)) {
6516 		__napi_schedule(n);
6517 		return false;
6518 	}
6519 
6520 	if (timeout)
6521 		hrtimer_start(&n->timer, ns_to_ktime(timeout),
6522 			      HRTIMER_MODE_REL_PINNED);
6523 	return ret;
6524 }
6525 EXPORT_SYMBOL(napi_complete_done);
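
/* Example (illustrative sketch; priv/foo_* are hypothetical): at the
 * end of a poll round a driver should re-arm its interrupt only if
 * napi_complete_done() actually completed the instance; it returns
 * false when deferral (gro_flush_timeout/defer_hard_irqs) or a missed
 * schedule keeps polling going:
 *
 *	if (work < budget && napi_complete_done(napi, work))
 *		foo_unmask_rx_irq(priv);
 *	return work;
 */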
6526 
6527 static void skb_defer_free_flush(struct softnet_data *sd)
6528 {
6529 	struct sk_buff *skb, *next;
6530 
6531 	/* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
6532 	if (!READ_ONCE(sd->defer_list))
6533 		return;
6534 
6535 	spin_lock(&sd->defer_lock);
6536 	skb = sd->defer_list;
6537 	sd->defer_list = NULL;
6538 	sd->defer_count = 0;
6539 	spin_unlock(&sd->defer_lock);
6540 
6541 	while (skb != NULL) {
6542 		next = skb->next;
6543 		napi_consume_skb(skb, 1);
6544 		skb = next;
6545 	}
6546 }
6547 
6548 #if defined(CONFIG_NET_RX_BUSY_POLL)
6549 
6550 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
6551 {
6552 	if (!skip_schedule) {
6553 		gro_normal_list(&napi->gro);
6554 		__napi_schedule(napi);
6555 		return;
6556 	}
6557 
6558 	/* Flush too old packets. If HZ < 1000, flush all packets */
6559 	gro_flush(&napi->gro, HZ >= 1000);
6560 	gro_normal_list(&napi->gro);
6561 
6562 	clear_bit(NAPI_STATE_SCHED, &napi->state);
6563 }
6564 
6565 enum {
6566 	NAPI_F_PREFER_BUSY_POLL	= 1,
6567 	NAPI_F_END_ON_RESCHED	= 2,
6568 };
6569 
6570 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
6571 			   unsigned flags, u16 budget)
6572 {
6573 	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
6574 	bool skip_schedule = false;
6575 	unsigned long timeout;
6576 	int rc;
6577 
6578 	/* Busy polling means there is a high chance device driver hard irq
6579 	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6580 	 * set in napi_schedule_prep().
6581 	 * Since we are about to call napi->poll() once more, we can safely
6582 	 * clear NAPI_STATE_MISSED.
6583 	 *
6584 	 * Note: x86 could use a single "lock and ..." instruction
6585 	 * to perform these two clear_bit() calls.
6586 	 */
6587 	clear_bit(NAPI_STATE_MISSED, &napi->state);
6588 	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6589 
6590 	local_bh_disable();
6591 	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
6592 
6593 	if (flags & NAPI_F_PREFER_BUSY_POLL) {
6594 		napi->defer_hard_irqs_count = napi_get_defer_hard_irqs(napi);
6595 		timeout = napi_get_gro_flush_timeout(napi);
6596 		if (napi->defer_hard_irqs_count && timeout) {
6597 			hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
6598 			skip_schedule = true;
6599 		}
6600 	}
6601 
6602 	/* All we really want here is to re-enable device interrupts.
6603 	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6604 	 */
6605 	rc = napi->poll(napi, budget);
6606 	/* We can't gro_normal_list() here, because napi->poll() might have
6607 	 * rearmed the napi (napi_complete_done()) in which case it could
6608 	 * already be running on another CPU.
6609 	 */
6610 	trace_napi_poll(napi, rc, budget);
6611 	netpoll_poll_unlock(have_poll_lock);
6612 	if (rc == budget)
6613 		__busy_poll_stop(napi, skip_schedule);
6614 	bpf_net_ctx_clear(bpf_net_ctx);
6615 	local_bh_enable();
6616 }
6617 
6618 static void __napi_busy_loop(unsigned int napi_id,
6619 		      bool (*loop_end)(void *, unsigned long),
6620 		      void *loop_end_arg, unsigned flags, u16 budget)
6621 {
6622 	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6623 	int (*napi_poll)(struct napi_struct *napi, int budget);
6624 	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
6625 	void *have_poll_lock = NULL;
6626 	struct napi_struct *napi;
6627 
6628 	WARN_ON_ONCE(!rcu_read_lock_held());
6629 
6630 restart:
6631 	napi_poll = NULL;
6632 
6633 	napi = napi_by_id(napi_id);
6634 	if (!napi)
6635 		return;
6636 
6637 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6638 		preempt_disable();
6639 	for (;;) {
6640 		int work = 0;
6641 
6642 		local_bh_disable();
6643 		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
6644 		if (!napi_poll) {
6645 			unsigned long val = READ_ONCE(napi->state);
6646 
6647 			/* If multiple threads are competing for this napi,
6648 			 * we avoid dirtying napi->state as much as we can.
6649 			 */
6650 			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6651 				   NAPIF_STATE_IN_BUSY_POLL)) {
6652 				if (flags & NAPI_F_PREFER_BUSY_POLL)
6653 					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6654 				goto count;
6655 			}
6656 			if (cmpxchg(&napi->state, val,
6657 				    val | NAPIF_STATE_IN_BUSY_POLL |
6658 					  NAPIF_STATE_SCHED) != val) {
6659 				if (flags & NAPI_F_PREFER_BUSY_POLL)
6660 					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6661 				goto count;
6662 			}
6663 			have_poll_lock = netpoll_poll_lock(napi);
6664 			napi_poll = napi->poll;
6665 		}
6666 		work = napi_poll(napi, budget);
6667 		trace_napi_poll(napi, work, budget);
6668 		gro_normal_list(&napi->gro);
6669 count:
6670 		if (work > 0)
6671 			__NET_ADD_STATS(dev_net(napi->dev),
6672 					LINUX_MIB_BUSYPOLLRXPACKETS, work);
6673 		skb_defer_free_flush(this_cpu_ptr(&softnet_data));
6674 		bpf_net_ctx_clear(bpf_net_ctx);
6675 		local_bh_enable();
6676 
6677 		if (!loop_end || loop_end(loop_end_arg, start_time))
6678 			break;
6679 
6680 		if (unlikely(need_resched())) {
6681 			if (flags & NAPI_F_END_ON_RESCHED)
6682 				break;
6683 			if (napi_poll)
6684 				busy_poll_stop(napi, have_poll_lock, flags, budget);
6685 			if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6686 				preempt_enable();
6687 			rcu_read_unlock();
6688 			cond_resched();
6689 			rcu_read_lock();
6690 			if (loop_end(loop_end_arg, start_time))
6691 				return;
6692 			goto restart;
6693 		}
6694 		cpu_relax();
6695 	}
6696 	if (napi_poll)
6697 		busy_poll_stop(napi, have_poll_lock, flags, budget);
6698 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6699 		preempt_enable();
6700 }
6701 
6702 void napi_busy_loop_rcu(unsigned int napi_id,
6703 			bool (*loop_end)(void *, unsigned long),
6704 			void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6705 {
6706 	unsigned flags = NAPI_F_END_ON_RESCHED;
6707 
6708 	if (prefer_busy_poll)
6709 		flags |= NAPI_F_PREFER_BUSY_POLL;
6710 
6711 	__napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
6712 }
6713 
6714 void napi_busy_loop(unsigned int napi_id,
6715 		    bool (*loop_end)(void *, unsigned long),
6716 		    void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6717 {
6718 	unsigned flags = prefer_busy_poll ? NAPI_F_PREFER_BUSY_POLL : 0;
6719 
6720 	rcu_read_lock();
6721 	__napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
6722 	rcu_read_unlock();
6723 }
6724 EXPORT_SYMBOL(napi_busy_loop);
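
/* Example (illustrative sketch; foo_* names are hypothetical): callers
 * supply a loop_end predicate that stops the spin once data arrives or
 * the time budget expires, roughly how sk_busy_loop() drives this for
 * sockets:
 *
 *	static bool foo_loop_end(void *arg, unsigned long start_time)
 *	{
 *		return foo_data_ready(arg) || busy_loop_timeout(start_time);
 *	}
 *
 *	napi_busy_loop(napi_id, foo_loop_end, ctx, false, BUSY_POLL_BUDGET);
 */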
6725 
6726 void napi_suspend_irqs(unsigned int napi_id)
6727 {
6728 	struct napi_struct *napi;
6729 
6730 	rcu_read_lock();
6731 	napi = napi_by_id(napi_id);
6732 	if (napi) {
6733 		unsigned long timeout = napi_get_irq_suspend_timeout(napi);
6734 
6735 		if (timeout)
6736 			hrtimer_start(&napi->timer, ns_to_ktime(timeout),
6737 				      HRTIMER_MODE_REL_PINNED);
6738 	}
6739 	rcu_read_unlock();
6740 }
6741 
6742 void napi_resume_irqs(unsigned int napi_id)
6743 {
6744 	struct napi_struct *napi;
6745 
6746 	rcu_read_lock();
6747 	napi = napi_by_id(napi_id);
6748 	if (napi) {
6749 		/* If irq_suspend_timeout is set to 0 between the call to
6750 		 * napi_suspend_irqs and now, the original value still
6751 		 * determines the safety timeout as intended and napi_watchdog
6752 		 * will resume irq processing.
6753 		 */
6754 		if (napi_get_irq_suspend_timeout(napi)) {
6755 			local_bh_disable();
6756 			napi_schedule(napi);
6757 			local_bh_enable();
6758 		}
6759 	}
6760 	rcu_read_unlock();
6761 }
6762 
6763 #endif /* CONFIG_NET_RX_BUSY_POLL */
6764 
6765 static void __napi_hash_add_with_id(struct napi_struct *napi,
6766 				    unsigned int napi_id)
6767 {
6768 	napi->gro.cached_napi_id = napi_id;
6769 
6770 	WRITE_ONCE(napi->napi_id, napi_id);
6771 	hlist_add_head_rcu(&napi->napi_hash_node,
6772 			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6773 }
6774 
6775 static void napi_hash_add_with_id(struct napi_struct *napi,
6776 				  unsigned int napi_id)
6777 {
6778 	unsigned long flags;
6779 
6780 	spin_lock_irqsave(&napi_hash_lock, flags);
6781 	WARN_ON_ONCE(napi_by_id(napi_id));
6782 	__napi_hash_add_with_id(napi, napi_id);
6783 	spin_unlock_irqrestore(&napi_hash_lock, flags);
6784 }
6785 
6786 static void napi_hash_add(struct napi_struct *napi)
6787 {
6788 	unsigned long flags;
6789 
6790 	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
6791 		return;
6792 
6793 	spin_lock_irqsave(&napi_hash_lock, flags);
6794 
6795 	/* 0..NR_CPUS range is reserved for sender_cpu use */
6796 	do {
6797 		if (unlikely(!napi_id_valid(++napi_gen_id)))
6798 			napi_gen_id = MIN_NAPI_ID;
6799 	} while (napi_by_id(napi_gen_id));
6800 
6801 	__napi_hash_add_with_id(napi, napi_gen_id);
6802 
6803 	spin_unlock_irqrestore(&napi_hash_lock, flags);
6804 }
6805 
6806 /* Warning: the caller is responsible for making sure an RCU grace
6807  * period has elapsed before freeing the memory containing @napi.
6808  */
6809 static void napi_hash_del(struct napi_struct *napi)
6810 {
6811 	unsigned long flags;
6812 
6813 	spin_lock_irqsave(&napi_hash_lock, flags);
6814 
6815 	hlist_del_init_rcu(&napi->napi_hash_node);
6816 
6817 	spin_unlock_irqrestore(&napi_hash_lock, flags);
6818 }
6819 
6820 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6821 {
6822 	struct napi_struct *napi;
6823 
6824 	napi = container_of(timer, struct napi_struct, timer);
6825 
6826 	/* Note: we use a relaxed variant of napi_schedule_prep() not setting
6827 	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6828 	 */
6829 	if (!napi_disable_pending(napi) &&
6830 	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
6831 		clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6832 		__napi_schedule_irqoff(napi);
6833 	}
6834 
6835 	return HRTIMER_NORESTART;
6836 }
6837 
6838 int dev_set_threaded(struct net_device *dev, bool threaded)
6839 {
6840 	struct napi_struct *napi;
6841 	int err = 0;
6842 
6843 	netdev_assert_locked_or_invisible(dev);
6844 
6845 	if (dev->threaded == threaded)
6846 		return 0;
6847 
6848 	if (threaded) {
6849 		list_for_each_entry(napi, &dev->napi_list, dev_list) {
6850 			if (!napi->thread) {
6851 				err = napi_kthread_create(napi);
6852 				if (err) {
6853 					threaded = false;
6854 					break;
6855 				}
6856 			}
6857 		}
6858 	}
6859 
6860 	WRITE_ONCE(dev->threaded, threaded);
6861 
6862 	/* Make sure kthread is created before THREADED bit
6863 	 * is set.
6864 	 */
6865 	smp_mb__before_atomic();
6866 
6867 	/* Setting/unsetting threaded mode on a napi might not immediately
6868 	 * take effect if the current napi instance is actively being
6869 	 * polled. In this case, the switch between threaded mode and
6870 	 * softirq mode will happen in the next round of napi_schedule().
6871 	 * This should not cause hiccups/stalls to the live traffic.
6872 	 */
6873 	list_for_each_entry(napi, &dev->napi_list, dev_list)
6874 		assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
6875 
6876 	return err;
6877 }
6878 EXPORT_SYMBOL(dev_set_threaded);
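
/* Example (illustrative sketch): threaded NAPI is normally toggled from
 * user space via /sys/class/net/<dev>/threaded, but a driver may also
 * opt in itself, e.g. before register_netdev() while the device is
 * still invisible:
 *
 *	dev_set_threaded(dev, true);
 */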
6879 
6880 /**
6881  * netif_queue_set_napi - Associate queue with the napi
6882  * @dev: device to which NAPI and queue belong
6883  * @queue_index: Index of queue
6884  * @type: queue type as RX or TX
6885  * @napi: NAPI context, pass NULL to clear previously set NAPI
6886  *
6887  * Set queue with its corresponding napi context. This should be done after
6888  * registering the NAPI handler for the queue-vector and the queues have been
6889  * mapped to the corresponding interrupt vector.
6890  */
6891 void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
6892 			  enum netdev_queue_type type, struct napi_struct *napi)
6893 {
6894 	struct netdev_rx_queue *rxq;
6895 	struct netdev_queue *txq;
6896 
6897 	if (WARN_ON_ONCE(napi && !napi->dev))
6898 		return;
6899 	if (dev->reg_state >= NETREG_REGISTERED)
6900 		ASSERT_RTNL();
6901 
6902 	switch (type) {
6903 	case NETDEV_QUEUE_TYPE_RX:
6904 		rxq = __netif_get_rx_queue(dev, queue_index);
6905 		rxq->napi = napi;
6906 		return;
6907 	case NETDEV_QUEUE_TYPE_TX:
6908 		txq = netdev_get_tx_queue(dev, queue_index);
6909 		txq->napi = napi;
6910 		return;
6911 	default:
6912 		return;
6913 	}
6914 }
6915 EXPORT_SYMBOL(netif_queue_set_napi);
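
/* Example (illustrative sketch; ring/qid are hypothetical driver
 * state): after registering NAPI handlers and mapping queues to
 * interrupt vectors, a driver associates each queue with its napi:
 *
 *	netif_queue_set_napi(dev, qid, NETDEV_QUEUE_TYPE_RX, &ring->napi);
 *	netif_queue_set_napi(dev, qid, NETDEV_QUEUE_TYPE_TX, &ring->napi);
 */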
6916 
6917 static void
6918 netif_napi_irq_notify(struct irq_affinity_notify *notify,
6919 		      const cpumask_t *mask)
6920 {
6921 	struct napi_struct *napi =
6922 		container_of(notify, struct napi_struct, notify);
6923 #ifdef CONFIG_RFS_ACCEL
6924 	struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap;
6925 	int err;
6926 #endif
6927 
6928 	if (napi->config && napi->dev->irq_affinity_auto)
6929 		cpumask_copy(&napi->config->affinity_mask, mask);
6930 
6931 #ifdef CONFIG_RFS_ACCEL
6932 	if (napi->dev->rx_cpu_rmap_auto) {
6933 		err = cpu_rmap_update(rmap, napi->napi_rmap_idx, mask);
6934 		if (err)
6935 			netdev_warn(napi->dev, "RMAP update failed (%d)\n",
6936 				    err);
6937 	}
6938 #endif
6939 }
6940 
6941 #ifdef CONFIG_RFS_ACCEL
6942 static void netif_napi_affinity_release(struct kref *ref)
6943 {
6944 	struct napi_struct *napi =
6945 		container_of(ref, struct napi_struct, notify.kref);
6946 	struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap;
6947 
6948 	netdev_assert_locked(napi->dev);
6949 	WARN_ON(test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER,
6950 				   &napi->state));
6951 
6952 	if (!napi->dev->rx_cpu_rmap_auto)
6953 		return;
6954 	rmap->obj[napi->napi_rmap_idx] = NULL;
6955 	napi->napi_rmap_idx = -1;
6956 	cpu_rmap_put(rmap);
6957 }
6958 
6959 int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs)
6960 {
6961 	if (dev->rx_cpu_rmap_auto)
6962 		return 0;
6963 
6964 	dev->rx_cpu_rmap = alloc_irq_cpu_rmap(num_irqs);
6965 	if (!dev->rx_cpu_rmap)
6966 		return -ENOMEM;
6967 
6968 	dev->rx_cpu_rmap_auto = true;
6969 	return 0;
6970 }
6971 EXPORT_SYMBOL(netif_enable_cpu_rmap);
6972 
6973 static void netif_del_cpu_rmap(struct net_device *dev)
6974 {
6975 	struct cpu_rmap *rmap = dev->rx_cpu_rmap;
6976 
6977 	if (!dev->rx_cpu_rmap_auto)
6978 		return;
6979 
6980 	/* Free the rmap */
6981 	cpu_rmap_put(rmap);
6982 	dev->rx_cpu_rmap = NULL;
6983 	dev->rx_cpu_rmap_auto = false;
6984 }
6985 
6986 #else
6987 static void netif_napi_affinity_release(struct kref *ref)
6988 {
6989 }
6990 
6991 int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs)
6992 {
6993 	return 0;
6994 }
6995 EXPORT_SYMBOL(netif_enable_cpu_rmap);
6996 
6997 static void netif_del_cpu_rmap(struct net_device *dev)
6998 {
6999 }
7000 #endif
7001 
7002 void netif_set_affinity_auto(struct net_device *dev)
7003 {
7004 	unsigned int i, maxqs, numa;
7005 
7006 	maxqs = max(dev->num_tx_queues, dev->num_rx_queues);
7007 	numa = dev_to_node(&dev->dev);
7008 
7009 	for (i = 0; i < maxqs; i++)
7010 		cpumask_set_cpu(cpumask_local_spread(i, numa),
7011 				&dev->napi_config[i].affinity_mask);
7012 
7013 	dev->irq_affinity_auto = true;
7014 }
7015 EXPORT_SYMBOL(netif_set_affinity_auto);
7016 
7017 void netif_napi_set_irq_locked(struct napi_struct *napi, int irq)
7018 {
7019 	int rc;
7020 
7021 	netdev_assert_locked_or_invisible(napi->dev);
7022 
7023 	if (napi->irq == irq)
7024 		return;
7025 
7026 	/* Remove existing resources */
7027 	if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state))
7028 		irq_set_affinity_notifier(napi->irq, NULL);
7029 
7030 	napi->irq = irq;
7031 	if (irq < 0 ||
7032 	    (!napi->dev->rx_cpu_rmap_auto && !napi->dev->irq_affinity_auto))
7033 		return;
7034 
7035 	/* Abort for buggy drivers */
7036 	if (napi->dev->irq_affinity_auto && WARN_ON_ONCE(!napi->config))
7037 		return;
7038 
7039 #ifdef CONFIG_RFS_ACCEL
7040 	if (napi->dev->rx_cpu_rmap_auto) {
7041 		rc = cpu_rmap_add(napi->dev->rx_cpu_rmap, napi);
7042 		if (rc < 0)
7043 			return;
7044 
7045 		cpu_rmap_get(napi->dev->rx_cpu_rmap);
7046 		napi->napi_rmap_idx = rc;
7047 	}
7048 #endif
7049 
7050 	/* Use core IRQ notifier */
7051 	napi->notify.notify = netif_napi_irq_notify;
7052 	napi->notify.release = netif_napi_affinity_release;
7053 	rc = irq_set_affinity_notifier(irq, &napi->notify);
7054 	if (rc) {
7055 		netdev_warn(napi->dev, "Unable to set IRQ notifier (%d)\n",
7056 			    rc);
7057 		goto put_rmap;
7058 	}
7059 
7060 	set_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state);
7061 	return;
7062 
7063 put_rmap:
7064 #ifdef CONFIG_RFS_ACCEL
7065 	if (napi->dev->rx_cpu_rmap_auto) {
7066 		napi->dev->rx_cpu_rmap->obj[napi->napi_rmap_idx] = NULL;
7067 		cpu_rmap_put(napi->dev->rx_cpu_rmap);
7068 		napi->napi_rmap_idx = -1;
7069 	}
7070 #endif
7071 	napi->notify.notify = NULL;
7072 	napi->notify.release = NULL;
7073 }
7074 EXPORT_SYMBOL(netif_napi_set_irq_locked);
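
/* Example (illustrative sketch; ring/vec/num_vecs are hypothetical
 * driver state): a driver that wants core-managed IRQ affinity enables
 * it once at probe time and then ties each napi to its vector:
 *
 *	err = netif_enable_cpu_rmap(dev, num_vecs);
 *	netif_set_affinity_auto(dev);
 *	...
 *	netif_napi_set_irq(&ring->napi, pci_irq_vector(pdev, vec));
 */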
7075 
7076 static void napi_restore_config(struct napi_struct *n)
7077 {
7078 	n->defer_hard_irqs = n->config->defer_hard_irqs;
7079 	n->gro_flush_timeout = n->config->gro_flush_timeout;
7080 	n->irq_suspend_timeout = n->config->irq_suspend_timeout;
7081 
7082 	if (n->dev->irq_affinity_auto &&
7083 	    test_bit(NAPI_STATE_HAS_NOTIFIER, &n->state))
7084 		irq_set_affinity(n->irq, &n->config->affinity_mask);
7085 
7086 	/* A NAPI ID might be stored in the config; if so, use it. If not,
7087 	 * use napi_hash_add() to generate one for us.
7088 	 */
7089 	if (n->config->napi_id) {
7090 		napi_hash_add_with_id(n, n->config->napi_id);
7091 	} else {
7092 		napi_hash_add(n);
7093 		n->config->napi_id = n->napi_id;
7094 	}
7095 }
7096 
7097 static void napi_save_config(struct napi_struct *n)
7098 {
7099 	n->config->defer_hard_irqs = n->defer_hard_irqs;
7100 	n->config->gro_flush_timeout = n->gro_flush_timeout;
7101 	n->config->irq_suspend_timeout = n->irq_suspend_timeout;
7102 	napi_hash_del(n);
7103 }
7104 
7105 /* Netlink wants the NAPI list to be sorted by ID, if adding a NAPI which will
7106  * inherit an existing ID try to insert it at the right position.
7107  */
7108 static void
7109 netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi)
7110 {
7111 	unsigned int new_id, pos_id;
7112 	struct list_head *higher;
7113 	struct napi_struct *pos;
7114 
7115 	new_id = UINT_MAX;
7116 	if (napi->config && napi->config->napi_id)
7117 		new_id = napi->config->napi_id;
7118 
7119 	higher = &dev->napi_list;
7120 	list_for_each_entry(pos, &dev->napi_list, dev_list) {
7121 		if (napi_id_valid(pos->napi_id))
7122 			pos_id = pos->napi_id;
7123 		else if (pos->config)
7124 			pos_id = pos->config->napi_id;
7125 		else
7126 			pos_id = UINT_MAX;
7127 
7128 		if (pos_id <= new_id)
7129 			break;
7130 		higher = &pos->dev_list;
7131 	}
7132 	list_add_rcu(&napi->dev_list, higher); /* adds after higher */
7133 }
7134 
7135 /* Double check that napi_get_frags() allocates skbs with
7136  * skb->head being backed by slab, not a page fragment.
7137  * This is to make sure bug fixed in 3226b158e67c
7138  * ("net: avoid 32 x truesize under-estimation for tiny skbs")
7139  * does not accidentally come back.
7140  */
7141 static void napi_get_frags_check(struct napi_struct *napi)
7142 {
7143 	struct sk_buff *skb;
7144 
7145 	local_bh_disable();
7146 	skb = napi_get_frags(napi);
7147 	WARN_ON_ONCE(skb && skb->head_frag);
7148 	napi_free_frags(napi);
7149 	local_bh_enable();
7150 }
7151 
7152 void netif_napi_add_weight_locked(struct net_device *dev,
7153 				  struct napi_struct *napi,
7154 				  int (*poll)(struct napi_struct *, int),
7155 				  int weight)
7156 {
7157 	netdev_assert_locked(dev);
7158 	if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
7159 		return;
7160 
7161 	INIT_LIST_HEAD(&napi->poll_list);
7162 	INIT_HLIST_NODE(&napi->napi_hash_node);
7163 	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
7164 	napi->timer.function = napi_watchdog;
7165 	gro_init(&napi->gro);
7166 	napi->skb = NULL;
7167 	napi->poll = poll;
7168 	if (weight > NAPI_POLL_WEIGHT)
7169 		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
7170 				weight);
7171 	napi->weight = weight;
7172 	napi->dev = dev;
7173 #ifdef CONFIG_NETPOLL
7174 	napi->poll_owner = -1;
7175 #endif
7176 	napi->list_owner = -1;
7177 	set_bit(NAPI_STATE_SCHED, &napi->state);
7178 	set_bit(NAPI_STATE_NPSVC, &napi->state);
7179 	netif_napi_dev_list_add(dev, napi);
7180 
7181 	/* Default settings from sysfs are applied to all NAPIs. Any per-NAPI
7182 	 * configuration will be loaded in napi_enable().
7183 	 */
7184 	napi_set_defer_hard_irqs(napi, READ_ONCE(dev->napi_defer_hard_irqs));
7185 	napi_set_gro_flush_timeout(napi, READ_ONCE(dev->gro_flush_timeout));
7186 
7187 	napi_get_frags_check(napi);
7188 	/* Create kthread for this napi if dev->threaded is set.
7189 	 * Clear dev->threaded if kthread creation failed so that
7190 	 * threaded mode will not be enabled in napi_enable().
7191 	 */
7192 	if (dev->threaded && napi_kthread_create(napi))
7193 		dev->threaded = false;
7194 	netif_napi_set_irq_locked(napi, -1);
7195 }
7196 EXPORT_SYMBOL(netif_napi_add_weight_locked);
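
/* Example (illustrative sketch; ring/foo_poll are hypothetical): most
 * drivers use the netif_napi_add() wrapper, which supplies the default
 * weight and takes the netdev lock:
 *
 *	netif_napi_add(dev, &ring->napi, foo_poll);
 */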
7197 
7198 void napi_disable_locked(struct napi_struct *n)
7199 {
7200 	unsigned long val, new;
7201 
7202 	might_sleep();
7203 	netdev_assert_locked(n->dev);
7204 
7205 	set_bit(NAPI_STATE_DISABLE, &n->state);
7206 
7207 	val = READ_ONCE(n->state);
7208 	do {
7209 		while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
7210 			usleep_range(20, 200);
7211 			val = READ_ONCE(n->state);
7212 		}
7213 
7214 		new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
7215 		new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
7216 	} while (!try_cmpxchg(&n->state, &val, new));
7217 
7218 	hrtimer_cancel(&n->timer);
7219 
7220 	if (n->config)
7221 		napi_save_config(n);
7222 	else
7223 		napi_hash_del(n);
7224 
7225 	clear_bit(NAPI_STATE_DISABLE, &n->state);
7226 }
7227 EXPORT_SYMBOL(napi_disable_locked);
7228 
7229 /**
7230  * napi_disable() - prevent NAPI from scheduling
7231  * @n: NAPI context
7232  *
7233  * Stop NAPI from being scheduled on this context.
7234  * Waits till any outstanding processing completes.
7235  * Takes netdev_lock() for associated net_device.
7236  */
7237 void napi_disable(struct napi_struct *n)
7238 {
7239 	netdev_lock(n->dev);
7240 	napi_disable_locked(n);
7241 	netdev_unlock(n->dev);
7242 }
7243 EXPORT_SYMBOL(napi_disable);
7244 
7245 void napi_enable_locked(struct napi_struct *n)
7246 {
7247 	unsigned long new, val = READ_ONCE(n->state);
7248 
7249 	if (n->config)
7250 		napi_restore_config(n);
7251 	else
7252 		napi_hash_add(n);
7253 
7254 	do {
7255 		BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
7256 
7257 		new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
7258 		if (n->dev->threaded && n->thread)
7259 			new |= NAPIF_STATE_THREADED;
7260 	} while (!try_cmpxchg(&n->state, &val, new));
7261 }
7262 EXPORT_SYMBOL(napi_enable_locked);
7263 
7264 /**
7265  * napi_enable() - enable NAPI scheduling
7266  * @n: NAPI context
7267  *
7268  * Enable scheduling of a NAPI instance.
7269  * Must be paired with napi_disable().
7270  * Takes netdev_lock() for associated net_device.
7271  */
7272 void napi_enable(struct napi_struct *n)
7273 {
7274 	netdev_lock(n->dev);
7275 	napi_enable_locked(n);
7276 	netdev_unlock(n->dev);
7277 }
7278 EXPORT_SYMBOL(napi_enable);
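
/* Example (illustrative sketch; foo_* names are hypothetical):
 * napi_enable()/napi_disable() bracket datapath bring-up and teardown,
 * typically from ndo_open/ndo_stop:
 *
 *	static int foo_open(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		napi_enable(&priv->napi);
 *		foo_start_rx(priv);
 *		return 0;
 *	}
 *
 *	static int foo_stop(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		foo_stop_rx(priv);
 *		napi_disable(&priv->napi);
 *		return 0;
 *	}
 */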
7279 
7280 /* Must be called in process context */
7281 void __netif_napi_del_locked(struct napi_struct *napi)
7282 {
7283 	netdev_assert_locked(napi->dev);
7284 
7285 	if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
7286 		return;
7287 
7288 	/* Make sure NAPI is disabled (or was never enabled). */
7289 	WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state));
7290 
7291 	if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state))
7292 		irq_set_affinity_notifier(napi->irq, NULL);
7293 
7294 	if (napi->config) {
7295 		napi->index = -1;
7296 		napi->config = NULL;
7297 	}
7298 
7299 	list_del_rcu(&napi->dev_list);
7300 	napi_free_frags(napi);
7301 
7302 	gro_cleanup(&napi->gro);
7303 
7304 	if (napi->thread) {
7305 		kthread_stop(napi->thread);
7306 		napi->thread = NULL;
7307 	}
7308 }
7309 EXPORT_SYMBOL(__netif_napi_del_locked);
7310 
7311 static int __napi_poll(struct napi_struct *n, bool *repoll)
7312 {
7313 	int work, weight;
7314 
7315 	weight = n->weight;
7316 
7317 	/* This NAPI_STATE_SCHED test is for avoiding a race
7318 	 * with netpoll's poll_napi().  Only the entity which
7319 	 * obtains the lock and sees NAPI_STATE_SCHED set will
7320 	 * actually make the ->poll() call.  Therefore we avoid
7321 	 * accidentally calling ->poll() when NAPI is not scheduled.
7322 	 */
7323 	work = 0;
7324 	if (napi_is_scheduled(n)) {
7325 		work = n->poll(n, weight);
7326 		trace_napi_poll(n, work, weight);
7327 
7328 		xdp_do_check_flushed(n);
7329 	}
7330 
7331 	if (unlikely(work > weight))
7332 		netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
7333 				n->poll, work, weight);
7334 
7335 	if (likely(work < weight))
7336 		return work;
7337 
7338 	/* Drivers must not modify the NAPI state if they
7339 	 * consume the entire weight.  In such cases this code
7340 	 * still "owns" the NAPI instance and therefore can
7341 	 * move the instance around on the list at-will.
7342 	 */
7343 	if (unlikely(napi_disable_pending(n))) {
7344 		napi_complete(n);
7345 		return work;
7346 	}
7347 
7348 	/* The NAPI context has more processing work, but busy-polling
7349 	 * is preferred. Exit early.
7350 	 */
7351 	if (napi_prefer_busy_poll(n)) {
7352 		if (napi_complete_done(n, work)) {
7353 			/* If timeout is not set, we need to make sure
7354 			 * that the NAPI is re-scheduled.
7355 			 */
7356 			napi_schedule(n);
7357 		}
7358 		return work;
7359 	}
7360 
7361 	/* Flush too old packets. If HZ < 1000, flush all packets */
7362 	gro_flush(&n->gro, HZ >= 1000);
7363 	gro_normal_list(&n->gro);
7364 
7365 	/* Some drivers may have called napi_schedule
7366 	 * prior to exhausting their budget.
7367 	 */
7368 	if (unlikely(!list_empty(&n->poll_list))) {
7369 		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
7370 			     n->dev ? n->dev->name : "backlog");
7371 		return work;
7372 	}
7373 
7374 	*repoll = true;
7375 
7376 	return work;
7377 }
7378 
7379 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
7380 {
7381 	bool do_repoll = false;
7382 	void *have;
7383 	int work;
7384 
7385 	list_del_init(&n->poll_list);
7386 
7387 	have = netpoll_poll_lock(n);
7388 
7389 	work = __napi_poll(n, &do_repoll);
7390 
7391 	if (do_repoll)
7392 		list_add_tail(&n->poll_list, repoll);
7393 
7394 	netpoll_poll_unlock(have);
7395 
7396 	return work;
7397 }
7398 
7399 static int napi_thread_wait(struct napi_struct *napi)
7400 {
7401 	set_current_state(TASK_INTERRUPTIBLE);
7402 
7403 	while (!kthread_should_stop()) {
7404 		/* Testing SCHED_THREADED bit here to make sure the current
7405 		 * kthread owns this napi and could poll on this napi.
7406 		 * Testing SCHED bit is not enough because SCHED bit might be
7407 		 * set by some other busy poll thread or by napi_disable().
7408 		 */
7409 		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) {
7410 			WARN_ON(!list_empty(&napi->poll_list));
7411 			__set_current_state(TASK_RUNNING);
7412 			return 0;
7413 		}
7414 
7415 		schedule();
7416 		set_current_state(TASK_INTERRUPTIBLE);
7417 	}
7418 	__set_current_state(TASK_RUNNING);
7419 
7420 	return -1;
7421 }
7422 
7423 static void napi_threaded_poll_loop(struct napi_struct *napi)
7424 {
7425 	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
7426 	struct softnet_data *sd;
7427 	unsigned long last_qs = jiffies;
7428 
7429 	for (;;) {
7430 		bool repoll = false;
7431 		void *have;
7432 
7433 		local_bh_disable();
7434 		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
7435 
7436 		sd = this_cpu_ptr(&softnet_data);
7437 		sd->in_napi_threaded_poll = true;
7438 
7439 		have = netpoll_poll_lock(napi);
7440 		__napi_poll(napi, &repoll);
7441 		netpoll_poll_unlock(have);
7442 
7443 		sd->in_napi_threaded_poll = false;
7444 		barrier();
7445 
7446 		if (sd_has_rps_ipi_waiting(sd)) {
7447 			local_irq_disable();
7448 			net_rps_action_and_irq_enable(sd);
7449 		}
7450 		skb_defer_free_flush(sd);
7451 		bpf_net_ctx_clear(bpf_net_ctx);
7452 		local_bh_enable();
7453 
7454 		if (!repoll)
7455 			break;
7456 
7457 		rcu_softirq_qs_periodic(last_qs);
7458 		cond_resched();
7459 	}
7460 }
7461 
7462 static int napi_threaded_poll(void *data)
7463 {
7464 	struct napi_struct *napi = data;
7465 
7466 	while (!napi_thread_wait(napi))
7467 		napi_threaded_poll_loop(napi);
7468 
7469 	return 0;
7470 }
7471 
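/* Illustrative sketch (not from this file): napi_threaded_poll() only runs
 * once a device has been switched to threaded NAPI mode, either through the
 * /sys/class/net/<iface>/threaded attribute or by a driver calling
 * dev_set_threaded(), typically at probe time ("foo_dev" is hypothetical):
 *
 *	err = dev_set_threaded(foo_dev, true);
 */
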
7472 static __latent_entropy void net_rx_action(void)
7473 {
7474 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
7475 	unsigned long time_limit = jiffies +
7476 		usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));
7477 	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
7478 	int budget = READ_ONCE(net_hotdata.netdev_budget);
7479 	LIST_HEAD(list);
7480 	LIST_HEAD(repoll);
7481 
7482 	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
7483 start:
7484 	sd->in_net_rx_action = true;
7485 	local_irq_disable();
7486 	list_splice_init(&sd->poll_list, &list);
7487 	local_irq_enable();
7488 
7489 	for (;;) {
7490 		struct napi_struct *n;
7491 
7492 		skb_defer_free_flush(sd);
7493 
7494 		if (list_empty(&list)) {
7495 			if (list_empty(&repoll)) {
7496 				sd->in_net_rx_action = false;
7497 				barrier();
7498 				/* We need to check if ____napi_schedule()
7499 				 * had refilled poll_list while
7500 				 * sd->in_net_rx_action was true.
7501 				 */
7502 				if (!list_empty(&sd->poll_list))
7503 					goto start;
7504 				if (!sd_has_rps_ipi_waiting(sd))
7505 					goto end;
7506 			}
7507 			break;
7508 		}
7509 
7510 		n = list_first_entry(&list, struct napi_struct, poll_list);
7511 		budget -= napi_poll(n, &repoll);
7512 
7513 		/* If softirq window is exhausted then punt.
7514 		 * Allow this to run for 2 jiffies, which allows
7515 		 * an average latency of 1.5/HZ.
7516 		 */
7517 		if (unlikely(budget <= 0 ||
7518 			     time_after_eq(jiffies, time_limit))) {
7519 			sd->time_squeeze++;
7520 			break;
7521 		}
7522 	}
7523 
7524 	local_irq_disable();
7525 
7526 	list_splice_tail_init(&sd->poll_list, &list);
7527 	list_splice_tail(&repoll, &list);
7528 	list_splice(&list, &sd->poll_list);
7529 	if (!list_empty(&sd->poll_list))
7530 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
7531 	else
7532 		sd->in_net_rx_action = false;
7533 
7534 	net_rps_action_and_irq_enable(sd);
7535 end:
7536 	bpf_net_ctx_clear(bpf_net_ctx);
7537 }
7538 
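/* The two limits consumed by net_rx_action() above come from the
 * net.core.netdev_budget and net.core.netdev_budget_usecs sysctls;
 * whichever runs out first ends the softirq round, and every such early
 * exit is accounted in sd->time_squeeze (visible in
 * /proc/net/softnet_stat).
 */
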
7539 struct netdev_adjacent {
7540 	struct net_device *dev;
7541 	netdevice_tracker dev_tracker;
7542 
7543 	/* upper master flag; there can only be one master device per list */
7544 	bool master;
7545 
7546 	/* lookup ignore flag */
7547 	bool ignore;
7548 
7549 	/* counter for the number of times this device was added to us */
7550 	u16 ref_nr;
7551 
7552 	/* private field for the users */
7553 	void *private;
7554 
7555 	struct list_head list;
7556 	struct rcu_head rcu;
7557 };
7558 
7559 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
7560 						 struct list_head *adj_list)
7561 {
7562 	struct netdev_adjacent *adj;
7563 
7564 	list_for_each_entry(adj, adj_list, list) {
7565 		if (adj->dev == adj_dev)
7566 			return adj;
7567 	}
7568 	return NULL;
7569 }
7570 
7571 static int ____netdev_has_upper_dev(struct net_device *upper_dev,
7572 				    struct netdev_nested_priv *priv)
7573 {
7574 	struct net_device *dev = (struct net_device *)priv->data;
7575 
7576 	return upper_dev == dev;
7577 }
7578 
7579 /**
7580  * netdev_has_upper_dev - Check if device is linked to an upper device
7581  * @dev: device
7582  * @upper_dev: upper device to check
7583  *
7584  * Find out if a device is linked to the specified upper device and return
7585  * true if it is. Note that this checks only the immediate upper device,
7586  * not the complete stack of devices. The caller must hold the RTNL lock.
7587  */
7588 bool netdev_has_upper_dev(struct net_device *dev,
7589 			  struct net_device *upper_dev)
7590 {
7591 	struct netdev_nested_priv priv = {
7592 		.data = (void *)upper_dev,
7593 	};
7594 
7595 	ASSERT_RTNL();
7596 
7597 	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
7598 					     &priv);
7599 }
7600 EXPORT_SYMBOL(netdev_has_upper_dev);
7601 
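/* Example usage (an illustrative sketch, not from this file): a driver
 * checking whether "slave_dev" already sits directly below "master_dev";
 * both device pointers are hypothetical:
 *
 *	bool linked;
 *
 *	rtnl_lock();
 *	linked = netdev_has_upper_dev(slave_dev, master_dev);
 *	rtnl_unlock();
 */
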
7602 /**
7603  * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
7604  * @dev: device
7605  * @upper_dev: upper device to check
7606  *
7607  * Find out if a device is linked to the specified upper device and return
7608  * true if it is. Note that this checks the entire upper device chain.
7609  * The caller must hold the RCU read lock.
7610  */
7611 
7612 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
7613 				  struct net_device *upper_dev)
7614 {
7615 	struct netdev_nested_priv priv = {
7616 		.data = (void *)upper_dev,
7617 	};
7618 
7619 	return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
7620 					       &priv);
7621 }
7622 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
7623 
7624 /**
7625  * netdev_has_any_upper_dev - Check if device is linked to some device
7626  * @dev: device
7627  *
7628  * Find out if a device is linked to an upper device and return true in case
7629  * it is. The caller must hold the RTNL lock.
7630  */
7631 bool netdev_has_any_upper_dev(struct net_device *dev)
7632 {
7633 	ASSERT_RTNL();
7634 
7635 	return !list_empty(&dev->adj_list.upper);
7636 }
7637 EXPORT_SYMBOL(netdev_has_any_upper_dev);
7638 
7639 /**
7640  * netdev_master_upper_dev_get - Get master upper device
7641  * @dev: device
7642  *
7643  * Find a master upper device and return pointer to it or NULL in case
7644  * it's not there. The caller must hold the RTNL lock.
7645  */
7646 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
7647 {
7648 	struct netdev_adjacent *upper;
7649 
7650 	ASSERT_RTNL();
7651 
7652 	if (list_empty(&dev->adj_list.upper))
7653 		return NULL;
7654 
7655 	upper = list_first_entry(&dev->adj_list.upper,
7656 				 struct netdev_adjacent, list);
7657 	if (likely(upper->master))
7658 		return upper->dev;
7659 	return NULL;
7660 }
7661 EXPORT_SYMBOL(netdev_master_upper_dev_get);
7662 
7663 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
7664 {
7665 	struct netdev_adjacent *upper;
7666 
7667 	ASSERT_RTNL();
7668 
7669 	if (list_empty(&dev->adj_list.upper))
7670 		return NULL;
7671 
7672 	upper = list_first_entry(&dev->adj_list.upper,
7673 				 struct netdev_adjacent, list);
7674 	if (likely(upper->master) && !upper->ignore)
7675 		return upper->dev;
7676 	return NULL;
7677 }
7678 
7679 /**
7680  * netdev_has_any_lower_dev - Check if device is linked to some device
7681  * @dev: device
7682  *
7683  * Find out if a device is linked to a lower device and return true in case
7684  * it is. The caller must hold the RTNL lock.
7685  */
7686 static bool netdev_has_any_lower_dev(struct net_device *dev)
7687 {
7688 	ASSERT_RTNL();
7689 
7690 	return !list_empty(&dev->adj_list.lower);
7691 }
7692 
7693 void *netdev_adjacent_get_private(struct list_head *adj_list)
7694 {
7695 	struct netdev_adjacent *adj;
7696 
7697 	adj = list_entry(adj_list, struct netdev_adjacent, list);
7698 
7699 	return adj->private;
7700 }
7701 EXPORT_SYMBOL(netdev_adjacent_get_private);
7702 
7703 /**
7704  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
7705  * @dev: device
7706  * @iter: list_head ** of the current position
7707  *
7708  * Gets the next device from the dev's upper list, starting from iter
7709  * position. The caller must hold RCU read lock.
7710  */
7711 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
7712 						 struct list_head **iter)
7713 {
7714 	struct netdev_adjacent *upper;
7715 
7716 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7717 
7718 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7719 
7720 	if (&upper->list == &dev->adj_list.upper)
7721 		return NULL;
7722 
7723 	*iter = &upper->list;
7724 
7725 	return upper->dev;
7726 }
7727 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
7728 
7729 static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
7730 						  struct list_head **iter,
7731 						  bool *ignore)
7732 {
7733 	struct netdev_adjacent *upper;
7734 
7735 	upper = list_entry((*iter)->next, struct netdev_adjacent, list);
7736 
7737 	if (&upper->list == &dev->adj_list.upper)
7738 		return NULL;
7739 
7740 	*iter = &upper->list;
7741 	*ignore = upper->ignore;
7742 
7743 	return upper->dev;
7744 }
7745 
7746 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
7747 						    struct list_head **iter)
7748 {
7749 	struct netdev_adjacent *upper;
7750 
7751 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7752 
7753 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7754 
7755 	if (&upper->list == &dev->adj_list.upper)
7756 		return NULL;
7757 
7758 	*iter = &upper->list;
7759 
7760 	return upper->dev;
7761 }
7762 
7763 static int __netdev_walk_all_upper_dev(struct net_device *dev,
7764 				       int (*fn)(struct net_device *dev,
7765 					 struct netdev_nested_priv *priv),
7766 				       struct netdev_nested_priv *priv)
7767 {
7768 	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7769 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7770 	int ret, cur = 0;
7771 	bool ignore;
7772 
7773 	now = dev;
7774 	iter = &dev->adj_list.upper;
7775 
7776 	while (1) {
7777 		if (now != dev) {
7778 			ret = fn(now, priv);
7779 			if (ret)
7780 				return ret;
7781 		}
7782 
7783 		next = NULL;
7784 		while (1) {
7785 			udev = __netdev_next_upper_dev(now, &iter, &ignore);
7786 			if (!udev)
7787 				break;
7788 			if (ignore)
7789 				continue;
7790 
7791 			next = udev;
7792 			niter = &udev->adj_list.upper;
7793 			dev_stack[cur] = now;
7794 			iter_stack[cur++] = iter;
7795 			break;
7796 		}
7797 
7798 		if (!next) {
7799 			if (!cur)
7800 				return 0;
7801 			next = dev_stack[--cur];
7802 			niter = iter_stack[cur];
7803 		}
7804 
7805 		now = next;
7806 		iter = niter;
7807 	}
7808 
7809 	return 0;
7810 }
7811 
7812 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
7813 				  int (*fn)(struct net_device *dev,
7814 					    struct netdev_nested_priv *priv),
7815 				  struct netdev_nested_priv *priv)
7816 {
7817 	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7818 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7819 	int ret, cur = 0;
7820 
7821 	now = dev;
7822 	iter = &dev->adj_list.upper;
7823 
7824 	while (1) {
7825 		if (now != dev) {
7826 			ret = fn(now, priv);
7827 			if (ret)
7828 				return ret;
7829 		}
7830 
7831 		next = NULL;
7832 		while (1) {
7833 			udev = netdev_next_upper_dev_rcu(now, &iter);
7834 			if (!udev)
7835 				break;
7836 
7837 			next = udev;
7838 			niter = &udev->adj_list.upper;
7839 			dev_stack[cur] = now;
7840 			iter_stack[cur++] = iter;
7841 			break;
7842 		}
7843 
7844 		if (!next) {
7845 			if (!cur)
7846 				return 0;
7847 			next = dev_stack[--cur];
7848 			niter = iter_stack[cur];
7849 		}
7850 
7851 		now = next;
7852 		iter = niter;
7853 	}
7854 
7855 	return 0;
7856 }
7857 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
7858 
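/* Example usage (an illustrative sketch, not from this file): counting
 * every device stacked above "dev" with the walker above. The callback
 * name and the use of netdev_nested_priv.data to carry the count are
 * hypothetical; returning non-zero from the callback stops the walk:
 *
 *	static int count_upper(struct net_device *dev,
 *			       struct netdev_nested_priv *priv)
 *	{
 *		(*(int *)priv->data)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	struct netdev_nested_priv priv = { .data = &n };
 *
 *	rcu_read_lock();
 *	netdev_walk_all_upper_dev_rcu(dev, count_upper, &priv);
 *	rcu_read_unlock();
 */
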
7859 static bool __netdev_has_upper_dev(struct net_device *dev,
7860 				   struct net_device *upper_dev)
7861 {
7862 	struct netdev_nested_priv priv = {
7863 		.flags = 0,
7864 		.data = (void *)upper_dev,
7865 	};
7866 
7867 	ASSERT_RTNL();
7868 
7869 	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
7870 					   &priv);
7871 }
7872 
7873 /**
7874  * netdev_lower_get_next_private - Get the next ->private from the
7875  *				   lower neighbour list
7876  * @dev: device
7877  * @iter: list_head ** of the current position
7878  *
7879  * Gets the next netdev_adjacent->private from the dev's lower neighbour
7880  * list, starting from iter position. The caller must hold either hold the
7881  * RTNL lock or its own locking that guarantees that the neighbour lower
7882  * list will remain unchanged.
7883  */
7884 void *netdev_lower_get_next_private(struct net_device *dev,
7885 				    struct list_head **iter)
7886 {
7887 	struct netdev_adjacent *lower;
7888 
7889 	lower = list_entry(*iter, struct netdev_adjacent, list);
7890 
7891 	if (&lower->list == &dev->adj_list.lower)
7892 		return NULL;
7893 
7894 	*iter = lower->list.next;
7895 
7896 	return lower->private;
7897 }
7898 EXPORT_SYMBOL(netdev_lower_get_next_private);
7899 
7900 /**
7901  * netdev_lower_get_next_private_rcu - Get the next ->private from the
7902  *				       lower neighbour list, RCU
7903  *				       variant
7904  * @dev: device
7905  * @iter: list_head ** of the current position
7906  *
7907  * Gets the next netdev_adjacent->private from the dev's lower neighbour
7908  * list, starting from iter position. The caller must hold RCU read lock.
7909  */
7910 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
7911 					struct list_head **iter)
7912 {
7913 	struct netdev_adjacent *lower;
7914 
7915 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
7916 
7917 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7918 
7919 	if (&lower->list == &dev->adj_list.lower)
7920 		return NULL;
7921 
7922 	*iter = &lower->list;
7923 
7924 	return lower->private;
7925 }
7926 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
7927 
7928 /**
7929  * netdev_lower_get_next - Get the next device from the lower neighbour
7930  *                         list
7931  * @dev: device
7932  * @iter: list_head ** of the current position
7933  *
7934  * Gets the next netdev_adjacent from the dev's lower neighbour
7935  * list, starting from iter position. The caller must hold the RTNL lock
7936  * or use its own locking that guarantees that the neighbour lower
7937  * list will remain unchanged.
7938  */
7939 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7940 {
7941 	struct netdev_adjacent *lower;
7942 
7943 	lower = list_entry(*iter, struct netdev_adjacent, list);
7944 
7945 	if (&lower->list == &dev->adj_list.lower)
7946 		return NULL;
7947 
7948 	*iter = lower->list.next;
7949 
7950 	return lower->dev;
7951 }
7952 EXPORT_SYMBOL(netdev_lower_get_next);
7953 
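/* Example usage (an illustrative sketch, not from this file): this iterator
 * is normally consumed through the netdev_for_each_lower_dev() macro from
 * netdevice.h, under the RTNL lock:
 *
 *	struct net_device *ldev;
 *	struct list_head *iter;
 *
 *	ASSERT_RTNL();
 *	netdev_for_each_lower_dev(dev, ldev, iter)
 *		netdev_info(dev, "lower: %s\n", ldev->name);
 */
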
7954 static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7955 						struct list_head **iter)
7956 {
7957 	struct netdev_adjacent *lower;
7958 
7959 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7960 
7961 	if (&lower->list == &dev->adj_list.lower)
7962 		return NULL;
7963 
7964 	*iter = &lower->list;
7965 
7966 	return lower->dev;
7967 }
7968 
7969 static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7970 						  struct list_head **iter,
7971 						  bool *ignore)
7972 {
7973 	struct netdev_adjacent *lower;
7974 
7975 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7976 
7977 	if (&lower->list == &dev->adj_list.lower)
7978 		return NULL;
7979 
7980 	*iter = &lower->list;
7981 	*ignore = lower->ignore;
7982 
7983 	return lower->dev;
7984 }
7985 
7986 int netdev_walk_all_lower_dev(struct net_device *dev,
7987 			      int (*fn)(struct net_device *dev,
7988 					struct netdev_nested_priv *priv),
7989 			      struct netdev_nested_priv *priv)
7990 {
7991 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7992 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7993 	int ret, cur = 0;
7994 
7995 	now = dev;
7996 	iter = &dev->adj_list.lower;
7997 
7998 	while (1) {
7999 		if (now != dev) {
8000 			ret = fn(now, priv);
8001 			if (ret)
8002 				return ret;
8003 		}
8004 
8005 		next = NULL;
8006 		while (1) {
8007 			ldev = netdev_next_lower_dev(now, &iter);
8008 			if (!ldev)
8009 				break;
8010 
8011 			next = ldev;
8012 			niter = &ldev->adj_list.lower;
8013 			dev_stack[cur] = now;
8014 			iter_stack[cur++] = iter;
8015 			break;
8016 		}
8017 
8018 		if (!next) {
8019 			if (!cur)
8020 				return 0;
8021 			next = dev_stack[--cur];
8022 			niter = iter_stack[cur];
8023 		}
8024 
8025 		now = next;
8026 		iter = niter;
8027 	}
8028 
8029 	return 0;
8030 }
8031 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
8032 
8033 static int __netdev_walk_all_lower_dev(struct net_device *dev,
8034 				       int (*fn)(struct net_device *dev,
8035 					 struct netdev_nested_priv *priv),
8036 				       struct netdev_nested_priv *priv)
8037 {
8038 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
8039 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
8040 	int ret, cur = 0;
8041 	bool ignore;
8042 
8043 	now = dev;
8044 	iter = &dev->adj_list.lower;
8045 
8046 	while (1) {
8047 		if (now != dev) {
8048 			ret = fn(now, priv);
8049 			if (ret)
8050 				return ret;
8051 		}
8052 
8053 		next = NULL;
8054 		while (1) {
8055 			ldev = __netdev_next_lower_dev(now, &iter, &ignore);
8056 			if (!ldev)
8057 				break;
8058 			if (ignore)
8059 				continue;
8060 
8061 			next = ldev;
8062 			niter = &ldev->adj_list.lower;
8063 			dev_stack[cur] = now;
8064 			iter_stack[cur++] = iter;
8065 			break;
8066 		}
8067 
8068 		if (!next) {
8069 			if (!cur)
8070 				return 0;
8071 			next = dev_stack[--cur];
8072 			niter = iter_stack[cur];
8073 		}
8074 
8075 		now = next;
8076 		iter = niter;
8077 	}
8078 
8079 	return 0;
8080 }
8081 
8082 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
8083 					     struct list_head **iter)
8084 {
8085 	struct netdev_adjacent *lower;
8086 
8087 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
8088 	if (&lower->list == &dev->adj_list.lower)
8089 		return NULL;
8090 
8091 	*iter = &lower->list;
8092 
8093 	return lower->dev;
8094 }
8095 EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
8096 
8097 static u8 __netdev_upper_depth(struct net_device *dev)
8098 {
8099 	struct net_device *udev;
8100 	struct list_head *iter;
8101 	u8 max_depth = 0;
8102 	bool ignore;
8103 
8104 	for (iter = &dev->adj_list.upper,
8105 	     udev = __netdev_next_upper_dev(dev, &iter, &ignore);
8106 	     udev;
8107 	     udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
8108 		if (ignore)
8109 			continue;
8110 		if (max_depth < udev->upper_level)
8111 			max_depth = udev->upper_level;
8112 	}
8113 
8114 	return max_depth;
8115 }
8116 
8117 static u8 __netdev_lower_depth(struct net_device *dev)
8118 {
8119 	struct net_device *ldev;
8120 	struct list_head *iter;
8121 	u8 max_depth = 0;
8122 	bool ignore;
8123 
8124 	for (iter = &dev->adj_list.lower,
8125 	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
8126 	     ldev;
8127 	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
8128 		if (ignore)
8129 			continue;
8130 		if (max_depth < ldev->lower_level)
8131 			max_depth = ldev->lower_level;
8132 	}
8133 
8134 	return max_depth;
8135 }
8136 
8137 static int __netdev_update_upper_level(struct net_device *dev,
8138 				       struct netdev_nested_priv *__unused)
8139 {
8140 	dev->upper_level = __netdev_upper_depth(dev) + 1;
8141 	return 0;
8142 }
8143 
8144 #ifdef CONFIG_LOCKDEP
8145 static LIST_HEAD(net_unlink_list);
8146 
8147 static void net_unlink_todo(struct net_device *dev)
8148 {
8149 	if (list_empty(&dev->unlink_list))
8150 		list_add_tail(&dev->unlink_list, &net_unlink_list);
8151 }
8152 #endif
8153 
8154 static int __netdev_update_lower_level(struct net_device *dev,
8155 				       struct netdev_nested_priv *priv)
8156 {
8157 	dev->lower_level = __netdev_lower_depth(dev) + 1;
8158 
8159 #ifdef CONFIG_LOCKDEP
8160 	if (!priv)
8161 		return 0;
8162 
8163 	if (priv->flags & NESTED_SYNC_IMM)
8164 		dev->nested_level = dev->lower_level - 1;
8165 	if (priv->flags & NESTED_SYNC_TODO)
8166 		net_unlink_todo(dev);
8167 #endif
8168 	return 0;
8169 }
8170 
8171 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
8172 				  int (*fn)(struct net_device *dev,
8173 					    struct netdev_nested_priv *priv),
8174 				  struct netdev_nested_priv *priv)
8175 {
8176 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
8177 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
8178 	int ret, cur = 0;
8179 
8180 	now = dev;
8181 	iter = &dev->adj_list.lower;
8182 
8183 	while (1) {
8184 		if (now != dev) {
8185 			ret = fn(now, priv);
8186 			if (ret)
8187 				return ret;
8188 		}
8189 
8190 		next = NULL;
8191 		while (1) {
8192 			ldev = netdev_next_lower_dev_rcu(now, &iter);
8193 			if (!ldev)
8194 				break;
8195 
8196 			next = ldev;
8197 			niter = &ldev->adj_list.lower;
8198 			dev_stack[cur] = now;
8199 			iter_stack[cur++] = iter;
8200 			break;
8201 		}
8202 
8203 		if (!next) {
8204 			if (!cur)
8205 				return 0;
8206 			next = dev_stack[--cur];
8207 			niter = iter_stack[cur];
8208 		}
8209 
8210 		now = next;
8211 		iter = niter;
8212 	}
8213 
8214 	return 0;
8215 }
8216 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
8217 
8218 /**
8219  * netdev_lower_get_first_private_rcu - Get the first ->private from the
8220  *				       lower neighbour list, RCU
8221  *				       variant
8222  * @dev: device
8223  *
8224  * Gets the first netdev_adjacent->private from the dev's lower neighbour
8225  * list. The caller must hold RCU read lock.
8226  */
8227 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
8228 {
8229 	struct netdev_adjacent *lower;
8230 
8231 	lower = list_first_or_null_rcu(&dev->adj_list.lower,
8232 			struct netdev_adjacent, list);
8233 	if (lower)
8234 		return lower->private;
8235 	return NULL;
8236 }
8237 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
8238 
8239 /**
8240  * netdev_master_upper_dev_get_rcu - Get master upper device
8241  * @dev: device
8242  *
8243  * Find a master upper device and return pointer to it or NULL in case
8244  * it's not there. The caller must hold the RCU read lock.
8245  */
8246 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
8247 {
8248 	struct netdev_adjacent *upper;
8249 
8250 	upper = list_first_or_null_rcu(&dev->adj_list.upper,
8251 				       struct netdev_adjacent, list);
8252 	if (upper && likely(upper->master))
8253 		return upper->dev;
8254 	return NULL;
8255 }
8256 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
8257 
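/* Example usage (an illustrative sketch, not from this file): resolving a
 * port's master from a datapath context where only RCU is held; "port_dev"
 * is hypothetical, and the returned pointer is only guaranteed to stay
 * valid inside the RCU read-side critical section:
 *
 *	struct net_device *master;
 *
 *	rcu_read_lock();
 *	master = netdev_master_upper_dev_get_rcu(port_dev);
 *	if (master)
 *		netdev_dbg(port_dev, "master: %s\n", master->name);
 *	rcu_read_unlock();
 */
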
8258 static int netdev_adjacent_sysfs_add(struct net_device *dev,
8259 			      struct net_device *adj_dev,
8260 			      struct list_head *dev_list)
8261 {
8262 	char linkname[IFNAMSIZ+7];
8263 
8264 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
8265 		"upper_%s" : "lower_%s", adj_dev->name);
8266 	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
8267 				 linkname);
8268 }
8269 static void netdev_adjacent_sysfs_del(struct net_device *dev,
8270 			       char *name,
8271 			       struct list_head *dev_list)
8272 {
8273 	char linkname[IFNAMSIZ+7];
8274 
8275 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
8276 		"upper_%s" : "lower_%s", name);
8277 	sysfs_remove_link(&(dev->dev.kobj), linkname);
8278 }
8279 
8280 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
8281 						 struct net_device *adj_dev,
8282 						 struct list_head *dev_list)
8283 {
8284 	return (dev_list == &dev->adj_list.upper ||
8285 		dev_list == &dev->adj_list.lower) &&
8286 		net_eq(dev_net(dev), dev_net(adj_dev));
8287 }
8288 
8289 static int __netdev_adjacent_dev_insert(struct net_device *dev,
8290 					struct net_device *adj_dev,
8291 					struct list_head *dev_list,
8292 					void *private, bool master)
8293 {
8294 	struct netdev_adjacent *adj;
8295 	int ret;
8296 
8297 	adj = __netdev_find_adj(adj_dev, dev_list);
8298 
8299 	if (adj) {
8300 		adj->ref_nr += 1;
8301 		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
8302 			 dev->name, adj_dev->name, adj->ref_nr);
8303 
8304 		return 0;
8305 	}
8306 
8307 	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
8308 	if (!adj)
8309 		return -ENOMEM;
8310 
8311 	adj->dev = adj_dev;
8312 	adj->master = master;
8313 	adj->ref_nr = 1;
8314 	adj->private = private;
8315 	adj->ignore = false;
8316 	netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL);
8317 
8318 	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
8319 		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
8320 
8321 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
8322 		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
8323 		if (ret)
8324 			goto free_adj;
8325 	}
8326 
8327 	/* Ensure that master link is always the first item in list. */
8328 	if (master) {
8329 		ret = sysfs_create_link(&(dev->dev.kobj),
8330 					&(adj_dev->dev.kobj), "master");
8331 		if (ret)
8332 			goto remove_symlinks;
8333 
8334 		list_add_rcu(&adj->list, dev_list);
8335 	} else {
8336 		list_add_tail_rcu(&adj->list, dev_list);
8337 	}
8338 
8339 	return 0;
8340 
8341 remove_symlinks:
8342 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
8343 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
8344 free_adj:
8345 	netdev_put(adj_dev, &adj->dev_tracker);
8346 	kfree(adj);
8347 
8348 	return ret;
8349 }
8350 
8351 static void __netdev_adjacent_dev_remove(struct net_device *dev,
8352 					 struct net_device *adj_dev,
8353 					 u16 ref_nr,
8354 					 struct list_head *dev_list)
8355 {
8356 	struct netdev_adjacent *adj;
8357 
8358 	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
8359 		 dev->name, adj_dev->name, ref_nr);
8360 
8361 	adj = __netdev_find_adj(adj_dev, dev_list);
8362 
8363 	if (!adj) {
8364 		pr_err("Adjacency does not exist for device %s from %s\n",
8365 		       dev->name, adj_dev->name);
8366 		WARN_ON(1);
8367 		return;
8368 	}
8369 
8370 	if (adj->ref_nr > ref_nr) {
8371 		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
8372 			 dev->name, adj_dev->name, ref_nr,
8373 			 adj->ref_nr - ref_nr);
8374 		adj->ref_nr -= ref_nr;
8375 		return;
8376 	}
8377 
8378 	if (adj->master)
8379 		sysfs_remove_link(&(dev->dev.kobj), "master");
8380 
8381 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
8382 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
8383 
8384 	list_del_rcu(&adj->list);
8385 	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
8386 		 adj_dev->name, dev->name, adj_dev->name);
8387 	netdev_put(adj_dev, &adj->dev_tracker);
8388 	kfree_rcu(adj, rcu);
8389 }
8390 
8391 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
8392 					    struct net_device *upper_dev,
8393 					    struct list_head *up_list,
8394 					    struct list_head *down_list,
8395 					    void *private, bool master)
8396 {
8397 	int ret;
8398 
8399 	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
8400 					   private, master);
8401 	if (ret)
8402 		return ret;
8403 
8404 	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
8405 					   private, false);
8406 	if (ret) {
8407 		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
8408 		return ret;
8409 	}
8410 
8411 	return 0;
8412 }
8413 
8414 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
8415 					       struct net_device *upper_dev,
8416 					       u16 ref_nr,
8417 					       struct list_head *up_list,
8418 					       struct list_head *down_list)
8419 {
8420 	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
8421 	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
8422 }
8423 
8424 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
8425 						struct net_device *upper_dev,
8426 						void *private, bool master)
8427 {
8428 	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
8429 						&dev->adj_list.upper,
8430 						&upper_dev->adj_list.lower,
8431 						private, master);
8432 }
8433 
8434 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
8435 						   struct net_device *upper_dev)
8436 {
8437 	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
8438 					   &dev->adj_list.upper,
8439 					   &upper_dev->adj_list.lower);
8440 }
8441 
8442 static int __netdev_upper_dev_link(struct net_device *dev,
8443 				   struct net_device *upper_dev, bool master,
8444 				   void *upper_priv, void *upper_info,
8445 				   struct netdev_nested_priv *priv,
8446 				   struct netlink_ext_ack *extack)
8447 {
8448 	struct netdev_notifier_changeupper_info changeupper_info = {
8449 		.info = {
8450 			.dev = dev,
8451 			.extack = extack,
8452 		},
8453 		.upper_dev = upper_dev,
8454 		.master = master,
8455 		.linking = true,
8456 		.upper_info = upper_info,
8457 	};
8458 	struct net_device *master_dev;
8459 	int ret = 0;
8460 
8461 	ASSERT_RTNL();
8462 
8463 	if (dev == upper_dev)
8464 		return -EBUSY;
8465 
8466 	/* To prevent loops, check that dev is not an upper device of upper_dev. */
8467 	if (__netdev_has_upper_dev(upper_dev, dev))
8468 		return -EBUSY;
8469 
8470 	if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
8471 		return -EMLINK;
8472 
8473 	if (!master) {
8474 		if (__netdev_has_upper_dev(dev, upper_dev))
8475 			return -EEXIST;
8476 	} else {
8477 		master_dev = __netdev_master_upper_dev_get(dev);
8478 		if (master_dev)
8479 			return master_dev == upper_dev ? -EEXIST : -EBUSY;
8480 	}
8481 
8482 	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
8483 					    &changeupper_info.info);
8484 	ret = notifier_to_errno(ret);
8485 	if (ret)
8486 		return ret;
8487 
8488 	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
8489 						   master);
8490 	if (ret)
8491 		return ret;
8492 
8493 	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
8494 					    &changeupper_info.info);
8495 	ret = notifier_to_errno(ret);
8496 	if (ret)
8497 		goto rollback;
8498 
8499 	__netdev_update_upper_level(dev, NULL);
8500 	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
8501 
8502 	__netdev_update_lower_level(upper_dev, priv);
8503 	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
8504 				    priv);
8505 
8506 	return 0;
8507 
8508 rollback:
8509 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
8510 
8511 	return ret;
8512 }
8513 
8514 /**
8515  * netdev_upper_dev_link - Add a link to the upper device
8516  * @dev: device
8517  * @upper_dev: new upper device
8518  * @extack: netlink extended ack
8519  *
8520  * Adds a link to a device which is upper to this one. The caller must hold
8521  * the RTNL lock. On a failure a negative errno code is returned.
8522  * On success the reference counts are adjusted and the function
8523  * returns zero.
8524  */
8525 int netdev_upper_dev_link(struct net_device *dev,
8526 			  struct net_device *upper_dev,
8527 			  struct netlink_ext_ack *extack)
8528 {
8529 	struct netdev_nested_priv priv = {
8530 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
8531 		.data = NULL,
8532 	};
8533 
8534 	return __netdev_upper_dev_link(dev, upper_dev, false,
8535 				       NULL, NULL, &priv, extack);
8536 }
8537 EXPORT_SYMBOL(netdev_upper_dev_link);
8538 
8539 /**
8540  * netdev_master_upper_dev_link - Add a master link to the upper device
8541  * @dev: device
8542  * @upper_dev: new upper device
8543  * @upper_priv: upper device private
8544  * @upper_info: upper info to be passed down via notifier
8545  * @extack: netlink extended ack
8546  *
8547  * Adds a link to a device which is upper to this one. In this case, only
8548  * one master upper device can be linked, although other non-master devices
8549  * might be linked as well. The caller must hold the RTNL lock.
8550  * On a failure a negative errno code is returned. On success the reference
8551  * counts are adjusted and the function returns zero.
8552  */
8553 int netdev_master_upper_dev_link(struct net_device *dev,
8554 				 struct net_device *upper_dev,
8555 				 void *upper_priv, void *upper_info,
8556 				 struct netlink_ext_ack *extack)
8557 {
8558 	struct netdev_nested_priv priv = {
8559 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
8560 		.data = NULL,
8561 	};
8562 
8563 	return __netdev_upper_dev_link(dev, upper_dev, true,
8564 				       upper_priv, upper_info, &priv, extack);
8565 }
8566 EXPORT_SYMBOL(netdev_master_upper_dev_link);
8567 
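/* Example usage (an illustrative sketch, not from this file): an
 * aggregation-style driver enslaving "port_dev" under "master_dev" and
 * undoing the link on a later error path; the devices, "priv" and "info"
 * are hypothetical:
 *
 *	ASSERT_RTNL();
 *	err = netdev_master_upper_dev_link(port_dev, master_dev,
 *					   priv, info, extack);
 *	if (err)
 *		return err;
 *	...
 *	netdev_upper_dev_unlink(port_dev, master_dev);
 */
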
8568 static void __netdev_upper_dev_unlink(struct net_device *dev,
8569 				      struct net_device *upper_dev,
8570 				      struct netdev_nested_priv *priv)
8571 {
8572 	struct netdev_notifier_changeupper_info changeupper_info = {
8573 		.info = {
8574 			.dev = dev,
8575 		},
8576 		.upper_dev = upper_dev,
8577 		.linking = false,
8578 	};
8579 
8580 	ASSERT_RTNL();
8581 
8582 	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
8583 
8584 	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
8585 				      &changeupper_info.info);
8586 
8587 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
8588 
8589 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
8590 				      &changeupper_info.info);
8591 
8592 	__netdev_update_upper_level(dev, NULL);
8593 	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
8594 
8595 	__netdev_update_lower_level(upper_dev, priv);
8596 	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
8597 				    priv);
8598 }
8599 
8600 /**
8601  * netdev_upper_dev_unlink - Removes a link to upper device
8602  * @dev: device
8603  * @upper_dev: upper device to unlink
8604  *
8605  * Removes a link to a device which is upper to this one. The caller must hold
8606  * the RTNL lock.
8607  */
8608 void netdev_upper_dev_unlink(struct net_device *dev,
8609 			     struct net_device *upper_dev)
8610 {
8611 	struct netdev_nested_priv priv = {
8612 		.flags = NESTED_SYNC_TODO,
8613 		.data = NULL,
8614 	};
8615 
8616 	__netdev_upper_dev_unlink(dev, upper_dev, &priv);
8617 }
8618 EXPORT_SYMBOL(netdev_upper_dev_unlink);
8619 
8620 static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
8621 				      struct net_device *lower_dev,
8622 				      bool val)
8623 {
8624 	struct netdev_adjacent *adj;
8625 
8626 	adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
8627 	if (adj)
8628 		adj->ignore = val;
8629 
8630 	adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
8631 	if (adj)
8632 		adj->ignore = val;
8633 }
8634 
8635 static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
8636 					struct net_device *lower_dev)
8637 {
8638 	__netdev_adjacent_dev_set(upper_dev, lower_dev, true);
8639 }
8640 
8641 static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
8642 				       struct net_device *lower_dev)
8643 {
8644 	__netdev_adjacent_dev_set(upper_dev, lower_dev, false);
8645 }
8646 
8647 int netdev_adjacent_change_prepare(struct net_device *old_dev,
8648 				   struct net_device *new_dev,
8649 				   struct net_device *dev,
8650 				   struct netlink_ext_ack *extack)
8651 {
8652 	struct netdev_nested_priv priv = {
8653 		.flags = 0,
8654 		.data = NULL,
8655 	};
8656 	int err;
8657 
8658 	if (!new_dev)
8659 		return 0;
8660 
8661 	if (old_dev && new_dev != old_dev)
8662 		netdev_adjacent_dev_disable(dev, old_dev);
8663 	err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
8664 				      extack);
8665 	if (err) {
8666 		if (old_dev && new_dev != old_dev)
8667 			netdev_adjacent_dev_enable(dev, old_dev);
8668 		return err;
8669 	}
8670 
8671 	return 0;
8672 }
8673 EXPORT_SYMBOL(netdev_adjacent_change_prepare);
8674 
8675 void netdev_adjacent_change_commit(struct net_device *old_dev,
8676 				   struct net_device *new_dev,
8677 				   struct net_device *dev)
8678 {
8679 	struct netdev_nested_priv priv = {
8680 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
8681 		.data = NULL,
8682 	};
8683 
8684 	if (!new_dev || !old_dev)
8685 		return;
8686 
8687 	if (new_dev == old_dev)
8688 		return;
8689 
8690 	netdev_adjacent_dev_enable(dev, old_dev);
8691 	__netdev_upper_dev_unlink(old_dev, dev, &priv);
8692 }
8693 EXPORT_SYMBOL(netdev_adjacent_change_commit);
8694 
8695 void netdev_adjacent_change_abort(struct net_device *old_dev,
8696 				  struct net_device *new_dev,
8697 				  struct net_device *dev)
8698 {
8699 	struct netdev_nested_priv priv = {
8700 		.flags = 0,
8701 		.data = NULL,
8702 	};
8703 
8704 	if (!new_dev)
8705 		return;
8706 
8707 	if (old_dev && new_dev != old_dev)
8708 		netdev_adjacent_dev_enable(dev, old_dev);
8709 
8710 	__netdev_upper_dev_unlink(new_dev, dev, &priv);
8711 }
8712 EXPORT_SYMBOL(netdev_adjacent_change_abort);
8713 
8714 /**
8715  * netdev_bonding_info_change - Dispatch event about slave change
8716  * @dev: device
8717  * @bonding_info: info to dispatch
8718  *
8719  * Send NETDEV_BONDING_INFO to netdev notifiers with info.
8720  * The caller must hold the RTNL lock.
8721  */
8722 void netdev_bonding_info_change(struct net_device *dev,
8723 				struct netdev_bonding_info *bonding_info)
8724 {
8725 	struct netdev_notifier_bonding_info info = {
8726 		.info.dev = dev,
8727 	};
8728 
8729 	memcpy(&info.bonding_info, bonding_info,
8730 	       sizeof(struct netdev_bonding_info));
8731 	call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
8732 				      &info.info);
8733 }
8734 EXPORT_SYMBOL(netdev_bonding_info_change);
8735 
8736 static int netdev_offload_xstats_enable_l3(struct net_device *dev,
8737 					   struct netlink_ext_ack *extack)
8738 {
8739 	struct netdev_notifier_offload_xstats_info info = {
8740 		.info.dev = dev,
8741 		.info.extack = extack,
8742 		.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8743 	};
8744 	int err;
8745 	int rc;
8746 
8747 	dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
8748 					 GFP_KERNEL);
8749 	if (!dev->offload_xstats_l3)
8750 		return -ENOMEM;
8751 
8752 	rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE,
8753 						  NETDEV_OFFLOAD_XSTATS_DISABLE,
8754 						  &info.info);
8755 	err = notifier_to_errno(rc);
8756 	if (err)
8757 		goto free_stats;
8758 
8759 	return 0;
8760 
8761 free_stats:
8762 	kfree(dev->offload_xstats_l3);
8763 	dev->offload_xstats_l3 = NULL;
8764 	return err;
8765 }
8766 
8767 int netdev_offload_xstats_enable(struct net_device *dev,
8768 				 enum netdev_offload_xstats_type type,
8769 				 struct netlink_ext_ack *extack)
8770 {
8771 	ASSERT_RTNL();
8772 
8773 	if (netdev_offload_xstats_enabled(dev, type))
8774 		return -EALREADY;
8775 
8776 	switch (type) {
8777 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8778 		return netdev_offload_xstats_enable_l3(dev, extack);
8779 	}
8780 
8781 	WARN_ON(1);
8782 	return -EINVAL;
8783 }
8784 EXPORT_SYMBOL(netdev_offload_xstats_enable);
8785 
8786 static void netdev_offload_xstats_disable_l3(struct net_device *dev)
8787 {
8788 	struct netdev_notifier_offload_xstats_info info = {
8789 		.info.dev = dev,
8790 		.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8791 	};
8792 
8793 	call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE,
8794 				      &info.info);
8795 	kfree(dev->offload_xstats_l3);
8796 	dev->offload_xstats_l3 = NULL;
8797 }
8798 
8799 int netdev_offload_xstats_disable(struct net_device *dev,
8800 				  enum netdev_offload_xstats_type type)
8801 {
8802 	ASSERT_RTNL();
8803 
8804 	if (!netdev_offload_xstats_enabled(dev, type))
8805 		return -EALREADY;
8806 
8807 	switch (type) {
8808 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8809 		netdev_offload_xstats_disable_l3(dev);
8810 		return 0;
8811 	}
8812 
8813 	WARN_ON(1);
8814 	return -EINVAL;
8815 }
8816 EXPORT_SYMBOL(netdev_offload_xstats_disable);
8817 
8818 static void netdev_offload_xstats_disable_all(struct net_device *dev)
8819 {
8820 	netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
8821 }
8822 
8823 static struct rtnl_hw_stats64 *
8824 netdev_offload_xstats_get_ptr(const struct net_device *dev,
8825 			      enum netdev_offload_xstats_type type)
8826 {
8827 	switch (type) {
8828 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8829 		return dev->offload_xstats_l3;
8830 	}
8831 
8832 	WARN_ON(1);
8833 	return NULL;
8834 }
8835 
8836 bool netdev_offload_xstats_enabled(const struct net_device *dev,
8837 				   enum netdev_offload_xstats_type type)
8838 {
8839 	ASSERT_RTNL();
8840 
8841 	return netdev_offload_xstats_get_ptr(dev, type);
8842 }
8843 EXPORT_SYMBOL(netdev_offload_xstats_enabled);
8844 
8845 struct netdev_notifier_offload_xstats_ru {
8846 	bool used;
8847 };
8848 
8849 struct netdev_notifier_offload_xstats_rd {
8850 	struct rtnl_hw_stats64 stats;
8851 	bool used;
8852 };
8853 
8854 static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest,
8855 				  const struct rtnl_hw_stats64 *src)
8856 {
8857 	dest->rx_packets	  += src->rx_packets;
8858 	dest->tx_packets	  += src->tx_packets;
8859 	dest->rx_bytes		  += src->rx_bytes;
8860 	dest->tx_bytes		  += src->tx_bytes;
8861 	dest->rx_errors		  += src->rx_errors;
8862 	dest->tx_errors		  += src->tx_errors;
8863 	dest->rx_dropped	  += src->rx_dropped;
8864 	dest->tx_dropped	  += src->tx_dropped;
8865 	dest->multicast		  += src->multicast;
8866 }
8867 
8868 static int netdev_offload_xstats_get_used(struct net_device *dev,
8869 					  enum netdev_offload_xstats_type type,
8870 					  bool *p_used,
8871 					  struct netlink_ext_ack *extack)
8872 {
8873 	struct netdev_notifier_offload_xstats_ru report_used = {};
8874 	struct netdev_notifier_offload_xstats_info info = {
8875 		.info.dev = dev,
8876 		.info.extack = extack,
8877 		.type = type,
8878 		.report_used = &report_used,
8879 	};
8880 	int rc;
8881 
8882 	WARN_ON(!netdev_offload_xstats_enabled(dev, type));
8883 	rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED,
8884 					   &info.info);
8885 	*p_used = report_used.used;
8886 	return notifier_to_errno(rc);
8887 }
8888 
8889 static int netdev_offload_xstats_get_stats(struct net_device *dev,
8890 					   enum netdev_offload_xstats_type type,
8891 					   struct rtnl_hw_stats64 *p_stats,
8892 					   bool *p_used,
8893 					   struct netlink_ext_ack *extack)
8894 {
8895 	struct netdev_notifier_offload_xstats_rd report_delta = {};
8896 	struct netdev_notifier_offload_xstats_info info = {
8897 		.info.dev = dev,
8898 		.info.extack = extack,
8899 		.type = type,
8900 		.report_delta = &report_delta,
8901 	};
8902 	struct rtnl_hw_stats64 *stats;
8903 	int rc;
8904 
8905 	stats = netdev_offload_xstats_get_ptr(dev, type);
8906 	if (WARN_ON(!stats))
8907 		return -EINVAL;
8908 
8909 	rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
8910 					   &info.info);
8911 
8912 	/* Cache whatever we got, even if there was an error, otherwise the
8913 	 * successful stats retrievals would get lost.
8914 	 */
8915 	netdev_hw_stats64_add(stats, &report_delta.stats);
8916 
8917 	if (p_stats)
8918 		*p_stats = *stats;
8919 	*p_used = report_delta.used;
8920 
8921 	return notifier_to_errno(rc);
8922 }
8923 
8924 int netdev_offload_xstats_get(struct net_device *dev,
8925 			      enum netdev_offload_xstats_type type,
8926 			      struct rtnl_hw_stats64 *p_stats, bool *p_used,
8927 			      struct netlink_ext_ack *extack)
8928 {
8929 	ASSERT_RTNL();
8930 
8931 	if (p_stats)
8932 		return netdev_offload_xstats_get_stats(dev, type, p_stats,
8933 						       p_used, extack);
8934 	else
8935 		return netdev_offload_xstats_get_used(dev, type, p_used,
8936 						      extack);
8937 }
8938 EXPORT_SYMBOL(netdev_offload_xstats_get);
8939 
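/* Example usage (an illustrative sketch, not from this file): enabling L3
 * HW stats on a device and later reading back the accumulated counters,
 * all under the RTNL lock:
 *
 *	struct rtnl_hw_stats64 stats;
 *	bool used;
 *	int err;
 *
 *	err = netdev_offload_xstats_enable(dev,
 *					   NETDEV_OFFLOAD_XSTATS_TYPE_L3,
 *					   extack);
 *	if (err)
 *		return err;
 *	...
 *	err = netdev_offload_xstats_get(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
 *					&stats, &used, extack);
 */
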
8940 void
8941 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta,
8942 				   const struct rtnl_hw_stats64 *stats)
8943 {
8944 	report_delta->used = true;
8945 	netdev_hw_stats64_add(&report_delta->stats, stats);
8946 }
8947 EXPORT_SYMBOL(netdev_offload_xstats_report_delta);
8948 
8949 void
8950 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used)
8951 {
8952 	report_used->used = true;
8953 }
8954 EXPORT_SYMBOL(netdev_offload_xstats_report_used);
8955 
8956 void netdev_offload_xstats_push_delta(struct net_device *dev,
8957 				      enum netdev_offload_xstats_type type,
8958 				      const struct rtnl_hw_stats64 *p_stats)
8959 {
8960 	struct rtnl_hw_stats64 *stats;
8961 
8962 	ASSERT_RTNL();
8963 
8964 	stats = netdev_offload_xstats_get_ptr(dev, type);
8965 	if (WARN_ON(!stats))
8966 		return;
8967 
8968 	netdev_hw_stats64_add(stats, p_stats);
8969 }
8970 EXPORT_SYMBOL(netdev_offload_xstats_push_delta);
8971 
8972 /**
8973  * netdev_get_xmit_slave - Get the xmit slave of master device
8974  * @dev: device
8975  * @skb: The packet
8976  * @all_slaves: assume all the slaves are active
8977  *
8978  * The reference counters are not incremented so the caller must be
8979  * careful with locks. The caller must hold RCU lock.
8980  * %NULL is returned if no slave is found.
8981  */
8982 
8983 struct net_device *netdev_get_xmit_slave(struct net_device *dev,
8984 					 struct sk_buff *skb,
8985 					 bool all_slaves)
8986 {
8987 	const struct net_device_ops *ops = dev->netdev_ops;
8988 
8989 	if (!ops->ndo_get_xmit_slave)
8990 		return NULL;
8991 	return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
8992 }
8993 EXPORT_SYMBOL(netdev_get_xmit_slave);
8994 
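/* Example usage (an illustrative sketch, not from this file): finding the
 * slave a given skb would be transmitted on; "bond_dev" and "skb" are
 * hypothetical, and since no reference is taken, the returned slave must
 * not be used outside the RCU section:
 *
 *	struct net_device *slave;
 *
 *	rcu_read_lock();
 *	slave = netdev_get_xmit_slave(bond_dev, skb, false);
 *	if (slave)
 *		netdev_dbg(bond_dev, "xmit slave: %s\n", slave->name);
 *	rcu_read_unlock();
 */
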
8995 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
8996 						  struct sock *sk)
8997 {
8998 	const struct net_device_ops *ops = dev->netdev_ops;
8999 
9000 	if (!ops->ndo_sk_get_lower_dev)
9001 		return NULL;
9002 	return ops->ndo_sk_get_lower_dev(dev, sk);
9003 }
9004 
9005 /**
9006  * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
9007  * @dev: device
9008  * @sk: the socket
9009  *
9010  * %NULL is returned if no lower device is found.
9011  */
9012 
9013 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
9014 					    struct sock *sk)
9015 {
9016 	struct net_device *lower;
9017 
9018 	lower = netdev_sk_get_lower_dev(dev, sk);
9019 	while (lower) {
9020 		dev = lower;
9021 		lower = netdev_sk_get_lower_dev(dev, sk);
9022 	}
9023 
9024 	return dev;
9025 }
9026 EXPORT_SYMBOL(netdev_sk_get_lowest_dev);
9027 
9028 static void netdev_adjacent_add_links(struct net_device *dev)
9029 {
9030 	struct netdev_adjacent *iter;
9031 
9032 	struct net *net = dev_net(dev);
9033 
9034 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
9035 		if (!net_eq(net, dev_net(iter->dev)))
9036 			continue;
9037 		netdev_adjacent_sysfs_add(iter->dev, dev,
9038 					  &iter->dev->adj_list.lower);
9039 		netdev_adjacent_sysfs_add(dev, iter->dev,
9040 					  &dev->adj_list.upper);
9041 	}
9042 
9043 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
9044 		if (!net_eq(net, dev_net(iter->dev)))
9045 			continue;
9046 		netdev_adjacent_sysfs_add(iter->dev, dev,
9047 					  &iter->dev->adj_list.upper);
9048 		netdev_adjacent_sysfs_add(dev, iter->dev,
9049 					  &dev->adj_list.lower);
9050 	}
9051 }
9052 
9053 static void netdev_adjacent_del_links(struct net_device *dev)
9054 {
9055 	struct netdev_adjacent *iter;
9056 
9057 	struct net *net = dev_net(dev);
9058 
9059 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
9060 		if (!net_eq(net, dev_net(iter->dev)))
9061 			continue;
9062 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
9063 					  &iter->dev->adj_list.lower);
9064 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
9065 					  &dev->adj_list.upper);
9066 	}
9067 
9068 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
9069 		if (!net_eq(net, dev_net(iter->dev)))
9070 			continue;
9071 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
9072 					  &iter->dev->adj_list.upper);
9073 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
9074 					  &dev->adj_list.lower);
9075 	}
9076 }
9077 
9078 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
9079 {
9080 	struct netdev_adjacent *iter;
9081 
9082 	struct net *net = dev_net(dev);
9083 
9084 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
9085 		if (!net_eq(net, dev_net(iter->dev)))
9086 			continue;
9087 		netdev_adjacent_sysfs_del(iter->dev, oldname,
9088 					  &iter->dev->adj_list.lower);
9089 		netdev_adjacent_sysfs_add(iter->dev, dev,
9090 					  &iter->dev->adj_list.lower);
9091 	}
9092 
9093 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
9094 		if (!net_eq(net, dev_net(iter->dev)))
9095 			continue;
9096 		netdev_adjacent_sysfs_del(iter->dev, oldname,
9097 					  &iter->dev->adj_list.upper);
9098 		netdev_adjacent_sysfs_add(iter->dev, dev,
9099 					  &iter->dev->adj_list.upper);
9100 	}
9101 }
9102 
9103 void *netdev_lower_dev_get_private(struct net_device *dev,
9104 				   struct net_device *lower_dev)
9105 {
9106 	struct netdev_adjacent *lower;
9107 
9108 	if (!lower_dev)
9109 		return NULL;
9110 	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
9111 	if (!lower)
9112 		return NULL;
9113 
9114 	return lower->private;
9115 }
9116 EXPORT_SYMBOL(netdev_lower_dev_get_private);
9117 
9118 
9119 /**
9120  * netdev_lower_state_changed - Dispatch event about lower device state change
9121  * @lower_dev: device
9122  * @lower_state_info: state to dispatch
9123  *
9124  * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
9125  * The caller must hold the RTNL lock.
9126  */
9127 void netdev_lower_state_changed(struct net_device *lower_dev,
9128 				void *lower_state_info)
9129 {
9130 	struct netdev_notifier_changelowerstate_info changelowerstate_info = {
9131 		.info.dev = lower_dev,
9132 	};
9133 
9134 	ASSERT_RTNL();
9135 	changelowerstate_info.lower_state_info = lower_state_info;
9136 	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
9137 				      &changelowerstate_info.info);
9138 }
9139 EXPORT_SYMBOL(netdev_lower_state_changed);
9140 
9141 static void dev_change_rx_flags(struct net_device *dev, int flags)
9142 {
9143 	const struct net_device_ops *ops = dev->netdev_ops;
9144 
9145 	if (ops->ndo_change_rx_flags)
9146 		ops->ndo_change_rx_flags(dev, flags);
9147 }
9148 
9149 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
9150 {
9151 	unsigned int old_flags = dev->flags;
9152 	unsigned int promiscuity, flags;
9153 	kuid_t uid;
9154 	kgid_t gid;
9155 
9156 	ASSERT_RTNL();
9157 
9158 	promiscuity = dev->promiscuity + inc;
9159 	if (promiscuity == 0) {
9160 		/*
9161 		 * Avoid overflow.
9162 		 * If inc causes overflow, leave promisc untouched and return an error.
9163 		 */
9164 		if (unlikely(inc > 0)) {
9165 			netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
9166 			return -EOVERFLOW;
9167 		}
9168 		flags = old_flags & ~IFF_PROMISC;
9169 	} else {
9170 		flags = old_flags | IFF_PROMISC;
9171 	}
9172 	WRITE_ONCE(dev->promiscuity, promiscuity);
9173 	if (flags != old_flags) {
9174 		WRITE_ONCE(dev->flags, flags);
9175 		netdev_info(dev, "%s promiscuous mode\n",
9176 			    dev->flags & IFF_PROMISC ? "entered" : "left");
9177 		if (audit_enabled) {
9178 			current_uid_gid(&uid, &gid);
9179 			audit_log(audit_context(), GFP_ATOMIC,
9180 				  AUDIT_ANOM_PROMISCUOUS,
9181 				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
9182 				  dev->name, (dev->flags & IFF_PROMISC),
9183 				  (old_flags & IFF_PROMISC),
9184 				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
9185 				  from_kuid(&init_user_ns, uid),
9186 				  from_kgid(&init_user_ns, gid),
9187 				  audit_get_sessionid(current));
9188 		}
9189 
9190 		dev_change_rx_flags(dev, IFF_PROMISC);
9191 	}
9192 	if (notify)
9193 		__dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL);
9194 	return 0;
9195 }
9196 
9197 /**
9198  *	dev_set_promiscuity	- update promiscuity count on a device
9199  *	@dev: device
9200  *	@inc: modifier
9201  *
9202  *	Add or remove promiscuity from a device. While the count in the device
9203  *	remains above zero the interface remains promiscuous. Once it hits zero
9204  *	the device reverts back to normal filtering operation. A negative inc
9205  *	value is used to drop promiscuity on the device.
9206  *	Return 0 if successful or a negative errno code on error.
9207  */
9208 int dev_set_promiscuity(struct net_device *dev, int inc)
9209 {
9210 	unsigned int old_flags = dev->flags;
9211 	int err;
9212 
9213 	err = __dev_set_promiscuity(dev, inc, true);
9214 	if (err < 0)
9215 		return err;
9216 	if (dev->flags != old_flags)
9217 		dev_set_rx_mode(dev);
9218 	return err;
9219 }
9220 EXPORT_SYMBOL(dev_set_promiscuity);
9221 
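/* Example usage (an illustrative sketch, not from this file): a
 * packet-capture style user taking and later dropping one promiscuity
 * reference. The setting is counted, so every increment must be balanced
 * by a matching decrement or the device stays promiscuous:
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */
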
9222 int netif_set_allmulti(struct net_device *dev, int inc, bool notify)
9223 {
9224 	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
9225 	unsigned int allmulti, flags;
9226 
9227 	ASSERT_RTNL();
9228 
9229 	allmulti = dev->allmulti + inc;
9230 	if (allmulti == 0) {
9231 		/*
9232 		 * Avoid overflow.
9233 		 * If inc causes overflow, leave allmulti untouched and return an error.
9234 		 */
9235 		if (unlikely(inc > 0)) {
9236 			netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
9237 			return -EOVERFLOW;
9238 		}
9239 		flags = old_flags & ~IFF_ALLMULTI;
9240 	} else {
9241 		flags = old_flags | IFF_ALLMULTI;
9242 	}
9243 	WRITE_ONCE(dev->allmulti, allmulti);
9244 	if (flags != old_flags) {
9245 		WRITE_ONCE(dev->flags, flags);
9246 		netdev_info(dev, "%s allmulticast mode\n",
9247 			    dev->flags & IFF_ALLMULTI ? "entered" : "left");
9248 		dev_change_rx_flags(dev, IFF_ALLMULTI);
9249 		dev_set_rx_mode(dev);
9250 		if (notify)
9251 			__dev_notify_flags(dev, old_flags,
9252 					   dev->gflags ^ old_gflags, 0, NULL);
9253 	}
9254 	return 0;
9255 }
9256 
9257 /*
9258  *	Upload unicast and multicast address lists to device and
9259  *	configure RX filtering. When the device doesn't support unicast
9260  *	filtering it is put in promiscuous mode while unicast addresses
9261  *	are present.
9262  */
9263 void __dev_set_rx_mode(struct net_device *dev)
9264 {
9265 	const struct net_device_ops *ops = dev->netdev_ops;
9266 
9267 	/* dev_open will call this function so the list will stay sane. */
9268 	if (!(dev->flags&IFF_UP))
9269 		return;
9270 
9271 	if (!netif_device_present(dev))
9272 		return;
9273 
9274 	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
9275 		/* Unicast address changes may only happen under the rtnl,
9276 		 * therefore calling __dev_set_promiscuity here is safe.
9277 		 */
9278 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
9279 			__dev_set_promiscuity(dev, 1, false);
9280 			dev->uc_promisc = true;
9281 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
9282 			__dev_set_promiscuity(dev, -1, false);
9283 			dev->uc_promisc = false;
9284 		}
9285 	}
9286 
9287 	if (ops->ndo_set_rx_mode)
9288 		ops->ndo_set_rx_mode(dev);
9289 }
9290 
9291 void dev_set_rx_mode(struct net_device *dev)
9292 {
9293 	netif_addr_lock_bh(dev);
9294 	__dev_set_rx_mode(dev);
9295 	netif_addr_unlock_bh(dev);
9296 }
9297 
9298 /**
9299  *	dev_get_flags - get flags reported to userspace
9300  *	@dev: device
9301  *
9302  *	Get the combination of flag bits exported through APIs to userspace.
9303  */
9304 unsigned int dev_get_flags(const struct net_device *dev)
9305 {
9306 	unsigned int flags;
9307 
9308 	flags = (READ_ONCE(dev->flags) & ~(IFF_PROMISC |
9309 				IFF_ALLMULTI |
9310 				IFF_RUNNING |
9311 				IFF_LOWER_UP |
9312 				IFF_DORMANT)) |
9313 		(READ_ONCE(dev->gflags) & (IFF_PROMISC |
9314 				IFF_ALLMULTI));
9315 
9316 	if (netif_running(dev)) {
9317 		if (netif_oper_up(dev))
9318 			flags |= IFF_RUNNING;
9319 		if (netif_carrier_ok(dev))
9320 			flags |= IFF_LOWER_UP;
9321 		if (netif_dormant(dev))
9322 			flags |= IFF_DORMANT;
9323 	}
9324 
9325 	return flags;
9326 }
9327 EXPORT_SYMBOL(dev_get_flags);
9328 
9329 int __dev_change_flags(struct net_device *dev, unsigned int flags,
9330 		       struct netlink_ext_ack *extack)
9331 {
9332 	unsigned int old_flags = dev->flags;
9333 	int ret;
9334 
9335 	ASSERT_RTNL();
9336 
9337 	/*
9338 	 *	Set the flags on our device.
9339 	 */
9340 
9341 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
9342 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
9343 			       IFF_AUTOMEDIA)) |
9344 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
9345 				    IFF_ALLMULTI));
9346 
9347 	/*
9348 	 *	Load in the correct multicast list now that the flags have changed.
9349 	 */
9350 
9351 	if ((old_flags ^ flags) & IFF_MULTICAST)
9352 		dev_change_rx_flags(dev, IFF_MULTICAST);
9353 
9354 	dev_set_rx_mode(dev);
9355 
9356 	/*
9357 	 *	Have we downed the interface? We handle IFF_UP ourselves
9358 	 *	according to user attempts to set it, rather than blindly
9359 	 *	setting it.
9360 	 */
9361 
9362 	ret = 0;
9363 	if ((old_flags ^ flags) & IFF_UP) {
9364 		if (old_flags & IFF_UP)
9365 			__dev_close(dev);
9366 		else
9367 			ret = __dev_open(dev, extack);
9368 	}
9369 
9370 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
9371 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
9372 		old_flags = dev->flags;
9373 
9374 		dev->gflags ^= IFF_PROMISC;
9375 
9376 		if (__dev_set_promiscuity(dev, inc, false) >= 0)
9377 			if (dev->flags != old_flags)
9378 				dev_set_rx_mode(dev);
9379 	}
9380 
9381 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
9382 	 * is important. Some (broken) drivers set IFF_PROMISC when
9383 	 * IFF_ALLMULTI is requested, without asking us and without reporting it.
9384 	 */
9385 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
9386 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
9387 
9388 		dev->gflags ^= IFF_ALLMULTI;
9389 		netif_set_allmulti(dev, inc, false);
9390 	}
9391 
9392 	return ret;
9393 }
9394 
9395 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
9396 			unsigned int gchanges, u32 portid,
9397 			const struct nlmsghdr *nlh)
9398 {
9399 	unsigned int changes = dev->flags ^ old_flags;
9400 
9401 	if (gchanges)
9402 		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh);
9403 
9404 	if (changes & IFF_UP) {
9405 		if (dev->flags & IFF_UP)
9406 			call_netdevice_notifiers(NETDEV_UP, dev);
9407 		else
9408 			call_netdevice_notifiers(NETDEV_DOWN, dev);
9409 	}
9410 
9411 	if (dev->flags & IFF_UP &&
9412 	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
9413 		struct netdev_notifier_change_info change_info = {
9414 			.info = {
9415 				.dev = dev,
9416 			},
9417 			.flags_changed = changes,
9418 		};
9419 
9420 		call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
9421 	}
9422 }
9423 
9424 int netif_change_flags(struct net_device *dev, unsigned int flags,
9425 		       struct netlink_ext_ack *extack)
9426 {
9427 	int ret;
9428 	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
9429 
9430 	ret = __dev_change_flags(dev, flags, extack);
9431 	if (ret < 0)
9432 		return ret;
9433 
9434 	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
9435 	__dev_notify_flags(dev, old_flags, changes, 0, NULL);
9436 	return ret;
9437 }
9438 
9439 int __dev_set_mtu(struct net_device *dev, int new_mtu)
9440 {
9441 	const struct net_device_ops *ops = dev->netdev_ops;
9442 
9443 	if (ops->ndo_change_mtu)
9444 		return ops->ndo_change_mtu(dev, new_mtu);
9445 
9446 	/* Pairs with all the lockless reads of dev->mtu in the stack */
9447 	WRITE_ONCE(dev->mtu, new_mtu);
9448 	return 0;
9449 }
9450 EXPORT_SYMBOL(__dev_set_mtu);
9451 
9452 int dev_validate_mtu(struct net_device *dev, int new_mtu,
9453 		     struct netlink_ext_ack *extack)
9454 {
9455 	/* MTU must be positive and in range */
9456 	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
9457 		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
9458 		return -EINVAL;
9459 	}
9460 
9461 	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
9462 		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
9463 		return -EINVAL;
9464 	}
9465 	return 0;
9466 }
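
/*
 * Sketch (assumption, not from this file): drivers opt into this
 * validation by filling in their MTU bounds before registration,
 * e.g. at probe time:
 *
 *	dev->min_mtu = ETH_MIN_MTU;
 *	dev->max_mtu = 9000;	(hypothetical jumbo-capable hardware)
 *
 * A max_mtu of 0 leaves the upper bound unenforced here.
 */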
9467 
9468 /**
9469  *	netif_set_mtu_ext - Change maximum transfer unit
9470  *	@dev: device
9471  *	@new_mtu: new transfer unit
9472  *	@extack: netlink extended ack
9473  *
9474  *	Change the maximum transfer size of the network device.
9475  */
9476 int netif_set_mtu_ext(struct net_device *dev, int new_mtu,
9477 		      struct netlink_ext_ack *extack)
9478 {
9479 	int err, orig_mtu;
9480 
9481 	if (new_mtu == dev->mtu)
9482 		return 0;
9483 
9484 	err = dev_validate_mtu(dev, new_mtu, extack);
9485 	if (err)
9486 		return err;
9487 
9488 	if (!netif_device_present(dev))
9489 		return -ENODEV;
9490 
9491 	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
9492 	err = notifier_to_errno(err);
9493 	if (err)
9494 		return err;
9495 
9496 	orig_mtu = dev->mtu;
9497 	err = __dev_set_mtu(dev, new_mtu);
9498 
9499 	if (!err) {
9500 		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
9501 						   orig_mtu);
9502 		err = notifier_to_errno(err);
9503 		if (err) {
9504 			/* set the MTU back and notify everyone again,
9505 			 * so that they have a chance to revert the change.
9506 			 */
9507 			__dev_set_mtu(dev, orig_mtu);
9508 			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
9509 						     new_mtu);
9510 		}
9511 	}
9512 	return err;
9513 }
9514 
9515 int netif_set_mtu(struct net_device *dev, int new_mtu)
9516 {
9517 	struct netlink_ext_ack extack;
9518 	int err;
9519 
9520 	memset(&extack, 0, sizeof(extack));
9521 	err = netif_set_mtu_ext(dev, new_mtu, &extack);
9522 	if (err && extack._msg)
9523 		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
9524 	return err;
9525 }
9526 EXPORT_SYMBOL(netif_set_mtu);
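
/*
 * Usage sketch (assumption, not from this file): a stacked device
 * propagating an MTU change to its lower device, with RTNL held:
 *
 *	err = netif_set_mtu(lower_dev, new_mtu);
 *	if (err)
 *		netdev_warn(dev, "failed to set MTU on %s: %d\n",
 *			    lower_dev->name, err);
 */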
9527 
9528 int netif_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
9529 {
9530 	unsigned int orig_len = dev->tx_queue_len;
9531 	int res;
9532 
9533 	if (new_len != (unsigned int)new_len)
9534 		return -ERANGE;
9535 
9536 	if (new_len != orig_len) {
9537 		WRITE_ONCE(dev->tx_queue_len, new_len);
9538 		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
9539 		res = notifier_to_errno(res);
9540 		if (res)
9541 			goto err_rollback;
9542 		res = dev_qdisc_change_tx_queue_len(dev);
9543 		if (res)
9544 			goto err_rollback;
9545 	}
9546 
9547 	return 0;
9548 
9549 err_rollback:
9550 	netdev_err(dev, "refused to change device tx_queue_len\n");
9551 	WRITE_ONCE(dev->tx_queue_len, orig_len);
9552 	return res;
9553 }
9554 
9555 void netif_set_group(struct net_device *dev, int new_group)
9556 {
9557 	dev->group = new_group;
9558 }
9559 
9560 /**
9561  *	dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
9562  *	@dev: device
9563  *	@addr: new address
9564  *	@extack: netlink extended ack
9565  */
9566 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
9567 			      struct netlink_ext_ack *extack)
9568 {
9569 	struct netdev_notifier_pre_changeaddr_info info = {
9570 		.info.dev = dev,
9571 		.info.extack = extack,
9572 		.dev_addr = addr,
9573 	};
9574 	int rc;
9575 
9576 	rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
9577 	return notifier_to_errno(rc);
9578 }
9579 EXPORT_SYMBOL(dev_pre_changeaddr_notify);
9580 
9581 int netif_set_mac_address(struct net_device *dev, struct sockaddr *sa,
9582 			  struct netlink_ext_ack *extack)
9583 {
9584 	const struct net_device_ops *ops = dev->netdev_ops;
9585 	int err;
9586 
9587 	if (!ops->ndo_set_mac_address)
9588 		return -EOPNOTSUPP;
9589 	if (sa->sa_family != dev->type)
9590 		return -EINVAL;
9591 	if (!netif_device_present(dev))
9592 		return -ENODEV;
9593 	err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
9594 	if (err)
9595 		return err;
9596 	if (memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) {
9597 		err = ops->ndo_set_mac_address(dev, sa);
9598 		if (err)
9599 			return err;
9600 	}
9601 	dev->addr_assign_type = NET_ADDR_SET;
9602 	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
9603 	add_device_randomness(dev->dev_addr, dev->addr_len);
9604 	return 0;
9605 }
9606 
9607 DECLARE_RWSEM(dev_addr_sem);
9608 
9609 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
9610 {
9611 	size_t size = sizeof(sa->sa_data_min);
9612 	struct net_device *dev;
9613 	int ret = 0;
9614 
9615 	down_read(&dev_addr_sem);
9616 	rcu_read_lock();
9617 
9618 	dev = dev_get_by_name_rcu(net, dev_name);
9619 	if (!dev) {
9620 		ret = -ENODEV;
9621 		goto unlock;
9622 	}
9623 	if (!dev->addr_len)
9624 		memset(sa->sa_data, 0, size);
9625 	else
9626 		memcpy(sa->sa_data, dev->dev_addr,
9627 		       min_t(size_t, size, dev->addr_len));
9628 	sa->sa_family = dev->type;
9629 
9630 unlock:
9631 	rcu_read_unlock();
9632 	up_read(&dev_addr_sem);
9633 	return ret;
9634 }
9635 EXPORT_SYMBOL(dev_get_mac_address);
9636 
9637 int netif_change_carrier(struct net_device *dev, bool new_carrier)
9638 {
9639 	const struct net_device_ops *ops = dev->netdev_ops;
9640 
9641 	if (!ops->ndo_change_carrier)
9642 		return -EOPNOTSUPP;
9643 	if (!netif_device_present(dev))
9644 		return -ENODEV;
9645 	return ops->ndo_change_carrier(dev, new_carrier);
9646 }
9647 
9648 /**
9649  *	dev_get_phys_port_id - Get device physical port ID
9650  *	@dev: device
9651  *	@ppid: port ID
9652  *
9653  *	Get device physical port ID
9654  */
9655 int dev_get_phys_port_id(struct net_device *dev,
9656 			 struct netdev_phys_item_id *ppid)
9657 {
9658 	const struct net_device_ops *ops = dev->netdev_ops;
9659 
9660 	if (!ops->ndo_get_phys_port_id)
9661 		return -EOPNOTSUPP;
9662 	return ops->ndo_get_phys_port_id(dev, ppid);
9663 }
9664 
9665 /**
9666  *	dev_get_phys_port_name - Get device physical port name
9667  *	@dev: device
9668  *	@name: port name
9669  *	@len: limit of bytes to copy to name
9670  *
9671  *	Get device physical port name
9672  */
9673 int dev_get_phys_port_name(struct net_device *dev,
9674 			   char *name, size_t len)
9675 {
9676 	const struct net_device_ops *ops = dev->netdev_ops;
9677 	int err;
9678 
9679 	if (ops->ndo_get_phys_port_name) {
9680 		err = ops->ndo_get_phys_port_name(dev, name, len);
9681 		if (err != -EOPNOTSUPP)
9682 			return err;
9683 	}
9684 	return devlink_compat_phys_port_name_get(dev, name, len);
9685 }
9686 
9687 /**
9688  *	dev_get_port_parent_id - Get the device's port parent identifier
9689  *	@dev: network device
9690  *	@ppid: pointer to a storage for the port's parent identifier
9691  *	@recurse: allow/disallow recursion to lower devices
9692  *
9693  *	Get the device's port parent identifier
9694  */
9695 int dev_get_port_parent_id(struct net_device *dev,
9696 			   struct netdev_phys_item_id *ppid,
9697 			   bool recurse)
9698 {
9699 	const struct net_device_ops *ops = dev->netdev_ops;
9700 	struct netdev_phys_item_id first = { };
9701 	struct net_device *lower_dev;
9702 	struct list_head *iter;
9703 	int err;
9704 
9705 	if (ops->ndo_get_port_parent_id) {
9706 		err = ops->ndo_get_port_parent_id(dev, ppid);
9707 		if (err != -EOPNOTSUPP)
9708 			return err;
9709 	}
9710 
9711 	err = devlink_compat_switch_id_get(dev, ppid);
9712 	if (!recurse || err != -EOPNOTSUPP)
9713 		return err;
9714 
9715 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
9716 		err = dev_get_port_parent_id(lower_dev, ppid, true);
9717 		if (err)
9718 			break;
9719 		if (!first.id_len)
9720 			first = *ppid;
9721 		else if (memcmp(&first, ppid, sizeof(*ppid)))
9722 			return -EOPNOTSUPP;
9723 	}
9724 
9725 	return err;
9726 }
9727 EXPORT_SYMBOL(dev_get_port_parent_id);
9728 
9729 /**
9730  *	netdev_port_same_parent_id - Indicate if two network devices have
9731  *	the same port parent identifier
9732  *	@a: first network device
9733  *	@b: second network device
9734  */
9735 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
9736 {
9737 	struct netdev_phys_item_id a_id = { };
9738 	struct netdev_phys_item_id b_id = { };
9739 
9740 	if (dev_get_port_parent_id(a, &a_id, true) ||
9741 	    dev_get_port_parent_id(b, &b_id, true))
9742 		return false;
9743 
9744 	return netdev_phys_item_id_same(&a_id, &b_id);
9745 }
9746 EXPORT_SYMBOL(netdev_port_same_parent_id);
9747 
9748 int netif_change_proto_down(struct net_device *dev, bool proto_down)
9749 {
9750 	if (!dev->change_proto_down)
9751 		return -EOPNOTSUPP;
9752 	if (!netif_device_present(dev))
9753 		return -ENODEV;
9754 	if (proto_down)
9755 		netif_carrier_off(dev);
9756 	else
9757 		netif_carrier_on(dev);
9758 	WRITE_ONCE(dev->proto_down, proto_down);
9759 	return 0;
9760 }
9761 
9762 /**
9763  *	netdev_change_proto_down_reason_locked - proto down reason
9764  *
9765  *	@dev: device
9766  *	@mask: proto down mask
9767  *	@value: proto down value
9768  */
9769 void netdev_change_proto_down_reason_locked(struct net_device *dev,
9770 					    unsigned long mask, u32 value)
9771 {
9772 	u32 proto_down_reason;
9773 	int b;
9774 
9775 	if (!mask) {
9776 		proto_down_reason = value;
9777 	} else {
9778 		proto_down_reason = dev->proto_down_reason;
9779 		for_each_set_bit(b, &mask, 32) {
9780 			if (value & (1 << b))
9781 				proto_down_reason |= BIT(b);
9782 			else
9783 				proto_down_reason &= ~BIT(b);
9784 		}
9785 	}
9786 	WRITE_ONCE(dev->proto_down_reason, proto_down_reason);
9787 }
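
/*
 * Semantics sketch for the helper above: only the reason bits selected
 * by @mask are rewritten and all other bits are preserved; a zero mask
 * replaces the whole reason word. For example (hypothetical values):
 *
 *	set bit 2 and clear bit 0, leaving the rest untouched:
 *	netdev_change_proto_down_reason_locked(dev, BIT(0) | BIT(2), BIT(2));
 *
 *	replace the entire reason word:
 *	netdev_change_proto_down_reason_locked(dev, 0, reason);
 */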
9788 
9789 struct bpf_xdp_link {
9790 	struct bpf_link link;
9791 	struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
9792 	int flags;
9793 };
9794 
9795 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
9796 {
9797 	if (flags & XDP_FLAGS_HW_MODE)
9798 		return XDP_MODE_HW;
9799 	if (flags & XDP_FLAGS_DRV_MODE)
9800 		return XDP_MODE_DRV;
9801 	if (flags & XDP_FLAGS_SKB_MODE)
9802 		return XDP_MODE_SKB;
9803 	return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
9804 }
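
/*
 * Mapping sketch for dev_xdp_mode() above: an explicit flag always wins;
 * with no mode flag set, the mode defaults to native (XDP_MODE_DRV) when
 * the driver implements ndo_bpf and to generic (XDP_MODE_SKB) otherwise:
 *
 *	mode = dev_xdp_mode(dev, XDP_FLAGS_SKB_MODE);	always generic
 *	mode = dev_xdp_mode(dev, 0);			driver-dependent
 */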
9805 
9806 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
9807 {
9808 	switch (mode) {
9809 	case XDP_MODE_SKB:
9810 		return generic_xdp_install;
9811 	case XDP_MODE_DRV:
9812 	case XDP_MODE_HW:
9813 		return dev->netdev_ops->ndo_bpf;
9814 	default:
9815 		return NULL;
9816 	}
9817 }
9818 
9819 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
9820 					 enum bpf_xdp_mode mode)
9821 {
9822 	return dev->xdp_state[mode].link;
9823 }
9824 
9825 static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
9826 				     enum bpf_xdp_mode mode)
9827 {
9828 	struct bpf_xdp_link *link = dev_xdp_link(dev, mode);
9829 
9830 	if (link)
9831 		return link->link.prog;
9832 	return dev->xdp_state[mode].prog;
9833 }
9834 
9835 u8 dev_xdp_prog_count(struct net_device *dev)
9836 {
9837 	u8 count = 0;
9838 	int i;
9839 
9840 	for (i = 0; i < __MAX_XDP_MODE; i++)
9841 		if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
9842 			count++;
9843 	return count;
9844 }
9845 EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
9846 
9847 u8 dev_xdp_sb_prog_count(struct net_device *dev)
9848 {
9849 	u8 count = 0;
9850 	int i;
9851 
9852 	for (i = 0; i < __MAX_XDP_MODE; i++)
9853 		if (dev->xdp_state[i].prog &&
9854 		    !dev->xdp_state[i].prog->aux->xdp_has_frags)
9855 			count++;
9856 	return count;
9857 }
9858 
9859 int netif_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf)
9860 {
9861 	if (!dev->netdev_ops->ndo_bpf)
9862 		return -EOPNOTSUPP;
9863 
9864 	if (dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
9865 	    bpf->command == XDP_SETUP_PROG &&
9866 	    bpf->prog && !bpf->prog->aux->xdp_has_frags) {
9867 		NL_SET_ERR_MSG(bpf->extack,
9868 			       "unable to propagate XDP to device using tcp-data-split");
9869 		return -EBUSY;
9870 	}
9871 
9872 	if (dev_get_min_mp_channel_count(dev)) {
9873 		NL_SET_ERR_MSG(bpf->extack, "unable to propagate XDP to device using memory provider");
9874 		return -EBUSY;
9875 	}
9876 
9877 	return dev->netdev_ops->ndo_bpf(dev, bpf);
9878 }
9879 
9880 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
9881 {
9882 	struct bpf_prog *prog = dev_xdp_prog(dev, mode);
9883 
9884 	return prog ? prog->aux->id : 0;
9885 }
9886 
9887 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
9888 			     struct bpf_xdp_link *link)
9889 {
9890 	dev->xdp_state[mode].link = link;
9891 	dev->xdp_state[mode].prog = NULL;
9892 }
9893 
9894 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
9895 			     struct bpf_prog *prog)
9896 {
9897 	dev->xdp_state[mode].link = NULL;
9898 	dev->xdp_state[mode].prog = prog;
9899 }
9900 
9901 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
9902 			   bpf_op_t bpf_op, struct netlink_ext_ack *extack,
9903 			   u32 flags, struct bpf_prog *prog)
9904 {
9905 	struct netdev_bpf xdp;
9906 	int err;
9907 
9908 	netdev_ops_assert_locked(dev);
9909 
9910 	if (dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
9911 	    prog && !prog->aux->xdp_has_frags) {
9912 		NL_SET_ERR_MSG(extack, "unable to install XDP to device using tcp-data-split");
9913 		return -EBUSY;
9914 	}
9915 
9916 	if (dev_get_min_mp_channel_count(dev)) {
9917 		NL_SET_ERR_MSG(extack, "unable to install XDP to device using memory provider");
9918 		return -EBUSY;
9919 	}
9920 
9921 	memset(&xdp, 0, sizeof(xdp));
9922 	xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
9923 	xdp.extack = extack;
9924 	xdp.flags = flags;
9925 	xdp.prog = prog;
9926 
9927 	/* Drivers assume refcnt is already incremented (i.e., prog pointer is
9928 	 * "moved" into driver), so they don't increment it on their own, but
9929 	 * they do decrement refcnt when program is detached or replaced.
9930 	 * Given net_device also owns link/prog, we need to bump refcnt here
9931 	 * to prevent drivers from underflowing it.
9932 	 */
9933 	if (prog)
9934 		bpf_prog_inc(prog);
9935 	err = bpf_op(dev, &xdp);
9936 	if (err) {
9937 		if (prog)
9938 			bpf_prog_put(prog);
9939 		return err;
9940 	}
9941 
9942 	if (mode != XDP_MODE_HW)
9943 		bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);
9944 
9945 	return 0;
9946 }
9947 
9948 static void dev_xdp_uninstall(struct net_device *dev)
9949 {
9950 	struct bpf_xdp_link *link;
9951 	struct bpf_prog *prog;
9952 	enum bpf_xdp_mode mode;
9953 	bpf_op_t bpf_op;
9954 
9955 	ASSERT_RTNL();
9956 
9957 	for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
9958 		prog = dev_xdp_prog(dev, mode);
9959 		if (!prog)
9960 			continue;
9961 
9962 		bpf_op = dev_xdp_bpf_op(dev, mode);
9963 		if (!bpf_op)
9964 			continue;
9965 
9966 		WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9967 
9968 		/* auto-detach link from net device */
9969 		link = dev_xdp_link(dev, mode);
9970 		if (link)
9971 			link->dev = NULL;
9972 		else
9973 			bpf_prog_put(prog);
9974 
9975 		dev_xdp_set_link(dev, mode, NULL);
9976 	}
9977 }
9978 
9979 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
9980 			  struct bpf_xdp_link *link, struct bpf_prog *new_prog,
9981 			  struct bpf_prog *old_prog, u32 flags)
9982 {
9983 	unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
9984 	struct bpf_prog *cur_prog;
9985 	struct net_device *upper;
9986 	struct list_head *iter;
9987 	enum bpf_xdp_mode mode;
9988 	bpf_op_t bpf_op;
9989 	int err;
9990 
9991 	ASSERT_RTNL();
9992 
9993 	/* either link or prog attachment, never both */
9994 	if (link && (new_prog || old_prog))
9995 		return -EINVAL;
9996 	/* link supports only XDP mode flags */
9997 	if (link && (flags & ~XDP_FLAGS_MODES)) {
9998 		NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
9999 		return -EINVAL;
10000 	}
10001 	/* just one XDP mode bit should be set, zero defaults to drv/skb mode */
10002 	if (num_modes > 1) {
10003 		NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
10004 		return -EINVAL;
10005 	}
10006 	/* avoid ambiguity if offload + drv/skb mode progs are both loaded */
10007 	if (!num_modes && dev_xdp_prog_count(dev) > 1) {
10008 		NL_SET_ERR_MSG(extack,
10009 			       "More than one program loaded, unset mode is ambiguous");
10010 		return -EINVAL;
10011 	}
10012 	/* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
10013 	if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
10014 		NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
10015 		return -EINVAL;
10016 	}
10017 
10018 	mode = dev_xdp_mode(dev, flags);
10019 	/* can't replace attached link */
10020 	if (dev_xdp_link(dev, mode)) {
10021 		NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
10022 		return -EBUSY;
10023 	}
10024 
10025 	/* don't allow if an upper device already has a program */
10026 	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
10027 		if (dev_xdp_prog_count(upper) > 0) {
10028 			NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program");
10029 			return -EEXIST;
10030 		}
10031 	}
10032 
10033 	cur_prog = dev_xdp_prog(dev, mode);
10034 	/* can't replace attached prog with link */
10035 	if (link && cur_prog) {
10036 		NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
10037 		return -EBUSY;
10038 	}
10039 	if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
10040 		NL_SET_ERR_MSG(extack, "Active program does not match expected");
10041 		return -EEXIST;
10042 	}
10043 
10044 	/* put effective new program into new_prog */
10045 	if (link)
10046 		new_prog = link->link.prog;
10047 
10048 	if (new_prog) {
10049 		bool offload = mode == XDP_MODE_HW;
10050 		enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
10051 					       ? XDP_MODE_DRV : XDP_MODE_SKB;
10052 
10053 		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
10054 			NL_SET_ERR_MSG(extack, "XDP program already attached");
10055 			return -EBUSY;
10056 		}
10057 		if (!offload && dev_xdp_prog(dev, other_mode)) {
10058 			NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
10059 			return -EEXIST;
10060 		}
10061 		if (!offload && bpf_prog_is_offloaded(new_prog->aux)) {
10062 			NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported");
10063 			return -EINVAL;
10064 		}
10065 		if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) {
10066 			NL_SET_ERR_MSG(extack, "Program bound to different device");
10067 			return -EINVAL;
10068 		}
10069 		if (bpf_prog_is_dev_bound(new_prog->aux) && mode == XDP_MODE_SKB) {
10070 			NL_SET_ERR_MSG(extack, "Can't attach device-bound programs in generic mode");
10071 			return -EINVAL;
10072 		}
10073 		if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
10074 			NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
10075 			return -EINVAL;
10076 		}
10077 		if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
10078 			NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
10079 			return -EINVAL;
10080 		}
10081 	}
10082 
10083 	/* don't call drivers if the effective program didn't change */
10084 	if (new_prog != cur_prog) {
10085 		bpf_op = dev_xdp_bpf_op(dev, mode);
10086 		if (!bpf_op) {
10087 			NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
10088 			return -EOPNOTSUPP;
10089 		}
10090 
10091 		err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
10092 		if (err)
10093 			return err;
10094 	}
10095 
10096 	if (link)
10097 		dev_xdp_set_link(dev, mode, link);
10098 	else
10099 		dev_xdp_set_prog(dev, mode, new_prog);
10100 	if (cur_prog)
10101 		bpf_prog_put(cur_prog);
10102 
10103 	return 0;
10104 }
10105 
10106 static int dev_xdp_attach_link(struct net_device *dev,
10107 			       struct netlink_ext_ack *extack,
10108 			       struct bpf_xdp_link *link)
10109 {
10110 	return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
10111 }
10112 
10113 static int dev_xdp_detach_link(struct net_device *dev,
10114 			       struct netlink_ext_ack *extack,
10115 			       struct bpf_xdp_link *link)
10116 {
10117 	enum bpf_xdp_mode mode;
10118 	bpf_op_t bpf_op;
10119 
10120 	ASSERT_RTNL();
10121 
10122 	mode = dev_xdp_mode(dev, link->flags);
10123 	if (dev_xdp_link(dev, mode) != link)
10124 		return -EINVAL;
10125 
10126 	bpf_op = dev_xdp_bpf_op(dev, mode);
10127 	WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
10128 	dev_xdp_set_link(dev, mode, NULL);
10129 	return 0;
10130 }
10131 
10132 static void bpf_xdp_link_release(struct bpf_link *link)
10133 {
10134 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
10135 
10136 	rtnl_lock();
10137 
10138 	/* if racing with net_device's teardown, xdp_link->dev might
10139 	 * already be NULL, in which case the link was already auto-detached
10140 	 */
10141 	if (xdp_link->dev) {
10142 		netdev_lock_ops(xdp_link->dev);
10143 		WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
10144 		netdev_unlock_ops(xdp_link->dev);
10145 		xdp_link->dev = NULL;
10146 	}
10147 
10148 	rtnl_unlock();
10149 }
10150 
10151 static int bpf_xdp_link_detach(struct bpf_link *link)
10152 {
10153 	bpf_xdp_link_release(link);
10154 	return 0;
10155 }
10156 
10157 static void bpf_xdp_link_dealloc(struct bpf_link *link)
10158 {
10159 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
10160 
10161 	kfree(xdp_link);
10162 }
10163 
10164 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
10165 				     struct seq_file *seq)
10166 {
10167 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
10168 	u32 ifindex = 0;
10169 
10170 	rtnl_lock();
10171 	if (xdp_link->dev)
10172 		ifindex = xdp_link->dev->ifindex;
10173 	rtnl_unlock();
10174 
10175 	seq_printf(seq, "ifindex:\t%u\n", ifindex);
10176 }
10177 
10178 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
10179 				       struct bpf_link_info *info)
10180 {
10181 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
10182 	u32 ifindex = 0;
10183 
10184 	rtnl_lock();
10185 	if (xdp_link->dev)
10186 		ifindex = xdp_link->dev->ifindex;
10187 	rtnl_unlock();
10188 
10189 	info->xdp.ifindex = ifindex;
10190 	return 0;
10191 }
10192 
10193 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
10194 			       struct bpf_prog *old_prog)
10195 {
10196 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
10197 	enum bpf_xdp_mode mode;
10198 	bpf_op_t bpf_op;
10199 	int err = 0;
10200 
10201 	rtnl_lock();
10202 
10203 	/* link might have been auto-released already, so fail */
10204 	if (!xdp_link->dev) {
10205 		err = -ENOLINK;
10206 		goto out_unlock;
10207 	}
10208 
10209 	if (old_prog && link->prog != old_prog) {
10210 		err = -EPERM;
10211 		goto out_unlock;
10212 	}
10213 	old_prog = link->prog;
10214 	if (old_prog->type != new_prog->type ||
10215 	    old_prog->expected_attach_type != new_prog->expected_attach_type) {
10216 		err = -EINVAL;
10217 		goto out_unlock;
10218 	}
10219 
10220 	if (old_prog == new_prog) {
10221 		/* no-op, don't disturb drivers */
10222 		bpf_prog_put(new_prog);
10223 		goto out_unlock;
10224 	}
10225 
10226 	netdev_lock_ops(xdp_link->dev);
10227 	mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
10228 	bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
10229 	err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
10230 			      xdp_link->flags, new_prog);
10231 	netdev_unlock_ops(xdp_link->dev);
10232 	if (err)
10233 		goto out_unlock;
10234 
10235 	old_prog = xchg(&link->prog, new_prog);
10236 	bpf_prog_put(old_prog);
10237 
10238 out_unlock:
10239 	rtnl_unlock();
10240 	return err;
10241 }
10242 
10243 static const struct bpf_link_ops bpf_xdp_link_lops = {
10244 	.release = bpf_xdp_link_release,
10245 	.dealloc = bpf_xdp_link_dealloc,
10246 	.detach = bpf_xdp_link_detach,
10247 	.show_fdinfo = bpf_xdp_link_show_fdinfo,
10248 	.fill_link_info = bpf_xdp_link_fill_link_info,
10249 	.update_prog = bpf_xdp_link_update,
10250 };
10251 
10252 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
10253 {
10254 	struct net *net = current->nsproxy->net_ns;
10255 	struct bpf_link_primer link_primer;
10256 	struct netlink_ext_ack extack = {};
10257 	struct bpf_xdp_link *link;
10258 	struct net_device *dev;
10259 	int err, fd;
10260 
10261 	rtnl_lock();
10262 	dev = dev_get_by_index(net, attr->link_create.target_ifindex);
10263 	if (!dev) {
10264 		rtnl_unlock();
10265 		return -EINVAL;
10266 	}
10267 
10268 	link = kzalloc(sizeof(*link), GFP_USER);
10269 	if (!link) {
10270 		err = -ENOMEM;
10271 		goto unlock;
10272 	}
10273 
10274 	bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
10275 	link->dev = dev;
10276 	link->flags = attr->link_create.flags;
10277 
10278 	err = bpf_link_prime(&link->link, &link_primer);
10279 	if (err) {
10280 		kfree(link);
10281 		goto unlock;
10282 	}
10283 
10284 	err = dev_xdp_attach_link(dev, &extack, link);
10285 	rtnl_unlock();
10286 
10287 	if (err) {
10288 		link->dev = NULL;
10289 		bpf_link_cleanup(&link_primer);
10290 		trace_bpf_xdp_link_attach_failed(extack._msg);
10291 		goto out_put_dev;
10292 	}
10293 
10294 	fd = bpf_link_settle(&link_primer);
10295 	/* the link itself doesn't hold dev's refcnt, so as not to complicate shutdown */
10296 	dev_put(dev);
10297 	return fd;
10298 
10299 unlock:
10300 	rtnl_unlock();
10301 
10302 out_put_dev:
10303 	dev_put(dev);
10304 	return err;
10305 }
10306 
10307 /**
10308  *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
10309  *	@dev: device
10310  *	@extack: netlink extended ack
10311  *	@fd: new program fd or negative value to clear
10312  *	@expected_fd: old program fd that userspace expects to replace or clear
10313  *	@flags: xdp-related flags
10314  *
10315  *	Set or clear a bpf program for a device
10316  */
10317 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
10318 		      int fd, int expected_fd, u32 flags)
10319 {
10320 	enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
10321 	struct bpf_prog *new_prog = NULL, *old_prog = NULL;
10322 	int err;
10323 
10324 	ASSERT_RTNL();
10325 
10326 	if (fd >= 0) {
10327 		new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
10328 						 mode != XDP_MODE_SKB);
10329 		if (IS_ERR(new_prog))
10330 			return PTR_ERR(new_prog);
10331 	}
10332 
10333 	if (expected_fd >= 0) {
10334 		old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
10335 						 mode != XDP_MODE_SKB);
10336 		if (IS_ERR(old_prog)) {
10337 			err = PTR_ERR(old_prog);
10338 			old_prog = NULL;
10339 			goto err_out;
10340 		}
10341 	}
10342 
10343 	err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);
10344 
10345 err_out:
10346 	if (err && new_prog)
10347 		bpf_prog_put(new_prog);
10348 	if (old_prog)
10349 		bpf_prog_put(old_prog);
10350 	return err;
10351 }
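
/*
 * Usage sketch (assumption, not from this file): an equivalent of the
 * rtnetlink IFLA_XDP request that replaces the currently attached
 * generic-mode program, expecting old_fd to still be the one attached:
 *
 *	err = dev_change_xdp_fd(dev, extack, new_fd, old_fd,
 *				XDP_FLAGS_SKB_MODE | XDP_FLAGS_REPLACE);
 */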
10352 
10353 u32 dev_get_min_mp_channel_count(const struct net_device *dev)
10354 {
10355 	int i;
10356 
10357 	ASSERT_RTNL();
10358 
10359 	for (i = dev->real_num_rx_queues - 1; i >= 0; i--)
10360 		if (dev->_rx[i].mp_params.mp_priv)
10361 			/* The channel count is the idx plus 1. */
10362 			return i + 1;
10363 
10364 	return 0;
10365 }
10366 
10367 /**
10368  * dev_index_reserve() - allocate an ifindex in a namespace
10369  * @net: the applicable net namespace
10370  * @ifindex: requested ifindex, pass %0 to get one allocated
10371  *
10372  * Allocate an ifindex for a new device. Caller must either use the ifindex
10373  * to store the device (via list_netdevice()) or call dev_index_release()
10374  * to give the index up.
10375  *
10376  * Return: a suitable unique value for a new device interface number or -errno.
10377  */
10378 static int dev_index_reserve(struct net *net, u32 ifindex)
10379 {
10380 	int err;
10381 
10382 	if (ifindex > INT_MAX) {
10383 		DEBUG_NET_WARN_ON_ONCE(1);
10384 		return -EINVAL;
10385 	}
10386 
10387 	if (!ifindex)
10388 		err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL,
10389 				      xa_limit_31b, &net->ifindex, GFP_KERNEL);
10390 	else
10391 		err = xa_insert(&net->dev_by_index, ifindex, NULL, GFP_KERNEL);
10392 	if (err < 0)
10393 		return err;
10394 
10395 	return ifindex;
10396 }
10397 
10398 static void dev_index_release(struct net *net, int ifindex)
10399 {
10400 	/* Expect only unused indexes; unlist_netdevice() removes the used ones */
10401 	WARN_ON(xa_erase(&net->dev_by_index, ifindex));
10402 }
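
/*
 * Pairing sketch for the two helpers above: a reserved index must either
 * be consumed by list_netdevice() or given back explicitly:
 *
 *	ifindex = dev_index_reserve(net, 0);	0 requests auto-allocation
 *	if (ifindex < 0)
 *		return ifindex;
 *	...
 *	if (error)
 *		dev_index_release(net, ifindex);
 */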
10403 
10404 static bool from_cleanup_net(void)
10405 {
10406 #ifdef CONFIG_NET_NS
10407 	return current == cleanup_net_task;
10408 #else
10409 	return false;
10410 #endif
10411 }
10412 
10413 /* Delayed registration/unregistration */
10414 LIST_HEAD(net_todo_list);
10415 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
10416 atomic_t dev_unreg_count = ATOMIC_INIT(0);
10417 
10418 static void net_set_todo(struct net_device *dev)
10419 {
10420 	list_add_tail(&dev->todo_list, &net_todo_list);
10421 }
10422 
10423 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
10424 	struct net_device *upper, netdev_features_t features)
10425 {
10426 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
10427 	netdev_features_t feature;
10428 	int feature_bit;
10429 
10430 	for_each_netdev_feature(upper_disables, feature_bit) {
10431 		feature = __NETIF_F_BIT(feature_bit);
10432 		if (!(upper->wanted_features & feature) &&
10433 		    (features & feature)) {
10434 			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
10435 				   &feature, upper->name);
10436 			features &= ~feature;
10437 		}
10438 	}
10439 
10440 	return features;
10441 }
10442 
10443 static void netdev_sync_lower_features(struct net_device *upper,
10444 	struct net_device *lower, netdev_features_t features)
10445 {
10446 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
10447 	netdev_features_t feature;
10448 	int feature_bit;
10449 
10450 	for_each_netdev_feature(upper_disables, feature_bit) {
10451 		feature = __NETIF_F_BIT(feature_bit);
10452 		if (!(features & feature) && (lower->features & feature)) {
10453 			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
10454 				   &feature, lower->name);
10455 			lower->wanted_features &= ~feature;
10456 			__netdev_update_features(lower);
10457 
10458 			if (unlikely(lower->features & feature))
10459 				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
10460 					    &feature, lower->name);
10461 			else
10462 				netdev_features_change(lower);
10463 		}
10464 	}
10465 }
10466 
10467 static bool netdev_has_ip_or_hw_csum(netdev_features_t features)
10468 {
10469 	netdev_features_t ip_csum_mask = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
10470 	bool ip_csum = (features & ip_csum_mask) == ip_csum_mask;
10471 	bool hw_csum = features & NETIF_F_HW_CSUM;
10472 
10473 	return ip_csum || hw_csum;
10474 }
10475 
10476 static netdev_features_t netdev_fix_features(struct net_device *dev,
10477 	netdev_features_t features)
10478 {
10479 	/* Fix illegal checksum combinations */
10480 	if ((features & NETIF_F_HW_CSUM) &&
10481 	    (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) {
10482 		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
10483 		features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
10484 	}
10485 
10486 	/* TSO requires that SG is present as well. */
10487 	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
10488 		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
10489 		features &= ~NETIF_F_ALL_TSO;
10490 	}
10491 
10492 	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
10493 					!(features & NETIF_F_IP_CSUM)) {
10494 		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
10495 		features &= ~NETIF_F_TSO;
10496 		features &= ~NETIF_F_TSO_ECN;
10497 	}
10498 
10499 	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
10500 					 !(features & NETIF_F_IPV6_CSUM)) {
10501 		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
10502 		features &= ~NETIF_F_TSO6;
10503 	}
10504 
10505 	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
10506 	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
10507 		features &= ~NETIF_F_TSO_MANGLEID;
10508 
10509 	/* TSO ECN requires that TSO is present as well. */
10510 	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
10511 		features &= ~NETIF_F_TSO_ECN;
10512 
10513 	/* Software GSO depends on SG. */
10514 	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
10515 		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
10516 		features &= ~NETIF_F_GSO;
10517 	}
10518 
10519 	/* GSO partial features require GSO partial be set */
10520 	if ((features & dev->gso_partial_features) &&
10521 	    !(features & NETIF_F_GSO_PARTIAL)) {
10522 		netdev_dbg(dev,
10523 			   "Dropping partially supported GSO features since no GSO partial.\n");
10524 		features &= ~dev->gso_partial_features;
10525 	}
10526 
10527 	if (!(features & NETIF_F_RXCSUM)) {
10528 		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
10529 		 * successfully merged by hardware must also have the
10530 		 * checksum verified by hardware.  If the user does not
10531 		 * want to enable RXCSUM, logically, we should disable GRO_HW.
10532 		 */
10533 		if (features & NETIF_F_GRO_HW) {
10534 			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
10535 			features &= ~NETIF_F_GRO_HW;
10536 		}
10537 	}
10538 
10539 	/* LRO/HW-GRO features cannot be combined with RX-FCS */
10540 	if (features & NETIF_F_RXFCS) {
10541 		if (features & NETIF_F_LRO) {
10542 			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
10543 			features &= ~NETIF_F_LRO;
10544 		}
10545 
10546 		if (features & NETIF_F_GRO_HW) {
10547 			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
10548 			features &= ~NETIF_F_GRO_HW;
10549 		}
10550 	}
10551 
10552 	if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) {
10553 		netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
10554 		features &= ~NETIF_F_LRO;
10555 	}
10556 
10557 	if ((features & NETIF_F_HW_TLS_TX) && !netdev_has_ip_or_hw_csum(features)) {
10558 		netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
10559 		features &= ~NETIF_F_HW_TLS_TX;
10560 	}
10561 
10562 	if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
10563 		netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
10564 		features &= ~NETIF_F_HW_TLS_RX;
10565 	}
10566 
10567 	if ((features & NETIF_F_GSO_UDP_L4) && !netdev_has_ip_or_hw_csum(features)) {
10568 		netdev_dbg(dev, "Dropping USO feature since no CSUM feature.\n");
10569 		features &= ~NETIF_F_GSO_UDP_L4;
10570 	}
10571 
10572 	return features;
10573 }
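
/*
 * Example of the dependency fixups above (illustrative values): asking
 * for TSO without scatter/gather is silently corrected, so a request of
 *
 *	features = NETIF_F_TSO | NETIF_F_IP_CSUM;
 *
 * comes back from netdev_fix_features() with all NETIF_F_ALL_TSO bits
 * cleared because NETIF_F_SG is absent.
 */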
10574 
10575 int __netdev_update_features(struct net_device *dev)
10576 {
10577 	struct net_device *upper, *lower;
10578 	netdev_features_t features;
10579 	struct list_head *iter;
10580 	int err = -1;
10581 
10582 	ASSERT_RTNL();
10583 	netdev_ops_assert_locked(dev);
10584 
10585 	features = netdev_get_wanted_features(dev);
10586 
10587 	if (dev->netdev_ops->ndo_fix_features)
10588 		features = dev->netdev_ops->ndo_fix_features(dev, features);
10589 
10590 	/* driver might be less strict about feature dependencies */
10591 	features = netdev_fix_features(dev, features);
10592 
10593 	/* some features can't be enabled if they're off on an upper device */
10594 	netdev_for_each_upper_dev_rcu(dev, upper, iter)
10595 		features = netdev_sync_upper_features(dev, upper, features);
10596 
10597 	if (dev->features == features)
10598 		goto sync_lower;
10599 
10600 	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
10601 		&dev->features, &features);
10602 
10603 	if (dev->netdev_ops->ndo_set_features)
10604 		err = dev->netdev_ops->ndo_set_features(dev, features);
10605 	else
10606 		err = 0;
10607 
10608 	if (unlikely(err < 0)) {
10609 		netdev_err(dev,
10610 			"set_features() failed (%d); wanted %pNF, left %pNF\n",
10611 			err, &features, &dev->features);
10612 		/* return non-0 since some features might have changed and
10613 		 * it's better to fire a spurious notification than miss it
10614 		 */
10615 		return -1;
10616 	}
10617 
10618 sync_lower:
10619 	/* some features must be disabled on lower devices when disabled
10620 	 * on an upper device (think: bonding master or bridge)
10621 	 */
10622 	netdev_for_each_lower_dev(dev, lower, iter)
10623 		netdev_sync_lower_features(dev, lower, features);
10624 
10625 	if (!err) {
10626 		netdev_features_t diff = features ^ dev->features;
10627 
10628 		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
10629 			/* udp_tunnel_{get,drop}_rx_info both need
10630 			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
10631 			 * device, or they won't do anything.
10632 			 * Thus we need to update dev->features
10633 			 * *before* calling udp_tunnel_get_rx_info,
10634 			 * but *after* calling udp_tunnel_drop_rx_info.
10635 			 */
10636 			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
10637 				dev->features = features;
10638 				udp_tunnel_get_rx_info(dev);
10639 			} else {
10640 				udp_tunnel_drop_rx_info(dev);
10641 			}
10642 		}
10643 
10644 		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
10645 			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
10646 				dev->features = features;
10647 				err |= vlan_get_rx_ctag_filter_info(dev);
10648 			} else {
10649 				vlan_drop_rx_ctag_filter_info(dev);
10650 			}
10651 		}
10652 
10653 		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
10654 			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
10655 				dev->features = features;
10656 				err |= vlan_get_rx_stag_filter_info(dev);
10657 			} else {
10658 				vlan_drop_rx_stag_filter_info(dev);
10659 			}
10660 		}
10661 
10662 		dev->features = features;
10663 	}
10664 
10665 	return err < 0 ? 0 : 1;
10666 }
10667 
10668 /**
10669  *	netdev_update_features - recalculate device features
10670  *	@dev: the device to check
10671  *
10672  *	Recalculate dev->features set and send notifications if it
10673  *	has changed. Should be called after driver or hardware dependent
10674  *	conditions might have changed that influence the features.
10675  */
10676 void netdev_update_features(struct net_device *dev)
10677 {
10678 	if (__netdev_update_features(dev))
10679 		netdev_features_change(dev);
10680 }
10681 EXPORT_SYMBOL(netdev_update_features);
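
/*
 * Usage sketch (assumption, not from this file): a driver whose hardware
 * constraints just changed (say, after a firmware reconfiguration)
 * re-evaluates its features under RTNL; devices using per-instance ops
 * locking must hold that lock here as well:
 *
 *	rtnl_lock();
 *	netdev_update_features(dev);
 *	rtnl_unlock();
 */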
10682 
10683 /**
10684  *	netdev_change_features - recalculate device features
10685  *	@dev: the device to check
10686  *
10687  *	Recalculate dev->features set and send notifications even
10688  *	if they have not changed. Should be called instead of
10689  *	netdev_update_features() if also dev->vlan_features might
10690  *	have changed to allow the changes to be propagated to stacked
10691  *	VLAN devices.
10692  */
10693 void netdev_change_features(struct net_device *dev)
10694 {
10695 	__netdev_update_features(dev);
10696 	netdev_features_change(dev);
10697 }
10698 EXPORT_SYMBOL(netdev_change_features);
10699 
10700 /**
10701  *	netif_stacked_transfer_operstate -	transfer operstate
10702  *	@rootdev: the root or lower level device to transfer state from
10703  *	@dev: the device to transfer operstate to
10704  *
10705  *	Transfer operational state from root to device. This is normally
10706  *	called when a stacking relationship exists between the root
10707  *	device and the device (a leaf device).
10708  */
10709 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
10710 					struct net_device *dev)
10711 {
10712 	if (rootdev->operstate == IF_OPER_DORMANT)
10713 		netif_dormant_on(dev);
10714 	else
10715 		netif_dormant_off(dev);
10716 
10717 	if (rootdev->operstate == IF_OPER_TESTING)
10718 		netif_testing_on(dev);
10719 	else
10720 		netif_testing_off(dev);
10721 
10722 	if (netif_carrier_ok(rootdev))
10723 		netif_carrier_on(dev);
10724 	else
10725 		netif_carrier_off(dev);
10726 }
10727 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
10728 
10729 static int netif_alloc_rx_queues(struct net_device *dev)
10730 {
10731 	unsigned int i, count = dev->num_rx_queues;
10732 	struct netdev_rx_queue *rx;
10733 	size_t sz = count * sizeof(*rx);
10734 	int err = 0;
10735 
10736 	BUG_ON(count < 1);
10737 
10738 	rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10739 	if (!rx)
10740 		return -ENOMEM;
10741 
10742 	dev->_rx = rx;
10743 
10744 	for (i = 0; i < count; i++) {
10745 		rx[i].dev = dev;
10746 
10747 		/* XDP RX-queue setup */
10748 		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
10749 		if (err < 0)
10750 			goto err_rxq_info;
10751 	}
10752 	return 0;
10753 
10754 err_rxq_info:
10755 	/* Roll back successful registrations and free other resources */
10756 	while (i--)
10757 		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
10758 	kvfree(dev->_rx);
10759 	dev->_rx = NULL;
10760 	return err;
10761 }
10762 
10763 static void netif_free_rx_queues(struct net_device *dev)
10764 {
10765 	unsigned int i, count = dev->num_rx_queues;
10766 
10767 	/* netif_alloc_rx_queues() failed; resources have already been unregistered */
10768 	if (!dev->_rx)
10769 		return;
10770 
10771 	for (i = 0; i < count; i++)
10772 		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
10773 
10774 	kvfree(dev->_rx);
10775 }
10776 
10777 static void netdev_init_one_queue(struct net_device *dev,
10778 				  struct netdev_queue *queue, void *_unused)
10779 {
10780 	/* Initialize queue lock */
10781 	spin_lock_init(&queue->_xmit_lock);
10782 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
10783 	queue->xmit_lock_owner = -1;
10784 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
10785 	queue->dev = dev;
10786 #ifdef CONFIG_BQL
10787 	dql_init(&queue->dql, HZ);
10788 #endif
10789 }
10790 
10791 static void netif_free_tx_queues(struct net_device *dev)
10792 {
10793 	kvfree(dev->_tx);
10794 }
10795 
10796 static int netif_alloc_netdev_queues(struct net_device *dev)
10797 {
10798 	unsigned int count = dev->num_tx_queues;
10799 	struct netdev_queue *tx;
10800 	size_t sz = count * sizeof(*tx);
10801 
10802 	if (count < 1 || count > 0xffff)
10803 		return -EINVAL;
10804 
10805 	tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10806 	if (!tx)
10807 		return -ENOMEM;
10808 
10809 	dev->_tx = tx;
10810 
10811 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
10812 	spin_lock_init(&dev->tx_global_lock);
10813 
10814 	return 0;
10815 }
10816 
10817 void netif_tx_stop_all_queues(struct net_device *dev)
10818 {
10819 	unsigned int i;
10820 
10821 	for (i = 0; i < dev->num_tx_queues; i++) {
10822 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
10823 
10824 		netif_tx_stop_queue(txq);
10825 	}
10826 }
10827 EXPORT_SYMBOL(netif_tx_stop_all_queues);
10828 
10829 static int netdev_do_alloc_pcpu_stats(struct net_device *dev)
10830 {
10831 	void __percpu *v;
10832 
10833 	/* Drivers implementing ndo_get_peer_dev must support tstat
10834 	 * accounting, so that skb_do_redirect() can bump the dev's
10835 	 * RX stats upon network namespace switch.
10836 	 */
10837 	if (dev->netdev_ops->ndo_get_peer_dev &&
10838 	    dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS)
10839 		return -EOPNOTSUPP;
10840 
10841 	switch (dev->pcpu_stat_type) {
10842 	case NETDEV_PCPU_STAT_NONE:
10843 		return 0;
10844 	case NETDEV_PCPU_STAT_LSTATS:
10845 		v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
10846 		break;
10847 	case NETDEV_PCPU_STAT_TSTATS:
10848 		v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
10849 		break;
10850 	case NETDEV_PCPU_STAT_DSTATS:
10851 		v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
10852 		break;
10853 	default:
10854 		return -EINVAL;
10855 	}
10856 
10857 	return v ? 0 : -ENOMEM;
10858 }
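
/*
 * Sketch (assumption, not from this file): a driver opts into
 * core-managed per-CPU stats by setting the type before registration,
 * e.g. in its setup callback:
 *
 *	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
 *
 * register_netdevice() then allocates dev->tstats, and the matching
 * free happens in netdev_do_free_pcpu_stats() on teardown.
 */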
10859 
10860 static void netdev_do_free_pcpu_stats(struct net_device *dev)
10861 {
10862 	switch (dev->pcpu_stat_type) {
10863 	case NETDEV_PCPU_STAT_NONE:
10864 		return;
10865 	case NETDEV_PCPU_STAT_LSTATS:
10866 		free_percpu(dev->lstats);
10867 		break;
10868 	case NETDEV_PCPU_STAT_TSTATS:
10869 		free_percpu(dev->tstats);
10870 		break;
10871 	case NETDEV_PCPU_STAT_DSTATS:
10872 		free_percpu(dev->dstats);
10873 		break;
10874 	}
10875 }
10876 
10877 static void netdev_free_phy_link_topology(struct net_device *dev)
10878 {
10879 	struct phy_link_topology *topo = dev->link_topo;
10880 
10881 	if (IS_ENABLED(CONFIG_PHYLIB) && topo) {
10882 		xa_destroy(&topo->phys);
10883 		kfree(topo);
10884 		dev->link_topo = NULL;
10885 	}
10886 }
10887 
10888 /**
10889  * register_netdevice() - register a network device
10890  * @dev: device to register
10891  *
10892  * Take a prepared network device structure and make it externally accessible.
10893  * A %NETDEV_REGISTER message is sent to the netdev notifier chain.
10894  * Callers must hold the rtnl lock - you may want register_netdev()
10895  * instead of this.
10896  */
10897 int register_netdevice(struct net_device *dev)
10898 {
10899 	int ret;
10900 	struct net *net = dev_net(dev);
10901 
10902 	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
10903 		     NETDEV_FEATURE_COUNT);
10904 	BUG_ON(dev_boot_phase);
10905 	ASSERT_RTNL();
10906 
10907 	might_sleep();
10908 
10909 	/* When net_devices are persistent, this will be fatal. */
10910 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
10911 	BUG_ON(!net);
10912 
10913 	ret = ethtool_check_ops(dev->ethtool_ops);
10914 	if (ret)
10915 		return ret;
10916 
10917 	/* rss ctx ID 0 is reserved for the default context, start from 1 */
10918 	xa_init_flags(&dev->ethtool->rss_ctx, XA_FLAGS_ALLOC1);
10919 	mutex_init(&dev->ethtool->rss_lock);
10920 
10921 	spin_lock_init(&dev->addr_list_lock);
10922 	netdev_set_addr_lockdep_class(dev);
10923 
10924 	ret = dev_get_valid_name(net, dev, dev->name);
10925 	if (ret < 0)
10926 		goto out;
10927 
10928 	ret = -ENOMEM;
10929 	dev->name_node = netdev_name_node_head_alloc(dev);
10930 	if (!dev->name_node)
10931 		goto out;
10932 
10933 	/* Init, if this function is available */
10934 	if (dev->netdev_ops->ndo_init) {
10935 		ret = dev->netdev_ops->ndo_init(dev);
10936 		if (ret) {
10937 			if (ret > 0)
10938 				ret = -EIO;
10939 			goto err_free_name;
10940 		}
10941 	}
10942 
10943 	if (((dev->hw_features | dev->features) &
10944 	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
10945 	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
10946 	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
10947 		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
10948 		ret = -EINVAL;
10949 		goto err_uninit;
10950 	}
10951 
10952 	ret = netdev_do_alloc_pcpu_stats(dev);
10953 	if (ret)
10954 		goto err_uninit;
10955 
10956 	ret = dev_index_reserve(net, dev->ifindex);
10957 	if (ret < 0)
10958 		goto err_free_pcpu;
10959 	dev->ifindex = ret;
10960 
10961 	/* Transfer changeable features to wanted_features and enable
10962 	 * software offloads (GSO and GRO).
10963 	 */
10964 	dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
10965 	dev->features |= NETIF_F_SOFT_FEATURES;
10966 
10967 	if (dev->udp_tunnel_nic_info) {
10968 		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10969 		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10970 	}
10971 
10972 	dev->wanted_features = dev->features & dev->hw_features;
10973 
10974 	if (!(dev->flags & IFF_LOOPBACK))
10975 		dev->hw_features |= NETIF_F_NOCACHE_COPY;
10976 
10977 	/* If IPv4 TCP segmentation offload is supported we should also
10978 	 * allow the device to enable segmenting the frame with the option
10979 	 * of ignoring a static IP ID value.  This doesn't enable the
10980 	 * feature itself but allows the user to enable it later.
10981 	 */
10982 	if (dev->hw_features & NETIF_F_TSO)
10983 		dev->hw_features |= NETIF_F_TSO_MANGLEID;
10984 	if (dev->vlan_features & NETIF_F_TSO)
10985 		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
10986 	if (dev->mpls_features & NETIF_F_TSO)
10987 		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
10988 	if (dev->hw_enc_features & NETIF_F_TSO)
10989 		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
10990 
10991 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
10992 	 */
10993 	dev->vlan_features |= NETIF_F_HIGHDMA;
10994 
10995 	/* Make NETIF_F_SG inheritable to tunnel devices.
10996 	 */
10997 	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
10998 
10999 	/* Make NETIF_F_SG inheritable to MPLS.
11000 	 */
11001 	dev->mpls_features |= NETIF_F_SG;
11002 
11003 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
11004 	ret = notifier_to_errno(ret);
11005 	if (ret)
11006 		goto err_ifindex_release;
11007 
11008 	ret = netdev_register_kobject(dev);
11009 
11010 	netdev_lock(dev);
11011 	WRITE_ONCE(dev->reg_state, ret ? NETREG_UNREGISTERED : NETREG_REGISTERED);
11012 	netdev_unlock(dev);
11013 
11014 	if (ret)
11015 		goto err_uninit_notify;
11016 
11017 	netdev_lock_ops(dev);
11018 	__netdev_update_features(dev);
11019 	netdev_unlock_ops(dev);
11020 
11021 	/*
11022 	 *	Default initial state at registration is that the
11023 	 *	device is present.
11024 	 */
11025 
11026 	set_bit(__LINK_STATE_PRESENT, &dev->state);
11027 
11028 	linkwatch_init_dev(dev);
11029 
11030 	dev_init_scheduler(dev);
11031 
11032 	netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
11033 	list_netdevice(dev);
11034 
11035 	add_device_randomness(dev->dev_addr, dev->addr_len);
11036 
11037 	/* If the device has a permanent device address, the driver should
11038 	 * have set dev_addr, and addr_assign_type should be set to
11039 	 * NET_ADDR_PERM (the default value).
11040 	 */
11041 	if (dev->addr_assign_type == NET_ADDR_PERM)
11042 		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11043 
11044 	/* Notify protocols that a new device appeared. */
11045 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
11046 	ret = notifier_to_errno(ret);
11047 	if (ret) {
11048 		/* Expect explicit free_netdev() on failure */
11049 		dev->needs_free_netdev = false;
11050 		unregister_netdevice_queue(dev, NULL);
11051 		goto out;
11052 	}
11053 	/*
11054 	 *	Prevent userspace races by waiting until the network
11055 	 *	device is fully set up before sending notifications.
11056 	 */
11057 	if (!dev->rtnl_link_ops ||
11058 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
11059 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
11060 
11061 out:
11062 	return ret;
11063 
11064 err_uninit_notify:
11065 	call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
11066 err_ifindex_release:
11067 	dev_index_release(net, dev->ifindex);
11068 err_free_pcpu:
11069 	netdev_do_free_pcpu_stats(dev);
11070 err_uninit:
11071 	if (dev->netdev_ops->ndo_uninit)
11072 		dev->netdev_ops->ndo_uninit(dev);
11073 	if (dev->priv_destructor)
11074 		dev->priv_destructor(dev);
11075 err_free_name:
11076 	netdev_name_node_free(dev->name_node);
11077 	goto out;
11078 }
11079 EXPORT_SYMBOL(register_netdevice);
11080 
11081 /* Initialize the core of a dummy net device.
11082  * This performs the setup steps that dummy netdevs need, which normal
11083  * netdevs get by going through register_netdevice().
11084  */
11085 static void init_dummy_netdev(struct net_device *dev)
11086 {
11087 	/* make sure we BUG if anything tries to hit the standard
11088 	 * register/unregister code path
11089 	 */
11090 	dev->reg_state = NETREG_DUMMY;
11091 
11092 	/* a dummy interface is started by default */
11093 	set_bit(__LINK_STATE_PRESENT, &dev->state);
11094 	set_bit(__LINK_STATE_START, &dev->state);
11095 
11096 	/* Note: We don't allocate pcpu_refcnt for dummy devices,
11097 	 * because users of this 'device' don't need to change
11098 	 * its refcount.
11099 	 */
11100 }
11101 
11102 /**
11103  *	register_netdev	- register a network device
11104  *	@dev: device to register
11105  *
11106  *	Take a completed network device structure and add it to the kernel
11107  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
11108  *	chain. 0 is returned on success. A negative errno code is returned
11109  *	on a failure to set up the device, or if the name is a duplicate.
11110  *
11111  *	This is a wrapper around register_netdevice that takes the rtnl semaphore
11112  *	and expands the device name if you passed a format string to
11113  *	alloc_netdev.
11114  */
11115 int register_netdev(struct net_device *dev)
11116 {
11117 	struct net *net = dev_net(dev);
11118 	int err;
11119 
11120 	if (rtnl_net_lock_killable(net))
11121 		return -EINTR;
11122 
11123 	err = register_netdevice(dev);
11124 
11125 	rtnl_net_unlock(net);
11126 
11127 	return err;
11128 }
11129 EXPORT_SYMBOL(register_netdev);
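
/*
 * Typical registration sequence (a sketch; the "foo" names are
 * hypothetical and error handling is abbreviated):
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &foo_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 */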
11130 
11131 int netdev_refcnt_read(const struct net_device *dev)
11132 {
11133 #ifdef CONFIG_PCPU_DEV_REFCNT
11134 	int i, refcnt = 0;
11135 
11136 	for_each_possible_cpu(i)
11137 		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
11138 	return refcnt;
11139 #else
11140 	return refcount_read(&dev->dev_refcnt);
11141 #endif
11142 }
11143 EXPORT_SYMBOL(netdev_refcnt_read);
11144 
11145 int netdev_unregister_timeout_secs __read_mostly = 10;
11146 
11147 #define WAIT_REFS_MIN_MSECS 1
11148 #define WAIT_REFS_MAX_MSECS 250
11149 /**
11150  * netdev_wait_allrefs_any - wait until all references are gone.
11151  * @list: list of net_devices to wait on
11152  *
11153  * This is called when unregistering network devices.
11154  *
11155  * Any protocol or device that holds a reference should register
11156  * for netdevice notification, and clean up and release the
11157  * reference if it receives an UNREGISTER event.
11158  * We can get stuck here if buggy protocols don't correctly
11159  * call dev_put.
11160  */
11161 static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
11162 {
11163 	unsigned long rebroadcast_time, warning_time;
11164 	struct net_device *dev;
11165 	int wait = 0;
11166 
11167 	rebroadcast_time = warning_time = jiffies;
11168 
11169 	list_for_each_entry(dev, list, todo_list)
11170 		if (netdev_refcnt_read(dev) == 1)
11171 			return dev;
11172 
11173 	while (true) {
11174 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
11175 			rtnl_lock();
11176 
11177 			/* Rebroadcast unregister notification */
11178 			list_for_each_entry(dev, list, todo_list)
11179 				call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11180 
11181 			__rtnl_unlock();
11182 			rcu_barrier();
11183 			rtnl_lock();
11184 
11185 			list_for_each_entry(dev, list, todo_list)
11186 				if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
11187 					     &dev->state)) {
11188 					/* We must not have linkwatch events
11189 					 * pending on unregister. If this
11190 					 * happens, we simply run the queue
11191 					 * unscheduled, resulting in a noop
11192 					 * for this device.
11193 					 */
11194 					linkwatch_run_queue();
11195 					break;
11196 				}
11197 
11198 			__rtnl_unlock();
11199 
11200 			rebroadcast_time = jiffies;
11201 		}
11202 
11203 		rcu_barrier();
11204 
11205 		if (!wait) {
11206 			wait = WAIT_REFS_MIN_MSECS;
11207 		} else {
11208 			msleep(wait);
11209 			wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
11210 		}
11211 
11212 		list_for_each_entry(dev, list, todo_list)
11213 			if (netdev_refcnt_read(dev) == 1)
11214 				return dev;
11215 
11216 		if (time_after(jiffies, warning_time +
11217 			       READ_ONCE(netdev_unregister_timeout_secs) * HZ)) {
11218 			list_for_each_entry(dev, list, todo_list) {
11219 				pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
11220 					 dev->name, netdev_refcnt_read(dev));
11221 				ref_tracker_dir_print(&dev->refcnt_tracker, 10);
11222 			}
11223 
11224 			warning_time = jiffies;
11225 		}
11226 	}
11227 }
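
/* Example: a sketch of the notifier pattern the comment above asks for.
 * A subsystem holding long-lived netdev references drops them on
 * NETDEV_UNREGISTER, so netdev_wait_allrefs_any() can make progress.
 * my_drop_refs() is a hypothetical helper.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UNREGISTER)
 *			my_drop_refs(dev);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *	...
 *	register_netdevice_notifier(&my_nb);
 */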
11228 
11229 /* The sequence is:
11230  *
11231  *	rtnl_lock();
11232  *	...
11233  *	register_netdevice(x1);
11234  *	register_netdevice(x2);
11235  *	...
11236  *	unregister_netdevice(y1);
11237  *	unregister_netdevice(y2);
11238  *      ...
11239  *	rtnl_unlock();
11240  *	free_netdev(y1);
11241  *	free_netdev(y2);
11242  *
11243  * We are invoked by rtnl_unlock().
11244  * This allows us to deal with problems:
11245  * 1) We can delete sysfs objects which invoke hotplug
11246  *    without deadlocking with linkwatch via keventd.
11247  * 2) Since we run with the RTNL semaphore not held, we can sleep
11248  *    safely in order to wait for the netdev refcnt to drop to zero.
11249  *
11250  * We must not return until all unregister events added during
11251  * the interval the lock was held have been completed.
11252  */
11253 void netdev_run_todo(void)
11254 {
11255 	struct net_device *dev, *tmp;
11256 	struct list_head list;
11257 	int cnt;
11258 #ifdef CONFIG_LOCKDEP
11259 	struct list_head unlink_list;
11260 
11261 	list_replace_init(&net_unlink_list, &unlink_list);
11262 
11263 	while (!list_empty(&unlink_list)) {
11264 		dev = list_first_entry(&unlink_list, struct net_device,
11265 				       unlink_list);
11266 		list_del_init(&dev->unlink_list);
11267 		dev->nested_level = dev->lower_level - 1;
11268 	}
11269 #endif
11270 
11271 	/* Snapshot list, allow later requests */
11272 	list_replace_init(&net_todo_list, &list);
11273 
11274 	__rtnl_unlock();
11275 
11276 	/* Wait for rcu callbacks to finish before next phase */
11277 	if (!list_empty(&list))
11278 		rcu_barrier();
11279 
11280 	list_for_each_entry_safe(dev, tmp, &list, todo_list) {
11281 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
11282 			netdev_WARN(dev, "run_todo but not unregistering\n");
11283 			list_del(&dev->todo_list);
11284 			continue;
11285 		}
11286 
11287 		netdev_lock(dev);
11288 		WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERED);
11289 		netdev_unlock(dev);
11290 		linkwatch_sync_dev(dev);
11291 	}
11292 
11293 	cnt = 0;
11294 	while (!list_empty(&list)) {
11295 		dev = netdev_wait_allrefs_any(&list);
11296 		list_del(&dev->todo_list);
11297 
11298 		/* paranoia */
11299 		BUG_ON(netdev_refcnt_read(dev) != 1);
11300 		BUG_ON(!list_empty(&dev->ptype_all));
11301 		BUG_ON(!list_empty(&dev->ptype_specific));
11302 		WARN_ON(rcu_access_pointer(dev->ip_ptr));
11303 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
11304 
11305 		netdev_do_free_pcpu_stats(dev);
11306 		if (dev->priv_destructor)
11307 			dev->priv_destructor(dev);
11308 		if (dev->needs_free_netdev)
11309 			free_netdev(dev);
11310 
11311 		cnt++;
11312 
11313 		/* Free network device */
11314 		kobject_put(&dev->dev.kobj);
11315 	}
11316 	if (cnt && atomic_sub_and_test(cnt, &dev_unreg_count))
11317 		wake_up(&netdev_unregistering_wq);
11318 }
11319 
11320 /* Collate per-cpu network dstats statistics
11321  *
11322  * Read per-cpu network statistics from dev->dstats and populate the related
11323  * fields in @s.
11324  */
11325 static void dev_fetch_dstats(struct rtnl_link_stats64 *s,
11326 			     const struct pcpu_dstats __percpu *dstats)
11327 {
11328 	int cpu;
11329 
11330 	for_each_possible_cpu(cpu) {
11331 		u64 rx_packets, rx_bytes, rx_drops;
11332 		u64 tx_packets, tx_bytes, tx_drops;
11333 		const struct pcpu_dstats *stats;
11334 		unsigned int start;
11335 
11336 		stats = per_cpu_ptr(dstats, cpu);
11337 		do {
11338 			start = u64_stats_fetch_begin(&stats->syncp);
11339 			rx_packets = u64_stats_read(&stats->rx_packets);
11340 			rx_bytes   = u64_stats_read(&stats->rx_bytes);
11341 			rx_drops   = u64_stats_read(&stats->rx_drops);
11342 			tx_packets = u64_stats_read(&stats->tx_packets);
11343 			tx_bytes   = u64_stats_read(&stats->tx_bytes);
11344 			tx_drops   = u64_stats_read(&stats->tx_drops);
11345 		} while (u64_stats_fetch_retry(&stats->syncp, start));
11346 
11347 		s->rx_packets += rx_packets;
11348 		s->rx_bytes   += rx_bytes;
11349 		s->rx_dropped += rx_drops;
11350 		s->tx_packets += tx_packets;
11351 		s->tx_bytes   += tx_bytes;
11352 		s->tx_dropped += tx_drops;
11353 	}
11354 }
11355 
11356 /* ndo_get_stats64 implementation for dstats-based accounting.
11357  *
11358  * Populate @s from dev->stats and dev->dstats. This is used internally by the
11359  * core for NETDEV_PCPU_STAT_DSTAT-type stats collection.
11360  */
11361 static void dev_get_dstats64(const struct net_device *dev,
11362 			     struct rtnl_link_stats64 *s)
11363 {
11364 	netdev_stats_to_stats64(s, &dev->stats);
11365 	dev_fetch_dstats(s, dev->dstats);
11366 }
11367 
11368 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
11369  * all the same fields in the same order as net_device_stats, with only
11370  * the type differing, but rtnl_link_stats64 may have additional fields
11371  * at the end for newer counters.
11372  */
11373 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
11374 			     const struct net_device_stats *netdev_stats)
11375 {
11376 	size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
11377 	const atomic_long_t *src = (atomic_long_t *)netdev_stats;
11378 	u64 *dst = (u64 *)stats64;
11379 
11380 	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
11381 	for (i = 0; i < n; i++)
11382 		dst[i] = (unsigned long)atomic_long_read(&src[i]);
11383 	/* zero out counters that only exist in rtnl_link_stats64 */
11384 	memset((char *)stats64 + n * sizeof(u64), 0,
11385 	       sizeof(*stats64) - n * sizeof(u64));
11386 }
11387 EXPORT_SYMBOL(netdev_stats_to_stats64);
11388 
11389 static __cold struct net_device_core_stats __percpu *netdev_core_stats_alloc(
11390 		struct net_device *dev)
11391 {
11392 	struct net_device_core_stats __percpu *p;
11393 
11394 	p = alloc_percpu_gfp(struct net_device_core_stats,
11395 			     GFP_ATOMIC | __GFP_NOWARN);
11396 
11397 	if (p && cmpxchg(&dev->core_stats, NULL, p))
11398 		free_percpu(p);
11399 
11400 	/* This READ_ONCE() pairs with the cmpxchg() above */
11401 	return READ_ONCE(dev->core_stats);
11402 }
11403 
11404 noinline void netdev_core_stats_inc(struct net_device *dev, u32 offset)
11405 {
11406 	/* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
11407 	struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
11408 	unsigned long __percpu *field;
11409 
11410 	if (unlikely(!p)) {
11411 		p = netdev_core_stats_alloc(dev);
11412 		if (!p)
11413 			return;
11414 	}
11415 
11416 	field = (unsigned long __percpu *)((void __percpu *)p + offset);
11417 	this_cpu_inc(*field);
11418 }
11419 EXPORT_SYMBOL_GPL(netdev_core_stats_inc);
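
/* Example: netdev_core_stats_inc() is normally reached through the
 * field-specific wrappers generated in netdevice.h, which pass the field
 * offset. A sketch of one such call:
 *
 *	dev_core_stats_rx_dropped_inc(dev);
 *
 * which is roughly equivalent to:
 *
 *	netdev_core_stats_inc(dev,
 *		offsetof(struct net_device_core_stats, rx_dropped));
 */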
11420 
11421 /**
11422  *	dev_get_stats	- get network device statistics
11423  *	@dev: device to get statistics from
11424  *	@storage: place to store stats
11425  *
11426  *	Get network statistics from device. Return @storage.
11427  *	The device driver may provide its own method by setting
11428  *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
11429  *	otherwise the internal statistics structure is used.
11430  */
11431 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
11432 					struct rtnl_link_stats64 *storage)
11433 {
11434 	const struct net_device_ops *ops = dev->netdev_ops;
11435 	const struct net_device_core_stats __percpu *p;
11436 
11437 	/*
11438 	 * IPv{4,6} and UDP tunnels share common stat helpers and use
11439 	 * different stat types (NETDEV_PCPU_STAT_TSTATS vs
11440 	 * NETDEV_PCPU_STAT_DSTATS). Ensure the accounting is consistent.
11441 	 */
11442 	BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, rx_bytes) !=
11443 		     offsetof(struct pcpu_dstats, rx_bytes));
11444 	BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, rx_packets) !=
11445 		     offsetof(struct pcpu_dstats, rx_packets));
11446 	BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, tx_bytes) !=
11447 		     offsetof(struct pcpu_dstats, tx_bytes));
11448 	BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, tx_packets) !=
11449 		     offsetof(struct pcpu_dstats, tx_packets));
11450 
11451 	if (ops->ndo_get_stats64) {
11452 		memset(storage, 0, sizeof(*storage));
11453 		ops->ndo_get_stats64(dev, storage);
11454 	} else if (ops->ndo_get_stats) {
11455 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
11456 	} else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) {
11457 		dev_get_tstats64(dev, storage);
11458 	} else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_DSTATS) {
11459 		dev_get_dstats64(dev, storage);
11460 	} else {
11461 		netdev_stats_to_stats64(storage, &dev->stats);
11462 	}
11463 
11464 	/* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
11465 	p = READ_ONCE(dev->core_stats);
11466 	if (p) {
11467 		const struct net_device_core_stats *core_stats;
11468 		int i;
11469 
11470 		for_each_possible_cpu(i) {
11471 			core_stats = per_cpu_ptr(p, i);
11472 			storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
11473 			storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
11474 			storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
11475 			storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped);
11476 		}
11477 	}
11478 	return storage;
11479 }
11480 EXPORT_SYMBOL(dev_get_stats);
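
/* Example: a sketch of a typical reader. The caller provides the storage
 * and should hold a reference on the device (e.g. via the RTNL lock or
 * netdev_hold()):
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: %llu rx packets\n", dev->name, stats.rx_packets);
 */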
11481 
11482 /**
11483  *	dev_fetch_sw_netstats - get per-cpu network device statistics
11484  *	@s: place to store stats
11485  *	@netstats: per-cpu network stats to read from
11486  *
11487  *	Read per-cpu network statistics and populate the related fields in @s.
11488  */
11489 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
11490 			   const struct pcpu_sw_netstats __percpu *netstats)
11491 {
11492 	int cpu;
11493 
11494 	for_each_possible_cpu(cpu) {
11495 		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
11496 		const struct pcpu_sw_netstats *stats;
11497 		unsigned int start;
11498 
11499 		stats = per_cpu_ptr(netstats, cpu);
11500 		do {
11501 			start = u64_stats_fetch_begin(&stats->syncp);
11502 			rx_packets = u64_stats_read(&stats->rx_packets);
11503 			rx_bytes   = u64_stats_read(&stats->rx_bytes);
11504 			tx_packets = u64_stats_read(&stats->tx_packets);
11505 			tx_bytes   = u64_stats_read(&stats->tx_bytes);
11506 		} while (u64_stats_fetch_retry(&stats->syncp, start));
11507 
11508 		s->rx_packets += rx_packets;
11509 		s->rx_bytes   += rx_bytes;
11510 		s->tx_packets += tx_packets;
11511 		s->tx_bytes   += tx_bytes;
11512 	}
11513 }
11514 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
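
/* Example: the write side that pairs with dev_fetch_sw_netstats(). A driver
 * datapath typically updates its pcpu_sw_netstats through the helpers in
 * netdevice.h; a sketch for the RX path:
 *
 *	dev_sw_netstats_rx_add(dev, skb->len);
 *
 * which increments rx_packets/rx_bytes under this CPU's syncp, so the
 * u64_stats_fetch_begin()/retry() loop above reads a consistent pair.
 */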
11515 
11516 /**
11517  *	dev_get_tstats64 - ndo_get_stats64 implementation
11518  *	@dev: device to get statistics from
11519  *	@s: place to store stats
11520  *
11521  *	Populate @s from dev->stats and dev->tstats. Can be used as
11522  *	ndo_get_stats64() callback.
11523  */
11524 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
11525 {
11526 	netdev_stats_to_stats64(s, &dev->stats);
11527 	dev_fetch_sw_netstats(s, dev->tstats);
11528 }
11529 EXPORT_SYMBOL_GPL(dev_get_tstats64);
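
/* Example: a sketch of how a driver can opt into this collection path.
 * Setting the pcpu stat type in its setup callback (my_setup() is
 * hypothetical) lets the core allocate dev->tstats and have dev_get_stats()
 * use dev_get_tstats64() without a driver-supplied ndo_get_stats64:
 *
 *	static void my_setup(struct net_device *dev)
 *	{
 *		...
 *		dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
 *	}
 */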
11530 
11531 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
11532 {
11533 	struct netdev_queue *queue = dev_ingress_queue(dev);
11534 
11535 #ifdef CONFIG_NET_CLS_ACT
11536 	if (queue)
11537 		return queue;
11538 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
11539 	if (!queue)
11540 		return NULL;
11541 	netdev_init_one_queue(dev, queue, NULL);
11542 	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
11543 	RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
11544 	rcu_assign_pointer(dev->ingress_queue, queue);
11545 #endif
11546 	return queue;
11547 }
11548 
11549 static const struct ethtool_ops default_ethtool_ops;
11550 
11551 void netdev_set_default_ethtool_ops(struct net_device *dev,
11552 				    const struct ethtool_ops *ops)
11553 {
11554 	if (dev->ethtool_ops == &default_ethtool_ops)
11555 		dev->ethtool_ops = ops;
11556 }
11557 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
11558 
11559 /**
11560  * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default
11561  * @dev: netdev to enable the IRQ coalescing on
11562  *
11563  * Sets a conservative default for SW IRQ coalescing. Users can use
11564  * sysfs attributes to override the default values.
11565  */
11566 void netdev_sw_irq_coalesce_default_on(struct net_device *dev)
11567 {
11568 	WARN_ON(dev->reg_state == NETREG_REGISTERED);
11569 
11570 	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
11571 		netdev_set_gro_flush_timeout(dev, 20000);
11572 		netdev_set_defer_hard_irqs(dev, 1);
11573 	}
11574 }
11575 EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on);
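
/* Example: a sketch of the intended call site, after allocation but before
 * registration (note the WARN_ON above); my_priv is hypothetical:
 *
 *	dev = alloc_etherdev(sizeof(struct my_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	netdev_sw_irq_coalesce_default_on(dev);
 *	err = register_netdev(dev);
 */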
11576 
11577 /**
11578  * alloc_netdev_mqs - allocate network device
11579  * @sizeof_priv: size of private data to allocate space for
11580  * @name: device name format string
11581  * @name_assign_type: origin of device name
11582  * @setup: callback to initialize device
11583  * @txqs: the number of TX subqueues to allocate
11584  * @rxqs: the number of RX subqueues to allocate
11585  *
11586  * Allocates a struct net_device with private data area for driver use
11587  * and performs basic initialization.  Also allocates subqueue structs
11588  * for each queue on the device.
11589  */
11590 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
11591 		unsigned char name_assign_type,
11592 		void (*setup)(struct net_device *),
11593 		unsigned int txqs, unsigned int rxqs)
11594 {
11595 	struct net_device *dev;
11596 	size_t napi_config_sz;
11597 	unsigned int maxqs;
11598 
11599 	BUG_ON(strlen(name) >= sizeof(dev->name));
11600 
11601 	if (txqs < 1) {
11602 		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
11603 		return NULL;
11604 	}
11605 
11606 	if (rxqs < 1) {
11607 		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
11608 		return NULL;
11609 	}
11610 
11611 	maxqs = max(txqs, rxqs);
11612 
11613 	dev = kvzalloc(struct_size(dev, priv, sizeof_priv),
11614 		       GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
11615 	if (!dev)
11616 		return NULL;
11617 
11618 	dev->priv_len = sizeof_priv;
11619 
11620 	ref_tracker_dir_init(&dev->refcnt_tracker, 128, name);
11621 #ifdef CONFIG_PCPU_DEV_REFCNT
11622 	dev->pcpu_refcnt = alloc_percpu(int);
11623 	if (!dev->pcpu_refcnt)
11624 		goto free_dev;
11625 	__dev_hold(dev);
11626 #else
11627 	refcount_set(&dev->dev_refcnt, 1);
11628 #endif
11629 
11630 	if (dev_addr_init(dev))
11631 		goto free_pcpu;
11632 
11633 	dev_mc_init(dev);
11634 	dev_uc_init(dev);
11635 
11636 	dev_net_set(dev, &init_net);
11637 
11638 	dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
11639 	dev->xdp_zc_max_segs = 1;
11640 	dev->gso_max_segs = GSO_MAX_SEGS;
11641 	dev->gro_max_size = GRO_LEGACY_MAX_SIZE;
11642 	dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE;
11643 	dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE;
11644 	dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
11645 	dev->tso_max_segs = TSO_MAX_SEGS;
11646 	dev->upper_level = 1;
11647 	dev->lower_level = 1;
11648 #ifdef CONFIG_LOCKDEP
11649 	dev->nested_level = 0;
11650 	INIT_LIST_HEAD(&dev->unlink_list);
11651 #endif
11652 
11653 	INIT_LIST_HEAD(&dev->napi_list);
11654 	INIT_LIST_HEAD(&dev->unreg_list);
11655 	INIT_LIST_HEAD(&dev->close_list);
11656 	INIT_LIST_HEAD(&dev->link_watch_list);
11657 	INIT_LIST_HEAD(&dev->adj_list.upper);
11658 	INIT_LIST_HEAD(&dev->adj_list.lower);
11659 	INIT_LIST_HEAD(&dev->ptype_all);
11660 	INIT_LIST_HEAD(&dev->ptype_specific);
11661 	INIT_LIST_HEAD(&dev->net_notifier_list);
11662 #ifdef CONFIG_NET_SCHED
11663 	hash_init(dev->qdisc_hash);
11664 #endif
11665 
11666 	mutex_init(&dev->lock);
11667 
11668 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
11669 	setup(dev);
11670 
11671 	if (!dev->tx_queue_len) {
11672 		dev->priv_flags |= IFF_NO_QUEUE;
11673 		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
11674 	}
11675 
11676 	dev->num_tx_queues = txqs;
11677 	dev->real_num_tx_queues = txqs;
11678 	if (netif_alloc_netdev_queues(dev))
11679 		goto free_all;
11680 
11681 	dev->num_rx_queues = rxqs;
11682 	dev->real_num_rx_queues = rxqs;
11683 	if (netif_alloc_rx_queues(dev))
11684 		goto free_all;
11685 	dev->ethtool = kzalloc(sizeof(*dev->ethtool), GFP_KERNEL_ACCOUNT);
11686 	if (!dev->ethtool)
11687 		goto free_all;
11688 
11689 	dev->cfg = kzalloc(sizeof(*dev->cfg), GFP_KERNEL_ACCOUNT);
11690 	if (!dev->cfg)
11691 		goto free_all;
11692 	dev->cfg_pending = dev->cfg;
11693 
11694 	napi_config_sz = array_size(maxqs, sizeof(*dev->napi_config));
11695 	dev->napi_config = kvzalloc(napi_config_sz, GFP_KERNEL_ACCOUNT);
11696 	if (!dev->napi_config)
11697 		goto free_all;
11698 
11699 	strscpy(dev->name, name);
11700 	dev->name_assign_type = name_assign_type;
11701 	dev->group = INIT_NETDEV_GROUP;
11702 	if (!dev->ethtool_ops)
11703 		dev->ethtool_ops = &default_ethtool_ops;
11704 
11705 	nf_hook_netdev_init(dev);
11706 
11707 	return dev;
11708 
11709 free_all:
11710 	free_netdev(dev);
11711 	return NULL;
11712 
11713 free_pcpu:
11714 #ifdef CONFIG_PCPU_DEV_REFCNT
11715 	free_percpu(dev->pcpu_refcnt);
11716 free_dev:
11717 #endif
11718 	kvfree(dev);
11719 	return NULL;
11720 }
11721 EXPORT_SYMBOL(alloc_netdev_mqs);
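
/* Example: a sketch of allocating a multi-queue Ethernet device. Most
 * drivers go through the ether_setup()-based wrapper rather than calling
 * alloc_netdev_mqs() directly; my_priv is hypothetical:
 *
 *	dev = alloc_etherdev_mqs(sizeof(struct my_priv), 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 *
 * which is equivalent to:
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "eth%d",
 *			       NET_NAME_UNKNOWN, ether_setup, 8, 8);
 */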
11722 
11723 static void netdev_napi_exit(struct net_device *dev)
11724 {
11725 	if (!list_empty(&dev->napi_list)) {
11726 		struct napi_struct *p, *n;
11727 
11728 		netdev_lock(dev);
11729 		list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
11730 			__netif_napi_del_locked(p);
11731 		netdev_unlock(dev);
11732 
11733 		synchronize_net();
11734 	}
11735 
11736 	kvfree(dev->napi_config);
11737 }
11738 
11739 /**
11740  * free_netdev - free network device
11741  * @dev: device
11742  *
11743  * This function does the last stage of destroying an allocated device
11744  * interface. The reference to the device object is released. If this
11745  * is the last reference then it will be freed. Must be called in process
11746  * context.
11747  */
11748 void free_netdev(struct net_device *dev)
11749 {
11750 	might_sleep();
11751 
11752 	/* When called immediately after register_netdevice() failed, the unwind
11753 	 * handling may still be dismantling the device. Handle that case by
11754 	 * deferring the free.
11755 	 */
11756 	if (dev->reg_state == NETREG_UNREGISTERING) {
11757 		ASSERT_RTNL();
11758 		dev->needs_free_netdev = true;
11759 		return;
11760 	}
11761 
11762 	WARN_ON(dev->cfg != dev->cfg_pending);
11763 	kfree(dev->cfg);
11764 	kfree(dev->ethtool);
11765 	netif_free_tx_queues(dev);
11766 	netif_free_rx_queues(dev);
11767 
11768 	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
11769 
11770 	/* Flush device addresses */
11771 	dev_addr_flush(dev);
11772 
11773 	netdev_napi_exit(dev);
11774 
11775 	netif_del_cpu_rmap(dev);
11776 
11777 	ref_tracker_dir_exit(&dev->refcnt_tracker);
11778 #ifdef CONFIG_PCPU_DEV_REFCNT
11779 	free_percpu(dev->pcpu_refcnt);
11780 	dev->pcpu_refcnt = NULL;
11781 #endif
11782 	free_percpu(dev->core_stats);
11783 	dev->core_stats = NULL;
11784 	free_percpu(dev->xdp_bulkq);
11785 	dev->xdp_bulkq = NULL;
11786 
11787 	netdev_free_phy_link_topology(dev);
11788 
11789 	mutex_destroy(&dev->lock);
11790 
11791 	/*  Compatibility with error handling in drivers */
11792 	if (dev->reg_state == NETREG_UNINITIALIZED ||
11793 	    dev->reg_state == NETREG_DUMMY) {
11794 		kvfree(dev);
11795 		return;
11796 	}
11797 
11798 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
11799 	WRITE_ONCE(dev->reg_state, NETREG_RELEASED);
11800 
11801 	/* will free via device release */
11802 	put_device(&dev->dev);
11803 }
11804 EXPORT_SYMBOL(free_netdev);
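
/* Example: a sketch of the two legitimate free_netdev() call sites in a
 * typical driver (one that does not set needs_free_netdev): the error path
 * before registration succeeds, and teardown after unregister_netdev():
 *
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);	(never registered: plain free)
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);		(after the unregister todo list ran)
 */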
11805 
11806 /**
11807  * alloc_netdev_dummy - Allocate and initialize a dummy net device.
11808  * @sizeof_priv: size of private data to allocate space for
11809  *
11810  * Return: the allocated net_device on success, NULL otherwise
11811  */
11812 struct net_device *alloc_netdev_dummy(int sizeof_priv)
11813 {
11814 	return alloc_netdev(sizeof_priv, "dummy#", NET_NAME_UNKNOWN,
11815 			    init_dummy_netdev);
11816 }
11817 EXPORT_SYMBOL_GPL(alloc_netdev_dummy);
11818 
11819 /**
11820  *	synchronize_net -  Synchronize with packet receive processing
11821  *
11822  *	Wait for packets currently being received to be done.
11823  *	Does not block later packets from starting.
11824  */
11825 void synchronize_net(void)
11826 {
11827 	might_sleep();
11828 	if (from_cleanup_net() || rtnl_is_locked())
11829 		synchronize_rcu_expedited();
11830 	else
11831 		synchronize_rcu();
11832 }
11833 EXPORT_SYMBOL(synchronize_net);
11834 
11835 static void netdev_rss_contexts_free(struct net_device *dev)
11836 {
11837 	struct ethtool_rxfh_context *ctx;
11838 	unsigned long context;
11839 
11840 	mutex_lock(&dev->ethtool->rss_lock);
11841 	xa_for_each(&dev->ethtool->rss_ctx, context, ctx) {
11842 		struct ethtool_rxfh_param rxfh;
11843 
11844 		rxfh.indir = ethtool_rxfh_context_indir(ctx);
11845 		rxfh.key = ethtool_rxfh_context_key(ctx);
11846 		rxfh.hfunc = ctx->hfunc;
11847 		rxfh.input_xfrm = ctx->input_xfrm;
11848 		rxfh.rss_context = context;
11849 		rxfh.rss_delete = true;
11850 
11851 		xa_erase(&dev->ethtool->rss_ctx, context);
11852 		if (dev->ethtool_ops->create_rxfh_context)
11853 			dev->ethtool_ops->remove_rxfh_context(dev, ctx,
11854 							      context, NULL);
11855 		else
11856 			dev->ethtool_ops->set_rxfh(dev, &rxfh, NULL);
11857 		kfree(ctx);
11858 	}
11859 	xa_destroy(&dev->ethtool->rss_ctx);
11860 	mutex_unlock(&dev->ethtool->rss_lock);
11861 }
11862 
11863 /**
11864  *	unregister_netdevice_queue - remove device from the kernel
11865  *	@dev: device
11866  *	@head: list
11867  *
11868  *	This function shuts down a device interface and removes it
11869  *	from the kernel tables.
11870  *	If @head is not NULL, the device is queued to be unregistered later.
11871  *
11872  *	Callers must hold the rtnl semaphore.  You may want
11873  *	unregister_netdev() instead of this.
11874  */
11875 
11876 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
11877 {
11878 	ASSERT_RTNL();
11879 
11880 	if (head) {
11881 		list_move_tail(&dev->unreg_list, head);
11882 	} else {
11883 		LIST_HEAD(single);
11884 
11885 		list_add(&dev->unreg_list, &single);
11886 		unregister_netdevice_many(&single);
11887 	}
11888 }
11889 EXPORT_SYMBOL(unregister_netdevice_queue);
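
/* Example: a sketch of batched teardown using the @head argument, so the
 * expensive barriers in unregister_netdevice_many() are paid once per batch
 * rather than once per device; dev_a/dev_b are hypothetical:
 *
 *	LIST_HEAD(list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev_a, &list);
 *	unregister_netdevice_queue(dev_b, &list);
 *	unregister_netdevice_many(&list);
 *	rtnl_unlock();
 */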
11890 
11891 static void dev_memory_provider_uninstall(struct net_device *dev)
11892 {
11893 	unsigned int i;
11894 
11895 	for (i = 0; i < dev->real_num_rx_queues; i++) {
11896 		struct netdev_rx_queue *rxq = &dev->_rx[i];
11897 		struct pp_memory_provider_params *p = &rxq->mp_params;
11898 
11899 		if (p->mp_ops && p->mp_ops->uninstall)
11900 			p->mp_ops->uninstall(rxq->mp_params.mp_priv, rxq);
11901 	}
11902 }
11903 
11904 void unregister_netdevice_many_notify(struct list_head *head,
11905 				      u32 portid, const struct nlmsghdr *nlh)
11906 {
11907 	struct net_device *dev, *tmp;
11908 	LIST_HEAD(close_head);
11909 	int cnt = 0;
11910 
11911 	BUG_ON(dev_boot_phase);
11912 	ASSERT_RTNL();
11913 
11914 	if (list_empty(head))
11915 		return;
11916 
11917 	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
11918 		/* Some devices get here without ever having been
11919 		 * registered, as part of their initialization unwind.
11920 		 * Remove those devices and proceed with the remaining.
11921 		 */
11922 		if (dev->reg_state == NETREG_UNINITIALIZED) {
11923 			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
11924 				 dev->name, dev);
11925 
11926 			WARN_ON(1);
11927 			list_del(&dev->unreg_list);
11928 			continue;
11929 		}
11930 		dev->dismantle = true;
11931 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
11932 	}
11933 
11934 	/* If device is running, close it first. */
11935 	list_for_each_entry(dev, head, unreg_list) {
11936 		list_add_tail(&dev->close_list, &close_head);
11937 		netdev_lock_ops(dev);
11938 	}
11939 	dev_close_many(&close_head, true);
11940 
11941 	list_for_each_entry(dev, head, unreg_list) {
11942 		netdev_unlock_ops(dev);
11943 		/* And unlink it from device chain. */
11944 		unlist_netdevice(dev);
11945 		netdev_lock(dev);
11946 		WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
11947 		netdev_unlock(dev);
11948 	}
11949 	flush_all_backlogs();
11950 
11951 	synchronize_net();
11952 
11953 	list_for_each_entry(dev, head, unreg_list) {
11954 		struct sk_buff *skb = NULL;
11955 
11956 		/* Shutdown queueing discipline. */
11957 		dev_shutdown(dev);
11958 		dev_tcx_uninstall(dev);
11959 		netdev_lock_ops(dev);
11960 		dev_xdp_uninstall(dev);
11961 		netdev_unlock_ops(dev);
11962 		bpf_dev_bound_netdev_unregister(dev);
11963 		dev_memory_provider_uninstall(dev);
11964 
11965 		netdev_offload_xstats_disable_all(dev);
11966 
11967 		/* Notify protocols that we are about to destroy
11968 		 * this device. They should clean all the things.
11969 		 */
11970 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11971 
11972 		if (!dev->rtnl_link_ops ||
11973 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
11974 			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
11975 						     GFP_KERNEL, NULL, 0,
11976 						     portid, nlh);
11977 
11978 		/*
11979 		 *	Flush the unicast and multicast chains
11980 		 */
11981 		dev_uc_flush(dev);
11982 		dev_mc_flush(dev);
11983 
11984 		netdev_name_node_alt_flush(dev);
11985 		netdev_name_node_free(dev->name_node);
11986 
11987 		netdev_rss_contexts_free(dev);
11988 
11989 		call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
11990 
11991 		if (dev->netdev_ops->ndo_uninit)
11992 			dev->netdev_ops->ndo_uninit(dev);
11993 
11994 		mutex_destroy(&dev->ethtool->rss_lock);
11995 
11996 		net_shaper_flush_netdev(dev);
11997 
11998 		if (skb)
11999 			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);
12000 
12001 		/* Notifier chain MUST detach us all upper devices. */
12002 		WARN_ON(netdev_has_any_upper_dev(dev));
12003 		WARN_ON(netdev_has_any_lower_dev(dev));
12004 
12005 		/* Remove entries from kobject tree */
12006 		netdev_unregister_kobject(dev);
12007 #ifdef CONFIG_XPS
12008 		/* Remove XPS queueing entries */
12009 		netif_reset_xps_queues_gt(dev, 0);
12010 #endif
12011 	}
12012 
12013 	synchronize_net();
12014 
12015 	list_for_each_entry(dev, head, unreg_list) {
12016 		netdev_put(dev, &dev->dev_registered_tracker);
12017 		net_set_todo(dev);
12018 		cnt++;
12019 	}
12020 	atomic_add(cnt, &dev_unreg_count);
12021 
12022 	list_del(head);
12023 }
12024 
12025 /**
12026  *	unregister_netdevice_many - unregister many devices
12027  *	@head: list of devices
12028  *
12029  *  Note: as most callers use a stack-allocated list_head,
12030  *  we force a list_del() to make sure the stack won't be corrupted later.
12031  */
12032 void unregister_netdevice_many(struct list_head *head)
12033 {
12034 	unregister_netdevice_many_notify(head, 0, NULL);
12035 }
12036 EXPORT_SYMBOL(unregister_netdevice_many);
12037 
12038 /**
12039  *	unregister_netdev - remove device from the kernel
12040  *	@dev: device
12041  *
12042  *	This function shuts down a device interface and removes it
12043  *	from the kernel tables.
12044  *
12045  *	This is just a wrapper for unregister_netdevice that takes
12046  *	the rtnl semaphore.  In general you want to use this and not
12047  *	unregister_netdevice.
12048  */
12049 void unregister_netdev(struct net_device *dev)
12050 {
12051 	rtnl_net_dev_lock(dev);
12052 	unregister_netdevice(dev);
12053 	rtnl_net_dev_unlock(dev);
12054 }
12055 EXPORT_SYMBOL(unregister_netdev);
12056 
12057 int netif_change_net_namespace(struct net_device *dev, struct net *net,
12058 			       const char *pat, int new_ifindex,
12059 			       struct netlink_ext_ack *extack)
12060 {
12061 	struct netdev_name_node *name_node;
12062 	struct net *net_old = dev_net(dev);
12063 	char new_name[IFNAMSIZ] = {};
12064 	int err, new_nsid;
12065 
12066 	ASSERT_RTNL();
12067 
12068 	/* Don't allow namespace local devices to be moved. */
12069 	err = -EINVAL;
12070 	if (dev->netns_immutable) {
12071 		NL_SET_ERR_MSG(extack, "The interface netns is immutable");
12072 		goto out;
12073 	}
12074 
12075 	/* Ensure the device has been registered */
12076 	if (dev->reg_state != NETREG_REGISTERED) {
12077 		NL_SET_ERR_MSG(extack, "The interface isn't registered");
12078 		goto out;
12079 	}
12080 
12081 	/* Get out if there is nothing to do */
12082 	err = 0;
12083 	if (net_eq(net_old, net))
12084 		goto out;
12085 
12086 	/* Pick the destination device name, and ensure
12087 	 * we can use it in the destination network namespace.
12088 	 */
12089 	err = -EEXIST;
12090 	if (netdev_name_in_use(net, dev->name)) {
12091 		/* We get here if we can't use the current device name */
12092 		if (!pat) {
12093 			NL_SET_ERR_MSG(extack,
12094 				       "An interface with the same name exists in the target netns");
12095 			goto out;
12096 		}
12097 		err = dev_prep_valid_name(net, dev, pat, new_name, EEXIST);
12098 		if (err < 0) {
12099 			NL_SET_ERR_MSG_FMT(extack,
12100 					   "Unable to use '%s' for the new interface name in the target netns",
12101 					   pat);
12102 			goto out;
12103 		}
12104 	}
12105 	/* Check that none of the altnames conflicts. */
12106 	err = -EEXIST;
12107 	netdev_for_each_altname(dev, name_node) {
12108 		if (netdev_name_in_use(net, name_node->name)) {
12109 			NL_SET_ERR_MSG_FMT(extack,
12110 					   "An interface with the altname %s exists in the target netns",
12111 					   name_node->name);
12112 			goto out;
12113 		}
12114 	}
12115 
12116 	/* Check that new_ifindex isn't used yet. */
12117 	if (new_ifindex) {
12118 		err = dev_index_reserve(net, new_ifindex);
12119 		if (err < 0) {
12120 			NL_SET_ERR_MSG_FMT(extack,
12121 					   "The ifindex %d is not available in the target netns",
12122 					   new_ifindex);
12123 			goto out;
12124 		}
12125 	} else {
12126 		/* If there is an ifindex conflict assign a new one */
12127 		err = dev_index_reserve(net, dev->ifindex);
12128 		if (err == -EBUSY)
12129 			err = dev_index_reserve(net, 0);
12130 		if (err < 0) {
12131 			NL_SET_ERR_MSG(extack,
12132 				       "Unable to allocate a new ifindex in the target netns");
12133 			goto out;
12134 		}
12135 		new_ifindex = err;
12136 	}
12137 
12138 	/*
12139 	 * And now a mini version of register_netdevice() and unregister_netdevice().
12140 	 */
12141 
12142 	/* If the device is running, close it first. */
12143 	netif_close(dev);
12144 
12145 	/* And unlink it from device chain */
12146 	unlist_netdevice(dev);
12147 
12148 	synchronize_net();
12149 
12150 	/* Shutdown queueing discipline. */
12151 	dev_shutdown(dev);
12152 
12153 	/* Notify protocols that we are about to destroy
12154 	 * this device. They should clean all the things.
12155 	 *
12156 	 * Note that dev->reg_state stays at NETREG_REGISTERED.
12157 	 * This is wanted because this way 8021q and macvlan know
12158 	 * the device is just moving and can keep their slaves up.
12159 	 */
12160 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
12161 	rcu_barrier();
12162 
12163 	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
12164 
12165 	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
12166 			    new_ifindex);
12167 
12168 	/*
12169 	 *	Flush the unicast and multicast chains
12170 	 */
12171 	dev_uc_flush(dev);
12172 	dev_mc_flush(dev);
12173 
12174 	/* Send a netdev-removed uevent to the old namespace */
12175 	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
12176 	netdev_adjacent_del_links(dev);
12177 
12178 	/* Move per-net netdevice notifiers that are following the netdevice */
12179 	move_netdevice_notifiers_dev_net(dev, net);
12180 
12181 	/* Actually switch the network namespace */
12182 	dev_net_set(dev, net);
12183 	dev->ifindex = new_ifindex;
12184 
12185 	if (new_name[0]) {
12186 		/* Rename the netdev to prepared name */
12187 		write_seqlock_bh(&netdev_rename_lock);
12188 		strscpy(dev->name, new_name, IFNAMSIZ);
12189 		write_sequnlock_bh(&netdev_rename_lock);
12190 	}
12191 
12192 	/* Fixup kobjects */
12193 	dev_set_uevent_suppress(&dev->dev, 1);
12194 	err = device_rename(&dev->dev, dev->name);
12195 	dev_set_uevent_suppress(&dev->dev, 0);
12196 	WARN_ON(err);
12197 
12198 	/* Send a netdev-add uevent to the new namespace */
12199 	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
12200 	netdev_adjacent_add_links(dev);
12201 
12202 	/* Adapt owner in case owning user namespace of target network
12203 	 * namespace is different from the original one.
12204 	 */
12205 	err = netdev_change_owner(dev, net_old, net);
12206 	WARN_ON(err);
12207 
12208 	/* Add the device back in the hashes */
12209 	list_netdevice(dev);
12210 
12211 	/* Notify protocols that a new device appeared. */
12212 	call_netdevice_notifiers(NETDEV_REGISTER, dev);
12213 
12214 	/*
12215 	 *	Prevent userspace races by waiting until the network
12216 	 *	device is fully set up before sending notifications.
12217 	 */
12218 	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
12219 
12220 	synchronize_net();
12221 	err = 0;
12222 out:
12223 	return err;
12224 }
12225 
12226 static int dev_cpu_dead(unsigned int oldcpu)
12227 {
12228 	struct sk_buff **list_skb;
12229 	struct sk_buff *skb;
12230 	unsigned int cpu;
12231 	struct softnet_data *sd, *oldsd, *remsd = NULL;
12232 
12233 	local_irq_disable();
12234 	cpu = smp_processor_id();
12235 	sd = &per_cpu(softnet_data, cpu);
12236 	oldsd = &per_cpu(softnet_data, oldcpu);
12237 
12238 	/* Find end of our completion_queue. */
12239 	list_skb = &sd->completion_queue;
12240 	while (*list_skb)
12241 		list_skb = &(*list_skb)->next;
12242 	/* Append completion queue from offline CPU. */
12243 	*list_skb = oldsd->completion_queue;
12244 	oldsd->completion_queue = NULL;
12245 
12246 	/* Append output queue from offline CPU. */
12247 	if (oldsd->output_queue) {
12248 		*sd->output_queue_tailp = oldsd->output_queue;
12249 		sd->output_queue_tailp = oldsd->output_queue_tailp;
12250 		oldsd->output_queue = NULL;
12251 		oldsd->output_queue_tailp = &oldsd->output_queue;
12252 	}
12253 	/* Append NAPI poll list from offline CPU, with one exception:
12254 	 * process_backlog() must be called by the CPU owning the percpu backlog.
12255 	 * We properly handle process_queue & input_pkt_queue later.
12256 	 */
12257 	while (!list_empty(&oldsd->poll_list)) {
12258 		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
12259 							    struct napi_struct,
12260 							    poll_list);
12261 
12262 		list_del_init(&napi->poll_list);
12263 		if (napi->poll == process_backlog)
12264 			napi->state &= NAPIF_STATE_THREADED;
12265 		else
12266 			____napi_schedule(sd, napi);
12267 	}
12268 
12269 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
12270 	local_irq_enable();
12271 
12272 	if (!use_backlog_threads()) {
12273 #ifdef CONFIG_RPS
12274 		remsd = oldsd->rps_ipi_list;
12275 		oldsd->rps_ipi_list = NULL;
12276 #endif
12277 		/* send out pending IPIs on offline CPU */
12278 		net_rps_send_ipi(remsd);
12279 	}
12280 
12281 	/* Process offline CPU's input_pkt_queue */
12282 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
12283 		netif_rx(skb);
12284 		rps_input_queue_head_incr(oldsd);
12285 	}
12286 	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
12287 		netif_rx(skb);
12288 		rps_input_queue_head_incr(oldsd);
12289 	}
12290 
12291 	return 0;
12292 }
12293 
12294 /**
12295  *	netdev_increment_features - increment feature set by one
12296  *	@all: current feature set
12297  *	@one: new feature set
12298  *	@mask: mask feature set
12299  *
12300  *	Computes a new feature set after adding a device with feature set
12301  *	@one to the master device with current feature set @all.  Will not
12302  *	enable anything that is off in @mask. Returns the new feature set.
12303  */
12304 netdev_features_t netdev_increment_features(netdev_features_t all,
12305 	netdev_features_t one, netdev_features_t mask)
12306 {
12307 	if (mask & NETIF_F_HW_CSUM)
12308 		mask |= NETIF_F_CSUM_MASK;
12309 	mask |= NETIF_F_VLAN_CHALLENGED;
12310 
12311 	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
12312 	all &= one | ~NETIF_F_ALL_FOR_ALL;
12313 
12314 	/* If one device supports hw checksumming, set for all. */
12315 	if (all & NETIF_F_HW_CSUM)
12316 		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
12317 
12318 	return all;
12319 }
12320 EXPORT_SYMBOL(netdev_increment_features);
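
/* Example: a sketch of how an aggregating device (bonding/team style)
 * recomputes its feature set across all lower devices. The lower-device
 * iteration is schematic; only netdev_increment_features() and the
 * NETIF_F_* masks are real here:
 *
 *	netdev_features_t mask = features;
 *
 *	features &= ~NETIF_F_ONE_FOR_ALL;
 *	features |= NETIF_F_ALL_FOR_ALL;
 *
 *	for_each_lower_dev(dev, lower)			(schematic)
 *		features = netdev_increment_features(features,
 *						     lower->features, mask);
 */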
12321 
12322 static struct hlist_head * __net_init netdev_create_hash(void)
12323 {
12324 	int i;
12325 	struct hlist_head *hash;
12326 
12327 	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
12328 	if (hash != NULL)
12329 		for (i = 0; i < NETDEV_HASHENTRIES; i++)
12330 			INIT_HLIST_HEAD(&hash[i]);
12331 
12332 	return hash;
12333 }
12334 
12335 /* Initialize per network namespace state */
12336 static int __net_init netdev_init(struct net *net)
12337 {
12338 	BUILD_BUG_ON(GRO_HASH_BUCKETS >
12339 		     BITS_PER_BYTE * sizeof_field(struct gro_node, bitmask));
12340 
12341 	INIT_LIST_HEAD(&net->dev_base_head);
12342 
12343 	net->dev_name_head = netdev_create_hash();
12344 	if (net->dev_name_head == NULL)
12345 		goto err_name;
12346 
12347 	net->dev_index_head = netdev_create_hash();
12348 	if (net->dev_index_head == NULL)
12349 		goto err_idx;
12350 
12351 	xa_init_flags(&net->dev_by_index, XA_FLAGS_ALLOC1);
12352 
12353 	RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
12354 
12355 	return 0;
12356 
12357 err_idx:
12358 	kfree(net->dev_name_head);
12359 err_name:
12360 	return -ENOMEM;
12361 }
12362 
12363 /**
12364  *	netdev_drivername - network driver for the device
12365  *	@dev: network device
12366  *
12367  *	Determine network driver for device.
12368  */
12369 const char *netdev_drivername(const struct net_device *dev)
12370 {
12371 	const struct device_driver *driver;
12372 	const struct device *parent;
12373 	const char *empty = "";
12374 
12375 	parent = dev->dev.parent;
12376 	if (!parent)
12377 		return empty;
12378 
12379 	driver = parent->driver;
12380 	if (driver && driver->name)
12381 		return driver->name;
12382 	return empty;
12383 }
12384 
12385 static void __netdev_printk(const char *level, const struct net_device *dev,
12386 			    struct va_format *vaf)
12387 {
12388 	if (dev && dev->dev.parent) {
12389 		dev_printk_emit(level[1] - '0',
12390 				dev->dev.parent,
12391 				"%s %s %s%s: %pV",
12392 				dev_driver_string(dev->dev.parent),
12393 				dev_name(dev->dev.parent),
12394 				netdev_name(dev), netdev_reg_state(dev),
12395 				vaf);
12396 	} else if (dev) {
12397 		printk("%s%s%s: %pV",
12398 		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
12399 	} else {
12400 		printk("%s(NULL net_device): %pV", level, vaf);
12401 	}
12402 }
12403 
12404 void netdev_printk(const char *level, const struct net_device *dev,
12405 		   const char *format, ...)
12406 {
12407 	struct va_format vaf;
12408 	va_list args;
12409 
12410 	va_start(args, format);
12411 
12412 	vaf.fmt = format;
12413 	vaf.va = &args;
12414 
12415 	__netdev_printk(level, dev, &vaf);
12416 
12417 	va_end(args);
12418 }
12419 EXPORT_SYMBOL(netdev_printk);
12420 
12421 #define define_netdev_printk_level(func, level)			\
12422 void func(const struct net_device *dev, const char *fmt, ...)	\
12423 {								\
12424 	struct va_format vaf;					\
12425 	va_list args;						\
12426 								\
12427 	va_start(args, fmt);					\
12428 								\
12429 	vaf.fmt = fmt;						\
12430 	vaf.va = &args;						\
12431 								\
12432 	__netdev_printk(level, dev, &vaf);			\
12433 								\
12434 	va_end(args);						\
12435 }								\
12436 EXPORT_SYMBOL(func);
12437 
12438 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
12439 define_netdev_printk_level(netdev_alert, KERN_ALERT);
12440 define_netdev_printk_level(netdev_crit, KERN_CRIT);
12441 define_netdev_printk_level(netdev_err, KERN_ERR);
12442 define_netdev_printk_level(netdev_warn, KERN_WARNING);
12443 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
12444 define_netdev_printk_level(netdev_info, KERN_INFO);
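
/* Example: usage of the helpers defined above; when the netdev has a parent
 * device, each message is prefixed with the driver, bus and netdev names at
 * the given level:
 *
 *	netdev_err(dev, "link reset failed: %d\n", err);
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 */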
12445 
12446 static void __net_exit netdev_exit(struct net *net)
12447 {
12448 	kfree(net->dev_name_head);
12449 	kfree(net->dev_index_head);
12450 	xa_destroy(&net->dev_by_index);
12451 	if (net != &init_net)
12452 		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
12453 }
12454 
12455 static struct pernet_operations __net_initdata netdev_net_ops = {
12456 	.init = netdev_init,
12457 	.exit = netdev_exit,
12458 };
12459 
12460 static void __net_exit default_device_exit_net(struct net *net)
12461 {
12462 	struct netdev_name_node *name_node, *tmp;
12463 	struct net_device *dev, *aux;
12464 	/*
12465 	 * Push all migratable network devices back to the
12466 	 * initial network namespace
12467 	 */
12468 	ASSERT_RTNL();
12469 	for_each_netdev_safe(net, dev, aux) {
12470 		int err;
12471 		char fb_name[IFNAMSIZ];
12472 
12473 		/* Ignore unmovable devices (e.g. the loopback device) */
12474 		if (dev->netns_immutable)
12475 			continue;
12476 
12477 		/* Leave virtual devices for the generic cleanup */
12478 		if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
12479 			continue;
12480 
12481 		/* Push remaining network devices to init_net */
12482 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
12483 		if (netdev_name_in_use(&init_net, fb_name))
12484 			snprintf(fb_name, IFNAMSIZ, "dev%%d");
12485 
12486 		netdev_for_each_altname_safe(dev, name_node, tmp)
12487 			if (netdev_name_in_use(&init_net, name_node->name))
12488 				__netdev_name_node_alt_destroy(name_node);
12489 
12490 		err = dev_change_net_namespace(dev, &init_net, fb_name);
12491 		if (err) {
12492 			pr_emerg("%s: failed to move %s to init_net: %d\n",
12493 				 __func__, dev->name, err);
12494 			BUG();
12495 		}
12496 	}
12497 }
12498 
12499 static void __net_exit default_device_exit_batch(struct list_head *net_list)
12500 {
12501 	/* At exit all network devices must be removed from a network
12502 	 * namespace.  Do this in the reverse order of registration.
12503 	 * Do this across as many network namespaces as possible to
12504 	 * improve batching efficiency.
12505 	 */
12506 	struct net_device *dev;
12507 	struct net *net;
12508 	LIST_HEAD(dev_kill_list);
12509 
12510 	rtnl_lock();
12511 	list_for_each_entry(net, net_list, exit_list) {
12512 		default_device_exit_net(net);
12513 		cond_resched();
12514 	}
12515 
12516 	list_for_each_entry(net, net_list, exit_list) {
12517 		for_each_netdev_reverse(net, dev) {
12518 			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
12519 				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
12520 			else
12521 				unregister_netdevice_queue(dev, &dev_kill_list);
12522 		}
12523 	}
12524 	unregister_netdevice_many(&dev_kill_list);
12525 	rtnl_unlock();
12526 }
12527 
12528 static struct pernet_operations __net_initdata default_device_ops = {
12529 	.exit_batch = default_device_exit_batch,
12530 };
12531 
12532 static void __init net_dev_struct_check(void)
12533 {
12534 	/* TX read-mostly hotpath */
12535 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags_fast);
12536 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, netdev_ops);
12537 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, header_ops);
12538 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, _tx);
12539 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, real_num_tx_queues);
12540 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_size);
12541 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_ipv4_max_size);
12542 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_segs);
12543 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_partial_features);
12544 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, num_tc);
12545 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, mtu);
12546 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, needed_headroom);
12547 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tc_to_txq);
12548 #ifdef CONFIG_XPS
12549 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, xps_maps);
12550 #endif
12551 #ifdef CONFIG_NETFILTER_EGRESS
12552 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, nf_hooks_egress);
12553 #endif
12554 #ifdef CONFIG_NET_XGRESS
12555 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tcx_egress);
12556 #endif
12557 	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_tx, 160);
12558 
12559 	/* TXRX read-mostly hotpath */
12560 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, lstats);
12561 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, state);
12562 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, flags);
12563 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, hard_header_len);
12564 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, features);
12565 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, ip6_ptr);
12566 	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 46);
12567 
12568 	/* RX read-mostly hotpath */
12569 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ptype_specific);
12570 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ifindex);
12571 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, real_num_rx_queues);
12572 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, _rx);
12573 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_max_size);
12574 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_ipv4_max_size);
12575 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler);
12576 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler_data);
12577 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, nd_net);
12578 #ifdef CONFIG_NETPOLL
12579 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, npinfo);
12580 #endif
12581 #ifdef CONFIG_NET_XGRESS
12582 	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, tcx_ingress);
12583 #endif
12584 	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_rx, 92);
12585 }
12586 
12587 /*
12588  *	Initialize the DEV module. At boot time this walks the device list and
12589  *	unhooks any devices that fail to initialise (normally hardware not
12590  *	present) and leaves us with a valid list of present and active devices.
12591  *
12592  */
12593 
12594 /* We allocate 256 pages for each CPU if PAGE_SHIFT is 12 */
12595 #define SYSTEM_PERCPU_PAGE_POOL_SIZE	((1 << 20) / PAGE_SIZE)
12596 
12597 static int net_page_pool_create(int cpuid)
12598 {
12599 #if IS_ENABLED(CONFIG_PAGE_POOL)
12600 	struct page_pool_params page_pool_params = {
12601 		.pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
12602 		.flags = PP_FLAG_SYSTEM_POOL,
12603 		.nid = cpu_to_mem(cpuid),
12604 	};
12605 	struct page_pool *pp_ptr;
12606 	int err;
12607 
12608 	pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
12609 	if (IS_ERR(pp_ptr))
12610 		return -ENOMEM;
12611 
12612 	err = xdp_reg_page_pool(pp_ptr);
12613 	if (err) {
12614 		page_pool_destroy(pp_ptr);
12615 		return err;
12616 	}
12617 
12618 	per_cpu(system_page_pool, cpuid) = pp_ptr;
12619 #endif
12620 	return 0;
12621 }
12622 
12623 static int backlog_napi_should_run(unsigned int cpu)
12624 {
12625 	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
12626 	struct napi_struct *napi = &sd->backlog;
12627 
12628 	return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
12629 }
12630 
12631 static void run_backlog_napi(unsigned int cpu)
12632 {
12633 	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
12634 
12635 	napi_threaded_poll_loop(&sd->backlog);
12636 }
12637 
12638 static void backlog_napi_setup(unsigned int cpu)
12639 {
12640 	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
12641 	struct napi_struct *napi = &sd->backlog;
12642 
12643 	napi->thread = this_cpu_read(backlog_napi);
12644 	set_bit(NAPI_STATE_THREADED, &napi->state);
12645 }
12646 
12647 static struct smp_hotplug_thread backlog_threads = {
12648 	.store			= &backlog_napi,
12649 	.thread_should_run	= backlog_napi_should_run,
12650 	.thread_fn		= run_backlog_napi,
12651 	.thread_comm		= "backlog_napi/%u",
12652 	.setup			= backlog_napi_setup,
12653 };
12654 
12655 /*
12656  * This is called single-threaded during boot, so there is no need
12657  * to take the rtnl semaphore.
12658  */
12659 static int __init net_dev_init(void)
12660 {
12661 	int i, rc = -ENOMEM;
12662 
12663 	BUG_ON(!dev_boot_phase);
12664 
12665 	net_dev_struct_check();
12666 
12667 	if (dev_proc_init())
12668 		goto out;
12669 
12670 	if (netdev_kobject_init())
12671 		goto out;
12672 
12673 	for (i = 0; i < PTYPE_HASH_SIZE; i++)
12674 		INIT_LIST_HEAD(&ptype_base[i]);
12675 
12676 	if (register_pernet_subsys(&netdev_net_ops))
12677 		goto out;
12678 
12679 	/*
12680 	 *	Initialise the packet receive queues.
12681 	 */
12682 
12683 	flush_backlogs_fallback = flush_backlogs_alloc();
12684 	if (!flush_backlogs_fallback)
12685 		goto out;
12686 
12687 	for_each_possible_cpu(i) {
12688 		struct softnet_data *sd = &per_cpu(softnet_data, i);
12689 
12690 		skb_queue_head_init(&sd->input_pkt_queue);
12691 		skb_queue_head_init(&sd->process_queue);
12692 #ifdef CONFIG_XFRM_OFFLOAD
12693 		skb_queue_head_init(&sd->xfrm_backlog);
12694 #endif
12695 		INIT_LIST_HEAD(&sd->poll_list);
12696 		sd->output_queue_tailp = &sd->output_queue;
12697 #ifdef CONFIG_RPS
12698 		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
12699 		sd->cpu = i;
12700 #endif
12701 		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
12702 		spin_lock_init(&sd->defer_lock);
12703 
12704 		gro_init(&sd->backlog.gro);
12705 		sd->backlog.poll = process_backlog;
12706 		sd->backlog.weight = weight_p;
12707 		INIT_LIST_HEAD(&sd->backlog.poll_list);
12708 
12709 		if (net_page_pool_create(i))
12710 			goto out;
12711 	}
12712 	if (use_backlog_threads())
12713 		smpboot_register_percpu_thread(&backlog_threads);
12714 
12715 	dev_boot_phase = 0;
12716 
12717 	/* The loopback device is special: if any other network device
12718 	 * is present in a network namespace, the loopback device must
12719 	 * be present too. Since we now dynamically allocate and free the
12720 	 * loopback device, ensure this invariant is maintained by
12721 	 * keeping the loopback device as the first device on the
12722 	 * list of network devices. This ensures the loopback device
12723 	 * is the first device that appears and the last network device
12724 	 * that disappears.
12725 	 */
12726 	if (register_pernet_device(&loopback_net_ops))
12727 		goto out;
12728 
12729 	if (register_pernet_device(&default_device_ops))
12730 		goto out;
12731 
12732 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
12733 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
12734 
12735 	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
12736 				       NULL, dev_cpu_dead);
12737 	WARN_ON(rc < 0);
12738 	rc = 0;
12739 
12740 	/* avoid static key IPIs to isolated CPUs */
12741 	if (housekeeping_enabled(HK_TYPE_MISC))
12742 		net_enable_timestamp();
12743 out:
12744 	if (rc < 0) {
12745 		for_each_possible_cpu(i) {
12746 			struct page_pool *pp_ptr;
12747 
12748 			pp_ptr = per_cpu(system_page_pool, i);
12749 			if (!pp_ptr)
12750 				continue;
12751 
12752 			xdp_unreg_page_pool(pp_ptr);
12753 			page_pool_destroy(pp_ptr);
12754 			per_cpu(system_page_pool, i) = NULL;
12755 		}
12756 	}
12757 
12758 	return rc;
12759 }
12760 
12761 subsys_initcall(net_dev_init);
12762