xref: /linux-6.15/net/core/dev.h (revision 8033d2ae)
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _NET_CORE_DEV_H
#define _NET_CORE_DEV_H

#include <linux/cleanup.h>
#include <linux/types.h>
#include <linux/rwsem.h>
#include <linux/netdevice.h>
#include <net/netdev_lock.h>

struct net;
struct netlink_ext_ack;
struct cpumask;

/* Random bits of netdevice that don't need to be exposed */
#define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be ^2 and !overflow buckets */
struct sd_flow_limit {
	u64			count;
	unsigned int		num_buckets;
	unsigned int		history_head;
	u16			history[FLOW_LIMIT_HISTORY];
	u8			buckets[];
};
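
/*
 * Note: FLOW_LIMIT_HISTORY must be a power of two so history_head can
 * wrap with a cheap "& (FLOW_LIMIT_HISTORY - 1)" mask, and must not
 * exceed 255 so the u8 counters in buckets[] cannot overflow while a
 * flow still has entries in history[].
 */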

extern int netdev_flow_limit_table_len;

struct napi_struct *
netdev_napi_by_id_lock(struct net *net, unsigned int napi_id);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);

struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex);
struct net_device *__netdev_put_lock(struct net_device *dev);
struct net_device *
netdev_xa_find_lock(struct net *net, struct net_device *dev,
		    unsigned long *index);

DEFINE_FREE(netdev_unlock, struct net_device *, if (_T) netdev_unlock(_T));

#define for_each_netdev_lock_scoped(net, var_name, ifindex)		\
	for (struct net_device *var_name __free(netdev_unlock) = NULL;	\
	     (var_name = netdev_xa_find_lock(net, var_name, &ifindex)); \
	     ifindex++)
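
/*
 * Usage sketch (illustrative only): walk every device in a netns with
 * the instance lock held for the body of the loop; __free(netdev_unlock)
 * drops the lock when the iteration variable goes out of scope.
 * inspect_dev() is a hypothetical callback, not part of this header.
 *
 *	unsigned long ifindex = 0;
 *
 *	for_each_netdev_lock_scoped(net, dev, ifindex)
 *		inspect_dev(dev);
 */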

#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif

void linkwatch_init_dev(struct net_device *dev);
void linkwatch_run_queue(void);

void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);
void dev_addr_check(struct net_device *dev);

#if IS_ENABLED(CONFIG_NET_SHAPER)
void net_shaper_flush_netdev(struct net_device *dev);
void net_shaper_set_real_num_tx_queues(struct net_device *dev,
				       unsigned int txq);
#else
static inline void net_shaper_flush_netdev(struct net_device *dev) {}
static inline void net_shaper_set_real_num_tx_queues(struct net_device *dev,
						     unsigned int txq) {}
#endif

/* sysctls not referred to from outside net/core/ */
extern int		netdev_unregister_timeout_secs;
extern int		weight_p;
extern int		dev_weight_rx_bias;
extern int		dev_weight_tx_bias;

extern struct rw_semaphore dev_addr_sem;

/* rtnl helpers */
extern struct list_head net_todo_list;
void netdev_run_todo(void);

/* netdev management, shared between various uAPI entry points */
struct netdev_name_node {
	struct hlist_node hlist;
	struct list_head list;
	struct net_device *dev;
	const char *name;
	struct rcu_head rcu;
};

int netdev_get_name(struct net *net, char *name, int ifindex);
int netif_change_name(struct net_device *dev, const char *newname);
int dev_change_name(struct net_device *dev, const char *newname);

#define netdev_for_each_altname(dev, namenode)				\
	list_for_each_entry((namenode), &(dev)->name_node->list, list)
#define netdev_for_each_altname_safe(dev, namenode, next)		\
	list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
				 list)
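
/*
 * Usage sketch (illustrative only): iterate a device's alternative
 * names; the _safe variant additionally tolerates deleting the current
 * entry while walking the list.
 *
 *	struct netdev_name_node *name_node;
 *
 *	netdev_for_each_altname(dev, name_node)
 *		pr_debug("altname: %s\n", name_node->name);
 */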

int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);

int dev_validate_mtu(struct net_device *dev, int mtu,
		     struct netlink_ext_ack *extack);
int netif_set_mtu_ext(struct net_device *dev, int new_mtu,
		      struct netlink_ext_ack *extack);

int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len);

int netif_change_proto_down(struct net_device *dev, bool proto_down);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
void netdev_change_proto_down_reason_locked(struct net_device *dev,
					    unsigned long mask, u32 value);

typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, int expected_fd, u32 flags);

int netif_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
void netif_set_group(struct net_device *dev, int new_group);
void dev_set_group(struct net_device *dev, int new_group);
int netif_change_carrier(struct net_device *dev, bool new_carrier);
int dev_change_carrier(struct net_device *dev, bool new_carrier);

void __dev_set_rx_mode(struct net_device *dev);

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges, u32 portid,
			const struct nlmsghdr *nlh);

void unregister_netdevice_many_notify(struct list_head *head,
				      u32 portid, const struct nlmsghdr *nlh);

static inline void netif_set_up(struct net_device *dev, bool value)
{
	if (value)
		dev->flags |= IFF_UP;
	else
		dev->flags &= ~IFF_UP;

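	/*
	 * dev->up is protected by the instance lock; devices that need
	 * the ops lock are assumed to be locked by the caller already,
	 * so only take the instance lock for the remaining devices.
	 */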
	if (!netdev_need_ops_lock(dev))
		netdev_lock(dev);
	dev->up = value;
	if (!netdev_need_ops_lock(dev))
		netdev_unlock(dev);
}

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* dev->gso_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_size, size);
	if (size <= GSO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gso_ipv4_max_size, size);
}

static inline void netif_set_gso_max_segs(struct net_device *dev,
					  unsigned int segs)
{
	/* dev->gso_max_segs is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_segs, segs);
}

static inline void netif_set_gro_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_max_size, size);
	if (size <= GRO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gro_ipv4_max_size, size);
}

static inline void netif_set_gso_ipv4_max_size(struct net_device *dev,
					       unsigned int size)
{
	/* dev->gso_ipv4_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_ipv4_max_size, size);
}

static inline void netif_set_gro_ipv4_max_size(struct net_device *dev,
					       unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_ipv4_max_size, size);
}
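
/*
 * Reader-side sketch (illustrative only): the WRITE_ONCE() stores above
 * pair with lockless READ_ONCE() loads, e.g. in sk_setup_caps():
 *
 *	max_size = READ_ONCE(dev->gso_max_size);
 *
 * The _ONCE pair only guarantees tear-free single-field accesses; it
 * does not make the two stores in netif_set_gso_max_size() atomic as
 * a unit.
 */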

/**
 * napi_get_defer_hard_irqs - get the NAPI's defer_hard_irqs
 * @n: napi struct to get the defer_hard_irqs field from
 *
 * Return: the per-NAPI value of the defer_hard_irqs field.
 */
static inline u32 napi_get_defer_hard_irqs(const struct napi_struct *n)
{
	return READ_ONCE(n->defer_hard_irqs);
}

/**
 * napi_set_defer_hard_irqs - set the defer_hard_irqs for a napi
 * @n: napi_struct to set the defer_hard_irqs field
 * @defer: the value the field should be set to
 */
static inline void napi_set_defer_hard_irqs(struct napi_struct *n, u32 defer)
{
	WRITE_ONCE(n->defer_hard_irqs, defer);
}

/**
 * netdev_set_defer_hard_irqs - set defer_hard_irqs for all NAPIs of a netdev
 * @netdev: the net_device for which all NAPIs will have defer_hard_irqs set
 * @defer: the defer_hard_irqs value to set
 */
static inline void netdev_set_defer_hard_irqs(struct net_device *netdev,
					      u32 defer)
{
	unsigned int count = max(netdev->num_rx_queues,
				 netdev->num_tx_queues);
	struct napi_struct *napi;
	int i;

	WRITE_ONCE(netdev->napi_defer_hard_irqs, defer);
	list_for_each_entry(napi, &netdev->napi_list, dev_list)
		napi_set_defer_hard_irqs(napi, defer);

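	/* Also persist into napi_config[] so NAPIs added later inherit it. */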
	for (i = 0; i < count; i++)
		netdev->napi_config[i].defer_hard_irqs = defer;
}

/**
 * napi_get_gro_flush_timeout - get the gro_flush_timeout
 * @n: napi struct to get the gro_flush_timeout from
 *
 * Return: the per-NAPI value of the gro_flush_timeout field.
 */
static inline unsigned long
napi_get_gro_flush_timeout(const struct napi_struct *n)
{
	return READ_ONCE(n->gro_flush_timeout);
}

/**
 * napi_set_gro_flush_timeout - set the gro_flush_timeout for a napi
 * @n: napi struct to set the gro_flush_timeout
 * @timeout: timeout value to set
 *
 * napi_set_gro_flush_timeout sets the per-NAPI gro_flush_timeout
 */
static inline void napi_set_gro_flush_timeout(struct napi_struct *n,
					      unsigned long timeout)
{
	WRITE_ONCE(n->gro_flush_timeout, timeout);
}

/**
 * netdev_set_gro_flush_timeout - set gro_flush_timeout of a netdev's NAPIs
 * @netdev: the net_device for which all NAPIs will have gro_flush_timeout set
 * @timeout: the timeout value to set
 */
static inline void netdev_set_gro_flush_timeout(struct net_device *netdev,
						unsigned long timeout)
{
	unsigned int count = max(netdev->num_rx_queues,
				 netdev->num_tx_queues);
	struct napi_struct *napi;
	int i;

	WRITE_ONCE(netdev->gro_flush_timeout, timeout);
	list_for_each_entry(napi, &netdev->napi_list, dev_list)
		napi_set_gro_flush_timeout(napi, timeout);

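	/* As above, persist into napi_config[] for NAPIs added later. */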
	for (i = 0; i < count; i++)
		netdev->napi_config[i].gro_flush_timeout = timeout;
}

/**
 * napi_get_irq_suspend_timeout - get the irq_suspend_timeout
 * @n: napi struct to get the irq_suspend_timeout from
 *
 * Return: the per-NAPI value of the irq_suspend_timeout field.
 */
static inline unsigned long
napi_get_irq_suspend_timeout(const struct napi_struct *n)
{
	return READ_ONCE(n->irq_suspend_timeout);
}

/**
 * napi_set_irq_suspend_timeout - set the irq_suspend_timeout for a napi
 * @n: napi struct to set the irq_suspend_timeout
 * @timeout: timeout value to set
 *
 * napi_set_irq_suspend_timeout sets the per-NAPI irq_suspend_timeout
 */
static inline void napi_set_irq_suspend_timeout(struct napi_struct *n,
						unsigned long timeout)
{
	WRITE_ONCE(n->irq_suspend_timeout, timeout);
}

int rps_cpumask_housekeeping(struct cpumask *mask);

#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
void xdp_do_check_flushed(struct napi_struct *napi);
#else
static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
#endif

/* Best effort check that NAPI is not idle (can't be scheduled to run) */
static inline void napi_assert_will_not_race(const struct napi_struct *napi)
{
	/* uninitialized instance, can't race */
	if (!napi->poll_list.next)
		return;

	/* SCHED bit is set on disabled instances */
	WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state));
	WARN_ON(READ_ONCE(napi->list_owner) != -1);
}

void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);

#define XMIT_RECURSION_LIMIT	8

#ifndef CONFIG_PREEMPT_RT
static inline bool dev_xmit_recursion(void)
{
	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
			XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	__this_cpu_inc(softnet_data.xmit.recursion);
}

static inline void dev_xmit_recursion_dec(void)
{
	__this_cpu_dec(softnet_data.xmit.recursion);
}
#else
static inline bool dev_xmit_recursion(void)
{
	return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	current->net_xmit.recursion++;
}

static inline void dev_xmit_recursion_dec(void)
{
	current->net_xmit.recursion--;
}
#endif
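
/*
 * Usage sketch (illustrative only, mirroring callers such as
 * __bpf_tx_skb()): bracket a nested transmit with the helpers above so
 * recursive dev_queue_xmit() calls cannot overflow the stack.
 *
 *	if (dev_xmit_recursion()) {
 *		kfree_skb(skb);
 *		return -ENETDOWN;
 *	}
 *	dev_xmit_recursion_inc();
 *	ret = dev_queue_xmit(skb);
 *	dev_xmit_recursion_dec();
 */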

int dev_set_hwtstamp_phylib(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack);
int dev_get_hwtstamp_phylib(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg);
int net_hwtstamp_validate(const struct kernel_hwtstamp_config *cfg);

#endif