xref: /linux-6.15/include/linux/netpoll.h (revision ff607631)
1 /*
2  * Common code for low-level network console, dump, and debugger code
3  *
4  * Derived from netconsole, kgdb-over-ethernet, and netdump patches
5  */
6 
7 #ifndef _LINUX_NETPOLL_H
8 #define _LINUX_NETPOLL_H
9 
10 #include <linux/netdevice.h>
11 #include <linux/interrupt.h>
12 #include <linux/rcupdate.h>
13 #include <linux/list.h>
14 
/*
 * Address storage shared by the IPv4 and IPv6 netpoll endpoint fields.
 * All views alias the same 16 bytes; which member is valid is decided
 * by netpoll::ipv6.
 */
union inet_addr {
	__u32		all[4];	/* raw word view, sized for IPv6 */
	__be32		ip;	/* IPv4 address, network byte order */
	__be32		ip6[4];	/* IPv6 address words, network byte order */
	struct in_addr	in;	/* IPv4 via the standard kernel type */
	struct in6_addr	in6;	/* IPv6 via the standard kernel type */
};
22 
/*
 * Per-client netpoll configuration: one instance per user (netconsole,
 * kgdb-over-ethernet, netdump), bound to a single network device.
 */
struct netpoll {
	struct net_device *dev;		/* device used for tx/rx; set at setup */
	char dev_name[IFNAMSIZ];	/* interface name used to find @dev */
	const char *name;		/* client identifier (e.g. for messages) */
	/*
	 * Optional receive callback.  When set, this netpoll is linked on
	 * netpoll_info::rx_np and matching incoming skbs are handed to it;
	 * @offset/@len appear to delimit the payload inside @skb.
	 * NOTE(review): exact source/offset semantics are defined by
	 * __netpoll_rx() elsewhere — confirm against that implementation.
	 */
	void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
			    int offset, int len);

	union inet_addr local_ip, remote_ip;	/* endpoint addresses */
	bool ipv6;		/* true: use the IPv6 members of the unions */
	u16 local_port, remote_port;	/* UDP ports; presumably host order — confirm */
	u8 remote_mac[ETH_ALEN];	/* destination MAC for transmitted frames */

	struct list_head rx; /* rx_np list element */
	struct work_struct cleanup_work;	/* deferred cleanup (see __netpoll_free_async) */
};
38 
/*
 * Per-device netpoll state, hung off net_device::npinfo and shared by
 * all netpoll clients attached to that device.  Freed via RCU (@rcu).
 */
struct netpoll_info {
	atomic_t refcnt;	/* number of attached netpoll instances */

	unsigned long rx_flags;	/* nonzero: rx interception active (see netpoll_rx_on) */
	spinlock_t rx_lock;	/* protects @rx_np and rx processing */
	struct semaphore dev_lock;
	struct list_head rx_np; /* netpolls that registered an rx_skb_hook */

	struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
	struct sk_buff_head txq;	/* deferred tx queue, drained by @tx_work */

	struct delayed_work tx_work;	/* retries transmits that couldn't go out inline */

	struct netpoll *netpoll;	/* back-pointer to the owning netpoll */
	struct rcu_head rcu;	/* RCU grace period before the struct is freed */
};
55 
#ifdef CONFIG_NETPOLL
/*
 * Temporarily suspend / resume netpoll rx handling on @dev (implemented
 * in net/core/netpoll.c); compiled to no-ops when netpoll is disabled.
 */
extern void netpoll_rx_disable(struct net_device *dev);
extern void netpoll_rx_enable(struct net_device *dev);
#else
static inline void netpoll_rx_disable(struct net_device *dev) { return; }
static inline void netpoll_rx_enable(struct net_device *dev) { return; }
#endif
63 
/* Core netpoll entry points, implemented in net/core/netpoll.c. */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);	/* tx @len bytes of @msg as UDP */
void netpoll_print_options(struct netpoll *np);		/* log the parsed configuration */
int netpoll_parse_options(struct netpoll *np, char *opt);	/* parse "port@addr/dev" style config string */
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);	/* attach @np to an already-resolved device */
int netpoll_setup(struct netpoll *np);			/* resolve dev_name and attach */
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void __netpoll_cleanup(struct netpoll *np);		/* detach; caller holds required locks */
void __netpoll_free_async(struct netpoll *np);		/* detach and free via cleanup_work */
void netpoll_cleanup(struct netpoll *np);		/* full detach, takes its own locks */
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);	/* rx fast path; see netpoll_rx() below */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);	/* low-level tx on a specific device */
77 static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
78 {
79 	unsigned long flags;
80 	local_irq_save(flags);
81 	netpoll_send_skb_on_dev(np, skb, np->dev);
82 	local_irq_restore(flags);
83 }
84 
#ifdef CONFIG_NETPOLL_TRAP
/*
 * True when at least one netpoll on this device registered an
 * rx_skb_hook (i.e. the rx_np list is non-empty), so incoming packets
 * may need to be intercepted.  Always false without CONFIG_NETPOLL_TRAP.
 */
static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
{
	return !list_empty(&npinfo->rx_np);
}
#else
static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
{
	return false;
}
#endif
96 
97 #ifdef CONFIG_NETPOLL
98 static inline bool netpoll_rx_on(struct sk_buff *skb)
99 {
100 	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
101 
102 	return npinfo && (netpoll_rx_processing(npinfo) || npinfo->rx_flags);
103 }
104 
/*
 * Offer @skb to netpoll rx interception.  Returns true if netpoll
 * consumed the packet (caller must not process it further), false to
 * let normal stack processing continue.
 *
 * Runs with local interrupts disabled so the rx path cannot be
 * re-entered from IRQ context while rx_lock is held.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	local_irq_save(flags);

	/* Fast path: no netpoll rx state on this device at all. */
	if (!netpoll_rx_on(skb))
		goto out;

	npinfo = rcu_dereference_bh(skb->dev->npinfo);
	spin_lock(&npinfo->rx_lock);
	/* check rx_processing again with the lock held */
	if (netpoll_rx_processing(npinfo) && __netpoll_rx(skb, npinfo))
		ret = true;
	spin_unlock(&npinfo->rx_lock);

out:
	local_irq_restore(flags);
	return ret;
}
127 
128 static inline int netpoll_receive_skb(struct sk_buff *skb)
129 {
130 	if (!list_empty(&skb->dev->napi_list))
131 		return netpoll_rx(skb);
132 	return 0;
133 }
134 
135 static inline void *netpoll_poll_lock(struct napi_struct *napi)
136 {
137 	struct net_device *dev = napi->dev;
138 
139 	if (dev && dev->npinfo) {
140 		spin_lock(&napi->poll_lock);
141 		napi->poll_owner = smp_processor_id();
142 		return napi;
143 	}
144 	return NULL;
145 }
146 
147 static inline void netpoll_poll_unlock(void *have)
148 {
149 	struct napi_struct *napi = have;
150 
151 	if (napi) {
152 		napi->poll_owner = -1;
153 		spin_unlock(&napi->poll_lock);
154 	}
155 }
156 
/*
 * Heuristic for "is netpoll currently transmitting on this CPU?":
 * netpoll_send_skb() runs with local interrupts disabled, so
 * irqs_disabled() approximates that state.  Can report false positives
 * from any other irqs-off context — callers only use it as a hint.
 */
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}
161 
#else
/*
 * !CONFIG_NETPOLL stubs: netpoll never intercepts rx, needs no poll
 * locking, and never transmits.  All of these must compile away.
 *
 * NOTE(review): netpoll_netdev_init() exists only in this stub branch;
 * no CONFIG_NETPOLL counterpart is visible in this file — confirm it is
 * defined (or intentionally absent) elsewhere when netpoll is enabled.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	return false;
}
static inline bool netpoll_rx_on(struct sk_buff *skb)
{
	return false;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return false;
}
#endif
190 
191 #endif
192