xref: /linux-6.15/include/linux/netpoll.h (revision e1bd4d3d)
/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */

#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

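/*
 * A single IPv4 or IPv6 address; netpoll stores its local and remote
 * endpoints in this form so the rest of the code is family-agnostic.
 */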
union inet_addr {
	__u32		all[4];
	__be32		ip;
	__be32		ip6[4];
	struct in_addr	in;
	struct in6_addr	in6;
};

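/*
 * One netpoll client instance: the configuration supplied by a user such as
 * netconsole (device name, local/remote addresses, UDP ports, remote MAC)
 * together with the device binding established by netpoll_setup().
 */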
struct netpoll {
	struct net_device *dev;
	char dev_name[IFNAMSIZ];
	const char *name;

	union inet_addr local_ip, remote_ip;
	bool ipv6;
	u16 local_port, remote_port;
	u8 remote_mac[ETH_ALEN];

	struct work_struct cleanup_work;

#ifdef CONFIG_NETPOLL_TRAP
	void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
			    int offset, int len);
	struct list_head rx; /* rx_np list element */
#endif
};

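/*
 * Per-net_device netpoll state, reached via dev->npinfo and refcounted by the
 * netpolls attached to that device.  txq holds packets whose transmission had
 * to be deferred to the tx_work worker.
 */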
struct netpoll_info {
	atomic_t refcnt;

	struct semaphore dev_lock;

	struct sk_buff_head txq;

	struct delayed_work tx_work;

	struct netpoll *netpoll;
	struct rcu_head rcu;

#ifdef CONFIG_NETPOLL_TRAP
	spinlock_t rx_lock;
	struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
	struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
#endif
};

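/*
 * netpoll_rx_disable()/netpoll_rx_enable() let code that is reconfiguring a
 * device pause netpoll's polling of it for the duration (they take and
 * release npinfo->dev_lock).  Sketch of the assumed calling pattern, e.g.
 * around bringing a device up or down:
 *
 *	netpoll_rx_disable(dev);
 *	... reconfigure dev ...
 *	netpoll_rx_enable(dev);
 */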
#ifdef CONFIG_NETPOLL
extern void netpoll_rx_disable(struct net_device *dev);
extern void netpoll_rx_enable(struct net_device *dev);
#else
static inline void netpoll_rx_disable(struct net_device *dev) { return; }
static inline void netpoll_rx_enable(struct net_device *dev) { return; }
#endif

void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
int netpoll_setup(struct netpoll *np);
void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free_async(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);
static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long flags;
	local_irq_save(flags);
	netpoll_send_skb_on_dev(np, skb, np->dev);
	local_irq_restore(flags);
}
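
/*
 * Typical client usage (an illustrative sketch only; example_np, example_init
 * and the option string are hypothetical, the string follows the
 * netconsole-style "lport@laddr/dev,rport@raddr/rmac" layout):
 *
 *	static struct netpoll example_np = {
 *		.name = "example",
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		char opt[] = "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55";
 *		int err;
 *
 *		err = netpoll_parse_options(&example_np, opt);
 *		if (!err)
 *			err = netpoll_setup(&example_np);
 *		if (err)
 *			return err;
 *
 *		netpoll_send_udp(&example_np, "hello\n", 6);
 *		return 0;
 *	}
 *
 * netpoll_send_skb() (and therefore netpoll_send_udp()) transmits with local
 * IRQs disabled, so these can be called from contexts such as console output
 * where the regular transmit path is unusable.
 */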

#ifdef CONFIG_NETPOLL_TRAP
int netpoll_trap(void);
void netpoll_set_trap(int trap);
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
{
	return !list_empty(&npinfo->rx_np);
}

static inline bool netpoll_rx_on(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);

	return npinfo && netpoll_rx_processing(npinfo);
}

static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	local_irq_save(flags);

	if (!netpoll_rx_on(skb))
		goto out;

	npinfo = rcu_dereference_bh(skb->dev->npinfo);
	spin_lock(&npinfo->rx_lock);
	/* check rx_processing again with the lock held */
	if (netpoll_rx_processing(npinfo) && __netpoll_rx(skb, npinfo))
		ret = true;
	spin_unlock(&npinfo->rx_lock);

out:
	local_irq_restore(flags);
	return ret;
}

static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	if (!list_empty(&skb->dev->napi_list))
		return netpoll_rx(skb);
	return 0;
}
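
/*
 * netpoll_receive_skb() is meant to be called from the core receive path;
 * the assumed pattern (the exact call site lives in net/core/dev.c and may
 * differ in detail) is roughly:
 *
 *	if (netpoll_receive_skb(skb))
 *		goto drop;	// consumed by netpoll
 *
 * A non-zero return means netpoll consumed the packet, typically because it
 * matched the UDP flow of a netpoll that registered an rx_skb_hook, so the
 * normal stack must not process it further.
 */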

#else
static inline int netpoll_trap(void)
{
	return 0;
}
static inline void netpoll_set_trap(int trap)
{
}
static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
{
	return false;
}
static inline bool netpoll_rx(struct sk_buff *skb)
{
	return false;
}
static inline bool netpoll_rx_on(struct sk_buff *skb)
{
	return false;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
#endif

#ifdef CONFIG_NETPOLL
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	struct net_device *dev = napi->dev;

	if (dev && dev->npinfo) {
		spin_lock(&napi->poll_lock);
		napi->poll_owner = smp_processor_id();
		return napi;
	}
	return NULL;
}

static inline void netpoll_poll_unlock(void *have)
{
	struct napi_struct *napi = have;

	if (napi) {
		napi->poll_owner = -1;
		spin_unlock(&napi->poll_lock);
	}
}
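
/*
 * netpoll_poll_lock()/netpoll_poll_unlock() keep the softirq NAPI poller and
 * netpoll's direct polling from running a given ->poll() concurrently;
 * poll_owner records which CPU currently holds it.  Assumed caller pattern
 * (a sketch of what the NAPI core does, not a verbatim quote):
 *
 *	void *have = netpoll_poll_lock(napi);
 *	work = napi->poll(napi, budget);
 *	netpoll_poll_unlock(have);
 */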

/*
 * Heuristic used by drivers such as bonding to detect that they are being
 * called from the netpoll transmit path, which runs with local IRQs off.
 */
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}

#else
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return false;
}
#endif

#endif