xref: /linux-6.15/include/linux/netpoll.h (revision b6bacd55)
1 /*
2  * Common code for low-level network console, dump, and debugger code
3  *
4  * Derived from netconsole, kgdb-over-ethernet, and netdump patches
5  */
6 
7 #ifndef _LINUX_NETPOLL_H
8 #define _LINUX_NETPOLL_H
9 
10 #include <linux/netdevice.h>
11 #include <linux/interrupt.h>
12 #include <linux/rcupdate.h>
13 #include <linux/list.h>
14 
/*
 * Protocol-agnostic IP address storage: the same 16 bytes are viewed
 * either as raw words, an IPv4 address, or an IPv6 address (presumably
 * selected by netpoll->ipv6 — see struct netpoll below).
 */
union inet_addr {
	__u32		all[4];		/* raw word view, e.g. for bulk compare/copy */
	__be32		ip;		/* IPv4 address, network byte order */
	__be32		ip6[4];		/* IPv6 address as four big-endian words */
	struct in_addr	in;		/* IPv4 address in struct form */
	struct in6_addr	in6;		/* IPv6 address in struct form */
};
22 
/*
 * Per-client netpoll instance: names the device and the UDP endpoints a
 * client (e.g. netconsole, kgdboe) uses for polled packet I/O.
 */
struct netpoll {
	struct net_device *dev;		/* device used for TX/RX */
	char dev_name[IFNAMSIZ];	/* interface name, e.g. "eth0" */
	const char *name;		/* client identifier string */
	/* optional callback for received UDP payloads matching this netpoll */
	void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
			    int offset, int len);

	union inet_addr local_ip, remote_ip;
	bool ipv6;			/* true: treat the addresses above as IPv6 */
	u16 local_port, remote_port;	/* UDP ports, local and remote */
	u8 remote_mac[ETH_ALEN];	/* destination MAC for outgoing frames */

	struct list_head rx; /* rx_np list element */
	struct work_struct cleanup_work; /* deferred teardown (see __netpoll_free_async) */
};
38 
/*
 * Per-device netpoll state, reached via net_device->npinfo (under
 * RCU-bh — see netpoll_rx_on()) and freed via RCU (@rcu).
 */
struct netpoll_info {
	atomic_t refcnt;		/* number of attached netpoll clients */

	spinlock_t rx_lock;		/* serializes RX consumption; held around @rx_np checks */
	struct semaphore dev_lock;
	struct list_head rx_np; /* netpolls that registered an rx_skb_hook */

	struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
	struct sk_buff_head txq;	/* packets queued for deferred transmit */

	struct delayed_work tx_work;	/* deferred TX work (presumably drains @txq — see definition) */

	struct netpoll *netpoll;
	struct rcu_head rcu;		/* RCU-deferred free of this structure */
};
54 
#ifdef CONFIG_NETPOLL
extern void netpoll_rx_disable(struct net_device *dev);
extern void netpoll_rx_enable(struct net_device *dev);
#else
/* Without CONFIG_NETPOLL the RX gate helpers compile away to no-ops. */
static inline void netpoll_rx_disable(struct net_device *dev) {}
static inline void netpoll_rx_enable(struct net_device *dev) {}
#endif
62 
/* Send a UDP packet carrying @len bytes of @msg through @np's device. */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
/* Log the current configuration of @np. */
void netpoll_print_options(struct netpoll *np);
/* Parse config string @opt into @np; int return presumably 0/-errno — see definition. */
int netpoll_parse_options(struct netpoll *np, char *opt);
/* Attach @np to @ndev; __ variant takes an explicit allocation flag. */
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
int netpoll_setup(struct netpoll *np);
/* Query/set the netpoll packet trap state (CONFIG_NETPOLL_TRAP machinery). */
int netpoll_trap(void);
void netpoll_set_trap(int trap);
/* Teardown: __netpoll_cleanup is the locked inner form; _free_async defers
 * the final free (via struct netpoll::cleanup_work — confirm in definition). */
void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free_async(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
/* RX slow path; called under npinfo->rx_lock from netpoll_rx() below. */
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);
/*
 * Transmit @skb on @np's device with local interrupts disabled across
 * the call; netpoll_tx_running() relies on this irqs-off state to
 * detect an in-progress netpoll transmit on this CPU.
 */
static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long flags;
	local_irq_save(flags);
	netpoll_send_skb_on_dev(np, skb, np->dev);
	local_irq_restore(flags);
}
83 
#ifdef CONFIG_NETPOLL_TRAP
/*
 * True if any netpoll client registered an rx_skb_hook on this device
 * (i.e. @rx_np is non-empty), meaning incoming packets may need to be
 * diverted to netpoll instead of the normal RX path.
 */
static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
{
	return !list_empty(&npinfo->rx_np);
}
#else
/* RX trapping compiled out: netpoll never claims incoming packets. */
static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
{
	return false;
}
#endif
95 
96 #ifdef CONFIG_NETPOLL
97 static inline bool netpoll_rx_on(struct sk_buff *skb)
98 {
99 	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
100 
101 	return npinfo && netpoll_rx_processing(npinfo);
102 }
103 
/*
 * Offer @skb to netpoll.  Returns true if netpoll consumed the packet,
 * in which case the normal RX path must not process it further.
 *
 * Local IRQs are disabled for the whole sequence, and rx_lock
 * serializes against other RX consumers.  The processing state is
 * tested twice: once locklessly via netpoll_rx_on() to bail out cheaply
 * in the common case, and again under rx_lock because it may have
 * changed between the two checks.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	local_irq_save(flags);

	if (!netpoll_rx_on(skb))
		goto out;

	npinfo = rcu_dereference_bh(skb->dev->npinfo);
	spin_lock(&npinfo->rx_lock);
	/* check rx_processing again with the lock held */
	if (netpoll_rx_processing(npinfo) && __netpoll_rx(skb, npinfo))
		ret = true;
	spin_unlock(&npinfo->rx_lock);

out:
	local_irq_restore(flags);
	return ret;
}
126 
127 static inline int netpoll_receive_skb(struct sk_buff *skb)
128 {
129 	if (!list_empty(&skb->dev->napi_list))
130 		return netpoll_rx(skb);
131 	return 0;
132 }
133 
/*
 * If @napi's device is netpoll-capable (has npinfo), take the per-NAPI
 * poll lock and record this CPU as the poll owner.  Returns an opaque
 * cookie to pass to netpoll_poll_unlock(); NULL means no lock was
 * taken.  poll_owner is set only after the lock is held.
 */
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	struct net_device *dev = napi->dev;

	if (dev && dev->npinfo) {
		spin_lock(&napi->poll_lock);
		napi->poll_owner = smp_processor_id();
		return napi;
	}
	return NULL;
}
145 
/*
 * Undo netpoll_poll_lock().  @have is its return value; NULL makes this
 * a no-op.  The owner is reset to -1 (no owner) before the lock is
 * released, mirroring the acquire-then-set order on the lock side.
 */
static inline void netpoll_poll_unlock(void *have)
{
	struct napi_struct *napi = have;

	if (napi) {
		napi->poll_owner = -1;
		spin_unlock(&napi->poll_lock);
	}
}
155 
/*
 * Heuristic "is netpoll transmitting on this CPU?" test: netpoll TX
 * runs with local IRQs disabled (see netpoll_send_skb()), so irqs-off
 * is used as the indicator.  NOTE(review): any irqs-off context
 * matches, not just netpoll — @dev is not consulted at all.
 */
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}
160 
161 #else
/* !CONFIG_NETPOLL: every hook compiles away to a trivial no-op, so the
 * core networking code can call them unconditionally. */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	return false;
}
static inline bool netpoll_rx_on(struct sk_buff *skb)
{
	return false;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
/* NOTE(review): no CONFIG_NETPOLL counterpart of netpoll_netdev_init is
 * visible in this header — confirm it is declared elsewhere or dead. */
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return false;
}
188 #endif
189 
190 #endif
191