xref: /linux-6.15/include/linux/netpoll.h (revision b8bb7671)
/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */

#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

/* One netpoll client instance (e.g. a netconsole target). */
struct netpoll {
	struct net_device *dev;		/* bound device, resolved by netpoll_setup() */
	char dev_name[IFNAMSIZ];	/* name of the interface to bind to */
	const char *name;		/* client name, used in log messages */
	/* receive callback: (np, source UDP port, payload, payload length) */
	void (*rx_hook)(struct netpoll *, int, char *, int);

	__be32 local_ip, remote_ip;	/* IPv4 addresses, network byte order */
	u16 local_port, remote_port;	/* UDP ports */
	u8 remote_mac[ETH_ALEN];	/* MAC of the remote peer (may be broadcast) */
};
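
/*
 * Usage sketch (not part of the API): a client such as netconsole
 * typically defines one struct netpoll per target and fills in the
 * addressing fields before calling netpoll_setup().  The values below
 * are illustrative assumptions only; the IP addresses are normally
 * supplied via netpoll_parse_options().
 *
 *	static struct netpoll np = {
 *		.name        = "netconsole",
 *		.dev_name    = "eth0",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *		.remote_mac  = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 */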

/* Per-device netpoll state, hung off net_device->npinfo. */
struct netpoll_info {
	atomic_t refcnt;		/* number of netpoll clients on this device */
	int rx_flags;			/* non-zero while a receive hook is armed */
	spinlock_t rx_lock;		/* protects rx_flags and rx_np */
	struct netpoll *rx_np;		/* netpoll that registered an rx_hook */
	struct sk_buff_head arp_tx;	/* list of arp requests to reply to */
	struct sk_buff_head txq;	/* packets queued while the device was busy */
	struct delayed_work tx_work;	/* retransmits the contents of txq */
};

void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);

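/*
 * Typical client flow, sketched here as a rough guide rather than a
 * guaranteed contract (error codes and the exact option syntax are
 * defined by the implementation in net/core/netpoll.c):
 *
 *	// "local_port@local_ip/dev,remote_port@remote_ip/remote_mac"
 *	if (netpoll_parse_options(&np, config_string))
 *		return -EINVAL;			// malformed option string
 *	if (netpoll_setup(&np))			// binds np.dev and arms the device
 *		return -ENODEV;
 *	netpoll_send_udp(&np, buf, len);	// usable even from hard IRQ context
 *	...
 *	netpoll_cleanup(&np);			// drops the device reference
 */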

#ifdef CONFIG_NETPOLL
/*
 * Called from the ingress path; returns 1 if netpoll consumed the skb
 * (e.g. an ARP request or a UDP packet aimed at a registered rx_hook),
 * in which case the caller must not process it any further.
 */
static inline int netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	unsigned long flags;
	int ret = 0;

	if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
		return 0;

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = 1;
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	return ret;
}
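
/*
 * In the ingress path the caller checks netpoll first and drops any
 * packet that netpoll claimed, along these lines (sketch only; the
 * real hook-up lives in net/core/dev.c):
 *
 *	if (netpoll_rx(skb))
 *		return NET_RX_DROP;
 */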

/* True if netpoll is currently interested in packets arriving on this device. */
static inline int netpoll_rx_on(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;

	return npinfo && (npinfo->rx_np || npinfo->rx_flags);
}

/* RX hook for the core receive path; only devices with NAPI contexts are checked. */
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	if (!list_empty(&skb->dev->napi_list))
		return netpoll_rx(skb);
	return 0;
}

/*
 * Claim ownership of a NAPI context so that netpoll and the regular
 * softirq poll path never run ->poll() concurrently.  Returns an opaque
 * cookie to pass to netpoll_poll_unlock(), or NULL (no per-NAPI lock
 * taken) if the device has no netpoll state attached.
 */
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	struct net_device *dev = napi->dev;

	rcu_read_lock(); /* deal with race on ->npinfo */
	if (dev && dev->npinfo) {
		spin_lock(&napi->poll_lock);
		napi->poll_owner = smp_processor_id();
		return napi;
	}
	return NULL;
}

static inline void netpoll_poll_unlock(void *have)
{
	struct napi_struct *napi = have;

	if (napi) {
		napi->poll_owner = -1;
		spin_unlock(&napi->poll_lock);
	}
	rcu_read_unlock();
}
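
/*
 * The softirq poll loop brackets each ->poll() call with this pair so
 * netpoll cannot run the same NAPI context concurrently; roughly (see
 * net_rx_action() in net/core/dev.c for the real thing):
 *
 *	void *have = netpoll_poll_lock(napi);
 *	work = napi->poll(napi, budget);
 *	netpoll_poll_unlock(have);
 */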

#else
/* !CONFIG_NETPOLL: all of the hooks above compile away. */
static inline int netpoll_rx(struct sk_buff *skb)
{
	return 0;
}
static inline int netpoll_rx_on(struct sk_buff *skb)
{
	return 0;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
#endif

#endif /* _LINUX_NETPOLL_H */