xref: /linux-6.15/include/linux/netpoll.h (revision 6482f554)
1 /*
2  * Common code for low-level network console, dump, and debugger code
3  *
4  * Derived from netconsole, kgdb-over-ethernet, and netdump patches
5  */
6 
7 #ifndef _LINUX_NETPOLL_H
8 #define _LINUX_NETPOLL_H
9 
10 #include <linux/netdevice.h>
11 #include <linux/interrupt.h>
12 #include <linux/rcupdate.h>
13 #include <linux/list.h>
14 
/*
 * Per-client netpoll state: one instance per netpoll user (netconsole,
 * kgdb-over-ethernet, netdump), describing the device it is bound to and
 * the UDP/IPv4 endpoints it talks between.
 */
struct netpoll {
	struct net_device *dev;		/* device this client is attached to */
	struct net_device *real_dev;	/* underlying device, presumably when
					 * dev is a stacked (vlan/bond) device
					 * -- TODO confirm against users */
	char dev_name[IFNAMSIZ];	/* device name given at setup time */
	const char *name;		/* client name, for diagnostics */
	/* RX callback; args look like (np, source port, payload, len) --
	 * NOTE(review): confirm against callers of rx_hook */
	void (*rx_hook)(struct netpoll *, int, char *, int);

	__be32 local_ip, remote_ip;	/* IPv4 addresses, network byte order */
	u16 local_port, remote_port;	/* UDP ports; presumably host order
					 * (plain u16, not __be16) -- confirm */
	u8 remote_mac[ETH_ALEN];	/* MAC address of the remote peer */

	struct list_head rx; /* rx_np list element */
};
28 
/*
 * Per-device netpoll state, shared by every netpoll client attached to
 * the same net_device (reached via skb->dev->npinfo in netpoll_rx()).
 */
struct netpoll_info {
	atomic_t refcnt;	/* number of users of this structure */

	int rx_flags;		/* nonzero while RX interception is active;
				 * re-checked under rx_lock in netpoll_rx() */
	spinlock_t rx_lock;	/* protects rx_np and serializes __netpoll_rx() */
	struct list_head rx_np; /* netpolls that registered an rx_hook */

	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
	struct sk_buff_head txq;    /* TX queue; presumably skbs deferred for
				     * later transmission -- TODO confirm */

	struct delayed_work tx_work; /* deferred work, presumably drains txq */

	struct netpoll *netpoll;     /* back-pointer to an owning client --
				      * NOTE(review): confirm ownership rules */
};
43 
/* Drive a device's poll/NAPI machinery directly (no interrupts needed). */
void netpoll_poll_dev(struct net_device *dev);
/* Poll via a netpoll client; presumably wraps netpoll_poll_dev(np->dev). */
void netpoll_poll(struct netpoll *np);
/* Transmit @len bytes of @msg as a UDP packet using np's endpoints. */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
/* Log the parsed netpoll configuration. */
void netpoll_print_options(struct netpoll *np);
/* Parse a textual option string into *np; returns 0 on success --
 * NOTE(review): exact option format defined in net/core/netpoll.c. */
int netpoll_parse_options(struct netpoll *np, char *opt);
/* Attach np to its device; __ variant presumably assumes caller-held
 * locking/setup -- confirm at call sites. Return 0 on success. */
int __netpoll_setup(struct netpoll *np);
int netpoll_setup(struct netpoll *np);
/* Query/set the netpoll trap state -- TODO confirm semantics. */
int netpoll_trap(void);
void netpoll_set_trap(int trap);
/* Tear down a netpoll client; __ variant is the lock-assumed inner step. */
void __netpoll_cleanup(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
/* Core RX match: nonzero means netpoll consumed @skb (see netpoll_rx()). */
int __netpoll_rx(struct sk_buff *skb);
/* Transmit @skb through the netpoll path. */
void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
57 
58 
59 #ifdef CONFIG_NETPOLL
/*
 * Called on the normal receive path: give netpoll a chance to steal @skb.
 * Returns true when netpoll consumed the packet and the caller must not
 * process it further.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	/* npinfo is published/retired under RCU (BH flavor). */
	rcu_read_lock_bh();
	npinfo = rcu_dereference_bh(skb->dev->npinfo);

	/* Unlocked fast path: no netpoll RX consumers on this device. */
	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
		goto out;

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = true;
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

out:
	rcu_read_unlock_bh();
	return ret;
}
82 
83 static inline int netpoll_rx_on(struct sk_buff *skb)
84 {
85 	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
86 
87 	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
88 }
89 
90 static inline int netpoll_receive_skb(struct sk_buff *skb)
91 {
92 	if (!list_empty(&skb->dev->napi_list))
93 		return netpoll_rx(skb);
94 	return 0;
95 }
96 
97 static inline void *netpoll_poll_lock(struct napi_struct *napi)
98 {
99 	struct net_device *dev = napi->dev;
100 
101 	if (dev && dev->npinfo) {
102 		spin_lock(&napi->poll_lock);
103 		napi->poll_owner = smp_processor_id();
104 		return napi;
105 	}
106 	return NULL;
107 }
108 
109 static inline void netpoll_poll_unlock(void *have)
110 {
111 	struct napi_struct *napi = have;
112 
113 	if (napi) {
114 		napi->poll_owner = -1;
115 		spin_unlock(&napi->poll_lock);
116 	}
117 }
118 
/*
 * Heuristic "is netpoll transmitting right now?" check, presumably
 * because netpoll's TX path runs with local IRQs disabled -- confirm in
 * net/core/netpoll.c.  NOTE(review): any irqs-off caller matches, not
 * only netpoll.
 */
static inline int netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}
123 
124 #else
/* CONFIG_NETPOLL disabled: netpoll never consumes a packet. */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	/* Return false (not the int literal 0) to match the bool return
	 * type and the true/false usage of the CONFIG_NETPOLL=y variant. */
	return false;
}
/* CONFIG_NETPOLL disabled: no device ever has netpoll RX consumers. */
static inline int netpoll_rx_on(struct sk_buff *skb)
{
	return 0;
}
/* CONFIG_NETPOLL disabled: never steal packets from the RX path. */
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
/* CONFIG_NETPOLL disabled: nothing to lock; the token is always NULL. */
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
/* CONFIG_NETPOLL disabled: no-op; accepts the NULL token from
 * netpoll_poll_lock() above. */
static inline void netpoll_poll_unlock(void *have)
{
}
/* CONFIG_NETPOLL disabled: no per-device netpoll init needed.
 * NOTE(review): no CONFIG_NETPOLL=y counterpart is visible in this
 * header -- confirm where the real implementation lives. */
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
/* CONFIG_NETPOLL disabled: netpoll can never be transmitting. */
static inline int netpoll_tx_running(struct net_device *dev)
{
	return 0;
}
151 #endif
152 
153 #endif
154