xref: /linux-6.15/net/netfilter/nf_queue.c (revision 71ccc212)
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * a long term mutex.  The handler must provide an outfn() to accept packets
 * for queueing and must reinject all packets it receives, no matter what.
 */
static const struct nf_queue_handler __rcu *queue_handler[NFPROTO_NUMPROTO] __read_mostly;

static DEFINE_MUTEX(queue_handler_mutex);

/* Return -EBUSY when somebody else is registered, -EEXIST if the
 * same handler is already registered, and 0 on success. */
int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
	int ret;

	if (pf >= ARRAY_SIZE(queue_handler))
		return -EINVAL;

	mutex_lock(&queue_handler_mutex);
	if (queue_handler[pf] == qh)
		ret = -EEXIST;
	else if (queue_handler[pf])
		ret = -EBUSY;
	else {
		rcu_assign_pointer(queue_handler[pf], qh);
		ret = 0;
	}
	mutex_unlock(&queue_handler_mutex);

	return ret;
}
EXPORT_SYMBOL(nf_register_queue_handler);
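
/*
 * Illustrative sketch (not part of this file): roughly how a consumer such
 * as nfnetlink_queue attaches itself.  The handler name, outfn and init
 * function below are hypothetical; the only requirements are that outfn()
 * matches the nf_queue_handler prototype used above and that every entry it
 * accepts is eventually handed back through nf_reinject().
 */
#if 0
static int example_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
	/* Hand the packet off (e.g. to userspace); a verdict must later be
	 * returned with nf_reinject(entry, verdict). */
	return 0;
}

static const struct nf_queue_handler example_qh = {
	.name	= "example",
	.outfn	= example_outfn,
};

static int __init example_init(void)
{
	/* Claim the IPv4 queueing slot; fails with -EBUSY if already taken. */
	return nf_register_queue_handler(NFPROTO_IPV4, &example_qh);
}
#endif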

/* The caller must flush their queue before this */
int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
	if (pf >= ARRAY_SIZE(queue_handler))
		return -EINVAL;

	mutex_lock(&queue_handler_mutex);
	if (queue_handler[pf] && queue_handler[pf] != qh) {
		mutex_unlock(&queue_handler_mutex);
		return -EINVAL;
	}

	rcu_assign_pointer(queue_handler[pf], NULL);
	mutex_unlock(&queue_handler_mutex);

	synchronize_rcu();

	return 0;
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
{
	u_int8_t pf;

	mutex_lock(&queue_handler_mutex);
	for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) {
		if (queue_handler[pf] == qh)
			rcu_assign_pointer(queue_handler[pf], NULL);
	}
	mutex_unlock(&queue_handler_mutex);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);

static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	/* Release those devices we held, or Alexey will kill me. */
	if (entry->indev)
		dev_put(entry->indev);
	if (entry->outdev)
		dev_put(entry->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;

		if (nf_bridge->physindev)
			dev_put(nf_bridge->physindev);
		if (nf_bridge->physoutdev)
			dev_put(nf_bridge->physoutdev);
	}
#endif
	/* Drop reference to owner of hook which queued us. */
	module_put(entry->elem->owner);
}

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
static int __nf_queue(struct sk_buff *skb,
		      struct list_head *elem,
		      u_int8_t pf, unsigned int hook,
		      struct net_device *indev,
		      struct net_device *outdev,
		      int (*okfn)(struct sk_buff *),
		      unsigned int queuenum)
{
	int status;
	struct nf_queue_entry *entry = NULL;
#ifdef CONFIG_BRIDGE_NETFILTER
	struct net_device *physindev;
	struct net_device *physoutdev;
#endif
	const struct nf_afinfo *afinfo;
	const struct nf_queue_handler *qh;
	/* QUEUE == DROP if no one is waiting, to be safe. */
	rcu_read_lock();

	qh = rcu_dereference(queue_handler[pf]);
	if (!qh)
		goto err_unlock;

	afinfo = nf_get_afinfo(pf);
	if (!afinfo)
		goto err_unlock;

	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
	if (!entry)
		goto err_unlock;

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.elem	= list_entry(elem, struct nf_hook_ops, list),
		.pf	= pf,
		.hook	= hook,
		.indev	= indev,
		.outdev	= outdev,
		.okfn	= okfn,
	};

	/* If it's going away, ignore hook. */
	if (!try_module_get(entry->elem->owner)) {
		rcu_read_unlock();
		kfree(entry);
		return 0;
	}

	/* Bump dev refs so they don't vanish while packet is out */
	if (indev)
		dev_hold(indev);
	if (outdev)
		dev_hold(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge) {
		physindev = skb->nf_bridge->physindev;
		if (physindev)
			dev_hold(physindev);
		physoutdev = skb->nf_bridge->physoutdev;
		if (physoutdev)
			dev_hold(physoutdev);
	}
#endif
	skb_dst_force(skb);
	afinfo->saveroute(skb, entry);
	status = qh->outfn(entry, queuenum);

	rcu_read_unlock();

	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		goto err;
	}

	return 1;

err_unlock:
	rcu_read_unlock();
err:
	kfree_skb(skb);
	kfree(entry);
	return 1;
}

int nf_queue(struct sk_buff *skb,
	     struct list_head *elem,
	     u_int8_t pf, unsigned int hook,
	     struct net_device *indev,
	     struct net_device *outdev,
	     int (*okfn)(struct sk_buff *),
	     unsigned int queuenum)
{
	struct sk_buff *segs;

	if (!skb_is_gso(skb))
		return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
				  queuenum);

	switch (pf) {
	case NFPROTO_IPV4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case NFPROTO_IPV6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	}

	segs = skb_gso_segment(skb, 0);
	kfree_skb(skb);
	if (IS_ERR(segs))
		return 1;

	do {
		struct sk_buff *nskb = segs->next;

		segs->next = NULL;
		if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn,
				queuenum))
			kfree_skb(segs);
		segs = nskb;
	} while (segs);
	return 1;
}

void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	struct sk_buff *skb = entry->skb;
	struct list_head *elem = &entry->elem->list;
	const struct nf_afinfo *afinfo;

	rcu_read_lock();

	nf_queue_entry_release_refs(entry);

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT) {
		elem = elem->prev;
		verdict = NF_ACCEPT;
	}

	if (verdict == NF_ACCEPT) {
		afinfo = nf_get_afinfo(entry->pf);
		if (!afinfo || afinfo->reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
	next_hook:
		verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
				     skb, entry->hook,
				     entry->indev, entry->outdev, &elem,
				     entry->okfn, INT_MIN);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->okfn(skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		if (!__nf_queue(skb, elem, entry->pf, entry->hook,
				entry->indev, entry->outdev, entry->okfn,
				verdict >> NF_VERDICT_BITS))
			goto next_hook;
		break;
	case NF_STOLEN:
	default:
		kfree_skb(skb);
	}
	rcu_read_unlock();
	kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);
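
/*
 * Illustrative sketch (not part of this file): once a queue handler has
 * decided the fate of a queued packet, it hands the verdict back like this.
 * The helper name is hypothetical; the entry pointer is the one its outfn()
 * received, and NF_DROP/NF_REPEAT/NF_QUEUE are passed the same way.
 */
#if 0
static void example_issue_verdict(struct nf_queue_entry *entry)
{
	/* Resume netfilter traversal of the remaining hooks for this skb. */
	nf_reinject(entry, NF_ACCEPT);
}
#endif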

#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= ARRAY_SIZE(queue_handler))
		return NULL;

	return pos;
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;

	if (*pos >= ARRAY_SIZE(queue_handler))
		return NULL;

	return pos;
}

static void seq_stop(struct seq_file *s, void *v)
{

}

static int seq_show(struct seq_file *s, void *v)
{
	int ret;
	loff_t *pos = v;
	const struct nf_queue_handler *qh;

	rcu_read_lock();
	qh = rcu_dereference(queue_handler[*pos]);
	if (!qh)
		ret = seq_printf(s, "%2lld NONE\n", *pos);
	else
		ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
	rcu_read_unlock();

	return ret;
}

static const struct seq_operations nfqueue_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqueue_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nfqueue_seq_ops);
}

static const struct file_operations nfqueue_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqueue_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#endif /* PROC_FS */
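
/*
 * Illustrative example of what /proc/net/netfilter/nf_queue may look like;
 * one line per protocol family, "NONE" where no handler is registered and
 * the handler's name otherwise (e.g. "nf_queue" where nfnetlink_queue is
 * loaded and bound to NFPROTO_IPV4 == 2):
 *
 *	 0 NONE
 *	 1 NONE
 *	 2 nf_queue
 *	...
 *	12 NONE
 */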


int __init netfilter_queue_init(void)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("nf_queue", S_IRUGO,
			 proc_net_netfilter, &nfqueue_file_ops))
		return -1;
#endif
	return 0;
}