// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/psample/psample.c - Netlink channel for packet sampling
 * Copyright (c) 2017 Yotam Gigi <[email protected]>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/genetlink.h>
#include <net/psample.h>
#include <linux/spinlock.h>
#include <net/ip_tunnels.h>
#include <net/dst_metadata.h>

#define PSAMPLE_MAX_PACKET_SIZE 0xffff

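/*
 * Sampling groups from all network namespaces live on this single global
 * list; all list accesses are serialized by psample_groups_lock.
 */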
static LIST_HEAD(psample_groups_list);
static DEFINE_SPINLOCK(psample_groups_lock);

/* multicast groups */
enum psample_nl_multicast_groups {
	PSAMPLE_NL_MCGRP_CONFIG,
	PSAMPLE_NL_MCGRP_SAMPLE,
};

static const struct genl_multicast_group psample_nl_mcgrps[] = {
	[PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
	[PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
};

static struct genl_family psample_nl_family __ro_after_init;

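/*
 * Fill @msg with a description of @group (group number, reference count and
 * current sequence number) for @cmd, e.g. PSAMPLE_CMD_NEW_GROUP notifications
 * or GET_GROUP dump replies.
 */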
static int psample_group_nl_fill(struct sk_buff *msg,
				 struct psample_group *group,
				 enum psample_command cmd, u32 portid, u32 seq,
				 int flags)
{
	void *hdr;
	int ret;

	hdr = genlmsg_put(msg, portid, seq, &psample_nl_family, flags, cmd);
	if (!hdr)
		return -EMSGSIZE;

	ret = nla_put_u32(msg, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
	if (ret < 0)
		goto error;

	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_REFCOUNT, group->refcount);
	if (ret < 0)
		goto error;

	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_SEQ, group->seq);
	if (ret < 0)
		goto error;

	genlmsg_end(msg, hdr);
	return 0;

error:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

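/*
 * PSAMPLE_CMD_GET_GROUP dump handler: walk the global group list and report
 * every group that belongs to the requesting socket's network namespace,
 * resuming from the index saved in cb->args[0] on multi-part dumps.
 */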
static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg,
					   struct netlink_callback *cb)
{
	struct psample_group *group;
	int start = cb->args[0];
	int idx = 0;
	int err;

	spin_lock_bh(&psample_groups_lock);
	list_for_each_entry(group, &psample_groups_list, list) {
		if (!net_eq(group->net, sock_net(msg->sk)))
			continue;
		if (idx < start) {
			idx++;
			continue;
		}
		err = psample_group_nl_fill(msg, group, PSAMPLE_CMD_NEW_GROUP,
					    NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			break;
		idx++;
	}

	spin_unlock_bh(&psample_groups_lock);
	cb->args[0] = idx;
	return msg->len;
}

static const struct genl_ops psample_nl_ops[] = {
	{
		.cmd = PSAMPLE_CMD_GET_GROUP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = psample_nl_cmd_get_group_dumpit,
		/* can be retrieved by unprivileged users */
	}
};

static struct genl_family psample_nl_family __ro_after_init = {
	.name		= PSAMPLE_GENL_NAME,
	.version	= PSAMPLE_GENL_VERSION,
	.maxattr	= PSAMPLE_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.mcgrps		= psample_nl_mcgrps,
	.ops		= psample_nl_ops,
	.n_ops		= ARRAY_SIZE(psample_nl_ops),
	.n_mcgrps	= ARRAY_SIZE(psample_nl_mcgrps),
};

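/*
 * Multicast a group creation/deletion notification on the CONFIG multicast
 * group of the group's network namespace. Failures are silently ignored.
 */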
static void psample_group_notify(struct psample_group *group,
				 enum psample_command cmd)
{
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!msg)
		return;

	err = psample_group_nl_fill(msg, group, cmd, 0, 0, NLM_F_MULTI);
	if (!err)
		genlmsg_multicast_netns(&psample_nl_family, group->net, msg, 0,
					PSAMPLE_NL_MCGRP_CONFIG, GFP_ATOMIC);
	else
		nlmsg_free(msg);
}

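/*
 * Allocate a new group and announce it. The caller must hold
 * psample_groups_lock; the group starts with a reference count of zero and
 * the caller is expected to take the first reference.
 */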
static struct psample_group *psample_group_create(struct net *net,
						  u32 group_num)
{
	struct psample_group *group;

	group = kzalloc(sizeof(*group), GFP_ATOMIC);
	if (!group)
		return NULL;

	group->net = net;
	group->group_num = group_num;
	list_add_tail(&group->list, &psample_groups_list);

	psample_group_notify(group, PSAMPLE_CMD_NEW_GROUP);
	return group;
}

static void psample_group_destroy(struct psample_group *group)
{
	psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
	list_del(&group->list);
	kfree_rcu(group, rcu);
}

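/* The caller must hold psample_groups_lock. */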
static struct psample_group *
psample_group_lookup(struct net *net, u32 group_num)
{
	struct psample_group *group;

	list_for_each_entry(group, &psample_groups_list, list)
		if ((group->group_num == group_num) && (group->net == net))
			return group;
	return NULL;
}

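/**
 * psample_group_get - get a sampling group, creating it if it does not exist
 * @net: network namespace the group lives in
 * @group_num: group number requested by the caller
 *
 * Looks up @group_num in @net and takes a reference on it; if no such group
 * exists yet, it is created (with GFP_ATOMIC) and a PSAMPLE_CMD_NEW_GROUP
 * notification is multicast to userspace.
 *
 * Return: the group on success, NULL if the group could not be allocated.
 */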
struct psample_group *psample_group_get(struct net *net, u32 group_num)
{
	struct psample_group *group;

	spin_lock_bh(&psample_groups_lock);

	group = psample_group_lookup(net, group_num);
	if (!group) {
		group = psample_group_create(net, group_num);
		if (!group)
			goto out;
	}
	group->refcount++;

out:
	spin_unlock_bh(&psample_groups_lock);
	return group;
}
EXPORT_SYMBOL_GPL(psample_group_get);

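/**
 * psample_group_take - take an additional reference on a sampling group
 * @group: group on which the caller already holds a reference
 */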
void psample_group_take(struct psample_group *group)
{
	spin_lock_bh(&psample_groups_lock);
	group->refcount++;
	spin_unlock_bh(&psample_groups_lock);
}
EXPORT_SYMBOL_GPL(psample_group_take);

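/**
 * psample_group_put - release a reference on a sampling group
 * @group: group to release
 *
 * When the last reference is dropped, the group is removed from the global
 * list, a PSAMPLE_CMD_DEL_GROUP notification is multicast and the memory is
 * freed after an RCU grace period.
 */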
void psample_group_put(struct psample_group *group)
{
	spin_lock_bh(&psample_groups_lock);

	if (--group->refcount == 0)
		psample_group_destroy(group);

	spin_unlock_bh(&psample_groups_lock);
}
EXPORT_SYMBOL_GPL(psample_group_put);

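/*
 * Emit the individual PSAMPLE_TUNNEL_KEY_ATTR_* attributes describing the
 * tunnel key attached to a sampled packet. Any attribute added here should
 * also be accounted for in psample_tunnel_meta_len() below.
 */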
static int __psample_ip_tun_to_nlattr(struct sk_buff *skb,
			      struct ip_tunnel_info *tun_info)
{
	unsigned short tun_proto = ip_tunnel_info_af(tun_info);
	const void *tun_opts = ip_tunnel_info_opts(tun_info);
	const struct ip_tunnel_key *tun_key = &tun_info->key;
	int tun_opts_len = tun_info->options_len;

	if (tun_key->tun_flags & TUNNEL_KEY &&
	    nla_put_be64(skb, PSAMPLE_TUNNEL_KEY_ATTR_ID, tun_key->tun_id,
			 PSAMPLE_TUNNEL_KEY_ATTR_PAD))
		return -EMSGSIZE;

	if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE))
		return -EMSGSIZE;

	switch (tun_proto) {
	case AF_INET:
		if (tun_key->u.ipv4.src &&
		    nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_SRC,
				    tun_key->u.ipv4.src))
			return -EMSGSIZE;
		if (tun_key->u.ipv4.dst &&
		    nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_DST,
				    tun_key->u.ipv4.dst))
			return -EMSGSIZE;
		break;
	case AF_INET6:
		if (!ipv6_addr_any(&tun_key->u.ipv6.src) &&
		    nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_SRC,
				     &tun_key->u.ipv6.src))
			return -EMSGSIZE;
		if (!ipv6_addr_any(&tun_key->u.ipv6.dst) &&
		    nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_DST,
				     &tun_key->u.ipv6.dst))
			return -EMSGSIZE;
		break;
	}
	if (tun_key->tos &&
	    nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TOS, tun_key->tos))
		return -EMSGSIZE;
	if (nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TTL, tun_key->ttl))
		return -EMSGSIZE;
	if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
		return -EMSGSIZE;
	if ((tun_key->tun_flags & TUNNEL_CSUM) &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_CSUM))
		return -EMSGSIZE;
	if (tun_key->tp_src &&
	    nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src))
		return -EMSGSIZE;
	if (tun_key->tp_dst &&
	    nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst))
		return -EMSGSIZE;
	if ((tun_key->tun_flags & TUNNEL_OAM) &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_OAM))
		return -EMSGSIZE;
	if (tun_opts_len) {
		if (tun_key->tun_flags & TUNNEL_GENEVE_OPT &&
		    nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_GENEVE_OPTS,
			    tun_opts_len, tun_opts))
			return -EMSGSIZE;
		else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT &&
			 nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
				 tun_opts_len, tun_opts))
			return -EMSGSIZE;
	}

	return 0;
}

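/*
 * Wrap the tunnel attributes in a PSAMPLE_ATTR_TUNNEL nest, cancelling the
 * nest if any of them does not fit in @skb.
 */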
static int psample_ip_tun_to_nlattr(struct sk_buff *skb,
			    struct ip_tunnel_info *tun_info)
{
	struct nlattr *nla;
	int err;

	nla = nla_nest_start_noflag(skb, PSAMPLE_ATTR_TUNNEL);
	if (!nla)
		return -EMSGSIZE;

	err = __psample_ip_tun_to_nlattr(skb, tun_info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	}

	nla_nest_end(skb, nla);

	return 0;
}

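/*
 * Worst-case netlink size of the tunnel metadata that
 * __psample_ip_tun_to_nlattr() will emit, used when sizing the sample
 * message.
 */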
static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info)
{
	unsigned short tun_proto = ip_tunnel_info_af(tun_info);
	const struct ip_tunnel_key *tun_key = &tun_info->key;
	int tun_opts_len = tun_info->options_len;
	int sum = 0;

	if (tun_key->tun_flags & TUNNEL_KEY)
		sum += nla_total_size(sizeof(u64));

	if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE)
		sum += nla_total_size(0);

	switch (tun_proto) {
	case AF_INET:
		if (tun_key->u.ipv4.src)
			sum += nla_total_size(sizeof(u32));
		if (tun_key->u.ipv4.dst)
			sum += nla_total_size(sizeof(u32));
		break;
	case AF_INET6:
		if (!ipv6_addr_any(&tun_key->u.ipv6.src))
			sum += nla_total_size(sizeof(struct in6_addr));
		if (!ipv6_addr_any(&tun_key->u.ipv6.dst))
			sum += nla_total_size(sizeof(struct in6_addr));
		break;
	}
	if (tun_key->tos)
		sum += nla_total_size(sizeof(u8));
	sum += nla_total_size(sizeof(u8));	/* TTL */
	if (tun_key->tun_flags & TUNNEL_DONT_FRAGMENT)
		sum += nla_total_size(0);
	if (tun_key->tun_flags & TUNNEL_CSUM)
		sum += nla_total_size(0);
	if (tun_key->tp_src)
		sum += nla_total_size(sizeof(u16));
	if (tun_key->tp_dst)
		sum += nla_total_size(sizeof(u16));
	if (tun_key->tun_flags & TUNNEL_OAM)
		sum += nla_total_size(0);
	if (tun_opts_len) {
		if (tun_key->tun_flags & TUNNEL_GENEVE_OPT)
			sum += nla_total_size(tun_opts_len);
		else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT)
			sum += nla_total_size(tun_opts_len);
	}

	return sum;
}

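/**
 * psample_sample_packet - report a sampled packet to userspace
 * @group: sampling group the packet was sampled for
 * @skb: the sampled packet
 * @trunc_size: maximum number of packet bytes to include in the report
 * @in_ifindex: input interface index, or 0 if not known
 * @out_ifindex: output interface index, or 0 if not known
 * @sample_rate: sampling rate used by the caller (one packet in @sample_rate)
 *
 * Builds a PSAMPLE_CMD_SAMPLE genetlink message carrying the metadata
 * attributes, any tunnel metadata attached to @skb, and up to @trunc_size
 * bytes of packet data (further truncated so the whole message fits in
 * PSAMPLE_MAX_PACKET_SIZE), then multicasts it on the SAMPLE group of the
 * group's network namespace. Allocations use GFP_ATOMIC, so this may be
 * called from the datapath; on failure the sample is simply dropped.
 *
 * Illustrative use only (the surrounding driver state is made up):
 *
 *	group = psample_group_get(dev_net(dev), group_num);
 *	if (group) {
 *		psample_sample_packet(group, skb, trunc_size,
 *				      skb->dev->ifindex, 0, rate);
 *		psample_group_put(group);
 *	}
 */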
void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
			   u32 trunc_size, int in_ifindex, int out_ifindex,
			   u32 sample_rate)
{
	struct ip_tunnel_info *tun_info;
	struct sk_buff *nl_skb;
	int data_len;
	int meta_len;
	void *data;
	int ret;

	meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) +
		   (out_ifindex ? nla_total_size(sizeof(u16)) : 0) +
		   nla_total_size(sizeof(u32)) +	/* sample_rate */
		   nla_total_size(sizeof(u32)) +	/* orig_size */
		   nla_total_size(sizeof(u32)) +	/* group_num */
		   nla_total_size(sizeof(u32));		/* seq */

	tun_info = skb_tunnel_info(skb);
	if (tun_info)
		meta_len += psample_tunnel_meta_len(tun_info);

	data_len = min(skb->len, trunc_size);
	if (meta_len + nla_total_size(data_len) > PSAMPLE_MAX_PACKET_SIZE)
		data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
			    - NLA_ALIGNTO;

	nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC);
	if (unlikely(!nl_skb))
		return;

	data = genlmsg_put(nl_skb, 0, 0, &psample_nl_family, 0,
			   PSAMPLE_CMD_SAMPLE);
	if (unlikely(!data))
		goto error;

	if (in_ifindex) {
		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_IIFINDEX, in_ifindex);
		if (unlikely(ret < 0))
			goto error;
	}

	if (out_ifindex) {
		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OIFINDEX, out_ifindex);
		if (unlikely(ret < 0))
			goto error;
	}

	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_RATE, sample_rate);
	if (unlikely(ret < 0))
		goto error;

	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_ORIGSIZE, skb->len);
	if (unlikely(ret < 0))
		goto error;

	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
	if (unlikely(ret < 0))
		goto error;

	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_GROUP_SEQ, group->seq++);
	if (unlikely(ret < 0))
		goto error;

	if (data_len) {
		int nla_len = nla_total_size(data_len);
		struct nlattr *nla;

		nla = skb_put(nl_skb, nla_len);
		nla->nla_type = PSAMPLE_ATTR_DATA;
		nla->nla_len = nla_attr_size(data_len);

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			goto error;
	}

	if (tun_info) {
		ret = psample_ip_tun_to_nlattr(nl_skb, tun_info);
		if (unlikely(ret < 0))
			goto error;
	}

	genlmsg_end(nl_skb, data);
	genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
				PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC);

	return;
error:
	pr_err_ratelimited("Could not create psample log message\n");
	nlmsg_free(nl_skb);
}
EXPORT_SYMBOL_GPL(psample_sample_packet);

static int __init psample_module_init(void)
{
	return genl_register_family(&psample_nl_family);
}

static void __exit psample_module_exit(void)
{
	genl_unregister_family(&psample_nl_family);
}

module_init(psample_module_init);
module_exit(psample_module_exit);

MODULE_AUTHOR("Yotam Gigi <[email protected]>");
MODULE_DESCRIPTION("netlink channel for packet sampling");
MODULE_LICENSE("GPL v2");