/* xref: /linux-6.15/kernel/bpf/offload.c (revision ce628966) */
/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects bpf_prog_offload_devs, bpf_map_offload_devs and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);
static LIST_HEAD(bpf_prog_offload_devs);
static LIST_HEAD(bpf_map_offload_devs);

static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}
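
/*
 * A netdev counts as offload-capable iff its driver implements the
 * ->ndo_bpf() callback in struct net_device_ops. A minimal, purely
 * illustrative sketch of a driver advertising the callback (the "foodrv"
 * names are hypothetical, not part of this file or any in-tree driver):
 *
 *	static int foodrv_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 *	static const struct net_device_ops foodrv_netdev_ops = {
 *		.ndo_bpf	= foodrv_bpf,
 *	};
 */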

int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	if (offload->netdev->reg_state != NETREG_REGISTERED) {
		err = -EINVAL;
		goto err_unlock;
	}
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}
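
/*
 * Userspace requests offload by setting prog_ifindex in union bpf_attr
 * before BPF_PROG_LOAD; the ifindex is resolved in the caller's netns, as
 * above. A hedged userspace sketch (raw bpf(2) syscall assumed, error
 * handling elided):
 *
 *	union bpf_attr attr = {
 *		.prog_type	= BPF_PROG_TYPE_XDP,
 *		.insns		= (__u64)(unsigned long)insns,
 *		.insn_cnt	= insn_cnt,
 *		.license	= (__u64)(unsigned long)"GPL",
 *		.prog_ifindex	= if_nametoindex("eth0"),
 *	};
 *	int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */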

static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
			     struct netdev_bpf *data)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct net_device *netdev;

	ASSERT_RTNL();

	if (!offload)
		return -ENODEV;
	netdev = offload->netdev;

	data->command = cmd;

	return netdev->netdev_ops->ndo_bpf(netdev, data);
}
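
/*
 * Every program offload request reaches the driver through this helper,
 * under RTNL. Driver side, the conventional shape is a switch on
 * data->command; a hypothetical sketch ("foodrv" again illustrative only):
 *
 *	static int foodrv_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case BPF_OFFLOAD_VERIFIER_PREP:
 *			bpf->verifier.ops = &foodrv_verifier_ops;
 *			return 0;
 *		case BPF_OFFLOAD_TRANSLATE:
 *			return foodrv_translate(dev, bpf->offload.prog);
 *		case BPF_OFFLOAD_DESTROY:
 *			foodrv_destroy(dev, bpf->offload.prog);
 *			return 0;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */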

int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
	struct netdev_bpf data = {};
	int err;

	data.verifier.prog = env->prog;

	rtnl_lock();
	err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
	if (err)
		goto exit_unlock;

	env->prog->aux->offload->dev_ops = data.verifier.ops;
	env->prog->aux->offload->dev_state = true;
exit_unlock:
	rtnl_unlock();
	return err;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}
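
/*
 * dev_ops->insn_hook() lets the driver veto each instruction as the core
 * verifier walks the program. A minimal driver-side sketch, assuming this
 * tree's struct bpf_prog_offload_ops carries the callback (hypothetical
 * "foodrv" names):
 *
 *	static int foodrv_insn_hook(struct bpf_verifier_env *env,
 *				    int insn_idx, int prev_insn_idx)
 *	{
 *		const struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
 *
 *		return foodrv_insn_supported(insn) ? 0 : -EINVAL;
 *	}
 *
 *	static const struct bpf_prog_offload_ops foodrv_verifier_ops = {
 *		.insn_hook	= foodrv_insn_hook,
 *	};
 */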

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};

	data.offload.prog = prog;

	if (offload->dev_state)
		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct netdev_bpf data = {};
	int ret;

	data.offload.prog = prog;

	rtnl_lock();
	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
	rtnl_unlock();

	return ret;
}

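/* Trap for host execution: an offloaded program only ever runs on the
 * device, so if the host CPU ever calls bpf_func we want a loud warning
 * rather than silently returning garbage.
 */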
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	void *res;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}
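
/*
 * netns_dev/netns_ino let userspace disambiguate which network namespace
 * info->ifindex belongs to. A hedged userspace sketch of the query (raw
 * bpf(2) syscall, error handling elided):
 *
 *	struct bpf_prog_info info = {};
 *	union bpf_attr attr = {
 *		.info.bpf_fd	= prog_fd,
 *		.info.info_len	= sizeof(info),
 *		.info.info	= (__u64)(unsigned long)&info,
 *	};
 *	syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *
 * after which info.ifindex, info.netns_dev and info.netns_ino together
 * identify the offload device.
 */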

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = kzalloc(sizeof(*offmap), GFP_USER);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &bpf_map_offload_devs);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	kfree(offmap);
	return ERR_PTR(err);
}
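
/*
 * On BPF_OFFLOAD_MAP_ALLOC the driver is expected to allocate its device
 * state and point offmap->dev_ops at a struct bpf_map_dev_ops table, which
 * then serves the element operations below. Illustrative sketch only, with
 * hypothetical "foodrv" names:
 *
 *	static int foodrv_map_alloc(struct net_device *dev,
 *				    struct bpf_offloaded_map *offmap)
 *	{
 *		if (offmap->map.map_type != BPF_MAP_TYPE_HASH)
 *			return -EOPNOTSUPP;
 *		offmap->dev_priv = foodrv_map_create(dev, &offmap->map);
 *		if (!offmap->dev_priv)
 *			return -ENOMEM;
 *		offmap->dev_ops = &foodrv_map_ops;
 *		return 0;
 *	}
 *
 *	static const struct bpf_map_dev_ops foodrv_map_ops = {
 *		.map_get_next_key	= foodrv_map_get_next_key,
 *		.map_lookup_elem	= foodrv_map_lookup_elem,
 *		.map_update_elem	= foodrv_map_update_elem,
 *		.map_delete_elem	= foodrv_map_delete_elem,
 *	};
 */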

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	kfree(offmap);
}

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

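/* flags follow the generic map update semantics: BPF_ANY (0), BPF_NOEXIST (1)
 * or BPF_EXIST (2); anything above BPF_EXIST is rejected here, before the
 * request ever reaches the device.
 */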
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

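/* The verifier uses this to enforce that an offloaded program only uses
 * maps offloaded to the very same netdev; mixing devices, or mixing host
 * and device objects, is rejected at load time.
 */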
bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	struct bpf_prog_offload *offload;
	bool ret;

	if (!bpf_prog_is_dev_bound(prog->aux) || !bpf_map_is_dev_bound(map))
		return false;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	offmap = map_to_offmap(map);

	ret = offload && offload->netdev == offmap->netdev;
	up_read(&bpf_devs_lock);

	return ret;
}

static void bpf_offload_orphan_all_progs(struct net_device *netdev)
{
	struct bpf_prog_offload *offload, *tmp;

	list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, offloads)
		if (offload->netdev == netdev)
			__bpf_prog_offload_destroy(offload->prog);
}

static void bpf_offload_orphan_all_maps(struct net_device *netdev)
{
	struct bpf_offloaded_map *offmap, *tmp;

	list_for_each_entry_safe(offmap, tmp, &bpf_map_offload_devs, offloads)
		if (offmap->netdev == netdev)
			__bpf_map_offload_destroy(offmap);
}

static int bpf_offload_notification(struct notifier_block *notifier,
				    ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_UNREGISTER:
		/* ignore namespace changes */
		if (netdev->reg_state != NETREG_UNREGISTERING)
			break;

		down_write(&bpf_devs_lock);
		bpf_offload_orphan_all_progs(netdev);
		bpf_offload_orphan_all_maps(netdev);
		up_write(&bpf_devs_lock);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block bpf_offload_notifier = {
	.notifier_call = bpf_offload_notification,
};

static int __init bpf_offload_init(void)
{
	register_netdevice_notifier(&bpf_offload_notifier);
	return 0;
}

subsys_initcall(bpf_offload_init);