/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>
#include <net/xdp.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;
	void *priv;
};

struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev; /* NULL when bound-only */
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

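/* Hash table mapping a struct net_device pointer to its
 * bpf_offload_netdev entry; protected by bpf_devs_lock.
 */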
static struct rhashtable offdevs;

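/* A netdev can host offloaded or device-bound programs and maps only
 * if its driver implements the ndo_bpf callback.
 */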
static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

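/* Add an entry for @netdev to the offdevs table.  @offdev may be NULL,
 * in which case the entry is bound-only: it tracks device-bound
 * programs but has no offload device behind it.
 */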
static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
					     struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_free;
	}

	if (offdev)
		list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	return 0;

err_free:
	kfree(ondev);
	return err;
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

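/* Forward a map allocation or free request to the device driver
 * through its ndo_bpf callback.  RTNL must be held.
 */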
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

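/* Remove @netdev from the offdevs table.  If the offload device has
 * another netdev left, migrate the programs and maps to it; otherwise
 * destroy them, as they can no longer be used.
 */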
static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
						struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev = NULL;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		return;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));

	/* Try to move the objects to another netdev of the device */
	if (offdev) {
		list_del(&ondev->offdev_netdevs);
		altdev = list_first_entry_or_null(&offdev->netdevs,
						  struct bpf_offload_netdev,
						  offdev_netdevs);
	}

	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
}

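/* Bind @prog to @netdev, creating a bound-only hashtable entry on
 * demand.  An offloaded program may only target a netdev that was
 * registered by an offload device.  Callers hold bpf_devs_lock for
 * writing.
 */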
static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;
	offload->netdev = netdev;

	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		if (bpf_prog_is_offloaded(prog->aux)) {
			err = -EINVAL;
			goto err_free;
		}

		/* When only binding to the device, explicitly
		 * create an entry in the hashtable.
		 */
		err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
		if (err)
			goto err_free;
		ondev = bpf_offload_find_netdev(offload->netdev);
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);

	return 0;
err_free:
	kfree(offload);
	return err;
}

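/* Called at program load time when attr->prog_ifindex is set.  With
 * BPF_F_XDP_DEV_BOUND_ONLY the program stays host-run but may only be
 * attached to the given netdev; without the flag a full offload to the
 * device is requested.
 */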
int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct net_device *netdev;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags & ~(BPF_F_XDP_DEV_BOUND_ONLY | BPF_F_XDP_HAS_FRAGS))
		return -EINVAL;

	/* Frags are allowed only if program is dev-bound-only, but not
	 * if it is requesting bpf offload.
	 */
	if (attr->prog_flags & BPF_F_XDP_HAS_FRAGS &&
	    !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY))
		return -EINVAL;

	if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)
		return -EINVAL;

	netdev = dev_get_by_index(current->nsproxy->net_ns, attr->prog_ifindex);
	if (!netdev)
		return -EINVAL;

	err = bpf_dev_offload_check(netdev);
	if (err)
		goto out;

	prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);

	down_write(&bpf_devs_lock);
	err = __bpf_prog_dev_bound_init(prog, netdev);
	up_write(&bpf_devs_lock);

out:
	dev_put(netdev);
	return err;
}

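/* Make @new_prog device-bound to the same netdev as @old_prog.
 * Offloaded programs cannot be inherited this way.
 */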
int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog)
{
	int err;

	if (!bpf_prog_is_dev_bound(old_prog->aux))
		return 0;

	if (bpf_prog_is_offloaded(old_prog->aux))
		return -EINVAL;

	new_prog->aux->dev_bound = old_prog->aux->dev_bound;
	new_prog->aux->offload_requested = old_prog->aux->offload_requested;

	down_write(&bpf_devs_lock);
	if (!old_prog->aux->offload) {
		err = -EINVAL;
		goto out;
	}

	err = __bpf_prog_dev_bound_init(new_prog, old_prog->aux->offload->netdev);

out:
	up_write(&bpf_devs_lock);
	return err;
}

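/* The verifier hooks below proxy into the offload driver's ops under
 * bpf_devs_lock taken for reading, so the device cannot be unregistered
 * while a callback runs; a cleared prog->aux->offload means the netdev
 * went away, hence the -ENODEV default.
 */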
int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(prog);
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

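/* Insn rewriting during verifier optimizations is best effort: once a
 * driver callback fails or is missing, opt_failed is latched and all
 * further rewrites for this program are skipped.
 */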
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	const struct bpf_prog_offload_ops *ops;
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (!offload->opt_failed && offload->offdev->ops->remove_insns)
			ret = offload->offdev->ops->remove_insns(env, off, cnt);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
{
	struct bpf_offload_netdev *ondev;
	struct net_device *netdev;

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload) {
		list_del_init(&prog->aux->offload->offloads);

		netdev = prog->aux->offload->netdev;
		__bpf_prog_offload_destroy(prog);

		ondev = bpf_offload_find_netdev(netdev);
		if (!ondev->offdev && list_empty(&ondev->progs))
			__bpf_offload_dev_netdev_unregister(NULL, netdev);
	}
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);

	return ret;
}

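/* An offloaded program must never execute on the host, so point
 * bpf_func at a stub that only warns if it is ever called.
 */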
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	int res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

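/* Note the lock order: RTNL is taken before bpf_devs_lock, never the
 * other way around (see the comment above bpf_devs_lock).  RTNL is
 * needed here because allocation calls into the driver's ndo_bpf.
 */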
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = bpf_map_area_alloc(sizeof(*offmap), NUMA_NO_NODE);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	bpf_map_area_free(offmap);
	return ERR_PTR(err);
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	bpf_map_area_free(offmap);
}

u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
{
	/* The memory dynamically allocated in netdev dev_ops is not counted */
	return sizeof(struct bpf_offloaded_map);
}

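/* The map accessors below proxy to the device's dev_ops under
 * bpf_devs_lock; once __bpf_map_offload_destroy() has cleared the
 * netdev they fail with -ENODEV.
 */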
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	int res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

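/* A program matches a netdev if it is bound to that netdev directly,
 * or if both its own netdev and the target are handled by the same
 * offload device (e.g. two ports of one NIC).
 */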
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
{
	bool ret;

	if (bpf_prog_is_offloaded(lhs->aux) != bpf_prog_is_offloaded(rhs->aux))
		return false;

	down_read(&bpf_devs_lock);
	ret = lhs->aux->offload && rhs->aux->offload &&
	      lhs->aux->offload->netdev &&
	      lhs->aux->offload->netdev == rhs->aux->offload->netdev;
	up_read(&bpf_devs_lock);

	return ret;
}

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_offloaded(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

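/* Driver-facing registration API follows.  A minimal usage sketch;
 * my_bpf_dev_ops, priv and port are hypothetical placeholders, not
 * names defined in this file:
 *
 *	bdev = bpf_offload_dev_create(&my_bpf_dev_ops, priv);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	err = bpf_offload_dev_netdev_register(bdev, port->netdev);
 *
 * On teardown, RTNL must be held across unregister (see the
 * ASSERT_RTNL() in __bpf_offload_dev_netdev_unregister()):
 *
 *	rtnl_lock();
 *	bpf_offload_dev_netdev_unregister(bdev, port->netdev);
 *	rtnl_unlock();
 *	bpf_offload_dev_destroy(bdev);
 */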
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	int err;

	down_write(&bpf_devs_lock);
	err = __bpf_offload_dev_netdev_register(offdev, netdev);
	up_write(&bpf_devs_lock);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	down_write(&bpf_devs_lock);
	__bpf_offload_dev_netdev_unregister(offdev, netdev);
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
	return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);

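/* Called from the netdev unregister path under RTNL: drop the
 * bound-only entry for @dev, if one exists.  Entries owned by an
 * offload device are removed by the driver through
 * bpf_offload_dev_netdev_unregister() instead.
 */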
void bpf_dev_bound_netdev_unregister(struct net_device *dev)
{
	struct bpf_offload_netdev *ondev;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(dev);
	if (ondev && !ondev->offdev)
		__bpf_offload_dev_netdev_unregister(NULL, ondev->netdev);
	up_write(&bpf_devs_lock);
}

int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
			      struct bpf_prog_aux *prog_aux)
{
	if (!bpf_prog_is_dev_bound(prog_aux)) {
		bpf_log(log, "metadata kfuncs require device-bound program\n");
		return -EINVAL;
	}

	if (bpf_prog_is_offloaded(prog_aux)) {
		bpf_log(log, "metadata kfuncs can't be offloaded\n");
		return -EINVAL;
	}

	return 0;
}

void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id)
{
	const struct xdp_metadata_ops *ops;
	void *p = NULL;

	/* bpf_devs_lock is not held across resolving all kfuncs, so we
	 * can race with unregister_netdevice().  We rely on the
	 * bpf_dev_bound_match() check at attach time to render such a
	 * program unusable.
	 */
	down_read(&bpf_devs_lock);
	if (!prog->aux->offload)
		goto out;

	ops = prog->aux->offload->netdev->xdp_metadata_ops;
	if (!ops)
		goto out;

#define XDP_METADATA_KFUNC(name, _, __, xmo) \
	if (func_id == bpf_xdp_metadata_kfunc_id(name)) p = ops->xmo;
	XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

out:
	up_read(&bpf_devs_lock);

	return p;
}


static int __init bpf_offload_init(void)
{
	return rhashtable_init(&offdevs, &offdevs_params);
}

core_initcall(bpf_offload_init);