// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
 * have spent some effort to ensure the datapath with redirect maps does not
 * use any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use call_rcu()
 * to wait for an RCU grace period before freeing the old data structures. This
 * ensures the datapath always has a valid copy. However, the datapath does a
 * "flush" operation that pushes any pending packets in the driver outside the
 * RCU critical section. Each bpf_dtab_netdev tracks these pending operations
 * using a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed
 * until this list is empty, indicating outstanding flush operations have
 * completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that contain
 * a reference to the net device and remove them. This is a two-step process:
 * (a) dereference the bpf_dtab_netdev object in netdev_map and (b) check
 * whether the ifindex is the same as that of the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map, we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to
 * be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion is different.
 */
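
/* Illustration (a minimal sketch, not part of this file; map name and sizes
 * are made up): the typical user of a devmap is an XDP program redirecting
 * packets out of a map slot with bpf_redirect_map():
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__type(key, __u32);
 *		__type(value, struct bpf_devmap_val);
 *		__uint(max_entries, 64);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int redirect_prog(struct xdp_md *ctx)
 *	{
 *		// frame egresses via the device in slot 0
 *		return bpf_redirect_map(&tx_ports, 0, XDP_PASS);
 *	}
 *
 * The lower bits of the flags argument (XDP_PASS here) select the action
 * returned when the map lookup misses.
 */
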
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	struct bpf_prog *xdp_prog;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		/* hash table size must be power of 2; roundup_pow_of_two() can
		 * overflow into UB on 32-bit arches, so check that first
		 */
		if (dtab->map.max_entries > 1UL << 31)
			return -EINVAL;

		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			return -ENOMEM;
	}

	return 0;
}
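
/* For illustration (a hedged sketch, not part of this file): the two value
 * sizes checked above correspond to how user space updates the map, depending
 * on the value_size the map was created with. Using libbpf, with made-up
 * numbers:
 *
 *	struct bpf_devmap_val val = { .ifindex = 4 };	// 8-byte values
 *	bpf_map_update_elem(map_fd, &key, &val, 0);
 *
 *	__u32 ifindex = 4;				// 4-byte values
 *	bpf_map_update_elem(map_fd, &key, &ifindex, 0);
 *
 * With 8-byte values, val.bpf_prog.fd may carry the fd of an XDP program
 * with expected_attach_type BPF_XDP_DEVMAP, run on frames after redirect.
 */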

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		bpf_map_area_free(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map) were
	 * disconnected from events. The following synchronize_rcu() guarantees
	 * that both rcu read critical sections complete, and it waits for
	 * preempt-disable regions (NAPI being the relevant context here), so we
	 * are certain there will be no further reads against the netdev_map and
	 * all flush operations are complete. Flush operations can only be done
	 * from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	/* bpf_redirect_info->map is assigned in __bpf_xdp_redirect_map()
	 * during the NAPI callback and cleared after the XDP redirect. There is
	 * no explicit RCU read section which protects bpf_redirect_info->map,
	 * but local_bh_disable() also marks the beginning of an RCU section.
	 * This makes the complete softirq callback RCU protected. Thus, after
	 * the following synchronize_rcu() there is no bpf_redirect_info->map ==
	 * map assignment.
	 */
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = rcu_dereference_raw(dtab->netdev_map[i]);
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	bpf_map_area_free(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
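
/* Illustration (hedged sketch, not part of this file): the semantics above
 * give the usual user-space iteration pattern, where a NULL (or out-of-range)
 * key restarts from the first slot:
 *
 *	__u32 key, next;
 *	int err = bpf_map_get_next_key(map_fd, NULL, &next);
 *
 *	while (!err) {
 *		key = next;
 *		// look up or act on key here
 *		err = bpf_map_get_next_key(map_fd, &key, &next);
 *	}
 */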

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				    void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

 find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
				struct xdp_frame **frames, int n,
				struct net_device *dev)
{
	struct xdp_txq_info txq = { .dev = dev };
	struct xdp_buff xdp;
	int i, nframes = 0;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		xdp_convert_frame_to_buff(xdpf, &xdp);
		xdp.txq = &txq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (unlikely(err < 0))
				xdp_return_frame_rx_napi(xdpf);
			else
				frames[nframes++] = xdpf;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame_rx_napi(xdpf);
			break;
		}
	}
	return nframes; /* sent frames count */
}
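
/* Illustration (hedged sketch, not part of this file): the xdp_prog run above
 * is loaded with expected_attach_type BPF_XDP_DEVMAP and installed via the
 * map value's bpf_prog.fd. Such a program can filter per egress device:
 *
 *	SEC("xdp/devmap")
 *	int egress_filter(struct xdp_md *ctx)
 *	{
 *		// ctx->egress_ifindex identifies the redirect target here
 *		return XDP_PASS;
 *	}
 *
 * Anything other than XDP_PASS causes the frame to be dropped, as the switch
 * statement above shows.
 */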

static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	unsigned int cnt = bq->count;
	int sent = 0, err = 0;
	int to_send = cnt;
	int i;

	if (unlikely(!cnt))
		return;

	for (i = 0; i < cnt; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	if (bq->xdp_prog) {
		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
		if (!to_send)
			goto out;
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
	if (sent < 0) {
		/* If ndo_xdp_xmit fails with an errno, no frames have
		 * been xmit'ed.
		 */
		err = sent;
		sent = 0;
	}

	/* If not all frames have been transmitted, it is our
	 * responsibility to free them
	 */
	for (i = sent; unlikely(i < to_send); i++)
		xdp_return_frame_rx_napi(bq->q[i]);

out:
	bq->count = 0;
	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
}

/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
 * driver before returning from its napi->poll() routine. See the comment above
 * xdp_do_flush() in filter.c.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
		bq->dev_rx = NULL;
		bq->xdp_prog = NULL;
		__list_del_clearprev(&bq->flush_node);
	}
}
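
/* Illustration (hedged sketch with made-up driver names, not part of this
 * file): the contract above means an XDP-capable driver ends its poll routine
 * roughly like this, so queued bulk frames always leave within one NAPI cycle:
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = mydrv_clean_rx(napi, budget); // may redirect frames
 *
 *		xdp_do_flush();	// drains the per-CPU dev flush list
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 */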

#ifdef CONFIG_DEBUG_NET
bool dev_check_flush(void)
{
	if (list_empty(bpf_net_ctx_get_dev_flush_list()))
		return false;
	__dev_flush();
	return true;
}
#endif

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = rcu_dereference_check(dtab->netdev_map[key],
				    rcu_read_lock_bh_held());
	return obj;
}

/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus percpu variable
 * access is safe, and map elements stick around. See the comment above
 * xdp_do_flush() in filter.c.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
	struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frames in the
	 * bulk_queue, because the bq is stored per-CPU and must be flushed
	 * at the end of the net_device driver's NAPI function.
	 *
	 * Do the same with xdp_prog and flush_list since these fields
	 * are only ever modified together.
	 */
	if (!bq->dev_rx) {
		bq->dev_rx = dev_rx;
		bq->xdp_prog = xdp_prog;
		list_add(&bq->flush_node, flush_list);
	}

	bq->q[bq->count++] = xdpf;
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
				struct net_device *dev_rx,
				struct bpf_prog *xdp_prog)
{
	int err;

	if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
		return -EOPNOTSUPP;

	if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
		     xdp_frame_has_frags(xdpf)))
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
	if (unlikely(err))
		return err;

	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
	return 0;
}

static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
{
	struct xdp_txq_info txq = { .dev = dst->dev };
	struct xdp_buff xdp;
	u32 act;

	if (!dst->xdp_prog)
		return XDP_PASS;

	__skb_pull(skb, skb->mac_len);
	xdp.txq = &txq;

	act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
	switch (act) {
	case XDP_PASS:
		__skb_push(skb, skb->mac_len);
		break;
	default:
		bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dst->dev, dst->xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		kfree_skb(skb);
		break;
	}

	return act;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
}

static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
{
	if (!obj)
		return false;

	if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
		return false;

	if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
		     xdp_frame_has_frags(xdpf)))
		return false;

	if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
		return false;

	return true;
}

static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
				 struct net_device *dev_rx,
				 struct xdp_frame *xdpf)
{
	struct xdp_frame *nxdpf;

	nxdpf = xdpf_clone(xdpf);
	if (!nxdpf)
		return -ENOMEM;

	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);

	return 0;
}

static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
{
	while (num_excluded--) {
		if (ifindex == excluded[num_excluded])
			return true;
	}
	return false;
}

/* Get ifindex of each upper device. 'indexes' must be able to hold at
 * least MAX_NEST_DEV elements.
 * Returns the number of ifindexes added.
 */
static int get_upper_ifindexes(struct net_device *dev, int *indexes)
{
	struct net_device *upper;
	struct list_head *iter;
	int n = 0;

	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
		indexes[n++] = upper->ifindex;
	}
	return n;
}

int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	int excluded_devices[1+MAX_NEST_DEV];
	struct hlist_head *head;
	int num_excluded = 0;
	unsigned int i;
	int err;

	if (exclude_ingress) {
		num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
		excluded_devices[num_excluded++] = dev_rx->ifindex;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = rcu_dereference_check(dtab->netdev_map[i],
						    rcu_read_lock_bh_held());
			if (!is_valid_dst(dst, xdpf))
				continue;

			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
			if (err)
				return err;

			last_dst = dst;
		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_rcu(dst, head, index_hlist,
						 lockdep_is_held(&dtab->index_lock)) {
				if (!is_valid_dst(dst, xdpf))
					continue;

				if (is_ifindex_excluded(excluded_devices, num_excluded,
							dst->dev->ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the last copy of the frame */
	if (last_dst)
		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
	else
		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */

	return 0;
}
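
/* Illustration (hedged sketch, not part of this file; the map name is made
 * up): the multicast path above is reached when an XDP program passes
 * BPF_F_BROADCAST, optionally with BPF_F_EXCLUDE_INGRESS, to
 * bpf_redirect_map():
 *
 *	SEC("xdp")
 *	int broadcast_prog(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&tx_ports, 0,
 *					BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
 *	}
 *
 * The key is ignored with BPF_F_BROADCAST; the frame is cloned to every valid
 * entry, here excluding the ingress device and its upper devices.
 */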

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;

	/* Redirect has already succeeded semantically at this point, so we just
	 * return 0 even if the packet is dropped. The helper below takes care
	 * of freeing the skb.
	 */
	if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
		return 0;

	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
				  struct sk_buff *skb,
				  struct bpf_prog *xdp_prog)
{
	struct sk_buff *nskb;
	int err;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	err = dev_map_generic_redirect(dst, nskb, xdp_prog);
	if (unlikely(err)) {
		consume_skb(nskb);
		return err;
	}

	return 0;
}

int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	int excluded_devices[1+MAX_NEST_DEV];
	struct hlist_head *head;
	struct hlist_node *next;
	int num_excluded = 0;
	unsigned int i;
	int err;

	if (exclude_ingress) {
		num_excluded = get_upper_ifindexes(dev, excluded_devices);
		excluded_devices[num_excluded++] = dev->ifindex;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = rcu_dereference_check(dtab->netdev_map[i],
						    rcu_read_lock_bh_held());
			if (!dst)
				continue;

			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
			if (err)
				return err;

			last_dst = dst;
		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
				if (is_ifindex_excluded(excluded_devices, num_excluded,
							dst->dev->ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the first skb and return */
	if (last_dst)
		return dev_map_generic_redirect(last_dst, skb, xdp_prog);

	/* dtab is empty */
	consume_skb(skb);
	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								*(u32 *)key);
	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static long dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
	if (old_dev) {
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		atomic_dec((atomic_t *)&dtab->items);
	}
	return 0;
}

static long dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
				   GFP_NOWAIT | __GFP_NOWARN,
				   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
		    !bpf_prog_map_compatible(&dtab->map, prog))
			goto err_put_prog;
	}

	dev->idx = idx;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}

static long __dev_map_update_elem(struct net *net, struct bpf_map *map,
				  void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* can not specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure RCU critical sections have completed,
	 * remembering that the driver-side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	else
		atomic_inc((atomic_t *)&dtab->items);

	return 0;
}

static long dev_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static long __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				       void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static long dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				     u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					 map, key, value, map_flags);
}
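
/* Illustration (hedged sketch, not part of this file): with DEVMAP_HASH the
 * key is an arbitrary u32, conventionally the ifindex itself, so entries need
 * not be densely packed. From user space, with made-up values:
 *
 *	struct bpf_devmap_val val = { .ifindex = 17 };
 *	__u32 key = val.ifindex;	// common convention: key == ifindex
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);
 */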

static long dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_lookup_elem);
}

static long dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_hash_lookup_elem);
}

static u64 dev_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u64 usage = sizeof(struct bpf_dtab);

	if (map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)
		usage += (u64)dtab->n_buckets * sizeof(struct hlist_head);
	else
		usage += (u64)map->max_entries * sizeof(struct bpf_dtab_netdev *);
	usage += atomic_read((atomic_t *)&dtab->items) *
			 (u64)sizeof(struct bpf_dtab_netdev);
	return usage;
}

BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = dev_map_mem_usage,
	.map_btf_id = &dev_map_btf_ids[0],
	.map_redirect = dev_map_redirect,
};

const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = dev_map_mem_usage,
	.map_btf_id = &dev_map_btf_ids[0],
	.map_redirect = dev_hash_map_redirect,
};

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = rcu_dereference(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
				if (dev == odev) {
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
					atomic_dec((atomic_t *)&dtab->items);
				}
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	/* Ensure the tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	return 0;
}

subsys_initcall(dev_map_init);