xref: /linux-6.15/kernel/bpf/devmap.c (revision fd8db077)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
3  */
4 
5 /* The devmap's primary use is as a backend map for the XDP BPF helper call
6  * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
7  * spent some effort to ensure the datapath with redirect maps does not use
8  * any locking. This is a quick note on the details.
9  *
10  * We have three possible paths into the devmap control plane: bpf
11  * syscalls, bpf programs, and driver-side xmit/flush operations. A bpf syscall
12  * will invoke an update, delete, or lookup operation. To ensure updates and
13  * deletes appear atomic from the datapath side, xchg() is used to modify the
14  * netdev_map array. Then, because the datapath does a read-only lookup into the
15  * netdev_map array from an RCU critical section, we use call_rcu() to wait for
16  * an RCU grace period before freeing the old data structures. This ensures the
17  * datapath always has a valid copy. However, the datapath does a "flush"
18  * operation that pushes any pending packets in the driver outside the RCU
19  * critical section. Each bpf_dtab_netdev tracks these pending operations using
20  * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
21  * this list is empty, indicating outstanding flush operations have completed.
22  *
23  * BPF syscalls may race with BPF program calls on any of the update, delete
24  * or lookup operations. As noted above, the xchg() operation also keeps the
25  * netdev_map consistent in this case. From the devmap side, BPF programs
26  * calling into these operations are the same as multiple user space threads
27  * making system calls.
28  *
29  * Finally, any of the above may race with a netdev_unregister notifier. The
30  * unregister notifier must search for net devices in the map structure that
31  * contain a reference to the net device and remove them. This is a two-step
32  * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
33  * check to see if the ifindex is the same as the net_device being removed.
34  * When removing the dev, a cmpxchg() is used to ensure the correct dev is
35  * removed; in the case of a concurrent update or delete operation it is
36  * possible that the initially referenced dev is no longer in the map. As the
37  * notifier hook walks the map, we know that new dev references cannot be
38  * added by the user because core infrastructure ensures dev_get_by_index()
39  * calls will fail at this point.
40  *
41  * The devmap_hash type is a map type which interprets keys as ifindexes and
42  * indexes these using a hashmap. This allows maps that use ifindex as key to be
43  * densely packed instead of having holes in the lookup array for unused
44  * ifindexes. The setup and packet enqueue/send code is shared between the two
45  * types of devmap; only the lookup and insertion differ.
46  */
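
/* For illustration only, a hedged sketch (not part of this file) of how a BPF
 * program typically drives the datapath described above. The map and function
 * names are hypothetical; the map type and helper are the real
 * BPF_MAP_TYPE_DEVMAP and bpf_redirect_map() this file implements.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(struct bpf_devmap_val));
 *		__uint(max_entries, 64);
 *	} tx_devmap SEC(".maps");
 *
 *	SEC("xdp")
 *	int redirect_example(struct xdp_md *ctx)
 *	{
 *		// slot 0 is assumed to have been populated from user space
 *		return bpf_redirect_map(&tx_devmap, 0, 0);
 *	}
 */
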
47 #include <linux/bpf.h>
48 #include <net/xdp.h>
49 #include <linux/filter.h>
50 #include <trace/events/xdp.h>
51 #include <linux/btf_ids.h>
52 
53 #define DEV_CREATE_FLAG_MASK \
54 	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
55 
56 struct xdp_dev_bulk_queue {
57 	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
58 	struct list_head flush_node;
59 	struct net_device *dev;
60 	struct net_device *dev_rx;
61 	struct bpf_prog *xdp_prog;
62 	unsigned int count;
63 };
64 
65 struct bpf_dtab_netdev {
66 	struct net_device *dev; /* must be first member, due to tracepoint */
67 	struct hlist_node index_hlist;
68 	struct bpf_prog *xdp_prog;
69 	struct rcu_head rcu;
70 	unsigned int idx;
71 	struct bpf_devmap_val val;
72 };
73 
74 struct bpf_dtab {
75 	struct bpf_map map;
76 	struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
77 	struct list_head list;
78 
79 	/* these are only used for DEVMAP_HASH type maps */
80 	struct hlist_head *dev_index_head;
81 	spinlock_t index_lock;
82 	unsigned int items;
83 	u32 n_buckets;
84 };
85 
86 static DEFINE_PER_CPU(struct list_head, dev_flush_list);
87 static DEFINE_SPINLOCK(dev_map_lock);
88 static LIST_HEAD(dev_map_list);
89 
90 static struct hlist_head *dev_map_create_hash(unsigned int entries,
91 					      int numa_node)
92 {
93 	int i;
94 	struct hlist_head *hash;
95 
96 	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
97 	if (hash != NULL)
98 		for (i = 0; i < entries; i++)
99 			INIT_HLIST_HEAD(&hash[i]);
100 
101 	return hash;
102 }
103 
104 static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
105 						    int idx)
106 {
107 	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
108 }
109 
110 static int dev_map_alloc_check(union bpf_attr *attr)
111 {
112 	u32 valsize = attr->value_size;
113 
114 	/* Check sanity of attributes. Two value sizes are supported:
115 	 * 4 bytes: ifindex
116 	 * 8 bytes: ifindex + prog fd
117 	 */
118 	if (attr->max_entries == 0 || attr->key_size != 4 ||
119 	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
120 	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
121 	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
122 		return -EINVAL;
123 
124 	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
125 		/* Hash table size must be a power of 2; roundup_pow_of_two()
126 		 * can overflow into UB on 32-bit arches
127 		 */
128 		if (attr->max_entries > 1UL << 31)
129 			return -EINVAL;
130 	}
131 
132 	return 0;
133 }
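
/* A minimal user-space sketch (not part of this file, assumes libbpf) of
 * creating a devmap with the larger of the two value layouts accepted by
 * dev_map_alloc_check() above: ifindex plus a program fd. The map name and
 * entry count are placeholders.
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf.h>
 *
 *	int create_devmap(void)
 *	{
 *		// 8-byte value: struct bpf_devmap_val { ifindex; bpf_prog.fd; }
 *		return bpf_map_create(BPF_MAP_TYPE_DEVMAP, "tx_devmap",
 *				      sizeof(__u32),
 *				      sizeof(struct bpf_devmap_val),
 *				      64, NULL);
 *	}
 */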
134 
135 static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
136 {
137 	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
138 	 * verifier prevents writes from the BPF side
139 	 */
140 	attr->map_flags |= BPF_F_RDONLY_PROG;
141 	bpf_map_init_from_attr(&dtab->map, attr);
142 
143 	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
144 		/* Hash table size must be a power of 2 */
145 		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
146 		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
147 							   dtab->map.numa_node);
148 		if (!dtab->dev_index_head)
149 			return -ENOMEM;
150 
151 		spin_lock_init(&dtab->index_lock);
152 	} else {
153 		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
154 						      sizeof(struct bpf_dtab_netdev *),
155 						      dtab->map.numa_node);
156 		if (!dtab->netdev_map)
157 			return -ENOMEM;
158 	}
159 
160 	return 0;
161 }
162 
163 static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
164 {
165 	struct bpf_dtab *dtab;
166 	int err;
167 
168 	dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
169 	if (!dtab)
170 		return ERR_PTR(-ENOMEM);
171 
172 	err = dev_map_init_map(dtab, attr);
173 	if (err) {
174 		bpf_map_area_free(dtab);
175 		return ERR_PTR(err);
176 	}
177 
178 	spin_lock(&dev_map_lock);
179 	list_add_tail_rcu(&dtab->list, &dev_map_list);
180 	spin_unlock(&dev_map_lock);
181 
182 	return &dtab->map;
183 }
184 
185 static void dev_map_free(struct bpf_map *map)
186 {
187 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
188 	int i;
189 
190 	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
191 	 * so the programs (there can be more than one that used this map) have been
192 	 * disconnected from events. The following synchronize_rcu() guarantees that
193 	 * both RCU read critical sections have completed and waits for
194 	 * preempt-disable regions (NAPI being the relevant context here) so we
195 	 * are certain there will be no further reads against the netdev_map and
196 	 * all flush operations are complete. Flush operations can only be done
197 	 * from NAPI context for this reason.
198 	 */
199 
200 	spin_lock(&dev_map_lock);
201 	list_del_rcu(&dtab->list);
202 	spin_unlock(&dev_map_lock);
203 
204 	bpf_clear_redirect_map(map);
205 	synchronize_rcu();
206 
207 	/* Make sure prior __dev_map_entry_free() calls have completed. */
208 	rcu_barrier();
209 
210 	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
211 		for (i = 0; i < dtab->n_buckets; i++) {
212 			struct bpf_dtab_netdev *dev;
213 			struct hlist_head *head;
214 			struct hlist_node *next;
215 
216 			head = dev_map_index_hash(dtab, i);
217 
218 			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
219 				hlist_del_rcu(&dev->index_hlist);
220 				if (dev->xdp_prog)
221 					bpf_prog_put(dev->xdp_prog);
222 				dev_put(dev->dev);
223 				kfree(dev);
224 			}
225 		}
226 
227 		bpf_map_area_free(dtab->dev_index_head);
228 	} else {
229 		for (i = 0; i < dtab->map.max_entries; i++) {
230 			struct bpf_dtab_netdev *dev;
231 
232 			dev = rcu_dereference_raw(dtab->netdev_map[i]);
233 			if (!dev)
234 				continue;
235 
236 			if (dev->xdp_prog)
237 				bpf_prog_put(dev->xdp_prog);
238 			dev_put(dev->dev);
239 			kfree(dev);
240 		}
241 
242 		bpf_map_area_free(dtab->netdev_map);
243 	}
244 
245 	bpf_map_area_free(dtab);
246 }
247 
248 static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
249 {
250 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
251 	u32 index = key ? *(u32 *)key : U32_MAX;
252 	u32 *next = next_key;
253 
254 	if (index >= dtab->map.max_entries) {
255 		*next = 0;
256 		return 0;
257 	}
258 
259 	if (index == dtab->map.max_entries - 1)
260 		return -ENOENT;
261 	*next = index + 1;
262 	return 0;
263 }
264 
265 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
266  * by local_bh_disable() (from XDP calls inside NAPI); lockdep accepts both.
267  * The lockdep_is_held() below additionally covers callers holding index_lock.
268  */
269 static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
270 {
271 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
272 	struct hlist_head *head = dev_map_index_hash(dtab, key);
273 	struct bpf_dtab_netdev *dev;
274 
275 	hlist_for_each_entry_rcu(dev, head, index_hlist,
276 				 lockdep_is_held(&dtab->index_lock))
277 		if (dev->idx == key)
278 			return dev;
279 
280 	return NULL;
281 }
282 
283 static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
284 				    void *next_key)
285 {
286 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
287 	u32 idx, *next = next_key;
288 	struct bpf_dtab_netdev *dev, *next_dev;
289 	struct hlist_head *head;
290 	int i = 0;
291 
292 	if (!key)
293 		goto find_first;
294 
295 	idx = *(u32 *)key;
296 
297 	dev = __dev_map_hash_lookup_elem(map, idx);
298 	if (!dev)
299 		goto find_first;
300 
301 	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
302 				    struct bpf_dtab_netdev, index_hlist);
303 
304 	if (next_dev) {
305 		*next = next_dev->idx;
306 		return 0;
307 	}
308 
309 	i = idx & (dtab->n_buckets - 1);
310 	i++;
311 
312  find_first:
313 	for (; i < dtab->n_buckets; i++) {
314 		head = dev_map_index_hash(dtab, i);
315 
316 		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
317 					    struct bpf_dtab_netdev,
318 					    index_hlist);
319 		if (next_dev) {
320 			*next = next_dev->idx;
321 			return 0;
322 		}
323 	}
324 
325 	return -ENOENT;
326 }
327 
328 static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
329 				struct xdp_frame **frames, int n,
330 				struct net_device *dev)
331 {
332 	struct xdp_txq_info txq = { .dev = dev };
333 	struct xdp_buff xdp;
334 	int i, nframes = 0;
335 
336 	for (i = 0; i < n; i++) {
337 		struct xdp_frame *xdpf = frames[i];
338 		u32 act;
339 		int err;
340 
341 		xdp_convert_frame_to_buff(xdpf, &xdp);
342 		xdp.txq = &txq;
343 
344 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
345 		switch (act) {
346 		case XDP_PASS:
347 			err = xdp_update_frame_from_buff(&xdp, xdpf);
348 			if (unlikely(err < 0))
349 				xdp_return_frame_rx_napi(xdpf);
350 			else
351 				frames[nframes++] = xdpf;
352 			break;
353 		default:
354 			bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
355 			fallthrough;
356 		case XDP_ABORTED:
357 			trace_xdp_exception(dev, xdp_prog, act);
358 			fallthrough;
359 		case XDP_DROP:
360 			xdp_return_frame_rx_napi(xdpf);
361 			break;
362 		}
363 	}
364 	return nframes; /* sent frames count */
365 }
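
/* For illustration, a hedged sketch (not part of this file) of the kind of
 * second-stage program dev_map_bpf_prog_run() executes on frames that have
 * already been redirected to this device. Such a program is loaded with
 * expected_attach_type BPF_XDP_DEVMAP and referenced through the bpf_prog.fd
 * member of the map value; the section and function names below follow common
 * libbpf conventions but are assumptions.
 *
 *	SEC("xdp/devmap")
 *	int egress_filter(struct xdp_md *ctx)
 *	{
 *		void *data = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *
 *		// drop frames too short to carry an Ethernet header
 *		if (data + sizeof(struct ethhdr) > data_end)
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 */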
366 
367 static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
368 {
369 	struct net_device *dev = bq->dev;
370 	unsigned int cnt = bq->count;
371 	int sent = 0, err = 0;
372 	int to_send = cnt;
373 	int i;
374 
375 	if (unlikely(!cnt))
376 		return;
377 
378 	for (i = 0; i < cnt; i++) {
379 		struct xdp_frame *xdpf = bq->q[i];
380 
381 		prefetch(xdpf);
382 	}
383 
384 	if (bq->xdp_prog) {
385 		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
386 		if (!to_send)
387 			goto out;
388 	}
389 
390 	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
391 	if (sent < 0) {
392 		/* If ndo_xdp_xmit fails with an errno, no frames have
393 		 * been xmit'ed.
394 		 */
395 		err = sent;
396 		sent = 0;
397 	}
398 
399 	/* If not all frames have been transmitted, it is our
400 	 * responsibility to free them
401 	 */
402 	for (i = sent; unlikely(i < to_send); i++)
403 		xdp_return_frame_rx_napi(bq->q[i]);
404 
405 out:
406 	bq->count = 0;
407 	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
408 }
409 
410 /* __dev_flush is called from xdp_do_flush(), which _must_ be called by the
411  * driver before returning from its napi->poll() routine. See the comment above
412  * xdp_do_flush() in filter.c.
413  */
414 void __dev_flush(void)
415 {
416 	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
417 	struct xdp_dev_bulk_queue *bq, *tmp;
418 
419 	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
420 		bq_xmit_all(bq, XDP_XMIT_FLUSH);
421 		bq->dev_rx = NULL;
422 		bq->xdp_prog = NULL;
423 		__list_del_clearprev(&bq->flush_node);
424 	}
425 }
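
/* A condensed, hypothetical driver-side sketch (not taken from any real
 * driver) of the contract described above: xdp_do_flush() runs before
 * napi->poll() returns, so every bulk queue this CPU added to dev_flush_list
 * during RX processing is drained by __dev_flush().
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		// RX processing; may call xdp_do_redirect() into a devmap
 *		int work_done = example_clean_rx_irq(napi, budget);
 *
 *		// drain the per-CPU bulk queues before the poll can end
 *		xdp_do_flush();
 *
 *		if (work_done < budget)
 *			napi_complete_done(napi, work_done);
 *		return work_done;
 *	}
 */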
426 
427 #ifdef CONFIG_DEBUG_NET
428 bool dev_check_flush(void)
429 {
430 	if (list_empty(this_cpu_ptr(&dev_flush_list)))
431 		return false;
432 	__dev_flush();
433 	return true;
434 }
435 #endif
436 
437 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
438  * by local_bh_disable() (from XDP calls inside NAPI). The
439  * rcu_read_lock_bh_held() below makes lockdep accept both.
440  */
441 static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
442 {
443 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
444 	struct bpf_dtab_netdev *obj;
445 
446 	if (key >= map->max_entries)
447 		return NULL;
448 
449 	obj = rcu_dereference_check(dtab->netdev_map[key],
450 				    rcu_read_lock_bh_held());
451 	return obj;
452 }
453 
454 /* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, percpu variable
455  * access is safe and map elements stick around. See the comment above
456  * xdp_do_flush() in filter.c.
457  */
458 static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
459 		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
460 {
461 	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
462 	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
463 
464 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
465 		bq_xmit_all(bq, 0);
466 
467 	/* Ingress dev_rx will be the same for all xdp_frames in the
468 	 * bulk_queue, because the bq is stored per-CPU and must be flushed
469 	 * at the end of the net_device driver's NAPI function.
470 	 *
471 	 * Do the same with xdp_prog and flush_list since these fields
472 	 * are only ever modified together.
473 	 */
474 	if (!bq->dev_rx) {
475 		bq->dev_rx = dev_rx;
476 		bq->xdp_prog = xdp_prog;
477 		list_add(&bq->flush_node, flush_list);
478 	}
479 
480 	bq->q[bq->count++] = xdpf;
481 }
482 
483 static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
484 				struct net_device *dev_rx,
485 				struct bpf_prog *xdp_prog)
486 {
487 	int err;
488 
489 	if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
490 		return -EOPNOTSUPP;
491 
492 	if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
493 		     xdp_frame_has_frags(xdpf)))
494 		return -EOPNOTSUPP;
495 
496 	err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
497 	if (unlikely(err))
498 		return err;
499 
500 	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
501 	return 0;
502 }
503 
504 static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
505 {
506 	struct xdp_txq_info txq = { .dev = dst->dev };
507 	struct xdp_buff xdp;
508 	u32 act;
509 
510 	if (!dst->xdp_prog)
511 		return XDP_PASS;
512 
513 	__skb_pull(skb, skb->mac_len);
514 	xdp.txq = &txq;
515 
516 	act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
517 	switch (act) {
518 	case XDP_PASS:
519 		__skb_push(skb, skb->mac_len);
520 		break;
521 	default:
522 		bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
523 		fallthrough;
524 	case XDP_ABORTED:
525 		trace_xdp_exception(dst->dev, dst->xdp_prog, act);
526 		fallthrough;
527 	case XDP_DROP:
528 		kfree_skb(skb);
529 		break;
530 	}
531 
532 	return act;
533 }
534 
535 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
536 		    struct net_device *dev_rx)
537 {
538 	return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
539 }
540 
541 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
542 		    struct net_device *dev_rx)
543 {
544 	struct net_device *dev = dst->dev;
545 
546 	return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
547 }
548 
549 static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
550 {
551 	if (!obj)
552 		return false;
553 
554 	if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
555 		return false;
556 
557 	if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
558 		     xdp_frame_has_frags(xdpf)))
559 		return false;
560 
561 	if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
562 		return false;
563 
564 	return true;
565 }
566 
567 static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
568 				 struct net_device *dev_rx,
569 				 struct xdp_frame *xdpf)
570 {
571 	struct xdp_frame *nxdpf;
572 
573 	nxdpf = xdpf_clone(xdpf);
574 	if (!nxdpf)
575 		return -ENOMEM;
576 
577 	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
578 
579 	return 0;
580 }
581 
582 static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
583 {
584 	while (num_excluded--) {
585 		if (ifindex == excluded[num_excluded])
586 			return true;
587 	}
588 	return false;
589 }
590 
591 /* Get ifindex of each upper device. 'indexes' must be able to hold at
592  * least MAX_NEST_DEV elements.
593  * Returns the number of ifindexes added.
594  */
595 static int get_upper_ifindexes(struct net_device *dev, int *indexes)
596 {
597 	struct net_device *upper;
598 	struct list_head *iter;
599 	int n = 0;
600 
601 	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
602 		indexes[n++] = upper->ifindex;
603 	}
604 	return n;
605 }
606 
607 int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
608 			  struct bpf_map *map, bool exclude_ingress)
609 {
610 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
611 	struct bpf_dtab_netdev *dst, *last_dst = NULL;
612 	int excluded_devices[1+MAX_NEST_DEV];
613 	struct hlist_head *head;
614 	int num_excluded = 0;
615 	unsigned int i;
616 	int err;
617 
618 	if (exclude_ingress) {
619 		num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
620 		excluded_devices[num_excluded++] = dev_rx->ifindex;
621 	}
622 
623 	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
624 		for (i = 0; i < map->max_entries; i++) {
625 			dst = rcu_dereference_check(dtab->netdev_map[i],
626 						    rcu_read_lock_bh_held());
627 			if (!is_valid_dst(dst, xdpf))
628 				continue;
629 
630 			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
631 				continue;
632 
633 			/* we only need n-1 clones; last_dst enqueued below */
634 			if (!last_dst) {
635 				last_dst = dst;
636 				continue;
637 			}
638 
639 			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
640 			if (err)
641 				return err;
642 
643 			last_dst = dst;
644 		}
645 	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
646 		for (i = 0; i < dtab->n_buckets; i++) {
647 			head = dev_map_index_hash(dtab, i);
648 			hlist_for_each_entry_rcu(dst, head, index_hlist,
649 						 lockdep_is_held(&dtab->index_lock)) {
650 				if (!is_valid_dst(dst, xdpf))
651 					continue;
652 
653 				if (is_ifindex_excluded(excluded_devices, num_excluded,
654 							dst->dev->ifindex))
655 					continue;
656 
657 				/* we only need n-1 clones; last_dst enqueued below */
658 				if (!last_dst) {
659 					last_dst = dst;
660 					continue;
661 				}
662 
663 				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
664 				if (err)
665 					return err;
666 
667 				last_dst = dst;
668 			}
669 		}
670 	}
671 
672 	/* consume the last copy of the frame */
673 	if (last_dst)
674 		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
675 	else
676 		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */
677 
678 	return 0;
679 }
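
/* For illustration, a hedged BPF-side sketch (not part of this file) of the
 * broadcast path that lands in dev_map_enqueue_multi(). The map and function
 * names are hypothetical; BPF_F_BROADCAST and BPF_F_EXCLUDE_INGRESS are the
 * real flags accepted by bpf_redirect_map() for devmaps, and the key is
 * ignored when broadcasting.
 *
 *	SEC("xdp")
 *	int broadcast_example(struct xdp_md *ctx)
 *	{
 *		// clone the frame to every device in the map except the
 *		// one it arrived on
 *		return bpf_redirect_map(&tx_devmap, 0,
 *					BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
 *	}
 */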
680 
681 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
682 			     struct bpf_prog *xdp_prog)
683 {
684 	int err;
685 
686 	err = xdp_ok_fwd_dev(dst->dev, skb->len);
687 	if (unlikely(err))
688 		return err;
689 
690 	/* Redirect has already succeeded semantically at this point, so we just
691 	 * return 0 even if the packet is dropped. The helper below takes care of
692 	 * freeing the skb.
693 	 */
694 	if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
695 		return 0;
696 
697 	skb->dev = dst->dev;
698 	generic_xdp_tx(skb, xdp_prog);
699 
700 	return 0;
701 }
702 
703 static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
704 				  struct sk_buff *skb,
705 				  struct bpf_prog *xdp_prog)
706 {
707 	struct sk_buff *nskb;
708 	int err;
709 
710 	nskb = skb_clone(skb, GFP_ATOMIC);
711 	if (!nskb)
712 		return -ENOMEM;
713 
714 	err = dev_map_generic_redirect(dst, nskb, xdp_prog);
715 	if (unlikely(err)) {
716 		consume_skb(nskb);
717 		return err;
718 	}
719 
720 	return 0;
721 }
722 
723 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
724 			   struct bpf_prog *xdp_prog, struct bpf_map *map,
725 			   bool exclude_ingress)
726 {
727 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
728 	struct bpf_dtab_netdev *dst, *last_dst = NULL;
729 	int excluded_devices[1+MAX_NEST_DEV];
730 	struct hlist_head *head;
731 	struct hlist_node *next;
732 	int num_excluded = 0;
733 	unsigned int i;
734 	int err;
735 
736 	if (exclude_ingress) {
737 		num_excluded = get_upper_ifindexes(dev, excluded_devices);
738 		excluded_devices[num_excluded++] = dev->ifindex;
739 	}
740 
741 	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
742 		for (i = 0; i < map->max_entries; i++) {
743 			dst = rcu_dereference_check(dtab->netdev_map[i],
744 						    rcu_read_lock_bh_held());
745 			if (!dst)
746 				continue;
747 
748 			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
749 				continue;
750 
751 			/* we only need n-1 clones; last_dst enqueued below */
752 			if (!last_dst) {
753 				last_dst = dst;
754 				continue;
755 			}
756 
757 			err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
758 			if (err)
759 				return err;
760 
761 			last_dst = dst;
762 
763 		}
764 	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
765 		for (i = 0; i < dtab->n_buckets; i++) {
766 			head = dev_map_index_hash(dtab, i);
767 			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
768 				if (is_ifindex_excluded(excluded_devices, num_excluded,
769 							dst->dev->ifindex))
770 					continue;
771 
772 				/* we only need n-1 clones; last_dst enqueued below */
773 				if (!last_dst) {
774 					last_dst = dst;
775 					continue;
776 				}
777 
778 				err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
779 				if (err)
780 					return err;
781 
782 				last_dst = dst;
783 			}
784 		}
785 	}
786 
787 	/* consume the first skb and return */
788 	if (last_dst)
789 		return dev_map_generic_redirect(last_dst, skb, xdp_prog);
790 
791 	/* dtab is empty */
792 	consume_skb(skb);
793 	return 0;
794 }
795 
796 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
797 {
798 	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
799 
800 	return obj ? &obj->val : NULL;
801 }
802 
803 static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
804 {
805 	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
806 								*(u32 *)key);
807 	return obj ? &obj->val : NULL;
808 }
809 
810 static void __dev_map_entry_free(struct rcu_head *rcu)
811 {
812 	struct bpf_dtab_netdev *dev;
813 
814 	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
815 	if (dev->xdp_prog)
816 		bpf_prog_put(dev->xdp_prog);
817 	dev_put(dev->dev);
818 	kfree(dev);
819 }
820 
821 static long dev_map_delete_elem(struct bpf_map *map, void *key)
822 {
823 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
824 	struct bpf_dtab_netdev *old_dev;
825 	int k = *(u32 *)key;
826 
827 	if (k >= map->max_entries)
828 		return -EINVAL;
829 
830 	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
831 	if (old_dev) {
832 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
833 		atomic_dec((atomic_t *)&dtab->items);
834 	}
835 	return 0;
836 }
837 
838 static long dev_map_hash_delete_elem(struct bpf_map *map, void *key)
839 {
840 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
841 	struct bpf_dtab_netdev *old_dev;
842 	int k = *(u32 *)key;
843 	unsigned long flags;
844 	int ret = -ENOENT;
845 
846 	spin_lock_irqsave(&dtab->index_lock, flags);
847 
848 	old_dev = __dev_map_hash_lookup_elem(map, k);
849 	if (old_dev) {
850 		dtab->items--;
851 		hlist_del_init_rcu(&old_dev->index_hlist);
852 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
853 		ret = 0;
854 	}
855 	spin_unlock_irqrestore(&dtab->index_lock, flags);
856 
857 	return ret;
858 }
859 
860 static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
861 						    struct bpf_dtab *dtab,
862 						    struct bpf_devmap_val *val,
863 						    unsigned int idx)
864 {
865 	struct bpf_prog *prog = NULL;
866 	struct bpf_dtab_netdev *dev;
867 
868 	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
869 				   GFP_NOWAIT | __GFP_NOWARN,
870 				   dtab->map.numa_node);
871 	if (!dev)
872 		return ERR_PTR(-ENOMEM);
873 
874 	dev->dev = dev_get_by_index(net, val->ifindex);
875 	if (!dev->dev)
876 		goto err_out;
877 
878 	if (val->bpf_prog.fd > 0) {
879 		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
880 					     BPF_PROG_TYPE_XDP, false);
881 		if (IS_ERR(prog))
882 			goto err_put_dev;
883 		if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
884 		    !bpf_prog_map_compatible(&dtab->map, prog))
885 			goto err_put_prog;
886 	}
887 
888 	dev->idx = idx;
889 	if (prog) {
890 		dev->xdp_prog = prog;
891 		dev->val.bpf_prog.id = prog->aux->id;
892 	} else {
893 		dev->xdp_prog = NULL;
894 		dev->val.bpf_prog.id = 0;
895 	}
896 	dev->val.ifindex = val->ifindex;
897 
898 	return dev;
899 err_put_prog:
900 	bpf_prog_put(prog);
901 err_put_dev:
902 	dev_put(dev->dev);
903 err_out:
904 	kfree(dev);
905 	return ERR_PTR(-EINVAL);
906 }
907 
908 static long __dev_map_update_elem(struct net *net, struct bpf_map *map,
909 				  void *key, void *value, u64 map_flags)
910 {
911 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
912 	struct bpf_dtab_netdev *dev, *old_dev;
913 	struct bpf_devmap_val val = {};
914 	u32 i = *(u32 *)key;
915 
916 	if (unlikely(map_flags > BPF_EXIST))
917 		return -EINVAL;
918 	if (unlikely(i >= dtab->map.max_entries))
919 		return -E2BIG;
920 	if (unlikely(map_flags == BPF_NOEXIST))
921 		return -EEXIST;
922 
923 	/* already verified value_size <= sizeof val */
924 	memcpy(&val, value, map->value_size);
925 
926 	if (!val.ifindex) {
927 		dev = NULL;
928 		/* cannot specify fd if ifindex is 0 */
929 		if (val.bpf_prog.fd > 0)
930 			return -EINVAL;
931 	} else {
932 		dev = __dev_map_alloc_node(net, dtab, &val, i);
933 		if (IS_ERR(dev))
934 			return PTR_ERR(dev);
935 	}
936 
937 	/* Use call_rcu() here to ensure rcu critical sections have completed.
938 	 * Remember that the driver-side flush operation will happen before the
939 	 * net device is removed.
940 	 */
941 	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
942 	if (old_dev)
943 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
944 	else
945 		atomic_inc((atomic_t *)&dtab->items);
946 
947 	return 0;
948 }
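
/* A minimal user-space sketch (not part of this file, assumes libbpf) of
 * populating a devmap slot with both an egress ifindex and a devmap program
 * fd, which is the value layout __dev_map_update_elem() consumes above.
 * map_fd, prog_fd and ifindex are placeholders supplied by the caller.
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf.h>
 *
 *	int add_devmap_entry(int map_fd, int prog_fd, __u32 ifindex)
 *	{
 *		struct bpf_devmap_val val = {
 *			.ifindex = ifindex,
 *			.bpf_prog.fd = prog_fd,	// fd <= 0 means no program
 *		};
 *		__u32 key = 0;
 *
 *		return bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 *	}
 */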
949 
950 static long dev_map_update_elem(struct bpf_map *map, void *key, void *value,
951 				u64 map_flags)
952 {
953 	return __dev_map_update_elem(current->nsproxy->net_ns,
954 				     map, key, value, map_flags);
955 }
956 
957 static long __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
958 				       void *key, void *value, u64 map_flags)
959 {
960 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
961 	struct bpf_dtab_netdev *dev, *old_dev;
962 	struct bpf_devmap_val val = {};
963 	u32 idx = *(u32 *)key;
964 	unsigned long flags;
965 	int err = -EEXIST;
966 
967 	/* already verified value_size <= sizeof val */
968 	memcpy(&val, value, map->value_size);
969 
970 	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
971 		return -EINVAL;
972 
973 	spin_lock_irqsave(&dtab->index_lock, flags);
974 
975 	old_dev = __dev_map_hash_lookup_elem(map, idx);
976 	if (old_dev && (map_flags & BPF_NOEXIST))
977 		goto out_err;
978 
979 	dev = __dev_map_alloc_node(net, dtab, &val, idx);
980 	if (IS_ERR(dev)) {
981 		err = PTR_ERR(dev);
982 		goto out_err;
983 	}
984 
985 	if (old_dev) {
986 		hlist_del_rcu(&old_dev->index_hlist);
987 	} else {
988 		if (dtab->items >= dtab->map.max_entries) {
989 			spin_unlock_irqrestore(&dtab->index_lock, flags);
990 			call_rcu(&dev->rcu, __dev_map_entry_free);
991 			return -E2BIG;
992 		}
993 		dtab->items++;
994 	}
995 
996 	hlist_add_head_rcu(&dev->index_hlist,
997 			   dev_map_index_hash(dtab, idx));
998 	spin_unlock_irqrestore(&dtab->index_lock, flags);
999 
1000 	if (old_dev)
1001 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
1002 
1003 	return 0;
1004 
1005 out_err:
1006 	spin_unlock_irqrestore(&dtab->index_lock, flags);
1007 	return err;
1008 }
1009 
1010 static long dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
1011 				     u64 map_flags)
1012 {
1013 	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
1014 					 map, key, value, map_flags);
1015 }
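
/* A hedged user-space sketch (not part of this file, assumes libbpf) of the
 * devmap_hash usage pattern described at the top of this file: the key is
 * simply the ifindex, so entries stay densely packed regardless of how large
 * the ifindex values get. map_fd and ifindex are placeholders.
 *
 *	int add_devmap_hash_entry(int map_fd, __u32 ifindex)
 *	{
 *		struct bpf_devmap_val val = { .ifindex = ifindex };
 *
 *		// for DEVMAP_HASH both key and value carry the ifindex
 *		return bpf_map_update_elem(map_fd, &ifindex, &val, BPF_ANY);
 *	}
 */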
1016 
1017 static long dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
1018 {
1019 	return __bpf_xdp_redirect_map(map, ifindex, flags,
1020 				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
1021 				      __dev_map_lookup_elem);
1022 }
1023 
1024 static long dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
1025 {
1026 	return __bpf_xdp_redirect_map(map, ifindex, flags,
1027 				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
1028 				      __dev_map_hash_lookup_elem);
1029 }
1030 
1031 static u64 dev_map_mem_usage(const struct bpf_map *map)
1032 {
1033 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
1034 	u64 usage = sizeof(struct bpf_dtab);
1035 
1036 	if (map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)
1037 		usage += (u64)dtab->n_buckets * sizeof(struct hlist_head);
1038 	else
1039 		usage += (u64)map->max_entries * sizeof(struct bpf_dtab_netdev *);
1040 	usage += atomic_read((atomic_t *)&dtab->items) *
1041 			 (u64)sizeof(struct bpf_dtab_netdev);
1042 	return usage;
1043 }
1044 
1045 BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
1046 const struct bpf_map_ops dev_map_ops = {
1047 	.map_meta_equal = bpf_map_meta_equal,
1048 	.map_alloc_check = dev_map_alloc_check,
1049 	.map_alloc = dev_map_alloc,
1050 	.map_free = dev_map_free,
1051 	.map_get_next_key = dev_map_get_next_key,
1052 	.map_lookup_elem = dev_map_lookup_elem,
1053 	.map_update_elem = dev_map_update_elem,
1054 	.map_delete_elem = dev_map_delete_elem,
1055 	.map_check_btf = map_check_no_btf,
1056 	.map_mem_usage = dev_map_mem_usage,
1057 	.map_btf_id = &dev_map_btf_ids[0],
1058 	.map_redirect = dev_map_redirect,
1059 };
1060 
1061 const struct bpf_map_ops dev_map_hash_ops = {
1062 	.map_meta_equal = bpf_map_meta_equal,
1063 	.map_alloc_check = dev_map_alloc_check,
1064 	.map_alloc = dev_map_alloc,
1065 	.map_free = dev_map_free,
1066 	.map_get_next_key = dev_map_hash_get_next_key,
1067 	.map_lookup_elem = dev_map_hash_lookup_elem,
1068 	.map_update_elem = dev_map_hash_update_elem,
1069 	.map_delete_elem = dev_map_hash_delete_elem,
1070 	.map_check_btf = map_check_no_btf,
1071 	.map_mem_usage = dev_map_mem_usage,
1072 	.map_btf_id = &dev_map_btf_ids[0],
1073 	.map_redirect = dev_hash_map_redirect,
1074 };
1075 
1076 static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
1077 				       struct net_device *netdev)
1078 {
1079 	unsigned long flags;
1080 	u32 i;
1081 
1082 	spin_lock_irqsave(&dtab->index_lock, flags);
1083 	for (i = 0; i < dtab->n_buckets; i++) {
1084 		struct bpf_dtab_netdev *dev;
1085 		struct hlist_head *head;
1086 		struct hlist_node *next;
1087 
1088 		head = dev_map_index_hash(dtab, i);
1089 
1090 		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
1091 			if (netdev != dev->dev)
1092 				continue;
1093 
1094 			dtab->items--;
1095 			hlist_del_rcu(&dev->index_hlist);
1096 			call_rcu(&dev->rcu, __dev_map_entry_free);
1097 		}
1098 	}
1099 	spin_unlock_irqrestore(&dtab->index_lock, flags);
1100 }
1101 
1102 static int dev_map_notification(struct notifier_block *notifier,
1103 				ulong event, void *ptr)
1104 {
1105 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
1106 	struct bpf_dtab *dtab;
1107 	int i, cpu;
1108 
1109 	switch (event) {
1110 	case NETDEV_REGISTER:
1111 		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
1112 			break;
1113 
1114 		/* will be freed in free_netdev() */
1115 		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
1116 		if (!netdev->xdp_bulkq)
1117 			return NOTIFY_BAD;
1118 
1119 		for_each_possible_cpu(cpu)
1120 			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
1121 		break;
1122 	case NETDEV_UNREGISTER:
1123 		/* This rcu_read_lock/unlock pair is needed because
1124 		 * dev_map_list is an RCU list AND because a delete
1125 		 * operation must not free a netdev_map entry while we
1126 		 * are comparing it against the netdev being unregistered.
1127 		 */
1128 		rcu_read_lock();
1129 		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
1130 			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
1131 				dev_map_hash_remove_netdev(dtab, netdev);
1132 				continue;
1133 			}
1134 
1135 			for (i = 0; i < dtab->map.max_entries; i++) {
1136 				struct bpf_dtab_netdev *dev, *odev;
1137 
1138 				dev = rcu_dereference(dtab->netdev_map[i]);
1139 				if (!dev || netdev != dev->dev)
1140 					continue;
1141 				odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
1142 				if (dev == odev) {
1143 					call_rcu(&dev->rcu,
1144 						 __dev_map_entry_free);
1145 					atomic_dec((atomic_t *)&dtab->items);
1146 				}
1147 			}
1148 		}
1149 		rcu_read_unlock();
1150 		break;
1151 	default:
1152 		break;
1153 	}
1154 	return NOTIFY_OK;
1155 }
1156 
1157 static struct notifier_block dev_map_notifier = {
1158 	.notifier_call = dev_map_notification,
1159 };
1160 
1161 static int __init dev_map_init(void)
1162 {
1163 	int cpu;
1164 
1165 	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
1166 	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
1167 		     offsetof(struct _bpf_dtab_netdev, dev));
1168 	register_netdevice_notifier(&dev_map_notifier);
1169 
1170 	for_each_possible_cpu(cpu)
1171 		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
1172 	return 0;
1173 }
1174 
1175 subsys_initcall(dev_map_init);
1176