xref: /linux-6.15/kernel/bpf/devmap.c (revision b936ca64)
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * have spent some effort to ensure the datapath with redirect maps does not
 * use any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use call_rcu()
 * to wait for an rcu grace period before freeing the old data structures. This
 * ensures the datapath always has a valid copy. However, the datapath does a
 * "flush" operation that pushes any pending packets in the driver outside the
 * RCU critical section. Each bpf_dtab_netdev tracks these pending operations
 * using an atomic per-cpu bitmap. The bpf_dtab_netdev object will not be
 * destroyed until all bits are cleared, indicating that all outstanding flush
 * operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search for net devices in the map structure that
 * contain a reference to the net device and remove them. This is a two step
 * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 * check to see if the ifindex is the same as the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 */
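
/* For illustration only (not part of this file): a minimal sketch of the
 * BPF program side that drives this map, in the libbpf samples style of
 * this era; the map name tx_ports and its sizing are hypothetical. The
 * program redirects every packet to whatever net_device is stored in
 * slot 0:
 *
 *	struct bpf_map_def SEC("maps") tx_ports = {
 *		.type		= BPF_MAP_TYPE_DEVMAP,
 *		.key_size	= sizeof(__u32),	// must be 4, see dev_map_alloc()
 *		.value_size	= sizeof(__u32),	// an ifindex, must be 4
 *		.max_entries	= 64,
 *	};
 *
 *	SEC("xdp")
 *	int xdp_redirect_map_prog(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&tx_ports, 0, 0);
 *	}
 */
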
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

#define DEV_MAP_BULK_SIZE 16
struct xdp_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct net_device *dev_rx;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct bpf_dtab *dtab;
	unsigned int bit;
	struct xdp_bulk_queue __percpu *bulkq;
	struct rcu_head rcu;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map;
	unsigned long __percpu *flush_needed;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static u64 dev_map_bitmap_size(const union bpf_attr *attr)
{
	return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
}
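
/* For illustration: on a 64-bit kernel a map with max_entries == 100 needs
 * BITS_TO_LONGS(100) == DIV_ROUND_UP(100, 64) == 2 longs, i.e. 16 bytes of
 * flush bitmap per possible CPU.
 */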

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err = -EINVAL;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&dtab->map, attr);

	/* make sure page count doesn't overflow */
	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_dtab;

	/* if map size is larger than memlock limit, reject it */
	err = bpf_map_charge_init(&dtab->map.memory,
				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
	if (err)
		goto free_dtab;

	err = -ENOMEM;

	/* A per cpu bitfield with a bit per possible net device */
	dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
						__alignof__(unsigned long),
						GFP_KERNEL | __GFP_NOWARN);
	if (!dtab->flush_needed)
		goto free_charge;

	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
					      sizeof(struct bpf_dtab_netdev *),
					      dtab->map.numa_node);
	if (!dtab->netdev_map)
		goto free_charge;

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
free_charge:
	bpf_map_charge_finish(&dtab->map.memory);
free_dtab:
	free_percpu(dtab->flush_needed);
	kfree(dtab);
	return ERR_PTR(err);
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. The rcu critical section only guarantees
	 * no further reads against netdev_map. It does __not__ ensure pending
	 * flush operations (if any) are complete.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	/* To ensure all pending flush operations have completed, wait for the
	 * flush bitmap to show all flush_needed bits as zero on _all_ cpus.
	 * Because the above synchronize_rcu() ensures the map is disconnected
	 * from the program we can assume no new bits will be set.
	 */
	for_each_online_cpu(cpu) {
		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);

		while (!bitmap_empty(bitmap, dtab->map.max_entries))
			cond_resched();
	}

	for (i = 0; i < dtab->map.max_entries; i++) {
		struct bpf_dtab_netdev *dev;

		dev = dtab->netdev_map[i];
		if (!dev)
			continue;

		dev_put(dev->dev);
		kfree(dev);
	}

	free_percpu(dtab->flush_needed);
	bpf_map_area_free(dtab->netdev_map);
	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
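
/* For illustration only (not part of this file): a user-space walk over the
 * key space, assuming a libbpf map fd in `map_fd` (a hypothetical name).
 * Per the logic above, an invalid or NULL start key restarts at key 0, and
 * -ENOENT from the last slot terminates the walk:
 *
 *	__u32 key, next;
 *	int err;
 *
 *	for (err = bpf_map_get_next_key(map_fd, NULL, &next); !err;
 *	     err = bpf_map_get_next_key(map_fd, &key, &next)) {
 *		key = next;
 *		// inspect `key` here, e.g. with bpf_map_lookup_elem()
 *	}
 */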

void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);

	__set_bit(bit, bitmap);
}

static int bq_xmit_all(struct bpf_dtab_netdev *obj,
		       struct xdp_bulk_queue *bq, u32 flags,
		       bool in_napi_ctx)
{
	struct net_device *dev = obj->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit,
			      sent, drops, bq->dev_rx, dev, err);
	bq->dev_rx = NULL;
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		/* RX path under NAPI protection, can return frames faster */
		if (likely(in_napi_ctx))
			xdp_return_frame_rx_napi(xdpf);
		else
			xdp_return_frame(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_map_flush is called from xdp_do_flush_map(), which _must_ be called
 * by the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the ctx bitmap
 * is zeroed before completing to ensure all flush operations have completed.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
	u32 bit;

	for_each_set_bit(bit, bitmap, map->max_entries) {
		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
		struct xdp_bulk_queue *bq;

		/* This is possible if the dev entry is removed by user space
		 * between xdp redirect and flush op.
		 */
		if (unlikely(!dev))
			continue;

		__clear_bit(bit, bitmap);

		bq = this_cpu_ptr(dev->bulkq);
		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
	}
}
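
/* For illustration only (not part of this file): the expected driver-side
 * calling pattern, sketched for a hypothetical driver (the mydrv_* names
 * are made up). Redirected frames sit in the per-cpu bulk queues until the
 * final xdp_do_flush_map(), which lands in __dev_map_flush() above:
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = 0;
 *
 *		while (done < budget && mydrv_rx_frame_pending(napi)) {
 *			// run the XDP program; XDP_REDIRECT ends up in
 *			// xdp_do_redirect() -> bq_enqueue()
 *			done++;
 *		}
 *		xdp_do_flush_map();
 *		return done;
 *	}
 */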

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here, a dev_put won't happen until
 * after reading the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(obj, bq, 0, true);

	/* Ingress dev_rx will be the same for all xdp_frame's in the
	 * bulk_queue, because the bq is stored per-CPU and must be flushed
	 * from the net_device driver's NAPI func end.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;
	return 0;
}
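
/* For illustration: with DEV_MAP_BULK_SIZE == 16, the first 16 bq_enqueue()
 * calls on a CPU only fill bq->q[]. The 17th call finds bq->count == 16,
 * drains the queue through bq_xmit_all() (a single ndo_xdp_xmit() of 16
 * frames), and then stores its own frame at slot 0. Any partial batch left
 * at the end of the NAPI poll is pushed out by __dev_map_flush().
 */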

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dst, xdpf, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}
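
/* For illustration only (not part of this file): because the syscall-side
 * lookup above returns a pointer to dev->ifindex, the value a user reads
 * back is the ifindex it stored, assuming a libbpf map fd in `map_fd`:
 *
 *	__u32 key = 0, ifindex;
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, &ifindex))
 *		printf("slot 0 -> ifindex %u\n", ifindex);
 */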

static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
		struct xdp_bulk_queue *bq;
		unsigned long *bitmap;
		int cpu;

		for_each_online_cpu(cpu) {
			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
			__clear_bit(dev->bit, bitmap);

			bq = per_cpu_ptr(dev->bulkq, cpu);
			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
		}
	}
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	free_percpu(dev->bulkq);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed, but this does not guarantee a flush has happened
	 * yet, because the driver side rcu_read_lock/unlock only protects
	 * the running XDP program. However, for pending flush operations
	 * the dev and ctx are stored in another per cpu map. Additionally,
	 * the driver tear down ensures all soft irqs are complete before
	 * removing the net device once dev_put drops the refcount to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct net *net = current->nsproxy->net_ns;
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 i = *(u32 *)key;
	u32 ifindex = *(u32 *)value;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = kmalloc_node(sizeof(*dev), gfp, map->numa_node);
		if (!dev)
			return -ENOMEM;

		dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
						sizeof(void *), gfp);
		if (!dev->bulkq) {
			kfree(dev);
			return -ENOMEM;
		}

		dev->dev = dev_get_by_index(net, ifindex);
		if (!dev->dev) {
			free_percpu(dev->bulkq);
			kfree(dev);
			return -EINVAL;
		}

		dev->bit = i;
		dev->dtab = dtab;
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}
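
/* For illustration only (not part of this file): populating and clearing a
 * slot from user space, assuming libbpf and <net/if.h>; "eth1" and map_fd
 * are hypothetical. Writing ifindex 0 clears the slot, per the !ifindex
 * branch above:
 *
 *	__u32 key = 0;
 *	__u32 ifindex = if_nametoindex("eth1");
 *
 *	bpf_map_update_elem(map_fd, &key, &ifindex, 0);   // 0 == BPF_ANY
 *	...
 *	bpf_map_delete_elem(map_fd, &key);  // entry freed after RCU grace
 */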

const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);