/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * have spent some effort to ensure the datapath with redirect maps does not
 * use any locking. This is a quick note on the details.
 *
 * We have three possible paths into the devmap control plane: bpf syscalls,
 * bpf programs, and driver side xmit/flush operations. A bpf syscall will
 * invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use
 * call_rcu() to wait for an rcu grace period before freeing the old data
 * structures. This ensures the datapath always has a valid copy. However,
 * the datapath does a "flush" operation that pushes any pending packets in
 * the driver outside the RCU critical section. Each bpf_dtab_netdev tracks
 * these pending operations using an atomic per-cpu bitmap. The
 * bpf_dtab_netdev object will not be destroyed until all bits are cleared,
 * indicating that all outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that hold a
 * reference to the net device being removed and remove them. This is a
 * two-step process: (a) dereference the bpf_dtab_netdev object in netdev_map
 * and (b) check whether the ifindex is the same as that of the net_device
 * being removed. When removing the dev, a cmpxchg() is used to ensure the
 * correct dev is removed; in the case of a concurrent update or delete
 * operation it is possible that the initially referenced dev is no longer in
 * the map. While the notifier hook walks the map we know that no new dev
 * references can be added by the user, because core infrastructure ensures
 * dev_get_by_index() calls will fail at this point.
 */
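
/* For orientation only, a minimal sketch (not part of this file) of the BPF
 * program side that drives such a map via bpf_redirect_map(). The section
 * names and the map definition below are assumptions modeled on typical
 * samples, not code this file defines:
 *
 *	struct bpf_map_def SEC("maps") tx_port = {
 *		.type		= BPF_MAP_TYPE_DEVMAP,
 *		.key_size	= sizeof(u32),
 *		.value_size	= sizeof(u32),	(value is an ifindex)
 *		.max_entries	= 64,
 *	};
 *
 *	SEC("xdp")
 *	int xdp_redirect_prog(struct xdp_md *ctx)
 *	{
 *		u32 key = 0;	(slot chosen by the program)
 *
 *		return bpf_redirect_map(&tx_port, key, 0);
 *	}
 */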

#include <linux/bpf.h>
#include <linux/filter.h>

struct bpf_dtab_netdev {
	struct net_device *dev;
	struct bpf_dtab *dtab;
	unsigned int bit;
	struct rcu_head rcu;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map;
	unsigned long __percpu *flush_needed;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static u64 dev_map_bitmap_size(const union bpf_attr *attr)
{
	return BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long);
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err = -EINVAL;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	dtab->map.map_type = attr->map_type;
	dtab->map.key_size = attr->key_size;
	dtab->map.value_size = attr->value_size;
	dtab->map.max_entries = attr->max_entries;
	dtab->map.map_flags = attr->map_flags;
	dtab->map.numa_node = bpf_map_attr_numa_node(attr);

	/* make sure page count doesn't overflow */
	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_dtab;

	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(dtab->map.pages);
	if (err)
		goto free_dtab;

	err = -ENOMEM;

	/* A per cpu bitfield with a bit per possible net device */
	dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
						__alignof__(unsigned long),
						GFP_KERNEL | __GFP_NOWARN);
	if (!dtab->flush_needed)
		goto free_dtab;

	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
					      sizeof(struct bpf_dtab_netdev *),
					      dtab->map.numa_node);
	if (!dtab->netdev_map)
		goto free_dtab;

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
free_dtab:
	free_percpu(dtab->flush_needed);
	kfree(dtab);
	return ERR_PTR(err);
}
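
/* A hypothetical user space sketch (assuming the classic libbpf
 * bpf_create_map() wrapper, which this file does not define): the attribute
 * checks above correspond to a creation call with a 4-byte key (map index)
 * and a 4-byte value (ifindex), e.g.:
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(__u32),
 *				    sizeof(__u32), 64, 0);
 */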

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events. Wait for outstanding critical
	 * sections in these programs to complete. The rcu critical section
	 * only guarantees no further reads against netdev_map. It does
	 * __not__ ensure pending flush operations (if any) are complete.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	synchronize_rcu();

	/* To ensure all pending flush operations have completed, wait for the
	 * flush bitmap to show all flush_needed bits as zero on _all_ cpus.
	 * Because the above synchronize_rcu() ensures the map is disconnected
	 * from the program we can assume no new bits will be set.
	 */
	for_each_online_cpu(cpu) {
		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);

		while (!bitmap_empty(bitmap, dtab->map.max_entries))
			cond_resched();
	}

	for (i = 0; i < dtab->map.max_entries; i++) {
		struct bpf_dtab_netdev *dev;

		dev = dtab->netdev_map[i];
		if (!dev)
			continue;

		dev_put(dev->dev);
		kfree(dev);
	}

	free_percpu(dtab->flush_needed);
	bpf_map_area_free(dtab->netdev_map);
	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);

	__set_bit(bit, bitmap);
}

/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
 * from the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the ctx bitmap
 * is zeroed before dev_map_free() completes, to ensure all flush operations
 * have finished.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
	u32 bit;

	for_each_set_bit(bit, bitmap, map->max_entries) {
		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
		struct net_device *netdev;

		/* This is possible if the dev entry is removed by user space
		 * between xdp redirect and flush op.
		 */
		if (unlikely(!dev))
			continue;

		__clear_bit(bit, bitmap);
		netdev = dev->dev;
		if (likely(netdev->netdev_ops->ndo_xdp_flush))
			netdev->netdev_ops->ndo_xdp_flush(netdev);
	}
}
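
/* For illustration, a rough sketch (not part of this file) of where a driver
 * sits in this scheme. bpf_prog_run_xdp(), xdp_do_redirect() and
 * xdp_do_flush_map() are the real entry points, but the surrounding poll
 * routine is a simplified assumption:
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		...
 *		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 *		if (act == XDP_REDIRECT)
 *			xdp_do_redirect(dev, &xdp, xdp_prog);
 *		...
 *		xdp_do_flush_map();	(flush pending devmap packets)
 *		return work_done;
 *	}
 */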

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here, a dev_put won't happen until after
 * reading the ifindex.
 */
struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev;

	if (key >= map->max_entries)
		return NULL;

	dev = READ_ONCE(dtab->netdev_map[key]);
	return dev ? dev->dev : NULL;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct net_device *dev = __dev_map_lookup_elem(map, *(u32 *)key);

	return dev ? &dev->ifindex : NULL;
}
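
/* From user space, a lookup on this map therefore reads back the ifindex
 * stored in the slot. A hypothetical sketch, where bpf_map_lookup_elem() is
 * the syscall wrapper rather than anything defined in this file:
 *
 *	__u32 key = 0, ifindex;
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, &ifindex))
 *		printf("slot 0 -> ifindex %u\n", ifindex);
 */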

static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_flush) {
		struct net_device *fl = dev->dev;
		unsigned long *bitmap;
		int cpu;

		for_each_online_cpu(cpu) {
			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
			__clear_bit(dev->bit, bitmap);

			fl->netdev_ops->ndo_xdp_flush(dev->dev);
		}
	}
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed; this does not guarantee a flush has happened yet,
	 * because the driver side rcu_read_lock/unlock only protects the
	 * running XDP program. However, for pending flush operations the
	 * dev and ctx are stored in another per cpu map. Additionally, the
	 * driver tear down ensures all soft irqs are complete before the net
	 * device is removed once dev_put() drops the last reference.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct net *net = current->nsproxy->net_ns;
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 i = *(u32 *)key;
	u32 ifindex = *(u32 *)value;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN,
				   map->numa_node);
		if (!dev)
			return -ENOMEM;

		dev->dev = dev_get_by_index(net, ifindex);
		if (!dev->dev) {
			kfree(dev);
			return -EINVAL;
		}

		dev->bit = i;
		dev->dtab = dtab;
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}
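
/* A hypothetical user space sketch populating a slot, where
 * bpf_map_update_elem() is the syscall wrapper rather than anything defined
 * in this file; writing an ifindex of 0 clears the slot, per the check above:
 *
 *	__u32 key = 0;
 *	__u32 ifindex = if_nametoindex("eth1");
 *
 *	bpf_map_update_elem(map_fd, &key, &ifindex, BPF_ANY);
 */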

const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
};

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed both because
		 * dev_map_list is an RCU list and to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev ||
				    dev->dev->ifindex != netdev->ifindex)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);