// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Facebook
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/sock_diag.h>
#include <net/sock_reuseport.h>
#include <linux/btf_ids.h>

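/*
 * A reuseport sockarray is a bpf_map header followed by a flexible
 * array of RCU-protected pointers to SO_REUSEPORT sockets, one slot
 * per map index.
 */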
struct reuseport_array {
	struct bpf_map map;
	struct sock __rcu *ptrs[];
};

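/* The bpf_map is the first member, so a plain cast recovers the array. */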
static struct reuseport_array *reuseport_array(struct bpf_map *map)
{
	return (struct reuseport_array *)map;
}

/* The caller must hold the reuseport_lock */
void bpf_sk_reuseport_detach(struct sock *sk)
{
	struct sock __rcu **socks;

	write_lock_bh(&sk->sk_callback_lock);
	socks = __locked_read_sk_user_data_with_flags(sk, SK_USER_DATA_BPF);
	if (socks) {
		WRITE_ONCE(sk->sk_user_data, NULL);
		/*
		 * Do not move this NULL assignment outside of
		 * sk->sk_callback_lock because there is
		 * a race with reuseport_array_free()
		 * which does not hold the reuseport_lock.
		 */
		RCU_INIT_POINTER(*socks, NULL);
	}
	write_unlock_bh(&sk->sk_callback_lock);
}

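/*
 * The map value carries a socket fd: sizeof(u32) for a plain fd, or
 * sizeof(u64) so that lookup can return the 64-bit socket cookie
 * (see bpf_fd_reuseport_array_lookup_elem()).
 */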
static int reuseport_array_alloc_check(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32) &&
	    attr->value_size != sizeof(u64))
		return -EINVAL;

	return array_map_alloc_check(attr);
}

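/* RCU read-side lookup: returns the sock stored at "key", or NULL. */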
static void *reuseport_array_lookup_elem(struct bpf_map *map, void *key)
{
	struct reuseport_array *array = reuseport_array(map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return rcu_dereference(array->ptrs[index]);
}

/* Called from syscall only */
static long reuseport_array_delete_elem(struct bpf_map *map, void *key)
{
	struct reuseport_array *array = reuseport_array(map);
	u32 index = *(u32 *)key;
	struct sock *sk;
	int err;

	if (index >= map->max_entries)
		return -E2BIG;

	if (!rcu_access_pointer(array->ptrs[index]))
		return -ENOENT;

	spin_lock_bh(&reuseport_lock);

	sk = rcu_dereference_protected(array->ptrs[index],
				       lockdep_is_held(&reuseport_lock));
	if (sk) {
		write_lock_bh(&sk->sk_callback_lock);
		WRITE_ONCE(sk->sk_user_data, NULL);
		RCU_INIT_POINTER(array->ptrs[index], NULL);
		write_unlock_bh(&sk->sk_callback_lock);
		err = 0;
	} else {
		err = -ENOENT;
	}

	spin_unlock_bh(&reuseport_lock);

	return err;
}

static void reuseport_array_free(struct bpf_map *map)
{
	struct reuseport_array *array = reuseport_array(map);
	struct sock *sk;
	u32 i;

	/*
	 * ops->map_*_elem() will not be able to access this
	 * array now. Hence, this function only races with
	 * bpf_sk_reuseport_detach() which is triggered by
	 * close() or disconnect().
	 *
	 * This function and bpf_sk_reuseport_detach() are
	 * both removing sk from "array". Which one removes it
	 * first does not matter.
	 *
	 * The only concern here is that bpf_sk_reuseport_detach()
	 * may access "array" while it is being freed here.
	 * bpf_sk_reuseport_detach() accesses this "array"
	 * through sk->sk_user_data _and_ with sk->sk_callback_lock
	 * held, which is enough because this "array" is not freed
	 * until every sk->sk_user_data has stopped referencing it.
	 *
	 * Hence, taking "reuseport_lock" is not needed here.
	 */

	/*
	 * Since reuseport_lock is not taken, sk is accessed under
	 * rcu_read_lock().
	 */
	rcu_read_lock();
	for (i = 0; i < map->max_entries; i++) {
		sk = rcu_dereference(array->ptrs[i]);
		if (sk) {
			write_lock_bh(&sk->sk_callback_lock);
			/*
			 * No need for WRITE_ONCE(). At this point,
			 * no one is reading it without taking the
			 * sk->sk_callback_lock.
			 */
			sk->sk_user_data = NULL;
			write_unlock_bh(&sk->sk_callback_lock);
			RCU_INIT_POINTER(array->ptrs[i], NULL);
		}
	}
	rcu_read_unlock();

	/*
	 * Once we reach here, no sk->sk_user_data is referencing
	 * this "array" anymore. "array" can be freed now.
	 */
	bpf_map_area_free(array);
}

static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct reuseport_array *array;

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(struct_size(array, ptrs, attr->max_entries), numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);

	return &array->map;
}

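/*
 * Called from syscall. A sizeof(u64) value is required because the
 * element is returned to userspace as a 64-bit socket cookie rather
 * than a kernel pointer.
 */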
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct sock *sk;
	int err;

	if (map->value_size != sizeof(u64))
		return -ENOSPC;

	rcu_read_lock();
	sk = reuseport_array_lookup_elem(map, key);
	if (sk) {
		*(u64 *)value = __sock_gen_cookie(sk);
		err = 0;
	} else {
		err = -ENOENT;
	}
	rcu_read_unlock();

	return err;
}

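/*
 * Sanity checks shared by the lock-free fast path and the locked slow
 * path of bpf_fd_reuseport_array_update_elem().
 */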
static int
reuseport_array_update_check(const struct reuseport_array *array,
			     const struct sock *nsk,
			     const struct sock *osk,
			     const struct sock_reuseport *nsk_reuse,
			     u32 map_flags)
{
	if (osk && map_flags == BPF_NOEXIST)
		return -EEXIST;

	if (!osk && map_flags == BPF_EXIST)
		return -ENOENT;

	if (nsk->sk_protocol != IPPROTO_UDP && nsk->sk_protocol != IPPROTO_TCP)
		return -ENOTSUPP;

	if (nsk->sk_family != AF_INET && nsk->sk_family != AF_INET6)
		return -ENOTSUPP;

	if (nsk->sk_type != SOCK_STREAM && nsk->sk_type != SOCK_DGRAM)
		return -ENOTSUPP;

	/*
	 * sk must be hashed (i.e. listening in the TCP case or bound
	 * in the UDP case) and
	 * it must also be a SO_REUSEPORT sk (i.e. reuse cannot be NULL).
	 *
	 * Also, sk will be used in a bpf helper that is protected by
	 * rcu_read_lock().
	 */
	if (!sock_flag(nsk, SOCK_RCU_FREE) || !sk_hashed(nsk) || !nsk_reuse)
		return -EINVAL;

	/* READ_ONCE because the sk->sk_callback_lock may not be held here */
	if (READ_ONCE(nsk->sk_user_data))
		return -EBUSY;

	return 0;
}

/*
 * Called from syscall only.
 * The "nsk" is pinned by the fd's refcnt.
 * The "osk" and "reuse" are protected by reuseport_lock.
 */
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	struct reuseport_array *array = reuseport_array(map);
	struct sock *free_osk = NULL, *osk, *nsk;
	struct sock_reuseport *reuse;
	u32 index = *(u32 *)key;
	uintptr_t sk_user_data;
	struct socket *socket;
	int err, fd;

	if (map_flags > BPF_EXIST)
		return -EINVAL;

	if (index >= map->max_entries)
		return -E2BIG;

	if (map->value_size == sizeof(u64)) {
		u64 fd64 = *(u64 *)value;

		if (fd64 > S32_MAX)
			return -EINVAL;
		fd = fd64;
	} else {
		fd = *(int *)value;
	}

	socket = sockfd_lookup(fd, &err);
	if (!socket)
		return err;

	nsk = socket->sk;
	if (!nsk) {
		err = -EINVAL;
		goto put_file;
	}

	/* Quick checks before taking reuseport_lock */
	err = reuseport_array_update_check(array, nsk,
					   rcu_access_pointer(array->ptrs[index]),
					   rcu_access_pointer(nsk->sk_reuseport_cb),
					   map_flags);
	if (err)
		goto put_file;

	spin_lock_bh(&reuseport_lock);
	/*
	 * Some of the checks only need reuseport_lock,
	 * but they are also done under sk_callback_lock
	 * for simplicity.
	 */
	write_lock_bh(&nsk->sk_callback_lock);

	osk = rcu_dereference_protected(array->ptrs[index],
					lockdep_is_held(&reuseport_lock));
	reuse = rcu_dereference_protected(nsk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	err = reuseport_array_update_check(array, nsk, osk, reuse, map_flags);
	if (err)
		goto put_file_unlock;

	sk_user_data = (uintptr_t)&array->ptrs[index] | SK_USER_DATA_NOCOPY |
		SK_USER_DATA_BPF;
	WRITE_ONCE(nsk->sk_user_data, (void *)sk_user_data);
	rcu_assign_pointer(array->ptrs[index], nsk);
	free_osk = osk;
	err = 0;

put_file_unlock:
	write_unlock_bh(&nsk->sk_callback_lock);

	if (free_osk) {
		write_lock_bh(&free_osk->sk_callback_lock);
		WRITE_ONCE(free_osk->sk_user_data, NULL);
		write_unlock_bh(&free_osk->sk_callback_lock);
	}

	spin_unlock_bh(&reuseport_lock);
put_file:
	sockfd_put(socket);
	return err;
}

/* Called from syscall */
static int reuseport_array_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	struct reuseport_array *array = reuseport_array(map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

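/* Only the array itself is accounted; the sockets are not owned by the map. */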
static u64 reuseport_array_mem_usage(const struct bpf_map *map)
{
	struct reuseport_array *array;

	return struct_size(array, ptrs, map->max_entries);
}

BTF_ID_LIST_SINGLE(reuseport_array_map_btf_ids, struct, reuseport_array)
const struct bpf_map_ops reuseport_array_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = reuseport_array_alloc_check,
	.map_alloc = reuseport_array_alloc,
	.map_free = reuseport_array_free,
	.map_lookup_elem = reuseport_array_lookup_elem,
	.map_get_next_key = reuseport_array_get_next_key,
	.map_delete_elem = reuseport_array_delete_elem,
	.map_mem_usage = reuseport_array_mem_usage,
	.map_btf_id = &reuseport_array_map_btf_ids[0],
};
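
/*
 * Illustrative userspace flow for this map type (a sketch, assuming
 * libbpf's bpf_map_create()/bpf_map_update_elem() wrappers; not part
 * of this file's build):
 *
 *	int map_fd, sock_fd, one = 1;
 *	__u32 index = 0;
 *	__u64 fd64;
 *
 *	map_fd = bpf_map_create(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, NULL,
 *				sizeof(__u32), sizeof(__u64), 16, NULL);
 *
 *	sock_fd = socket(AF_INET, SOCK_STREAM, 0);
 *	setsockopt(sock_fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	// ... bind() and listen() so the socket is hashed ...
 *
 *	fd64 = sock_fd;
 *	bpf_map_update_elem(map_fd, &index, &fd64, BPF_NOEXIST);
 */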