xref: /linux-6.15/kernel/bpf/bpf_task_storage.c (revision 9e6c958b)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Facebook
 * Copyright 2020 Google LLC.
 */

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <linux/filter.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/rcupdate_trace.h>

DEFINE_BPF_STORAGE_CACHE(task_cache);

static DEFINE_PER_CPU(int, bpf_task_storage_busy);

static void bpf_task_storage_lock(void)
{
	cant_migrate();
	this_cpu_inc(bpf_task_storage_busy);
}

static void bpf_task_storage_unlock(void)
{
	this_cpu_dec(bpf_task_storage_busy);
}

static bool bpf_task_storage_trylock(void)
{
	cant_migrate();
	if (unlikely(this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
		this_cpu_dec(bpf_task_storage_busy);
		return false;
	}
	return true;
}

static struct bpf_local_storage __rcu **task_storage_ptr(void *owner)
{
	struct task_struct *task = owner;

	return &task->bpf_storage;
}

static struct bpf_local_storage_data *
task_storage_lookup(struct task_struct *task, struct bpf_map *map,
		    bool cacheit_lockit)
{
	struct bpf_local_storage *task_storage;
	struct bpf_local_storage_map *smap;

	task_storage =
		rcu_dereference_check(task->bpf_storage, bpf_rcu_lock_held());
	if (!task_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(task_storage, smap, cacheit_lockit);
}

void bpf_task_storage_free(struct task_struct *task)
{
	struct bpf_local_storage *local_storage;

	migrate_disable();
	rcu_read_lock();

	local_storage = rcu_dereference(task->bpf_storage);
	if (!local_storage)
		goto out;

	bpf_task_storage_lock();
	bpf_local_storage_destroy(local_storage);
	bpf_task_storage_unlock();
out:
	rcu_read_unlock();
	migrate_enable();
}

static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return ERR_CAST(pid);

	/* We should be in an RCU read-side critical section, so it should
	 * be safe to call pid_task().
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	sdata = task_storage_lookup(task, map, true);
	bpf_task_storage_unlock();
	put_pid(pid);
	return sdata ? sdata->data : NULL;
out:
	put_pid(pid);
	return ERR_PTR(err);
}
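
/* Userspace reaches the callback above through the bpf(2) syscall, with the
 * map key being a pidfd of the target task.  A minimal libbpf sketch; the
 * map fd, target_pid and "struct task_val" are illustrative assumptions,
 * not defined in this file:
 *
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *	struct task_val val;
 *	int err = bpf_map_lookup_elem(map_fd, &pidfd, &val);
 *
 * The lookup fails with ENOENT when the task holds no storage in this map.
 */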

static long bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	if ((map_flags & BPF_F_LOCK) && btf_record_has_field(map->record, BPF_UPTR))
		return -EOPNOTSUPP;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	/* We should be in an RCU read-side critical section, so it should
	 * be safe to call pid_task().
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	sdata = bpf_local_storage_update(
		task, (struct bpf_local_storage_map *)map, value, map_flags,
		true, GFP_ATOMIC);
	bpf_task_storage_unlock();

	err = PTR_ERR_OR_ZERO(sdata);
out:
	put_pid(pid);
	return err;
}
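
/* The matching syscall-side update is again keyed by a pidfd.  Minimal
 * libbpf sketch (map_fd, pidfd and struct task_val are illustrative
 * assumptions):
 *
 *	struct task_val val = { 0 };
 *	int err = bpf_map_update_elem(map_fd, &pidfd, &val, BPF_NOEXIST);
 *
 * BPF_NOEXIST creates storage only if the task has none yet in this map;
 * BPF_ANY creates or overwrites it.
 */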

static int task_storage_delete(struct task_struct *task, struct bpf_map *map,
			       bool nobusy)
{
	struct bpf_local_storage_data *sdata;

	sdata = task_storage_lookup(task, map, false);
	if (!sdata)
		return -ENOENT;

	if (!nobusy)
		return -EBUSY;

	bpf_selem_unlink(SELEM(sdata), false);

	return 0;
}

static long bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	/* We should be in an RCU read-side critical section, so it should
	 * be safe to call pid_task().
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	err = task_storage_delete(task, map, true);
	bpf_task_storage_unlock();
out:
	put_pid(pid);
	return err;
}
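
/* And the syscall-side delete, removing this map's storage from the task
 * identified by the pidfd key (illustrative libbpf sketch, same assumed
 * map_fd and pidfd as above):
 *
 *	int err = bpf_map_delete_elem(map_fd, &pidfd);
 *
 * The call fails with ENOENT if the task holds no storage in this map.
 */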

/* Called by bpf_task_storage_get*() helpers */
static void *__bpf_task_storage_get(struct bpf_map *map,
				    struct task_struct *task, void *value,
				    u64 flags, gfp_t gfp_flags, bool nobusy)
{
	struct bpf_local_storage_data *sdata;

	sdata = task_storage_lookup(task, map, nobusy);
	if (sdata)
		return sdata->data;

	/* only allocate new storage when the task is refcounted */
	if (refcount_read(&task->usage) &&
	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy) {
		sdata = bpf_local_storage_update(
			task, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST, false, gfp_flags);
		return IS_ERR(sdata) ? NULL : sdata->data;
	}

	return NULL;
}

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_task_storage_get_recur, struct bpf_map *, map, struct task_struct *,
	   task, void *, value, u64, flags, gfp_t, gfp_flags)
{
	bool nobusy;
	void *data;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
		return (unsigned long)NULL;

	nobusy = bpf_task_storage_trylock();
	data = __bpf_task_storage_get(map, task, value, flags,
				      gfp_flags, nobusy);
	if (nobusy)
		bpf_task_storage_unlock();
	return (unsigned long)data;
}

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
	   task, void *, value, u64, flags, gfp_t, gfp_flags)
{
	void *data;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
		return (unsigned long)NULL;

	bpf_task_storage_lock();
	data = __bpf_task_storage_get(map, task, value, flags,
				      gfp_flags, true);
	bpf_task_storage_unlock();
	return (unsigned long)data;
}
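
/* Program-side usage of the get helpers above.  A minimal BPF program
 * sketch; it lives in a separate BPF object built against vmlinux.h with
 * libbpf, not in this file, and the map name, value type and attach point
 * are illustrative assumptions:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, struct task_val);
 *	} task_vals SEC(".maps");
 *
 *	SEC("fentry/security_file_open")
 *	int BPF_PROG(count_open, struct file *file)
 *	{
 *		struct task_struct *task = bpf_get_current_task_btf();
 *		struct task_val *val;
 *
 *		val = bpf_task_storage_get(&task_vals, task, 0,
 *					   BPF_LOCAL_STORAGE_GET_F_CREATE);
 *		if (val)
 *			__sync_fetch_and_add(&val->counter, 1);
 *		return 0;
 *	}
 *
 * BPF_LOCAL_STORAGE_GET_F_CREATE asks the helper to allocate storage on
 * first use; without it the helper only looks up existing storage.
 */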

BPF_CALL_2(bpf_task_storage_delete_recur, struct bpf_map *, map, struct task_struct *,
	   task)
{
	bool nobusy;
	int ret;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!task)
		return -EINVAL;

	nobusy = bpf_task_storage_trylock();
	/* This helper must only be called from places where the lifetime of
	 * the task is guaranteed, either by being refcounted or by being
	 * protected by an RCU read-side critical section.
	 */
	ret = task_storage_delete(task, map, nobusy);
	if (nobusy)
		bpf_task_storage_unlock();
	return ret;
}

BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
	   task)
{
	int ret;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!task)
		return -EINVAL;

	bpf_task_storage_lock();
	/* This helper must only be called from places where the lifetime of
	 * the task is guaranteed, either by being refcounted or by being
	 * protected by an RCU read-side critical section.
	 */
	ret = task_storage_delete(task, map, true);
	bpf_task_storage_unlock();
	return ret;
}
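
/* Program-side counterpart of the delete helpers above (illustrative
 * sketch, reusing the hypothetical task_vals map from the earlier comment):
 *
 *	long err = bpf_task_storage_delete(&task_vals, task);
 *
 * The helper returns 0 on success and a negative error (e.g. -ENOENT when
 * the task has no storage in the map) otherwise.
 */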

static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -ENOTSUPP;
}

static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &task_cache, true);
}

static void task_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &task_cache, &bpf_task_storage_busy);
}

BTF_ID_LIST_GLOBAL_SINGLE(bpf_local_storage_map_btf_id, struct, bpf_local_storage_map)
const struct bpf_map_ops task_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = task_storage_map_alloc,
	.map_free = task_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_pid_task_storage_lookup_elem,
	.map_update_elem = bpf_pid_task_storage_update_elem,
	.map_delete_elem = bpf_pid_task_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_mem_usage = bpf_local_storage_map_mem_usage,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_owner_storage_ptr = task_storage_ptr,
};
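
/* Note on the ops table above: the syscall-side lookup/update/delete
 * callbacks all interpret the map key as a pidfd (see the
 * bpf_pid_task_storage_*_elem() functions), and key iteration via
 * BPF_MAP_GET_NEXT_KEY is deliberately unsupported (-ENOTSUPP), so a task
 * storage map cannot be walked with the usual get-next-key loop.
 */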

const struct bpf_func_proto bpf_task_storage_get_recur_proto = {
	.func = bpf_task_storage_get_recur,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_task_storage_get_proto = {
	.func = bpf_task_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_task_storage_delete_recur_proto = {
	.func = bpf_task_storage_delete_recur,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

const struct bpf_func_proto bpf_task_storage_delete_proto = {
	.func = bpf_task_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};