/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

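/*
 * A minimal sketch of how this key is meant to be toggled; the real
 * bookkeeping lives in the attach/detach paths (kernel/cgroup/):
 *
 *	static_branch_inc(&cgroup_bpf_enabled_key);	// on prog attach
 *	static_branch_dec(&cgroup_bpf_enabled_key);	// on prog detach
 *
 * Until the first attach, every cgroup_bpf_enabled test below compiles
 * to a patched-out branch, so systems without cgroup-bpf programs pay
 * essentially nothing.
 */
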
struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* Programs attached to this cgroup, together with their attach
	 * flags. When flags is 0 or BPF_F_ALLOW_OVERRIDE, the progs list
	 * holds either zero or one element; with BPF_F_ALLOW_MULTI it can
	 * hold up to BPF_CGROUP_MAX_PROGS entries.
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};

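/*
 * Illustrative userspace sketch of the flag semantics above, using the
 * raw bpf(2) syscall (prog_fd/cg_fd are placeholder descriptors):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cg_fd;	// open fd of the cgroup directory
 *	attr.attach_bpf_fd = prog_fd;	// fd of a loaded cgroup prog
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * With flags == 0 or BPF_F_ALLOW_OVERRIDE the new program replaces (or
 * is rejected in favour of) the single entry in progs[]; with
 * BPF_F_ALLOW_MULTI it is appended, up to BPF_CGROUP_MAX_PROGS.
 */
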
void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*(), protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);

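/*
 * The wrappers are expected to be thin lock/call/unlock shims; a sketch
 * of the attach side (the actual definitions live in kernel/cgroup/):
 *
 *	int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
 *			      enum bpf_attach_type type, u32 flags)
 *	{
 *		int ret;
 *
 *		mutex_lock(&cgroup_mutex);
 *		ret = __cgroup_bpf_attach(cgrp, prog, type, flags);
 *		mutex_unlock(&cgroup_mutex);
 *		return ret;
 *	}
 */
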
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

/* Wrappers for the __cgroup_bpf_*() runtime helpers above. Each is
 * guarded by cgroup_bpf_enabled, so it reduces to a patched-out branch
 * when no cgroup-bpf program is attached anywhere on the system.
 */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

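/*
 * Typical ingress call site, sketched after sk_filter_trim_cap() in
 * net/core/filter.c: the cgroup program runs before the socket filter,
 * and a non-zero return (-EPERM) drops the skb.
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		return err;
 */
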
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

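/*
 * Typical egress call site, sketched after ip_finish_output() in
 * net/ipv4/ip_output.c: a rejection from the program surfaces as a
 * non-zero return and the packet is freed before reaching the device.
 *
 *	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 *	if (ret) {
 *		kfree_skb(skb);
 *		return ret;
 *	}
 */
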
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk,			       \
						 BPF_CGROUP_INET_SOCK_CREATE); \
	}								       \
	__ret;								       \
})

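/*
 * Typical call site, sketched after inet_create() in net/ipv4/af_inet.c:
 * lets an attached program veto creation of new INET sockets.
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
 *	if (err) {
 *		sk_common_release(sk);
 *		goto out;
 *	}
 */
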
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		/* map request socks to their full listener socket */	       \
		struct sock *__sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})

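/*
 * Typical caller, sketched after tcp_call_bpf() in include/net/tcp.h:
 * fill a struct bpf_sock_ops_kern on the stack, then let any attached
 * program observe or tune TCP behaviour for this socket.
 *
 *	struct bpf_sock_ops_kern sock_ops;
 *
 *	memset(&sock_ops, 0, sizeof(sock_ops));
 *	sock_ops.sk = sk;
 *	sock_ops.op = op;
 *
 *	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
 */
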
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	      \
							  BPF_CGROUP_DEVICE); \
									      \
	__ret;								      \
})
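
/*
 * Typical caller, sketched after the device cgroup checks in
 * security/device_cgroup.c: gate access to char/block device nodes.
 *
 *	rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
 *	if (rc)
 *		return -EPERM;
 */
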
#else

struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */