xref: /linux-6.15/include/linux/bpf-cgroup.h (revision 9b3579fc)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BPF_CGROUP_H
3 #define _BPF_CGROUP_H
4 
5 #include <linux/errno.h>
6 #include <linux/jump_label.h>
7 #include <uapi/linux/bpf.h>
8 
9 struct sock;
10 struct sockaddr;
11 struct cgroup;
12 struct sk_buff;
13 struct bpf_sock_ops_kern;
14 
15 #ifdef CONFIG_CGROUP_BPF
16 
/* Static key guarding all cgroup-bpf hook macros below: when no program
 * is attached the hooks compile down to a patched-out branch.
 * NOTE(review): presumably flipped on first attach — confirm in
 * kernel/bpf/cgroup.c.
 */
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
19 
/* One attached program; lives on a cgroup_bpf::progs[type] list. */
struct bpf_prog_list {
	struct list_head node;	/* linkage in the per-attach-type list */
	struct bpf_prog *prog;	/* the attached program */
};
24 
25 struct bpf_prog_array;
26 
/* Per-cgroup BPF state.  Programs are tracked two ways: the 'progs'
 * lists hold what was explicitly attached to this cgroup, while the
 * 'effective' arrays hold what actually runs for this cgroup
 * (NOTE(review): presumably combined with ancestors via
 * cgroup_bpf_inherit() — confirm).
 */
struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* attached progs to this cgroup and attach flags
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};
42 
/* Lifetime management of a cgroup's bpf state. */
void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

/* Raw attach/detach/query primitives.  Per the locked wrappers below,
 * these are expected to run with cgroup_mutex already held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);
52 
/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex
 * (these variants take and release the mutex themselves).
 */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
60 
/* Hook runners: execute the effective programs of the given attach type
 * against an skb, a socket, a sockaddr, a sock_ops context, or a device
 * access request.  Return 0 to allow; NOTE(review): the nonzero
 * deny/error convention is defined by the implementations in
 * kernel/bpf/cgroup.c — confirm there.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);
80 /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
/* Run INET ingress programs for sk's cgroup on skb.
 * Evaluates to 0 when cgroup-bpf is disabled, otherwise to the result of
 * __cgroup_bpf_run_filter_skb().
 */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})
90 
/* Egress variant: only runs when sk is non-NULL and is skb's own socket,
 * and only on a full socket after upgrading via sk_to_full_sk()
 * (NOTE(review): presumably maps request/timewait socks to the full
 * listener socket — confirm).  Evaluates to 0 otherwise.
 */
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})
102 
/* Common helper for the sock-level hooks below: run programs of the
 * given attach type on sk; 0 when cgroup-bpf is disabled.
 */
#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})
111 
/* Sock-level hooks built on BPF_CGROUP_RUN_SK_PROG: socket creation and
 * IPv4/IPv6 post-bind.
 */
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
120 
/* Run sockaddr programs of the given attach type on (sk, uaddr) with no
 * extra context (t_ctx is NULL); 0 when cgroup-bpf is disabled.
 */
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  NULL);	       \
	__ret;								       \
})
129 
/* As BPF_CGROUP_RUN_SA_PROG, but takes the socket lock for the duration
 * of the run and forwards an opaque t_ctx to the program runner.
 * NOTE(review): caller must NOT already hold the socket lock — confirm.
 */
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)	{					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  t_ctx);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})
141 
/* bind(2) hooks — non-locking variant (NOTE(review): callers presumably
 * already hold the socket lock — confirm at the call sites).
 */
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)
147 
/* True when cgroup-bpf is enabled and sk's protocol provides a
 * pre_connect hook.  The macro argument is parenthesized so that an
 * arbitrary expression (e.g. a cast) can safely be passed as 'sk' —
 * the previous form expanded 'sk->sk_prot' unparenthesized.
 */
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    (sk)->sk_prot->pre_connect)
150 
/* connect(2) hooks: plain variants use the non-locking runner, the _LOCK
 * variants take the socket lock themselves.  The UDP sendmsg hooks also
 * forward an opaque t_ctx to the program runner.
 */
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
168 
/* Run the BPF_CGROUP_SOCK_OPS programs attached to the cgroup of the
 * socket referenced by sock_ops->sk.  Evaluates to 0 when cgroup-bpf is
 * disabled, there is no socket, or the upgraded socket is not a full
 * socket; otherwise to __cgroup_bpf_run_filter_sock_ops()'s result.
 *
 * The temporary must be declared with typeof((sock_ops)->sk), NOT
 * typeof(sk): this macro has no 'sk' parameter, so the old spelling
 * silently captured whatever identifier 'sk' happened to exist at the
 * expansion site, and failed to compile where none did.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		typeof((sock_ops)->sk) __sk = sk_to_full_sk((sock_ops)->sk);   \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
181 
/* Device access control hook: check (type, major, minor, access) against
 * the effective BPF_CGROUP_DEVICE programs; 0 when cgroup-bpf is
 * disabled.
 */
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	      \
							  BPF_CGROUP_DEVICE); \
									      \
	__ret;								      \
})
/* bpf(2)-syscall plumbing (union bpf_attr based) for cgroup program
 * attach/detach/query.
 */
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
198 #else
199 
/* CONFIG_CGROUP_BPF=n: empty per-cgroup state and no-op lifetime stubs. */
struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
204 
/* Stub: cgroup attach is an invalid request without CONFIG_CGROUP_BPF. */
static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}
211 
/* Stub: cgroup detach is an invalid request without CONFIG_CGROUP_BPF. */
static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}
217 
/* Stub: cgroup query is an invalid request without CONFIG_CGROUP_BPF. */
static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}
223 
/* All hook macros compile away to 0 (allow) when CONFIG_CGROUP_BPF=n. */
#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
240 #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
241 
242 #endif /* CONFIG_CGROUP_BPF */
243 
244 #endif /* _BPF_CGROUP_H */
245