/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <net/sock.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

#ifdef CONFIG_CGROUP_BPF

#define CGROUP_ATYPE(type) \
	case BPF_##type: return type

static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	CGROUP_ATYPE(CGROUP_INET_INGRESS);
	CGROUP_ATYPE(CGROUP_INET_EGRESS);
	CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
	CGROUP_ATYPE(CGROUP_SOCK_OPS);
	CGROUP_ATYPE(CGROUP_DEVICE);
	CGROUP_ATYPE(CGROUP_INET4_BIND);
	CGROUP_ATYPE(CGROUP_INET6_BIND);
	CGROUP_ATYPE(CGROUP_INET4_CONNECT);
	CGROUP_ATYPE(CGROUP_INET6_CONNECT);
	CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
	CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
	CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
	CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
	CGROUP_ATYPE(CGROUP_SYSCTL);
	CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
	CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
	CGROUP_ATYPE(CGROUP_GETSOCKOPT);
	CGROUP_ATYPE(CGROUP_SETSOCKOPT);
	CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
	default:
		return CGROUP_BPF_ATTACH_TYPE_INVALID;
	}
}

#undef CGROUP_ATYPE
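
/* Illustrative sketch (not taken from the kernel tree): attach-path code
 * converts the UAPI attach type first and rejects anything this file does
 * not know about before indexing per-attach-type state:
 *
 *	enum cgroup_bpf_attach_type atype;
 *
 *	atype = to_cgroup_bpf_attach_type(attr->attach_type);
 *	if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
 *		return -EINVAL;
 *	// atype is now a valid index into cgrp->bpf.effective[]
 */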

extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
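
/* Hedged sketch of the attach/detach side (the exact call sites live in
 * kernel/bpf/cgroup.c and are assumed here): each successful attach bumps
 * the per-attach-type static key, so the wrappers below compile to a
 * patched-out branch while no program of that type exists anywhere:
 *
 *	static_branch_inc(&cgroup_bpf_enabled_key[atype]);	// on attach
 *	static_branch_dec(&cgroup_bpf_enabled_key[atype]);	// on detach
 */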

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
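
/* Usage sketch (illustrative only; the storages[] array is an assumption):
 * walk both storage flavors, e.g. when releasing a program's storages:
 *
 *	enum bpf_cgroup_storage_type stype;
 *
 *	for_each_cgroup_storage_type(stype)
 *		bpf_cgroup_storage_free(storages[stype]);
 */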

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list_map;
	struct list_head list_cg;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
	enum bpf_attach_type type;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}
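
/* Sketch (assumption for illustration, not kernel code): the return value
 * selects which member of the union in struct bpf_cgroup_storage is live:
 *
 *	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_PERCPU)
 *		ptr = this_cpu_ptr(storage->percpu_buf);
 *	else
 *		ptr = &storage->buf->data[0];
 */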

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Opportunistic check to see whether we have any BPF program attached. */
static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
					   enum cgroup_bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_prog_array *array;

	array = rcu_access_pointer(cgrp->bpf.effective[type]);
	return array != &bpf_empty_prog_array.hdr;
}
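
/* Design note: an empty effective list always points at the shared
 * bpf_empty_prog_array singleton, so emptiness can be tested with a plain
 * pointer comparison; rcu_access_pointer() suffices because the pointer is
 * never dereferenced here.
 */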

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) &&			      \
	    cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS))		      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    CGROUP_INET_INGRESS);     \
									      \
	__ret;								      \
})
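
/* Hedged usage sketch (the real call site sits in the socket receive
 * filter path; the surrounding code here is assumed for illustration):
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		return err;	// caller drops the skb
 */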

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) {  \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk) &&				       \
		    cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS))	       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
							    CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})
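
/* Design note: on egress the skb may be owned by a request or time-wait
 * socket, which carries no usable cgroup data; sk_to_full_sk() walks up to
 * the parent full socket, and the sk_fullsock() check skips the run when no
 * such socket exists.
 */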

#define BPF_CGROUP_RUN_SK_PROG(sk, atype)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		__ret = __cgroup_bpf_run_filter_sk(sk, atype);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)			       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype))					       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,    \
							  NULL, NULL);	       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,    \
							  t_ctx, NULL);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
 * via upper bits of return code. The only flag that is supported
 * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
 * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags)       \
({									       \
	u32 __flags = 0;						       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,    \
							  NULL, &__flags);     \
		release_sock(sk);					       \
		if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE)	       \
			*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE;	       \
	}								       \
	__ret;								       \
})
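
/* Hedged sketch of a bind-time caller (modeled loosely on the inet bind
 * path; the helper name below is an assumption, not kernel code):
 *
 *	u32 bind_flags = 0;
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr,
 *						 CGROUP_INET4_BIND,
 *						 &bind_flags);
 *	if (err)
 *		return err;
 *	if (!(bind_flags & BIND_NO_CAP_NET_BIND_SERVICE))
 *		err = check_cap_net_bind_service(sk, snum);  // assumed helper
 */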

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk)				       \
	((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) ||			       \
	  cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) &&			       \
	 (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be reached via
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock under syncookie mode, so its
 * listener-sk is not attached to rsk_listener.  In this case, the
 * caller holds the listener-sk (unlocked), sets sock_ops->sk to the
 * req_sk, and calls this SOCK_OPS"_SK" macro with the listener-sk so
 * that the cgroup-bpf progs of the listener-sk are run.
 *
 * Syncookie mode or not, calling bpf_setsockopt() on a listener-sk
 * would not make sense anyway, so passing 'sock_ops->sk == req_sk' to
 * the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS))			\
		__ret = __cgroup_bpf_run_filter_sock_ops(sk,		\
							 sock_ops,	\
							 CGROUP_SOCK_OPS); \
	__ret;								\
})
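
/* Hedged usage sketch matching the comment above (variable names assumed,
 * not taken from the kernel tree):
 *
 *	struct bpf_sock_ops_kern sock_ops = { ... };
 *
 *	sock_ops.sk = req_sk;	// not a fullsock
 *	BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, listener_sk);
 */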

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) {	       \
		typeof((sock_ops)->sk) __sk = sk_to_full_sk((sock_ops)->sk);  \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
								 CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled(CGROUP_DEVICE))				      \
		__ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
							  access,	      \
							  CGROUP_DEVICE);     \
									      \
	__ret;								      \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)	       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SYSCTL))				       \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
						       buf, count, pos,	       \
						       CGROUP_SYSCTL);	       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,  \
				       kernel_optval)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) &&			       \
	    cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT))		       \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen,	       \
							   kernel_optval);     \
	__ret;								       \
})
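
/* Hedged sketch of the setsockopt glue (modeled on the syscall path; the
 * surrounding names are assumptions): a non-NULL kernel_optval on return
 * means a prog rewrote the value, and the kernel copy must be used (and
 * later freed) instead of the user buffer:
 *
 *	char *kernel_optval = NULL;
 *
 *	err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sk, &level, &optname, optval,
 *					     &optlen, &kernel_optval);
 *	if (err < 0)
 *		return err;
 *	if (kernel_optval)
 *		// pass kernel_optval/optlen down, then kfree(kernel_optval)
 */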

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			       \
		get_user(__ret, optlen);				       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,  \
				       max_optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) &&			       \
	    cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT))		       \
		if (!(sock)->sk_prot->bpf_bypass_getsockopt ||		       \
		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
					  tcp_bpf_bypass_getsockopt,	       \
					  level, optname))		       \
			__ret = __cgroup_bpf_run_filter_getsockopt(	       \
				sock, level, optname, optval, optlen,	       \
				max_optlen, retval);			       \
	__ret;								       \
})
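
/* Design note: sk_prot->bpf_bypass_getsockopt lets a protocol exempt
 * specific options from the cgroup hook; for TCP the check resolves to
 * tcp_bpf_bypass_getsockopt(), called via INDIRECT_CALL_INET_1() to avoid
 * a retpoline-expensive indirect call on this path.
 */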

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval,      \
					    optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			       \
		__ret = __cgroup_bpf_run_filter_getsockopt_kern(	       \
			sock, level, optname, optval, optlen, retval);	       \
	__ret;								       \
})

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);

#else

static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value)
{
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value,
						   u64 flags)
{
	return 0;
}

#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */