/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

DECLARE_PER_CPU(struct bpf_cgroup_storage *,
		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
	enum bpf_attach_type type;
};

extern const struct bpf_link_ops bpf_cgroup_link_lops;

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* Programs attached to this cgroup and their attach flags.
	 * When flags is 0 or BPF_F_ALLOW_OVERRIDE, the progs list has
	 * either zero or one element; with BPF_F_ALLOW_MULTI it can
	 * hold up to BPF_CGROUP_MAX_PROGS entries.
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array *inactive;

	/* reference counter used to detach bpf programs after cgroup removal */
	struct percpu_ref refcnt;

	/* cgroup_bpf is released using a work queue */
	struct work_struct release_work;
};

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type);
int __cgroup_bpf_replace(struct cgroup *cgrp, struct bpf_cgroup_link *link,
			 struct bpf_prog *new_prog);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp,
		      struct bpf_prog *prog, struct bpf_prog *replace_prog,
		      struct bpf_cgroup_link *link, enum bpf_attach_type type,
		      u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type);
int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *old_prog,
		       struct bpf_prog *new_prog);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
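
/*
 * Illustrative sketch, not part of the original header: an in-kernel
 * caller would attach a program through the cgroup_mutex-protected
 * wrapper above. The helper name attach_egress_prog() is an assumption
 * made for the example.
 *
 *	static int attach_egress_prog(struct cgroup *cgrp,
 *				      struct bpf_prog *prog)
 *	{
 *		// cgroup_bpf_attach() takes cgroup_mutex itself, so the
 *		// caller must not already hold it.
 *		return cgroup_bpf_attach(cgrp, prog, NULL, NULL,
 *					 BPF_CGROUP_INET_EGRESS,
 *					 BPF_F_ALLOW_MULTI);
 *	}
 */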

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void __user *buf, size_t *pcount,
				   loff_t *ppos, void **new_buf,
				   enum bpf_attach_type type);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
					  *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}

struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);
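
/*
 * Illustrative sketch, not part of the original header: the program-run
 * path is expected to publish each program's storage pointers with
 * bpf_cgroup_storage_set() on the local CPU before invoking the program,
 * so that the bpf_get_local_storage() helper can find them:
 *
 *	struct bpf_prog_list *pl;	// one entry of cgroup_bpf::progs
 *
 *	bpf_cgroup_storage_set(pl->storage);
 *	// ... run pl->prog while the per-CPU pointers are set ...
 */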

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  NULL);	       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  t_ctx);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)

/* Note: the original used typeof(sk) here, but this macro has no "sk"
 * parameter; use the concrete struct sock * type instead.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		struct sock *__sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	       \
							  BPF_CGROUP_DEVICE);  \
									       \
	__ret;								       \
})
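
/*
 * Illustrative sketch, not part of the original header: a device access
 * check would wrap the macro above. __cgroup_bpf_check_dev_permission()
 * returns 0 when access is allowed, so a non-zero result denies it. The
 * helper name devcg_check_access() is an assumption for the example.
 *
 *	static int devcg_check_access(short type, u32 major, u32 minor,
 *				      short access)
 *	{
 *		return BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major,
 *							 minor, access);
 *	}
 */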

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf)  \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
						       buf, count, pos, nbuf,  \
						       BPF_CGROUP_SYSCTL);     \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,   \
				       kernel_optval)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen,	       \
							   kernel_optval);     \
	__ret;								       \
})

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		get_user(__ret, optlen);				       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,   \
				       max_optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_getsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen, max_optlen, \
							   retval);	       \
	__ret;								       \
})

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
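
/*
 * Illustrative sketch, not part of the original header: a getsockopt()
 * implementation would read the user-supplied length first, then let
 * attached programs inspect or rewrite the result. Variable names are
 * assumptions made for the example.
 *
 *	int max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
 *	int err = sk->sk_prot->getsockopt(sk, level, optname,
 *					  optval, optlen);
 *
 *	return BPF_CGROUP_RUN_PROG_GETSOCKOPT(sk, level, optname, optval,
 *					      optlen, max_optlen, err);
 */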

#else

struct bpf_prog;
struct bpf_link;
struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_replace(struct bpf_link *link,
				     struct bpf_prog *old_prog,
				     struct bpf_prog *new_prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline void bpf_cgroup_storage_set(
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value)
{
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value,
						   u64 flags)
{
	return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */