/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* Programs attached to this cgroup and their attach flags.
	 * When flags is 0 or BPF_F_ALLOW_OVERRIDE, the progs list holds
	 * either zero or one element; with BPF_F_ALLOW_MULTI it can hold
	 * up to BPF_CGROUP_MAX_PROGS entries.
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*(), protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);
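/* For illustration, a sketch of the attach-flag semantics from user
 * space; example_attach(), prog_fd and cgroup_fd are hypothetical.
 * prog_fd is assumed to refer to a loaded BPF_PROG_TYPE_CGROUP_SKB
 * program and cgroup_fd to an open cgroup v2 directory:
 *
 *	static int example_attach(int cgroup_fd, int prog_fd)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.target_fd	   = cgroup_fd;
 *		attr.attach_bpf_fd = prog_fd;
 *		attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *		attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *
 *		return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *	}
 *
 * With BPF_F_ALLOW_MULTI, descendant cgroups may attach their own
 * programs and every effective program runs; with no flags an attach in
 * a descendant is rejected once an ancestor holds a program, and with
 * BPF_F_ALLOW_OVERRIDE a descendant program overrides the inherited one
 * for its subtree.
 */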
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type);    \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)	{					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type);    \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		struct sock *__sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	       \
							  BPF_CGROUP_DEVICE);  \
									       \
	__ret;								       \
})
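/* For illustration, how a call site is expected to consume these
 * wrappers; example_sk_receive() is hypothetical, but the pattern of
 * propagating a non-zero result (typically -EPERM) as the error code
 * matches real users such as sk_filter_trim_cap():
 *
 *	static int example_sk_receive(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int err;
 *
 *		err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *		if (err)
 *			return err;
 *
 *		return 0;
 *	}
 *
 * When CONFIG_CGROUP_BPF is disabled, the stubs below expand to
 * ({ 0; }), so call sites need no #ifdef guards.
 */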
#else

struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */