/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS			MAX_SKB_FRAGS

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};

struct sk_msg_sg {
	u32				start;
	u32				curr;
	u32				end;
	u32				size;
	u32				copybreak;
	unsigned long			copy;
	/* The extra element is used for chaining the front and sections when
	 * the list becomes partitioned (e.g. end < start). The crypto APIs
	 * require the chaining.
	 */
	struct scatterlist		data[MAX_MSG_FRAGS + 1];
};
static_assert(BITS_PER_LONG >= MAX_MSG_FRAGS);

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg		sg;
	void				*data;
	void				*data_end;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				flags;
	struct sk_buff			*skb;
	struct sock			*sk_redir;
	struct sock			*sk;
	struct list_head		list;
};

struct sk_psock_progs {
	struct bpf_prog			*msg_parser;
	struct bpf_prog			*skb_parser;
	struct bpf_prog			*skb_verdict;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
};

struct sk_psock_link {
	struct list_head		list;
	struct bpf_map			*map;
	void				*link_raw;
};

struct sk_psock_parser {
	struct strparser		strp;
	bool				enabled;
	void (*saved_data_ready)(struct sock *sk);
};

struct sk_psock_work_state {
	struct sk_buff			*skb;
	u32				len;
	u32				off;
};

struct sk_psock {
	struct sock			*sk;
	struct sock			*sk_redir;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				eval;
	struct sk_msg			*cork;
	struct sk_psock_progs		progs;
	struct sk_psock_parser		parser;
	struct sk_buff_head		ingress_skb;
	struct list_head		ingress_msg;
	unsigned long			state;
	struct list_head		link;
	spinlock_t			link_lock;
	refcount_t			refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	struct proto			*sk_proto;
	struct sk_psock_work_state	work_state;
	struct work_struct		work;
	union {
		struct rcu_head		rcu;
		struct work_struct	gc;
	};
};

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}
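
/* Example (illustrative sketch only, not lifted from a real call site): a
 * typical sendmsg-style producer initialises a message, charges the socket
 * for 'copy' bytes, copies the payload out of the iov iterator and frees
 * everything again on failure. 'sk', 'from' and 'copy' are assumed locals.
 *
 *	struct sk_msg msg;
 *	int err;
 *
 *	sk_msg_init(&msg);
 *	err = sk_msg_alloc(sk, &msg, copy, 0);
 *	if (err)
 *		return err;
 *	err = sk_msg_memcopy_from_iter(sk, from, &msg, copy);
 *	if (err) {
 *		sk_msg_free(sk, &msg);
 *		return err;
 *	}
 *	sk_msg_compute_data_pointers(&msg);	// see helper further below
 */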
static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start : end + (MAX_MSG_FRAGS - start);
}

#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = MAX_MSG_FRAGS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == MAX_MSG_FRAGS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)

static inline void sk_msg_clear_meta(struct sk_msg *msg)
{
	memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy));
}

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != MAX_MSG_FRAGS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, MAX_MSG_FRAGS);
}

static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length = size;
	dst->sg.size += size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return (msg->sg.end == msg->sg.start) && msg->sg.size;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	if (sk_msg_full(msg))
		return MAX_MSG_FRAGS;

	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, &msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}

static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, &msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}

static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, &msg->sg.copy);
		else
			__clear_bit(i, &msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}
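
/* Example (illustrative sketch): iterating the used slots of the ring. The
 * start/end indices wrap at MAX_MSG_FRAGS, so the walk relies on
 * sk_msg_iter_var_next() rather than plain increments, and slots whose bit
 * is set in sg.copy carry data that must be copied before the page may be
 * shared. 'total_copy' is a hypothetical local used only for this sketch.
 *
 *	u32 i = msg->sg.start;
 *	u32 n = sk_msg_elem_used(msg);
 *	u32 total_copy = 0;
 *
 *	while (n--) {
 *		struct scatterlist *sge = sk_msg_elem(msg, i);
 *
 *		if (test_bit(i, &msg->sg.copy))
 *			total_copy += sge->length;
 *		sk_msg_iter_var_next(i);
 *	}
 */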
static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}

static inline void sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	list_add_tail(&msg->list, &psock->ingress_msg);
}

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk->sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

static inline struct sk_psock_link *sk_psock_init_link(void)
{
	return kzalloc(sizeof(struct sk_psock_link),
		       GFP_ATOMIC | __GFP_NOWARN);
}

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);
#if defined(CONFIG_BPF_STREAM_PARSER)
void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link);
#else
static inline void sk_psock_unlink(struct sock *sk,
				   struct sk_psock_link *link)
{
}
#endif

void __sk_psock_purge_ingress_msg(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_update_proto(struct sock *sk,
					 struct sk_psock *psock,
					 struct proto *ops)
{
	psock->saved_unhash = sk->sk_prot->unhash;
	psock->saved_close = sk->sk_prot->close;
	psock->saved_write_space = sk->sk_write_space;

	psock->sk_proto = sk->sk_prot;
	sk->sk_prot = ops;
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	sk->sk_write_space = psock->saved_write_space;

	if (psock->sk_proto) {
		struct inet_connection_sock *icsk = inet_csk(sk);
		bool has_ulp = !!icsk->icsk_ulp_data;

		if (has_ulp)
			tcp_update_ulp(sk, psock->sk_proto);
		else
			sk->sk_prot = psock->sk_proto;
		psock->sk_proto = NULL;
	}
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}
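
/* Example (illustrative sketch): a psock is reached from the socket via RCU
 * and must be pinned with a reference before it is used outside the
 * read-side critical section; sk_psock_get()/sk_psock_put() below wrap that
 * pattern. A caller that only wants to kick the TX work might look roughly
 * like this ('sk' is an assumed local):
 *
 *	struct sk_psock *psock = sk_psock_get(sk);
 *
 *	if (!psock)
 *		return;
 *	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
 *		schedule_work(&psock->work);
 *	sk_psock_put(sk, psock);
 */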
static inline struct sk_psock *sk_psock_get_checked(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock) {
		if (sk->sk_prot->recvmsg != tcp_bpf_recvmsg) {
			psock = ERR_PTR(-EBUSY);
			goto out;
		}

		if (!refcount_inc_not_zero(&psock->refcnt))
			psock = ERR_PTR(-EBUSY);
	}
out:
	rcu_read_unlock();
	return psock;
}

static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_stop(struct sock *sk, struct sk_psock *psock);
void sk_psock_destroy(struct rcu_head *rcu);
void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}

static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	if (psock->parser.enabled)
		psock->parser.saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
}

static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->skb_parser, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}

#endif /* _LINUX_SKMSG_H */