/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS			MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS			(MAX_MSG_FRAGS + 1)

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};

struct sk_msg_sg {
	u32				start;
	u32				curr;
	u32				end;
	u32				size;
	u32				copybreak;
	DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
	/* The extra two elements:
	 * 1) used for chaining the front and sections when the list becomes
	 *    partitioned (e.g. end < start). The crypto APIs require the
	 *    chaining;
	 * 2) to chain tailer SG entries after the message.
	 */
	struct scatterlist		data[MAX_MSG_FRAGS + 2];
};

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg		sg;
	void				*data;
	void				*data_end;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				flags;
	struct sk_buff			*skb;
	struct sock			*sk_redir;
	struct sock			*sk;
	struct list_head		list;
};

struct sk_psock_progs {
	struct bpf_prog			*msg_parser;
	struct bpf_prog			*stream_parser;
	struct bpf_prog			*stream_verdict;
	struct bpf_prog			*skb_verdict;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
};

struct sk_psock_link {
	struct list_head		list;
	struct bpf_map			*map;
	void				*link_raw;
};

struct sk_psock_work_state {
	struct sk_buff			*skb;
	u32				len;
	u32				off;
};

struct sk_psock {
	struct sock			*sk;
	struct sock			*sk_redir;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				eval;
	struct sk_msg			*cork;
	struct sk_psock_progs		progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	struct strparser		strp;
#endif
	struct sk_buff_head		ingress_skb;
	struct list_head		ingress_msg;
	spinlock_t			ingress_lock;
	unsigned long			state;
	struct list_head		link;
	spinlock_t			link_lock;
	refcount_t			refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_destroy)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	void (*saved_data_ready)(struct sock *sk);
	int  (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
				     bool restore);
	struct proto			*sk_proto;
	struct mutex			work_mutex;
	struct sk_psock_work_state	work_state;
	struct work_struct		work;
	struct rcu_work			rwork;
};

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
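
/* Fill msg->sg from a user iov_iter: the zerocopy variant references the
 * user pages directly in the scatterlist, while the memcopy variant copies
 * the bytes into pages previously reserved with sk_msg_alloc().
 */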
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags);
bool sk_msg_is_readable(struct sock *sk);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}

#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = NR_MSG_FRAG_IDS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == NR_MSG_FRAG_IDS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}

static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length  = size;
	dst->sg.size		   += size;
	src->sg.size		   -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}

static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}
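
/* Walk the element ring from @i up to (but not including) sg.end and set
 * or clear the "needs copy" bit.  Elements with the bit set are hidden
 * from BPF programs by sk_msg_compute_data_pointers().
 */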
static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, msg->sg.copy);
		else
			__clear_bit(i, msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}

static inline void sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
		list_add_tail(&msg->list, &psock->ingress_msg);
	else {
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
					       struct sk_msg *msg)
{
	struct sk_msg *ret;

	spin_lock_bh(&psock->ingress_lock);
	if (list_is_last(&msg->list, &psock->ingress_msg))
		ret = NULL;
	else
		ret = list_next_entry(msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}
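
/* The ingress queue helpers above serialize on ingress_lock; the emptiness
 * check below is lockless and tolerates a NULL psock.
 */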
static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void kfree_sk_msg(struct sk_msg *msg)
{
	if (msg->skb)
		consume_skb(msg->skb);
	kfree(msg);
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock, bool wait);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

static inline struct sk_psock_link *sk_psock_init_link(void)
{
	return kzalloc(sizeof(struct sk_psock_link),
		       GFP_ATOMIC | __GFP_NOWARN);
}

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	if (psock->psock_update_sk_prot)
		psock->psock_update_sk_prot(sk, psock, true);
}

static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}

static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		psock->saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
}

static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}

static inline int psock_replace_prog(struct bpf_prog **pprog,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	if (cmpxchg(pprog, old, prog) != old)
		return -ENOENT;

	if (old)
		bpf_prog_put(old);

	return 0;
}

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->stream_parser, NULL);
	psock_set_prog(&progs->stream_verdict, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}
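
/* Entry point for the kTLS receive path: runs the stream verdict program
 * on a decrypted skb and returns an __sk_action result.
 */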
int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
	if (!psock)
		return false;
	return !!psock->saved_data_ready;
}

static inline bool sk_is_udp(const struct sock *sk)
{
	return sk->sk_type == SOCK_DGRAM &&
	       sk->sk_protocol == IPPROTO_UDP;
}

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

#define BPF_F_STRPARSER	(1UL << 1)

/* We only have two bits so far. */
#define BPF_F_PTR_MASK	~(BPF_F_INGRESS | BPF_F_STRPARSER)

static inline bool skb_bpf_strparser(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_STRPARSER;
}

static inline void skb_bpf_set_strparser(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_STRPARSER;
}

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_INGRESS;
}

static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_INGRESS;
}

static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
				     bool ingress)
{
	skb->_sk_redir = (unsigned long)sk_redir;
	if (ingress)
		skb->_sk_redir |= BPF_F_INGRESS;
}

static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
}

static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->_sk_redir = 0;
}
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */