/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS			MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS			(MAX_MSG_FRAGS + 1)

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};
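
/* Note: __SK_DROP and __SK_PASS line up with the UAPI enum sk_action
 * verdicts a BPF program returns (SK_DROP == 0, SK_PASS == 1), while
 * __SK_REDIRECT and __SK_NONE are kernel-internal states used while a
 * verdict is being resolved.
 */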

struct sk_msg_sg {
	u32				start;
	u32				curr;
	u32				end;
	u32				size;
	u32				copybreak;
	DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
	/* The extra two elements are:
	 * 1) used for chaining the front and tail sections when the list
	 *    becomes partitioned (e.g. end < start). The crypto APIs require
	 *    the chaining;
	 * 2) to chain tailer SG entries after the message.
	 */
	struct scatterlist		data[MAX_MSG_FRAGS + 2];
};
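
/* start/curr/end index into data[] as a ring: start is the first element
 * with unconsumed data, end is one past the last populated element, curr
 * marks the element currently being filled during alloc/copy, and size is
 * the total byte count across the live elements. When the ring wraps,
 * end < start and sk_msg_iter_dist() below accounts for the wrap-around.
 */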

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg		sg;
	void				*data;
	void				*data_end;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				flags;
	struct sk_buff			*skb;
	struct sock			*sk_redir;
	struct sock			*sk;
	struct list_head		list;
};
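
/* apply_bytes and cork_bytes are driven from BPF via the
 * bpf_msg_apply_bytes() and bpf_msg_cork_bytes() helpers: the former bounds
 * how many bytes the last verdict covers, the latter how many bytes to
 * accumulate before the verdict program runs again.
 */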

struct sk_psock_progs {
	struct bpf_prog			*msg_parser;
	struct bpf_prog			*stream_parser;
	struct bpf_prog			*stream_verdict;
	struct bpf_prog			*skb_verdict;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
	SK_PSOCK_RX_STRP_ENABLED,
};

struct sk_psock_link {
	struct list_head		list;
	struct bpf_map			*map;
	void				*link_raw;
};

struct sk_psock_work_state {
	u32				len;
	u32				off;
};

struct sk_psock {
	struct sock			*sk;
	struct sock			*sk_redir;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				eval;
	bool				redir_ingress; /* undefined if sk_redir is null */
	struct sk_msg			*cork;
	struct sk_psock_progs		progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	struct strparser		strp;
#endif
	struct sk_buff_head		ingress_skb;
	struct list_head		ingress_msg;
	spinlock_t			ingress_lock;
	unsigned long			state;
	struct list_head		link;
	spinlock_t			link_lock;
	refcount_t			refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_destroy)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	void (*saved_data_ready)(struct sock *sk);
	/* psock_update_sk_prot may be called with restore=false many times
	 * so the handler must be safe for this case. It will be called
	 * exactly once with restore=true when the psock is being destroyed
	 * and psock refcnt is zero, but before an RCU grace period.
	 */
	int  (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
				     bool restore);
	struct proto			*sk_proto;
	struct mutex			work_mutex;
	struct sk_psock_work_state	work_state;
	struct delayed_work		work;
	struct sock			*sk_pair;
	struct rcu_work			rwork;
};

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags);
bool sk_msg_is_readable(struct sock *sk);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}
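
/* Worked example: with the default MAX_SKB_FRAGS of 17, NR_MSG_FRAG_IDS is
 * 18, so a wrapped ring with start == 16 and end == 2 holds
 * sk_msg_iter_dist(16, 2) == 2 + (18 - 16) == 4 elements.
 */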

#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = NR_MSG_FRAG_IDS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == NR_MSG_FRAG_IDS)		\
			var = 0;			\
	} while (0)
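
/* Both macros wrap at the ring boundary: continuing the example above,
 * sk_msg_iter_var_next() takes var from NR_MSG_FRAG_IDS - 1 (17) back to 0,
 * and sk_msg_iter_var_prev() takes 0 back to 17.
 */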

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}
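
/* Minimal usage sketch (hypothetical caller, error handling omitted): a
 * message is zeroed and its scatterlist marked before elements are added:
 *
 *	struct sk_msg msg;
 *
 *	sk_msg_init(&msg);
 *	sk_msg_page_add(&msg, page, len, 0);
 *	sk_msg_compute_data_pointers(&msg);
 */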

static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length  = size;
	dst->sg.size		   += size;
	src->sg.size		   -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}
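
/* The ring is considered full at MAX_MSG_FRAGS elements even though data[]
 * has MAX_MSG_FRAGS + 2 slots: keeping one ring index unused lets
 * start == end unambiguously mean "empty", and the extra slots are reserved
 * for the chaining described in struct sk_msg_sg above.
 */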

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}
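
/* data/data_end form the window a BPF msg verdict program may access
 * directly; they cover only the first live element. When that element is
 * marked in the copy bitmap the window is left NULL, and the program is
 * expected to make bytes visible via the bpf_msg_pull_data() helper rather
 * than touching the shared page directly.
 */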

static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}

static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, msg->sg.copy);
		else
			__clear_bit(i, msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return __rcu_dereference_sk_user_data_with_flags(sk,
							 SK_USER_DATA_PSOCK);
}
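
/* The pointer returned by sk_psock() is only stable under rcu_read_lock();
 * callers that need it past the read-side section take a reference, as
 * sk_psock_get() below does. A typical lookup (sketch):
 *
 *	rcu_read_lock();
 *	psock = sk_psock(sk);
 *	if (psock)
 *		... use psock ...
 *	rcu_read_unlock();
 */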

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}

static inline void sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		list_add_tail(&msg->list, &psock->ingress_msg);
	} else {
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
					       struct sk_msg *msg)
{
	struct sk_msg *ret;

	spin_lock_bh(&psock->ingress_lock);
	if (list_is_last(&msg->list, &psock->ingress_msg))
		ret = NULL;
	else
		ret = list_next_entry(msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}
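
/* Each helper above takes ingress_lock only for its own step, so a walk
 * like the sketch below is safe only while the caller otherwise guarantees
 * the entries cannot be freed underneath it (e.g. it is the sole consumer):
 *
 *	for (msg = sk_psock_peek_msg(psock); msg;
 *	     msg = sk_psock_next_msg(psock, msg))
 *		...
 */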

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void kfree_sk_msg(struct sk_msg *msg)
{
	if (msg->skb)
		consume_skb(msg->skb);
	kfree(msg);
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);
static inline struct sk_psock_link *sk_psock_init_link(void)
{
	return kzalloc(sizeof(struct sk_psock_link),
		       GFP_ATOMIC | __GFP_NOWARN);
}

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	if (psock->psock_update_sk_prot)
		psock->psock_update_sk_prot(sk, psock, true);
}

static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}
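
/* Typical get/put pairing (sketch; refcount_inc_not_zero() in
 * sk_psock_get() means a psock already being torn down is seen as absent):
 *
 *	psock = sk_psock_get(sk);
 *	if (!psock)
 *		return;
 *	...
 *	sk_psock_put(sk, psock);
 */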

static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		psock->saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
}

static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}
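
/* xchg() swaps the new program in and hands back the old one so its
 * reference can be dropped; passing NULL therefore detaches a program, as
 * psock_progs_drop() below does for every slot.
 */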

static inline int psock_replace_prog(struct bpf_prog **pprog,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	if (cmpxchg(pprog, old, prog) != old)
		return -ENOENT;

	if (old)
		bpf_prog_put(old);

	return 0;
}
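
/* Unlike psock_set_prog(), this is a compare-and-swap: the update only
 * happens if *pprog still holds the program the caller expects, which lets
 * racing updaters detect that someone else replaced the slot first and
 * back off with -ENOENT.
 */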

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->stream_parser, NULL);
	psock_set_prog(&progs->stream_verdict, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
	if (!psock)
		return false;
	return !!psock->saved_data_ready;
}

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

#define BPF_F_STRPARSER	(1UL << 1)

/* We only have two bits so far. */
#define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER)

static inline bool skb_bpf_strparser(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_STRPARSER;
}

static inline void skb_bpf_set_strparser(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_STRPARSER;
}

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_INGRESS;
}

static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_INGRESS;
}

static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
				     bool ingress)
{
	skb->_sk_redir = (unsigned long)sk_redir;
	if (ingress)
		skb->_sk_redir |= BPF_F_INGRESS;
}
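
/* _sk_redir stuffs flag bits into the low bits of the sock pointer, which
 * relies on struct sock allocations being at least 4-byte aligned so that
 * BPF_F_INGRESS (bit 0) and BPF_F_STRPARSER (bit 1) never collide with
 * pointer bits. Example round trip (sketch):
 *
 *	skb_bpf_set_redir(skb, sk, true);
 *	sk = skb_bpf_redirect_fetch(skb);	(flag bits masked off)
 *	if (skb_bpf_ingress(skb))
 *		...
 */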

static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
}

static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->_sk_redir = 0;
}
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */