/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS			MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS			(MAX_MSG_FRAGS + 1)

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};

struct sk_msg_sg {
	u32				start;
	u32				curr;
	u32				end;
	u32				size;
	u32				copybreak;
	unsigned long			copy;
	/* The extra two elements:
	 * 1) used for chaining the front and back sections when the list
	 *    becomes partitioned (e.g. end < start). The crypto APIs require
	 *    the chaining;
	 * 2) to chain trailer SG entries after the message.
	 */
	struct scatterlist		data[MAX_MSG_FRAGS + 2];
};
static_assert(BITS_PER_LONG >= NR_MSG_FRAG_IDS);
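
/* Editorial sketch (not part of the upstream header): sg.data[] is used as a
 * ring indexed by start/curr/end, so NR_MSG_FRAG_IDS distinct index values
 * (one more than the MAX_MSG_FRAGS usable slots) keep a full ring
 * distinguishable from an empty one, and the copy bitmap needs one bit per
 * index, which is what the static_assert above checks. Assuming
 * MAX_SKB_FRAGS == 17 (the common 4K-page configuration, so
 * NR_MSG_FRAG_IDS == 18), the wrap-around arithmetic of sk_msg_iter_dist()
 * below works out as:
 *
 *	start = 16, end = 2;			   the ring has wrapped
 *	sk_msg_iter_dist(16, 2) == 2 + (18 - 16);  4 elements in use
 */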

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg		sg;
	void				*data;
	void				*data_end;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				flags;
	struct sk_buff			*skb;
	struct sock			*sk_redir;
	struct sock			*sk;
	struct list_head		list;
};

struct sk_psock_progs {
	struct bpf_prog			*msg_parser;
	struct bpf_prog			*stream_parser;
	struct bpf_prog			*stream_verdict;
	struct bpf_prog			*skb_verdict;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
};

struct sk_psock_link {
	struct list_head		list;
	struct bpf_map			*map;
	void				*link_raw;
};

struct sk_psock_work_state {
	struct sk_buff			*skb;
	u32				len;
	u32				off;
};

struct sk_psock {
	struct sock			*sk;
	struct sock			*sk_redir;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				eval;
	struct sk_msg			*cork;
	struct sk_psock_progs		progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	struct strparser		strp;
#endif
	struct sk_buff_head		ingress_skb;
	struct list_head		ingress_msg;
	spinlock_t			ingress_lock;
	unsigned long			state;
	struct list_head		link;
	spinlock_t			link_lock;
	refcount_t			refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	void (*saved_data_ready)(struct sock *sk);
	int  (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
				     bool restore);
	struct proto			*sk_proto;
	struct mutex			work_mutex;
	struct sk_psock_work_state	work_state;
	struct work_struct		work;
	struct rcu_work			rwork;
};

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);
int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, int flags,
		     long timeo, int *err);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}

#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = NR_MSG_FRAG_IDS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == NR_MSG_FRAG_IDS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)
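
/* Editorial sketch (not upstream code): the iterator macros above implement
 * modular arithmetic over NR_MSG_FRAG_IDS, so walking the in-use slots of a
 * message looks roughly like this (assuming "msg" is an initialized sk_msg):
 *
 *	u32 i = msg->sg.start;
 *
 *	while (i != msg->sg.end) {
 *		struct scatterlist *sge = sk_msg_elem(msg, i);
 *
 *		process sge->length bytes of the element here
 *		sk_msg_iter_var_next(i);
 *	}
 */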

static inline void sk_msg_clear_meta(struct sk_msg *msg)
{
	memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy));
}

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}

static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length  = size;
	dst->sg.size		   += size;
	src->sg.size		   -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, &msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}

static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, &msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}
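
/* Editorial sketch (not upstream code): appending a page fragment to a
 * message. sk_msg_page_add() takes its own page reference and marks the new
 * slot in the copy bitmap, but it does not check for space, so callers are
 * expected to test sk_msg_full() first:
 *
 *	if (!sk_msg_full(msg)) {
 *		sk_msg_page_add(msg, page, len, offset);
 *		sk_msg_compute_data_pointers(msg);
 *	}
 */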

static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, &msg->sg.copy);
		else
			__clear_bit(i, &msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}

static inline void sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	spin_lock_bh(&psock->ingress_lock);
	list_add_tail(&msg->list, &psock->ingress_msg);
	spin_unlock_bh(&psock->ingress_lock);
}

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
					       struct sk_msg *msg)
{
	struct sk_msg *ret;

	spin_lock_bh(&psock->ingress_lock);
	if (list_is_last(&msg->list, &psock->ingress_msg))
		ret = NULL;
	else
		ret = list_next_entry(msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void kfree_sk_msg(struct sk_msg *msg)
{
	if (msg->skb)
		consume_skb(msg->skb);
	kfree(msg);
}
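
/* Editorial sketch (not upstream code): a typical teardown loop that drains
 * the ingress queue, assuming the caller holds a reference on the psock:
 *
 *	struct sk_msg *msg;
 *
 *	while ((msg = sk_psock_dequeue_msg(psock))) {
 *		sk_msg_free(psock->sk, msg);
 *		kfree_sk_msg(msg);
 *	}
 */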

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk->sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock, bool wait);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

static inline struct sk_psock_link *sk_psock_init_link(void)
{
	return kzalloc(sizeof(struct sk_psock_link),
		       GFP_ATOMIC | __GFP_NOWARN);
}

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	if (psock->psock_update_sk_prot)
		psock->psock_update_sk_prot(sk, psock, true);
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}
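
/* Editorial sketch (not upstream code): the usual lookup/use/release pattern.
 * sk_psock_get() only succeeds while the refcount is non-zero, and the last
 * sk_psock_put() hands the psock to sk_psock_drop() for teardown:
 *
 *	struct sk_psock *psock = sk_psock_get(sk);
 *
 *	if (psock) {
 *		use the psock here; the reference pins it
 *		sk_psock_put(sk, psock);
 *	}
 */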

static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		psock->saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
}

static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}

static inline int psock_replace_prog(struct bpf_prog **pprog,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	if (cmpxchg(pprog, old, prog) != old)
		return -ENOENT;

	if (old)
		bpf_prog_put(old);

	return 0;
}

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->stream_parser, NULL);
	psock_set_prog(&progs->stream_verdict, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}
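
/* Editorial sketch (not upstream code): psock_set_prog() transfers the
 * caller's program reference into the slot and releases whatever was there,
 * so detaching is just installing NULL; psock_replace_prog() is the
 * compare-and-swap variant for callers that must only replace a known old
 * program:
 *
 *	psock_set_prog(&progs->msg_parser, prog);	attach, consumes ref
 *	psock_set_prog(&progs->msg_parser, NULL);	detach, puts old prog
 */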

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
	if (!psock)
		return false;
	return !!psock->saved_data_ready;
}

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

/* We only have one bit so far. */
#define BPF_F_PTR_MASK ~(BPF_F_INGRESS)

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_INGRESS;
}

static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_INGRESS;
}

static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
				     bool ingress)
{
	skb->_sk_redir = (unsigned long)sk_redir;
	if (ingress)
		skb->_sk_redir |= BPF_F_INGRESS;
}

static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
}

static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->_sk_redir = 0;
}
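
/* Editorial sketch (not upstream code): skb->_sk_redir carries the redirect
 * socket pointer with BPF_F_INGRESS folded into its low bit, which is free
 * because struct sock pointers have more than one bit of alignment:
 *
 *	skb_bpf_set_redir(skb, sk_redir, true);
 *	sk_redir = skb_bpf_redirect_fetch(skb);	flag masked back off
 *	if (skb_bpf_ingress(skb))
 *		deliver toward sk_redir's ingress path
 *	skb_bpf_redirect_clear(skb);
 */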
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */