xref: /linux-6.15/include/linux/skmsg.h (revision 0fc7ca62)
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS			MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS			(MAX_MSG_FRAGS + 1)

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};
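
/*
 * These values mirror the verdicts a sockmap BPF program can return
 * (SK_DROP, SK_PASS, SK_REDIRECT); __SK_NONE is used internally, e.g. in
 * psock->eval, to mean "no verdict recorded yet". That reading is inferred
 * from the verdict handling code, not spelled out in this header.
 */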

struct sk_msg_sg {
	u32				start;
	u32				curr;
	u32				end;
	u32				size;
	u32				copybreak;
	unsigned long			copy;
	/* The extra two elements:
	 * 1) used for chaining the front and back sections when the list
	 *    becomes partitioned (i.e. end < start). The crypto APIs require
	 *    the chaining;
	 * 2) to chain trailer SG entries after the message.
	 */
	struct scatterlist		data[MAX_MSG_FRAGS + 2];
};
static_assert(BITS_PER_LONG >= NR_MSG_FRAG_IDS);
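
/*
 * The assertion above guards the 'copy' field: it is used as a bitmap with
 * one bit per scatterlist slot (see __set_bit()/test_bit() on &msg->sg.copy
 * below), so a single unsigned long must provide at least NR_MSG_FRAG_IDS
 * bits.
 */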

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg		sg;
	void				*data;
	void				*data_end;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				flags;
	struct sk_buff			*skb;
	struct sock			*sk_redir;
	struct sock			*sk;
	struct list_head		list;
};
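
/*
 * A rough reading of the layout (a sketch, since this header does not
 * document it): 'data'/'data_end' form the direct-access window handed to
 * BPF msg programs (see sk_msg_compute_data_pointers() below), while
 * 'apply_bytes'/'cork_bytes' track how much payload the current verdict
 * applies to and how much is being corked before a verdict runs.
 */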

struct sk_psock_progs {
	struct bpf_prog			*msg_parser;
	struct bpf_prog			*stream_parser;
	struct bpf_prog			*stream_verdict;
	struct bpf_prog			*skb_verdict;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
};

struct sk_psock_link {
	struct list_head		list;
	struct bpf_map			*map;
	void				*link_raw;
};

struct sk_psock_work_state {
	struct sk_buff			*skb;
	u32				len;
	u32				off;
};

struct sk_psock {
	struct sock			*sk;
	struct sock			*sk_redir;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				eval;
	struct sk_msg			*cork;
	struct sk_psock_progs		progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	struct strparser		strp;
#endif
	struct sk_buff_head		ingress_skb;
	struct list_head		ingress_msg;
	spinlock_t			ingress_lock;
	unsigned long			state;
	struct list_head		link;
	spinlock_t			link_lock;
	refcount_t			refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	void (*saved_data_ready)(struct sock *sk);
	int  (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
				     bool restore);
	struct proto			*sk_proto;
	struct mutex			work_mutex;
	struct sk_psock_work_state	work_state;
	struct work_struct		work;
	struct rcu_work			rwork;
};
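
/*
 * Lifetime sketch (partly inferred from code outside this header): a psock
 * hangs off sk->sk_user_data and is looked up under RCU via sk_psock();
 * references are pinned with sk_psock_get() and dropped with sk_psock_put(),
 * and the final teardown goes through sk_psock_drop(), with 'rwork' used for
 * deferred destruction.
 */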

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}
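
/*
 * sk_msg_apply_bytes() consumes the remaining apply_bytes budget, saturating
 * at zero when 'bytes' exceeds what is left.
 */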

static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}
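
/*
 * The sg.data[] array is treated as a ring of NR_MSG_FRAG_IDS slots, so the
 * distance has to account for wrap-around. A worked example, assuming the
 * common MAX_SKB_FRAGS == 17 (hence NR_MSG_FRAG_IDS == 18):
 *
 *	sk_msg_iter_dist(3, 9)  == 6                    (no wrap)
 *	sk_msg_iter_dist(16, 2) == 2 + (18 - 16) == 4   (wrapped ring)
 */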

#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = NR_MSG_FRAG_IDS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == NR_MSG_FRAG_IDS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)

static inline void sk_msg_clear_meta(struct sk_msg *msg)
{
	memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy));
}

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}

static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length  = size;
	dst->sg.size		   += size;
	src->sg.size		   -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, &msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}
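
/*
 * If the start slot is flagged in the sg.copy bitmap, the direct
 * data/data_end window is cleared instead of being exposed; the usual way
 * for a BPF program to reach such data is to linearize it first (e.g. with
 * bpf_msg_pull_data()), though that convention lives outside this header.
 */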

static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, &msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}
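
/*
 * Minimal usage sketch (illustrative only, assuming the caller owns 'page'
 * and is in a context where these helpers are safe to call):
 *
 *	struct sk_msg msg;
 *
 *	sk_msg_init(&msg);
 *	if (!sk_msg_full(&msg))
 *		sk_msg_page_add(&msg, page, len, offset);
 *	sk_msg_compute_data_pointers(&msg);
 *
 * Because sk_msg_page_add() sets the slot's bit in sg.copy, the final call
 * leaves msg.data == NULL for this message.
 */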

static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, &msg->sg.copy);
		else
			__clear_bit(i, &msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}
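
/*
 * sk_psock() reads sk->sk_user_data under RCU, so callers are expected to be
 * inside rcu_read_lock()/rcu_read_unlock() (or otherwise keep the psock from
 * going away), as sk_psock_get() below does.
 */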

static inline void sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	spin_lock_bh(&psock->ingress_lock);
	list_add_tail(&msg->list, &psock->ingress_msg);
	spin_unlock_bh(&psock->ingress_lock);
}

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
					       struct sk_msg *msg)
{
	struct sk_msg *ret;

	spin_lock_bh(&psock->ingress_lock);
	if (list_is_last(&msg->list, &psock->ingress_msg))
		ret = NULL;
	else
		ret = list_next_entry(msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}
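
/*
 * All access to the ingress_msg list is serialized by ingress_lock with BH
 * disabled. A consumer walking the queue without removing entries might look
 * like the following sketch (illustrative, not an API documented here):
 *
 *	struct sk_msg *msg = sk_psock_peek_msg(psock);
 *
 *	while (msg) {
 *		... copy data out of msg ...
 *		msg = sk_psock_next_msg(psock, msg);
 *	}
 */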

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void kfree_sk_msg(struct sk_msg *msg)
{
	if (msg->skb)
		consume_skb(msg->skb);
	kfree(msg);
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock, bool wait);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

static inline struct sk_psock_link *sk_psock_init_link(void)
{
	return kzalloc(sizeof(struct sk_psock_link),
		       GFP_ATOMIC | __GFP_NOWARN);
}

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	if (psock->psock_update_sk_prot)
		psock->psock_update_sk_prot(sk, psock, true);
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}
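
/*
 * Reference-counting sketch (illustrative; error handling is up to the
 * caller): look the psock up and pin it before leaving the RCU section,
 * then drop the reference when done.
 *
 *	struct sk_psock *psock = sk_psock_get(sk);
 *
 *	if (psock) {
 *		... use psock ...
 *		sk_psock_put(sk, psock);
 *	}
 *
 * sk_psock_get() uses refcount_inc_not_zero(), so it returns NULL both when
 * no psock is attached and when one is already being torn down.
 */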

static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		psock->saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
}

static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}

static inline int psock_replace_prog(struct bpf_prog **pprog,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	if (cmpxchg(pprog, old, prog) != old)
		return -ENOENT;

	if (old)
		bpf_prog_put(old);

	return 0;
}
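
/*
 * psock_set_prog() unconditionally swaps in the new program and releases the
 * reference on whatever was installed before; psock_replace_prog() only
 * succeeds when the slot still holds the program the caller expects ('old'),
 * returning -ENOENT otherwise. Passing NULL as the new program detaches.
 */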

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->stream_parser, NULL);
	psock_set_prog(&progs->stream_verdict, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
	if (!psock)
		return false;
	return !!psock->saved_data_ready;
}

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

/* We only have one bit so far. */
#define BPF_F_PTR_MASK ~(BPF_F_INGRESS)
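
/*
 * skb->_sk_redir is a tagged pointer: the target socket address lives in the
 * upper bits and BPF_F_INGRESS (bit 0) rides in the low bit, which is always
 * clear in a valid struct sock pointer due to alignment. BPF_F_PTR_MASK
 * strips the flag bit to recover the pointer, as the accessors below do.
 */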

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_INGRESS;
}

static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_INGRESS;
}

static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
				     bool ingress)
{
	skb->_sk_redir = (unsigned long)sk_redir;
	if (ingress)
		skb->_sk_redir |= BPF_F_INGRESS;
}

static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
}

static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->_sk_redir = 0;
}
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */