/* xref: /linux-6.15/include/linux/virtio_vsock.h (revision 0df7cd3c) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_VSOCK_H
#define _LINUX_VIRTIO_VSOCK_H

#include <uapi/linux/virtio_vsock.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <net/af_vsock.h>

#define VIRTIO_VSOCK_SKB_HEADROOM (sizeof(struct virtio_vsock_hdr))

struct virtio_vsock_skb_cb {
	bool reply;
	bool tap_delivered;
	u32 offset;
};

#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))

/* The packet header is stored in the skb's reserved headroom, at skb->head. */
static inline struct virtio_vsock_hdr *virtio_vsock_hdr(struct sk_buff *skb)
{
	return (struct virtio_vsock_hdr *)skb->head;
}

static inline bool virtio_vsock_skb_reply(struct sk_buff *skb)
{
	return VIRTIO_VSOCK_SKB_CB(skb)->reply;
}

static inline void virtio_vsock_skb_set_reply(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->reply = true;
}

static inline bool virtio_vsock_skb_tap_delivered(struct sk_buff *skb)
{
	return VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered;
}

static inline void virtio_vsock_skb_set_tap_delivered(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = true;
}

static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
}

/* Extend the skb data area to cover the payload length announced in the header. */
static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
{
	u32 len;

	len = le32_to_cpu(virtio_vsock_hdr(skb)->len);

	if (len > 0)
		skb_put(skb, len);
}

/* Allocate an skb with headroom reserved for the virtio_vsock packet header. */
static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
{
	struct sk_buff *skb;

	if (size < VIRTIO_VSOCK_SKB_HEADROOM)
		return NULL;

	skb = alloc_skb(size, mask);
	if (!skb)
		return NULL;

	skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
	return skb;
}
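
/*
 * A minimal sketch (not part of this header) of how a transport might build
 * an outgoing packet with the helpers above.  Assumptions: the caller
 * supplies the payload; example_build_pkt() and its arguments are
 * hypothetical names used only for illustration.
 */
static inline struct sk_buff *example_build_pkt(const void *payload, u32 len)
{
	struct sk_buff *skb;
	struct virtio_vsock_hdr *hdr;

	/* Room for the packet header plus the payload. */
	skb = virtio_vsock_alloc_skb(VIRTIO_VSOCK_SKB_HEADROOM + len,
				     GFP_KERNEL);
	if (!skb)
		return NULL;

	/* The header lives in the reserved headroom. */
	hdr = virtio_vsock_hdr(skb);
	hdr->len = cpu_to_le32(len);

	/* Copy the payload into the data area. */
	skb_put_data(skb, payload, len);
	return skb;
}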

/*
 * Thread-safe wrappers around the sk_buff_head queue primitives; they take
 * the queue's spinlock with bottom halves disabled.
 */
static inline void
virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
{
	spin_lock_bh(&list->lock);
	__skb_queue_head(list, skb);
	spin_unlock_bh(&list->lock);
}

static inline void
virtio_vsock_skb_queue_tail(struct sk_buff_head *list, struct sk_buff *skb)
{
	spin_lock_bh(&list->lock);
	__skb_queue_tail(list, skb);
	spin_unlock_bh(&list->lock);
}

static inline struct sk_buff *virtio_vsock_skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	spin_lock_bh(&list->lock);
	skb = __skb_dequeue(list);
	spin_unlock_bh(&list->lock);

	return skb;
}

static inline void virtio_vsock_skb_queue_purge(struct sk_buff_head *list)
{
	spin_lock_bh(&list->lock);
	__skb_queue_purge(list);
	spin_unlock_bh(&list->lock);
}
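
/*
 * A small usage sketch (illustration only): draining a per-socket packet
 * queue with the locking helpers above.  example_drain() is a hypothetical
 * name.
 */
static inline void example_drain(struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	/* Dequeue under the lock, process (here: just free) outside of it. */
	while ((skb = virtio_vsock_skb_dequeue(queue)))
		kfree_skb(skb);
}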

/* Length of the skb's linear buffer, from skb->head to the end pointer. */
static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
{
	return (size_t)(skb_end_pointer(skb) - skb->head);
}

#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE	(1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE		0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE		(1024 * 64)

enum {
	VSOCK_VQ_RX     = 0, /* for host to guest data */
	VSOCK_VQ_TX     = 1, /* for guest to host data */
	VSOCK_VQ_EVENT  = 2, /* for event notifications */
	VSOCK_VQ_MAX    = 3,
};

/* Per-socket state (accessed via vsk->trans) */
struct virtio_vsock_sock {
	struct vsock_sock *vsk;

	spinlock_t tx_lock;
	spinlock_t rx_lock;

	/* Protected by tx_lock */
	u32 tx_cnt;		/* bytes we have transmitted to the peer */
	u32 peer_fwd_cnt;	/* last fwd_cnt value received from the peer */
	u32 peer_buf_alloc;	/* size of the peer's receive buffer */

	/* Protected by rx_lock */
	u32 fwd_cnt;		/* bytes consumed from our receive queue */
	u32 last_fwd_cnt;	/* fwd_cnt value last announced to the peer */
	u32 rx_bytes;		/* bytes currently queued in rx_queue */
	u32 buf_alloc;		/* size of our receive buffer */
	struct sk_buff_head rx_queue;
	u32 msg_count;		/* complete SEQPACKET messages in rx_queue */
};
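
/*
 * A hedged sketch of the credit calculation implied by the fields above: the
 * sender may have at most peer_buf_alloc bytes outstanding, where
 * "outstanding" means transmitted but not yet consumed by the peer.  This
 * mirrors what virtio_transport_get_credit() checks internally; the name
 * example_tx_space() is hypothetical and the caller is assumed to hold
 * vvs->tx_lock.
 */
static inline u32 example_tx_space(struct virtio_vsock_sock *vvs)
{
	/* Bytes sent but not yet consumed by the peer. */
	u32 in_flight = vvs->tx_cnt - vvs->peer_fwd_cnt;

	return vvs->peer_buf_alloc - in_flight;
}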

struct virtio_vsock_pkt_info {
	u32 remote_cid, remote_port;
	struct vsock_sock *vsk;
	struct msghdr *msg;
	u32 pkt_len;
	u16 type;
	u16 op;
	u32 flags;
	bool reply;
};
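
/*
 * Illustration only: one plausible way the descriptor above is filled for a
 * stream "data" (OP_RW) packet before it is handed to the common send path.
 * example_fill_rw_info() is a hypothetical helper; field usage is inferred
 * from the transport declarations below.
 */
static inline void example_fill_rw_info(struct virtio_vsock_pkt_info *info,
					struct vsock_sock *vsk,
					struct msghdr *msg, u32 len)
{
	*info = (struct virtio_vsock_pkt_info) {
		.op		= VIRTIO_VSOCK_OP_RW,
		.type		= VIRTIO_VSOCK_TYPE_STREAM,
		.msg		= msg,
		.pkt_len	= len,
		.vsk		= vsk,
	};
}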

struct virtio_transport {
	/* This must be the first field */
	struct vsock_transport transport;

	/* Takes ownership of the packet */
	int (*send_pkt)(struct sk_buff *skb);
};
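
/*
 * A hedged sketch of how a backend might embed struct virtio_transport.  The
 * example_* names are hypothetical; the in-tree users are the virtio, vhost
 * and loopback transports.
 */
static int example_send_pkt(struct sk_buff *skb)
{
	int len = skb->len;

	/* A real backend queues skb on its TX path; this sketch just drops it. */
	kfree_skb(skb);
	return len;
}

static struct virtio_transport example_transport = {
	/* .transport = { ... the usual struct vsock_transport callbacks ... }, */
	.send_pkt = example_send_pkt,
};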

ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len,
				int type);
int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
			       struct msghdr *msg,
			       size_t len, int flags);

int
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len);
ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   int flags);
s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk);

int virtio_transport_do_socket_init(struct vsock_sock *vsk,
				 struct vsock_sock *psk);
int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
				size_t target,
				bool *data_ready_now);
int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
				 size_t target,
				 bool *space_available_now);

int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
	size_t target, ssize_t copied, bool data_read,
	struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_send_init(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
	ssize_t written, struct vsock_transport_send_notify_data *data);
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val);

u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk);
bool virtio_transport_stream_is_active(struct vsock_sock *vsk);
bool virtio_transport_stream_allow(u32 cid, u32 port);
int virtio_transport_dgram_bind(struct vsock_sock *vsk,
				struct sockaddr_vm *addr);
bool virtio_transport_dgram_allow(u32 cid, u32 port);

int virtio_transport_connect(struct vsock_sock *vsk);

int virtio_transport_shutdown(struct vsock_sock *vsk, int mode);

void virtio_transport_release(struct vsock_sock *vsk);

ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len);
int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
			       struct sockaddr_vm *remote_addr,
			       struct msghdr *msg,
			       size_t len);

void virtio_transport_destruct(struct vsock_sock *vsk);

void virtio_transport_recv_pkt(struct virtio_transport *t,
			       struct sk_buff *skb);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t read_actor);
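
/*
 * A hedged sketch of how the credit helpers above are typically combined on
 * the transmit path (loosely modelled on the common send code; the name
 * example_credited_send() is hypothetical).  send_pkt() takes ownership of
 * the skb on success.
 */
static inline int example_credited_send(struct virtio_vsock_sock *vvs,
					struct virtio_transport *t,
					struct sk_buff *skb)
{
	u32 credit;

	/* Reserve credit for the payload; less than requested may be granted. */
	credit = virtio_transport_get_credit(vvs, skb->len);
	if (credit < skb->len) {
		/* Not enough space at the peer: give the credit back. */
		virtio_transport_put_credit(vvs, credit);
		return -EAGAIN;
	}

	/* Stamp the header with our current fwd_cnt/buf_alloc before sending. */
	virtio_transport_inc_tx_pkt(vvs, skb);

	return t->send_pkt(skb);
}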
#endif /* _LINUX_VIRTIO_VSOCK_H */