xref: /f-stack/dpdk/lib/librte_vhost/vhost_user.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2018 Intel Corporation
3  */
4 
5 /* Security model
6  * --------------
7  * The vhost-user protocol connection is an external interface, so it must be
8  * robust against invalid inputs.
9  *
10  * This is important because the vhost-user master is only one step removed
11  * from the guest.  Malicious guests that have escaped will then launch further
12  * attacks from the vhost-user master.
13  *
14  * Even in deployments where guests are trusted, a bug in the vhost-user master
15  * can still cause invalid messages to be sent.  Such messages must not
16  * compromise the stability of the DPDK application by causing crashes, memory
17  * corruption, or other problematic behavior.
18  *
19  * Do not assume received VhostUserMsg fields contain sensible values!
20  */
21 
22 #include <stdint.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <unistd.h>
27 #include <fcntl.h>
28 #include <sys/ioctl.h>
29 #include <sys/mman.h>
30 #include <sys/types.h>
31 #include <sys/stat.h>
32 #include <sys/syscall.h>
33 #include <assert.h>
34 #ifdef RTE_LIBRTE_VHOST_NUMA
35 #include <numaif.h>
36 #endif
37 #ifdef RTE_LIBRTE_VHOST_POSTCOPY
38 #include <linux/userfaultfd.h>
39 #endif
40 #ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
41 #include <linux/memfd.h>
42 #define MEMFD_SUPPORTED
43 #endif
44 
45 #include <rte_common.h>
46 #include <rte_malloc.h>
47 #include <rte_log.h>
48 
49 #include "iotlb.h"
50 #include "vhost.h"
51 #include "vhost_user.h"
52 
53 #define VIRTIO_MIN_MTU 68
54 #define VIRTIO_MAX_MTU 65535
55 
56 #define INFLIGHT_ALIGNMENT	64
57 #define INFLIGHT_VERSION	0x1
58 
59 static const char *vhost_message_str[VHOST_USER_MAX] = {
60 	[VHOST_USER_NONE] = "VHOST_USER_NONE",
61 	[VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
62 	[VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
63 	[VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
64 	[VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
65 	[VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
66 	[VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
67 	[VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
68 	[VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
69 	[VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
70 	[VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
71 	[VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
72 	[VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
73 	[VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
74 	[VHOST_USER_SET_VRING_ERR]  = "VHOST_USER_SET_VRING_ERR",
75 	[VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
76 	[VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
77 	[VHOST_USER_GET_QUEUE_NUM]  = "VHOST_USER_GET_QUEUE_NUM",
78 	[VHOST_USER_SET_VRING_ENABLE]  = "VHOST_USER_SET_VRING_ENABLE",
79 	[VHOST_USER_SEND_RARP]  = "VHOST_USER_SEND_RARP",
80 	[VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
81 	[VHOST_USER_SET_SLAVE_REQ_FD]  = "VHOST_USER_SET_SLAVE_REQ_FD",
82 	[VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
83 	[VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS",
84 	[VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
85 	[VHOST_USER_POSTCOPY_ADVISE]  = "VHOST_USER_POSTCOPY_ADVISE",
86 	[VHOST_USER_POSTCOPY_LISTEN]  = "VHOST_USER_POSTCOPY_LISTEN",
87 	[VHOST_USER_POSTCOPY_END]  = "VHOST_USER_POSTCOPY_END",
88 	[VHOST_USER_GET_INFLIGHT_FD] = "VHOST_USER_GET_INFLIGHT_FD",
89 	[VHOST_USER_SET_INFLIGHT_FD] = "VHOST_USER_SET_INFLIGHT_FD",
90 	[VHOST_USER_SET_STATUS] = "VHOST_USER_SET_STATUS",
91 	[VHOST_USER_GET_STATUS] = "VHOST_USER_GET_STATUS",
92 };
93 
94 static int send_vhost_reply(int sockfd, struct VhostUserMsg *msg);
95 static int read_vhost_message(int sockfd, struct VhostUserMsg *msg);
96 
97 static void
close_msg_fds(struct VhostUserMsg * msg)98 close_msg_fds(struct VhostUserMsg *msg)
99 {
100 	int i;
101 
102 	for (i = 0; i < msg->fd_num; i++) {
103 		int fd = msg->fds[i];
104 
105 		if (fd == -1)
106 			continue;
107 
108 		msg->fds[i] = -1;
109 		close(fd);
110 	}
111 }
112 
113 /*
114  * Ensure the expected number of FDs is received,
115  * close all FDs and return an error if this is not the case.
116  */
117 static int
validate_msg_fds(struct VhostUserMsg * msg,int expected_fds)118 validate_msg_fds(struct VhostUserMsg *msg, int expected_fds)
119 {
120 	if (msg->fd_num == expected_fds)
121 		return 0;
122 
123 	VHOST_LOG_CONFIG(ERR,
124 		" Expect %d FDs for request %s, received %d\n",
125 		expected_fds,
126 		vhost_message_str[msg->request.master],
127 		msg->fd_num);
128 
129 	close_msg_fds(msg);
130 
131 	return -1;
132 }
133 
134 static uint64_t
get_blk_size(int fd)135 get_blk_size(int fd)
136 {
137 	struct stat stat;
138 	int ret;
139 
140 	ret = fstat(fd, &stat);
141 	return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
142 }
143 
144 static void
free_mem_region(struct virtio_net * dev)145 free_mem_region(struct virtio_net *dev)
146 {
147 	uint32_t i;
148 	struct rte_vhost_mem_region *reg;
149 
150 	if (!dev || !dev->mem)
151 		return;
152 
153 	for (i = 0; i < dev->mem->nregions; i++) {
154 		reg = &dev->mem->regions[i];
155 		if (reg->host_user_addr) {
156 			munmap(reg->mmap_addr, reg->mmap_size);
157 			close(reg->fd);
158 		}
159 	}
160 }
161 
162 void
vhost_backend_cleanup(struct virtio_net * dev)163 vhost_backend_cleanup(struct virtio_net *dev)
164 {
165 	if (dev->mem) {
166 		free_mem_region(dev);
167 		rte_free(dev->mem);
168 		dev->mem = NULL;
169 	}
170 
171 	rte_free(dev->guest_pages);
172 	dev->guest_pages = NULL;
173 
174 	if (dev->log_addr) {
175 		munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
176 		dev->log_addr = 0;
177 	}
178 
179 	if (dev->inflight_info) {
180 		if (dev->inflight_info->addr) {
181 			munmap(dev->inflight_info->addr,
182 			       dev->inflight_info->size);
183 			dev->inflight_info->addr = NULL;
184 		}
185 
186 		if (dev->inflight_info->fd >= 0) {
187 			close(dev->inflight_info->fd);
188 			dev->inflight_info->fd = -1;
189 		}
190 
191 		free(dev->inflight_info);
192 		dev->inflight_info = NULL;
193 	}
194 
195 	if (dev->slave_req_fd >= 0) {
196 		close(dev->slave_req_fd);
197 		dev->slave_req_fd = -1;
198 	}
199 
200 	if (dev->postcopy_ufd >= 0) {
201 		close(dev->postcopy_ufd);
202 		dev->postcopy_ufd = -1;
203 	}
204 
205 	dev->postcopy_listening = 0;
206 }
207 
208 static void
vhost_user_notify_queue_state(struct virtio_net * dev,uint16_t index,int enable)209 vhost_user_notify_queue_state(struct virtio_net *dev, uint16_t index,
210 			      int enable)
211 {
212 	struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
213 	struct vhost_virtqueue *vq = dev->virtqueue[index];
214 
215 	/* Configure guest notifications on enable */
216 	if (enable && vq->notif_enable != VIRTIO_UNINITIALIZED_NOTIF)
217 		vhost_enable_guest_notification(dev, vq, vq->notif_enable);
218 
219 	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
220 		vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
221 
222 	if (dev->notify_ops->vring_state_changed)
223 		dev->notify_ops->vring_state_changed(dev->vid,
224 				index, enable);
225 }
226 
227 /*
228  * This function just returns success at the moment unless
229  * the device hasn't been initialised.
230  */
231 static int
vhost_user_set_owner(struct virtio_net ** pdev __rte_unused,struct VhostUserMsg * msg,int main_fd __rte_unused)232 vhost_user_set_owner(struct virtio_net **pdev __rte_unused,
233 			struct VhostUserMsg *msg,
234 			int main_fd __rte_unused)
235 {
236 	if (validate_msg_fds(msg, 0) != 0)
237 		return RTE_VHOST_MSG_RESULT_ERR;
238 
239 	return RTE_VHOST_MSG_RESULT_OK;
240 }
241 
242 static int
vhost_user_reset_owner(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)243 vhost_user_reset_owner(struct virtio_net **pdev,
244 			struct VhostUserMsg *msg,
245 			int main_fd __rte_unused)
246 {
247 	struct virtio_net *dev = *pdev;
248 
249 	if (validate_msg_fds(msg, 0) != 0)
250 		return RTE_VHOST_MSG_RESULT_ERR;
251 
252 	vhost_destroy_device_notify(dev);
253 
254 	cleanup_device(dev, 0);
255 	reset_device(dev);
256 	return RTE_VHOST_MSG_RESULT_OK;
257 }
258 
259 /*
260  * The features that we support are requested.
261  */
262 static int
vhost_user_get_features(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)263 vhost_user_get_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
264 			int main_fd __rte_unused)
265 {
266 	struct virtio_net *dev = *pdev;
267 	uint64_t features = 0;
268 
269 	if (validate_msg_fds(msg, 0) != 0)
270 		return RTE_VHOST_MSG_RESULT_ERR;
271 
272 	rte_vhost_driver_get_features(dev->ifname, &features);
273 
274 	msg->payload.u64 = features;
275 	msg->size = sizeof(msg->payload.u64);
276 	msg->fd_num = 0;
277 
278 	return RTE_VHOST_MSG_RESULT_REPLY;
279 }
280 
281 /*
282  * The queue number that we support are requested.
283  */
284 static int
vhost_user_get_queue_num(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)285 vhost_user_get_queue_num(struct virtio_net **pdev, struct VhostUserMsg *msg,
286 			int main_fd __rte_unused)
287 {
288 	struct virtio_net *dev = *pdev;
289 	uint32_t queue_num = 0;
290 
291 	if (validate_msg_fds(msg, 0) != 0)
292 		return RTE_VHOST_MSG_RESULT_ERR;
293 
294 	rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
295 
296 	msg->payload.u64 = (uint64_t)queue_num;
297 	msg->size = sizeof(msg->payload.u64);
298 	msg->fd_num = 0;
299 
300 	return RTE_VHOST_MSG_RESULT_REPLY;
301 }
302 
303 /*
304  * We receive the negotiated features supported by us and the virtio device.
305  */
306 static int
vhost_user_set_features(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)307 vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
308 			int main_fd __rte_unused)
309 {
310 	struct virtio_net *dev = *pdev;
311 	uint64_t features = msg->payload.u64;
312 	uint64_t vhost_features = 0;
313 	struct rte_vdpa_device *vdpa_dev;
314 
315 	if (validate_msg_fds(msg, 0) != 0)
316 		return RTE_VHOST_MSG_RESULT_ERR;
317 
318 	rte_vhost_driver_get_features(dev->ifname, &vhost_features);
319 	if (features & ~vhost_features) {
320 		VHOST_LOG_CONFIG(ERR,
321 			"(%d) received invalid negotiated features.\n",
322 			dev->vid);
323 		dev->flags |= VIRTIO_DEV_FEATURES_FAILED;
324 		dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;
325 
326 		return RTE_VHOST_MSG_RESULT_ERR;
327 	}
328 
329 	if (dev->flags & VIRTIO_DEV_RUNNING) {
330 		if (dev->features == features)
331 			return RTE_VHOST_MSG_RESULT_OK;
332 
333 		/*
334 		 * Error out if master tries to change features while device is
335 		 * in running state. The exception being VHOST_F_LOG_ALL, which
336 		 * is enabled when the live-migration starts.
337 		 */
338 		if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
339 			VHOST_LOG_CONFIG(ERR,
340 				"(%d) features changed while device is running.\n",
341 				dev->vid);
342 			return RTE_VHOST_MSG_RESULT_ERR;
343 		}
344 
345 		if (dev->notify_ops->features_changed)
346 			dev->notify_ops->features_changed(dev->vid, features);
347 	}
348 
349 	dev->features = features;
350 	if (dev->features &
351 		((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
352 		 (1ULL << VIRTIO_F_VERSION_1) |
353 		 (1ULL << VIRTIO_F_RING_PACKED))) {
354 		dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
355 	} else {
356 		dev->vhost_hlen = sizeof(struct virtio_net_hdr);
357 	}
358 	VHOST_LOG_CONFIG(INFO,
359 		"negotiated Virtio features: 0x%" PRIx64 "\n", dev->features);
360 	VHOST_LOG_CONFIG(DEBUG,
361 		"(%d) mergeable RX buffers %s, virtio 1 %s\n",
362 		dev->vid,
363 		(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
364 		(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");
365 
366 	if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) &&
367 	    !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
368 		/*
369 		 * Remove all but first queue pair if MQ hasn't been
370 		 * negotiated. This is safe because the device is not
371 		 * running at this stage.
372 		 */
373 		while (dev->nr_vring > 2) {
374 			struct vhost_virtqueue *vq;
375 
376 			vq = dev->virtqueue[--dev->nr_vring];
377 			if (!vq)
378 				continue;
379 
380 			dev->virtqueue[dev->nr_vring] = NULL;
381 			cleanup_vq(vq, 1);
382 			cleanup_vq_inflight(dev, vq);
383 			free_vq(dev, vq);
384 		}
385 	}
386 
387 	vdpa_dev = dev->vdpa_dev;
388 	if (vdpa_dev)
389 		vdpa_dev->ops->set_features(dev->vid);
390 
391 	dev->flags &= ~VIRTIO_DEV_FEATURES_FAILED;
392 	return RTE_VHOST_MSG_RESULT_OK;
393 }
394 
395 /*
396  * The virtio device sends us the size of the descriptor ring.
397  */
398 static int
vhost_user_set_vring_num(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)399 vhost_user_set_vring_num(struct virtio_net **pdev,
400 			struct VhostUserMsg *msg,
401 			int main_fd __rte_unused)
402 {
403 	struct virtio_net *dev = *pdev;
404 	struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
405 
406 	if (validate_msg_fds(msg, 0) != 0)
407 		return RTE_VHOST_MSG_RESULT_ERR;
408 
409 	vq->size = msg->payload.state.num;
410 
411 	/* VIRTIO 1.0, 2.4 Virtqueues says:
412 	 *
413 	 *   Queue Size value is always a power of 2. The maximum Queue Size
414 	 *   value is 32768.
415 	 *
416 	 * VIRTIO 1.1 2.7 Virtqueues says:
417 	 *
418 	 *   Packed virtqueues support up to 2^15 entries each.
419 	 */
420 	if (!vq_is_packed(dev)) {
421 		if (vq->size & (vq->size - 1)) {
422 			VHOST_LOG_CONFIG(ERR,
423 				"invalid virtqueue size %u\n", vq->size);
424 			return RTE_VHOST_MSG_RESULT_ERR;
425 		}
426 	}
427 
428 	if (vq->size > 32768) {
429 		VHOST_LOG_CONFIG(ERR,
430 			"invalid virtqueue size %u\n", vq->size);
431 		return RTE_VHOST_MSG_RESULT_ERR;
432 	}
433 
434 	if (vq_is_packed(dev)) {
435 		if (vq->shadow_used_packed)
436 			rte_free(vq->shadow_used_packed);
437 		vq->shadow_used_packed = rte_malloc(NULL,
438 				vq->size *
439 				sizeof(struct vring_used_elem_packed),
440 				RTE_CACHE_LINE_SIZE);
441 		if (!vq->shadow_used_packed) {
442 			VHOST_LOG_CONFIG(ERR,
443 					"failed to allocate memory for shadow used ring.\n");
444 			return RTE_VHOST_MSG_RESULT_ERR;
445 		}
446 
447 	} else {
448 		if (vq->shadow_used_split)
449 			rte_free(vq->shadow_used_split);
450 
451 		vq->shadow_used_split = rte_malloc(NULL,
452 				vq->size * sizeof(struct vring_used_elem),
453 				RTE_CACHE_LINE_SIZE);
454 
455 		if (!vq->shadow_used_split) {
456 			VHOST_LOG_CONFIG(ERR,
457 					"failed to allocate memory for vq internal data.\n");
458 			return RTE_VHOST_MSG_RESULT_ERR;
459 		}
460 	}
461 
462 	if (vq->batch_copy_elems)
463 		rte_free(vq->batch_copy_elems);
464 	vq->batch_copy_elems = rte_malloc(NULL,
465 				vq->size * sizeof(struct batch_copy_elem),
466 				RTE_CACHE_LINE_SIZE);
467 	if (!vq->batch_copy_elems) {
468 		VHOST_LOG_CONFIG(ERR,
469 			"failed to allocate memory for batching copy.\n");
470 		return RTE_VHOST_MSG_RESULT_ERR;
471 	}
472 
473 	return RTE_VHOST_MSG_RESULT_OK;
474 }
475 
476 /*
477  * Reallocate virtio_dev and vhost_virtqueue data structure to make them on the
478  * same numa node as the memory of vring descriptor.
479  */
480 #ifdef RTE_LIBRTE_VHOST_NUMA
481 static struct virtio_net*
numa_realloc(struct virtio_net * dev,int index)482 numa_realloc(struct virtio_net *dev, int index)
483 {
484 	int oldnode, newnode;
485 	struct virtio_net *old_dev;
486 	struct vhost_virtqueue *old_vq, *vq;
487 	struct vring_used_elem *new_shadow_used_split;
488 	struct vring_used_elem_packed *new_shadow_used_packed;
489 	struct batch_copy_elem *new_batch_copy_elems;
490 	int ret;
491 
492 	if (dev->flags & VIRTIO_DEV_RUNNING)
493 		return dev;
494 
495 	old_dev = dev;
496 	vq = old_vq = dev->virtqueue[index];
497 
498 	ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
499 			    MPOL_F_NODE | MPOL_F_ADDR);
500 
501 	/* check if we need to reallocate vq */
502 	ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
503 			     MPOL_F_NODE | MPOL_F_ADDR);
504 	if (ret) {
505 		VHOST_LOG_CONFIG(ERR,
506 			"Unable to get vq numa information.\n");
507 		return dev;
508 	}
509 	if (oldnode != newnode) {
510 		VHOST_LOG_CONFIG(INFO,
511 			"reallocate vq from %d to %d node\n", oldnode, newnode);
512 		vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
513 		if (!vq)
514 			return dev;
515 
516 		memcpy(vq, old_vq, sizeof(*vq));
517 
518 		if (vq_is_packed(dev)) {
519 			new_shadow_used_packed = rte_malloc_socket(NULL,
520 					vq->size *
521 					sizeof(struct vring_used_elem_packed),
522 					RTE_CACHE_LINE_SIZE,
523 					newnode);
524 			if (new_shadow_used_packed) {
525 				rte_free(vq->shadow_used_packed);
526 				vq->shadow_used_packed = new_shadow_used_packed;
527 			}
528 		} else {
529 			new_shadow_used_split = rte_malloc_socket(NULL,
530 					vq->size *
531 					sizeof(struct vring_used_elem),
532 					RTE_CACHE_LINE_SIZE,
533 					newnode);
534 			if (new_shadow_used_split) {
535 				rte_free(vq->shadow_used_split);
536 				vq->shadow_used_split = new_shadow_used_split;
537 			}
538 		}
539 
540 		new_batch_copy_elems = rte_malloc_socket(NULL,
541 			vq->size * sizeof(struct batch_copy_elem),
542 			RTE_CACHE_LINE_SIZE,
543 			newnode);
544 		if (new_batch_copy_elems) {
545 			rte_free(vq->batch_copy_elems);
546 			vq->batch_copy_elems = new_batch_copy_elems;
547 		}
548 
549 		rte_free(old_vq);
550 	}
551 
552 	/* check if we need to reallocate dev */
553 	ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
554 			    MPOL_F_NODE | MPOL_F_ADDR);
555 	if (ret) {
556 		VHOST_LOG_CONFIG(ERR,
557 			"Unable to get dev numa information.\n");
558 		goto out;
559 	}
560 	if (oldnode != newnode) {
561 		VHOST_LOG_CONFIG(INFO,
562 			"reallocate dev from %d to %d node\n",
563 			oldnode, newnode);
564 		dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
565 		if (!dev) {
566 			dev = old_dev;
567 			goto out;
568 		}
569 
570 		memcpy(dev, old_dev, sizeof(*dev));
571 		rte_free(old_dev);
572 	}
573 
574 out:
575 	dev->virtqueue[index] = vq;
576 	vhost_devices[dev->vid] = dev;
577 
578 	if (old_vq != vq)
579 		vhost_user_iotlb_init(dev, index);
580 
581 	return dev;
582 }
583 #else
584 static struct virtio_net*
numa_realloc(struct virtio_net * dev,int index __rte_unused)585 numa_realloc(struct virtio_net *dev, int index __rte_unused)
586 {
587 	return dev;
588 }
589 #endif
590 
591 /* Converts QEMU virtual address to Vhost virtual address. */
592 static uint64_t
qva_to_vva(struct virtio_net * dev,uint64_t qva,uint64_t * len)593 qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
594 {
595 	struct rte_vhost_mem_region *r;
596 	uint32_t i;
597 
598 	if (unlikely(!dev || !dev->mem))
599 		goto out_error;
600 
601 	/* Find the region where the address lives. */
602 	for (i = 0; i < dev->mem->nregions; i++) {
603 		r = &dev->mem->regions[i];
604 
605 		if (qva >= r->guest_user_addr &&
606 		    qva <  r->guest_user_addr + r->size) {
607 
608 			if (unlikely(*len > r->guest_user_addr + r->size - qva))
609 				*len = r->guest_user_addr + r->size - qva;
610 
611 			return qva - r->guest_user_addr +
612 			       r->host_user_addr;
613 		}
614 	}
615 out_error:
616 	*len = 0;
617 
618 	return 0;
619 }
620 
621 
622 /*
623  * Converts ring address to Vhost virtual address.
624  * If IOMMU is enabled, the ring address is a guest IO virtual address,
625  * else it is a QEMU virtual address.
626  */
627 static uint64_t
ring_addr_to_vva(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t ra,uint64_t * size)628 ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
629 		uint64_t ra, uint64_t *size)
630 {
631 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
632 		uint64_t vva;
633 
634 		vhost_user_iotlb_rd_lock(vq);
635 		vva = vhost_iova_to_vva(dev, vq, ra,
636 					size, VHOST_ACCESS_RW);
637 		vhost_user_iotlb_rd_unlock(vq);
638 
639 		return vva;
640 	}
641 
642 	return qva_to_vva(dev, ra, size);
643 }
644 
645 static uint64_t
log_addr_to_gpa(struct virtio_net * dev,struct vhost_virtqueue * vq)646 log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq)
647 {
648 	uint64_t log_gpa;
649 
650 	vhost_user_iotlb_rd_lock(vq);
651 	log_gpa = translate_log_addr(dev, vq, vq->ring_addrs.log_guest_addr);
652 	vhost_user_iotlb_rd_unlock(vq);
653 
654 	return log_gpa;
655 }
656 
657 static struct virtio_net *
translate_ring_addresses(struct virtio_net * dev,int vq_index)658 translate_ring_addresses(struct virtio_net *dev, int vq_index)
659 {
660 	struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
661 	struct vhost_vring_addr *addr = &vq->ring_addrs;
662 	uint64_t len, expected_len;
663 
664 	if (addr->flags & (1 << VHOST_VRING_F_LOG)) {
665 		vq->log_guest_addr =
666 			log_addr_to_gpa(dev, vq);
667 		if (vq->log_guest_addr == 0) {
668 			VHOST_LOG_CONFIG(DEBUG,
669 				"(%d) failed to map log_guest_addr.\n",
670 				dev->vid);
671 			return dev;
672 		}
673 	}
674 
675 	if (vq_is_packed(dev)) {
676 		len = sizeof(struct vring_packed_desc) * vq->size;
677 		vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
678 			ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len);
679 		if (vq->desc_packed == NULL ||
680 				len != sizeof(struct vring_packed_desc) *
681 				vq->size) {
682 			VHOST_LOG_CONFIG(DEBUG,
683 				"(%d) failed to map desc_packed ring.\n",
684 				dev->vid);
685 			return dev;
686 		}
687 
688 		dev = numa_realloc(dev, vq_index);
689 		vq = dev->virtqueue[vq_index];
690 		addr = &vq->ring_addrs;
691 
692 		len = sizeof(struct vring_packed_desc_event);
693 		vq->driver_event = (struct vring_packed_desc_event *)
694 					(uintptr_t)ring_addr_to_vva(dev,
695 					vq, addr->avail_user_addr, &len);
696 		if (vq->driver_event == NULL ||
697 				len != sizeof(struct vring_packed_desc_event)) {
698 			VHOST_LOG_CONFIG(DEBUG,
699 				"(%d) failed to find driver area address.\n",
700 				dev->vid);
701 			return dev;
702 		}
703 
704 		len = sizeof(struct vring_packed_desc_event);
705 		vq->device_event = (struct vring_packed_desc_event *)
706 					(uintptr_t)ring_addr_to_vva(dev,
707 					vq, addr->used_user_addr, &len);
708 		if (vq->device_event == NULL ||
709 				len != sizeof(struct vring_packed_desc_event)) {
710 			VHOST_LOG_CONFIG(DEBUG,
711 				"(%d) failed to find device area address.\n",
712 				dev->vid);
713 			return dev;
714 		}
715 
716 		vq->access_ok = 1;
717 		return dev;
718 	}
719 
720 	/* The addresses are converted from QEMU virtual to Vhost virtual. */
721 	if (vq->desc && vq->avail && vq->used)
722 		return dev;
723 
724 	len = sizeof(struct vring_desc) * vq->size;
725 	vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
726 			vq, addr->desc_user_addr, &len);
727 	if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
728 		VHOST_LOG_CONFIG(DEBUG,
729 			"(%d) failed to map desc ring.\n",
730 			dev->vid);
731 		return dev;
732 	}
733 
734 	dev = numa_realloc(dev, vq_index);
735 	vq = dev->virtqueue[vq_index];
736 	addr = &vq->ring_addrs;
737 
738 	len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
739 	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
740 		len += sizeof(uint16_t);
741 	expected_len = len;
742 	vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
743 			vq, addr->avail_user_addr, &len);
744 	if (vq->avail == 0 || len != expected_len) {
745 		VHOST_LOG_CONFIG(DEBUG,
746 			"(%d) failed to map avail ring.\n",
747 			dev->vid);
748 		return dev;
749 	}
750 
751 	len = sizeof(struct vring_used) +
752 		sizeof(struct vring_used_elem) * vq->size;
753 	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
754 		len += sizeof(uint16_t);
755 	expected_len = len;
756 	vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
757 			vq, addr->used_user_addr, &len);
758 	if (vq->used == 0 || len != expected_len) {
759 		VHOST_LOG_CONFIG(DEBUG,
760 			"(%d) failed to map used ring.\n",
761 			dev->vid);
762 		return dev;
763 	}
764 
765 	if (vq->last_used_idx != vq->used->idx) {
766 		VHOST_LOG_CONFIG(WARNING,
767 			"last_used_idx (%u) and vq->used->idx (%u) mismatches; "
768 			"some packets maybe resent for Tx and dropped for Rx\n",
769 			vq->last_used_idx, vq->used->idx);
770 		vq->last_used_idx  = vq->used->idx;
771 		vq->last_avail_idx = vq->used->idx;
772 	}
773 
774 	vq->access_ok = 1;
775 
776 	VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address desc: %p\n",
777 			dev->vid, vq->desc);
778 	VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address avail: %p\n",
779 			dev->vid, vq->avail);
780 	VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address used: %p\n",
781 			dev->vid, vq->used);
782 	VHOST_LOG_CONFIG(DEBUG, "(%d) log_guest_addr: %" PRIx64 "\n",
783 			dev->vid, vq->log_guest_addr);
784 
785 	return dev;
786 }
787 
788 /*
789  * The virtio device sends us the desc, used and avail ring addresses.
790  * This function then converts these to our address space.
791  */
792 static int
vhost_user_set_vring_addr(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)793 vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg,
794 			int main_fd __rte_unused)
795 {
796 	struct virtio_net *dev = *pdev;
797 	struct vhost_virtqueue *vq;
798 	struct vhost_vring_addr *addr = &msg->payload.addr;
799 	bool access_ok;
800 
801 	if (validate_msg_fds(msg, 0) != 0)
802 		return RTE_VHOST_MSG_RESULT_ERR;
803 
804 	if (dev->mem == NULL)
805 		return RTE_VHOST_MSG_RESULT_ERR;
806 
807 	/* addr->index refers to the queue index. The txq 1, rxq is 0. */
808 	vq = dev->virtqueue[msg->payload.addr.index];
809 
810 	access_ok = vq->access_ok;
811 
812 	/*
813 	 * Rings addresses should not be interpreted as long as the ring is not
814 	 * started and enabled
815 	 */
816 	memcpy(&vq->ring_addrs, addr, sizeof(*addr));
817 
818 	vring_invalidate(dev, vq);
819 
820 	if ((vq->enabled && (dev->features &
821 				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) ||
822 			access_ok) {
823 		dev = translate_ring_addresses(dev, msg->payload.addr.index);
824 		if (!dev)
825 			return RTE_VHOST_MSG_RESULT_ERR;
826 
827 		*pdev = dev;
828 	}
829 
830 	return RTE_VHOST_MSG_RESULT_OK;
831 }
832 
833 /*
834  * The virtio device sends us the available ring last used index.
835  */
836 static int
vhost_user_set_vring_base(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)837 vhost_user_set_vring_base(struct virtio_net **pdev,
838 			struct VhostUserMsg *msg,
839 			int main_fd __rte_unused)
840 {
841 	struct virtio_net *dev = *pdev;
842 	struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
843 	uint64_t val = msg->payload.state.num;
844 
845 	if (validate_msg_fds(msg, 0) != 0)
846 		return RTE_VHOST_MSG_RESULT_ERR;
847 
848 	if (vq_is_packed(dev)) {
849 		/*
850 		 * Bit[0:14]: avail index
851 		 * Bit[15]: avail wrap counter
852 		 */
853 		vq->last_avail_idx = val & 0x7fff;
854 		vq->avail_wrap_counter = !!(val & (0x1 << 15));
855 		/*
856 		 * Set used index to same value as available one, as
857 		 * their values should be the same since ring processing
858 		 * was stopped at get time.
859 		 */
860 		vq->last_used_idx = vq->last_avail_idx;
861 		vq->used_wrap_counter = vq->avail_wrap_counter;
862 	} else {
863 		vq->last_used_idx = msg->payload.state.num;
864 		vq->last_avail_idx = msg->payload.state.num;
865 	}
866 
867 	return RTE_VHOST_MSG_RESULT_OK;
868 }
869 
870 static int
add_one_guest_page(struct virtio_net * dev,uint64_t guest_phys_addr,uint64_t host_phys_addr,uint64_t size)871 add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
872 		   uint64_t host_phys_addr, uint64_t size)
873 {
874 	struct guest_page *page, *last_page;
875 	struct guest_page *old_pages;
876 
877 	if (dev->nr_guest_pages == dev->max_guest_pages) {
878 		dev->max_guest_pages *= 2;
879 		old_pages = dev->guest_pages;
880 		dev->guest_pages = rte_realloc(dev->guest_pages,
881 					dev->max_guest_pages * sizeof(*page),
882 					RTE_CACHE_LINE_SIZE);
883 		if (dev->guest_pages == NULL) {
884 			VHOST_LOG_CONFIG(ERR, "cannot realloc guest_pages\n");
885 			rte_free(old_pages);
886 			return -1;
887 		}
888 	}
889 
890 	if (dev->nr_guest_pages > 0) {
891 		last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
892 		/* merge if the two pages are continuous */
893 		if (host_phys_addr == last_page->host_phys_addr +
894 				      last_page->size) {
895 			last_page->size += size;
896 			return 0;
897 		}
898 	}
899 
900 	page = &dev->guest_pages[dev->nr_guest_pages++];
901 	page->guest_phys_addr = guest_phys_addr;
902 	page->host_phys_addr  = host_phys_addr;
903 	page->size = size;
904 
905 	return 0;
906 }
907 
908 static int
add_guest_pages(struct virtio_net * dev,struct rte_vhost_mem_region * reg,uint64_t page_size)909 add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
910 		uint64_t page_size)
911 {
912 	uint64_t reg_size = reg->size;
913 	uint64_t host_user_addr  = reg->host_user_addr;
914 	uint64_t guest_phys_addr = reg->guest_phys_addr;
915 	uint64_t host_phys_addr;
916 	uint64_t size;
917 
918 	host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
919 	size = page_size - (guest_phys_addr & (page_size - 1));
920 	size = RTE_MIN(size, reg_size);
921 
922 	if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
923 		return -1;
924 
925 	host_user_addr  += size;
926 	guest_phys_addr += size;
927 	reg_size -= size;
928 
929 	while (reg_size > 0) {
930 		size = RTE_MIN(reg_size, page_size);
931 		host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
932 						  host_user_addr);
933 		if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
934 				size) < 0)
935 			return -1;
936 
937 		host_user_addr  += size;
938 		guest_phys_addr += size;
939 		reg_size -= size;
940 	}
941 
942 	/* sort guest page array if over binary search threshold */
943 	if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
944 		qsort((void *)dev->guest_pages, dev->nr_guest_pages,
945 			sizeof(struct guest_page), guest_page_addrcmp);
946 	}
947 
948 	return 0;
949 }
950 
951 #ifdef RTE_LIBRTE_VHOST_DEBUG
952 /* TODO: enable it only in debug mode? */
953 static void
dump_guest_pages(struct virtio_net * dev)954 dump_guest_pages(struct virtio_net *dev)
955 {
956 	uint32_t i;
957 	struct guest_page *page;
958 
959 	for (i = 0; i < dev->nr_guest_pages; i++) {
960 		page = &dev->guest_pages[i];
961 
962 		VHOST_LOG_CONFIG(INFO,
963 			"guest physical page region %u\n"
964 			"\t guest_phys_addr: %" PRIx64 "\n"
965 			"\t host_phys_addr : %" PRIx64 "\n"
966 			"\t size           : %" PRIx64 "\n",
967 			i,
968 			page->guest_phys_addr,
969 			page->host_phys_addr,
970 			page->size);
971 	}
972 }
973 #else
974 #define dump_guest_pages(dev)
975 #endif
976 
977 static bool
vhost_memory_changed(struct VhostUserMemory * new,struct rte_vhost_memory * old)978 vhost_memory_changed(struct VhostUserMemory *new,
979 		     struct rte_vhost_memory *old)
980 {
981 	uint32_t i;
982 
983 	if (new->nregions != old->nregions)
984 		return true;
985 
986 	for (i = 0; i < new->nregions; ++i) {
987 		VhostUserMemoryRegion *new_r = &new->regions[i];
988 		struct rte_vhost_mem_region *old_r = &old->regions[i];
989 
990 		if (new_r->guest_phys_addr != old_r->guest_phys_addr)
991 			return true;
992 		if (new_r->memory_size != old_r->size)
993 			return true;
994 		if (new_r->userspace_addr != old_r->guest_user_addr)
995 			return true;
996 	}
997 
998 	return false;
999 }
1000 
1001 static int
vhost_user_set_mem_table(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd)1002 vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
1003 			int main_fd)
1004 {
1005 	struct virtio_net *dev = *pdev;
1006 	struct VhostUserMemory *memory = &msg->payload.memory;
1007 	struct rte_vhost_mem_region *reg;
1008 	void *mmap_addr;
1009 	uint64_t mmap_size;
1010 	uint64_t mmap_offset;
1011 	uint64_t alignment;
1012 	uint32_t i;
1013 	int populate;
1014 
1015 	if (validate_msg_fds(msg, memory->nregions) != 0)
1016 		return RTE_VHOST_MSG_RESULT_ERR;
1017 
1018 	if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
1019 		VHOST_LOG_CONFIG(ERR,
1020 			"too many memory regions (%u)\n", memory->nregions);
1021 		goto close_msg_fds;
1022 	}
1023 
1024 	if (dev->mem && !vhost_memory_changed(memory, dev->mem)) {
1025 		VHOST_LOG_CONFIG(INFO,
1026 			"(%d) memory regions not changed\n", dev->vid);
1027 
1028 		close_msg_fds(msg);
1029 
1030 		return RTE_VHOST_MSG_RESULT_OK;
1031 	}
1032 
1033 	if (dev->mem) {
1034 		if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) {
1035 			struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
1036 
1037 			if (vdpa_dev && vdpa_dev->ops->dev_close)
1038 				vdpa_dev->ops->dev_close(dev->vid);
1039 			dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
1040 		}
1041 		free_mem_region(dev);
1042 		rte_free(dev->mem);
1043 		dev->mem = NULL;
1044 	}
1045 
1046 	/* Flush IOTLB cache as previous HVAs are now invalid */
1047 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1048 		for (i = 0; i < dev->nr_vring; i++)
1049 			vhost_user_iotlb_flush_all(dev->virtqueue[i]);
1050 
1051 	dev->nr_guest_pages = 0;
1052 	if (dev->guest_pages == NULL) {
1053 		dev->max_guest_pages = 8;
1054 		dev->guest_pages = rte_zmalloc(NULL,
1055 					dev->max_guest_pages *
1056 					sizeof(struct guest_page),
1057 					RTE_CACHE_LINE_SIZE);
1058 		if (dev->guest_pages == NULL) {
1059 			VHOST_LOG_CONFIG(ERR,
1060 				"(%d) failed to allocate memory "
1061 				"for dev->guest_pages\n",
1062 				dev->vid);
1063 			goto close_msg_fds;
1064 		}
1065 	}
1066 
1067 	dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
1068 		sizeof(struct rte_vhost_mem_region) * memory->nregions, 0);
1069 	if (dev->mem == NULL) {
1070 		VHOST_LOG_CONFIG(ERR,
1071 			"(%d) failed to allocate memory for dev->mem\n",
1072 			dev->vid);
1073 		goto free_guest_pages;
1074 	}
1075 	dev->mem->nregions = memory->nregions;
1076 
1077 	for (i = 0; i < memory->nregions; i++) {
1078 		reg = &dev->mem->regions[i];
1079 
1080 		reg->guest_phys_addr = memory->regions[i].guest_phys_addr;
1081 		reg->guest_user_addr = memory->regions[i].userspace_addr;
1082 		reg->size            = memory->regions[i].memory_size;
1083 		reg->fd              = msg->fds[i];
1084 
1085 		/*
1086 		 * Assign invalid file descriptor value to avoid double
1087 		 * closing on error path.
1088 		 */
1089 		msg->fds[i] = -1;
1090 
1091 		mmap_offset = memory->regions[i].mmap_offset;
1092 
1093 		/* Check for memory_size + mmap_offset overflow */
1094 		if (mmap_offset >= -reg->size) {
1095 			VHOST_LOG_CONFIG(ERR,
1096 				"mmap_offset (%#"PRIx64") and memory_size "
1097 				"(%#"PRIx64") overflow\n",
1098 				mmap_offset, reg->size);
1099 			goto free_mem_table;
1100 		}
1101 
1102 		mmap_size = reg->size + mmap_offset;
1103 
1104 		/* mmap() without flag of MAP_ANONYMOUS, should be called
1105 		 * with length argument aligned with hugepagesz at older
1106 		 * longterm version Linux, like 2.6.32 and 3.2.72, or
1107 		 * mmap() will fail with EINVAL.
1108 		 *
1109 		 * to avoid failure, make sure in caller to keep length
1110 		 * aligned.
1111 		 */
1112 		alignment = get_blk_size(reg->fd);
1113 		if (alignment == (uint64_t)-1) {
1114 			VHOST_LOG_CONFIG(ERR,
1115 				"couldn't get hugepage size through fstat\n");
1116 			goto free_mem_table;
1117 		}
1118 		mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
1119 		if (mmap_size == 0) {
1120 			/*
1121 			 * It could happen if initial mmap_size + alignment
1122 			 * overflows the sizeof uint64, which could happen if
1123 			 * either mmap_size or alignment value is wrong.
1124 			 *
1125 			 * mmap() kernel implementation would return an error,
1126 			 * but better catch it before and provide useful info
1127 			 * in the logs.
1128 			 */
1129 			VHOST_LOG_CONFIG(ERR, "mmap size (0x%" PRIx64 ") "
1130 					"or alignment (0x%" PRIx64 ") is invalid\n",
1131 					reg->size + mmap_offset, alignment);
1132 			goto free_mem_table;
1133 		}
1134 
1135 		populate = dev->async_copy ? MAP_POPULATE : 0;
1136 		mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
1137 				 MAP_SHARED | populate, reg->fd, 0);
1138 
1139 		if (mmap_addr == MAP_FAILED) {
1140 			VHOST_LOG_CONFIG(ERR,
1141 				"mmap region %u failed.\n", i);
1142 			goto free_mem_table;
1143 		}
1144 
1145 		reg->mmap_addr = mmap_addr;
1146 		reg->mmap_size = mmap_size;
1147 		reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
1148 				      mmap_offset;
1149 
1150 		if (dev->async_copy)
1151 			if (add_guest_pages(dev, reg, alignment) < 0) {
1152 				VHOST_LOG_CONFIG(ERR,
1153 					"adding guest pages to region %u failed.\n",
1154 					i);
1155 				goto free_mem_table;
1156 			}
1157 
1158 		VHOST_LOG_CONFIG(INFO,
1159 			"guest memory region %u, size: 0x%" PRIx64 "\n"
1160 			"\t guest physical addr: 0x%" PRIx64 "\n"
1161 			"\t guest virtual  addr: 0x%" PRIx64 "\n"
1162 			"\t host  virtual  addr: 0x%" PRIx64 "\n"
1163 			"\t mmap addr : 0x%" PRIx64 "\n"
1164 			"\t mmap size : 0x%" PRIx64 "\n"
1165 			"\t mmap align: 0x%" PRIx64 "\n"
1166 			"\t mmap off  : 0x%" PRIx64 "\n",
1167 			i, reg->size,
1168 			reg->guest_phys_addr,
1169 			reg->guest_user_addr,
1170 			reg->host_user_addr,
1171 			(uint64_t)(uintptr_t)mmap_addr,
1172 			mmap_size,
1173 			alignment,
1174 			mmap_offset);
1175 
1176 		if (dev->postcopy_listening) {
1177 			/*
1178 			 * We haven't a better way right now than sharing
1179 			 * DPDK's virtual address with Qemu, so that Qemu can
1180 			 * retrieve the region offset when handling userfaults.
1181 			 */
1182 			memory->regions[i].userspace_addr =
1183 				reg->host_user_addr;
1184 		}
1185 	}
1186 	if (dev->postcopy_listening) {
1187 		/* Send the addresses back to qemu */
1188 		msg->fd_num = 0;
1189 		send_vhost_reply(main_fd, msg);
1190 
1191 		/* Wait for qemu to acknolwedge it's got the addresses
1192 		 * we've got to wait before we're allowed to generate faults.
1193 		 */
1194 		VhostUserMsg ack_msg;
1195 		if (read_vhost_message(main_fd, &ack_msg) <= 0) {
1196 			VHOST_LOG_CONFIG(ERR,
1197 				"Failed to read qemu ack on postcopy set-mem-table\n");
1198 			goto free_mem_table;
1199 		}
1200 
1201 		if (validate_msg_fds(&ack_msg, 0) != 0)
1202 			goto free_mem_table;
1203 
1204 		if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) {
1205 			VHOST_LOG_CONFIG(ERR,
1206 				"Bad qemu ack on postcopy set-mem-table (%d)\n",
1207 				ack_msg.request.master);
1208 			goto free_mem_table;
1209 		}
1210 
1211 		/* Now userfault register and we can use the memory */
1212 		for (i = 0; i < memory->nregions; i++) {
1213 #ifdef RTE_LIBRTE_VHOST_POSTCOPY
1214 			reg = &dev->mem->regions[i];
1215 			struct uffdio_register reg_struct;
1216 
1217 			/*
1218 			 * Let's register all the mmap'ed area to ensure
1219 			 * alignment on page boundary.
1220 			 */
1221 			reg_struct.range.start =
1222 				(uint64_t)(uintptr_t)reg->mmap_addr;
1223 			reg_struct.range.len = reg->mmap_size;
1224 			reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
1225 
1226 			if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER,
1227 						&reg_struct)) {
1228 				VHOST_LOG_CONFIG(ERR,
1229 					"Failed to register ufd for region %d: (ufd = %d) %s\n",
1230 					i, dev->postcopy_ufd,
1231 					strerror(errno));
1232 				goto free_mem_table;
1233 			}
1234 			VHOST_LOG_CONFIG(INFO,
1235 				"\t userfaultfd registered for range : "
1236 				"%" PRIx64 " - %" PRIx64 "\n",
1237 				(uint64_t)reg_struct.range.start,
1238 				(uint64_t)reg_struct.range.start +
1239 				(uint64_t)reg_struct.range.len - 1);
1240 #else
1241 			goto free_mem_table;
1242 #endif
1243 		}
1244 	}
1245 
1246 	for (i = 0; i < dev->nr_vring; i++) {
1247 		struct vhost_virtqueue *vq = dev->virtqueue[i];
1248 
1249 		if (!vq)
1250 			continue;
1251 
1252 		if (vq->desc || vq->avail || vq->used) {
1253 			/*
1254 			 * If the memory table got updated, the ring addresses
1255 			 * need to be translated again as virtual addresses have
1256 			 * changed.
1257 			 */
1258 			vring_invalidate(dev, vq);
1259 
1260 			dev = translate_ring_addresses(dev, i);
1261 			if (!dev) {
1262 				dev = *pdev;
1263 				goto free_mem_table;
1264 			}
1265 
1266 			*pdev = dev;
1267 		}
1268 	}
1269 
1270 	dump_guest_pages(dev);
1271 
1272 	return RTE_VHOST_MSG_RESULT_OK;
1273 
1274 free_mem_table:
1275 	free_mem_region(dev);
1276 	rte_free(dev->mem);
1277 	dev->mem = NULL;
1278 free_guest_pages:
1279 	rte_free(dev->guest_pages);
1280 	dev->guest_pages = NULL;
1281 close_msg_fds:
1282 	close_msg_fds(msg);
1283 	return RTE_VHOST_MSG_RESULT_ERR;
1284 }
1285 
1286 static bool
vq_is_ready(struct virtio_net * dev,struct vhost_virtqueue * vq)1287 vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
1288 {
1289 	bool rings_ok;
1290 
1291 	if (!vq)
1292 		return false;
1293 
1294 	if (vq_is_packed(dev))
1295 		rings_ok = vq->desc_packed && vq->driver_event &&
1296 			vq->device_event;
1297 	else
1298 		rings_ok = vq->desc && vq->avail && vq->used;
1299 
1300 	return rings_ok &&
1301 	       vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
1302 	       vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD &&
1303 	       vq->enabled;
1304 }
1305 
1306 #define VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY 2u
1307 
1308 static int
virtio_is_ready(struct virtio_net * dev)1309 virtio_is_ready(struct virtio_net *dev)
1310 {
1311 	struct vhost_virtqueue *vq;
1312 	uint32_t i, nr_vring = dev->nr_vring;
1313 
1314 	if (dev->flags & VIRTIO_DEV_READY)
1315 		return 1;
1316 
1317 	if (!dev->nr_vring)
1318 		return 0;
1319 
1320 	if (dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) {
1321 		nr_vring = VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY;
1322 
1323 		if (dev->nr_vring < nr_vring)
1324 			return 0;
1325 	}
1326 
1327 	for (i = 0; i < nr_vring; i++) {
1328 		vq = dev->virtqueue[i];
1329 
1330 		if (!vq_is_ready(dev, vq))
1331 			return 0;
1332 	}
1333 
1334 	/* If supported, ensure the frontend is really done with config */
1335 	if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS))
1336 		if (!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK))
1337 			return 0;
1338 
1339 	dev->flags |= VIRTIO_DEV_READY;
1340 
1341 	if (!(dev->flags & VIRTIO_DEV_RUNNING))
1342 		VHOST_LOG_CONFIG(INFO,
1343 			"virtio is now ready for processing.\n");
1344 	return 1;
1345 }
1346 
1347 static void *
inflight_mem_alloc(const char * name,size_t size,int * fd)1348 inflight_mem_alloc(const char *name, size_t size, int *fd)
1349 {
1350 	void *ptr;
1351 	int mfd = -1;
1352 	char fname[20] = "/tmp/memfd-XXXXXX";
1353 
1354 	*fd = -1;
1355 #ifdef MEMFD_SUPPORTED
1356 	mfd = memfd_create(name, MFD_CLOEXEC);
1357 #else
1358 	RTE_SET_USED(name);
1359 #endif
1360 	if (mfd == -1) {
1361 		mfd = mkstemp(fname);
1362 		if (mfd == -1) {
1363 			VHOST_LOG_CONFIG(ERR,
1364 				"failed to get inflight buffer fd\n");
1365 			return NULL;
1366 		}
1367 
1368 		unlink(fname);
1369 	}
1370 
1371 	if (ftruncate(mfd, size) == -1) {
1372 		VHOST_LOG_CONFIG(ERR,
1373 			"failed to alloc inflight buffer\n");
1374 		close(mfd);
1375 		return NULL;
1376 	}
1377 
1378 	ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0);
1379 	if (ptr == MAP_FAILED) {
1380 		VHOST_LOG_CONFIG(ERR,
1381 			"failed to mmap inflight buffer\n");
1382 		close(mfd);
1383 		return NULL;
1384 	}
1385 
1386 	*fd = mfd;
1387 	return ptr;
1388 }
1389 
1390 static uint32_t
get_pervq_shm_size_split(uint16_t queue_size)1391 get_pervq_shm_size_split(uint16_t queue_size)
1392 {
1393 	return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_split) *
1394 				  queue_size + sizeof(uint64_t) +
1395 				  sizeof(uint16_t) * 4, INFLIGHT_ALIGNMENT);
1396 }
1397 
1398 static uint32_t
get_pervq_shm_size_packed(uint16_t queue_size)1399 get_pervq_shm_size_packed(uint16_t queue_size)
1400 {
1401 	return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_packed)
1402 				  * queue_size + sizeof(uint64_t) +
1403 				  sizeof(uint16_t) * 6 + sizeof(uint8_t) * 9,
1404 				  INFLIGHT_ALIGNMENT);
1405 }
1406 
1407 static int
vhost_user_get_inflight_fd(struct virtio_net ** pdev,VhostUserMsg * msg,int main_fd __rte_unused)1408 vhost_user_get_inflight_fd(struct virtio_net **pdev,
1409 			   VhostUserMsg *msg,
1410 			   int main_fd __rte_unused)
1411 {
1412 	struct rte_vhost_inflight_info_packed *inflight_packed;
1413 	uint64_t pervq_inflight_size, mmap_size;
1414 	uint16_t num_queues, queue_size;
1415 	struct virtio_net *dev = *pdev;
1416 	int fd, i, j;
1417 	void *addr;
1418 
1419 	if (msg->size != sizeof(msg->payload.inflight)) {
1420 		VHOST_LOG_CONFIG(ERR,
1421 			"invalid get_inflight_fd message size is %d\n",
1422 			msg->size);
1423 		return RTE_VHOST_MSG_RESULT_ERR;
1424 	}
1425 
1426 	if (dev->inflight_info == NULL) {
1427 		dev->inflight_info = calloc(1,
1428 					    sizeof(struct inflight_mem_info));
1429 		if (!dev->inflight_info) {
1430 			VHOST_LOG_CONFIG(ERR,
1431 				"failed to alloc dev inflight area\n");
1432 			return RTE_VHOST_MSG_RESULT_ERR;
1433 		}
1434 		dev->inflight_info->fd = -1;
1435 	}
1436 
1437 	num_queues = msg->payload.inflight.num_queues;
1438 	queue_size = msg->payload.inflight.queue_size;
1439 
1440 	VHOST_LOG_CONFIG(INFO, "get_inflight_fd num_queues: %u\n",
1441 		msg->payload.inflight.num_queues);
1442 	VHOST_LOG_CONFIG(INFO, "get_inflight_fd queue_size: %u\n",
1443 		msg->payload.inflight.queue_size);
1444 
1445 	if (vq_is_packed(dev))
1446 		pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
1447 	else
1448 		pervq_inflight_size = get_pervq_shm_size_split(queue_size);
1449 
1450 	mmap_size = num_queues * pervq_inflight_size;
1451 	addr = inflight_mem_alloc("vhost-inflight", mmap_size, &fd);
1452 	if (!addr) {
1453 		VHOST_LOG_CONFIG(ERR,
1454 			"failed to alloc vhost inflight area\n");
1455 			msg->payload.inflight.mmap_size = 0;
1456 		return RTE_VHOST_MSG_RESULT_ERR;
1457 	}
1458 	memset(addr, 0, mmap_size);
1459 
1460 	if (dev->inflight_info->addr) {
1461 		munmap(dev->inflight_info->addr, dev->inflight_info->size);
1462 		dev->inflight_info->addr = NULL;
1463 	}
1464 
1465 	if (dev->inflight_info->fd >= 0) {
1466 		close(dev->inflight_info->fd);
1467 		dev->inflight_info->fd = -1;
1468 	}
1469 
1470 	dev->inflight_info->addr = addr;
1471 	dev->inflight_info->size = msg->payload.inflight.mmap_size = mmap_size;
1472 	dev->inflight_info->fd = msg->fds[0] = fd;
1473 	msg->payload.inflight.mmap_offset = 0;
1474 	msg->fd_num = 1;
1475 
1476 	if (vq_is_packed(dev)) {
1477 		for (i = 0; i < num_queues; i++) {
1478 			inflight_packed =
1479 				(struct rte_vhost_inflight_info_packed *)addr;
1480 			inflight_packed->used_wrap_counter = 1;
1481 			inflight_packed->old_used_wrap_counter = 1;
1482 			for (j = 0; j < queue_size; j++)
1483 				inflight_packed->desc[j].next = j + 1;
1484 			addr = (void *)((char *)addr + pervq_inflight_size);
1485 		}
1486 	}
1487 
1488 	VHOST_LOG_CONFIG(INFO,
1489 		"send inflight mmap_size: %"PRIu64"\n",
1490 		msg->payload.inflight.mmap_size);
1491 	VHOST_LOG_CONFIG(INFO,
1492 		"send inflight mmap_offset: %"PRIu64"\n",
1493 		msg->payload.inflight.mmap_offset);
1494 	VHOST_LOG_CONFIG(INFO,
1495 		"send inflight fd: %d\n", msg->fds[0]);
1496 
1497 	return RTE_VHOST_MSG_RESULT_REPLY;
1498 }
1499 
1500 static int
vhost_user_set_inflight_fd(struct virtio_net ** pdev,VhostUserMsg * msg,int main_fd __rte_unused)1501 vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
1502 			   int main_fd __rte_unused)
1503 {
1504 	uint64_t mmap_size, mmap_offset;
1505 	uint16_t num_queues, queue_size;
1506 	struct virtio_net *dev = *pdev;
1507 	uint32_t pervq_inflight_size;
1508 	struct vhost_virtqueue *vq;
1509 	void *addr;
1510 	int fd, i;
1511 
1512 	fd = msg->fds[0];
1513 	if (msg->size != sizeof(msg->payload.inflight) || fd < 0) {
1514 		VHOST_LOG_CONFIG(ERR,
1515 			"invalid set_inflight_fd message size is %d,fd is %d\n",
1516 			msg->size, fd);
1517 		return RTE_VHOST_MSG_RESULT_ERR;
1518 	}
1519 
1520 	mmap_size = msg->payload.inflight.mmap_size;
1521 	mmap_offset = msg->payload.inflight.mmap_offset;
1522 	num_queues = msg->payload.inflight.num_queues;
1523 	queue_size = msg->payload.inflight.queue_size;
1524 
1525 	if (vq_is_packed(dev))
1526 		pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
1527 	else
1528 		pervq_inflight_size = get_pervq_shm_size_split(queue_size);
1529 
1530 	VHOST_LOG_CONFIG(INFO,
1531 		"set_inflight_fd mmap_size: %"PRIu64"\n", mmap_size);
1532 	VHOST_LOG_CONFIG(INFO,
1533 		"set_inflight_fd mmap_offset: %"PRIu64"\n", mmap_offset);
1534 	VHOST_LOG_CONFIG(INFO,
1535 		"set_inflight_fd num_queues: %u\n", num_queues);
1536 	VHOST_LOG_CONFIG(INFO,
1537 		"set_inflight_fd queue_size: %u\n", queue_size);
1538 	VHOST_LOG_CONFIG(INFO,
1539 		"set_inflight_fd fd: %d\n", fd);
1540 	VHOST_LOG_CONFIG(INFO,
1541 		"set_inflight_fd pervq_inflight_size: %d\n",
1542 		pervq_inflight_size);
1543 
1544 	if (!dev->inflight_info) {
1545 		dev->inflight_info = calloc(1,
1546 					    sizeof(struct inflight_mem_info));
1547 		if (dev->inflight_info == NULL) {
1548 			VHOST_LOG_CONFIG(ERR,
1549 				"failed to alloc dev inflight area\n");
1550 			return RTE_VHOST_MSG_RESULT_ERR;
1551 		}
1552 		dev->inflight_info->fd = -1;
1553 	}
1554 
1555 	if (dev->inflight_info->addr) {
1556 		munmap(dev->inflight_info->addr, dev->inflight_info->size);
1557 		dev->inflight_info->addr = NULL;
1558 	}
1559 
1560 	addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
1561 		    fd, mmap_offset);
1562 	if (addr == MAP_FAILED) {
1563 		VHOST_LOG_CONFIG(ERR, "failed to mmap share memory.\n");
1564 		return RTE_VHOST_MSG_RESULT_ERR;
1565 	}
1566 
1567 	if (dev->inflight_info->fd >= 0) {
1568 		close(dev->inflight_info->fd);
1569 		dev->inflight_info->fd = -1;
1570 	}
1571 
1572 	dev->inflight_info->fd = fd;
1573 	dev->inflight_info->addr = addr;
1574 	dev->inflight_info->size = mmap_size;
1575 
1576 	for (i = 0; i < num_queues; i++) {
1577 		vq = dev->virtqueue[i];
1578 		if (!vq)
1579 			continue;
1580 
1581 		if (vq_is_packed(dev)) {
1582 			vq->inflight_packed = addr;
1583 			vq->inflight_packed->desc_num = queue_size;
1584 		} else {
1585 			vq->inflight_split = addr;
1586 			vq->inflight_split->desc_num = queue_size;
1587 		}
1588 		addr = (void *)((char *)addr + pervq_inflight_size);
1589 	}
1590 
1591 	return RTE_VHOST_MSG_RESULT_OK;
1592 }
1593 
1594 static int
vhost_user_set_vring_call(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)1595 vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
1596 			int main_fd __rte_unused)
1597 {
1598 	struct virtio_net *dev = *pdev;
1599 	struct vhost_vring_file file;
1600 	struct vhost_virtqueue *vq;
1601 	int expected_fds;
1602 
1603 	expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1604 	if (validate_msg_fds(msg, expected_fds) != 0)
1605 		return RTE_VHOST_MSG_RESULT_ERR;
1606 
1607 	file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1608 	if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
1609 		file.fd = VIRTIO_INVALID_EVENTFD;
1610 	else
1611 		file.fd = msg->fds[0];
1612 	VHOST_LOG_CONFIG(INFO,
1613 		"vring call idx:%d file:%d\n", file.index, file.fd);
1614 
1615 	vq = dev->virtqueue[file.index];
1616 
1617 	if (vq->ready) {
1618 		vq->ready = 0;
1619 		vhost_user_notify_queue_state(dev, file.index, 0);
1620 	}
1621 
1622 	if (vq->callfd >= 0)
1623 		close(vq->callfd);
1624 
1625 	vq->callfd = file.fd;
1626 
1627 	return RTE_VHOST_MSG_RESULT_OK;
1628 }
1629 
vhost_user_set_vring_err(struct virtio_net ** pdev __rte_unused,struct VhostUserMsg * msg,int main_fd __rte_unused)1630 static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
1631 			struct VhostUserMsg *msg,
1632 			int main_fd __rte_unused)
1633 {
1634 	int expected_fds;
1635 
1636 	expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1637 	if (validate_msg_fds(msg, expected_fds) != 0)
1638 		return RTE_VHOST_MSG_RESULT_ERR;
1639 
1640 	if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK))
1641 		close(msg->fds[0]);
1642 	VHOST_LOG_CONFIG(INFO, "not implemented\n");
1643 
1644 	return RTE_VHOST_MSG_RESULT_OK;
1645 }
1646 
1647 static int
resubmit_desc_compare(const void * a,const void * b)1648 resubmit_desc_compare(const void *a, const void *b)
1649 {
1650 	const struct rte_vhost_resubmit_desc *desc0 = a;
1651 	const struct rte_vhost_resubmit_desc *desc1 = b;
1652 
1653 	if (desc1->counter > desc0->counter)
1654 		return 1;
1655 
1656 	return -1;
1657 }
1658 
1659 static int
vhost_check_queue_inflights_split(struct virtio_net * dev,struct vhost_virtqueue * vq)1660 vhost_check_queue_inflights_split(struct virtio_net *dev,
1661 				  struct vhost_virtqueue *vq)
1662 {
1663 	uint16_t i;
1664 	uint16_t resubmit_num = 0, last_io, num;
1665 	struct vring_used *used = vq->used;
1666 	struct rte_vhost_resubmit_info *resubmit;
1667 	struct rte_vhost_inflight_info_split *inflight_split;
1668 
1669 	if (!(dev->protocol_features &
1670 	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
1671 		return RTE_VHOST_MSG_RESULT_OK;
1672 
1673 	/* The frontend may still not support the inflight feature
1674 	 * although we negotiate the protocol feature.
1675 	 */
1676 	if ((!vq->inflight_split))
1677 		return RTE_VHOST_MSG_RESULT_OK;
1678 
1679 	if (!vq->inflight_split->version) {
1680 		vq->inflight_split->version = INFLIGHT_VERSION;
1681 		return RTE_VHOST_MSG_RESULT_OK;
1682 	}
1683 
1684 	if (vq->resubmit_inflight)
1685 		return RTE_VHOST_MSG_RESULT_OK;
1686 
1687 	inflight_split = vq->inflight_split;
1688 	vq->global_counter = 0;
1689 	last_io = inflight_split->last_inflight_io;
1690 
1691 	if (inflight_split->used_idx != used->idx) {
1692 		inflight_split->desc[last_io].inflight = 0;
1693 		rte_smp_mb();
1694 		inflight_split->used_idx = used->idx;
1695 	}
1696 
1697 	for (i = 0; i < inflight_split->desc_num; i++) {
1698 		if (inflight_split->desc[i].inflight == 1)
1699 			resubmit_num++;
1700 	}
1701 
1702 	vq->last_avail_idx += resubmit_num;
1703 
1704 	if (resubmit_num) {
1705 		resubmit  = calloc(1, sizeof(struct rte_vhost_resubmit_info));
1706 		if (!resubmit) {
1707 			VHOST_LOG_CONFIG(ERR,
1708 				"failed to allocate memory for resubmit info.\n");
1709 			return RTE_VHOST_MSG_RESULT_ERR;
1710 		}
1711 
1712 		resubmit->resubmit_list = calloc(resubmit_num,
1713 			sizeof(struct rte_vhost_resubmit_desc));
1714 		if (!resubmit->resubmit_list) {
1715 			VHOST_LOG_CONFIG(ERR,
1716 				"failed to allocate memory for inflight desc.\n");
1717 			free(resubmit);
1718 			return RTE_VHOST_MSG_RESULT_ERR;
1719 		}
1720 
1721 		num = 0;
1722 		for (i = 0; i < vq->inflight_split->desc_num; i++) {
1723 			if (vq->inflight_split->desc[i].inflight == 1) {
1724 				resubmit->resubmit_list[num].index = i;
1725 				resubmit->resubmit_list[num].counter =
1726 					inflight_split->desc[i].counter;
1727 				num++;
1728 			}
1729 		}
1730 		resubmit->resubmit_num = num;
1731 
1732 		if (resubmit->resubmit_num > 1)
1733 			qsort(resubmit->resubmit_list, resubmit->resubmit_num,
1734 			      sizeof(struct rte_vhost_resubmit_desc),
1735 			      resubmit_desc_compare);
1736 
1737 		vq->global_counter = resubmit->resubmit_list[0].counter + 1;
1738 		vq->resubmit_inflight = resubmit;
1739 	}
1740 
1741 	return RTE_VHOST_MSG_RESULT_OK;
1742 }
1743 
1744 static int
vhost_check_queue_inflights_packed(struct virtio_net * dev,struct vhost_virtqueue * vq)1745 vhost_check_queue_inflights_packed(struct virtio_net *dev,
1746 				   struct vhost_virtqueue *vq)
1747 {
1748 	uint16_t i;
1749 	uint16_t resubmit_num = 0, old_used_idx, num;
1750 	struct rte_vhost_resubmit_info *resubmit;
1751 	struct rte_vhost_inflight_info_packed *inflight_packed;
1752 
1753 	if (!(dev->protocol_features &
1754 	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
1755 		return RTE_VHOST_MSG_RESULT_OK;
1756 
1757 	/* The frontend may still not support the inflight feature
1758 	 * although we negotiate the protocol feature.
1759 	 */
1760 	if ((!vq->inflight_packed))
1761 		return RTE_VHOST_MSG_RESULT_OK;
1762 
1763 	if (!vq->inflight_packed->version) {
1764 		vq->inflight_packed->version = INFLIGHT_VERSION;
1765 		return RTE_VHOST_MSG_RESULT_OK;
1766 	}
1767 
1768 	if (vq->resubmit_inflight)
1769 		return RTE_VHOST_MSG_RESULT_OK;
1770 
1771 	inflight_packed = vq->inflight_packed;
1772 	vq->global_counter = 0;
1773 	old_used_idx = inflight_packed->old_used_idx;
1774 
1775 	if (inflight_packed->used_idx != old_used_idx) {
1776 		if (inflight_packed->desc[old_used_idx].inflight == 0) {
1777 			inflight_packed->old_used_idx =
1778 				inflight_packed->used_idx;
1779 			inflight_packed->old_used_wrap_counter =
1780 				inflight_packed->used_wrap_counter;
1781 			inflight_packed->old_free_head =
1782 				inflight_packed->free_head;
1783 		} else {
1784 			inflight_packed->used_idx =
1785 				inflight_packed->old_used_idx;
1786 			inflight_packed->used_wrap_counter =
1787 				inflight_packed->old_used_wrap_counter;
1788 			inflight_packed->free_head =
1789 				inflight_packed->old_free_head;
1790 		}
1791 	}
1792 
1793 	for (i = 0; i < inflight_packed->desc_num; i++) {
1794 		if (inflight_packed->desc[i].inflight == 1)
1795 			resubmit_num++;
1796 	}
1797 
1798 	if (resubmit_num) {
1799 		resubmit = calloc(1, sizeof(struct rte_vhost_resubmit_info));
1800 		if (resubmit == NULL) {
1801 			VHOST_LOG_CONFIG(ERR,
1802 				"failed to allocate memory for resubmit info.\n");
1803 			return RTE_VHOST_MSG_RESULT_ERR;
1804 		}
1805 
1806 		resubmit->resubmit_list = calloc(resubmit_num,
1807 			sizeof(struct rte_vhost_resubmit_desc));
1808 		if (resubmit->resubmit_list == NULL) {
1809 			VHOST_LOG_CONFIG(ERR,
1810 				"failed to allocate memory for resubmit desc.\n");
1811 			free(resubmit);
1812 			return RTE_VHOST_MSG_RESULT_ERR;
1813 		}
1814 
1815 		num = 0;
1816 		for (i = 0; i < inflight_packed->desc_num; i++) {
1817 			if (vq->inflight_packed->desc[i].inflight == 1) {
1818 				resubmit->resubmit_list[num].index = i;
1819 				resubmit->resubmit_list[num].counter =
1820 					inflight_packed->desc[i].counter;
1821 				num++;
1822 			}
1823 		}
1824 		resubmit->resubmit_num = num;
1825 
1826 		if (resubmit->resubmit_num > 1)
1827 			qsort(resubmit->resubmit_list, resubmit->resubmit_num,
1828 			      sizeof(struct rte_vhost_resubmit_desc),
1829 			      resubmit_desc_compare);
1830 
1831 		vq->global_counter = resubmit->resubmit_list[0].counter + 1;
1832 		vq->resubmit_inflight = resubmit;
1833 	}
1834 
1835 	return RTE_VHOST_MSG_RESULT_OK;
1836 }
1837 
1838 static int
vhost_user_set_vring_kick(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)1839 vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
1840 			int main_fd __rte_unused)
1841 {
1842 	struct virtio_net *dev = *pdev;
1843 	struct vhost_vring_file file;
1844 	struct vhost_virtqueue *vq;
1845 	int expected_fds;
1846 
1847 	expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1848 	if (validate_msg_fds(msg, expected_fds) != 0)
1849 		return RTE_VHOST_MSG_RESULT_ERR;
1850 
1851 	file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1852 	if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
1853 		file.fd = VIRTIO_INVALID_EVENTFD;
1854 	else
1855 		file.fd = msg->fds[0];
1856 	VHOST_LOG_CONFIG(INFO,
1857 		"vring kick idx:%d file:%d\n", file.index, file.fd);
1858 
1859 	/* Interpret ring addresses only when ring is started. */
1860 	dev = translate_ring_addresses(dev, file.index);
1861 	if (!dev) {
1862 		if (file.fd != VIRTIO_INVALID_EVENTFD)
1863 			close(file.fd);
1864 
1865 		return RTE_VHOST_MSG_RESULT_ERR;
1866 	}
1867 
1868 	*pdev = dev;
1869 
1870 	vq = dev->virtqueue[file.index];
1871 
1872 	/*
1873 	 * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
1874 	 * the ring starts already enabled. Otherwise, it is enabled via
1875 	 * the SET_VRING_ENABLE message.
1876 	 */
1877 	if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
1878 		vq->enabled = 1;
1879 		if (dev->notify_ops->vring_state_changed)
1880 			dev->notify_ops->vring_state_changed(
1881 				dev->vid, file.index, 1);
1882 	}
1883 
1884 	if (vq->ready) {
1885 		vq->ready = 0;
1886 		vhost_user_notify_queue_state(dev, file.index, 0);
1887 	}
1888 
1889 	if (vq->kickfd >= 0)
1890 		close(vq->kickfd);
1891 	vq->kickfd = file.fd;
1892 
1893 	if (vq_is_packed(dev)) {
1894 		if (vhost_check_queue_inflights_packed(dev, vq)) {
1895 			VHOST_LOG_CONFIG(ERR,
1896 				"failed to inflights for vq: %d\n", file.index);
1897 			return RTE_VHOST_MSG_RESULT_ERR;
1898 		}
1899 	} else {
1900 		if (vhost_check_queue_inflights_split(dev, vq)) {
1901 			VHOST_LOG_CONFIG(ERR,
1902 				"failed to inflights for vq: %d\n", file.index);
1903 			return RTE_VHOST_MSG_RESULT_ERR;
1904 		}
1905 	}
1906 
1907 	return RTE_VHOST_MSG_RESULT_OK;
1908 }
1909 
1910 /*
1911  * when virtio is stopped, qemu will send us the GET_VRING_BASE message.
1912  */
1913 static int
vhost_user_get_vring_base(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)1914 vhost_user_get_vring_base(struct virtio_net **pdev,
1915 			struct VhostUserMsg *msg,
1916 			int main_fd __rte_unused)
1917 {
1918 	struct virtio_net *dev = *pdev;
1919 	struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
1920 	uint64_t val;
1921 
1922 	if (validate_msg_fds(msg, 0) != 0)
1923 		return RTE_VHOST_MSG_RESULT_ERR;
1924 
1925 	/* We have to stop the queue (virtio) if it is running. */
1926 	vhost_destroy_device_notify(dev);
1927 
1928 	dev->flags &= ~VIRTIO_DEV_READY;
1929 	dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
1930 
1931 	/* Here we are safe to get the indexes */
1932 	if (vq_is_packed(dev)) {
1933 		/*
1934 		 * Bit[0:14]: avail index
1935 		 * Bit[15]: avail wrap counter
1936 		 */
1937 		val = vq->last_avail_idx & 0x7fff;
1938 		val |= vq->avail_wrap_counter << 15;
1939 		msg->payload.state.num = val;
1940 	} else {
1941 		msg->payload.state.num = vq->last_avail_idx;
1942 	}
1943 
1944 	VHOST_LOG_CONFIG(INFO,
1945 		"vring base idx:%d file:%d\n", msg->payload.state.index,
1946 		msg->payload.state.num);
1947 	/*
1948 	 * Based on current qemu vhost-user implementation, this message is
1949 	 * sent and only sent in vhost_vring_stop.
1950 	 * TODO: cleanup the vring, it isn't usable since here.
1951 	 */
1952 	if (vq->kickfd >= 0)
1953 		close(vq->kickfd);
1954 
1955 	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
1956 
1957 	if (vq->callfd >= 0)
1958 		close(vq->callfd);
1959 
1960 	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
1961 
1962 	vq->signalled_used_valid = false;
1963 
1964 	if (vq_is_packed(dev)) {
1965 		rte_free(vq->shadow_used_packed);
1966 		vq->shadow_used_packed = NULL;
1967 	} else {
1968 		rte_free(vq->shadow_used_split);
1969 		vq->shadow_used_split = NULL;
1970 		if (vq->async_pkts_pending)
1971 			rte_free(vq->async_pkts_pending);
1972 		if (vq->async_pkts_info)
1973 			rte_free(vq->async_pkts_info);
1974 		vq->async_pkts_pending = NULL;
1975 		vq->async_pkts_info = NULL;
1976 	}
1977 
1978 	rte_free(vq->batch_copy_elems);
1979 	vq->batch_copy_elems = NULL;
1980 
1981 	msg->size = sizeof(msg->payload.state);
1982 	msg->fd_num = 0;
1983 
1984 	vring_invalidate(dev, vq);
1985 
1986 	return RTE_VHOST_MSG_RESULT_REPLY;
1987 }
1988 
1989 /*
1990  * when virtio queues are ready to work, qemu will send us to
1991  * enable the virtio queue pair.
1992  */
1993 static int
vhost_user_set_vring_enable(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)1994 vhost_user_set_vring_enable(struct virtio_net **pdev,
1995 			struct VhostUserMsg *msg,
1996 			int main_fd __rte_unused)
1997 {
1998 	struct virtio_net *dev = *pdev;
1999 	int enable = (int)msg->payload.state.num;
2000 	int index = (int)msg->payload.state.index;
2001 
2002 	if (validate_msg_fds(msg, 0) != 0)
2003 		return RTE_VHOST_MSG_RESULT_ERR;
2004 
2005 	VHOST_LOG_CONFIG(INFO,
2006 		"set queue enable: %d to qp idx: %d\n",
2007 		enable, index);
2008 
2009 	if (enable && dev->virtqueue[index]->async_registered) {
2010 		if (dev->virtqueue[index]->async_pkts_inflight_n) {
2011 			VHOST_LOG_CONFIG(ERR, "failed to enable vring. "
2012 			"async inflight packets must be completed first\n");
2013 			return RTE_VHOST_MSG_RESULT_ERR;
2014 		}
2015 	}
2016 
2017 	dev->virtqueue[index]->enabled = enable;
2018 
2019 	return RTE_VHOST_MSG_RESULT_OK;
2020 }
2021 
2022 static int
vhost_user_get_protocol_features(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)2023 vhost_user_get_protocol_features(struct virtio_net **pdev,
2024 			struct VhostUserMsg *msg,
2025 			int main_fd __rte_unused)
2026 {
2027 	struct virtio_net *dev = *pdev;
2028 	uint64_t features, protocol_features;
2029 
2030 	if (validate_msg_fds(msg, 0) != 0)
2031 		return RTE_VHOST_MSG_RESULT_ERR;
2032 
2033 	rte_vhost_driver_get_features(dev->ifname, &features);
2034 	rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
2035 
2036 	msg->payload.u64 = protocol_features;
2037 	msg->size = sizeof(msg->payload.u64);
2038 	msg->fd_num = 0;
2039 
2040 	return RTE_VHOST_MSG_RESULT_REPLY;
2041 }
2042 
2043 static int
vhost_user_set_protocol_features(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)2044 vhost_user_set_protocol_features(struct virtio_net **pdev,
2045 			struct VhostUserMsg *msg,
2046 			int main_fd __rte_unused)
2047 {
2048 	struct virtio_net *dev = *pdev;
2049 	uint64_t protocol_features = msg->payload.u64;
2050 	uint64_t slave_protocol_features = 0;
2051 
2052 	if (validate_msg_fds(msg, 0) != 0)
2053 		return RTE_VHOST_MSG_RESULT_ERR;
2054 
2055 	rte_vhost_driver_get_protocol_features(dev->ifname,
2056 			&slave_protocol_features);
2057 	if (protocol_features & ~slave_protocol_features) {
2058 		VHOST_LOG_CONFIG(ERR,
2059 			"(%d) received invalid protocol features.\n",
2060 			dev->vid);
2061 		return RTE_VHOST_MSG_RESULT_ERR;
2062 	}
2063 
2064 	dev->protocol_features = protocol_features;
2065 	VHOST_LOG_CONFIG(INFO,
2066 		"negotiated Vhost-user protocol features: 0x%" PRIx64 "\n",
2067 		dev->protocol_features);
2068 
2069 	return RTE_VHOST_MSG_RESULT_OK;
2070 }
2071 
2072 static int
vhost_user_set_log_base(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)2073 vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
2074 			int main_fd __rte_unused)
2075 {
2076 	struct virtio_net *dev = *pdev;
2077 	int fd = msg->fds[0];
2078 	uint64_t size, off;
2079 	void *addr;
2080 
2081 	if (validate_msg_fds(msg, 1) != 0)
2082 		return RTE_VHOST_MSG_RESULT_ERR;
2083 
2084 	if (fd < 0) {
2085 		VHOST_LOG_CONFIG(ERR, "invalid log fd: %d\n", fd);
2086 		return RTE_VHOST_MSG_RESULT_ERR;
2087 	}
2088 
2089 	if (msg->size != sizeof(VhostUserLog)) {
2090 		VHOST_LOG_CONFIG(ERR,
2091 			"invalid log base msg size: %"PRId32" != %d\n",
2092 			msg->size, (int)sizeof(VhostUserLog));
2093 		goto close_msg_fds;
2094 	}
2095 
2096 	size = msg->payload.log.mmap_size;
2097 	off  = msg->payload.log.mmap_offset;
2098 
2099 	/* Check for mmap size and offset overflow. */
2100 	if (off >= -size) {
2101 		VHOST_LOG_CONFIG(ERR,
2102 			"log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
2103 			off, size);
2104 		goto close_msg_fds;
2105 	}
2106 
2107 	VHOST_LOG_CONFIG(INFO,
2108 		"log mmap size: %"PRId64", offset: %"PRId64"\n",
2109 		size, off);
2110 
2111 	/*
2112 	 * mmap from 0 to workaround a hugepage mmap bug: mmap will
2113 	 * fail when offset is not page size aligned.
2114 	 */
2115 	addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2116 	close(fd);
2117 	if (addr == MAP_FAILED) {
2118 		VHOST_LOG_CONFIG(ERR, "mmap log base failed!\n");
2119 		return RTE_VHOST_MSG_RESULT_ERR;
2120 	}
2121 
2122 	/*
2123 	 * Free previously mapped log memory on occasionally
2124 	 * multiple VHOST_USER_SET_LOG_BASE.
2125 	 */
2126 	if (dev->log_addr) {
2127 		munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
2128 	}
2129 	dev->log_addr = (uint64_t)(uintptr_t)addr;
2130 	dev->log_base = dev->log_addr + off;
2131 	dev->log_size = size;
2132 
2133 	/*
2134 	 * The spec is not clear about it (yet), but QEMU doesn't expect
2135 	 * any payload in the reply.
2136 	 */
2137 	msg->size = 0;
2138 	msg->fd_num = 0;
2139 
2140 	return RTE_VHOST_MSG_RESULT_REPLY;
2141 
2142 close_msg_fds:
2143 	close_msg_fds(msg);
2144 	return RTE_VHOST_MSG_RESULT_ERR;
2145 }
2146 
vhost_user_set_log_fd(struct virtio_net ** pdev __rte_unused,struct VhostUserMsg * msg,int main_fd __rte_unused)2147 static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
2148 			struct VhostUserMsg *msg,
2149 			int main_fd __rte_unused)
2150 {
2151 	if (validate_msg_fds(msg, 1) != 0)
2152 		return RTE_VHOST_MSG_RESULT_ERR;
2153 
2154 	close(msg->fds[0]);
2155 	VHOST_LOG_CONFIG(INFO, "not implemented.\n");
2156 
2157 	return RTE_VHOST_MSG_RESULT_OK;
2158 }
2159 
2160 /*
2161  * An rarp packet is constructed and broadcasted to notify switches about
2162  * the new location of the migrated VM, so that packets from outside will
2163  * not be lost after migration.
2164  *
2165  * However, we don't actually "send" a rarp packet here, instead, we set
2166  * a flag 'broadcast_rarp' to let rte_vhost_dequeue_burst() inject it.
2167  */
2168 static int
vhost_user_send_rarp(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)2169 vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
2170 			int main_fd __rte_unused)
2171 {
2172 	struct virtio_net *dev = *pdev;
2173 	uint8_t *mac = (uint8_t *)&msg->payload.u64;
2174 	struct rte_vdpa_device *vdpa_dev;
2175 
2176 	if (validate_msg_fds(msg, 0) != 0)
2177 		return RTE_VHOST_MSG_RESULT_ERR;
2178 
2179 	VHOST_LOG_CONFIG(DEBUG,
2180 		":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
2181 		mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2182 	memcpy(dev->mac.addr_bytes, mac, 6);
2183 
2184 	/*
2185 	 * Set the flag to inject a RARP broadcast packet at
2186 	 * rte_vhost_dequeue_burst().
2187 	 *
2188 	 * __ATOMIC_RELEASE ordering is for making sure the mac is
2189 	 * copied before the flag is set.
2190 	 */
2191 	__atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE);
2192 	vdpa_dev = dev->vdpa_dev;
2193 	if (vdpa_dev && vdpa_dev->ops->migration_done)
2194 		vdpa_dev->ops->migration_done(dev->vid);
2195 
2196 	return RTE_VHOST_MSG_RESULT_OK;
2197 }
2198 
2199 static int
vhost_user_net_set_mtu(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)2200 vhost_user_net_set_mtu(struct virtio_net **pdev, struct VhostUserMsg *msg,
2201 			int main_fd __rte_unused)
2202 {
2203 	struct virtio_net *dev = *pdev;
2204 
2205 	if (validate_msg_fds(msg, 0) != 0)
2206 		return RTE_VHOST_MSG_RESULT_ERR;
2207 
2208 	if (msg->payload.u64 < VIRTIO_MIN_MTU ||
2209 			msg->payload.u64 > VIRTIO_MAX_MTU) {
2210 		VHOST_LOG_CONFIG(ERR, "Invalid MTU size (%"PRIu64")\n",
2211 				msg->payload.u64);
2212 
2213 		return RTE_VHOST_MSG_RESULT_ERR;
2214 	}
2215 
2216 	dev->mtu = msg->payload.u64;
2217 
2218 	return RTE_VHOST_MSG_RESULT_OK;
2219 }
2220 
2221 static int
vhost_user_set_req_fd(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)2222 vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
2223 			int main_fd __rte_unused)
2224 {
2225 	struct virtio_net *dev = *pdev;
2226 	int fd = msg->fds[0];
2227 
2228 	if (validate_msg_fds(msg, 1) != 0)
2229 		return RTE_VHOST_MSG_RESULT_ERR;
2230 
2231 	if (fd < 0) {
2232 		VHOST_LOG_CONFIG(ERR,
2233 				"Invalid file descriptor for slave channel (%d)\n",
2234 				fd);
2235 		return RTE_VHOST_MSG_RESULT_ERR;
2236 	}
2237 
2238 	if (dev->slave_req_fd >= 0)
2239 		close(dev->slave_req_fd);
2240 
2241 	dev->slave_req_fd = fd;
2242 
2243 	return RTE_VHOST_MSG_RESULT_OK;
2244 }
2245 
2246 static int
is_vring_iotlb_split(struct vhost_virtqueue * vq,struct vhost_iotlb_msg * imsg)2247 is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
2248 {
2249 	struct vhost_vring_addr *ra;
2250 	uint64_t start, end, len;
2251 
2252 	start = imsg->iova;
2253 	end = start + imsg->size;
2254 
2255 	ra = &vq->ring_addrs;
2256 	len = sizeof(struct vring_desc) * vq->size;
2257 	if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
2258 		return 1;
2259 
2260 	len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
2261 	if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
2262 		return 1;
2263 
2264 	len = sizeof(struct vring_used) +
2265 	       sizeof(struct vring_used_elem) * vq->size;
2266 	if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
2267 		return 1;
2268 
2269 	if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
2270 		len = sizeof(uint64_t);
2271 		if (ra->log_guest_addr < end &&
2272 		    (ra->log_guest_addr + len) > start)
2273 			return 1;
2274 	}
2275 
2276 	return 0;
2277 }
2278 
2279 static int
is_vring_iotlb_packed(struct vhost_virtqueue * vq,struct vhost_iotlb_msg * imsg)2280 is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
2281 {
2282 	struct vhost_vring_addr *ra;
2283 	uint64_t start, end, len;
2284 
2285 	start = imsg->iova;
2286 	end = start + imsg->size;
2287 
2288 	ra = &vq->ring_addrs;
2289 	len = sizeof(struct vring_packed_desc) * vq->size;
2290 	if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
2291 		return 1;
2292 
2293 	len = sizeof(struct vring_packed_desc_event);
2294 	if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
2295 		return 1;
2296 
2297 	len = sizeof(struct vring_packed_desc_event);
2298 	if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
2299 		return 1;
2300 
2301 	if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
2302 		len = sizeof(uint64_t);
2303 		if (ra->log_guest_addr < end &&
2304 		    (ra->log_guest_addr + len) > start)
2305 			return 1;
2306 	}
2307 
2308 	return 0;
2309 }
2310 
is_vring_iotlb(struct virtio_net * dev,struct vhost_virtqueue * vq,struct vhost_iotlb_msg * imsg)2311 static int is_vring_iotlb(struct virtio_net *dev,
2312 			  struct vhost_virtqueue *vq,
2313 			  struct vhost_iotlb_msg *imsg)
2314 {
2315 	if (vq_is_packed(dev))
2316 		return is_vring_iotlb_packed(vq, imsg);
2317 	else
2318 		return is_vring_iotlb_split(vq, imsg);
2319 }
2320 
2321 static int
vhost_user_iotlb_msg(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)2322 vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
2323 			int main_fd __rte_unused)
2324 {
2325 	struct virtio_net *dev = *pdev;
2326 	struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
2327 	uint16_t i;
2328 	uint64_t vva, len;
2329 
2330 	if (validate_msg_fds(msg, 0) != 0)
2331 		return RTE_VHOST_MSG_RESULT_ERR;
2332 
2333 	switch (imsg->type) {
2334 	case VHOST_IOTLB_UPDATE:
2335 		len = imsg->size;
2336 		vva = qva_to_vva(dev, imsg->uaddr, &len);
2337 		if (!vva)
2338 			return RTE_VHOST_MSG_RESULT_ERR;
2339 
2340 		for (i = 0; i < dev->nr_vring; i++) {
2341 			struct vhost_virtqueue *vq = dev->virtqueue[i];
2342 
2343 			if (!vq)
2344 				continue;
2345 
2346 			vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
2347 					len, imsg->perm);
2348 
2349 			if (is_vring_iotlb(dev, vq, imsg))
2350 				*pdev = dev = translate_ring_addresses(dev, i);
2351 		}
2352 		break;
2353 	case VHOST_IOTLB_INVALIDATE:
2354 		for (i = 0; i < dev->nr_vring; i++) {
2355 			struct vhost_virtqueue *vq = dev->virtqueue[i];
2356 
2357 			if (!vq)
2358 				continue;
2359 
2360 			vhost_user_iotlb_cache_remove(vq, imsg->iova,
2361 					imsg->size);
2362 
2363 			if (is_vring_iotlb(dev, vq, imsg))
2364 				vring_invalidate(dev, vq);
2365 		}
2366 		break;
2367 	default:
2368 		VHOST_LOG_CONFIG(ERR, "Invalid IOTLB message type (%d)\n",
2369 				imsg->type);
2370 		return RTE_VHOST_MSG_RESULT_ERR;
2371 	}
2372 
2373 	return RTE_VHOST_MSG_RESULT_OK;
2374 }
2375 
2376 static int
vhost_user_set_postcopy_advise(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)2377 vhost_user_set_postcopy_advise(struct virtio_net **pdev,
2378 			struct VhostUserMsg *msg,
2379 			int main_fd __rte_unused)
2380 {
2381 	struct virtio_net *dev = *pdev;
2382 #ifdef RTE_LIBRTE_VHOST_POSTCOPY
2383 	struct uffdio_api api_struct;
2384 
2385 	if (validate_msg_fds(msg, 0) != 0)
2386 		return RTE_VHOST_MSG_RESULT_ERR;
2387 
2388 	dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
2389 
2390 	if (dev->postcopy_ufd == -1) {
2391 		VHOST_LOG_CONFIG(ERR, "Userfaultfd not available: %s\n",
2392 			strerror(errno));
2393 		return RTE_VHOST_MSG_RESULT_ERR;
2394 	}
2395 	api_struct.api = UFFD_API;
2396 	api_struct.features = 0;
2397 	if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
2398 		VHOST_LOG_CONFIG(ERR, "UFFDIO_API ioctl failure: %s\n",
2399 			strerror(errno));
2400 		close(dev->postcopy_ufd);
2401 		dev->postcopy_ufd = -1;
2402 		return RTE_VHOST_MSG_RESULT_ERR;
2403 	}
2404 	msg->fds[0] = dev->postcopy_ufd;
2405 	msg->fd_num = 1;
2406 
2407 	return RTE_VHOST_MSG_RESULT_REPLY;
2408 #else
2409 	dev->postcopy_ufd = -1;
2410 	msg->fd_num = 0;
2411 
2412 	return RTE_VHOST_MSG_RESULT_ERR;
2413 #endif
2414 }
2415 
2416 static int
vhost_user_set_postcopy_listen(struct virtio_net ** pdev,struct VhostUserMsg * msg __rte_unused,int main_fd __rte_unused)2417 vhost_user_set_postcopy_listen(struct virtio_net **pdev,
2418 			struct VhostUserMsg *msg __rte_unused,
2419 			int main_fd __rte_unused)
2420 {
2421 	struct virtio_net *dev = *pdev;
2422 
2423 	if (validate_msg_fds(msg, 0) != 0)
2424 		return RTE_VHOST_MSG_RESULT_ERR;
2425 
2426 	if (dev->mem && dev->mem->nregions) {
2427 		VHOST_LOG_CONFIG(ERR,
2428 			"Regions already registered at postcopy-listen\n");
2429 		return RTE_VHOST_MSG_RESULT_ERR;
2430 	}
2431 	dev->postcopy_listening = 1;
2432 
2433 	return RTE_VHOST_MSG_RESULT_OK;
2434 }
2435 
2436 static int
vhost_user_postcopy_end(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)2437 vhost_user_postcopy_end(struct virtio_net **pdev, struct VhostUserMsg *msg,
2438 			int main_fd __rte_unused)
2439 {
2440 	struct virtio_net *dev = *pdev;
2441 
2442 	if (validate_msg_fds(msg, 0) != 0)
2443 		return RTE_VHOST_MSG_RESULT_ERR;
2444 
2445 	dev->postcopy_listening = 0;
2446 	if (dev->postcopy_ufd >= 0) {
2447 		close(dev->postcopy_ufd);
2448 		dev->postcopy_ufd = -1;
2449 	}
2450 
2451 	msg->payload.u64 = 0;
2452 	msg->size = sizeof(msg->payload.u64);
2453 	msg->fd_num = 0;
2454 
2455 	return RTE_VHOST_MSG_RESULT_REPLY;
2456 }
2457 
2458 static int
vhost_user_get_status(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)2459 vhost_user_get_status(struct virtio_net **pdev, struct VhostUserMsg *msg,
2460 		      int main_fd __rte_unused)
2461 {
2462 	struct virtio_net *dev = *pdev;
2463 
2464 	if (validate_msg_fds(msg, 0) != 0)
2465 		return RTE_VHOST_MSG_RESULT_ERR;
2466 
2467 	msg->payload.u64 = dev->status;
2468 	msg->size = sizeof(msg->payload.u64);
2469 	msg->fd_num = 0;
2470 
2471 	return RTE_VHOST_MSG_RESULT_REPLY;
2472 }
2473 
2474 static int
vhost_user_set_status(struct virtio_net ** pdev,struct VhostUserMsg * msg,int main_fd __rte_unused)2475 vhost_user_set_status(struct virtio_net **pdev, struct VhostUserMsg *msg,
2476 			int main_fd __rte_unused)
2477 {
2478 	struct virtio_net *dev = *pdev;
2479 
2480 	if (validate_msg_fds(msg, 0) != 0)
2481 		return RTE_VHOST_MSG_RESULT_ERR;
2482 
2483 	/* As per Virtio specification, the device status is 8bits long */
2484 	if (msg->payload.u64 > UINT8_MAX) {
2485 		VHOST_LOG_CONFIG(ERR, "Invalid VHOST_USER_SET_STATUS payload 0x%" PRIx64 "\n",
2486 				msg->payload.u64);
2487 		return RTE_VHOST_MSG_RESULT_ERR;
2488 	}
2489 
2490 	dev->status = msg->payload.u64;
2491 
2492 	if ((dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK) &&
2493 	    (dev->flags & VIRTIO_DEV_FEATURES_FAILED)) {
2494 		VHOST_LOG_CONFIG(ERR, "FEATURES_OK bit is set but feature negotiation failed\n");
2495 		/*
2496 		 * Clear the bit to let the driver know about the feature
2497 		 * negotiation failure
2498 		 */
2499 		dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;
2500 	}
2501 
2502 	VHOST_LOG_CONFIG(INFO, "New device status(0x%08x):\n"
2503 			"\t-RESET: %u\n"
2504 			"\t-ACKNOWLEDGE: %u\n"
2505 			"\t-DRIVER: %u\n"
2506 			"\t-FEATURES_OK: %u\n"
2507 			"\t-DRIVER_OK: %u\n"
2508 			"\t-DEVICE_NEED_RESET: %u\n"
2509 			"\t-FAILED: %u\n",
2510 			dev->status,
2511 			(dev->status == VIRTIO_DEVICE_STATUS_RESET),
2512 			!!(dev->status & VIRTIO_DEVICE_STATUS_ACK),
2513 			!!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER),
2514 			!!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK),
2515 			!!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK),
2516 			!!(dev->status & VIRTIO_DEVICE_STATUS_DEV_NEED_RESET),
2517 			!!(dev->status & VIRTIO_DEVICE_STATUS_FAILED));
2518 
2519 	return RTE_VHOST_MSG_RESULT_OK;
2520 }
2521 
2522 typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
2523 					struct VhostUserMsg *msg,
2524 					int main_fd);
2525 static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = {
2526 	[VHOST_USER_NONE] = NULL,
2527 	[VHOST_USER_GET_FEATURES] = vhost_user_get_features,
2528 	[VHOST_USER_SET_FEATURES] = vhost_user_set_features,
2529 	[VHOST_USER_SET_OWNER] = vhost_user_set_owner,
2530 	[VHOST_USER_RESET_OWNER] = vhost_user_reset_owner,
2531 	[VHOST_USER_SET_MEM_TABLE] = vhost_user_set_mem_table,
2532 	[VHOST_USER_SET_LOG_BASE] = vhost_user_set_log_base,
2533 	[VHOST_USER_SET_LOG_FD] = vhost_user_set_log_fd,
2534 	[VHOST_USER_SET_VRING_NUM] = vhost_user_set_vring_num,
2535 	[VHOST_USER_SET_VRING_ADDR] = vhost_user_set_vring_addr,
2536 	[VHOST_USER_SET_VRING_BASE] = vhost_user_set_vring_base,
2537 	[VHOST_USER_GET_VRING_BASE] = vhost_user_get_vring_base,
2538 	[VHOST_USER_SET_VRING_KICK] = vhost_user_set_vring_kick,
2539 	[VHOST_USER_SET_VRING_CALL] = vhost_user_set_vring_call,
2540 	[VHOST_USER_SET_VRING_ERR] = vhost_user_set_vring_err,
2541 	[VHOST_USER_GET_PROTOCOL_FEATURES] = vhost_user_get_protocol_features,
2542 	[VHOST_USER_SET_PROTOCOL_FEATURES] = vhost_user_set_protocol_features,
2543 	[VHOST_USER_GET_QUEUE_NUM] = vhost_user_get_queue_num,
2544 	[VHOST_USER_SET_VRING_ENABLE] = vhost_user_set_vring_enable,
2545 	[VHOST_USER_SEND_RARP] = vhost_user_send_rarp,
2546 	[VHOST_USER_NET_SET_MTU] = vhost_user_net_set_mtu,
2547 	[VHOST_USER_SET_SLAVE_REQ_FD] = vhost_user_set_req_fd,
2548 	[VHOST_USER_IOTLB_MSG] = vhost_user_iotlb_msg,
2549 	[VHOST_USER_POSTCOPY_ADVISE] = vhost_user_set_postcopy_advise,
2550 	[VHOST_USER_POSTCOPY_LISTEN] = vhost_user_set_postcopy_listen,
2551 	[VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end,
2552 	[VHOST_USER_GET_INFLIGHT_FD] = vhost_user_get_inflight_fd,
2553 	[VHOST_USER_SET_INFLIGHT_FD] = vhost_user_set_inflight_fd,
2554 	[VHOST_USER_SET_STATUS] = vhost_user_set_status,
2555 	[VHOST_USER_GET_STATUS] = vhost_user_get_status,
2556 };
2557 
2558 /* return bytes# of read on success or negative val on failure. */
2559 static int
read_vhost_message(int sockfd,struct VhostUserMsg * msg)2560 read_vhost_message(int sockfd, struct VhostUserMsg *msg)
2561 {
2562 	int ret;
2563 
2564 	ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
2565 		msg->fds, VHOST_MEMORY_MAX_NREGIONS, &msg->fd_num);
2566 	if (ret <= 0) {
2567 		return ret;
2568 	} else if (ret != VHOST_USER_HDR_SIZE) {
2569 		VHOST_LOG_CONFIG(ERR, "Unexpected header size read\n");
2570 		close_msg_fds(msg);
2571 		return -1;
2572 	}
2573 
2574 	if (msg->size) {
2575 		if (msg->size > sizeof(msg->payload)) {
2576 			VHOST_LOG_CONFIG(ERR,
2577 				"invalid msg size: %d\n", msg->size);
2578 			return -1;
2579 		}
2580 		ret = read(sockfd, &msg->payload, msg->size);
2581 		if (ret <= 0)
2582 			return ret;
2583 		if (ret != (int)msg->size) {
2584 			VHOST_LOG_CONFIG(ERR,
2585 				"read control message failed\n");
2586 			return -1;
2587 		}
2588 	}
2589 
2590 	return ret;
2591 }
2592 
2593 static int
send_vhost_message(int sockfd,struct VhostUserMsg * msg)2594 send_vhost_message(int sockfd, struct VhostUserMsg *msg)
2595 {
2596 	if (!msg)
2597 		return 0;
2598 
2599 	return send_fd_message(sockfd, (char *)msg,
2600 		VHOST_USER_HDR_SIZE + msg->size, msg->fds, msg->fd_num);
2601 }
2602 
2603 static int
send_vhost_reply(int sockfd,struct VhostUserMsg * msg)2604 send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
2605 {
2606 	if (!msg)
2607 		return 0;
2608 
2609 	msg->flags &= ~VHOST_USER_VERSION_MASK;
2610 	msg->flags &= ~VHOST_USER_NEED_REPLY;
2611 	msg->flags |= VHOST_USER_VERSION;
2612 	msg->flags |= VHOST_USER_REPLY_MASK;
2613 
2614 	return send_vhost_message(sockfd, msg);
2615 }
2616 
2617 static int
send_vhost_slave_message(struct virtio_net * dev,struct VhostUserMsg * msg)2618 send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg)
2619 {
2620 	int ret;
2621 
2622 	if (msg->flags & VHOST_USER_NEED_REPLY)
2623 		rte_spinlock_lock(&dev->slave_req_lock);
2624 
2625 	ret = send_vhost_message(dev->slave_req_fd, msg);
2626 	if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
2627 		rte_spinlock_unlock(&dev->slave_req_lock);
2628 
2629 	return ret;
2630 }
2631 
2632 /*
2633  * Allocate a queue pair if it hasn't been allocated yet
2634  */
2635 static int
vhost_user_check_and_alloc_queue_pair(struct virtio_net * dev,struct VhostUserMsg * msg)2636 vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
2637 			struct VhostUserMsg *msg)
2638 {
2639 	uint32_t vring_idx;
2640 
2641 	switch (msg->request.master) {
2642 	case VHOST_USER_SET_VRING_KICK:
2643 	case VHOST_USER_SET_VRING_CALL:
2644 	case VHOST_USER_SET_VRING_ERR:
2645 		vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
2646 		break;
2647 	case VHOST_USER_SET_VRING_NUM:
2648 	case VHOST_USER_SET_VRING_BASE:
2649 	case VHOST_USER_SET_VRING_ENABLE:
2650 		vring_idx = msg->payload.state.index;
2651 		break;
2652 	case VHOST_USER_SET_VRING_ADDR:
2653 		vring_idx = msg->payload.addr.index;
2654 		break;
2655 	default:
2656 		return 0;
2657 	}
2658 
2659 	if (vring_idx >= VHOST_MAX_VRING) {
2660 		VHOST_LOG_CONFIG(ERR,
2661 			"invalid vring index: %u\n", vring_idx);
2662 		return -1;
2663 	}
2664 
2665 	if (dev->virtqueue[vring_idx])
2666 		return 0;
2667 
2668 	return alloc_vring_queue(dev, vring_idx);
2669 }
2670 
2671 static void
vhost_user_lock_all_queue_pairs(struct virtio_net * dev)2672 vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
2673 {
2674 	unsigned int i = 0;
2675 	unsigned int vq_num = 0;
2676 
2677 	while (vq_num < dev->nr_vring) {
2678 		struct vhost_virtqueue *vq = dev->virtqueue[i];
2679 
2680 		if (vq) {
2681 			rte_spinlock_lock(&vq->access_lock);
2682 			vq_num++;
2683 		}
2684 		i++;
2685 	}
2686 }
2687 
2688 static void
vhost_user_unlock_all_queue_pairs(struct virtio_net * dev)2689 vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
2690 {
2691 	unsigned int i = 0;
2692 	unsigned int vq_num = 0;
2693 
2694 	while (vq_num < dev->nr_vring) {
2695 		struct vhost_virtqueue *vq = dev->virtqueue[i];
2696 
2697 		if (vq) {
2698 			rte_spinlock_unlock(&vq->access_lock);
2699 			vq_num++;
2700 		}
2701 		i++;
2702 	}
2703 }
2704 
2705 int
vhost_user_msg_handler(int vid,int fd)2706 vhost_user_msg_handler(int vid, int fd)
2707 {
2708 	struct virtio_net *dev;
2709 	struct VhostUserMsg msg;
2710 	struct rte_vdpa_device *vdpa_dev;
2711 	int ret;
2712 	int unlock_required = 0;
2713 	bool handled;
2714 	int request;
2715 	uint32_t i;
2716 
2717 	dev = get_device(vid);
2718 	if (dev == NULL)
2719 		return -1;
2720 
2721 	if (!dev->notify_ops) {
2722 		dev->notify_ops = vhost_driver_callback_get(dev->ifname);
2723 		if (!dev->notify_ops) {
2724 			VHOST_LOG_CONFIG(ERR,
2725 				"failed to get callback ops for driver %s\n",
2726 				dev->ifname);
2727 			return -1;
2728 		}
2729 	}
2730 
2731 	ret = read_vhost_message(fd, &msg);
2732 	if (ret <= 0) {
2733 		if (ret < 0)
2734 			VHOST_LOG_CONFIG(ERR,
2735 				"vhost read message failed\n");
2736 		else
2737 			VHOST_LOG_CONFIG(INFO,
2738 				"vhost peer closed\n");
2739 
2740 		return -1;
2741 	}
2742 
2743 	ret = 0;
2744 	request = msg.request.master;
2745 	if (request > VHOST_USER_NONE && request < VHOST_USER_MAX &&
2746 			vhost_message_str[request]) {
2747 		if (request != VHOST_USER_IOTLB_MSG)
2748 			VHOST_LOG_CONFIG(INFO, "read message %s\n",
2749 				vhost_message_str[request]);
2750 		else
2751 			VHOST_LOG_CONFIG(DEBUG, "read message %s\n",
2752 				vhost_message_str[request]);
2753 	} else {
2754 		VHOST_LOG_CONFIG(DEBUG, "External request %d\n", request);
2755 	}
2756 
2757 	ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
2758 	if (ret < 0) {
2759 		VHOST_LOG_CONFIG(ERR,
2760 			"failed to alloc queue\n");
2761 		return -1;
2762 	}
2763 
2764 	/*
2765 	 * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE
2766 	 * and VHOST_USER_RESET_OWNER, since it is sent when virtio stops
2767 	 * and device is destroyed. destroy_device waits for queues to be
2768 	 * inactive, so it is safe. Otherwise taking the access_lock
2769 	 * would cause a dead lock.
2770 	 */
2771 	switch (request) {
2772 	case VHOST_USER_SET_FEATURES:
2773 	case VHOST_USER_SET_PROTOCOL_FEATURES:
2774 	case VHOST_USER_SET_OWNER:
2775 	case VHOST_USER_SET_MEM_TABLE:
2776 	case VHOST_USER_SET_LOG_BASE:
2777 	case VHOST_USER_SET_LOG_FD:
2778 	case VHOST_USER_SET_VRING_NUM:
2779 	case VHOST_USER_SET_VRING_ADDR:
2780 	case VHOST_USER_SET_VRING_BASE:
2781 	case VHOST_USER_SET_VRING_KICK:
2782 	case VHOST_USER_SET_VRING_CALL:
2783 	case VHOST_USER_SET_VRING_ERR:
2784 	case VHOST_USER_SET_VRING_ENABLE:
2785 	case VHOST_USER_SEND_RARP:
2786 	case VHOST_USER_NET_SET_MTU:
2787 	case VHOST_USER_SET_SLAVE_REQ_FD:
2788 		if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
2789 			vhost_user_lock_all_queue_pairs(dev);
2790 			unlock_required = 1;
2791 		}
2792 		break;
2793 	default:
2794 		break;
2795 
2796 	}
2797 
2798 	handled = false;
2799 	if (dev->extern_ops.pre_msg_handle) {
2800 		ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
2801 				(void *)&msg);
2802 		switch (ret) {
2803 		case RTE_VHOST_MSG_RESULT_REPLY:
2804 			send_vhost_reply(fd, &msg);
2805 			/* Fall-through */
2806 		case RTE_VHOST_MSG_RESULT_ERR:
2807 		case RTE_VHOST_MSG_RESULT_OK:
2808 			handled = true;
2809 			goto skip_to_post_handle;
2810 		case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
2811 		default:
2812 			break;
2813 		}
2814 	}
2815 
2816 	if (request > VHOST_USER_NONE && request < VHOST_USER_MAX) {
2817 		if (!vhost_message_handlers[request])
2818 			goto skip_to_post_handle;
2819 		ret = vhost_message_handlers[request](&dev, &msg, fd);
2820 
2821 		switch (ret) {
2822 		case RTE_VHOST_MSG_RESULT_ERR:
2823 			VHOST_LOG_CONFIG(ERR,
2824 				"Processing %s failed.\n",
2825 				vhost_message_str[request]);
2826 			handled = true;
2827 			break;
2828 		case RTE_VHOST_MSG_RESULT_OK:
2829 			VHOST_LOG_CONFIG(DEBUG,
2830 				"Processing %s succeeded.\n",
2831 				vhost_message_str[request]);
2832 			handled = true;
2833 			break;
2834 		case RTE_VHOST_MSG_RESULT_REPLY:
2835 			VHOST_LOG_CONFIG(DEBUG,
2836 				"Processing %s succeeded and needs reply.\n",
2837 				vhost_message_str[request]);
2838 			send_vhost_reply(fd, &msg);
2839 			handled = true;
2840 			break;
2841 		default:
2842 			break;
2843 		}
2844 	}
2845 
2846 skip_to_post_handle:
2847 	if (ret != RTE_VHOST_MSG_RESULT_ERR &&
2848 			dev->extern_ops.post_msg_handle) {
2849 		ret = (*dev->extern_ops.post_msg_handle)(dev->vid,
2850 				(void *)&msg);
2851 		switch (ret) {
2852 		case RTE_VHOST_MSG_RESULT_REPLY:
2853 			send_vhost_reply(fd, &msg);
2854 			/* Fall-through */
2855 		case RTE_VHOST_MSG_RESULT_ERR:
2856 		case RTE_VHOST_MSG_RESULT_OK:
2857 			handled = true;
2858 		case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
2859 		default:
2860 			break;
2861 		}
2862 	}
2863 
2864 	if (unlock_required)
2865 		vhost_user_unlock_all_queue_pairs(dev);
2866 
2867 	/* If message was not handled at this stage, treat it as an error */
2868 	if (!handled) {
2869 		VHOST_LOG_CONFIG(ERR,
2870 			"vhost message (req: %d) was not handled.\n", request);
2871 		close_msg_fds(&msg);
2872 		ret = RTE_VHOST_MSG_RESULT_ERR;
2873 	}
2874 
2875 	/*
2876 	 * If the request required a reply that was already sent,
2877 	 * this optional reply-ack won't be sent as the
2878 	 * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
2879 	 */
2880 	if (msg.flags & VHOST_USER_NEED_REPLY) {
2881 		msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
2882 		msg.size = sizeof(msg.payload.u64);
2883 		msg.fd_num = 0;
2884 		send_vhost_reply(fd, &msg);
2885 	} else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
2886 		VHOST_LOG_CONFIG(ERR,
2887 			"vhost message handling failed.\n");
2888 		return -1;
2889 	}
2890 
2891 	for (i = 0; i < dev->nr_vring; i++) {
2892 		struct vhost_virtqueue *vq = dev->virtqueue[i];
2893 		bool cur_ready = vq_is_ready(dev, vq);
2894 
2895 		if (cur_ready != (vq && vq->ready)) {
2896 			vq->ready = cur_ready;
2897 			vhost_user_notify_queue_state(dev, i, cur_ready);
2898 		}
2899 	}
2900 
2901 
2902 	if (!virtio_is_ready(dev))
2903 		goto out;
2904 
2905 	/*
2906 	 * Virtio is now ready. If not done already, it is time
2907 	 * to notify the application it can process the rings and
2908 	 * configure the vDPA device if present.
2909 	 */
2910 
2911 	if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
2912 		if (dev->notify_ops->new_device(dev->vid) == 0)
2913 			dev->flags |= VIRTIO_DEV_RUNNING;
2914 	}
2915 
2916 	vdpa_dev = dev->vdpa_dev;
2917 	if (!vdpa_dev)
2918 		goto out;
2919 
2920 	if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
2921 		if (vdpa_dev->ops->dev_conf(dev->vid))
2922 			VHOST_LOG_CONFIG(ERR,
2923 					 "Failed to configure vDPA device\n");
2924 		else
2925 			dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
2926 	}
2927 
2928 out:
2929 	return 0;
2930 }
2931 
process_slave_message_reply(struct virtio_net * dev,const struct VhostUserMsg * msg)2932 static int process_slave_message_reply(struct virtio_net *dev,
2933 				       const struct VhostUserMsg *msg)
2934 {
2935 	struct VhostUserMsg msg_reply;
2936 	int ret;
2937 
2938 	if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
2939 		return 0;
2940 
2941 	ret = read_vhost_message(dev->slave_req_fd, &msg_reply);
2942 	if (ret <= 0) {
2943 		if (ret < 0)
2944 			VHOST_LOG_CONFIG(ERR,
2945 				"vhost read slave message reply failed\n");
2946 		else
2947 			VHOST_LOG_CONFIG(INFO,
2948 				"vhost peer closed\n");
2949 		ret = -1;
2950 		goto out;
2951 	}
2952 
2953 	ret = 0;
2954 	if (msg_reply.request.slave != msg->request.slave) {
2955 		VHOST_LOG_CONFIG(ERR,
2956 			"Received unexpected msg type (%u), expected %u\n",
2957 			msg_reply.request.slave, msg->request.slave);
2958 		ret = -1;
2959 		goto out;
2960 	}
2961 
2962 	ret = msg_reply.payload.u64 ? -1 : 0;
2963 
2964 out:
2965 	rte_spinlock_unlock(&dev->slave_req_lock);
2966 	return ret;
2967 }
2968 
2969 int
vhost_user_iotlb_miss(struct virtio_net * dev,uint64_t iova,uint8_t perm)2970 vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
2971 {
2972 	int ret;
2973 	struct VhostUserMsg msg = {
2974 		.request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
2975 		.flags = VHOST_USER_VERSION,
2976 		.size = sizeof(msg.payload.iotlb),
2977 		.payload.iotlb = {
2978 			.iova = iova,
2979 			.perm = perm,
2980 			.type = VHOST_IOTLB_MISS,
2981 		},
2982 	};
2983 
2984 	ret = send_vhost_message(dev->slave_req_fd, &msg);
2985 	if (ret < 0) {
2986 		VHOST_LOG_CONFIG(ERR,
2987 				"Failed to send IOTLB miss message (%d)\n",
2988 				ret);
2989 		return ret;
2990 	}
2991 
2992 	return 0;
2993 }
2994 
2995 static int
vhost_user_slave_config_change(struct virtio_net * dev,bool need_reply)2996 vhost_user_slave_config_change(struct virtio_net *dev, bool need_reply)
2997 {
2998 	int ret;
2999 	struct VhostUserMsg msg = {
3000 		.request.slave = VHOST_USER_SLAVE_CONFIG_CHANGE_MSG,
3001 		.flags = VHOST_USER_VERSION,
3002 		.size = 0,
3003 	};
3004 
3005 	if (need_reply)
3006 		msg.flags |= VHOST_USER_NEED_REPLY;
3007 
3008 	ret = send_vhost_slave_message(dev, &msg);
3009 	if (ret < 0) {
3010 		VHOST_LOG_CONFIG(ERR,
3011 				"Failed to send config change (%d)\n",
3012 				ret);
3013 		return ret;
3014 	}
3015 
3016 	return process_slave_message_reply(dev, &msg);
3017 }
3018 
3019 int
rte_vhost_slave_config_change(int vid,bool need_reply)3020 rte_vhost_slave_config_change(int vid, bool need_reply)
3021 {
3022 	struct virtio_net *dev;
3023 
3024 	dev = get_device(vid);
3025 	if (!dev)
3026 		return -ENODEV;
3027 
3028 	return vhost_user_slave_config_change(dev, need_reply);
3029 }
3030 
vhost_user_slave_set_vring_host_notifier(struct virtio_net * dev,int index,int fd,uint64_t offset,uint64_t size)3031 static int vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev,
3032 						    int index, int fd,
3033 						    uint64_t offset,
3034 						    uint64_t size)
3035 {
3036 	int ret;
3037 	struct VhostUserMsg msg = {
3038 		.request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
3039 		.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY,
3040 		.size = sizeof(msg.payload.area),
3041 		.payload.area = {
3042 			.u64 = index & VHOST_USER_VRING_IDX_MASK,
3043 			.size = size,
3044 			.offset = offset,
3045 		},
3046 	};
3047 
3048 	if (fd < 0)
3049 		msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
3050 	else {
3051 		msg.fds[0] = fd;
3052 		msg.fd_num = 1;
3053 	}
3054 
3055 	ret = send_vhost_slave_message(dev, &msg);
3056 	if (ret < 0) {
3057 		VHOST_LOG_CONFIG(ERR,
3058 			"Failed to set host notifier (%d)\n", ret);
3059 		return ret;
3060 	}
3061 
3062 	return process_slave_message_reply(dev, &msg);
3063 }
3064 
rte_vhost_host_notifier_ctrl(int vid,uint16_t qid,bool enable)3065 int rte_vhost_host_notifier_ctrl(int vid, uint16_t qid, bool enable)
3066 {
3067 	struct virtio_net *dev;
3068 	struct rte_vdpa_device *vdpa_dev;
3069 	int vfio_device_fd, ret = 0;
3070 	uint64_t offset, size;
3071 	unsigned int i, q_start, q_last;
3072 
3073 	dev = get_device(vid);
3074 	if (!dev)
3075 		return -ENODEV;
3076 
3077 	vdpa_dev = dev->vdpa_dev;
3078 	if (vdpa_dev == NULL)
3079 		return -ENODEV;
3080 
3081 	if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
3082 	    !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
3083 	    !(dev->protocol_features &
3084 			(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ)) ||
3085 	    !(dev->protocol_features &
3086 			(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) ||
3087 	    !(dev->protocol_features &
3088 			(1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER)))
3089 		return -ENOTSUP;
3090 
3091 	if (qid == RTE_VHOST_QUEUE_ALL) {
3092 		q_start = 0;
3093 		q_last = dev->nr_vring - 1;
3094 	} else {
3095 		if (qid >= dev->nr_vring)
3096 			return -EINVAL;
3097 		q_start = qid;
3098 		q_last = qid;
3099 	}
3100 
3101 	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
3102 	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);
3103 
3104 	vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid);
3105 	if (vfio_device_fd < 0)
3106 		return -ENOTSUP;
3107 
3108 	if (enable) {
3109 		for (i = q_start; i <= q_last; i++) {
3110 			if (vdpa_dev->ops->get_notify_area(vid, i, &offset,
3111 					&size) < 0) {
3112 				ret = -ENOTSUP;
3113 				goto disable;
3114 			}
3115 
3116 			if (vhost_user_slave_set_vring_host_notifier(dev, i,
3117 					vfio_device_fd, offset, size) < 0) {
3118 				ret = -EFAULT;
3119 				goto disable;
3120 			}
3121 		}
3122 	} else {
3123 disable:
3124 		for (i = q_start; i <= q_last; i++) {
3125 			vhost_user_slave_set_vring_host_notifier(dev, i, -1,
3126 					0, 0);
3127 		}
3128 	}
3129 
3130 	return ret;
3131 }
3132