Lines matching refs:dev

Each entry below is a cross-reference hit: the leading number is the line number in the source file, followed by the matching fragment and the enclosing function. "argument" marks a definition line where dev is a function parameter; "local" marks a declaration of dev as a local variable.

67 static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
68 static int read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
91 validate_msg_fds(struct virtio_net *dev, struct vhu_msg_context *ctx, int expected_fds) in validate_msg_fds() argument
97 dev->ifname, expected_fds, in validate_msg_fds()
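
Note: the validate_msg_fds() hits above come from a guard that rejects a message carrying the wrong number of ancillary file descriptors. A minimal sketch of that pattern, with illustrative names (the close-all-received-fds fallback is an assumption, not the verbatim code):

#include <stdio.h>
#include <unistd.h>

/* Sketch: reject a message whose fd count differs from what the
 * request type expects; close any fds received so they don't leak. */
static int
validate_msg_fds_sketch(const char *ifname, int *fds, int fd_num,
			int expected_fds)
{
	int i;

	if (fd_num == expected_fds)
		return 0;

	fprintf(stderr, "(%s) expected %d fds, got %d\n",
		ifname, expected_fds, fd_num);

	for (i = 0; i < fd_num; i++)
		if (fds[i] >= 0)
			close(fds[i]);

	return -1;
}
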
117 async_dma_map(struct virtio_net *dev, bool do_map) in async_dma_map() argument
124 for (i = 0; i < dev->nr_guest_pages; i++) { in async_dma_map()
125 page = &dev->guest_pages[i]; in async_dma_map()
152 for (i = 0; i < dev->nr_guest_pages; i++) { in async_dma_map()
153 page = &dev->guest_pages[i]; in async_dma_map()
170 free_mem_region(struct virtio_net *dev) in free_mem_region() argument
175 if (!dev || !dev->mem) in free_mem_region()
178 if (dev->async_copy && rte_vfio_is_enabled("vfio")) in free_mem_region()
179 async_dma_map(dev, false); in free_mem_region()
181 for (i = 0; i < dev->mem->nregions; i++) { in free_mem_region()
182 reg = &dev->mem->regions[i]; in free_mem_region()
191 vhost_backend_cleanup(struct virtio_net *dev) in vhost_backend_cleanup() argument
195 vdpa_dev = dev->vdpa_dev; in vhost_backend_cleanup()
197 vdpa_dev->ops->dev_cleanup(dev->vid); in vhost_backend_cleanup()
199 if (dev->mem) { in vhost_backend_cleanup()
200 free_mem_region(dev); in vhost_backend_cleanup()
201 rte_free(dev->mem); in vhost_backend_cleanup()
202 dev->mem = NULL; in vhost_backend_cleanup()
205 rte_free(dev->guest_pages); in vhost_backend_cleanup()
206 dev->guest_pages = NULL; in vhost_backend_cleanup()
208 if (dev->log_addr) { in vhost_backend_cleanup()
209 munmap((void *)(uintptr_t)dev->log_addr, dev->log_size); in vhost_backend_cleanup()
210 dev->log_addr = 0; in vhost_backend_cleanup()
213 if (dev->inflight_info) { in vhost_backend_cleanup()
214 if (dev->inflight_info->addr) { in vhost_backend_cleanup()
215 munmap(dev->inflight_info->addr, in vhost_backend_cleanup()
216 dev->inflight_info->size); in vhost_backend_cleanup()
217 dev->inflight_info->addr = NULL; in vhost_backend_cleanup()
220 if (dev->inflight_info->fd >= 0) { in vhost_backend_cleanup()
221 close(dev->inflight_info->fd); in vhost_backend_cleanup()
222 dev->inflight_info->fd = -1; in vhost_backend_cleanup()
225 rte_free(dev->inflight_info); in vhost_backend_cleanup()
226 dev->inflight_info = NULL; in vhost_backend_cleanup()
229 if (dev->slave_req_fd >= 0) { in vhost_backend_cleanup()
230 close(dev->slave_req_fd); in vhost_backend_cleanup()
231 dev->slave_req_fd = -1; in vhost_backend_cleanup()
234 if (dev->postcopy_ufd >= 0) { in vhost_backend_cleanup()
235 close(dev->postcopy_ufd); in vhost_backend_cleanup()
236 dev->postcopy_ufd = -1; in vhost_backend_cleanup()
239 dev->postcopy_listening = 0; in vhost_backend_cleanup()
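
Note: the vhost_backend_cleanup() hits above all follow one teardown idiom: unmap what was mapped, close what was opened, then reset the field (log_addr to 0, fds to -1) so the cleanup is safe to run twice. A condensed sketch of that idiom, with hypothetical parameter names:

#include <sys/mman.h>
#include <unistd.h>
#include <stddef.h>

/* Sketch of the repeated teardown pattern: unmap if mapped, close if
 * open, then reset the fields so a second cleanup pass is a no-op. */
static void
cleanup_map_and_fd(void **addr, size_t *size, int *fd)
{
	if (*addr) {
		munmap(*addr, *size);
		*addr = NULL;
	}
	if (*fd >= 0) {
		close(*fd);
		*fd = -1;
	}
}
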
243 vhost_user_notify_queue_state(struct virtio_net *dev, uint16_t index, in vhost_user_notify_queue_state() argument
246 struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev; in vhost_user_notify_queue_state()
247 struct vhost_virtqueue *vq = dev->virtqueue[index]; in vhost_user_notify_queue_state()
251 vhost_enable_guest_notification(dev, vq, vq->notif_enable); in vhost_user_notify_queue_state()
254 vdpa_dev->ops->set_vring_state(dev->vid, index, enable); in vhost_user_notify_queue_state()
256 if (dev->notify_ops->vring_state_changed) in vhost_user_notify_queue_state()
257 dev->notify_ops->vring_state_changed(dev->vid, in vhost_user_notify_queue_state()
278 struct virtio_net *dev = *pdev; in vhost_user_reset_owner() local
280 vhost_destroy_device_notify(dev); in vhost_user_reset_owner()
282 cleanup_device(dev, 0); in vhost_user_reset_owner()
283 reset_device(dev); in vhost_user_reset_owner()
295 struct virtio_net *dev = *pdev; in vhost_user_get_features() local
298 rte_vhost_driver_get_features(dev->ifname, &features); in vhost_user_get_features()
315 struct virtio_net *dev = *pdev; in vhost_user_get_queue_num() local
318 rte_vhost_driver_get_queue_num(dev->ifname, &queue_num); in vhost_user_get_queue_num()
335 struct virtio_net *dev = *pdev; in vhost_user_set_features() local
340 rte_vhost_driver_get_features(dev->ifname, &vhost_features); in vhost_user_set_features()
343 dev->ifname); in vhost_user_set_features()
344 dev->flags |= VIRTIO_DEV_FEATURES_FAILED; in vhost_user_set_features()
345 dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK; in vhost_user_set_features()
350 if (dev->flags & VIRTIO_DEV_RUNNING) { in vhost_user_set_features()
351 if (dev->features == features) in vhost_user_set_features()
359 if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) { in vhost_user_set_features()
361 dev->ifname); in vhost_user_set_features()
365 if (dev->notify_ops->features_changed) in vhost_user_set_features()
366 dev->notify_ops->features_changed(dev->vid, features); in vhost_user_set_features()
369 dev->features = features; in vhost_user_set_features()
370 if (dev->features & in vhost_user_set_features()
374 dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf); in vhost_user_set_features()
376 dev->vhost_hlen = sizeof(struct virtio_net_hdr); in vhost_user_set_features()
379 dev->ifname, dev->features); in vhost_user_set_features()
381 dev->ifname, in vhost_user_set_features()
382 (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off", in vhost_user_set_features()
383 (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off"); in vhost_user_set_features()
385 if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) && in vhost_user_set_features()
386 !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) { in vhost_user_set_features()
392 while (dev->nr_vring > 2) { in vhost_user_set_features()
395 vq = dev->virtqueue[--dev->nr_vring]; in vhost_user_set_features()
399 dev->virtqueue[dev->nr_vring] = NULL; in vhost_user_set_features()
401 cleanup_vq_inflight(dev, vq); in vhost_user_set_features()
402 free_vq(dev, vq); in vhost_user_set_features()
406 vdpa_dev = dev->vdpa_dev; in vhost_user_set_features()
408 vdpa_dev->ops->set_features(dev->vid); in vhost_user_set_features()
410 dev->flags &= ~VIRTIO_DEV_FEATURES_FAILED; in vhost_user_set_features()
422 struct virtio_net *dev = *pdev; in vhost_user_set_vring_num() local
423 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index]; in vhost_user_set_vring_num()
427 dev->ifname, ctx->msg.payload.state.num); in vhost_user_set_vring_num()
442 if (!vq_is_packed(dev)) { in vhost_user_set_vring_num()
445 dev->ifname, vq->size); in vhost_user_set_vring_num()
450 if (vq_is_packed(dev)) { in vhost_user_set_vring_num()
459 dev->ifname); in vhost_user_set_vring_num()
473 dev->ifname); in vhost_user_set_vring_num()
484 dev->ifname); in vhost_user_set_vring_num()
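
Note: the vhost_user_set_vring_num() hits above surround size checks whose conditions are elided by the match. A sketch of what the vq_is_packed()/vq->size fragments suggest, assuming the virtio-spec limits (32768 maximum queue size; power-of-two sizes required for split rings only):

#include <stdbool.h>
#include <stdint.h>

/* Sketch: cap the ring size and, for split rings, require a power of
 * two; packed rings carry no power-of-two requirement. */
static bool
vring_num_valid(uint32_t num, bool packed)
{
	if (num == 0 || num > 32768)
		return false;
	if (!packed && (num & (num - 1)) != 0)
		return false;
	return true;
}
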
497 numa_realloc(struct virtio_net *dev, int index) in numa_realloc() argument
508 old_dev = dev; in numa_realloc()
509 vq = dev->virtqueue[index]; in numa_realloc()
516 return dev; in numa_realloc()
521 dev->ifname, index); in numa_realloc()
522 return dev; in numa_realloc()
531 dev->ifname, index, node); in numa_realloc()
532 return dev; in numa_realloc()
535 if (vq != dev->virtqueue[index]) { in numa_realloc()
537 dev->ifname, node); in numa_realloc()
538 dev->virtqueue[index] = vq; in numa_realloc()
539 vhost_user_iotlb_init(dev, index); in numa_realloc()
542 if (vq_is_packed(dev)) { in numa_realloc()
549 dev->ifname, node); in numa_realloc()
550 return dev; in numa_realloc()
560 dev->ifname, node); in numa_realloc()
561 return dev; in numa_realloc()
570 dev->ifname, node); in numa_realloc()
571 return dev; in numa_realloc()
581 dev->ifname, node); in numa_realloc()
582 return dev; in numa_realloc()
593 dev->ifname, node); in numa_realloc()
594 return dev; in numa_realloc()
605 dev->ifname, node); in numa_realloc()
606 return dev; in numa_realloc()
616 if (dev->flags & VIRTIO_DEV_RUNNING) in numa_realloc()
617 return dev; in numa_realloc()
619 ret = get_mempolicy(&dev_node, NULL, 0, dev, MPOL_F_NODE | MPOL_F_ADDR); in numa_realloc()
621 VHOST_LOG_CONFIG(ERR, "(%s) unable to get numa information.\n", dev->ifname); in numa_realloc()
622 return dev; in numa_realloc()
626 return dev; in numa_realloc()
628 dev = rte_realloc_socket(old_dev, sizeof(*dev), 0, node); in numa_realloc()
629 if (!dev) { in numa_realloc()
635 VHOST_LOG_CONFIG(INFO, "(%s) reallocated device on node %d\n", dev->ifname, node); in numa_realloc()
636 vhost_devices[dev->vid] = dev; in numa_realloc()
639 sizeof(struct rte_vhost_mem_region) * dev->mem->nregions; in numa_realloc()
640 mem = rte_realloc_socket(dev->mem, mem_size, 0, node); in numa_realloc()
643 dev->ifname, node); in numa_realloc()
644 return dev; in numa_realloc()
646 dev->mem = mem; in numa_realloc()
648 gp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp), in numa_realloc()
652 dev->ifname, node); in numa_realloc()
653 return dev; in numa_realloc()
655 dev->guest_pages = gp; in numa_realloc()
657 return dev; in numa_realloc()
661 numa_realloc(struct virtio_net *dev, int index __rte_unused) in numa_realloc() argument
663 return dev; in numa_realloc()
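
Note: numa_realloc() keys off get_mempolicy(..., MPOL_F_NODE | MPOL_F_ADDR), visible verbatim at line 619, to learn which NUMA node backs a structure before deciding whether to reallocate it on the desired node (the real code reallocates with rte_realloc_socket()); the stub variant above is the build without NUMA support. A self-contained sketch of the node query, assuming libnuma is available:

#include <numaif.h>   /* get_mempolicy(); link with -lnuma */

/* Sketch: ask the kernel which NUMA node currently backs `addr`.
 * With MPOL_F_NODE | MPOL_F_ADDR, the node id of the page containing
 * addr is stored in the first argument. Returns -1 on failure. */
static int
addr_numa_node(void *addr)
{
	int node;

	if (get_mempolicy(&node, NULL, 0, addr,
			  MPOL_F_NODE | MPOL_F_ADDR) < 0)
		return -1;
	return node;
}
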
669 qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len) in qva_to_vva() argument
674 if (unlikely(!dev || !dev->mem)) in qva_to_vva()
678 for (i = 0; i < dev->mem->nregions; i++) { in qva_to_vva()
679 r = &dev->mem->regions[i]; in qva_to_vva()
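
Note: qva_to_vva() is the canonical region-table walk: rebase a QEMU virtual address into the backend's own mapping and clamp *len to the bytes remaining in the region so callers can detect short translations. A sketch under assumed field names matching the fragments above:

#include <stdint.h>

struct mem_region_sketch {
	uint64_t guest_user_addr;  /* frontend (QEMU) virtual address */
	uint64_t host_user_addr;   /* our mmap of the same region */
	uint64_t size;
};

/* Sketch: scan regions, rebase qva into our mapping, clamp *len. */
static uint64_t
qva_to_vva_sketch(struct mem_region_sketch *regions, uint32_t nregions,
		  uint64_t qva, uint64_t *len)
{
	uint32_t i;

	for (i = 0; i < nregions; i++) {
		struct mem_region_sketch *r = &regions[i];

		if (qva >= r->guest_user_addr &&
		    qva < r->guest_user_addr + r->size) {
			if (*len > r->guest_user_addr + r->size - qva)
				*len = r->guest_user_addr + r->size - qva;
			return qva - r->guest_user_addr + r->host_user_addr;
		}
	}

	*len = 0;
	return 0;
}
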
704 ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq, in ring_addr_to_vva() argument
707 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) { in ring_addr_to_vva()
711 vva = vhost_iova_to_vva(dev, vq, ra, in ring_addr_to_vva()
718 return qva_to_vva(dev, ra, size); in ring_addr_to_vva()
722 log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq) in log_addr_to_gpa() argument
727 log_gpa = translate_log_addr(dev, vq, vq->ring_addrs.log_guest_addr); in log_addr_to_gpa()
734 translate_ring_addresses(struct virtio_net *dev, int vq_index) in translate_ring_addresses() argument
736 struct vhost_virtqueue *vq = dev->virtqueue[vq_index]; in translate_ring_addresses()
742 log_addr_to_gpa(dev, vq); in translate_ring_addresses()
745 dev->ifname); in translate_ring_addresses()
746 return dev; in translate_ring_addresses()
750 if (vq_is_packed(dev)) { in translate_ring_addresses()
753 ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len); in translate_ring_addresses()
758 dev->ifname); in translate_ring_addresses()
759 return dev; in translate_ring_addresses()
762 dev = numa_realloc(dev, vq_index); in translate_ring_addresses()
763 vq = dev->virtqueue[vq_index]; in translate_ring_addresses()
768 (uintptr_t)ring_addr_to_vva(dev, in translate_ring_addresses()
773 dev->ifname); in translate_ring_addresses()
774 return dev; in translate_ring_addresses()
779 (uintptr_t)ring_addr_to_vva(dev, in translate_ring_addresses()
784 dev->ifname); in translate_ring_addresses()
785 return dev; in translate_ring_addresses()
789 return dev; in translate_ring_addresses()
794 return dev; in translate_ring_addresses()
797 vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev, in translate_ring_addresses()
800 VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map desc ring.\n", dev->ifname); in translate_ring_addresses()
801 return dev; in translate_ring_addresses()
804 dev = numa_realloc(dev, vq_index); in translate_ring_addresses()
805 vq = dev->virtqueue[vq_index]; in translate_ring_addresses()
809 if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) in translate_ring_addresses()
812 vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev, in translate_ring_addresses()
815 VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map avail ring.\n", dev->ifname); in translate_ring_addresses()
816 return dev; in translate_ring_addresses()
821 if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) in translate_ring_addresses()
824 vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev, in translate_ring_addresses()
827 VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map used ring.\n", dev->ifname); in translate_ring_addresses()
828 return dev; in translate_ring_addresses()
833 dev->ifname, in translate_ring_addresses()
838 dev->ifname); in translate_ring_addresses()
843 VHOST_LOG_CONFIG(DEBUG, "(%s) mapped address desc: %p\n", dev->ifname, vq->desc); in translate_ring_addresses()
844 VHOST_LOG_CONFIG(DEBUG, "(%s) mapped address avail: %p\n", dev->ifname, vq->avail); in translate_ring_addresses()
845 VHOST_LOG_CONFIG(DEBUG, "(%s) mapped address used: %p\n", dev->ifname, vq->used); in translate_ring_addresses()
847 dev->ifname, vq->log_guest_addr); in translate_ring_addresses()
849 return dev; in translate_ring_addresses()
861 struct virtio_net *dev = *pdev; in vhost_user_set_vring_addr() local
866 if (dev->mem == NULL) in vhost_user_set_vring_addr()
870 vq = dev->virtqueue[ctx->msg.payload.addr.index]; in vhost_user_set_vring_addr()
880 vring_invalidate(dev, vq); in vhost_user_set_vring_addr()
882 if ((vq->enabled && (dev->features & in vhost_user_set_vring_addr()
885 dev = translate_ring_addresses(dev, ctx->msg.payload.addr.index); in vhost_user_set_vring_addr()
886 if (!dev) in vhost_user_set_vring_addr()
889 *pdev = dev; in vhost_user_set_vring_addr()
903 struct virtio_net *dev = *pdev; in vhost_user_set_vring_base() local
904 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index]; in vhost_user_set_vring_base()
907 if (vq_is_packed(dev)) { in vhost_user_set_vring_base()
928 dev->ifname, ctx->msg.payload.state.index, vq->last_used_idx, in vhost_user_set_vring_base()
935 add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr, in add_one_guest_page() argument
941 if (dev->nr_guest_pages == dev->max_guest_pages) { in add_one_guest_page()
942 dev->max_guest_pages *= 2; in add_one_guest_page()
943 old_pages = dev->guest_pages; in add_one_guest_page()
944 dev->guest_pages = rte_realloc(dev->guest_pages, in add_one_guest_page()
945 dev->max_guest_pages * sizeof(*page), in add_one_guest_page()
947 if (dev->guest_pages == NULL) { in add_one_guest_page()
954 if (dev->nr_guest_pages > 0) { in add_one_guest_page()
955 last_page = &dev->guest_pages[dev->nr_guest_pages - 1]; in add_one_guest_page()
965 page = &dev->guest_pages[dev->nr_guest_pages++]; in add_one_guest_page()
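
Note: add_one_guest_page() shows two patterns in the fragments above: doubling the guest_pages array when it fills (lines 941-945) and merging with the previous entry when the new page is contiguous (lines 954-955). A sketch with plain realloc() standing in for rte_realloc():

#include <stdint.h>
#include <stdlib.h>

struct guest_page_sketch {
	uint64_t guest_phys_addr;
	uint64_t host_iova;
	uint64_t size;
};

/* Sketch: grow the array by doubling when full; extend the previous
 * entry instead of appending when the new page is both physically and
 * IOVA-contiguous with it. */
static int
add_one_guest_page_sketch(struct guest_page_sketch **pages,
			  uint32_t *nr, uint32_t *max,
			  uint64_t gpa, uint64_t iova, uint64_t size)
{
	if (*nr == *max) {
		struct guest_page_sketch *np =
			realloc(*pages, 2 * *max * sizeof(**pages));
		if (np == NULL)
			return -1;
		*pages = np;
		*max *= 2;
	}

	if (*nr > 0) {
		struct guest_page_sketch *last = &(*pages)[*nr - 1];

		if (last->guest_phys_addr + last->size == gpa &&
		    last->host_iova + last->size == iova) {
			last->size += size;
			return 0;
		}
	}

	(*pages)[(*nr)++] =
		(struct guest_page_sketch){ gpa, iova, size };
	return 0;
}
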
975 add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg, in add_guest_pages() argument
988 if (add_one_guest_page(dev, guest_phys_addr, host_iova, in add_guest_pages()
1000 if (add_one_guest_page(dev, guest_phys_addr, host_iova, in add_guest_pages()
1010 if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) { in add_guest_pages()
1011 qsort((void *)dev->guest_pages, dev->nr_guest_pages, in add_guest_pages()
1021 dump_guest_pages(struct virtio_net *dev) in dump_guest_pages() argument
1026 for (i = 0; i < dev->nr_guest_pages; i++) { in dump_guest_pages()
1027 page = &dev->guest_pages[i]; in dump_guest_pages()
1030 dev->ifname, i); in dump_guest_pages()
1032 dev->ifname, page->guest_phys_addr); in dump_guest_pages()
1034 dev->ifname, page->host_iova); in dump_guest_pages()
1036 dev->ifname, page->size); in dump_guest_pages()
1040 #define dump_guest_pages(dev) argument
1069 vhost_user_postcopy_region_register(struct virtio_net *dev, in vhost_user_postcopy_region_register() argument
1082 if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, in vhost_user_postcopy_region_register()
1086 dev->ifname, in vhost_user_postcopy_region_register()
1090 dev->postcopy_ufd, in vhost_user_postcopy_region_register()
1097 dev->ifname, in vhost_user_postcopy_region_register()
1106 vhost_user_postcopy_region_register(struct virtio_net *dev __rte_unused, in vhost_user_postcopy_region_register()
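
Note: vhost_user_postcopy_region_register() wraps the UFFDIO_REGISTER ioctl seen at line 1082, arming a memory range so missing-page faults are reported during postcopy live migration; the __rte_unused variant above is the stub for builds without userfaultfd. A minimal sketch of the ioctl (error logging omitted):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* Sketch: register one mapped range with an already-open userfault fd
 * so missing-page faults in it wake the postcopy handler. */
static int
postcopy_register_range(int ufd, void *start, uint64_t len)
{
	struct uffdio_register reg = {
		.range = {
			.start = (uint64_t)(uintptr_t)start,
			.len = len,
		},
		.mode = UFFDIO_REGISTER_MODE_MISSING,
	};

	if (ioctl(ufd, UFFDIO_REGISTER, &reg) < 0)
		return -1;
	return 0;
}
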
1114 vhost_user_postcopy_register(struct virtio_net *dev, int main_fd, in vhost_user_postcopy_register() argument
1122 if (!dev->postcopy_listening) in vhost_user_postcopy_register()
1132 reg = &dev->mem->regions[i]; in vhost_user_postcopy_register()
1138 send_vhost_reply(dev, main_fd, ctx); in vhost_user_postcopy_register()
1143 if (read_vhost_message(dev, main_fd, &ack_ctx) <= 0) { in vhost_user_postcopy_register()
1145 dev->ifname); in vhost_user_postcopy_register()
1149 if (validate_msg_fds(dev, &ack_ctx, 0) != 0) in vhost_user_postcopy_register()
1154 dev->ifname, ack_ctx.msg.request.master); in vhost_user_postcopy_register()
1160 reg = &dev->mem->regions[i]; in vhost_user_postcopy_register()
1161 if (vhost_user_postcopy_region_register(dev, reg) < 0) in vhost_user_postcopy_register()
1169 vhost_user_mmap_region(struct virtio_net *dev, in vhost_user_mmap_region() argument
1181 dev->ifname, mmap_offset, region->size); in vhost_user_mmap_region()
1196 dev->ifname); in vhost_user_mmap_region()
1210 dev->ifname, region->size + mmap_offset, alignment); in vhost_user_mmap_region()
1214 populate = dev->async_copy ? MAP_POPULATE : 0; in vhost_user_mmap_region()
1219 VHOST_LOG_CONFIG(ERR, "(%s) mmap failed (%s).\n", dev->ifname, strerror(errno)); in vhost_user_mmap_region()
1227 if (dev->async_copy) { in vhost_user_mmap_region()
1228 if (add_guest_pages(dev, region, alignment) < 0) { in vhost_user_mmap_region()
1230 dev->ifname); in vhost_user_mmap_region()
1236 dev->ifname, region->size); in vhost_user_mmap_region()
1238 dev->ifname, region->guest_phys_addr); in vhost_user_mmap_region()
1240 dev->ifname, region->guest_user_addr); in vhost_user_mmap_region()
1242 dev->ifname, region->host_user_addr); in vhost_user_mmap_region()
1244 dev->ifname, (uint64_t)(uintptr_t)mmap_addr); in vhost_user_mmap_region()
1246 dev->ifname, mmap_size); in vhost_user_mmap_region()
1248 dev->ifname, alignment); in vhost_user_mmap_region()
1250 dev->ifname, mmap_offset); in vhost_user_mmap_region()
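
Note: vhost_user_mmap_region() rounds the mapping up to the backing fd's alignment (the hugepage size on hugetlbfs backings) and pre-faults pages with MAP_POPULATE when async copy needs them resident. A sketch assuming a power-of-two alignment supplied by the caller; the return value is MAP_FAILED on error:

#include <stdint.h>
#include <sys/mman.h>

/* Sketch: align the mapping length up to `alignment` (assumed to be a
 * power of two) and optionally pre-fault the pages. */
static void *
map_region_sketch(int fd, uint64_t size, uint64_t mmap_offset,
		  uint64_t alignment, int async_copy)
{
	uint64_t mmap_size = (size + mmap_offset + alignment - 1)
			     & ~(alignment - 1);
	int populate = async_copy ? MAP_POPULATE : 0;

	return mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED | populate, fd, 0);
}
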
1260 struct virtio_net *dev = *pdev; in vhost_user_set_mem_table() local
1268 if (validate_msg_fds(dev, ctx, memory->nregions) != 0) in vhost_user_set_mem_table()
1273 dev->ifname, memory->nregions); in vhost_user_set_mem_table()
1277 if (dev->mem && !vhost_memory_changed(memory, dev->mem)) { in vhost_user_set_mem_table()
1278 VHOST_LOG_CONFIG(INFO, "(%s) memory regions not changed\n", dev->ifname); in vhost_user_set_mem_table()
1285 if (dev->mem) { in vhost_user_set_mem_table()
1286 if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) { in vhost_user_set_mem_table()
1287 struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev; in vhost_user_set_mem_table()
1290 vdpa_dev->ops->dev_close(dev->vid); in vhost_user_set_mem_table()
1291 dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED; in vhost_user_set_mem_table()
1295 if (dev->async_copy && dev->notify_ops->vring_state_changed) { in vhost_user_set_mem_table()
1296 for (i = 0; i < dev->nr_vring; i++) { in vhost_user_set_mem_table()
1297 dev->notify_ops->vring_state_changed(dev->vid, in vhost_user_set_mem_table()
1303 free_mem_region(dev); in vhost_user_set_mem_table()
1304 rte_free(dev->mem); in vhost_user_set_mem_table()
1305 dev->mem = NULL; in vhost_user_set_mem_table()
1309 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) in vhost_user_set_mem_table()
1310 for (i = 0; i < dev->nr_vring; i++) in vhost_user_set_mem_table()
1311 vhost_user_iotlb_flush_all(dev->virtqueue[i]); in vhost_user_set_mem_table()
1317 if (dev->nr_vring > 0) in vhost_user_set_mem_table()
1318 numa_node = dev->virtqueue[0]->numa_node; in vhost_user_set_mem_table()
1320 dev->nr_guest_pages = 0; in vhost_user_set_mem_table()
1321 if (dev->guest_pages == NULL) { in vhost_user_set_mem_table()
1322 dev->max_guest_pages = 8; in vhost_user_set_mem_table()
1323 dev->guest_pages = rte_zmalloc_socket(NULL, in vhost_user_set_mem_table()
1324 dev->max_guest_pages * in vhost_user_set_mem_table()
1328 if (dev->guest_pages == NULL) { in vhost_user_set_mem_table()
1331 dev->ifname); in vhost_user_set_mem_table()
1336 dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) + in vhost_user_set_mem_table()
1338 if (dev->mem == NULL) { in vhost_user_set_mem_table()
1341 dev->ifname); in vhost_user_set_mem_table()
1346 reg = &dev->mem->regions[i]; in vhost_user_set_mem_table()
1361 if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) { in vhost_user_set_mem_table()
1362 VHOST_LOG_CONFIG(ERR, "(%s) failed to mmap region %u\n", dev->ifname, i); in vhost_user_set_mem_table()
1366 dev->mem->nregions++; in vhost_user_set_mem_table()
1369 if (dev->async_copy && rte_vfio_is_enabled("vfio")) in vhost_user_set_mem_table()
1370 async_dma_map(dev, true); in vhost_user_set_mem_table()
1372 if (vhost_user_postcopy_register(dev, main_fd, ctx) < 0) in vhost_user_set_mem_table()
1375 for (i = 0; i < dev->nr_vring; i++) { in vhost_user_set_mem_table()
1376 struct vhost_virtqueue *vq = dev->virtqueue[i]; in vhost_user_set_mem_table()
1387 vring_invalidate(dev, vq); in vhost_user_set_mem_table()
1389 dev = translate_ring_addresses(dev, i); in vhost_user_set_mem_table()
1390 if (!dev) { in vhost_user_set_mem_table()
1391 dev = *pdev; in vhost_user_set_mem_table()
1395 *pdev = dev; in vhost_user_set_mem_table()
1399 dump_guest_pages(dev); in vhost_user_set_mem_table()
1402 for (i = 0; i < dev->nr_vring; i++) in vhost_user_set_mem_table()
1403 dev->notify_ops->vring_state_changed(dev->vid, i, 1); in vhost_user_set_mem_table()
1409 free_mem_region(dev); in vhost_user_set_mem_table()
1410 rte_free(dev->mem); in vhost_user_set_mem_table()
1411 dev->mem = NULL; in vhost_user_set_mem_table()
1414 rte_free(dev->guest_pages); in vhost_user_set_mem_table()
1415 dev->guest_pages = NULL; in vhost_user_set_mem_table()
1422 vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq) in vq_is_ready() argument
1429 if (vq_is_packed(dev)) in vq_is_ready()
1444 virtio_is_ready(struct virtio_net *dev) in virtio_is_ready() argument
1447 uint32_t i, nr_vring = dev->nr_vring; in virtio_is_ready()
1449 if (dev->flags & VIRTIO_DEV_READY) in virtio_is_ready()
1452 if (!dev->nr_vring) in virtio_is_ready()
1455 if (dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) { in virtio_is_ready()
1458 if (dev->nr_vring < nr_vring) in virtio_is_ready()
1463 vq = dev->virtqueue[i]; in virtio_is_ready()
1465 if (!vq_is_ready(dev, vq)) in virtio_is_ready()
1470 if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS)) in virtio_is_ready()
1471 if (!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)) in virtio_is_ready()
1474 dev->flags |= VIRTIO_DEV_READY; in virtio_is_ready()
1476 if (!(dev->flags & VIRTIO_DEV_RUNNING)) in virtio_is_ready()
1477 VHOST_LOG_CONFIG(INFO, "(%s) virtio is now ready for processing.\n", dev->ifname); in virtio_is_ready()
1482 inflight_mem_alloc(struct virtio_net *dev, const char *name, size_t size, int *fd) in inflight_mem_alloc() argument
1498 dev->ifname); in inflight_mem_alloc()
1506 VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc inflight buffer\n", dev->ifname); in inflight_mem_alloc()
1513 VHOST_LOG_CONFIG(ERR, "(%s) failed to mmap inflight buffer\n", dev->ifname); in inflight_mem_alloc()
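
Note: inflight_mem_alloc() hands the frontend an fd backing the shared inflight area. A sketch assuming memfd_create() (glibc >= 2.27) is the backing mechanism; the real allocator may fall back to a temp file where it is absent:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <unistd.h>
#include <stddef.h>

/* Sketch: back the shared inflight area with an anonymous memfd,
 * size it, map it, and return both the mapping and the fd so the fd
 * can be passed to the frontend over the vhost-user socket. */
static void *
inflight_alloc_sketch(const char *name, size_t size, int *fd_out)
{
	void *addr;
	int fd = memfd_create(name, MFD_CLOEXEC);

	if (fd < 0)
		return NULL;
	if (ftruncate(fd, size) < 0) {
		close(fd);
		return NULL;
	}
	addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		close(fd);
		return NULL;
	}
	*fd_out = fd;
	return addr;
}
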
1547 struct virtio_net *dev = *pdev; in vhost_user_get_inflight_fd() local
1554 dev->ifname, ctx->msg.size); in vhost_user_get_inflight_fd()
1562 if (dev->nr_vring > 0) in vhost_user_get_inflight_fd()
1563 numa_node = dev->virtqueue[0]->numa_node; in vhost_user_get_inflight_fd()
1565 if (dev->inflight_info == NULL) { in vhost_user_get_inflight_fd()
1566 dev->inflight_info = rte_zmalloc_socket("inflight_info", in vhost_user_get_inflight_fd()
1568 if (!dev->inflight_info) { in vhost_user_get_inflight_fd()
1570 dev->ifname); in vhost_user_get_inflight_fd()
1573 dev->inflight_info->fd = -1; in vhost_user_get_inflight_fd()
1580 dev->ifname, ctx->msg.payload.inflight.num_queues); in vhost_user_get_inflight_fd()
1582 dev->ifname, ctx->msg.payload.inflight.queue_size); in vhost_user_get_inflight_fd()
1584 if (vq_is_packed(dev)) in vhost_user_get_inflight_fd()
1590 addr = inflight_mem_alloc(dev, "vhost-inflight", mmap_size, &fd); in vhost_user_get_inflight_fd()
1592 VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc vhost inflight area\n", dev->ifname); in vhost_user_get_inflight_fd()
1598 if (dev->inflight_info->addr) { in vhost_user_get_inflight_fd()
1599 munmap(dev->inflight_info->addr, dev->inflight_info->size); in vhost_user_get_inflight_fd()
1600 dev->inflight_info->addr = NULL; in vhost_user_get_inflight_fd()
1603 if (dev->inflight_info->fd >= 0) { in vhost_user_get_inflight_fd()
1604 close(dev->inflight_info->fd); in vhost_user_get_inflight_fd()
1605 dev->inflight_info->fd = -1; in vhost_user_get_inflight_fd()
1608 dev->inflight_info->addr = addr; in vhost_user_get_inflight_fd()
1609 dev->inflight_info->size = ctx->msg.payload.inflight.mmap_size = mmap_size; in vhost_user_get_inflight_fd()
1610 dev->inflight_info->fd = ctx->fds[0] = fd; in vhost_user_get_inflight_fd()
1614 if (vq_is_packed(dev)) { in vhost_user_get_inflight_fd()
1627 dev->ifname, ctx->msg.payload.inflight.mmap_size); in vhost_user_get_inflight_fd()
1629 dev->ifname, ctx->msg.payload.inflight.mmap_offset); in vhost_user_get_inflight_fd()
1630 VHOST_LOG_CONFIG(INFO, "(%s) send inflight fd: %d\n", dev->ifname, ctx->fds[0]); in vhost_user_get_inflight_fd()
1642 struct virtio_net *dev = *pdev; in vhost_user_set_inflight_fd() local
1649 if (validate_msg_fds(dev, ctx, 1) != 0) in vhost_user_set_inflight_fd()
1655 dev->ifname, ctx->msg.size, fd); in vhost_user_set_inflight_fd()
1664 if (vq_is_packed(dev)) in vhost_user_set_inflight_fd()
1670 dev->ifname, mmap_size); in vhost_user_set_inflight_fd()
1672 dev->ifname, mmap_offset); in vhost_user_set_inflight_fd()
1673 VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd num_queues: %u\n", dev->ifname, num_queues); in vhost_user_set_inflight_fd()
1674 VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd queue_size: %u\n", dev->ifname, queue_size); in vhost_user_set_inflight_fd()
1675 VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd fd: %d\n", dev->ifname, fd); in vhost_user_set_inflight_fd()
1677 dev->ifname, pervq_inflight_size); in vhost_user_set_inflight_fd()
1683 if (dev->nr_vring > 0) in vhost_user_set_inflight_fd()
1684 numa_node = dev->virtqueue[0]->numa_node; in vhost_user_set_inflight_fd()
1686 if (!dev->inflight_info) { in vhost_user_set_inflight_fd()
1687 dev->inflight_info = rte_zmalloc_socket("inflight_info", in vhost_user_set_inflight_fd()
1689 if (dev->inflight_info == NULL) { in vhost_user_set_inflight_fd()
1691 dev->ifname); in vhost_user_set_inflight_fd()
1694 dev->inflight_info->fd = -1; in vhost_user_set_inflight_fd()
1697 if (dev->inflight_info->addr) { in vhost_user_set_inflight_fd()
1698 munmap(dev->inflight_info->addr, dev->inflight_info->size); in vhost_user_set_inflight_fd()
1699 dev->inflight_info->addr = NULL; in vhost_user_set_inflight_fd()
1705 VHOST_LOG_CONFIG(ERR, "(%s) failed to mmap share memory.\n", dev->ifname); in vhost_user_set_inflight_fd()
1709 if (dev->inflight_info->fd >= 0) { in vhost_user_set_inflight_fd()
1710 close(dev->inflight_info->fd); in vhost_user_set_inflight_fd()
1711 dev->inflight_info->fd = -1; in vhost_user_set_inflight_fd()
1714 dev->inflight_info->fd = fd; in vhost_user_set_inflight_fd()
1715 dev->inflight_info->addr = addr; in vhost_user_set_inflight_fd()
1716 dev->inflight_info->size = mmap_size; in vhost_user_set_inflight_fd()
1719 vq = dev->virtqueue[i]; in vhost_user_set_inflight_fd()
1723 if (vq_is_packed(dev)) { in vhost_user_set_inflight_fd()
1741 struct virtio_net *dev = *pdev; in vhost_user_set_vring_call() local
1747 if (validate_msg_fds(dev, ctx, expected_fds) != 0) in vhost_user_set_vring_call()
1756 dev->ifname, file.index, file.fd); in vhost_user_set_vring_call()
1758 vq = dev->virtqueue[file.index]; in vhost_user_set_vring_call()
1762 vhost_user_notify_queue_state(dev, file.index, 0); in vhost_user_set_vring_call()
1777 struct virtio_net *dev = *pdev; in vhost_user_set_vring_err() local
1781 if (validate_msg_fds(dev, ctx, expected_fds) != 0) in vhost_user_set_vring_err()
1786 VHOST_LOG_CONFIG(INFO, "(%s) not implemented\n", dev->ifname); in vhost_user_set_vring_err()
1804 vhost_check_queue_inflights_split(struct virtio_net *dev, in vhost_check_queue_inflights_split() argument
1813 if (!(dev->protocol_features & in vhost_check_queue_inflights_split()
1854 dev->ifname); in vhost_check_queue_inflights_split()
1864 dev->ifname); in vhost_check_queue_inflights_split()
1893 vhost_check_queue_inflights_packed(struct virtio_net *dev, in vhost_check_queue_inflights_packed() argument
1901 if (!(dev->protocol_features & in vhost_check_queue_inflights_packed()
1952 dev->ifname); in vhost_check_queue_inflights_packed()
1962 dev->ifname); in vhost_check_queue_inflights_packed()
1995 struct virtio_net *dev = *pdev; in vhost_user_set_vring_kick() local
2001 if (validate_msg_fds(dev, ctx, expected_fds) != 0) in vhost_user_set_vring_kick()
2010 dev->ifname, file.index, file.fd); in vhost_user_set_vring_kick()
2013 dev = translate_ring_addresses(dev, file.index); in vhost_user_set_vring_kick()
2014 if (!dev) { in vhost_user_set_vring_kick()
2021 *pdev = dev; in vhost_user_set_vring_kick()
2023 vq = dev->virtqueue[file.index]; in vhost_user_set_vring_kick()
2030 if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) { in vhost_user_set_vring_kick()
2036 vhost_user_notify_queue_state(dev, file.index, 0); in vhost_user_set_vring_kick()
2043 if (vq_is_packed(dev)) { in vhost_user_set_vring_kick()
2044 if (vhost_check_queue_inflights_packed(dev, vq)) { in vhost_user_set_vring_kick()
2046 dev->ifname, file.index); in vhost_user_set_vring_kick()
2050 if (vhost_check_queue_inflights_split(dev, vq)) { in vhost_user_set_vring_kick()
2052 dev->ifname, file.index); in vhost_user_set_vring_kick()
2068 struct virtio_net *dev = *pdev; in vhost_user_get_vring_base() local
2069 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index]; in vhost_user_get_vring_base()
2073 vhost_destroy_device_notify(dev); in vhost_user_get_vring_base()
2075 dev->flags &= ~VIRTIO_DEV_READY; in vhost_user_get_vring_base()
2076 dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED; in vhost_user_get_vring_base()
2079 if (vq_is_packed(dev)) { in vhost_user_get_vring_base()
2092 dev->ifname, ctx->msg.payload.state.index, in vhost_user_get_vring_base()
2111 if (vq_is_packed(dev)) { in vhost_user_get_vring_base()
2130 vring_invalidate(dev, vq); in vhost_user_get_vring_base()
2144 struct virtio_net *dev = *pdev; in vhost_user_set_vring_enable() local
2149 dev->ifname, enable, index); in vhost_user_set_vring_enable()
2151 if (enable && dev->virtqueue[index]->async) { in vhost_user_set_vring_enable()
2152 if (dev->virtqueue[index]->async->pkts_inflight_n) { in vhost_user_set_vring_enable()
2155 dev->ifname); in vhost_user_set_vring_enable()
2160 dev->virtqueue[index]->enabled = enable; in vhost_user_set_vring_enable()
2170 struct virtio_net *dev = *pdev; in vhost_user_get_protocol_features() local
2173 rte_vhost_driver_get_features(dev->ifname, &features); in vhost_user_get_protocol_features()
2174 rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features); in vhost_user_get_protocol_features()
2188 struct virtio_net *dev = *pdev; in vhost_user_set_protocol_features() local
2192 rte_vhost_driver_get_protocol_features(dev->ifname, in vhost_user_set_protocol_features()
2195 VHOST_LOG_CONFIG(ERR, "(%s) received invalid protocol features.\n", dev->ifname); in vhost_user_set_protocol_features()
2199 dev->protocol_features = protocol_features; in vhost_user_set_protocol_features()
2201 dev->ifname, dev->protocol_features); in vhost_user_set_protocol_features()
2211 struct virtio_net *dev = *pdev; in vhost_user_set_log_base() local
2217 if (validate_msg_fds(dev, ctx, 1) != 0) in vhost_user_set_log_base()
2221 VHOST_LOG_CONFIG(ERR, "(%s) invalid log fd: %d\n", dev->ifname, fd); in vhost_user_set_log_base()
2227 dev->ifname, ctx->msg.size, (int)sizeof(VhostUserLog)); in vhost_user_set_log_base()
2238 dev->ifname, off, size); in vhost_user_set_log_base()
2243 dev->ifname, size, off); in vhost_user_set_log_base()
2252 VHOST_LOG_CONFIG(ERR, "(%s) mmap log base failed!\n", dev->ifname); in vhost_user_set_log_base()
2260 if (dev->log_addr) { in vhost_user_set_log_base()
2261 munmap((void *)(uintptr_t)dev->log_addr, dev->log_size); in vhost_user_set_log_base()
2263 dev->log_addr = (uint64_t)(uintptr_t)addr; in vhost_user_set_log_base()
2264 dev->log_base = dev->log_addr + off; in vhost_user_set_log_base()
2265 dev->log_size = size; in vhost_user_set_log_base()
2267 for (i = 0; i < dev->nr_vring; i++) { in vhost_user_set_log_base()
2268 struct vhost_virtqueue *vq = dev->virtqueue[i]; in vhost_user_set_log_base()
2282 dev->ifname); in vhost_user_set_log_base()
2303 struct virtio_net *dev = *pdev; in vhost_user_set_log_fd() local
2305 if (validate_msg_fds(dev, ctx, 1) != 0) in vhost_user_set_log_fd()
2309 VHOST_LOG_CONFIG(INFO, "(%s) not implemented.\n", dev->ifname); in vhost_user_set_log_fd()
2327 struct virtio_net *dev = *pdev; in vhost_user_send_rarp() local
2332 dev->ifname, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); in vhost_user_send_rarp()
2333 memcpy(dev->mac.addr_bytes, mac, 6); in vhost_user_send_rarp()
2342 __atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE); in vhost_user_send_rarp()
2343 vdpa_dev = dev->vdpa_dev; in vhost_user_send_rarp()
2345 vdpa_dev->ops->migration_done(dev->vid); in vhost_user_send_rarp()
2355 struct virtio_net *dev = *pdev; in vhost_user_net_set_mtu() local
2360 dev->ifname, ctx->msg.payload.u64); in vhost_user_net_set_mtu()
2365 dev->mtu = ctx->msg.payload.u64; in vhost_user_net_set_mtu()
2375 struct virtio_net *dev = *pdev; in vhost_user_set_req_fd() local
2378 if (validate_msg_fds(dev, ctx, 1) != 0) in vhost_user_set_req_fd()
2383 dev->ifname, fd); in vhost_user_set_req_fd()
2387 if (dev->slave_req_fd >= 0) in vhost_user_set_req_fd()
2388 close(dev->slave_req_fd); in vhost_user_set_req_fd()
2390 dev->slave_req_fd = fd; in vhost_user_set_req_fd()
2460 static int is_vring_iotlb(struct virtio_net *dev, in is_vring_iotlb() argument
2464 if (vq_is_packed(dev)) in is_vring_iotlb()
2475 struct virtio_net *dev = *pdev; in vhost_user_iotlb_msg() local
2483 vva = qva_to_vva(dev, imsg->uaddr, &len); in vhost_user_iotlb_msg()
2487 for (i = 0; i < dev->nr_vring; i++) { in vhost_user_iotlb_msg()
2488 struct vhost_virtqueue *vq = dev->virtqueue[i]; in vhost_user_iotlb_msg()
2493 vhost_user_iotlb_cache_insert(dev, vq, imsg->iova, vva, in vhost_user_iotlb_msg()
2496 if (is_vring_iotlb(dev, vq, imsg)) { in vhost_user_iotlb_msg()
2498 *pdev = dev = translate_ring_addresses(dev, i); in vhost_user_iotlb_msg()
2504 for (i = 0; i < dev->nr_vring; i++) { in vhost_user_iotlb_msg()
2505 struct vhost_virtqueue *vq = dev->virtqueue[i]; in vhost_user_iotlb_msg()
2513 if (is_vring_iotlb(dev, vq, imsg)) { in vhost_user_iotlb_msg()
2515 vring_invalidate(dev, vq); in vhost_user_iotlb_msg()
2522 dev->ifname, imsg->type); in vhost_user_iotlb_msg()
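
Note: vhost_user_iotlb_msg() branches on the IOTLB message type: UPDATE translates the frontend's uaddr via qva_to_vva() and inserts the mapping into the per-virtqueue cache, INVALIDATE drops cached entries and invalidates any vring the range covers (is_vring_iotlb()). A structural skeleton, with type values taken from the vhost-user spec and the per-branch logic reduced to comments; all names here are illustrative:

#include <stdint.h>

enum { IOTLB_UPDATE = 2, IOTLB_INVALIDATE = 3 };

struct iotlb_msg_sketch {
	uint64_t iova, size, uaddr;
	uint8_t perm, type;
};

/* Skeleton of the dispatch the matches above come from. */
static int
handle_iotlb_msg_sketch(struct iotlb_msg_sketch *m)
{
	switch (m->type) {
	case IOTLB_UPDATE:
		/* translate m->uaddr into our address space, then insert
		 * (iova, vva, size, perm) into each vq's IOTLB cache; if
		 * the range covers a ring, re-translate that vring */
		break;
	case IOTLB_INVALIDATE:
		/* drop cache entries in [iova, iova + size); if the range
		 * covers a ring, invalidate the vring pointers */
		break;
	default:
		return -1;	/* unknown type, logged in the real code */
	}
	return 0;
}
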
2534 struct virtio_net *dev = *pdev; in vhost_user_set_postcopy_advise() local
2538 dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); in vhost_user_set_postcopy_advise()
2540 if (dev->postcopy_ufd == -1) { in vhost_user_set_postcopy_advise()
2542 dev->ifname, strerror(errno)); in vhost_user_set_postcopy_advise()
2547 if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) { in vhost_user_set_postcopy_advise()
2549 dev->ifname, strerror(errno)); in vhost_user_set_postcopy_advise()
2550 close(dev->postcopy_ufd); in vhost_user_set_postcopy_advise()
2551 dev->postcopy_ufd = -1; in vhost_user_set_postcopy_advise()
2554 ctx->fds[0] = dev->postcopy_ufd; in vhost_user_set_postcopy_advise()
2559 dev->postcopy_ufd = -1; in vhost_user_set_postcopy_advise()
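
Note: vhost_user_set_postcopy_advise() opens the userfault fd with a raw syscall (line 2538 appears verbatim above; there is no libc wrapper) and negotiates the API version before returning the fd to the frontend. A self-contained sketch:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

/* Sketch: open a userfault fd and complete the mandatory UFFDIO_API
 * handshake; returns the fd, or -1 on failure. */
static int
postcopy_open_ufd(void)
{
	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
	int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (ufd < 0)
		return -1;
	if (ioctl(ufd, UFFDIO_API, &api) < 0) {
		close(ufd);
		return -1;
	}
	return ufd;
}
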
2571 struct virtio_net *dev = *pdev; in vhost_user_set_postcopy_listen() local
2573 if (dev->mem && dev->mem->nregions) { in vhost_user_set_postcopy_listen()
2575 dev->ifname); in vhost_user_set_postcopy_listen()
2578 dev->postcopy_listening = 1; in vhost_user_set_postcopy_listen()
2588 struct virtio_net *dev = *pdev; in vhost_user_postcopy_end() local
2590 dev->postcopy_listening = 0; in vhost_user_postcopy_end()
2591 if (dev->postcopy_ufd >= 0) { in vhost_user_postcopy_end()
2592 close(dev->postcopy_ufd); in vhost_user_postcopy_end()
2593 dev->postcopy_ufd = -1; in vhost_user_postcopy_end()
2608 struct virtio_net *dev = *pdev; in vhost_user_get_status() local
2610 ctx->msg.payload.u64 = dev->status; in vhost_user_get_status()
2622 struct virtio_net *dev = *pdev; in vhost_user_set_status() local
2627 dev->ifname, ctx->msg.payload.u64); in vhost_user_set_status()
2631 dev->status = ctx->msg.payload.u64; in vhost_user_set_status()
2633 if ((dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK) && in vhost_user_set_status()
2634 (dev->flags & VIRTIO_DEV_FEATURES_FAILED)) { in vhost_user_set_status()
2637 dev->ifname); in vhost_user_set_status()
2642 dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK; in vhost_user_set_status()
2645 VHOST_LOG_CONFIG(INFO, "(%s) new device status(0x%08x):\n", dev->ifname, in vhost_user_set_status()
2646 dev->status); in vhost_user_set_status()
2647 VHOST_LOG_CONFIG(INFO, "(%s)\t-RESET: %u\n", dev->ifname, in vhost_user_set_status()
2648 (dev->status == VIRTIO_DEVICE_STATUS_RESET)); in vhost_user_set_status()
2649 VHOST_LOG_CONFIG(INFO, "(%s)\t-ACKNOWLEDGE: %u\n", dev->ifname, in vhost_user_set_status()
2650 !!(dev->status & VIRTIO_DEVICE_STATUS_ACK)); in vhost_user_set_status()
2651 VHOST_LOG_CONFIG(INFO, "(%s)\t-DRIVER: %u\n", dev->ifname, in vhost_user_set_status()
2652 !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER)); in vhost_user_set_status()
2653 VHOST_LOG_CONFIG(INFO, "(%s)\t-FEATURES_OK: %u\n", dev->ifname, in vhost_user_set_status()
2654 !!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK)); in vhost_user_set_status()
2655 VHOST_LOG_CONFIG(INFO, "(%s)\t-DRIVER_OK: %u\n", dev->ifname, in vhost_user_set_status()
2656 !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)); in vhost_user_set_status()
2657 VHOST_LOG_CONFIG(INFO, "(%s)\t-DEVICE_NEED_RESET: %u\n", dev->ifname, in vhost_user_set_status()
2658 !!(dev->status & VIRTIO_DEVICE_STATUS_DEV_NEED_RESET)); in vhost_user_set_status()
2659 VHOST_LOG_CONFIG(INFO, "(%s)\t-FAILED: %u\n", dev->ifname, in vhost_user_set_status()
2660 !!(dev->status & VIRTIO_DEVICE_STATUS_FAILED)); in vhost_user_set_status()
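
Note: the vhost_user_set_status() hits above are a bit-by-bit dump of the virtio device status. A standalone sketch using the virtio-spec bit values (ACKNOWLEDGE 0x1, DRIVER 0x2, DRIVER_OK 0x4, FEATURES_OK 0x8, DEVICE_NEEDS_RESET 0x40, FAILED 0x80; a status of 0 means RESET):

#include <stdint.h>
#include <stdio.h>

/* Sketch: decode the virtio status byte the way the log lines above
 * do, one flag per line. */
static void
dump_device_status(const char *ifname, uint8_t status)
{
	printf("(%s) new device status(0x%08x):\n", ifname, status);
	printf("(%s)\t-RESET: %u\n", ifname, status == 0);
	printf("(%s)\t-ACKNOWLEDGE: %u\n", ifname, !!(status & 0x1));
	printf("(%s)\t-DRIVER: %u\n", ifname, !!(status & 0x2));
	printf("(%s)\t-FEATURES_OK: %u\n", ifname, !!(status & 0x8));
	printf("(%s)\t-DRIVER_OK: %u\n", ifname, !!(status & 0x4));
	printf("(%s)\t-DEVICE_NEED_RESET: %u\n", ifname, !!(status & 0x40));
	printf("(%s)\t-FAILED: %u\n", ifname, !!(status & 0x80));
}
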
2706 read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx) in read_vhost_message() argument
2710 ret = read_fd_message(dev->ifname, sockfd, (char *)&ctx->msg, VHOST_USER_HDR_SIZE, in read_vhost_message()
2715 VHOST_LOG_CONFIG(ERR, "(%s) Unexpected header size read\n", dev->ifname); in read_vhost_message()
2723 dev->ifname, ctx->msg.size); in read_vhost_message()
2730 VHOST_LOG_CONFIG(ERR, "(%s) read control message failed\n", dev->ifname); in read_vhost_message()
2739 send_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx) in send_vhost_message() argument
2744 return send_fd_message(dev->ifname, sockfd, (char *)&ctx->msg, in send_vhost_message()
2749 send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx) in send_vhost_reply() argument
2759 return send_vhost_message(dev, sockfd, ctx); in send_vhost_reply()
2763 send_vhost_slave_message(struct virtio_net *dev, in send_vhost_slave_message() argument
2769 rte_spinlock_lock(&dev->slave_req_lock); in send_vhost_slave_message()
2771 ret = send_vhost_message(dev, dev->slave_req_fd, ctx); in send_vhost_slave_message()
2773 rte_spinlock_unlock(&dev->slave_req_lock); in send_vhost_slave_message()
2782 vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, in vhost_user_check_and_alloc_queue_pair() argument
2810 VHOST_LOG_CONFIG(ERR, "(%s) invalid vring index: %u\n", dev->ifname, vring_idx); in vhost_user_check_and_alloc_queue_pair()
2814 if (dev->virtqueue[vring_idx]) in vhost_user_check_and_alloc_queue_pair()
2817 return alloc_vring_queue(dev, vring_idx); in vhost_user_check_and_alloc_queue_pair()
2821 vhost_user_lock_all_queue_pairs(struct virtio_net *dev) in vhost_user_lock_all_queue_pairs() argument
2826 while (vq_num < dev->nr_vring) { in vhost_user_lock_all_queue_pairs()
2827 struct vhost_virtqueue *vq = dev->virtqueue[i]; in vhost_user_lock_all_queue_pairs()
2838 vhost_user_unlock_all_queue_pairs(struct virtio_net *dev) in vhost_user_unlock_all_queue_pairs() argument
2843 while (vq_num < dev->nr_vring) { in vhost_user_unlock_all_queue_pairs()
2844 struct vhost_virtqueue *vq = dev->virtqueue[i]; in vhost_user_unlock_all_queue_pairs()
2857 struct virtio_net *dev; in vhost_user_msg_handler() local
2867 dev = get_device(vid); in vhost_user_msg_handler()
2868 if (dev == NULL) in vhost_user_msg_handler()
2871 if (!dev->notify_ops) { in vhost_user_msg_handler()
2872 dev->notify_ops = vhost_driver_callback_get(dev->ifname); in vhost_user_msg_handler()
2873 if (!dev->notify_ops) { in vhost_user_msg_handler()
2875 dev->ifname); in vhost_user_msg_handler()
2880 ret = read_vhost_message(dev, fd, &ctx); in vhost_user_msg_handler()
2883 VHOST_LOG_CONFIG(ERR, "(%s) vhost read message failed\n", dev->ifname); in vhost_user_msg_handler()
2885 VHOST_LOG_CONFIG(INFO, "(%s) vhost peer closed\n", dev->ifname); in vhost_user_msg_handler()
2900 dev->ifname, msg_handler->description); in vhost_user_msg_handler()
2903 dev->ifname, msg_handler->description); in vhost_user_msg_handler()
2905 VHOST_LOG_CONFIG(DEBUG, "(%s) external request %d\n", dev->ifname, request); in vhost_user_msg_handler()
2908 ret = vhost_user_check_and_alloc_queue_pair(dev, &ctx); in vhost_user_msg_handler()
2910 VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc queue\n", dev->ifname); in vhost_user_msg_handler()
2938 if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) { in vhost_user_msg_handler()
2939 vhost_user_lock_all_queue_pairs(dev); in vhost_user_msg_handler()
2949 if (dev->extern_ops.pre_msg_handle) { in vhost_user_msg_handler()
2951 ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx); in vhost_user_msg_handler()
2954 send_vhost_reply(dev, fd, &ctx); in vhost_user_msg_handler()
2969 if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) != 0) { in vhost_user_msg_handler()
2972 ret = msg_handler->callback(&dev, &ctx, fd); in vhost_user_msg_handler()
2978 dev->ifname, msg_handler->description); in vhost_user_msg_handler()
2983 dev->ifname, msg_handler->description); in vhost_user_msg_handler()
2988 dev->ifname, msg_handler->description); in vhost_user_msg_handler()
2989 send_vhost_reply(dev, fd, &ctx); in vhost_user_msg_handler()
2998 dev->extern_ops.post_msg_handle) { in vhost_user_msg_handler()
3000 ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx); in vhost_user_msg_handler()
3003 send_vhost_reply(dev, fd, &ctx); in vhost_user_msg_handler()
3017 dev->ifname, request); in vhost_user_msg_handler()
3031 send_vhost_reply(dev, fd, &ctx); in vhost_user_msg_handler()
3033 VHOST_LOG_CONFIG(ERR, "(%s) vhost message handling failed.\n", dev->ifname); in vhost_user_msg_handler()
3037 for (i = 0; i < dev->nr_vring; i++) { in vhost_user_msg_handler()
3038 struct vhost_virtqueue *vq = dev->virtqueue[i]; in vhost_user_msg_handler()
3039 bool cur_ready = vq_is_ready(dev, vq); in vhost_user_msg_handler()
3043 vhost_user_notify_queue_state(dev, i, cur_ready); in vhost_user_msg_handler()
3048 vhost_user_unlock_all_queue_pairs(dev); in vhost_user_msg_handler()
3050 if (!virtio_is_ready(dev)) in vhost_user_msg_handler()
3059 if (!(dev->flags & VIRTIO_DEV_RUNNING)) { in vhost_user_msg_handler()
3060 if (dev->notify_ops->new_device(dev->vid) == 0) in vhost_user_msg_handler()
3061 dev->flags |= VIRTIO_DEV_RUNNING; in vhost_user_msg_handler()
3064 vdpa_dev = dev->vdpa_dev; in vhost_user_msg_handler()
3068 if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) { in vhost_user_msg_handler()
3069 if (vdpa_dev->ops->dev_conf(dev->vid)) in vhost_user_msg_handler()
3071 dev->ifname); in vhost_user_msg_handler()
3073 dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED; in vhost_user_msg_handler()
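
Note: vhost_user_msg_handler() is table-driven: each request id resolves to a handler with a description and an accepts_fd flag (see the lines 2969-2972 fragments above), and callbacks take a struct virtio_net ** because some handlers (numa_realloc, translate_ring_addresses) replace the device pointer. A types-only structural sketch; all names are illustrative, not the DPDK definitions:

#include <stdbool.h>

struct vhu_msg_context_sketch;	/* message header, payload, fds */
struct virtio_net_sketch;	/* per-device state */

/* Handlers may reallocate the device, so they receive a pointer to
 * the device pointer and update it in place. */
typedef int (*vhost_msg_cb)(struct virtio_net_sketch **pdev,
			    struct vhu_msg_context_sketch *ctx, int fd);

struct vhost_msg_handler_sketch {
	const char *description;  /* used in the log lines above */
	vhost_msg_cb callback;
	bool accepts_fd;          /* else fds are validated away first */
};
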
3080 static int process_slave_message_reply(struct virtio_net *dev, in process_slave_message_reply() argument
3089 ret = read_vhost_message(dev, dev->slave_req_fd, &msg_reply); in process_slave_message_reply()
3093 dev->ifname); in process_slave_message_reply()
3095 VHOST_LOG_CONFIG(INFO, "(%s) vhost peer closed\n", dev->ifname); in process_slave_message_reply()
3103 dev->ifname, msg_reply.msg.request.slave, ctx->msg.request.slave); in process_slave_message_reply()
3111 rte_spinlock_unlock(&dev->slave_req_lock); in process_slave_message_reply()
3116 vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm) in vhost_user_iotlb_miss() argument
3132 ret = send_vhost_message(dev, dev->slave_req_fd, &ctx); in vhost_user_iotlb_miss()
3135 dev->ifname, ret); in vhost_user_iotlb_miss()
3143 vhost_user_slave_config_change(struct virtio_net *dev, bool need_reply) in vhost_user_slave_config_change() argument
3157 ret = send_vhost_slave_message(dev, &ctx); in vhost_user_slave_config_change()
3160 dev->ifname, ret); in vhost_user_slave_config_change()
3164 return process_slave_message_reply(dev, &ctx); in vhost_user_slave_config_change()
3170 struct virtio_net *dev; in rte_vhost_slave_config_change() local
3172 dev = get_device(vid); in rte_vhost_slave_config_change()
3173 if (!dev) in rte_vhost_slave_config_change()
3176 return vhost_user_slave_config_change(dev, need_reply); in rte_vhost_slave_config_change()
3179 static int vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev, in vhost_user_slave_set_vring_host_notifier() argument
3205 ret = send_vhost_slave_message(dev, &ctx); in vhost_user_slave_set_vring_host_notifier()
3208 dev->ifname, ret); in vhost_user_slave_set_vring_host_notifier()
3212 return process_slave_message_reply(dev, &ctx); in vhost_user_slave_set_vring_host_notifier()
3217 struct virtio_net *dev; in rte_vhost_host_notifier_ctrl() local
3223 dev = get_device(vid); in rte_vhost_host_notifier_ctrl()
3224 if (!dev) in rte_vhost_host_notifier_ctrl()
3227 vdpa_dev = dev->vdpa_dev; in rte_vhost_host_notifier_ctrl()
3231 if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) || in rte_vhost_host_notifier_ctrl()
3232 !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) || in rte_vhost_host_notifier_ctrl()
3233 !(dev->protocol_features & in rte_vhost_host_notifier_ctrl()
3235 !(dev->protocol_features & in rte_vhost_host_notifier_ctrl()
3237 !(dev->protocol_features & in rte_vhost_host_notifier_ctrl()
3243 q_last = dev->nr_vring - 1; in rte_vhost_host_notifier_ctrl()
3245 if (qid >= dev->nr_vring) in rte_vhost_host_notifier_ctrl()
3266 if (vhost_user_slave_set_vring_host_notifier(dev, i, in rte_vhost_host_notifier_ctrl()
3275 vhost_user_slave_set_vring_host_notifier(dev, i, -1, in rte_vhost_host_notifier_ctrl()