Lines matching refs:dev — references to the device handle dev (struct virtio_net *) in DPDK's vhost-user back-end, vhost_user.c. Each entry shows the source line number, the code, and the enclosing function; "argument" and "local" mark the lines where dev is declared.

145 free_mem_region(struct virtio_net *dev)  in free_mem_region()  argument
150 if (!dev || !dev->mem) in free_mem_region()
153 for (i = 0; i < dev->mem->nregions; i++) { in free_mem_region()
154 reg = &dev->mem->regions[i]; in free_mem_region()
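The free_mem_region() entries above (source lines 145-154) show only the dev-referencing lines: the guard against a missing memory table and the walk over dev->mem->regions. Below is a minimal sketch of that teardown pattern; the *_sketch names and struct layouts are simplified stand-ins for DPDK's internal types, and the per-region munmap() is inferred from the surrounding code rather than shown in the listing.

#include <stdint.h>
#include <sys/mman.h>

/* Simplified stand-ins for struct rte_vhost_memory / rte_vhost_mem_region. */
struct region_sketch {
    void    *mmap_addr;
    uint64_t mmap_size;
};

struct mem_table_sketch {
    uint32_t nregions;
    struct region_sketch regions[8];
};

struct dev_sketch {
    struct mem_table_sketch *mem;
};

/* Unmap every guest memory region; tolerate a device with no table yet. */
static void free_mem_region_sketch(struct dev_sketch *dev)
{
    uint32_t i;

    if (!dev || !dev->mem)
        return;

    for (i = 0; i < dev->mem->nregions; i++) {
        struct region_sketch *reg = &dev->mem->regions[i];

        if (reg->mmap_addr) {
            munmap(reg->mmap_addr, reg->mmap_size);
            reg->mmap_addr = NULL;
        }
    }
}
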
163 vhost_backend_cleanup(struct virtio_net *dev) in vhost_backend_cleanup() argument
165 if (dev->mem) { in vhost_backend_cleanup()
166 free_mem_region(dev); in vhost_backend_cleanup()
167 rte_free(dev->mem); in vhost_backend_cleanup()
168 dev->mem = NULL; in vhost_backend_cleanup()
171 rte_free(dev->guest_pages); in vhost_backend_cleanup()
172 dev->guest_pages = NULL; in vhost_backend_cleanup()
174 if (dev->log_addr) { in vhost_backend_cleanup()
175 munmap((void *)(uintptr_t)dev->log_addr, dev->log_size); in vhost_backend_cleanup()
176 dev->log_addr = 0; in vhost_backend_cleanup()
179 if (dev->inflight_info) { in vhost_backend_cleanup()
180 if (dev->inflight_info->addr) { in vhost_backend_cleanup()
181 munmap(dev->inflight_info->addr, in vhost_backend_cleanup()
182 dev->inflight_info->size); in vhost_backend_cleanup()
183 dev->inflight_info->addr = NULL; in vhost_backend_cleanup()
186 if (dev->inflight_info->fd >= 0) { in vhost_backend_cleanup()
187 close(dev->inflight_info->fd); in vhost_backend_cleanup()
188 dev->inflight_info->fd = -1; in vhost_backend_cleanup()
191 free(dev->inflight_info); in vhost_backend_cleanup()
192 dev->inflight_info = NULL; in vhost_backend_cleanup()
195 if (dev->slave_req_fd >= 0) { in vhost_backend_cleanup()
196 close(dev->slave_req_fd); in vhost_backend_cleanup()
197 dev->slave_req_fd = -1; in vhost_backend_cleanup()
200 if (dev->postcopy_ufd >= 0) { in vhost_backend_cleanup()
201 close(dev->postcopy_ufd); in vhost_backend_cleanup()
202 dev->postcopy_ufd = -1; in vhost_backend_cleanup()
205 dev->postcopy_listening = 0; in vhost_backend_cleanup()
209 vhost_user_notify_queue_state(struct virtio_net *dev, uint16_t index, in vhost_user_notify_queue_state() argument
212 struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev; in vhost_user_notify_queue_state()
213 struct vhost_virtqueue *vq = dev->virtqueue[index]; in vhost_user_notify_queue_state()
217 vhost_enable_guest_notification(dev, vq, vq->notif_enable); in vhost_user_notify_queue_state()
220 vdpa_dev->ops->set_vring_state(dev->vid, index, enable); in vhost_user_notify_queue_state()
222 if (dev->notify_ops->vring_state_changed) in vhost_user_notify_queue_state()
223 dev->notify_ops->vring_state_changed(dev->vid, in vhost_user_notify_queue_state()
247 struct virtio_net *dev = *pdev; in vhost_user_reset_owner() local
252 vhost_destroy_device_notify(dev); in vhost_user_reset_owner()
254 cleanup_device(dev, 0); in vhost_user_reset_owner()
255 reset_device(dev); in vhost_user_reset_owner()
266 struct virtio_net *dev = *pdev; in vhost_user_get_features() local
272 rte_vhost_driver_get_features(dev->ifname, &features); in vhost_user_get_features()
288 struct virtio_net *dev = *pdev; in vhost_user_get_queue_num() local
294 rte_vhost_driver_get_queue_num(dev->ifname, &queue_num); in vhost_user_get_queue_num()
310 struct virtio_net *dev = *pdev; in vhost_user_set_features() local
318 rte_vhost_driver_get_features(dev->ifname, &vhost_features); in vhost_user_set_features()
322 dev->vid); in vhost_user_set_features()
323 dev->flags |= VIRTIO_DEV_FEATURES_FAILED; in vhost_user_set_features()
324 dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK; in vhost_user_set_features()
329 if (dev->flags & VIRTIO_DEV_RUNNING) { in vhost_user_set_features()
330 if (dev->features == features) in vhost_user_set_features()
338 if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) { in vhost_user_set_features()
341 dev->vid); in vhost_user_set_features()
345 if (dev->notify_ops->features_changed) in vhost_user_set_features()
346 dev->notify_ops->features_changed(dev->vid, features); in vhost_user_set_features()
349 dev->features = features; in vhost_user_set_features()
350 if (dev->features & in vhost_user_set_features()
354 dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf); in vhost_user_set_features()
356 dev->vhost_hlen = sizeof(struct virtio_net_hdr); in vhost_user_set_features()
359 "negotiated Virtio features: 0x%" PRIx64 "\n", dev->features); in vhost_user_set_features()
362 dev->vid, in vhost_user_set_features()
363 (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off", in vhost_user_set_features()
364 (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off"); in vhost_user_set_features()
366 if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) && in vhost_user_set_features()
367 !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) { in vhost_user_set_features()
373 while (dev->nr_vring > 2) { in vhost_user_set_features()
376 vq = dev->virtqueue[--dev->nr_vring]; in vhost_user_set_features()
380 dev->virtqueue[dev->nr_vring] = NULL; in vhost_user_set_features()
382 cleanup_vq_inflight(dev, vq); in vhost_user_set_features()
383 free_vq(dev, vq); in vhost_user_set_features()
387 vdpa_dev = dev->vdpa_dev; in vhost_user_set_features()
389 vdpa_dev->ops->set_features(dev->vid); in vhost_user_set_features()
391 dev->flags &= ~VIRTIO_DEV_FEATURES_FAILED; in vhost_user_set_features()
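vhost_user_set_features() (lines 310-391) stores the negotiated feature bits and, among other things, picks the virtio-net header length the datapath will use: the longer header when mergeable RX buffers or VIRTIO 1.0 are negotiated, the legacy 10-byte header otherwise. The listing elides the exact feature mask at line 350, so the self-contained illustration below uses the two bits that the log message at lines 363-364 reports; header layouts and bit numbers come from the VIRTIO specification.

#include <stdint.h>
#include <stdio.h>

/* Standard virtio-net header layouts (VIRTIO spec). */
struct virtio_net_hdr {
    uint8_t  flags;
    uint8_t  gso_type;
    uint16_t hdr_len;
    uint16_t gso_size;
    uint16_t csum_start;
    uint16_t csum_offset;
};

struct virtio_net_hdr_mrg_rxbuf {
    struct virtio_net_hdr hdr;
    uint16_t num_buffers;
};

/* Feature bit numbers from the VIRTIO spec. */
#define VIRTIO_NET_F_MRG_RXBUF 15
#define VIRTIO_F_VERSION_1     32

/* Pick the per-packet header length dictated by the negotiated features. */
static size_t vhost_hdr_len(uint64_t features)
{
    if (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
                    (1ULL << VIRTIO_F_VERSION_1)))
        return sizeof(struct virtio_net_hdr_mrg_rxbuf);
    return sizeof(struct virtio_net_hdr);
}

int main(void)
{
    printf("legacy hdr: %zu bytes, mrg/modern hdr: %zu bytes\n",
           vhost_hdr_len(0), vhost_hdr_len(1ULL << VIRTIO_F_VERSION_1));
    return 0;
}
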
403 struct virtio_net *dev = *pdev; in vhost_user_set_vring_num() local
404 struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index]; in vhost_user_set_vring_num()
420 if (!vq_is_packed(dev)) { in vhost_user_set_vring_num()
434 if (vq_is_packed(dev)) { in vhost_user_set_vring_num()
482 numa_realloc(struct virtio_net *dev, int index) in numa_realloc() argument
492 if (dev->flags & VIRTIO_DEV_RUNNING) in numa_realloc()
493 return dev; in numa_realloc()
495 old_dev = dev; in numa_realloc()
496 vq = old_vq = dev->virtqueue[index]; in numa_realloc()
507 return dev; in numa_realloc()
514 return dev; in numa_realloc()
518 if (vq_is_packed(dev)) { in numa_realloc()
564 dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode); in numa_realloc()
565 if (!dev) { in numa_realloc()
566 dev = old_dev; in numa_realloc()
570 memcpy(dev, old_dev, sizeof(*dev)); in numa_realloc()
575 dev->virtqueue[index] = vq; in numa_realloc()
576 vhost_devices[dev->vid] = dev; in numa_realloc()
579 vhost_user_iotlb_init(dev, index); in numa_realloc()
581 return dev; in numa_realloc()
585 numa_realloc(struct virtio_net *dev, int index __rte_unused) in numa_realloc() argument
587 return dev; in numa_realloc()
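numa_realloc() (lines 482-587; the second definition is the pass-through stub built when NUMA awareness is compiled out) re-allocates the device and virtqueue objects on the NUMA node that backs the guest memory, never touching a running device and falling back to the original objects if allocation fails. A reduced sketch of that move-or-keep pattern follows; plain malloc() stands in for rte_malloc_socket(), and the bookkeeping shown in the listing (reallocating the virtqueue, re-registering in vhost_devices[], re-initializing the IOTLB) is elided.

#include <stdlib.h>
#include <string.h>

struct dev_s {                  /* simplified device object */
    int flags;
    /* ... virtqueues, memory table, ... */
};
#define DEV_RUNNING 0x1

/* Return either a fresh copy of *dev placed "near" new_node or, on any
 * failure, the original pointer so the caller can always keep going. */
static struct dev_s *numa_realloc_sketch(struct dev_s *dev, int new_node)
{
    struct dev_s *old_dev = dev;
    struct dev_s *new_dev;

    (void)new_node;             /* malloc() has no node argument; rte_malloc_socket() does */

    if (dev->flags & DEV_RUNNING)
        return dev;             /* never migrate a live device */

    new_dev = malloc(sizeof(*new_dev));
    if (new_dev == NULL)
        return old_dev;         /* keep using the original allocation */

    memcpy(new_dev, old_dev, sizeof(*new_dev));
    free(old_dev);              /* the real code rte_free()s the old objects */
    return new_dev;
}
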
593 qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len) in qva_to_vva() argument
598 if (unlikely(!dev || !dev->mem)) in qva_to_vva()
602 for (i = 0; i < dev->mem->nregions; i++) { in qva_to_vva()
603 r = &dev->mem->regions[i]; in qva_to_vva()
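qva_to_vva() (lines 593-603) converts a QEMU (front-end) virtual address carried in a vhost-user message into a back-end virtual address by walking the registered memory regions. The listing shows only the guard and the region walk; the in-range test, offset arithmetic, and length clamping in the sketch below follow the usual region-translation scheme, with field names mirroring the public struct rte_vhost_mem_region.

#include <stdint.h>

/* Simplified region descriptor (cf. rte_vhost_mem_region in rte_vhost.h). */
struct mem_region_sketch {
    uint64_t guest_user_addr;   /* front-end (QEMU) virtual address */
    uint64_t host_user_addr;    /* address of the same bytes in this process */
    uint64_t size;
};

/* Translate qva to a host virtual address by finding the containing region
 * and applying the host/guest offset; *len is clamped so the caller never
 * reads past the region's end. Returns 0 if no region contains qva. */
static uint64_t qva_to_vva_sketch(const struct mem_region_sketch *regions,
                                  uint32_t nregions, uint64_t qva, uint64_t *len)
{
    uint32_t i;

    for (i = 0; i < nregions; i++) {
        const struct mem_region_sketch *r = &regions[i];

        if (qva >= r->guest_user_addr && qva < r->guest_user_addr + r->size) {
            uint64_t room = r->guest_user_addr + r->size - qva;

            if (*len > room)
                *len = room;
            return qva - r->guest_user_addr + r->host_user_addr;
        }
    }

    *len = 0;
    return 0;
}
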
628 ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq, in ring_addr_to_vva() argument
631 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) { in ring_addr_to_vva()
635 vva = vhost_iova_to_vva(dev, vq, ra, in ring_addr_to_vva()
642 return qva_to_vva(dev, ra, size); in ring_addr_to_vva()
646 log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq) in log_addr_to_gpa() argument
651 log_gpa = translate_log_addr(dev, vq, vq->ring_addrs.log_guest_addr); in log_addr_to_gpa()
658 translate_ring_addresses(struct virtio_net *dev, int vq_index) in translate_ring_addresses() argument
660 struct vhost_virtqueue *vq = dev->virtqueue[vq_index]; in translate_ring_addresses()
666 log_addr_to_gpa(dev, vq); in translate_ring_addresses()
670 dev->vid); in translate_ring_addresses()
671 return dev; in translate_ring_addresses()
675 if (vq_is_packed(dev)) { in translate_ring_addresses()
678 ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len); in translate_ring_addresses()
684 dev->vid); in translate_ring_addresses()
685 return dev; in translate_ring_addresses()
688 dev = numa_realloc(dev, vq_index); in translate_ring_addresses()
689 vq = dev->virtqueue[vq_index]; in translate_ring_addresses()
694 (uintptr_t)ring_addr_to_vva(dev, in translate_ring_addresses()
700 dev->vid); in translate_ring_addresses()
701 return dev; in translate_ring_addresses()
706 (uintptr_t)ring_addr_to_vva(dev, in translate_ring_addresses()
712 dev->vid); in translate_ring_addresses()
713 return dev; in translate_ring_addresses()
717 return dev; in translate_ring_addresses()
722 return dev; in translate_ring_addresses()
725 vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev, in translate_ring_addresses()
730 dev->vid); in translate_ring_addresses()
731 return dev; in translate_ring_addresses()
734 dev = numa_realloc(dev, vq_index); in translate_ring_addresses()
735 vq = dev->virtqueue[vq_index]; in translate_ring_addresses()
739 if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) in translate_ring_addresses()
742 vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev, in translate_ring_addresses()
747 dev->vid); in translate_ring_addresses()
748 return dev; in translate_ring_addresses()
753 if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) in translate_ring_addresses()
756 vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev, in translate_ring_addresses()
761 dev->vid); in translate_ring_addresses()
762 return dev; in translate_ring_addresses()
777 dev->vid, vq->desc); in translate_ring_addresses()
779 dev->vid, vq->avail); in translate_ring_addresses()
781 dev->vid, vq->used); in translate_ring_addresses()
783 dev->vid, vq->log_guest_addr); in translate_ring_addresses()
785 return dev; in translate_ring_addresses()
796 struct virtio_net *dev = *pdev; in vhost_user_set_vring_addr() local
804 if (dev->mem == NULL) in vhost_user_set_vring_addr()
808 vq = dev->virtqueue[msg->payload.addr.index]; in vhost_user_set_vring_addr()
818 vring_invalidate(dev, vq); in vhost_user_set_vring_addr()
820 if ((vq->enabled && (dev->features & in vhost_user_set_vring_addr()
823 dev = translate_ring_addresses(dev, msg->payload.addr.index); in vhost_user_set_vring_addr()
824 if (!dev) in vhost_user_set_vring_addr()
827 *pdev = dev; in vhost_user_set_vring_addr()
841 struct virtio_net *dev = *pdev; in vhost_user_set_vring_base() local
842 struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index]; in vhost_user_set_vring_base()
848 if (vq_is_packed(dev)) { in vhost_user_set_vring_base()
871 add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr, in add_one_guest_page() argument
877 if (dev->nr_guest_pages == dev->max_guest_pages) { in add_one_guest_page()
878 dev->max_guest_pages *= 2; in add_one_guest_page()
879 old_pages = dev->guest_pages; in add_one_guest_page()
880 dev->guest_pages = rte_realloc(dev->guest_pages, in add_one_guest_page()
881 dev->max_guest_pages * sizeof(*page), in add_one_guest_page()
883 if (dev->guest_pages == NULL) { in add_one_guest_page()
890 if (dev->nr_guest_pages > 0) { in add_one_guest_page()
891 last_page = &dev->guest_pages[dev->nr_guest_pages - 1]; in add_one_guest_page()
900 page = &dev->guest_pages[dev->nr_guest_pages++]; in add_one_guest_page()
909 add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg, in add_guest_pages() argument
922 if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0) in add_guest_pages()
933 if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, in add_guest_pages()
943 if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) { in add_guest_pages()
944 qsort((void *)dev->guest_pages, dev->nr_guest_pages, in add_guest_pages()
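add_one_guest_page() and add_guest_pages() (lines 871-944) build the guest-physical to host-physical page table used by dirty logging and async copy: the array starts small (8 entries, per vhost_user_set_mem_table() below) and doubles when full, adjacent entries are merged when their host-physical ranges are contiguous, and once the table crosses VHOST_BINARY_SEARCH_THRESH it is qsort()ed so later lookups can binary-search. A compact sketch of the grow-and-merge step, with plain realloc() standing in for rte_realloc() and a simplified table struct:

#include <stdint.h>
#include <stdlib.h>

struct guest_page_sketch {      /* simplified stand-in for struct guest_page */
    uint64_t guest_phys_addr;
    uint64_t host_phys_addr;
    uint64_t size;
};

struct page_table_sketch {
    struct guest_page_sketch *pages;
    uint32_t nr_pages;
    uint32_t max_pages;         /* must start non-zero (8 in the real code) */
};

static int add_one_guest_page_sketch(struct page_table_sketch *tbl,
                                     uint64_t gpa, uint64_t hpa, uint64_t size)
{
    /* Double the array when it is full (rte_realloc() in the real code). */
    if (tbl->nr_pages == tbl->max_pages) {
        struct guest_page_sketch *np;

        tbl->max_pages *= 2;
        np = realloc(tbl->pages, tbl->max_pages * sizeof(*np));
        if (np == NULL) {
            free(tbl->pages);
            tbl->pages = NULL;
            return -1;
        }
        tbl->pages = np;
    }

    /* Merge with the previous entry when the host-physical range continues. */
    if (tbl->nr_pages > 0) {
        struct guest_page_sketch *last = &tbl->pages[tbl->nr_pages - 1];

        if (hpa == last->host_phys_addr + last->size) {
            last->size += size;
            return 0;
        }
    }

    tbl->pages[tbl->nr_pages].guest_phys_addr = gpa;
    tbl->pages[tbl->nr_pages].host_phys_addr = hpa;
    tbl->pages[tbl->nr_pages].size = size;
    tbl->nr_pages++;
    return 0;
}
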
954 dump_guest_pages(struct virtio_net *dev) in dump_guest_pages() argument
959 for (i = 0; i < dev->nr_guest_pages; i++) { in dump_guest_pages()
960 page = &dev->guest_pages[i]; in dump_guest_pages()
974 #define dump_guest_pages(dev) argument
1005 struct virtio_net *dev = *pdev; in vhost_user_set_mem_table() local
1024 if (dev->mem && !vhost_memory_changed(memory, dev->mem)) { in vhost_user_set_mem_table()
1026 "(%d) memory regions not changed\n", dev->vid); in vhost_user_set_mem_table()
1033 if (dev->mem) { in vhost_user_set_mem_table()
1034 if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) { in vhost_user_set_mem_table()
1035 struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev; in vhost_user_set_mem_table()
1038 vdpa_dev->ops->dev_close(dev->vid); in vhost_user_set_mem_table()
1039 dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED; in vhost_user_set_mem_table()
1041 free_mem_region(dev); in vhost_user_set_mem_table()
1042 rte_free(dev->mem); in vhost_user_set_mem_table()
1043 dev->mem = NULL; in vhost_user_set_mem_table()
1047 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) in vhost_user_set_mem_table()
1048 for (i = 0; i < dev->nr_vring; i++) in vhost_user_set_mem_table()
1049 vhost_user_iotlb_flush_all(dev->virtqueue[i]); in vhost_user_set_mem_table()
1051 dev->nr_guest_pages = 0; in vhost_user_set_mem_table()
1052 if (dev->guest_pages == NULL) { in vhost_user_set_mem_table()
1053 dev->max_guest_pages = 8; in vhost_user_set_mem_table()
1054 dev->guest_pages = rte_zmalloc(NULL, in vhost_user_set_mem_table()
1055 dev->max_guest_pages * in vhost_user_set_mem_table()
1058 if (dev->guest_pages == NULL) { in vhost_user_set_mem_table()
1062 dev->vid); in vhost_user_set_mem_table()
1067 dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) + in vhost_user_set_mem_table()
1069 if (dev->mem == NULL) { in vhost_user_set_mem_table()
1072 dev->vid); in vhost_user_set_mem_table()
1075 dev->mem->nregions = memory->nregions; in vhost_user_set_mem_table()
1078 reg = &dev->mem->regions[i]; in vhost_user_set_mem_table()
1135 populate = dev->async_copy ? MAP_POPULATE : 0; in vhost_user_set_mem_table()
1150 if (dev->async_copy) in vhost_user_set_mem_table()
1151 if (add_guest_pages(dev, reg, alignment) < 0) { in vhost_user_set_mem_table()
1176 if (dev->postcopy_listening) { in vhost_user_set_mem_table()
1186 if (dev->postcopy_listening) { in vhost_user_set_mem_table()
1214 reg = &dev->mem->regions[i]; in vhost_user_set_mem_table()
1226 if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, in vhost_user_set_mem_table()
1230 i, dev->postcopy_ufd, in vhost_user_set_mem_table()
1246 for (i = 0; i < dev->nr_vring; i++) { in vhost_user_set_mem_table()
1247 struct vhost_virtqueue *vq = dev->virtqueue[i]; in vhost_user_set_mem_table()
1258 vring_invalidate(dev, vq); in vhost_user_set_mem_table()
1260 dev = translate_ring_addresses(dev, i); in vhost_user_set_mem_table()
1261 if (!dev) { in vhost_user_set_mem_table()
1262 dev = *pdev; in vhost_user_set_mem_table()
1266 *pdev = dev; in vhost_user_set_mem_table()
1270 dump_guest_pages(dev); in vhost_user_set_mem_table()
1275 free_mem_region(dev); in vhost_user_set_mem_table()
1276 rte_free(dev->mem); in vhost_user_set_mem_table()
1277 dev->mem = NULL; in vhost_user_set_mem_table()
1279 rte_free(dev->guest_pages); in vhost_user_set_mem_table()
1280 dev->guest_pages = NULL; in vhost_user_set_mem_table()
1287 vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq) in vq_is_ready() argument
1294 if (vq_is_packed(dev)) in vq_is_ready()
1309 virtio_is_ready(struct virtio_net *dev) in virtio_is_ready() argument
1312 uint32_t i, nr_vring = dev->nr_vring; in virtio_is_ready()
1314 if (dev->flags & VIRTIO_DEV_READY) in virtio_is_ready()
1317 if (!dev->nr_vring) in virtio_is_ready()
1320 if (dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) { in virtio_is_ready()
1323 if (dev->nr_vring < nr_vring) in virtio_is_ready()
1328 vq = dev->virtqueue[i]; in virtio_is_ready()
1330 if (!vq_is_ready(dev, vq)) in virtio_is_ready()
1335 if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS)) in virtio_is_ready()
1336 if (!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)) in virtio_is_ready()
1339 dev->flags |= VIRTIO_DEV_READY; in virtio_is_ready()
1341 if (!(dev->flags & VIRTIO_DEV_RUNNING)) in virtio_is_ready()
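vq_is_ready() and virtio_is_ready() (lines 1287-1341) gate the new_device() callback: the device is reported running only once every negotiated virtqueue has its rings translated and its kick/call file descriptors installed, and, when the STATUS protocol feature was negotiated, only after the front-end sets DRIVER_OK. The sketch below covers the split-ring case with simplified fields; the packed-ring variant checks desc_packed plus the event suppression structures, the status/DRIVER_OK check is omitted, and the exact condition set varies across DPDK versions.

#include <stdbool.h>

/* Simplified virtqueue view: just the fields the readiness test looks at. */
struct vq_view_sketch {
    void *desc;
    void *avail;
    void *used;
    int   kickfd;      /* -1 (or "uninitialized") until SET_VRING_KICK */
    int   callfd;      /* -1 (or "uninitialized") until SET_VRING_CALL */
    bool  enabled;     /* toggled by VHOST_USER_SET_VRING_ENABLE */
};

static bool vq_is_ready_sketch(const struct vq_view_sketch *vq)
{
    return vq && vq->desc && vq->avail && vq->used &&
           vq->kickfd >= 0 && vq->callfd >= 0 && vq->enabled;
}

/* The device is ready only when every negotiated virtqueue passes the check. */
static bool virtio_is_ready_sketch(const struct vq_view_sketch *vqs,
                                   unsigned int nr_vring)
{
    unsigned int i;

    if (nr_vring == 0)
        return false;
    for (i = 0; i < nr_vring; i++)
        if (!vq_is_ready_sketch(&vqs[i]))
            return false;
    return true;
}
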
1415 struct virtio_net *dev = *pdev; in vhost_user_get_inflight_fd() local
1426 if (dev->inflight_info == NULL) { in vhost_user_get_inflight_fd()
1427 dev->inflight_info = calloc(1, in vhost_user_get_inflight_fd()
1429 if (!dev->inflight_info) { in vhost_user_get_inflight_fd()
1434 dev->inflight_info->fd = -1; in vhost_user_get_inflight_fd()
1445 if (vq_is_packed(dev)) in vhost_user_get_inflight_fd()
1460 if (dev->inflight_info->addr) { in vhost_user_get_inflight_fd()
1461 munmap(dev->inflight_info->addr, dev->inflight_info->size); in vhost_user_get_inflight_fd()
1462 dev->inflight_info->addr = NULL; in vhost_user_get_inflight_fd()
1465 if (dev->inflight_info->fd >= 0) { in vhost_user_get_inflight_fd()
1466 close(dev->inflight_info->fd); in vhost_user_get_inflight_fd()
1467 dev->inflight_info->fd = -1; in vhost_user_get_inflight_fd()
1470 dev->inflight_info->addr = addr; in vhost_user_get_inflight_fd()
1471 dev->inflight_info->size = msg->payload.inflight.mmap_size = mmap_size; in vhost_user_get_inflight_fd()
1472 dev->inflight_info->fd = msg->fds[0] = fd; in vhost_user_get_inflight_fd()
1476 if (vq_is_packed(dev)) { in vhost_user_get_inflight_fd()
1506 struct virtio_net *dev = *pdev; in vhost_user_set_inflight_fd() local
1525 if (vq_is_packed(dev)) in vhost_user_set_inflight_fd()
1544 if (!dev->inflight_info) { in vhost_user_set_inflight_fd()
1545 dev->inflight_info = calloc(1, in vhost_user_set_inflight_fd()
1547 if (dev->inflight_info == NULL) { in vhost_user_set_inflight_fd()
1552 dev->inflight_info->fd = -1; in vhost_user_set_inflight_fd()
1555 if (dev->inflight_info->addr) { in vhost_user_set_inflight_fd()
1556 munmap(dev->inflight_info->addr, dev->inflight_info->size); in vhost_user_set_inflight_fd()
1557 dev->inflight_info->addr = NULL; in vhost_user_set_inflight_fd()
1567 if (dev->inflight_info->fd >= 0) { in vhost_user_set_inflight_fd()
1568 close(dev->inflight_info->fd); in vhost_user_set_inflight_fd()
1569 dev->inflight_info->fd = -1; in vhost_user_set_inflight_fd()
1572 dev->inflight_info->fd = fd; in vhost_user_set_inflight_fd()
1573 dev->inflight_info->addr = addr; in vhost_user_set_inflight_fd()
1574 dev->inflight_info->size = mmap_size; in vhost_user_set_inflight_fd()
1577 vq = dev->virtqueue[i]; in vhost_user_set_inflight_fd()
1581 if (vq_is_packed(dev)) { in vhost_user_set_inflight_fd()
1598 struct virtio_net *dev = *pdev; in vhost_user_set_vring_call() local
1615 vq = dev->virtqueue[file.index]; in vhost_user_set_vring_call()
1619 vhost_user_notify_queue_state(dev, file.index, 0); in vhost_user_set_vring_call()
1660 vhost_check_queue_inflights_split(struct virtio_net *dev, in vhost_check_queue_inflights_split() argument
1669 if (!(dev->protocol_features & in vhost_check_queue_inflights_split()
1745 vhost_check_queue_inflights_packed(struct virtio_net *dev, in vhost_check_queue_inflights_packed() argument
1753 if (!(dev->protocol_features & in vhost_check_queue_inflights_packed()
1842 struct virtio_net *dev = *pdev; in vhost_user_set_vring_kick() local
1860 dev = translate_ring_addresses(dev, file.index); in vhost_user_set_vring_kick()
1861 if (!dev) { in vhost_user_set_vring_kick()
1868 *pdev = dev; in vhost_user_set_vring_kick()
1870 vq = dev->virtqueue[file.index]; in vhost_user_set_vring_kick()
1877 if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) { in vhost_user_set_vring_kick()
1879 if (dev->notify_ops->vring_state_changed) in vhost_user_set_vring_kick()
1880 dev->notify_ops->vring_state_changed( in vhost_user_set_vring_kick()
1881 dev->vid, file.index, 1); in vhost_user_set_vring_kick()
1886 vhost_user_notify_queue_state(dev, file.index, 0); in vhost_user_set_vring_kick()
1893 if (vq_is_packed(dev)) { in vhost_user_set_vring_kick()
1894 if (vhost_check_queue_inflights_packed(dev, vq)) { in vhost_user_set_vring_kick()
1900 if (vhost_check_queue_inflights_split(dev, vq)) { in vhost_user_set_vring_kick()
1918 struct virtio_net *dev = *pdev; in vhost_user_get_vring_base() local
1919 struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index]; in vhost_user_get_vring_base()
1926 vhost_destroy_device_notify(dev); in vhost_user_get_vring_base()
1928 dev->flags &= ~VIRTIO_DEV_READY; in vhost_user_get_vring_base()
1929 dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED; in vhost_user_get_vring_base()
1932 if (vq_is_packed(dev)) { in vhost_user_get_vring_base()
1964 if (vq_is_packed(dev)) { in vhost_user_get_vring_base()
1984 vring_invalidate(dev, vq); in vhost_user_get_vring_base()
1998 struct virtio_net *dev = *pdev; in vhost_user_set_vring_enable() local
2009 if (enable && dev->virtqueue[index]->async_registered) { in vhost_user_set_vring_enable()
2010 if (dev->virtqueue[index]->async_pkts_inflight_n) { in vhost_user_set_vring_enable()
2017 dev->virtqueue[index]->enabled = enable; in vhost_user_set_vring_enable()
2027 struct virtio_net *dev = *pdev; in vhost_user_get_protocol_features() local
2033 rte_vhost_driver_get_features(dev->ifname, &features); in vhost_user_get_protocol_features()
2034 rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features); in vhost_user_get_protocol_features()
2048 struct virtio_net *dev = *pdev; in vhost_user_set_protocol_features() local
2055 rte_vhost_driver_get_protocol_features(dev->ifname, in vhost_user_set_protocol_features()
2060 dev->vid); in vhost_user_set_protocol_features()
2064 dev->protocol_features = protocol_features; in vhost_user_set_protocol_features()
2067 dev->protocol_features); in vhost_user_set_protocol_features()
2076 struct virtio_net *dev = *pdev; in vhost_user_set_log_base() local
2126 if (dev->log_addr) { in vhost_user_set_log_base()
2127 munmap((void *)(uintptr_t)dev->log_addr, dev->log_size); in vhost_user_set_log_base()
2129 dev->log_addr = (uint64_t)(uintptr_t)addr; in vhost_user_set_log_base()
2130 dev->log_base = dev->log_addr + off; in vhost_user_set_log_base()
2131 dev->log_size = size; in vhost_user_set_log_base()
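vhost_user_set_log_base() (lines 2076-2131) maps the dirty-page log shared with the front-end and records its base and size so the datapath can mark written pages during live migration; any mapping from a previous SET_LOG_BASE is unmapped first, exactly as lines 2126-2131 show. The self-contained sketch below mirrors that mmap-and-replace step; the (fd, mmap size, mmap offset) triple is how the vhost-user SET_LOG_BASE payload describes the log area.

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

struct log_state_sketch {
    uint64_t log_addr;   /* start of the mapping */
    uint64_t log_base;   /* start of the usable log area (addr + offset) */
    uint64_t log_size;
};

/* Map the dirty-log memory handed over as (fd, size, offset), replacing any
 * previous mapping. Returns 0 on success, -1 on mmap failure. */
static int set_log_base_sketch(struct log_state_sketch *st, int fd,
                               uint64_t size, uint64_t off)
{
    void *addr = mmap(NULL, size + off, PROT_READ | PROT_WRITE,
                      MAP_SHARED, fd, 0);

    close(fd);                   /* the mapping keeps the memory alive */
    if (addr == MAP_FAILED)
        return -1;

    if (st->log_addr)
        munmap((void *)(uintptr_t)st->log_addr, st->log_size);

    st->log_addr = (uint64_t)(uintptr_t)addr;
    st->log_base = st->log_addr + off;
    st->log_size = size;
    return 0;
}
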
2172 struct virtio_net *dev = *pdev; in vhost_user_send_rarp() local
2182 memcpy(dev->mac.addr_bytes, mac, 6); in vhost_user_send_rarp()
2191 __atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE); in vhost_user_send_rarp()
2192 vdpa_dev = dev->vdpa_dev; in vhost_user_send_rarp()
2194 vdpa_dev->ops->migration_done(dev->vid); in vhost_user_send_rarp()
2203 struct virtio_net *dev = *pdev; in vhost_user_net_set_mtu() local
2216 dev->mtu = msg->payload.u64; in vhost_user_net_set_mtu()
2225 struct virtio_net *dev = *pdev; in vhost_user_set_req_fd() local
2238 if (dev->slave_req_fd >= 0) in vhost_user_set_req_fd()
2239 close(dev->slave_req_fd); in vhost_user_set_req_fd()
2241 dev->slave_req_fd = fd; in vhost_user_set_req_fd()
2311 static int is_vring_iotlb(struct virtio_net *dev, in is_vring_iotlb() argument
2315 if (vq_is_packed(dev)) in is_vring_iotlb()
2325 struct virtio_net *dev = *pdev; in vhost_user_iotlb_msg() local
2336 vva = qva_to_vva(dev, imsg->uaddr, &len); in vhost_user_iotlb_msg()
2340 for (i = 0; i < dev->nr_vring; i++) { in vhost_user_iotlb_msg()
2341 struct vhost_virtqueue *vq = dev->virtqueue[i]; in vhost_user_iotlb_msg()
2349 if (is_vring_iotlb(dev, vq, imsg)) in vhost_user_iotlb_msg()
2350 *pdev = dev = translate_ring_addresses(dev, i); in vhost_user_iotlb_msg()
2354 for (i = 0; i < dev->nr_vring; i++) { in vhost_user_iotlb_msg()
2355 struct vhost_virtqueue *vq = dev->virtqueue[i]; in vhost_user_iotlb_msg()
2363 if (is_vring_iotlb(dev, vq, imsg)) in vhost_user_iotlb_msg()
2364 vring_invalidate(dev, vq); in vhost_user_iotlb_msg()
2381 struct virtio_net *dev = *pdev; in vhost_user_set_postcopy_advise() local
2388 dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); in vhost_user_set_postcopy_advise()
2390 if (dev->postcopy_ufd == -1) { in vhost_user_set_postcopy_advise()
2397 if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) { in vhost_user_set_postcopy_advise()
2400 close(dev->postcopy_ufd); in vhost_user_set_postcopy_advise()
2401 dev->postcopy_ufd = -1; in vhost_user_set_postcopy_advise()
2404 msg->fds[0] = dev->postcopy_ufd; in vhost_user_set_postcopy_advise()
2409 dev->postcopy_ufd = -1; in vhost_user_set_postcopy_advise()
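vhost_user_set_postcopy_advise() (lines 2381-2409) opens a userfaultfd, performs the mandatory UFFDIO_API handshake, and returns the descriptor to the front-end so it can drive postcopy live migration. The Linux-only sketch below reproduces just that open-and-handshake step; error handling is reduced to perror(), and the caller is assumed to close the fd when done.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

/* Open a userfaultfd and negotiate the API version, as the postcopy-advise
 * handler does before handing the fd back in the reply message. */
static int open_userfaultfd_sketch(void)
{
    struct uffdio_api api = { .api = UFFD_API, .features = 0 };
    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

    if (ufd == -1) {
        perror("userfaultfd");
        return -1;
    }
    if (ioctl(ufd, UFFDIO_API, &api)) {
        perror("UFFDIO_API");
        close(ufd);
        return -1;
    }
    return ufd;
}
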
2421 struct virtio_net *dev = *pdev; in vhost_user_set_postcopy_listen() local
2426 if (dev->mem && dev->mem->nregions) { in vhost_user_set_postcopy_listen()
2431 dev->postcopy_listening = 1; in vhost_user_set_postcopy_listen()
2440 struct virtio_net *dev = *pdev; in vhost_user_postcopy_end() local
2445 dev->postcopy_listening = 0; in vhost_user_postcopy_end()
2446 if (dev->postcopy_ufd >= 0) { in vhost_user_postcopy_end()
2447 close(dev->postcopy_ufd); in vhost_user_postcopy_end()
2448 dev->postcopy_ufd = -1; in vhost_user_postcopy_end()
2462 struct virtio_net *dev = *pdev; in vhost_user_get_status() local
2467 msg->payload.u64 = dev->status; in vhost_user_get_status()
2478 struct virtio_net *dev = *pdev; in vhost_user_set_status() local
2490 dev->status = msg->payload.u64; in vhost_user_set_status()
2492 if ((dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK) && in vhost_user_set_status()
2493 (dev->flags & VIRTIO_DEV_FEATURES_FAILED)) { in vhost_user_set_status()
2499 dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK; in vhost_user_set_status()
2510 dev->status, in vhost_user_set_status()
2511 (dev->status == VIRTIO_DEVICE_STATUS_RESET), in vhost_user_set_status()
2512 !!(dev->status & VIRTIO_DEVICE_STATUS_ACK), in vhost_user_set_status()
2513 !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER), in vhost_user_set_status()
2514 !!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK), in vhost_user_set_status()
2515 !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK), in vhost_user_set_status()
2516 !!(dev->status & VIRTIO_DEVICE_STATUS_DEV_NEED_RESET), in vhost_user_set_status()
2517 !!(dev->status & VIRTIO_DEVICE_STATUS_FAILED)); in vhost_user_set_status()
2618 send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg) in send_vhost_slave_message() argument
2623 rte_spinlock_lock(&dev->slave_req_lock); in send_vhost_slave_message()
2625 ret = send_vhost_message(dev->slave_req_fd, msg); in send_vhost_slave_message()
2627 rte_spinlock_unlock(&dev->slave_req_lock); in send_vhost_slave_message()
2636 vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, in vhost_user_check_and_alloc_queue_pair() argument
2665 if (dev->virtqueue[vring_idx]) in vhost_user_check_and_alloc_queue_pair()
2668 return alloc_vring_queue(dev, vring_idx); in vhost_user_check_and_alloc_queue_pair()
2672 vhost_user_lock_all_queue_pairs(struct virtio_net *dev) in vhost_user_lock_all_queue_pairs() argument
2677 while (vq_num < dev->nr_vring) { in vhost_user_lock_all_queue_pairs()
2678 struct vhost_virtqueue *vq = dev->virtqueue[i]; in vhost_user_lock_all_queue_pairs()
2689 vhost_user_unlock_all_queue_pairs(struct virtio_net *dev) in vhost_user_unlock_all_queue_pairs() argument
2694 while (vq_num < dev->nr_vring) { in vhost_user_unlock_all_queue_pairs()
2695 struct vhost_virtqueue *vq = dev->virtqueue[i]; in vhost_user_unlock_all_queue_pairs()
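vhost_user_lock_all_queue_pairs() and its unlock counterpart (lines 2672-2695) take every virtqueue's access lock before a message that may modify ring state is handled, so datapath threads never observe a half-updated virtqueue; the counting loop skips NULL holes in dev->virtqueue[] and stops after nr_vring live queues. In the sketch below, pthread mutexes stand in for rte_spinlock_t and the MAX_VRING bound is illustrative (the original relies on the array's invariants instead of an explicit bound).

#include <pthread.h>

#define MAX_VRING 32            /* illustrative bound */

struct vq_slot_sketch {
    pthread_mutex_t access_lock;
    /* ... ring state ... */
};

/* vqs[] may contain NULL holes; keep scanning until nr_vring live queues
 * have been locked. */
static void lock_all_queue_pairs_sketch(struct vq_slot_sketch *vqs[MAX_VRING],
                                        unsigned int nr_vring)
{
    unsigned int i = 0, locked = 0;

    while (locked < nr_vring && i < MAX_VRING) {
        if (vqs[i]) {
            pthread_mutex_lock(&vqs[i]->access_lock);
            locked++;
        }
        i++;
    }
}
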
2708 struct virtio_net *dev; in vhost_user_msg_handler() local
2717 dev = get_device(vid); in vhost_user_msg_handler()
2718 if (dev == NULL) in vhost_user_msg_handler()
2721 if (!dev->notify_ops) { in vhost_user_msg_handler()
2722 dev->notify_ops = vhost_driver_callback_get(dev->ifname); in vhost_user_msg_handler()
2723 if (!dev->notify_ops) { in vhost_user_msg_handler()
2726 dev->ifname); in vhost_user_msg_handler()
2757 ret = vhost_user_check_and_alloc_queue_pair(dev, &msg); in vhost_user_msg_handler()
2788 if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) { in vhost_user_msg_handler()
2789 vhost_user_lock_all_queue_pairs(dev); in vhost_user_msg_handler()
2799 if (dev->extern_ops.pre_msg_handle) { in vhost_user_msg_handler()
2800 ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, in vhost_user_msg_handler()
2819 ret = vhost_message_handlers[request](&dev, &msg, fd); in vhost_user_msg_handler()
2848 dev->extern_ops.post_msg_handle) { in vhost_user_msg_handler()
2849 ret = (*dev->extern_ops.post_msg_handle)(dev->vid, in vhost_user_msg_handler()
2865 vhost_user_unlock_all_queue_pairs(dev); in vhost_user_msg_handler()
2891 for (i = 0; i < dev->nr_vring; i++) { in vhost_user_msg_handler()
2892 struct vhost_virtqueue *vq = dev->virtqueue[i]; in vhost_user_msg_handler()
2893 bool cur_ready = vq_is_ready(dev, vq); in vhost_user_msg_handler()
2897 vhost_user_notify_queue_state(dev, i, cur_ready); in vhost_user_msg_handler()
2902 if (!virtio_is_ready(dev)) in vhost_user_msg_handler()
2911 if (!(dev->flags & VIRTIO_DEV_RUNNING)) { in vhost_user_msg_handler()
2912 if (dev->notify_ops->new_device(dev->vid) == 0) in vhost_user_msg_handler()
2913 dev->flags |= VIRTIO_DEV_RUNNING; in vhost_user_msg_handler()
2916 vdpa_dev = dev->vdpa_dev; in vhost_user_msg_handler()
2920 if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) { in vhost_user_msg_handler()
2921 if (vdpa_dev->ops->dev_conf(dev->vid)) in vhost_user_msg_handler()
2925 dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED; in vhost_user_msg_handler()
2932 static int process_slave_message_reply(struct virtio_net *dev, in process_slave_message_reply() argument
2941 ret = read_vhost_message(dev->slave_req_fd, &msg_reply); in process_slave_message_reply()
2965 rte_spinlock_unlock(&dev->slave_req_lock); in process_slave_message_reply()
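send_vhost_slave_message() (lines 2618-2627) and process_slave_message_reply() (lines 2932-2965) together implement a request/reply exchange over the back-end ("slave") channel: the slave_req_lock is held from the send until the reply has been read, so concurrent requests cannot interleave on the socket. The sketch below shows only that locking discipline with a deliberately simplified frame; the real channel exchanges VhostUserMsg structures (optionally with ancillary file descriptors) under rte_spinlock, and the reply-id check is inferred from the reply-handling code rather than shown in the listing.

#include <pthread.h>
#include <stdint.h>
#include <unistd.h>

/* Simplified request/reply frame standing in for VhostUserMsg. */
struct msg_frame_sketch {
    uint32_t request;
    uint64_t payload;
};

struct slave_channel_sketch {
    int             fd;     /* connected AF_UNIX socket to the front-end */
    pthread_mutex_t lock;   /* serializes request/reply exchanges */
};

/* Send a request that expects a reply: hold the channel lock across the send
 * and the matching read so another caller cannot slip its own request in. */
static int slave_request_reply_sketch(struct slave_channel_sketch *ch,
                                      const struct msg_frame_sketch *req)
{
    struct msg_frame_sketch reply;
    int ret = -1;

    pthread_mutex_lock(&ch->lock);

    if (write(ch->fd, req, sizeof(*req)) != (ssize_t)sizeof(*req))
        goto out;
    if (read(ch->fd, &reply, sizeof(reply)) != (ssize_t)sizeof(reply))
        goto out;
    if (reply.request != req->request)     /* reply must match the request */
        goto out;

    ret = reply.payload ? -1 : 0;          /* non-zero payload means failure */
out:
    pthread_mutex_unlock(&ch->lock);
    return ret;
}
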
2970 vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm) in vhost_user_iotlb_miss() argument
2984 ret = send_vhost_message(dev->slave_req_fd, &msg); in vhost_user_iotlb_miss()
2996 vhost_user_slave_config_change(struct virtio_net *dev, bool need_reply) in vhost_user_slave_config_change() argument
3008 ret = send_vhost_slave_message(dev, &msg); in vhost_user_slave_config_change()
3016 return process_slave_message_reply(dev, &msg); in vhost_user_slave_config_change()
3022 struct virtio_net *dev; in rte_vhost_slave_config_change() local
3024 dev = get_device(vid); in rte_vhost_slave_config_change()
3025 if (!dev) in rte_vhost_slave_config_change()
3028 return vhost_user_slave_config_change(dev, need_reply); in rte_vhost_slave_config_change()
3031 static int vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev, in vhost_user_slave_set_vring_host_notifier() argument
3055 ret = send_vhost_slave_message(dev, &msg); in vhost_user_slave_set_vring_host_notifier()
3062 return process_slave_message_reply(dev, &msg); in vhost_user_slave_set_vring_host_notifier()
3067 struct virtio_net *dev; in rte_vhost_host_notifier_ctrl() local
3073 dev = get_device(vid); in rte_vhost_host_notifier_ctrl()
3074 if (!dev) in rte_vhost_host_notifier_ctrl()
3077 vdpa_dev = dev->vdpa_dev; in rte_vhost_host_notifier_ctrl()
3081 if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) || in rte_vhost_host_notifier_ctrl()
3082 !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) || in rte_vhost_host_notifier_ctrl()
3083 !(dev->protocol_features & in rte_vhost_host_notifier_ctrl()
3085 !(dev->protocol_features & in rte_vhost_host_notifier_ctrl()
3087 !(dev->protocol_features & in rte_vhost_host_notifier_ctrl()
3093 q_last = dev->nr_vring - 1; in rte_vhost_host_notifier_ctrl()
3095 if (qid >= dev->nr_vring) in rte_vhost_host_notifier_ctrl()
3116 if (vhost_user_slave_set_vring_host_notifier(dev, i, in rte_vhost_host_notifier_ctrl()
3125 vhost_user_slave_set_vring_host_notifier(dev, i, -1, in rte_vhost_host_notifier_ctrl()