Lines matching refs:vdev (every use of the vdev pointer in DPDK's vhost switch example, examples/vhost/main.c, listed with its source line and enclosing function)

710 	struct vhost_dev *vdev;  in find_vhost_dev()  local
712 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) { in find_vhost_dev()
713 if (vdev->ready == DEVICE_RX && in find_vhost_dev()
714 rte_is_same_ether_addr(mac, &vdev->mac_address)) in find_vhost_dev()
715 return vdev; in find_vhost_dev()
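
The four fragments above are nearly the whole lookup. A minimal reconstruction of find_vhost_dev(), assuming only the loop braces and the NULL fall-through that the listing elides (vhost_dev_list and DEVICE_RX are the example's own globals):

static struct vhost_dev *
find_vhost_dev(struct rte_ether_addr *mac)
{
    struct vhost_dev *vdev;

    /* Scan every known vhost device for one that is ready for RX
     * and owns the given MAC address. */
    TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
        if (vdev->ready == DEVICE_RX &&
            rte_is_same_ether_addr(mac, &vdev->mac_address))
            return vdev;
    }

    return NULL;
}
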
726 link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m) in link_vmdq() argument
737 vdev->vid); in link_vmdq()
742 vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i]; in link_vmdq()
745 vdev->vlan_tag = vlan_tags[vdev->vid]; in link_vmdq()
750 vdev->vid, in link_vmdq()
751 vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1], in link_vmdq()
752 vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3], in link_vmdq()
753 vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5], in link_vmdq()
754 vdev->vlan_tag); in link_vmdq()
757 ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address, in link_vmdq()
758 (uint32_t)vdev->vid + vmdq_pool_base); in link_vmdq()
762 vdev->vid); in link_vmdq()
764 rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1); in link_vmdq()
767 vdev->ready = DEVICE_RX; in link_vmdq()
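
A sketch of link_vmdq() built around the fragments above; the duplicate-MAC check, error paths, and exact log wording are assumptions beyond what the listing shows (ports[], vlan_tags[], vmdq_pool_base, and the VHOST_DATA log type come from the example's context):

static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
    struct rte_ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
    int i, ret;

    /* Refuse to learn a source MAC another vhost device already owns. */
    if (find_vhost_dev(&pkt_hdr->s_addr)) {
        RTE_LOG(ERR, VHOST_DATA,
            "(%d) device is using a registered MAC!\n", vdev->vid);
        return -1;
    }

    /* Learn the guest's MAC from the packet's source address. */
    for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
        vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

    /* The VLAN tag is keyed by the vhost device id. */
    vdev->vlan_tag = vlan_tags[vdev->vid];

    RTE_LOG(INFO, VHOST_DATA,
        "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
        vdev->vid,
        vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
        vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
        vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
        vdev->vlan_tag);

    /* Steer this MAC to the device's VMDq pool on the physical port. */
    ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
            (uint32_t)vdev->vid + vmdq_pool_base);
    if (ret)
        RTE_LOG(ERR, VHOST_DATA,
            "(%d) failed to add device MAC address to VMDQ\n", vdev->vid);

    rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);

    /* The device can now receive from its hardware queue. */
    vdev->ready = DEVICE_RX;
    return 0;
}
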
777 unlink_vmdq(struct vhost_dev *vdev) in unlink_vmdq() argument
783 if (vdev->ready == DEVICE_RX) { in unlink_vmdq()
785 rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address); in unlink_vmdq()
787 vdev->mac_address.addr_bytes[i] = 0; in unlink_vmdq()
789 vdev->vlan_tag = 0; in unlink_vmdq()
793 (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST); in unlink_vmdq()
800 (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST); in unlink_vmdq()
803 vdev->ready = DEVICE_MAC_LEARNING; in unlink_vmdq()
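
The matching teardown, unlink_vmdq(), reconstructed from the fragments; the drain loop structure between the two rte_eth_rx_burst() calls is an assumption:

static void
unlink_vmdq(struct vhost_dev *vdev)
{
    unsigned i;
    unsigned rx_count;
    struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

    if (vdev->ready == DEVICE_RX) {
        /* Clear the learned MAC and VLAN state. */
        rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
        for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
            vdev->mac_address.addr_bytes[i] = 0;
        vdev->vlan_tag = 0;

        /* Drain and free anything still sitting in the hardware queue. */
        rx_count = rte_eth_rx_burst(ports[0],
            (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
        while (rx_count) {
            for (i = 0; i < rx_count; i++)
                rte_pktmbuf_free(pkts_burst[i]);
            rx_count = rte_eth_rx_burst(ports[0],
                (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
        }

        /* Fall back to MAC learning until the next TX packet arrives. */
        vdev->ready = DEVICE_MAC_LEARNING;
    }
}
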
845 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m) in virtio_tx_local() argument
856 if (vdev->vid == dst_vdev->vid) { in virtio_tx_local()
859 vdev->vid); in virtio_tx_local()
872 virtio_xmit(dst_vdev, vdev, m); in virtio_tx_local()
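
virtio_tx_local() delivers VM-to-VM traffic in software. A sketch consistent with the fragments; the lookup, the remove check, and the log wording are assumptions (virtio_xmit() is the example's own enqueue helper):

static __rte_always_inline int
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
    struct rte_ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
    struct vhost_dev *dst_vdev;

    /* Look up the destination MAC among the local vhost devices. */
    dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
    if (!dst_vdev)
        return -1;

    /* A packet addressed back to its own source device is dropped. */
    if (vdev->vid == dst_vdev->vid) {
        RTE_LOG_DP(DEBUG, VHOST_DATA,
            "(%d) TX: src and dst MAC is same. Dropping packet.\n",
            vdev->vid);
        return 0;
    }

    /* Skip devices being torn down, otherwise hand the packet
     * straight to the destination's RX queue. */
    if (unlikely(dst_vdev->remove))
        return 0;

    virtio_xmit(dst_vdev, vdev, m);
    return 0;
}
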
881 find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m, in find_local_dest() argument
892 if (vdev->vid == dst_vdev->vid) { in find_local_dest()
895 vdev->vid); in find_local_dest()
905 *vlan_tag = vlan_tags[vdev->vid]; in find_local_dest()
909 vdev->vid, dst_vdev->vid, *vlan_tag); in find_local_dest()
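
find_local_dest() is the hardware-VM2VM counterpart: the packet still leaves through the NIC, so the function only fixes up the VLAN tag and header offset. A sketch under the same assumptions (VLAN_HLEN and vlan_tags[] from the example's context):

static __rte_always_inline int
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
        uint32_t *offset, uint16_t *vlan_tag)
{
    struct rte_ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
    struct vhost_dev *dst_vdev;

    dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
    if (!dst_vdev)
        return 0;

    /* Hairpinning a packet back to its own device is an error. */
    if (vdev->vid == dst_vdev->vid) {
        RTE_LOG_DP(DEBUG, VHOST_DATA,
            "(%d) TX: src and dst MAC is same. Dropping packet.\n",
            vdev->vid);
        return -1;
    }

    /* The packet travels out and back through the NIC, so reserve
     * room for an extra VLAN header and use the tag that steers it
     * to the destination's VMDq pool. */
    *offset = VLAN_HLEN;
    *vlan_tag = vlan_tags[vdev->vid];

    RTE_LOG_DP(DEBUG, VHOST_DATA,
        "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
        vdev->vid, dst_vdev->vid, *vlan_tag);

    return 0;
}
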
968 virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag) in virtio_tx_route() argument
981 if (vdev2 != vdev) in virtio_tx_route()
982 virtio_xmit(vdev2, vdev, m); in virtio_tx_route()
988 if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) { in virtio_tx_route()
994 if (unlikely(find_local_dest(vdev, m, &offset, in virtio_tx_route()
1002 "(%d) TX: MAC address is external\n", vdev->vid); in virtio_tx_route()
1047 vdev->stats.tx_total++; in virtio_tx_route()
1048 vdev->stats.tx++; in virtio_tx_route()
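
virtio_tx_route() ties the three delivery paths together: broadcast replication, software VM2VM, and external transmit via the NIC. A skeleton inferred from the fragments; the VLAN insertion and the per-lcore NIC TX buffering between the label and the stats update are elided, and the control flow around them is an assumption:

static __rte_always_inline void
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
    struct rte_ether_hdr *nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
    uint32_t offset = 0;

    if (unlikely(rte_is_broadcast_ether_addr(&nh->d_addr))) {
        struct vhost_dev *vdev2;

        /* Replicate broadcasts to every other vhost device, then
         * still send one copy out through the NIC. */
        TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
            if (vdev2 != vdev)
                virtio_xmit(vdev2, vdev, m);
        }
        goto queue2nic;
    }

    /* Software VM2VM: deliver locally and skip the NIC entirely. */
    if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
        rte_pktmbuf_free(m);
        return;
    }

    /* Hardware VM2VM: loop through the NIC with the destination's
     * VLAN tag; a src == dst hairpin gets dropped. */
    if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
        if (unlikely(find_local_dest(vdev, m, &offset, &vlan_tag) != 0)) {
            rte_pktmbuf_free(m);
            return;
        }
    }

    RTE_LOG_DP(DEBUG, VHOST_DATA,
        "(%d) TX: MAC address is external\n", vdev->vid);

queue2nic:
    /* VLAN-tag insertion and enqueue onto this lcore's NIC TX
     * buffer elided. */

    if (enable_stats) {
        vdev->stats.tx_total++;
        vdev->stats.tx++;
    }
}
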
1077 complete_async_pkts(struct vhost_dev *vdev, uint16_t qid) in complete_async_pkts() argument
1082 complete_count = rte_vhost_poll_enqueue_completed(vdev->vid, in complete_async_pkts()
1084 vdev->nr_async_pkts -= complete_count; in complete_async_pkts()
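
The two fragments are the core of complete_async_pkts(), which reaps finished async (DMA-offloaded) enqueues. A sketch assuming free_pkts() is the example's bulk mbuf-free helper:

static __rte_always_inline void
complete_async_pkts(struct vhost_dev *vdev, uint16_t qid)
{
    struct rte_mbuf *p_cpl[MAX_PKT_BURST];
    uint16_t complete_count;

    /* Poll for enqueues whose copies have completed and release
     * the mbufs that were held while the DMA was in flight. */
    complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
                    qid, p_cpl, MAX_PKT_BURST);
    vdev->nr_async_pkts -= complete_count;
    if (complete_count)
        free_pkts(p_cpl, complete_count);
}
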
1090 drain_eth_rx(struct vhost_dev *vdev) in drain_eth_rx() argument
1095 rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q, in drain_eth_rx()
1098 while (likely(vdev->nr_async_pkts)) in drain_eth_rx()
1099 complete_async_pkts(vdev, VIRTIO_RXQ); in drain_eth_rx()
1110 unlikely(rx_count > rte_vhost_avail_entries(vdev->vid, in drain_eth_rx()
1116 if (rx_count <= rte_vhost_avail_entries(vdev->vid, in drain_eth_rx()
1123 enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ, in drain_eth_rx()
1126 enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid, in drain_eth_rx()
1128 vdev->nr_async_pkts += enqueue_count; in drain_eth_rx()
1130 enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ, in drain_eth_rx()
1135 rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count); in drain_eth_rx()
1136 rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count); in drain_eth_rx()
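
drain_eth_rx() moves packets from the device's VMDq hardware queue into the guest. A reconstruction around the fragments; the enable_retry/burst_rx_retry_num/burst_rx_delay_time knobs and the builtin_net_driver and async_vhost_driver switches are the example's globals, assumed here:

static __rte_always_inline void
drain_eth_rx(struct vhost_dev *vdev)
{
    uint16_t rx_count, enqueue_count;
    struct rte_mbuf *pkts[MAX_PKT_BURST];

    rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
                    pkts, MAX_PKT_BURST);

    /* Reap any in-flight async enqueues before adding new ones. */
    while (likely(vdev->nr_async_pkts))
        complete_async_pkts(vdev, VIRTIO_RXQ);

    if (!rx_count)
        return;

    /* Optionally wait for the guest to free ring slots rather than
     * dropping the burst. */
    if (enable_retry &&
        unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
                VIRTIO_RXQ))) {
        uint32_t retry;

        for (retry = 0; retry < burst_rx_retry_num; retry++) {
            rte_delay_us(burst_rx_delay_time);
            if (rx_count <= rte_vhost_avail_entries(vdev->vid,
                    VIRTIO_RXQ))
                break;
        }
    }

    if (builtin_net_driver) {
        enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
                        pkts, rx_count);
    } else if (async_vhost_driver) {
        enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
                        VIRTIO_RXQ, pkts, rx_count);
        vdev->nr_async_pkts += enqueue_count;
    } else {
        enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
                        pkts, rx_count);
    }

    if (enable_stats) {
        rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
        rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
    }

    /* In the sync paths the mbufs can be freed immediately; the
     * async path frees them in complete_async_pkts(). */
    if (!async_vhost_driver)
        free_pkts(pkts, rx_count);
}
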
1144 drain_virtio_tx(struct vhost_dev *vdev) in drain_virtio_tx() argument
1151 count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool, in drain_virtio_tx()
1154 count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, in drain_virtio_tx()
1159 if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) { in drain_virtio_tx()
1160 if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1) in drain_virtio_tx()
1165 virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]); in drain_virtio_tx()
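
drain_virtio_tx() is the opposite direction: dequeue from the guest and route. A sketch; mbuf_pool and free_pkts() are assumed from the example's context:

static __rte_always_inline void
drain_virtio_tx(struct vhost_dev *vdev)
{
    struct rte_mbuf *pkts[MAX_PKT_BURST];
    uint16_t count;
    uint16_t i;

    if (builtin_net_driver) {
        count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
                    pkts, MAX_PKT_BURST);
    } else {
        count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
                    mbuf_pool, pkts, MAX_PKT_BURST);
    }

    /* The first TX packet triggers MAC learning and VMDq setup. */
    if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
        if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
            free_pkts(pkts, count);
    }

    for (i = 0; i < count; ++i)
        virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}
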
1190 struct vhost_dev *vdev; in switch_worker() local
1216 TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list, in switch_worker()
1218 if (unlikely(vdev->remove)) { in switch_worker()
1219 unlink_vmdq(vdev); in switch_worker()
1220 vdev->ready = DEVICE_SAFE_REMOVE; in switch_worker()
1224 if (likely(vdev->ready == DEVICE_RX)) in switch_worker()
1225 drain_eth_rx(vdev); in switch_worker()
1227 if (likely(!vdev->remove)) in switch_worker()
1228 drain_virtio_tx(vdev); in switch_worker()
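
These fragments are the heart of switch_worker(), the per-lcore forwarding loop. A condensed sketch; the TX-queue setup and periodic NIC-buffer flush at the top of the loop are elided:

static int
switch_worker(void *arg __rte_unused)
{
    unsigned lcore_id = rte_lcore_id();
    struct vhost_dev *vdev;

    while (1) {
        /* Flush of this lcore's buffered NIC TX queue elided. */

        /* Walk the devices assigned to this lcore. */
        TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
                  lcore_vdev_entry) {
            if (unlikely(vdev->remove)) {
                /* Tear down VMDq state and signal the control
                 * path that removal is now safe. */
                unlink_vmdq(vdev);
                vdev->ready = DEVICE_SAFE_REMOVE;
                continue;
            }

            if (likely(vdev->ready == DEVICE_RX))
                drain_eth_rx(vdev);

            if (likely(!vdev->remove))
                drain_virtio_tx(vdev);
        }
    }

    return 0;
}
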
1244 struct vhost_dev *vdev = NULL; in destroy_device() local
1247 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) { in destroy_device()
1248 if (vdev->vid == vid) in destroy_device()
1251 if (!vdev) in destroy_device()
1254 vdev->remove = 1; in destroy_device()
1255 while(vdev->ready != DEVICE_SAFE_REMOVE) { in destroy_device()
1260 vs_vhost_net_remove(vdev); in destroy_device()
1262 TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev, in destroy_device()
1264 TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry); in destroy_device()
1281 lcore_info[vdev->coreid].device_num--; in destroy_device()
1285 vdev->vid); in destroy_device()
1290 rte_free(vdev); in destroy_device()
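
destroy_device() shows the removal handshake with the worker: set remove, spin until the data core marks the device DEVICE_SAFE_REMOVE, then unlink and free. A sketch; the per-lcore removal-flag synchronization that the listing skips is elided, and rte_pause() in the wait loop is an assumption:

static void
destroy_device(int vid)
{
    struct vhost_dev *vdev = NULL;

    TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
        if (vdev->vid == vid)
            break;
    }
    if (!vdev)
        return;

    /* Flag the device and wait for its data core to release it. */
    vdev->remove = 1;
    while (vdev->ready != DEVICE_SAFE_REMOVE)
        rte_pause();

    if (builtin_net_driver)
        vs_vhost_net_remove(vdev);

    TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
             lcore_vdev_entry);
    TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);

    /* Per-lcore removal handshake elided. */

    lcore_info[vdev->coreid].device_num--;

    RTE_LOG(INFO, VHOST_DATA,
        "(%d) device has been removed from data core\n", vdev->vid);

    rte_free(vdev);
}
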
1302 struct vhost_dev *vdev; in new_device() local
1303 vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE); in new_device()
1304 if (vdev == NULL) { in new_device()
1310 vdev->vid = vid; in new_device()
1313 vs_vhost_net_setup(vdev); in new_device()
1315 TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry); in new_device()
1316 vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base; in new_device()
1319 vdev->ready = DEVICE_MAC_LEARNING; in new_device()
1320 vdev->remove = 0; in new_device()
1329 vdev->coreid = core_add; in new_device()
1331 TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev, in new_device()
1333 lcore_info[vdev->coreid].device_num++; in new_device()
1341 vid, vdev->coreid); in new_device()
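
new_device() allocates the per-device state, derives the VMDq RX queue from the vid, and places the device on the least-loaded data core. A sketch; the least-loaded-core scan, guest-notification disabling, and the lcore iterator name are assumptions (older releases spell it RTE_LCORE_FOREACH_SLAVE), and async-channel registration is elided:

static int
new_device(int vid)
{
    int lcore, core_add = 0;
    uint32_t device_num_min = num_devices;
    struct vhost_dev *vdev;

    vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
    if (vdev == NULL) {
        RTE_LOG(INFO, VHOST_DATA,
            "(%d) couldn't allocate memory for vhost dev\n", vid);
        return -1;
    }
    vdev->vid = vid;

    if (builtin_net_driver)
        vs_vhost_net_setup(vdev);

    TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
    vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;

    /* Start in MAC-learning mode; the first TX packet promotes the
     * device to DEVICE_RX via link_vmdq(). */
    vdev->ready = DEVICE_MAC_LEARNING;
    vdev->remove = 0;

    /* Assign the device to the data core with the fewest devices. */
    RTE_LCORE_FOREACH_WORKER(lcore) {
        if (lcore_info[lcore].device_num < device_num_min) {
            device_num_min = lcore_info[lcore].device_num;
            core_add = lcore;
        }
    }
    vdev->coreid = core_add;

    TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
              lcore_vdev_entry);
    lcore_info[vdev->coreid].device_num++;

    /* Polling mode: guest notifications are not needed. */
    rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
    rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);

    RTE_LOG(INFO, VHOST_DATA,
        "(%d) device has been added to data core %d\n",
        vid, vdev->coreid);

    return 0;
}
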
1377 struct vhost_dev *vdev; in print_stats() local
1390 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) { in print_stats()
1391 tx_total = vdev->stats.tx_total; in print_stats()
1392 tx = vdev->stats.tx; in print_stats()
1395 rx_total = rte_atomic64_read(&vdev->stats.rx_total_atomic); in print_stats()
1396 rx = rte_atomic64_read(&vdev->stats.rx_atomic); in print_stats()
1407 vdev->vid, in print_stats()
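
print_stats() runs as a separate thread and periodically dumps per-device counters; note the TX counters are plain fields (only one data core writes them) while the RX counters are read atomically. A condensed sketch; the thread signature, sleep interval, and printed layout are assumptions:

static void *
print_stats(__rte_unused void *arg)
{
    struct vhost_dev *vdev;
    uint64_t tx, tx_total, tx_dropped;
    uint64_t rx, rx_total, rx_dropped;

    while (1) {
        sleep(enable_stats);

        /* Clear the screen (VT100) and walk every device. */
        printf("\033[2J\033[H");

        TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
            tx_total = vdev->stats.tx_total;
            tx = vdev->stats.tx;
            tx_dropped = tx_total - tx;

            /* RX counters are written by the data cores, so read
             * them atomically. */
            rx_total = rte_atomic64_read(&vdev->stats.rx_total_atomic);
            rx = rte_atomic64_read(&vdev->stats.rx_atomic);
            rx_dropped = rx_total - rx;

            printf("Statistics for device %d\n"
                "TX total/dropped/ok: %"PRIu64"/%"PRIu64"/%"PRIu64"\n"
                "RX total/dropped/ok: %"PRIu64"/%"PRIu64"/%"PRIu64"\n",
                vdev->vid,
                tx_total, tx_dropped, tx,
                rx_total, rx_dropped, rx);
        }
    }

    return NULL;
}
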