Lines matching refs: vdev. Each entry shows the source line number, the matching code, and the enclosing function; "local" and "argument" mark variable declarations and function parameters.

886 	struct vhost_dev *vdev;  in find_vhost_dev()  local
888 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) { in find_vhost_dev()
889 if (vdev->ready == DEVICE_RX && in find_vhost_dev()
890 rte_is_same_ether_addr(mac, &vdev->mac_address)) in find_vhost_dev()
891 return vdev; in find_vhost_dev()
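
The matches at lines 886-891 are the body of find_vhost_dev(): it walks the global device list and returns the device whose learned MAC matches a packet's destination address. A minimal reconstruction of that lookup, assuming a pared-down struct vhost_dev and reusing the example's list and field names for illustration (the DEVICE_RX value is assumed):

    #include <sys/queue.h>
    #include <rte_ether.h>

    #define DEVICE_RX 1    /* assumed value; the example defines its own device states */

    struct vhost_dev {
        struct rte_ether_addr mac_address;     /* MAC learned from the guest */
        int ready;                             /* DEVICE_MAC_LEARNING / DEVICE_RX / ... */
        TAILQ_ENTRY(vhost_dev) global_vdev_entry;
    };

    static TAILQ_HEAD(, vhost_dev) vhost_dev_list =
        TAILQ_HEAD_INITIALIZER(vhost_dev_list);

    /* Return the vhost device whose learned MAC matches, or NULL. */
    static struct vhost_dev *
    find_vhost_dev(struct rte_ether_addr *mac)
    {
        struct vhost_dev *vdev;

        TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
            if (vdev->ready == DEVICE_RX &&
                rte_is_same_ether_addr(mac, &vdev->mac_address))
                return vdev;
        }
        return NULL;
    }
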
902 link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m) in link_vmdq() argument
913 vdev->vid); in link_vmdq()
918 vdev->mac_address.addr_bytes[i] = in link_vmdq()
922 vdev->vlan_tag = vlan_tags[vdev->vid]; in link_vmdq()
927 vdev->vid, RTE_ETHER_ADDR_BYTES(&vdev->mac_address), in link_vmdq()
928 vdev->vlan_tag); in link_vmdq()
931 ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address, in link_vmdq()
932 (uint32_t)vdev->vid + vmdq_pool_base); in link_vmdq()
936 vdev->vid); in link_vmdq()
938 rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1); in link_vmdq()
941 vdev->ready = DEVICE_RX; in link_vmdq()
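
Lines 902-941 are link_vmdq(), which learns the guest's MAC address from its first transmitted frame, registers that MAC with the NIC's VMDq pool for the device (rte_eth_dev_mac_addr_add()), records the VLAN tag from vlan_tags[vid], enables VLAN stripping on the device's RX queue, and finally marks the device DEVICE_RX. A hedged sketch of the registration step only, with the example's ports[0], vdev->vid + vmdq_pool_base and vdev->vmdq_rx_q reduced to plain parameters:

    #include <rte_ethdev.h>
    #include <rte_ether.h>

    /* Bind a learned MAC to the VMDq pool backing one vhost device. */
    static int
    register_vmdq_mac(uint16_t port_id, struct rte_ether_addr *mac,
                      uint32_t pool, uint16_t rx_queue)
    {
        int ret;

        ret = rte_eth_dev_mac_addr_add(port_id, mac, pool);
        if (ret != 0)
            return ret;   /* NIC rejected the MAC; leave the device in MAC learning */

        /* Strip the VLAN tag in hardware before frames reach the guest. */
        return rte_eth_dev_set_vlan_strip_on_queue(port_id, rx_queue, 1);
    }
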
951 unlink_vmdq(struct vhost_dev *vdev) in unlink_vmdq() argument
957 if (vdev->ready == DEVICE_RX) { in unlink_vmdq()
959 rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address); in unlink_vmdq()
961 vdev->mac_address.addr_bytes[i] = 0; in unlink_vmdq()
963 vdev->vlan_tag = 0; in unlink_vmdq()
967 (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST); in unlink_vmdq()
974 (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST); in unlink_vmdq()
977 vdev->ready = DEVICE_MAC_LEARNING; in unlink_vmdq()
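
unlink_vmdq() (lines 951-977) undoes that binding when the device is torn down: it removes the MAC filter, clears the stored address and VLAN tag, drains whatever the NIC already queued on the device's VMDq RX queue, and drops the device back to DEVICE_MAC_LEARNING. A sketch of the queue-drain idiom, assuming the same burst size the example uses:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define MAX_PKT_BURST 32   /* assumed to match the example's burst size */

    /* Discard everything still sitting in one NIC RX queue. */
    static void
    flush_rx_queue(uint16_t port_id, uint16_t rx_queue)
    {
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
        uint16_t rx_count, i;

        rx_count = rte_eth_rx_burst(port_id, rx_queue, pkts_burst, MAX_PKT_BURST);
        while (rx_count) {
            for (i = 0; i < rx_count; i++)
                rte_pktmbuf_free(pkts_burst[i]);
            rx_count = rte_eth_rx_burst(port_id, rx_queue, pkts_burst, MAX_PKT_BURST);
        }
    }
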
989 complete_async_pkts(struct vhost_dev *vdev) in complete_async_pkts() argument
993 int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id; in complete_async_pkts()
995 complete_count = rte_vhost_poll_enqueue_completed(vdev->vid, in complete_async_pkts()
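
complete_async_pkts() (lines 989-995) services the DMA-accelerated enqueue path: rte_vhost_poll_enqueue_completed() returns the mbufs whose copies into the guest have finished, and only then can they be freed. A sketch assuming a single DMA device bound to the RX queue with virtual channel 0, which is what the dma_bind[vid] lookup in the example implies:

    #include <rte_mbuf.h>
    #include <rte_vhost.h>
    #include <rte_vhost_async.h>

    #define MAX_PKT_BURST 32   /* assumed burst size */

    /* Free packets whose DMA-offloaded enqueue into the guest has completed. */
    static void
    reap_async_completions(int vid, int16_t dma_id)
    {
        struct rte_mbuf *done[MAX_PKT_BURST];
        uint16_t n;

        n = rte_vhost_poll_enqueue_completed(vid, VIRTIO_RXQ, done,
                                             MAX_PKT_BURST, dma_id, 0);
        while (n--)
            rte_pktmbuf_free(done[n]);
    }
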
1025 drain_vhost(struct vhost_dev *vdev) in drain_vhost() argument
1028 uint32_t buff_idx = rte_lcore_id() * RTE_MAX_VHOST_DEVICE + vdev->vid; in drain_vhost()
1033 ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit); in drain_vhost()
1034 } else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) { in drain_vhost()
1036 int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id; in drain_vhost()
1038 complete_async_pkts(vdev); in drain_vhost()
1039 ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit, dma_id, 0); in drain_vhost()
1045 ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ, in drain_vhost()
1050 __atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit, in drain_vhost()
1052 __atomic_add_fetch(&vdev->stats.rx_atomic, ret, in drain_vhost()
1056 if (!dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) in drain_vhost()
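
drain_vhost() (lines 1025-1056) flushes one lcore's TX buffer for a device through whichever enqueue path applies: the builtin network stack (vs_enqueue_pkts()), the DMA-accelerated async path, or plain rte_vhost_enqueue_burst(). In the non-async cases the host still owns the mbufs after the copy, so the whole burst is freed afterwards and the offered/accepted counts go into the atomic RX statistics. A sketch of the synchronous branch only, with the buffered packets passed in directly:

    #include <rte_mbuf.h>
    #include <rte_vhost.h>

    /* Push a buffered burst into the guest's RX virtqueue (sync path).
     * Returns how many packets the virtqueue accepted. */
    static uint16_t
    drain_to_guest_sync(int vid, struct rte_mbuf **pkts, uint16_t count)
    {
        uint16_t enqueued, i;

        enqueued = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts, count);

        /* The copy is done (or the packet was dropped), so every mbuf can be
         * released; the example also updates its rx/rx_total counters here. */
        for (i = 0; i < count; i++)
            rte_pktmbuf_free(pkts[i]);

        return enqueued;
    }
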
1065 struct vhost_dev *vdev; in drain_vhost_table() local
1068 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) { in drain_vhost_table()
1069 if (unlikely(vdev->remove == 1)) in drain_vhost_table()
1072 vhost_txq = vhost_txbuff[lcore_id * RTE_MAX_VHOST_DEVICE + vdev->vid]; in drain_vhost_table()
1080 drain_vhost(vdev); in drain_vhost_table()
1092 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m) in virtio_tx_local() argument
1104 if (vdev->vid == dst_vdev->vid) { in virtio_tx_local()
1107 vdev->vid); in virtio_tx_local()
1124 vdev->stats.tx_total++; in virtio_tx_local()
1125 vdev->stats.tx++; in virtio_tx_local()
1141 find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m, in find_local_dest() argument
1152 if (vdev->vid == dst_vdev->vid) { in find_local_dest()
1155 vdev->vid); in find_local_dest()
1165 *vlan_tag = vlan_tags[vdev->vid]; in find_local_dest()
1169 vdev->vid, dst_vdev->vid, *vlan_tag); in find_local_dest()
1222 virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag) in virtio_tx_route() argument
1235 if (vdev2 != vdev) in virtio_tx_route()
1236 sync_virtio_xmit(vdev2, vdev, m); in virtio_tx_route()
1242 if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) in virtio_tx_route()
1246 if (unlikely(find_local_dest(vdev, m, &offset, in virtio_tx_route()
1254 "(%d) TX: MAC address is external\n", vdev->vid); in virtio_tx_route()
1299 vdev->stats.tx_total++; in virtio_tx_route()
1300 vdev->stats.tx++; in virtio_tx_route()
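
Lines 1092-1300 cover the TX routing path: virtio_tx_local() delivers a packet straight into another vhost device when VM2VM software switching applies, find_local_dest() detects the same condition for the hardware-assisted case and picks the right VLAN tag, and virtio_tx_route() ties it together, either replicating to other devices, delivering locally, or sending the frame out the physical port with the device's VLAN tag. The decision itself boils down to a MAC lookup; a compact sketch building on the find_vhost_dev() sketch above (dst_addr is the header field name in recent DPDK releases):

    #include <rte_ether.h>
    #include <rte_mbuf.h>

    enum tx_dest { TX_LOCAL, TX_EXTERNAL, TX_DROP };

    /* Classify one guest TX packet: another local guest, the outside world,
     * or a drop when the packet is addressed to its own device. */
    static enum tx_dest
    classify_tx_packet(struct vhost_dev *src, struct rte_mbuf *m,
                       struct vhost_dev **dst_out)
    {
        struct rte_ether_hdr *eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
        struct vhost_dev *dst = find_vhost_dev(&eh->dst_addr);

        if (dst == NULL)
            return TX_EXTERNAL;   /* unknown MAC: VLAN-tag it and use the NIC */
        if (dst == src)
            return TX_DROP;       /* guest sent a frame to itself */

        *dst_out = dst;
        return TX_LOCAL;          /* VM-to-VM delivery on this host */
    }
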
1329 drain_eth_rx(struct vhost_dev *vdev) in drain_eth_rx() argument
1334 rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q, in drain_eth_rx()
1346 unlikely(rx_count > rte_vhost_avail_entries(vdev->vid, in drain_eth_rx()
1352 if (rx_count <= rte_vhost_avail_entries(vdev->vid, in drain_eth_rx()
1359 enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ, in drain_eth_rx()
1361 } else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) { in drain_eth_rx()
1363 int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id; in drain_eth_rx()
1365 complete_async_pkts(vdev); in drain_eth_rx()
1366 enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid, in drain_eth_rx()
1374 enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ, in drain_eth_rx()
1379 __atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count, in drain_eth_rx()
1381 __atomic_add_fetch(&vdev->stats.rx_atomic, enqueue_count, in drain_eth_rx()
1385 if (!dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) in drain_eth_rx()
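
drain_eth_rx() (lines 1329-1385) is the NIC-to-guest half of the datapath: it pulls a burst from the device's VMDq RX queue, optionally waits briefly when rte_vhost_avail_entries() reports fewer free descriptors than received packets, then enqueues through the builtin, async, or plain sync path and adds the counts to the atomic RX statistics. A sketch of the sync variant with the retry loop reduced to its core; the retry count and delay are illustrative, not the example's configurable knobs:

    #include <rte_cycles.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>
    #include <rte_vhost.h>

    #define BURST_SIZE 32   /* assumed burst size */

    /* Move one burst from a NIC RX queue into a guest's RX virtqueue. */
    static uint16_t
    nic_to_guest(uint16_t port_id, uint16_t rx_queue, int vid)
    {
        struct rte_mbuf *pkts[BURST_SIZE];
        uint16_t rx_count, enqueued, i, retry;

        rx_count = rte_eth_rx_burst(port_id, rx_queue, pkts, BURST_SIZE);
        if (rx_count == 0)
            return 0;

        /* Give the guest a moment to refill descriptors if its ring is full. */
        for (retry = 0; retry < 4; retry++) {
            if (rte_vhost_avail_entries(vid, VIRTIO_RXQ) >= rx_count)
                break;
            rte_delay_us(15);
        }

        enqueued = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts, rx_count);

        /* Sync path: the host still owns every mbuf after the copy. */
        for (i = 0; i < rx_count; i++)
            rte_pktmbuf_free(pkts[i]);

        return enqueued;
    }
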
1390 drain_virtio_tx(struct vhost_dev *vdev) in drain_virtio_tx() argument
1397 count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool, in drain_virtio_tx()
1400 count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, in drain_virtio_tx()
1405 if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) { in drain_virtio_tx()
1406 if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1) in drain_virtio_tx()
1411 virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]); in drain_virtio_tx()
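
drain_virtio_tx() (lines 1390-1411) handles the opposite direction: it dequeues what the guest transmitted into a host mempool (builtin path or rte_vhost_dequeue_burst()), triggers link_vmdq() on the first packet while the device is still in DEVICE_MAC_LEARNING, and routes every packet with virtio_tx_route() using the device's VLAN tag. A sketch of the dequeue-and-route step, with the routing call abstracted into a callback:

    #include <rte_mbuf.h>
    #include <rte_mempool.h>
    #include <rte_vhost.h>

    #define BURST_SIZE 32   /* assumed burst size */

    /* Drain the guest's TX virtqueue and hand each packet to a router
     * callback standing in for the example's virtio_tx_route(). */
    static void
    guest_tx_drain(int vid, struct rte_mempool *mbuf_pool,
                   void (*route_one)(struct rte_mbuf *m, uint16_t vlan_tag),
                   uint16_t vlan_tag)
    {
        struct rte_mbuf *pkts[BURST_SIZE];
        uint16_t count, i;

        count = rte_vhost_dequeue_burst(vid, VIRTIO_TXQ, mbuf_pool,
                                        pkts, BURST_SIZE);
        for (i = 0; i < count; i++)
            route_one(pkts[i], vlan_tag);
    }
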
1436 struct vhost_dev *vdev; in switch_worker() local
1462 TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list, in switch_worker()
1464 if (unlikely(vdev->remove)) { in switch_worker()
1465 unlink_vmdq(vdev); in switch_worker()
1466 vdev->ready = DEVICE_SAFE_REMOVE; in switch_worker()
1470 if (likely(vdev->ready == DEVICE_RX)) in switch_worker()
1471 drain_eth_rx(vdev); in switch_worker()
1473 if (likely(!vdev->remove)) in switch_worker()
1474 drain_virtio_tx(vdev); in switch_worker()
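
switch_worker() (lines 1436-1474) is the per-lcore forwarding loop. For every device assigned to the core it first honours a pending removal (unlink_vmdq(), then advertise DEVICE_SAFE_REMOVE so destroy_device() can proceed); otherwise it drains NIC RX into the guest and guest TX back out. A skeleton of one loop pass over a per-core device list; the struct, state values, and the commented drain calls mirror the example but are illustrative only:

    #include <sys/queue.h>

    enum vdev_state { DEVICE_MAC_LEARNING, DEVICE_RX, DEVICE_SAFE_REMOVE };

    struct vhost_dev {
        volatile int remove;               /* set by destroy_device() */
        enum vdev_state ready;
        TAILQ_ENTRY(vhost_dev) lcore_vdev_entry;
    };

    TAILQ_HEAD(vdev_list_head, vhost_dev);

    /* One pass of the forwarding loop over the devices owned by this lcore. */
    static void
    switch_worker_iteration(struct vdev_list_head *my_devices)
    {
        struct vhost_dev *vdev;

        TAILQ_FOREACH(vdev, my_devices, lcore_vdev_entry) {
            if (vdev->remove) {
                /* unlink_vmdq(vdev): tear down the MAC/VMDq binding, then
                 * signal destroy_device(), which waits for this state. */
                vdev->ready = DEVICE_SAFE_REMOVE;
                continue;
            }
            if (vdev->ready == DEVICE_RX) {
                /* drain_eth_rx(vdev): NIC VMDq queue -> guest RX ring */
            }
            /* drain_virtio_tx(vdev): guest TX ring -> NIC or local device */
        }
    }
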
1490 struct vhost_dev *vdev = NULL; in destroy_device() local
1494 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) { in destroy_device()
1495 if (vdev->vid == vid) in destroy_device()
1498 if (!vdev) in destroy_device()
1501 vdev->remove = 1; in destroy_device()
1502 while(vdev->ready != DEVICE_SAFE_REMOVE) { in destroy_device()
1510 vs_vhost_net_remove(vdev); in destroy_device()
1512 TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev, in destroy_device()
1514 TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry); in destroy_device()
1531 lcore_info[vdev->coreid].device_num--; in destroy_device()
1535 vdev->vid); in destroy_device()
1556 rte_free(vdev); in destroy_device()
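
destroy_device() (lines 1490-1556) runs in the vhost library's control context. It looks the device up by vid, sets remove = 1, and busy-waits until the worker loop acknowledges with DEVICE_SAFE_REMOVE; only then is the device unlinked from the per-core and global lists, the core's device_num decremented, and the memory released with rte_free(), so the datapath never touches freed memory. A sketch of that handshake from the control-plane side, with a trimmed device struct and an assumed state value:

    #include <rte_malloc.h>
    #include <rte_pause.h>

    #define DEVICE_SAFE_REMOVE 2   /* value assumed for the sketch */

    struct vhost_dev {
        volatile int remove;
        volatile int ready;
    };

    /* Ask the worker core to stop using vdev, wait for it, then free it. */
    static void
    remove_device(struct vhost_dev *vdev)
    {
        vdev->remove = 1;
        while (vdev->ready != DEVICE_SAFE_REMOVE)
            rte_pause();   /* the worker sets this after its teardown step */

        /* Safe now: the worker will not dereference vdev again. */
        rte_free(vdev);
    }
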
1569 struct vhost_dev *vdev; in new_device() local
1570 vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE); in new_device()
1571 if (vdev == NULL) { in new_device()
1577 vdev->vid = vid; in new_device()
1593 vs_vhost_net_setup(vdev); in new_device()
1595 TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry); in new_device()
1596 vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base; in new_device()
1599 vdev->ready = DEVICE_MAC_LEARNING; in new_device()
1600 vdev->remove = 0; in new_device()
1609 vdev->coreid = core_add; in new_device()
1611 TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev, in new_device()
1613 lcore_info[vdev->coreid].device_num++; in new_device()
1621 vid, vdev->coreid); in new_device()
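
new_device() (lines 1569-1621) allocates a zeroed vhost_dev with rte_zmalloc(), stores the vid, derives the device's dedicated VMDq RX queue as vid * queues_per_pool + vmdq_queue_base, inserts it into the global list in MAC-learning state, and assigns it to the worker lcore that currently serves the fewest devices before bumping that core's device_num. A sketch of the core-selection policy, with a hypothetical lcore_load[] array standing in for the example's lcore_info[]:

    #include <stdint.h>
    #include <rte_lcore.h>

    struct lcore_load {
        uint32_t device_num;   /* vhost devices currently served by this core */
    };

    static struct lcore_load lcore_load[RTE_MAX_LCORE];

    /* Pick the worker lcore with the fewest attached vhost devices. */
    static unsigned int
    pick_least_loaded_core(void)
    {
        unsigned int lcore_id, best = rte_get_next_lcore(-1, 1, 0);
        uint32_t best_load = UINT32_MAX;

        RTE_LCORE_FOREACH_WORKER(lcore_id) {
            if (lcore_load[lcore_id].device_num < best_load) {
                best_load = lcore_load[lcore_id].device_num;
                best = lcore_id;
            }
        }
        return best;
    }
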
1638 struct vhost_dev *vdev = NULL; in vring_state_changed() local
1640 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) { in vring_state_changed()
1641 if (vdev->vid == vid) in vring_state_changed()
1644 if (!vdev) in vring_state_changed()
1689 struct vhost_dev *vdev; in print_stats() local
1702 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) { in print_stats()
1703 tx_total = vdev->stats.tx_total; in print_stats()
1704 tx = vdev->stats.tx; in print_stats()
1707 rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic, in print_stats()
1709 rx = __atomic_load_n(&vdev->stats.rx_atomic, in print_stats()
1721 vdev->vid, in print_stats()
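
print_stats() (lines 1689-1721) walks the global device list and prints per-device counters. The TX counters are only written by the core that owns the device, so plain loads suffice; the RX counters are updated by any core that buffers traffic toward the device, which is why they are read with __atomic_load_n(), matching the __atomic_add_fetch() updates in drain_vhost() and drain_eth_rx(). A sketch of the reader side with a trimmed stats struct (the memory order is assumed):

    #include <inttypes.h>
    #include <stdio.h>

    struct device_statistics {
        uint64_t tx_total;          /* owner core only: plain loads are fine */
        uint64_t tx;
        uint64_t rx_total_atomic;   /* written by several cores: use atomics */
        uint64_t rx_atomic;
    };

    static void
    print_device_stats(int vid, const struct device_statistics *st)
    {
        uint64_t rx_total = __atomic_load_n(&st->rx_total_atomic, __ATOMIC_SEQ_CST);
        uint64_t rx = __atomic_load_n(&st->rx_atomic, __ATOMIC_SEQ_CST);

        printf("vid %d: TX %" PRIu64 "/%" PRIu64 ", RX %" PRIu64 "/%" PRIu64
               " (successful/total)\n",
               vid, st->tx, st->tx_total, rx, rx_total);
    }
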