/f-stack/dpdk/drivers/net/virtio/
virtqueue.c
    26   if (vq == NULL)  in virtqueue_detach_unused()
    29   hw = vq->hw;  in virtqueue_detach_unused()
    31   start = vq->vq_avail_idx & (vq->vq_nentries - 1);  in virtqueue_detach_unused()
    32   end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);  in virtqueue_detach_unused()
    77   if (vq->vq_used_cons_idx >= vq->vq_nentries) {  in virtqueue_rxvq_flush_packed()
    78   vq->vq_used_cons_idx -= vq->vq_nentries;  in virtqueue_rxvq_flush_packed()
    99   used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);  in virtqueue_rxvq_flush_split()
    157  vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);  in virtqueue_rxvq_reset_packed()
    158  vq->vq_free_cnt = vq->vq_nentries;  in virtqueue_rxvq_reset_packed()
    193  vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);  in virtqueue_txvq_reset_packed()
    [all …]
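
Two wrap idioms recur throughout these hits: split-ring paths reduce a free-running index with the `& (vq_nentries - 1)` mask (lines 31, 32, 99), while packed-ring paths wrap by subtraction (lines 77-78), since packed indices must stay inside the ring. A minimal sketch of both, using illustrative names rather than the driver's types:

    #include <stdint.h>

    /* Split ring: the index runs freely as a uint16_t and is reduced on
     * use. The mask trick is only valid when nentries is a power of two. */
    static inline uint16_t
    split_slot(uint16_t free_running_idx, uint16_t nentries)
    {
        return free_running_idx & (nentries - 1);
    }

    /* Packed ring: the index itself must stay in [0, nentries), so the
     * code wraps by subtraction (cf. lines 77-78 above). */
    static inline uint16_t
    packed_advance(uint16_t idx, uint16_t step, uint16_t nentries)
    {
        idx += step;
        if (idx >= nentries)
            idx -= nentries;
        return idx;
    }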
|
virtqueue.h
    530  vq->vq_split.ring.avail->idx = vq->vq_avail_idx;  in vq_update_avail_idx()
    537  vq->vq_split.ring.avail->idx = vq->vq_avail_idx;  in vq_update_avail_idx()
    590  VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);  in virtqueue_notify()
    603  (vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
    604  (vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \
    612  (vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \
    694  struct virtqueue *vq = txvq->vq;  in virtqueue_enqueue_xmit_packed()  local
    703  id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;  in virtqueue_enqueue_xmit_packed()
    800  vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);  in virtqueue_enqueue_xmit_packed()
    824  vq->vq_descx[vq->vq_desc_tail_idx].next = id;  in vq_ring_free_id_packed()
    [all …]
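
vq_update_avail_idx() and virtqueue_notify() are the tail of the split-ring submit path: publish the new avail->idx, then kick the device through VTPCI_OPS(vq->hw)->notify_queue (line 590) unless it suppressed notifications. A runnable sketch of that publish-then-kick ordering, assuming a simplified ring header and a stand-in notify hook, not the driver's actual layout:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins; the real layouts live in the virtio spec. */
    struct demo_vring {
        _Atomic uint16_t avail_idx;    /* driver -> device */
        _Atomic uint16_t device_flags; /* bit0 set = suppress kicks */
    };

    static void demo_notify(void) { puts("kick device"); }

    /* Publish avail->idx with release semantics (roughly what
     * vq_update_avail_idx() amounts to), then notify unless suppressed
     * (the virtqueue_kick_prepare()/virtqueue_notify() pair). */
    static void
    demo_submit(struct demo_vring *vr, uint16_t new_idx)
    {
        atomic_store_explicit(&vr->avail_idx, new_idx,
                              memory_order_release);
        if (!(atomic_load_explicit(&vr->device_flags,
                                   memory_order_acquire) & 1))
            demo_notify();
    }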
|
virtio_rxtx.c
    46    struct virtqueue *vq = rxvq->vq;  in virtio_dev_rx_queue_done()  local
    432   struct virtqueue *vq = txvq->vq;  in virtqueue_enqueue_xmit_inorder()  local
    444   dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];  in virtqueue_enqueue_xmit_inorder()
    479   struct virtqueue *vq = txvq->vq;  in virtqueue_enqueue_xmit_packed_fast()  local
    533   struct virtqueue *vq = txvq->vq;  in virtqueue_enqueue_xmit()  local
    544   dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];  in virtqueue_enqueue_xmit()
    957   struct virtqueue *vq = rxvq->vq;  in virtio_recv_pkts()  local
    1064  struct virtqueue *vq = rxvq->vq;  in virtio_recv_pkts_packed()  local
    1167  struct virtqueue *vq = rxvq->vq;  in virtio_recv_pkts_inorder()  local
    1351  struct virtqueue *vq = rxvq->vq;  in virtio_recv_mergeable_pkts()  local
    [all …]
|
virtio_rxtx_packed_avx.c
    85   struct virtqueue *vq = txvq->vq;  in virtqueue_enqueue_batch_packed_vec()  local
    194  if (vq->vq_avail_idx >= vq->vq_nentries) {  in virtqueue_enqueue_batch_packed_vec()
    195  vq->vq_avail_idx -= vq->vq_nentries;  in virtqueue_enqueue_batch_packed_vec()
    207  struct virtqueue *vq = txvq->vq;  in virtqueue_enqueue_single_packed_vec()  local
    256  struct virtqueue *vq = txvq->vq;  in virtio_xmit_pkts_packed_vec()  local
    269  if (vq->vq_free_cnt <= vq->vq_nentries - vq->vq_free_thresh)  in virtio_xmit_pkts_packed_vec()
    361  struct virtqueue *vq = rxvq->vq;  in virtqueue_dequeue_batch_packed_vec()  local
    463  struct virtqueue *vq = rxvq->vq;  in virtqueue_dequeue_single_packed_vec()  local
    515  struct virtqueue *vq = rxvq->vq;  in virtio_recv_refill_packed_vec()  local
    552  vq->vq_avail_idx -= vq->vq_nentries;  in virtio_recv_refill_packed_vec()
    [all …]
|
virtio_rxtx_simple.h
    26  struct virtqueue *vq = rxvq->vq;  in virtio_rxq_rearm_vec()  local
    28  desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);  in virtio_rxq_rearm_vec()
    29  sw_ring = &vq->sw_ring[desc_idx];  in virtio_rxq_rearm_vec()
    30  start_dp = &vq->vq_split.ring.desc[desc_idx];  in virtio_rxq_rearm_vec()
    47  VIRTIO_MBUF_ADDR(sw_ring[i], vq) +  in virtio_rxq_rearm_vec()
    48  RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;  in virtio_rxq_rearm_vec()
    50  RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;  in virtio_rxq_rearm_vec()
    53  vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;  in virtio_rxq_rearm_vec()
    54  vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;  in virtio_rxq_rearm_vec()
    55  vq_update_avail_idx(vq);  in virtio_rxq_rearm_vec()
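
virtio_rxq_rearm_vec() refills RX descriptors a fixed batch at a time: compute the start slot with the mask (line 28), write RTE_VIRTIO_VPMD_RX_REARM_THRESH descriptors, then update vq_avail_idx and vq_free_cnt once per batch and publish (lines 53-55). The bookkeeping reduces to a sketch like this (hypothetical struct, made-up batch size of 32):

    #include <stdint.h>

    #define REARM_THRESH 32 /* stand-in for RTE_VIRTIO_VPMD_RX_REARM_THRESH */

    struct demo_vq {
        uint16_t avail_idx; /* free-running, masked on use */
        uint16_t free_cnt;
        uint16_t nentries;  /* power of two */
    };

    /* Batch refill bookkeeping: one index/credit update per batch, not
     * per buffer, which is what makes the vectorized rearm cheap. */
    static int
    demo_rearm(struct demo_vq *vq)
    {
        if (vq->free_cnt < REARM_THRESH)
            return -1;                  /* not enough free slots */
        uint16_t slot = vq->avail_idx & (vq->nentries - 1);
        (void)slot; /* real code writes THRESH descriptors starting here */
        vq->avail_idx += REARM_THRESH;
        vq->free_cnt -= REARM_THRESH;
        return 0;                       /* then publish the avail index */
    }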
|
virtio_ethdev.c
    151  struct virtqueue *vq = cvq->vq;  in virtio_send_command_packed()  local
    173  vq->vq_avail_idx -= vq->vq_nentries;  in virtio_send_command_packed()
    247  struct virtqueue *vq = cvq->vq;  in virtio_send_command_split()  local
    340  vq = cvq->vq;  in virtio_send_command()
    344  vq->vq_desc_head_idx, status, vq->hw->cvq, vq);  in virtio_send_command()
    418  vq->vq_free_cnt = vq->vq_nentries;  in virtio_init_vring()
    570  rxvq->vq = vq;  in virtio_init_queue()
    575  txvq->vq = vq;  in virtio_init_queue()
    582  cvq->vq = vq;  in virtio_init_queue()
    895  struct virtqueue *vq = rxvq->vq;  in virtio_dev_rx_queue_intr_enable()  local
    [all …]
|
virtio_rxtx_simple_sse.c
    44   struct virtqueue *vq = rxvq->vq;  in virtio_recv_pkts_vec()  local
    45   struct virtio_hw *hw = vq->hw;  in virtio_recv_pkts_vec()
    78   (uint16_t)-vq->hw->vtnet_hdr_size,  in virtio_recv_pkts_vec()
    79   0, (uint16_t)-vq->hw->vtnet_hdr_size,  in virtio_recv_pkts_vec()
    88   nb_used = virtqueue_nused(vq);  in virtio_recv_pkts_vec()
    96   desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));  in virtio_recv_pkts_vec()
    98   sw_ring = &vq->sw_ring[desc_idx];  in virtio_recv_pkts_vec()
    99   sw_ring_end = &vq->sw_ring[vq->vq_nentries];  in virtio_recv_pkts_vec()
    105  if (unlikely(virtqueue_kick_prepare(vq)))  in virtio_recv_pkts_vec()
    106  virtqueue_notify(vq);  in virtio_recv_pkts_vec()
    [all …]
|
virtio_rxtx_simple_altivec.c
    44   struct virtqueue *vq = rxvq->vq;  in virtio_recv_pkts_vec()  local
    45   struct virtio_hw *hw = vq->hw;  in virtio_recv_pkts_vec()
    77   (uint16_t)-vq->hw->vtnet_hdr_size, 0,  in virtio_recv_pkts_vec()
    78   (uint16_t)-vq->hw->vtnet_hdr_size, 0,  in virtio_recv_pkts_vec()
    88   nb_used = virtqueue_nused(vq);  in virtio_recv_pkts_vec()
    98   desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));  in virtio_recv_pkts_vec()
    100  sw_ring = &vq->sw_ring[desc_idx];  in virtio_recv_pkts_vec()
    101  sw_ring_end = &vq->sw_ring[vq->vq_nentries];  in virtio_recv_pkts_vec()
    107  if (unlikely(virtqueue_kick_prepare(vq)))  in virtio_recv_pkts_vec()
    108  virtqueue_notify(vq);  in virtio_recv_pkts_vec()
    [all …]
|
virtio_rxtx_simple_neon.c
    44   struct virtqueue *vq = rxvq->vq;  in virtio_recv_pkts_vec()  local
    45   struct virtio_hw *hw = vq->hw;  in virtio_recv_pkts_vec()
    88   nb_used = virtqueue_nused(vq);  in virtio_recv_pkts_vec()
    96   desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));  in virtio_recv_pkts_vec()
    97   rused = &vq->vq_split.ring.used->ring[desc_idx];  in virtio_recv_pkts_vec()
    98   sw_ring = &vq->sw_ring[desc_idx];  in virtio_recv_pkts_vec()
    99   sw_ring_end = &vq->sw_ring[vq->vq_nentries];  in virtio_recv_pkts_vec()
    105  if (unlikely(virtqueue_kick_prepare(vq)))  in virtio_recv_pkts_vec()
    106  virtqueue_notify(vq);  in virtio_recv_pkts_vec()
    207  vq->vq_used_cons_idx += nb_pkts_received;  in virtio_recv_pkts_vec()
    [all …]
|
virtio_pci.c
    35   check_vq_phys_addr_ok(struct virtqueue *vq)  in check_vq_phys_addr_ok()  argument
    41   if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>  in check_vq_phys_addr_ok()
    216  if (!check_vq_phys_addr_ok(vq))  in legacy_setup_queue()
    369  if (!check_vq_phys_addr_ok(vq))  in modern_setup_queue()
    372  desc_addr = vq->vq_ring_mem;  in modern_setup_queue()
    375  ring[vq->vq_nentries]),  in modern_setup_queue()
    398  vq->notify_addr, notify_off);  in modern_setup_queue()
    424  rte_write16(vq->vq_queue_index, vq->notify_addr);  in modern_notify_queue()
    436  ((uint32_t)vq->vq_avail_idx << 16) |  in modern_notify_queue()
    437  vq->vq_queue_index;  in modern_notify_queue()
    [all …]
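
Lines 424 and 436-437 show the two notify flavors of the modern (virtio 1.x) transport: normally the driver writes the 16-bit queue index to the queue's notify address; when notification data is negotiated, it writes a 32-bit word that also carries the next avail index in the high half. A sketch of that encoding, assuming the split-ring case:

    #include <stdint.h>

    /* Shape of the notification-data word built at lines 436-437:
     * high 16 bits = next avail index, low 16 bits = queue index. */
    static inline uint32_t
    notify_data(uint16_t avail_idx, uint16_t queue_index)
    {
        return ((uint32_t)avail_idx << 16) | queue_index;
    }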
|
/f-stack/dpdk/lib/librte_vhost/
vhost.c
    379   vq->log_guest_addr = translate_log_addr(dev, vq,  in log_translate()
    909   if (!vq)  in rte_vhost_get_vhost_vring()
    1252  if (!vq)  in rte_vhost_vring_call()
    1278  if (!vq)  in rte_vhost_avail_entries()
    1307  vhost_avail_event(vq) = vq->last_avail_idx;  in vhost_enable_notify_split()
    1370  if (!vq)  in rte_vhost_enable_guest_notification()
    1408  if (!vq)  in rte_vhost_log_used_vring()
    1485  if (!vq)  in rte_vhost_get_vring_base()
    1514  if (!vq)  in rte_vhost_set_vring_base()
    1547  if (!vq)  in rte_vhost_get_vring_base_from_inflight()
    [all …]
|
iotlb.c
    129  vq->iotlb_cache_nr = 0;  in vhost_user_iotlb_cache_remove_all()
    148  vq->iotlb_cache_nr--;  in vhost_user_iotlb_cache_random_evict()
    195  vq->iotlb_cache_nr++;  in vhost_user_iotlb_cache_insert()
    201  vq->iotlb_cache_nr++;  in vhost_user_iotlb_cache_insert()
    229  vq->iotlb_cache_nr--;  in vhost_user_iotlb_cache_remove()
    292  if (vq->iotlb_pool) {  in vhost_user_iotlb_init()
    297  vhost_user_iotlb_flush_all(vq);  in vhost_user_iotlb_init()
    308  TAILQ_INIT(&vq->iotlb_list);  in vhost_user_iotlb_init()
    317  if (vq->iotlb_pool)  in vhost_user_iotlb_init()
    325  if (!vq->iotlb_pool) {  in vhost_user_iotlb_init()
    [all …]
|
virtio_net.c
    95    if (used_idx + vq->shadow_used_idx <= vq->size) {  in flush_shadow_used_ring_split()
    109   vq->last_used_idx += vq->shadow_used_idx;  in flush_shadow_used_ring_split()
    141   vq->last_used_idx += vq->shadow_used_idx;  in async_flush_shadow_used_ring_split()
    277   vq->shadow_last_used_idx = vq->last_used_idx;  in vhost_shadow_dequeue_batch_packed_inorder()
    300   vq->shadow_last_used_idx = vq->last_used_idx;  in vhost_shadow_dequeue_batch_packed()
    353   vq->desc_packed[vq->last_used_idx].len = 0;  in vhost_shadow_dequeue_single_packed()
    558   vq->desc[idx].addr, vq->desc[idx].len);  in fill_vec_buf_split()
    903   vq->batch_copy_nb_elems >= vq->size)) {  in copy_mbuf_to_desc()
    1215  rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);  in virtio_dev_rx_split()
    1508  rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);  in virtio_dev_rx_async_submit_split()
    [all …]
|
iotlb.h
    13  vhost_user_iotlb_rd_lock(struct vhost_virtqueue *vq)  in vhost_user_iotlb_rd_lock()  argument
    15  rte_rwlock_read_lock(&vq->iotlb_lock);  in vhost_user_iotlb_rd_lock()
    19  vhost_user_iotlb_rd_unlock(struct vhost_virtqueue *vq)  in vhost_user_iotlb_rd_unlock()  argument
    21  rte_rwlock_read_unlock(&vq->iotlb_lock);  in vhost_user_iotlb_rd_unlock()
    25  vhost_user_iotlb_wr_lock(struct vhost_virtqueue *vq)  in vhost_user_iotlb_wr_lock()  argument
    27  rte_rwlock_write_lock(&vq->iotlb_lock);  in vhost_user_iotlb_wr_lock()
    31  vhost_user_iotlb_wr_unlock(struct vhost_virtqueue *vq)  in vhost_user_iotlb_wr_unlock()  argument
    33  rte_rwlock_write_unlock(&vq->iotlb_lock);  in vhost_user_iotlb_wr_unlock()
    36  void vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
    39  void vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
    [all …]
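
These shims wrap an rte_rwlock: datapath address translations take iotlb_lock shared, while IOTLB cache updates driven by vhost-user messages take it exclusive. The same pattern, made runnable with pthreads instead of DPDK so the shape stands on its own (struct and helpers are illustrative):

    #include <pthread.h>
    #include <stdint.h>

    struct demo_vq {
        pthread_rwlock_t iotlb_lock;
        /* ... cache entries ... */
    };

    /* Reader side: many datapath threads may translate concurrently. */
    static uint64_t
    demo_translate(struct demo_vq *vq, uint64_t iova)
    {
        pthread_rwlock_rdlock(&vq->iotlb_lock);  /* rd_lock()   */
        uint64_t vva = iova; /* placeholder for the cache walk */
        pthread_rwlock_unlock(&vq->iotlb_lock);  /* rd_unlock() */
        return vva;
    }

    /* Writer side: cache insert/remove runs under the exclusive lock. */
    static void
    demo_cache_insert(struct demo_vq *vq /* , entry ... */)
    {
        pthread_rwlock_wrlock(&vq->iotlb_lock);  /* wr_lock()   */
        /* mutate the cache list here */
        pthread_rwlock_unlock(&vq->iotlb_lock);  /* wr_unlock() */
    }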
|
vhost.h
    411  vq->last_used_idx += num;  in vq_inc_last_used_packed()
    412  if (vq->last_used_idx >= vq->size) {  in vq_inc_last_used_packed()
    414  vq->last_used_idx -= vq->size;  in vq_inc_last_used_packed()
    422  if (vq->last_avail_idx >= vq->size) {  in vq_inc_last_avail_packed()
    424  vq->last_avail_idx -= vq->size;  in vq_inc_last_avail_packed()
    469  __vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset,  in vhost_log_cache_used_vring()
    737  vq->used->idx:vq->last_used_idx;  in vhost_vring_call_split()
    745  vhost_used_event(vq),  in vhost_vring_call_split()
    783  new = vq->last_used_idx;  in vhost_vring_call_packed()
    805  old -= vq->size;  in vhost_vring_call_packed()
    [all …]
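
vq_inc_last_used_packed() and vq_inc_last_avail_packed() advance packed-ring cursors; the line elided between 412 and 414 is where the wrap counter flips (the same flip is visible verbatim in vhost_blk.c further down). A self-contained sketch of the idiom:

    #include <stdbool.h>
    #include <stdint.h>

    struct demo_pq {
        uint16_t last_used_idx;
        uint16_t size;          /* need not be a power of two */
        bool used_wrap_counter; /* parity of completed ring passes */
    };

    /* Packed-ring advance: past the ring size the index wraps by
     * subtraction and the wrap counter flips; the counter is what makes
     * a descriptor's AVAIL/USED flag bits meaningful on each pass. */
    static void
    demo_inc_last_used(struct demo_pq *q, uint16_t num)
    {
        q->last_used_idx += num;
        if (q->last_used_idx >= q->size) {
            q->used_wrap_counter = !q->used_wrap_counter;
            q->last_used_idx -= q->size;
        }
    }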
|
vhost_user.c
    421   if (vq->size & (vq->size - 1)) {  in vhost_user_set_vring_num()
    516   memcpy(vq, old_vq, sizeof(*vq));  in numa_realloc()
    721   if (vq->desc && vq->avail && vq->used)  in translate_ring_addresses()
    765   if (vq->last_used_idx != vq->used->idx) {  in translate_ring_addresses()
    769   vq->last_used_idx, vq->used->idx);  in translate_ring_addresses()
    770   vq->last_used_idx = vq->used->idx;  in translate_ring_addresses()
    771   vq->last_avail_idx = vq->used->idx;  in translate_ring_addresses()
    860   vq->last_used_idx = vq->last_avail_idx;  in vhost_user_set_vring_base()
    1252  if (vq->desc || vq->avail || vq->used) {  in vhost_user_set_mem_table()
    1298  rings_ok = vq->desc && vq->avail && vq->used;  in vq_is_ready()
    [all …]
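
The check at line 421 is the standard power-of-two test: `n & (n - 1)` clears the lowest set bit, so it is nonzero exactly when n is not a power of two. Split virtqueues require power-of-two sizes (the masks seen elsewhere in these hits depend on it); a tiny standalone version:

    #include <stdbool.h>
    #include <stdint.h>

    /* Zero exactly when n has a single set bit; treat 0 as invalid too. */
    static inline bool
    ring_size_ok(uint32_t n)
    {
        return n != 0 && (n & (n - 1)) == 0;
    }
    /* e.g. ring_size_ok(256) -> true, ring_size_ok(384) -> false */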
|
vdpa.c
    137  struct vhost_virtqueue *vq;  in rte_vdpa_relay_vring_used()  local
    156  vq = dev->virtqueue[qid];  in rte_vdpa_relay_vring_used()
    157  idx = vq->used->idx;  in rte_vdpa_relay_vring_used()
    163  vq->used->ring[idx & (vq->size - 1)] =  in rte_vdpa_relay_vring_used()
    166  desc_id = vq->used->ring[idx & (vq->size - 1)].id;  in rte_vdpa_relay_vring_used()
    167  desc_ring = vq->desc;  in rte_vdpa_relay_vring_used()
    168  nr_descs = vq->size;  in rte_vdpa_relay_vring_used()
    180  vhost_iova_to_vva(dev, vq,  in rte_vdpa_relay_vring_used()
    188  vq->desc[desc_id].addr,  in rte_vdpa_relay_vring_used()
    189  vq->desc[desc_id].len);  in rte_vdpa_relay_vring_used()
    [all …]
|
/f-stack/dpdk/drivers/crypto/virtio/
virtqueue.h
    107  return vq->vq_free_cnt == 0;  in virtqueue_full()
    110  #define VIRTQUEUE_NUSED(vq) \  argument
    111  ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
    117  vq->vq_ring.avail->idx = vq->vq_avail_idx;  in vq_update_avail_idx()
    131  avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));  in vq_update_avail_ring()
    134  vq->vq_avail_idx++;  in vq_update_avail_ring()
    151  VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);  in virtqueue_notify()
    165  (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
    166  (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
    167  (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
    [all …]
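
VIRTQUEUE_NUSED (lines 110-111) counts in-flight buffers by subtracting two free-running 16-bit counters; because the result is reduced modulo 2^16, it stays correct across counter wraparound as long as fewer than 65536 entries are outstanding. A checkable version of the arithmetic:

    #include <assert.h>
    #include <stdint.h>

    /* Same arithmetic as VIRTQUEUE_NUSED: the cast back to uint16_t makes
     * the subtraction modulo 2^16, so wraparound is harmless. */
    static inline uint16_t
    nused(uint16_t used_idx, uint16_t used_cons_idx)
    {
        return (uint16_t)(used_idx - used_cons_idx);
    }

    int main(void)
    {
        assert(nused(10, 5) == 5);
        /* device index wrapped past 65535: 65534 -> 2 means 4 buffers */
        assert(nused(2, 65534) == 4);
        return 0;
    }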
|
virtio_cryptodev.c
    205   vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;  in virtio_crypto_send_command()
    216   while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {  in virtio_crypto_send_command()
    221   while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {  in virtio_crypto_send_command()
    236   vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;  in virtio_crypto_send_command()
    245   vq->vq_free_cnt, vq->vq_desc_head_idx);  in virtio_crypto_send_command()
    273   if (vq) {  in virtio_crypto_queue_release()
    376   vq->hw = hw;  in virtio_crypto_queue_setup()
    430   *pvq = vq;  in virtio_crypto_queue_setup()
    961   "vq = %p", vq->vq_desc_head_idx, vq);  in virtio_crypto_sym_clear_session()
    1019  vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;  in virtio_crypto_sym_clear_session()
    [all …]
|
virtio_rxtx.c
    17   dp = &vq->vq_ring.desc[desc_idx];  in vq_ring_free_chain()
    18   dxp = &vq->vq_descx[desc_idx];  in vq_ring_free_chain()
    19   vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);  in vq_ring_free_chain()
    36   dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];  in vq_ring_free_chain()
    58   & (vq->vq_nentries - 1));  in virtqueue_dequeue_burst_rx()
    66   vq->vq_used_cons_idx);  in virtqueue_dequeue_burst_rx()
    97   vq->packets_received_total++;  in virtqueue_dequeue_burst_rx()
    102  vq->vq_used_cons_idx++;  in virtqueue_dequeue_burst_rx()
    375  vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);  in virtio_crypto_vring_start()
    376  vq->vq_free_cnt = vq->vq_nentries;  in virtio_crypto_vring_start()
    [all …]
|
virtqueue.c
    14  virtqueue_disable_intr(struct virtqueue *vq)  in virtqueue_disable_intr()  argument
    21  vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;  in virtqueue_disable_intr()
    25  virtqueue_detatch_unused(struct virtqueue *vq)  in virtqueue_detatch_unused()  argument
    31  if (vq != NULL)  in virtqueue_detatch_unused()
    32  for (idx = 0; idx < vq->vq_nentries; idx++) {  in virtqueue_detatch_unused()
    33  cop = vq->vq_descx[idx].crypto_op;  in virtqueue_detatch_unused()
    40  vq->vq_descx[idx].crypto_op = NULL;  in virtqueue_detatch_unused()
|
virtio_pci.c
    37   check_vq_phys_addr_ok(struct virtqueue *vq)  in check_vq_phys_addr_ok()  argument
    43   if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>  in check_vq_phys_addr_ok()
    151  rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);  in modern_set_queue_irq()
    169  if (!check_vq_phys_addr_ok(vq))  in modern_setup_queue()
    172  desc_addr = vq->vq_ring_mem;  in modern_setup_queue()
    175  ring[vq->vq_nentries]),  in modern_setup_queue()
    178  rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);  in modern_setup_queue()
    188  vq->notify_addr = (void *)((uint8_t *)hw->notify_base +  in modern_setup_queue()
    198  vq->notify_addr, notify_off);  in modern_setup_queue()
    220  struct virtqueue *vq)  in modern_notify_queue()  argument
    [all …]
|
/f-stack/dpdk/examples/vhost_blk/
vhost_blk.c
    74   struct vhost_blk_queue *vq = task->vq;  in enqueue_task()  local
    105  struct vhost_blk_queue *vq = task->vq;  in enqueue_task_packed()  local
    126  if (vq->last_used_idx >= vq->vring.size) {  in enqueue_task_packed()
    127  vq->last_used_idx -= vq->vring.size;  in enqueue_task_packed()
    128  vq->used_wrap_counter = !vq->used_wrap_counter;  in enqueue_task_packed()
    414  vq->last_avail_idx -= vq->vring.size;  in submit_inflight_vq()
    444  vq->last_avail_idx = (vq->last_avail_idx + 1) % vq->vring.size;  in vhost_blk_vq_get_desc_chain_buffer_id()
    450  vq->last_avail_idx = (vq->last_avail_idx + 1) % vq->vring.size;  in vhost_blk_vq_get_desc_chain_buffer_id()
    584  vq->tasks[j].vq = vq;  in alloc_task_pool()
    656  vq->last_avail_idx = vq->last_avail_idx &  in new_device()
    [all …]
|
/f-stack/freebsd/contrib/openzfs/module/zfs/
vdev_queue.c
    435  vq->vq_last_prio = p;  in vdev_queue_class_to_issue()
    448  vq->vq_last_prio = p;  in vdev_queue_class_to_issue()
    464  vq->vq_vdev = vd;  in vdev_queue_init()
    498  vq->vq_last_offset = 0;  in vdev_queue_init()
    575  vq->vq_nia_credit = 1;  in vdev_queue_pending_add()
    577  vq->vq_nia_credit--;  in vdev_queue_pending_add()
    599  vq->vq_nia_credit = 0;  in vdev_queue_pending_remove()
    603  vq->vq_nia_credit++;  in vdev_queue_pending_remove()
    851  mutex_exit(&vq->vq_lock);  in vdev_queue_aggregate()
    889  vq->vq_io_search.io_offset = vq->vq_last_offset - 1;  in vdev_queue_io_to_issue()
    [all …]
|
/f-stack/dpdk/drivers/net/vhost/
rte_eth_vhost.c
    238  if (!vq)  in vhost_dev_xstats_reset()
    240  memset(&vq->stats, 0, sizeof(vq->stats));  in vhost_dev_xstats_reset()
    244  if (!vq)  in vhost_dev_xstats_reset()
    246  memset(&vq->stats, 0, sizeof(vq->stats));  in vhost_dev_xstats_reset()
    295  if (!vq)  in vhost_dev_xstats_get()
    308  if (!vq)  in vhost_dev_xstats_get()
    582  if (!vq) {  in eth_rxq_intr_enable()
    619  if (!vq) {  in eth_rxq_intr_disable()
    760  if (!vq)  in queue_setup()
    768  if (!vq)  in queue_setup()
    [all …]
|