/dpdk/drivers/vdpa/ifc/
ifcvf_vdpa.c
     279  hw->vring[i].desc = gpa;  in vdpa_ifcvf_start()
     286  hw->vring[i].avail = gpa;  in vdpa_ifcvf_start()
     293  hw->vring[i].used = gpa;  in vdpa_ifcvf_start()
     354  vring.callfd = -1;  in vdpa_enable_vfio_intr()
     451  vring.kickfd = -1;  in notify_relay()
     622  hw->vring[i].desc = gpa;  in m_ifcvf_start()
     734  vring.kickfd = -1;  in vring_relay()
    1111  if (vring < 0 || vring >= internal->max_queues * 2) {  in ifcvf_set_vring_state()
    1124  if (!state && hw->vring[vring].enable) {  in ifcvf_set_vring_state()
    1130  if (state && !hw->vring[vring].enable) {  in ifcvf_set_vring_state()
    [all …]
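The gpa values stored above come from translating the vring's host-virtual addresses (what vhost hands the driver) into guest-physical addresses the VF can DMA against. A minimal sketch of that translation, assuming the usual rte_vhost_get_mem_table() region walk (the driver keeps a similar hva_to_gpa() helper); error handling is trimmed:

    #include <stdint.h>
    #include <stdlib.h>
    #include <rte_vhost.h>

    /* Map a host virtual address onto the guest-physical address space
     * by scanning the vhost memory regions. Returns 0 if not found. */
    static uint64_t
    hva_to_gpa(int vid, uint64_t hva)
    {
        struct rte_vhost_memory *mem = NULL;
        uint64_t gpa = 0;
        uint32_t i;

        if (rte_vhost_get_mem_table(vid, &mem) < 0)
            return 0;

        for (i = 0; i < mem->nregions; i++) {
            struct rte_vhost_mem_region *reg = &mem->regions[i];

            if (hva >= reg->host_user_addr &&
                hva < reg->host_user_addr + reg->size) {
                gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
                break;
            }
        }

        free(mem);  /* rte_vhost_get_mem_table() allocates the table */
        return gpa;
    }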
/dpdk/drivers/net/virtio/virtio_user/
virtio_user_dev.c
      60  struct vring *vring = &dev->vrings[queue_sel];  in virtio_user_kick_queue() local
      82  state.num = vring->num;  in virtio_user_kick_queue()
     713  virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,  in virtio_user_handle_ctrl_msg() argument
     722  idx_data = vring->desc[idx_hdr].next;  in virtio_user_handle_ctrl_msg()
     727  i = vring->desc[i].next;  in virtio_user_handle_ctrl_msg()
     765  struct vring_packed *vring,  in virtio_user_handle_ctrl_msg_packed() argument
     795  vring->desc[idx_data].addr;  in virtio_user_handle_ctrl_msg_packed()
     808  vring->desc[idx_hdr].id = vring->desc[idx_status].id;  in virtio_user_handle_ctrl_msg_packed()
     852  struct vring *vring = &dev->vrings[queue_idx];  in virtio_user_handle_cq() local
     856  != vring->avail->idx) {  in virtio_user_handle_cq()
    [all …]
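The idx_data/next hops above are the standard split-ring chain walk: each descriptor with VRING_DESC_F_NEXT set names its successor. A self-contained sketch of the pattern, with the descriptor struct pared down to the spec layout; real code also bounds-checks next against the ring size:

    #include <stdint.h>

    #define VRING_DESC_F_NEXT 1

    /* Split-ring descriptor, per the virtio spec. */
    struct vring_desc {
        uint64_t addr;   /* guest-physical buffer address */
        uint32_t len;    /* buffer length */
        uint16_t flags;  /* VRING_DESC_F_* */
        uint16_t next;   /* following descriptor if F_NEXT is set */
    };

    /* Count the descriptors chained from idx_hdr; mirrors the
     * "i = vring->desc[i].next" loop in virtio_user_handle_ctrl_msg(). */
    static unsigned int
    desc_chain_len(const struct vring_desc *desc, uint16_t idx_hdr)
    {
        unsigned int n = 1;
        uint16_t i = idx_hdr;

        while (desc[i].flags & VRING_DESC_F_NEXT) {
            i = desc[i].next;
            n++;
        }
        return n;
    }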
virtio_user_dev.h
      51  struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
/dpdk/drivers/vdpa/sfc/
sfc_vdpa_ops.c
     104  struct rte_vhost_vring vring;  in sfc_vdpa_enable_vfio_intr() local
     194  vring->desc = gpa;  in sfc_vdpa_get_vring_info()
     202  vring->avail = gpa;  in sfc_vdpa_get_vring_info()
     210  vring->used = gpa;  in sfc_vdpa_get_vring_info()
     212  vring->size = vq.size;  in sfc_vdpa_get_vring_info()
     215  &vring->last_avail_idx,  in sfc_vdpa_get_vring_info()
     216  &vring->last_used_idx);  in sfc_vdpa_get_vring_info()
     226  struct sfc_vdpa_vring_info vring;  in sfc_vdpa_virtq_start() local
     721  vid, vring, state);  in sfc_vdpa_set_vring_state()
     725  if (vring < 0 || vring > vring_max) {  in sfc_vdpa_set_vring_state()
    [all …]
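The last_avail_idx/last_used_idx lines show the driver pulling the saved ring bases from the vhost library before programming its own queue state. A small sketch of that call; vring_info below is a hypothetical stand-in for the driver's sfc_vdpa_vring_info, which carries more fields:

    #include <stdint.h>
    #include <rte_vhost.h>

    /* Hypothetical per-queue state; the real driver keeps more fields. */
    struct vring_info {
        uint16_t last_avail_idx;
        uint16_t last_used_idx;
    };

    /* Fetch the saved ring indexes for queue qid of device vid. */
    static int
    fetch_vring_base(int vid, uint16_t qid, struct vring_info *info)
    {
        return rte_vhost_get_vring_base(vid, qid,
                                        &info->last_avail_idx,
                                        &info->last_used_idx);
    }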
/dpdk/examples/vhost_blk/
vhost_blk.c
      75  struct vring_used *used = vq->vring.used;  in enqueue_task()
     126  if (vq->last_used_idx >= vq->vring.size) {  in enqueue_task_packed()
     127  vq->last_used_idx -= vq->vring.size;  in enqueue_task_packed()
     183  return &vq->vring.desc[desc->next];  in vring_get_next_desc()
     192  *req_idx = (*req_idx + 1) % vq->vring.size;  in vring_get_next_desc_packed()
     193  return &vq->vring.desc_packed[*req_idx];  in vring_get_next_desc_packed()
     413  if (vq->last_avail_idx >= vq->vring.size) {  in submit_inflight_vq()
     414  vq->last_avail_idx -= vq->vring.size;  in submit_inflight_vq()
     474  uint16_t flags = vq->vring.desc_packed[  in vhost_blk_vq_is_avail()
     581  for (j = 0; j < vq->vring.size; j++) {  in alloc_task_pool()
    [all …]
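The paired compare-and-subtract at lines 126-127 and 413-414 is packed-ring index wrap: indexes run 0..size-1 and a wrap counter flips each time they cross the boundary. A minimal sketch, with idx/wrap standing in for the example's last_used_idx and used_wrap_counter fields:

    #include <stdbool.h>
    #include <stdint.h>

    /* Advance a packed-ring index by n slots, wrapping at ring_size and
     * flipping the wrap counter as the virtio 1.1 spec requires. */
    static void
    packed_idx_advance(uint16_t *idx, bool *wrap, uint16_t n,
                       uint16_t ring_size)
    {
        *idx += n;
        if (*idx >= ring_size) {
            *idx -= ring_size;
            *wrap = !*wrap;
        }
    }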
vhost_blk.h
      33  struct rte_vhost_vring vring;  member
/dpdk/drivers/vdpa/ifc/base/
ifcvf.c
     213  io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,  in ifcvf_hw_enable()
     215  io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,  in ifcvf_hw_enable()
     217  io_write64_twopart(hw->vring[i].used, &cfg->queue_used_lo,  in ifcvf_hw_enable()
     219  IFCVF_WRITE_REG16(hw->vring[i].size, &cfg->queue_size);  in ifcvf_hw_enable()
     223  (u32)hw->vring[i].last_avail_idx |  in ifcvf_hw_enable()
     224  ((u32)hw->vring[i].last_used_idx << 16);  in ifcvf_hw_enable()
     259  hw->vring[i].last_avail_idx = (u16)(ring_state >> 16);  in ifcvf_hw_disable()
     260  hw->vring[i].last_used_idx = (u16)(ring_state >> 16);  in ifcvf_hw_disable()
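The io_write64_twopart() calls exist because the virtio PCI common config exposes each 64-bit queue address as a lo/hi pair of 32-bit registers. A sketch of such a helper, assuming the rte_io.h MMIO accessors:

    #include <stdint.h>
    #include <rte_io.h>

    /* Write a 64-bit value as two 32-bit MMIO stores (low word first),
     * matching the queue_desc_lo/queue_desc_hi register split. */
    static void
    io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
    {
        rte_write32((uint32_t)val, lo);
        rte_write32((uint32_t)(val >> 32), hi);
    }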
ifcvf.h
     134  struct vring_info vring[IFCVF_MAX_QUEUES * 2];  member
/dpdk/drivers/net/virtio/
virtio_user_ethdev.c
     177  struct vring_packed *vring;  in virtio_user_setup_queue_packed() local
     183  vring = &dev->packed_vrings[queue_idx];  in virtio_user_setup_queue_packed()
     190  vring->num = vq->vq_nentries;  in virtio_user_setup_queue_packed()
     191  vring->desc = (void *)(uintptr_t)desc_addr;  in virtio_user_setup_queue_packed()
     192  vring->driver = (void *)(uintptr_t)avail_addr;  in virtio_user_setup_queue_packed()
     193  vring->device = (void *)(uintptr_t)used_addr;  in virtio_user_setup_queue_packed()
     197  for (i = 0; i < vring->num; i++)  in virtio_user_setup_queue_packed()
     198  vring->desc[i].flags = 0;  in virtio_user_setup_queue_packed()
virtio_ring.h
      91  struct vring {  struct
     152  vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,  in vring_init_split()
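vring_init_split() carves one contiguous allocation into the three split-ring sections: descriptor table, then avail ring, then used ring rounded up to the requested alignment. A self-contained sketch of that layout arithmetic, with the structs pared down to the spec fields (the extra uint16_t is the avail ring's event-suppression slot):

    #include <stdint.h>

    struct vring_desc {
        uint64_t addr;
        uint32_t len;
        uint16_t flags;
        uint16_t next;
    };

    struct vring_avail {
        uint16_t flags;
        uint16_t idx;
        uint16_t ring[];   /* num entries, then used_event */
    };

    struct vring_used_elem {
        uint32_t id;
        uint32_t len;
    };

    struct vring_used {
        uint16_t flags;
        uint16_t idx;
        struct vring_used_elem ring[];
    };

    struct vring {
        unsigned int num;
        struct vring_desc *desc;
        struct vring_avail *avail;
        struct vring_used *used;
    };

    /* Lay the three rings out back to back in the buffer at p. */
    static void
    vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
                     unsigned int num)
    {
        vr->num = num;
        vr->desc = (struct vring_desc *)p;
        vr->avail = (struct vring_avail *)
            (p + num * sizeof(struct vring_desc));
        vr->used = (struct vring_used *)
            (((uintptr_t)&vr->avail->ring[num] + sizeof(uint16_t)
              + align - 1) & ~((uintptr_t)align - 1));
    }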
virtqueue.h
     270  struct vring ring;
virtio_ethdev.c
     460  struct vring *vr = &vq->vq_split.ring;  in virtio_init_vring()
/dpdk/drivers/net/vhost/
rte_eth_vhost.c
     574  struct rte_vhost_vring vring;  in eth_rxq_intr_enable() local
     611  struct rte_vhost_vring vring;  in eth_rxq_intr_disable() local
     650  struct rte_vhost_vring vring;  in eth_vhost_install_intr() local
     695  if (vring.kickfd < 0) {  in eth_vhost_install_intr()
     909  struct rte_vhost_vring vring;  in vring_conf_update() local
     929  vring.kickfd))  in vring_conf_update()
     968  (int)vring);  in vring_state_changed()
     971  if (state->cur[vring] == enable) {  in vring_state_changed()
     975  state->cur[vring] = enable;  in vring_state_changed()
     976  state->max_vring = RTE_MAX(vring, state->max_vring);  in vring_state_changed()
    [all …]
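Rx interrupts in the vhost PMD hinge on the fds carried in rte_vhost_vring: the PMD polls the kick fd of the guest TX ring that feeds a given Rx queue, and toggles guest notification. A simplified sketch of the enable path, assuming the (rxq_idx << 1) + 1 queue-index mapping visible in the driver:

    #include <stdint.h>
    #include <rte_vhost.h>

    /* Enable interrupt-driven Rx for one queue: verify the backing vring
     * has a pollable kick fd, then ask the guest for notifications. */
    static int
    vhost_rxq_intr_enable(int vid, uint16_t rxq_idx)
    {
        struct rte_vhost_vring vring;
        uint16_t vring_idx = (uint16_t)((rxq_idx << 1) + 1);

        if (rte_vhost_get_vhost_vring(vid, vring_idx, &vring) < 0)
            return -1;
        if (vring.kickfd < 0)
            return -1;   /* nothing to wait on */

        return rte_vhost_enable_guest_notification(vid, vring_idx, 1);
    }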
/dpdk/drivers/crypto/virtio/
virtio_ring.h
      60  struct vring {  struct
     114  vring_init(struct vring *vr, unsigned int num, uint8_t *p,  in vring_init()
virtqueue.h
      65  struct vring vq_ring;  /**< vring keeping desc, used and avail */
virtio_rxtx.c
     372  struct vring *vr = &vq->vq_ring;  in virtio_crypto_vring_start()
/dpdk/lib/vhost/
vhost.c
     932  struct rte_vhost_vring *vring)  in rte_vhost_get_vhost_vring() argument
     938  if (dev == NULL || vring == NULL)  in rte_vhost_get_vhost_vring()
     949  vring->desc_packed = vq->desc_packed;  in rte_vhost_get_vhost_vring()
     950  vring->driver_event = vq->driver_event;  in rte_vhost_get_vhost_vring()
     951  vring->device_event = vq->device_event;  in rte_vhost_get_vhost_vring()
     953  vring->desc = vq->desc;  in rte_vhost_get_vhost_vring()
     954  vring->avail = vq->avail;  in rte_vhost_get_vhost_vring()
     955  vring->used = vq->used;  in rte_vhost_get_vhost_vring()
     959  vring->callfd = vq->callfd;  in rte_vhost_get_vhost_vring()
     960  vring->kickfd = vq->kickfd;  in rte_vhost_get_vhost_vring()
    [all …]
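From the application side, rte_vhost_get_vhost_vring() is the copy-out shown above: ring pointers plus the two event fds for one queue. The split and packed layouts share a union, so check the negotiated VIRTIO_F_RING_PACKED bit (via rte_vhost_get_negotiated_features()) before choosing between desc and desc_packed. A small usage sketch:

    #include <stdio.h>
    #include <stdint.h>
    #include <rte_vhost.h>

    /* Print the basic parameters of one vring of device vid. */
    static void
    dump_vring(int vid, uint16_t qid)
    {
        struct rte_vhost_vring vring;

        if (rte_vhost_get_vhost_vring(vid, qid, &vring) < 0)
            return;

        printf("queue %u: size %u, kickfd %d, callfd %d\n",
               qid, vring.size, vring.kickfd, vring.callfd);
    }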
vdpa_driver.h
      45  int (*set_vring_state)(int vid, int vring, int state);
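This is the per-driver hook that the ifc, sfc, and mlx5 entries in these results all implement: vhost passes a flat ring index (two rings per queue pair) and the driver validates it before touching hardware. A skeleton under hypothetical names (MY_MAX_QUEUES and my_queue_apply() are placeholders, not DPDK symbols):

    /* Skeleton set_vring_state implementation for a vDPA driver. */
    #define MY_MAX_QUEUES 8

    static int
    my_queue_apply(int vid, int vring, int state)
    {
        (void)vid; (void)vring; (void)state;
        return 0;   /* program the device queue here */
    }

    static int
    my_vdpa_set_vring_state(int vid, int vring, int state)
    {
        /* one RX and one TX ring per queue pair, hence the "* 2"
         * bound seen in ifcvf_set_vring_state() and
         * mlx5_vdpa_set_vring_state() above */
        if (vring < 0 || vring >= MY_MAX_QUEUES * 2)
            return -1;

        return my_queue_apply(vid, vring, state);
    }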
vdpa.c
     140  struct vring *s_vring;  in rte_vdpa_relay_vring_used()
     154  s_vring = (struct vring *)vring_m;  in rte_vdpa_relay_vring_used()
rte_vhost.h
     782  struct rte_vhost_vring *vring);
     798  struct rte_vhost_ring_inflight *vring);
/dpdk/doc/guides/prog_guide/
vhost_lib.rst
      19  * Know all the necessary information about the vring:
      23  the information it needs to know how to manipulate the vring.
     224  be called before register async data-path for vring.
     228  Register async DMA acceleration for a vhost queue after vring is enabled.
     402  Called to change the state of the vring in the actual device when vring state
     440  channels. Specifically, one vring can use multiple different DMA channels
     442  The reason of enabling one vring to use multiple DMA channels is that
     444  the same vring with their own DMA virtual channels. Besides, the number
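The guide's "register async ... after vring is enabled" note usually translates into doing the registration from the vring_state_changed callback. A sketch assuming the dmadev-era API where rte_vhost_async_channel_register() takes only the device and queue ids; earlier releases took extra ops/config arguments, so treat the signature as release-dependent:

    #include <stdint.h>
    #include <rte_vhost_async.h>

    /* Register/unregister async acceleration as the ring flips state.
     * Wire this up as the vring_state_changed member of the
     * rte_vhost_device_ops passed to rte_vhost_driver_callback_register(). */
    static int
    vring_state_changed(int vid, uint16_t queue_id, int enable)
    {
        if (enable)
            return rte_vhost_async_channel_register(vid, queue_id);

        return rte_vhost_async_channel_unregister(vid, queue_id);
    }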
/dpdk/drivers/vdpa/mlx5/
mlx5_vdpa.c
     133  mlx5_vdpa_set_vring_state(int vid, int vring, int state)  in mlx5_vdpa_set_vring_state() argument
     144  if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {  in mlx5_vdpa_set_vring_state()
     145  DRV_LOG(ERR, "Too big vring id: %d.", vring);  in mlx5_vdpa_set_vring_state()
     149  ret = mlx5_vdpa_virtq_enable(priv, vring, state);  in mlx5_vdpa_set_vring_state()
/dpdk/doc/guides/nics/
virtio.rst
      32  In Rx, packets described by the used descriptors in vring are available
      35  In Tx, packets described by the used descriptors in vring are available
      37  vring, make them available to the device, and then notify the host back
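Both directions the guide describes reduce to the same check: the device publishes completions by advancing used->idx, and the driver compares that against its private counter. A pared-down sketch (indexes are free-running uint16_t, so the subtraction handles wrap; real drivers add a read barrier before touching the entries):

    #include <stdint.h>

    struct vring_used_elem {
        uint32_t id;
        uint32_t len;
    };

    struct vring_used {
        uint16_t flags;
        uint16_t idx;   /* advanced by the device */
        struct vring_used_elem ring[];
    };

    /* How many completions has the device posted since we last looked? */
    static uint16_t
    used_ring_pending(const struct vring_used *used, uint16_t last_used_idx)
    {
        return (uint16_t)(used->idx - last_used_idx);
    }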
/dpdk/doc/guides/rel_notes/
release_16_04.rst
     439  The vhost-switch often fails to allocate mbuf when dequeue from vring because it
release_2_1.rst
     961  * **vhost: Fix enqueue/dequeue to handle chained vring descriptors.**