Lines Matching refs:vq

127 #define VIRTIO_MBUF_ADDR(mb, vq) \  argument
128 ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
130 #define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_iova) argument
137 #define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \ argument
138 (VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
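The two VIRTIO_MBUF_ADDR definitions above (source lines 127-130) pick between an indirect lookup through a per-queue byte offset into the mbuf and the plain buf_iova field; VIRTIO_MBUF_DATA_DMA_ADDR then adds the mbuf's data_off. A minimal self-contained sketch of the same address arithmetic follows; the toy_* names and the simplified structs are placeholders for illustration, not the DPDK rte_mbuf/virtqueue definitions.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for rte_mbuf and virtqueue (illustrative only). */
struct toy_mbuf {
	void *buf_addr;      /* virtual address of the buffer */
	uint64_t buf_iova;   /* IO address of the buffer */
	uint16_t data_off;   /* start of packet data within the buffer */
};

struct toy_vq {
	uintptr_t offset;    /* byte offset of the address field inside the mbuf */
};

/* Mirrors source lines 127-128: fetch the address stored at
 * (mbuf + vq->offset); the queue decides which mbuf field to read. */
static uint64_t
toy_mbuf_addr_indirect(const struct toy_mbuf *mb, const struct toy_vq *vq)
{
	return (uint64_t)(*(const uintptr_t *)((uintptr_t)mb + vq->offset));
}

/* Mirrors source line 130: use the IOVA field directly. */
static uint64_t
toy_mbuf_addr_direct(const struct toy_mbuf *mb)
{
	return mb->buf_iova;
}

int main(void)
{
	char buffer[2048];
	struct toy_mbuf mb = {
		.buf_addr = buffer, .buf_iova = 0x100000, .data_off = 128
	};
	struct toy_vq vq = { .offset = offsetof(struct toy_mbuf, buf_addr) };

	/* VIRTIO_MBUF_DATA_DMA_ADDR equivalent: base address plus data_off. */
	printf("indirect: 0x%llx\n",
	       (unsigned long long)(toy_mbuf_addr_indirect(&mb, &vq) + mb.data_off));
	printf("direct:   0x%llx\n",
	       (unsigned long long)(toy_mbuf_addr_direct(&mb) + mb.data_off));
	return 0;
}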
340 desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq) in desc_is_used() argument
344 flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers); in desc_is_used()
348 return avail == used && used == vq->vq_packed.used_wrap_counter; in desc_is_used()
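desc_is_used() (source lines 340-348) decides whether a packed-ring descriptor has been consumed by the device: the AVAIL and USED flag bits must agree with each other and with the ring's used_wrap_counter. A self-contained sketch of that check follows; the flag bit positions are those of the virtio 1.1 packed-ring layout, the memory-ordering aspect of virtqueue_fetch_flags_packed() is omitted, and the toy_* names are placeholders.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Packed-ring flag bits, as in the virtio 1.1 spec. */
#define TOY_DESC_F_AVAIL (1 << 7)
#define TOY_DESC_F_USED  (1 << 15)

/* A descriptor is "used" when its AVAIL and USED bits agree with each other
 * and with the driver's current used wrap counter (source line 348). */
static bool
toy_desc_is_used(uint16_t desc_flags, bool used_wrap_counter)
{
	bool avail = !!(desc_flags & TOY_DESC_F_AVAIL);
	bool used = !!(desc_flags & TOY_DESC_F_USED);

	return avail == used && used == used_wrap_counter;
}

int main(void)
{
	/* Device wrote back a descriptor with both bits set; the first pass
	 * around the ring (wrap counter = 1) sees it as used. */
	uint16_t flags = TOY_DESC_F_AVAIL | TOY_DESC_F_USED;

	printf("wrap=1: %d\n", toy_desc_is_used(flags, true));   /* 1 */
	printf("wrap=0: %d\n", toy_desc_is_used(flags, false));  /* 0 */
	return 0;
}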
352 vring_desc_init_packed(struct virtqueue *vq, int n) in vring_desc_init_packed() argument
356 vq->vq_packed.ring.desc[i].id = i; in vring_desc_init_packed()
357 vq->vq_descx[i].next = i + 1; in vring_desc_init_packed()
359 vq->vq_packed.ring.desc[i].id = i; in vring_desc_init_packed()
360 vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END; in vring_desc_init_packed()
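vring_desc_init_packed() (source lines 352-360) seeds each packed descriptor's id with its own index and links the shadow descriptor-extra array into a free chain, terminating the last entry with VQ_RING_DESC_CHAIN_END. A minimal sketch of the same loop over simplified arrays; TOY_CHAIN_END and the toy_* types are stand-ins, not the DPDK definitions.

#include <stdint.h>
#include <stdio.h>

#define TOY_CHAIN_END 32768  /* stand-in for VQ_RING_DESC_CHAIN_END */

struct toy_desc { uint16_t id; };
struct toy_desc_extra { uint16_t next; };

/* Assign each descriptor its own index as id and chain the shadow
 * entries 0 -> 1 -> ... -> n-1 -> CHAIN_END, as on source lines 356-360. */
static void
toy_desc_init_packed(struct toy_desc *desc, struct toy_desc_extra *descx, int n)
{
	int i;

	for (i = 0; i < n - 1; i++) {
		desc[i].id = i;
		descx[i].next = i + 1;
	}
	desc[i].id = i;
	descx[i].next = TOY_CHAIN_END;
}

int main(void)
{
	struct toy_desc desc[4];
	struct toy_desc_extra descx[4];

	toy_desc_init_packed(desc, descx, 4);
	for (int i = 0; i < 4; i++)
		printf("desc[%d]: id=%u next=%u\n", i, desc[i].id, descx[i].next);
	return 0;
}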
388 virtqueue_disable_intr_packed(struct virtqueue *vq) in virtqueue_disable_intr_packed() argument
390 if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) { in virtqueue_disable_intr_packed()
391 vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE; in virtqueue_disable_intr_packed()
392 vq->vq_packed.ring.driver->desc_event_flags = in virtqueue_disable_intr_packed()
393 vq->vq_packed.event_flags_shadow; in virtqueue_disable_intr_packed()
401 virtqueue_disable_intr_split(struct virtqueue *vq) in virtqueue_disable_intr_split() argument
403 vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_disable_intr_split()
410 virtqueue_disable_intr(struct virtqueue *vq) in virtqueue_disable_intr() argument
412 if (vtpci_packed_queue(vq->hw)) in virtqueue_disable_intr()
413 virtqueue_disable_intr_packed(vq); in virtqueue_disable_intr()
415 virtqueue_disable_intr_split(vq); in virtqueue_disable_intr()
422 virtqueue_enable_intr_packed(struct virtqueue *vq) in virtqueue_enable_intr_packed() argument
424 if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) { in virtqueue_enable_intr_packed()
425 vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE; in virtqueue_enable_intr_packed()
426 vq->vq_packed.ring.driver->desc_event_flags = in virtqueue_enable_intr_packed()
427 vq->vq_packed.event_flags_shadow; in virtqueue_enable_intr_packed()
435 virtqueue_enable_intr_split(struct virtqueue *vq) in virtqueue_enable_intr_split() argument
437 vq->vq_split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT); in virtqueue_enable_intr_split()
444 virtqueue_enable_intr(struct virtqueue *vq) in virtqueue_enable_intr() argument
446 if (vtpci_packed_queue(vq->hw)) in virtqueue_enable_intr()
447 virtqueue_enable_intr_packed(vq); in virtqueue_enable_intr()
449 virtqueue_enable_intr_split(vq); in virtqueue_enable_intr()
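The interrupt enable/disable helpers above (source lines 388-449) share one idea for the packed ring: keep a shadow copy of the driver event flags and only write desc_event_flags when the shadow value actually changes, so repeated enable/disable calls do not generate redundant stores to memory the device watches. The split ring simply sets or clears VRING_AVAIL_F_NO_INTERRUPT in avail->flags. A condensed sketch of the shadow-flag pattern follows; the toy_* names are placeholders and a single flag word stands in for the driver event suppression structure.

#include <stdint.h>
#include <stdio.h>

#define TOY_EVENT_FLAGS_ENABLE  0x0
#define TOY_EVENT_FLAGS_DISABLE 0x1

struct toy_packed_vq {
	uint16_t event_flags_shadow;     /* last value the driver wrote */
	volatile uint16_t *driver_flags; /* shared with the device */
};

/* Write the shared flags only on an actual state change,
 * mirroring the checks on source lines 390-393 and 424-427. */
static void
toy_set_intr(struct toy_packed_vq *vq, uint16_t wanted)
{
	if (vq->event_flags_shadow != wanted) {
		vq->event_flags_shadow = wanted;
		*vq->driver_flags = wanted;
	}
}

int main(void)
{
	uint16_t shared = TOY_EVENT_FLAGS_ENABLE;
	struct toy_packed_vq vq = {
		.event_flags_shadow = TOY_EVENT_FLAGS_ENABLE,
		.driver_flags = &shared,
	};

	toy_set_intr(&vq, TOY_EVENT_FLAGS_DISABLE); /* writes shared once */
	toy_set_intr(&vq, TOY_EVENT_FLAGS_DISABLE); /* no-op: shadow matches */
	toy_set_intr(&vq, TOY_EVENT_FLAGS_ENABLE);  /* writes shared again */
	printf("shared flags: %u\n", (unsigned)shared);
	return 0;
}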
455 void virtqueue_dump(struct virtqueue *vq);
459 struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
462 void virtqueue_rxvq_flush(struct virtqueue *vq);
464 int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
466 int virtqueue_txvq_reset_packed(struct virtqueue *vq);
469 virtqueue_full(const struct virtqueue *vq) in virtqueue_full() argument
471 return vq->vq_free_cnt == 0; in virtqueue_full()
487 virtqueue_nused(const struct virtqueue *vq) in virtqueue_nused() argument
491 if (vq->hw->weak_barriers) { in virtqueue_nused()
500 idx = vq->vq_split.ring.used->idx; in virtqueue_nused()
503 idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, in virtqueue_nused()
507 idx = vq->vq_split.ring.used->idx; in virtqueue_nused()
510 return idx - vq->vq_used_cons_idx; in virtqueue_nused()
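virtqueue_nused() (source lines 487-510) reports how many split-ring entries the device has completed: it reads the device-written used->idx (with an acquire load when only weak barriers are available, otherwise with an explicit barrier and a plain load) and subtracts the driver's used consumer index. Both indexes are free-running uint16_t counters, so the subtraction stays correct across wraparound. A minimal sketch of just that arithmetic, with the memory-ordering details left out:

#include <stdint.h>
#include <stdio.h>

/* Number of completed-but-unprocessed entries, as on source line 510.
 * uint16_t wraparound keeps "device index minus consumer index" correct
 * even after either counter has wrapped past 65535. */
static uint16_t
toy_nused(uint16_t used_idx, uint16_t used_cons_idx)
{
	return (uint16_t)(used_idx - used_cons_idx);
}

int main(void)
{
	printf("%u\n", toy_nused(10, 7));     /* 3 */
	printf("%u\n", toy_nused(3, 65533));  /* 6: device index wrapped */
	return 0;
}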
513 void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
514 void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
515 void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
519 vq_update_avail_idx(struct virtqueue *vq) in vq_update_avail_idx() argument
521 if (vq->hw->weak_barriers) { in vq_update_avail_idx()
530 vq->vq_split.ring.avail->idx = vq->vq_avail_idx; in vq_update_avail_idx()
532 __atomic_store_n(&vq->vq_split.ring.avail->idx, in vq_update_avail_idx()
533 vq->vq_avail_idx, __ATOMIC_RELEASE); in vq_update_avail_idx()
537 vq->vq_split.ring.avail->idx = vq->vq_avail_idx; in vq_update_avail_idx()
542 vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx) in vq_update_avail_ring() argument
552 avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1)); in vq_update_avail_ring()
553 if (unlikely(vq->vq_split.ring.avail->ring[avail_idx] != desc_idx)) in vq_update_avail_ring()
554 vq->vq_split.ring.avail->ring[avail_idx] = desc_idx; in vq_update_avail_ring()
555 vq->vq_avail_idx++; in vq_update_avail_ring()
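vq_update_avail_ring() and vq_update_avail_idx() (source lines 519-555) implement the split-ring publish sequence: write the descriptor head index into the next avail ring slot (masking the free-running vq_avail_idx by the ring size, which is a power of two, and skipping the store if the slot already holds that value), bump vq_avail_idx, and later publish it to avail->idx with release semantics or a write barrier so the device sees the ring entry before the new index. A compact sketch of that sequence using C11 atomics in place of the __atomic builtins; the toy_* types and TOY_NENTRIES are illustrative only.

#include <stdint.h>
#include <stdatomic.h>
#include <stdio.h>

#define TOY_NENTRIES 256  /* ring size; must be a power of two */

struct toy_split_vq {
	uint16_t avail_ring[TOY_NENTRIES]; /* shared: descriptor head indexes */
	_Atomic uint16_t avail_idx_shared; /* shared: published avail->idx */
	uint16_t avail_idx;                /* driver-private running counter */
};

/* Source lines 552-555: place the descriptor in the next slot and
 * advance the private counter (the shared index is not touched yet). */
static void
toy_update_avail_ring(struct toy_split_vq *vq, uint16_t desc_idx)
{
	uint16_t slot = vq->avail_idx & (TOY_NENTRIES - 1);

	vq->avail_ring[slot] = desc_idx;
	vq->avail_idx++;
}

/* Source lines 530-533: publish the counter with release ordering so the
 * device observes the ring entries written above before the new index. */
static void
toy_update_avail_idx(struct toy_split_vq *vq)
{
	atomic_store_explicit(&vq->avail_idx_shared, vq->avail_idx,
			      memory_order_release);
}

int main(void)
{
	struct toy_split_vq vq = { .avail_idx = 0 };

	toy_update_avail_ring(&vq, 5);
	toy_update_avail_ring(&vq, 9);
	toy_update_avail_idx(&vq);
	printf("published avail idx: %u\n",
	       (unsigned)atomic_load_explicit(&vq.avail_idx_shared,
					      memory_order_acquire));
	return 0;
}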
559 virtqueue_kick_prepare(struct virtqueue *vq) in virtqueue_kick_prepare() argument
565 virtio_mb(vq->hw->weak_barriers); in virtqueue_kick_prepare()
566 return !(vq->vq_split.ring.used->flags & VRING_USED_F_NO_NOTIFY); in virtqueue_kick_prepare()
570 virtqueue_kick_prepare_packed(struct virtqueue *vq) in virtqueue_kick_prepare_packed() argument
577 virtio_mb(vq->hw->weak_barriers); in virtqueue_kick_prepare_packed()
578 flags = vq->vq_packed.ring.device->desc_event_flags; in virtqueue_kick_prepare_packed()
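virtqueue_kick_prepare() and its packed counterpart (source lines 559-578) decide whether the device actually needs a doorbell: after a full barrier, so the just-published index is globally visible before the device's flags are read, the split variant notifies only when the device has not set VRING_USED_F_NO_NOTIFY, while the packed variant reads the device event flags instead (its comparison against the event-suppression values falls on lines that do not match vq). A sketch of the split-ring check, with a C11 seq_cst fence standing in for virtio_mb() and toy_* names as placeholders:

#include <stdint.h>
#include <stdbool.h>
#include <stdatomic.h>
#include <stdio.h>

#define TOY_USED_F_NO_NOTIFY 1  /* device asks the driver not to kick */

/* Source lines 565-566: order the avail-index publish before reading the
 * device's notification-suppression flag, then kick only if allowed. */
static bool
toy_kick_prepare(const volatile uint16_t *used_flags)
{
	atomic_thread_fence(memory_order_seq_cst); /* stand-in for virtio_mb() */
	return !(*used_flags & TOY_USED_F_NO_NOTIFY);
}

int main(void)
{
	uint16_t flags = 0;

	printf("kick? %d\n", toy_kick_prepare(&flags)); /* 1: notify */
	flags = TOY_USED_F_NO_NOTIFY;
	printf("kick? %d\n", toy_kick_prepare(&flags)); /* 0: suppressed */
	return 0;
}

When this returns true, the driver rings the doorbell through the notify_queue hook shown at source line 590.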
588 virtqueue_notify(struct virtqueue *vq) in virtqueue_notify() argument
590 VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq); in virtqueue_notify()
594 #define VIRTQUEUE_DUMP(vq) do { \ argument
596 used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
598 nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
599 if (vtpci_packed_queue((vq)->hw)) { \
603 (vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
604 (vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \
605 (vq)->vq_packed.used_wrap_counter); \
612 (vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \
613 (vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \
614 __atomic_load_n(&(vq)->vq_split.ring.used->idx, __ATOMIC_RELAXED), \
615 (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
618 #define VIRTQUEUE_DUMP(vq) do { } while (0) argument
694 struct virtqueue *vq = txvq->vq; in virtqueue_enqueue_xmit_packed() local
697 int16_t head_size = vq->hw->vtnet_hdr_size; in virtqueue_enqueue_xmit_packed()
703 id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx; in virtqueue_enqueue_xmit_packed()
705 dxp = &vq->vq_descx[id]; in virtqueue_enqueue_xmit_packed()
709 head_idx = vq->vq_avail_idx; in virtqueue_enqueue_xmit_packed()
712 start_dp = vq->vq_packed.ring.desc; in virtqueue_enqueue_xmit_packed()
714 head_dp = &vq->vq_packed.ring.desc[idx]; in virtqueue_enqueue_xmit_packed()
716 head_flags |= vq->vq_packed.cached_flags; in virtqueue_enqueue_xmit_packed()
725 if (!vq->hw->has_tx_offload) in virtqueue_enqueue_xmit_packed()
740 head_flags |= vq->vq_packed.cached_flags; in virtqueue_enqueue_xmit_packed()
752 start_dp[idx].len = vq->hw->vtnet_hdr_size; in virtqueue_enqueue_xmit_packed()
755 if (idx >= vq->vq_nentries) { in virtqueue_enqueue_xmit_packed()
756 idx -= vq->vq_nentries; in virtqueue_enqueue_xmit_packed()
757 vq->vq_packed.cached_flags ^= in virtqueue_enqueue_xmit_packed()
762 virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload); in virtqueue_enqueue_xmit_packed()
767 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq); in virtqueue_enqueue_xmit_packed()
777 flags |= vq->vq_packed.cached_flags; in virtqueue_enqueue_xmit_packed()
782 if (idx >= vq->vq_nentries) { in virtqueue_enqueue_xmit_packed()
783 idx -= vq->vq_nentries; in virtqueue_enqueue_xmit_packed()
784 vq->vq_packed.cached_flags ^= in virtqueue_enqueue_xmit_packed()
793 if (++idx >= vq->vq_nentries) { in virtqueue_enqueue_xmit_packed()
794 idx -= vq->vq_nentries; in virtqueue_enqueue_xmit_packed()
795 vq->vq_packed.cached_flags ^= in virtqueue_enqueue_xmit_packed()
800 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed); in virtqueue_enqueue_xmit_packed()
801 vq->vq_avail_idx = idx; in virtqueue_enqueue_xmit_packed()
804 vq->vq_desc_head_idx = dxp->next; in virtqueue_enqueue_xmit_packed()
805 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) in virtqueue_enqueue_xmit_packed()
806 vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END; in virtqueue_enqueue_xmit_packed()
810 vq->hw->weak_barriers); in virtqueue_enqueue_xmit_packed()
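virtqueue_enqueue_xmit_packed() (source lines 694-810) repeats the same wrap step while walking descriptor slots: when the running index reaches vq_nentries it is reduced by the ring size and the cached AVAIL/USED flag pair is toggled, so descriptors written after the wrap carry the opposite wrap-counter encoding (source lines 755-758, 782-785, 793-796). A small sketch of that step in isolation; the flag values come from the packed-ring layout, everything else is an illustrative placeholder.

#include <stdint.h>
#include <stdio.h>

#define TOY_DESC_F_AVAIL (1 << 7)
#define TOY_DESC_F_USED  (1 << 15)
#define TOY_DESC_F_AVAIL_USED (TOY_DESC_F_AVAIL | TOY_DESC_F_USED)

struct toy_packed_ring_pos {
	uint16_t idx;          /* next descriptor slot to fill */
	uint16_t cached_flags; /* AVAIL/USED encoding for the current wrap */
	uint16_t nentries;     /* ring size */
};

/* Advance to the next slot; on wrap, flip both AVAIL and USED in the
 * cached flags, mirroring source lines 793-796. */
static void
toy_next_desc(struct toy_packed_ring_pos *pos)
{
	if (++pos->idx >= pos->nentries) {
		pos->idx -= pos->nentries;
		pos->cached_flags ^= TOY_DESC_F_AVAIL_USED;
	}
}

int main(void)
{
	struct toy_packed_ring_pos pos = {
		.idx = 254, .cached_flags = TOY_DESC_F_AVAIL, .nentries = 256
	};

	/* Crossing the end of the ring flips the encoding from AVAIL=1/USED=0
	 * to AVAIL=0/USED=1 for the next lap. */
	for (int i = 0; i < 4; i++) {
		toy_next_desc(&pos);
		printf("idx=%u cached_flags=0x%x\n", pos.idx, pos.cached_flags);
	}
	return 0;
}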
814 vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id) in vq_ring_free_id_packed() argument
818 dxp = &vq->vq_descx[id]; in vq_ring_free_id_packed()
819 vq->vq_free_cnt += dxp->ndescs; in vq_ring_free_id_packed()
821 if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) in vq_ring_free_id_packed()
822 vq->vq_desc_head_idx = id; in vq_ring_free_id_packed()
824 vq->vq_descx[vq->vq_desc_tail_idx].next = id; in vq_ring_free_id_packed()
826 vq->vq_desc_tail_idx = id; in vq_ring_free_id_packed()
831 virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num) in virtio_xmit_cleanup_inorder_packed() argument
834 uint16_t size = vq->vq_nentries; in virtio_xmit_cleanup_inorder_packed()
835 struct vring_packed_desc *desc = vq->vq_packed.ring.desc; in virtio_xmit_cleanup_inorder_packed()
838 used_idx = vq->vq_used_cons_idx; in virtio_xmit_cleanup_inorder_packed()
842 while (num > 0 && desc_is_used(&desc[used_idx], vq)) { in virtio_xmit_cleanup_inorder_packed()
846 dxp = &vq->vq_descx[used_idx]; in virtio_xmit_cleanup_inorder_packed()
852 vq->vq_packed.used_wrap_counter ^= 1; in virtio_xmit_cleanup_inorder_packed()
860 vq->vq_used_cons_idx = used_idx; in virtio_xmit_cleanup_inorder_packed()
861 vq->vq_free_cnt += free_cnt; in virtio_xmit_cleanup_inorder_packed()
865 virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num) in virtio_xmit_cleanup_normal_packed() argument
868 uint16_t size = vq->vq_nentries; in virtio_xmit_cleanup_normal_packed()
869 struct vring_packed_desc *desc = vq->vq_packed.ring.desc; in virtio_xmit_cleanup_normal_packed()
872 used_idx = vq->vq_used_cons_idx; in virtio_xmit_cleanup_normal_packed()
876 while (num-- && desc_is_used(&desc[used_idx], vq)) { in virtio_xmit_cleanup_normal_packed()
878 dxp = &vq->vq_descx[id]; in virtio_xmit_cleanup_normal_packed()
879 vq->vq_used_cons_idx += dxp->ndescs; in virtio_xmit_cleanup_normal_packed()
880 if (vq->vq_used_cons_idx >= size) { in virtio_xmit_cleanup_normal_packed()
881 vq->vq_used_cons_idx -= size; in virtio_xmit_cleanup_normal_packed()
882 vq->vq_packed.used_wrap_counter ^= 1; in virtio_xmit_cleanup_normal_packed()
884 vq_ring_free_id_packed(vq, id); in virtio_xmit_cleanup_normal_packed()
889 used_idx = vq->vq_used_cons_idx; in virtio_xmit_cleanup_normal_packed()
895 virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order) in virtio_xmit_cleanup_packed() argument
898 virtio_xmit_cleanup_inorder_packed(vq, num); in virtio_xmit_cleanup_packed()
900 virtio_xmit_cleanup_normal_packed(vq, num); in virtio_xmit_cleanup_packed()
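The two packed TX cleanup paths (source lines 831-900) share one loop shape: while desc_is_used() reports the descriptor at vq_used_cons_idx as consumed, free the associated mbuf chain, advance vq_used_cons_idx by the chain's descriptor count, and toggle used_wrap_counter whenever the consumer index wraps past the ring size; virtio_xmit_cleanup_packed() merely dispatches between the in-order and normal variants. A reduced sketch of that loop shape over a toy completion array (no device involved, "used" results precomputed, all names illustrative):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_NENTRIES 8

struct toy_tx_vq {
	bool used[TOY_NENTRIES];       /* stand-in for desc_is_used() results */
	uint16_t ndescs[TOY_NENTRIES]; /* descriptors per completed chain */
	uint16_t used_cons_idx;
	bool used_wrap_counter;
	uint16_t free_cnt;
};

/* Reclaim up to num completed chains, mirroring the loop shape on
 * source lines 838-861: stop at the first not-yet-used descriptor,
 * wrap the consumer index, and flip the wrap counter on wrap. */
static void
toy_xmit_cleanup_packed(struct toy_tx_vq *vq, int num)
{
	uint16_t idx = vq->used_cons_idx;

	while (num-- > 0 && vq->used[idx]) {
		uint16_t n = vq->ndescs[idx];

		/* the real code frees dxp->cookie (the mbuf chain) here */
		vq->free_cnt += n;
		idx += n;
		if (idx >= TOY_NENTRIES) {
			idx -= TOY_NENTRIES;
			vq->used_wrap_counter ^= 1;
		}
	}
	vq->used_cons_idx = idx;
}

int main(void)
{
	struct toy_tx_vq vq = {
		.used = { true, false, true, true, true, true, true, true },
		.ndescs = { 1, 1, 2, 1, 1, 1, 1, 1 },
		.used_cons_idx = 0,
		.used_wrap_counter = true,
	};

	/* Only slot 0 is reclaimed: slot 1 has not been used yet. */
	toy_xmit_cleanup_packed(&vq, 4);
	printf("cons_idx=%u wrap=%d free_cnt=%u\n",
	       vq.used_cons_idx, vq.used_wrap_counter, vq.free_cnt);
	return 0;
}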
904 virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num) in virtio_xmit_cleanup() argument
911 used_idx = (uint16_t)(vq->vq_used_cons_idx & in virtio_xmit_cleanup()
912 (vq->vq_nentries - 1)); in virtio_xmit_cleanup()
913 uep = &vq->vq_split.ring.used->ring[used_idx]; in virtio_xmit_cleanup()
916 dxp = &vq->vq_descx[desc_idx]; in virtio_xmit_cleanup()
917 vq->vq_used_cons_idx++; in virtio_xmit_cleanup()
918 vq_ring_free_chain(vq, desc_idx); in virtio_xmit_cleanup()
929 virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num) in virtio_xmit_cleanup_inorder() argument
931 uint16_t i, idx = vq->vq_used_cons_idx; in virtio_xmit_cleanup_inorder()
939 dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)]; in virtio_xmit_cleanup_inorder()
947 vq->vq_free_cnt += free_cnt; in virtio_xmit_cleanup_inorder()
948 vq->vq_used_cons_idx = idx; in virtio_xmit_cleanup_inorder()
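virtio_xmit_cleanup_inorder() (source lines 929-948) handles the split ring in in-order mode: completions arrive in submission order, so the driver walks vq_descx starting at vq_used_cons_idx, frees each cookie (the transmitted mbuf), accumulates the descriptor count it reclaims, then adds the total to vq_free_cnt and stores the new consumer index. A trimmed sketch of that walk, with the mbuf free reduced to clearing the cookie and toy_* names as placeholders:

#include <stdint.h>
#include <stdio.h>

#define TOY_NENTRIES 8  /* power of two, so "& (n - 1)" masks the index */

struct toy_desc_extra {
	void *cookie;     /* the transmitted mbuf, or NULL if already freed */
	uint16_t ndescs;  /* descriptors used by this transmission */
};

struct toy_inorder_vq {
	struct toy_desc_extra descx[TOY_NENTRIES];
	uint16_t used_cons_idx;
	uint16_t free_cnt;
};

/* Reclaim num completed transmissions in submission order, mirroring
 * the loop on source lines 931-948. */
static void
toy_xmit_cleanup_inorder(struct toy_inorder_vq *vq, uint16_t num)
{
	uint16_t idx = vq->used_cons_idx;
	uint16_t free_cnt = 0;

	for (uint16_t i = 0; i < num; i++) {
		struct toy_desc_extra *dxp =
			&vq->descx[idx++ & (TOY_NENTRIES - 1)];

		free_cnt += dxp->ndescs;
		if (dxp->cookie != NULL)
			dxp->cookie = NULL; /* the real code frees the mbuf */
	}

	vq->free_cnt += free_cnt;
	vq->used_cons_idx = idx;
}

int main(void)
{
	int pkt = 0;
	struct toy_inorder_vq vq = { .used_cons_idx = 6 };

	for (int i = 0; i < TOY_NENTRIES; i++) {
		vq.descx[i].cookie = &pkt;
		vq.descx[i].ndescs = 1;
	}

	toy_xmit_cleanup_inorder(&vq, 4); /* slots 6, 7, then wraps to 0, 1 */
	printf("cons_idx=%u free_cnt=%u\n", vq.used_cons_idx, vq.free_cnt);
	return 0;
}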