xref: /dpdk/drivers/net/virtio/virtqueue.h (revision 7be78d02)
15566a3e3SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
25566a3e3SBruce Richardson  * Copyright(c) 2010-2014 Intel Corporation
36c3169a3SBruce Richardson  */
46c3169a3SBruce Richardson 
56c3169a3SBruce Richardson #ifndef _VIRTQUEUE_H_
66c3169a3SBruce Richardson #define _VIRTQUEUE_H_
76c3169a3SBruce Richardson 
86c3169a3SBruce Richardson #include <stdint.h>
96c3169a3SBruce Richardson 
106c3169a3SBruce Richardson #include <rte_atomic.h>
116c3169a3SBruce Richardson #include <rte_memory.h>
126c3169a3SBruce Richardson #include <rte_mempool.h>
1357f90f89SMarvin Liu #include <rte_net.h>
146c3169a3SBruce Richardson 
15b4f9a45aSMaxime Coquelin #include "virtio.h"
166c3169a3SBruce Richardson #include "virtio_ring.h"
176c3169a3SBruce Richardson #include "virtio_logs.h"
18905a2469SYuanhan Liu #include "virtio_rxtx.h"
196c3169a3SBruce Richardson 
206c3169a3SBruce Richardson struct rte_mbuf;
216c3169a3SBruce Richardson 
2257f90f89SMarvin Liu #define DEFAULT_TX_FREE_THRESH 32
231982462eSMarvin Liu #define DEFAULT_RX_FREE_THRESH 32
241982462eSMarvin Liu 
2557f90f89SMarvin Liu #define VIRTIO_MBUF_BURST_SZ 64
266c3169a3SBruce Richardson /*
279230ab8dSIlya Maximets  * Per virtio_ring.h in Linux.
286c3169a3SBruce Richardson  *     For virtio_pci on SMP, we don't need to order with respect to MMIO
29240a9941SJoyce Kong  *     accesses through relaxed memory I/O windows, so thread_fence is
306c3169a3SBruce Richardson  *     sufficient.
316c3169a3SBruce Richardson  *
329230ab8dSIlya Maximets  *     For using virtio to talk to real devices (e.g. vDPA) we do need real
339230ab8dSIlya Maximets  *     barriers.
346c3169a3SBruce Richardson  */
359230ab8dSIlya Maximets static inline void
369230ab8dSIlya Maximets virtio_mb(uint8_t weak_barriers)
379230ab8dSIlya Maximets {
389230ab8dSIlya Maximets 	if (weak_barriers)
39240a9941SJoyce Kong 		rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
409230ab8dSIlya Maximets 	else
419230ab8dSIlya Maximets 		rte_mb();
429230ab8dSIlya Maximets }
439230ab8dSIlya Maximets 
449230ab8dSIlya Maximets static inline void
459230ab8dSIlya Maximets virtio_rmb(uint8_t weak_barriers)
469230ab8dSIlya Maximets {
479230ab8dSIlya Maximets 	if (weak_barriers)
48240a9941SJoyce Kong 		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
499230ab8dSIlya Maximets 	else
50f0f5d844SPhil Yang 		rte_io_rmb();
519230ab8dSIlya Maximets }
529230ab8dSIlya Maximets 
539230ab8dSIlya Maximets static inline void
549230ab8dSIlya Maximets virtio_wmb(uint8_t weak_barriers)
559230ab8dSIlya Maximets {
569230ab8dSIlya Maximets 	if (weak_barriers)
57240a9941SJoyce Kong 		rte_atomic_thread_fence(__ATOMIC_RELEASE);
589230ab8dSIlya Maximets 	else
59f0f5d844SPhil Yang 		rte_io_wmb();
609230ab8dSIlya Maximets }
616c3169a3SBruce Richardson 
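/**
 * Read the flags of a packed descriptor, with an ordering that ensures
 * the rest of the descriptor is only read after the flags have been
 * checked (acquire semantics, or rte_io_rmb() for real devices).
 */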
622c661d41SJoyce Kong static inline uint16_t
632c661d41SJoyce Kong virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
642c661d41SJoyce Kong 			      uint8_t weak_barriers)
652c661d41SJoyce Kong {
662c661d41SJoyce Kong 	uint16_t flags;
672c661d41SJoyce Kong 
682c661d41SJoyce Kong 	if (weak_barriers) {
69f1b9cf07SJoyce Kong /* x86 prefers using rte_io_rmb over __atomic_load_n as it reports
702c661d41SJoyce Kong  * a better perf (~1.5%), which comes from the branch saved by the compiler.
71f1b9cf07SJoyce Kong  * The if and else branches are identical on all platforms except Arm.
722c661d41SJoyce Kong  */
73f1b9cf07SJoyce Kong #ifdef RTE_ARCH_ARM
742c661d41SJoyce Kong 		flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
75f1b9cf07SJoyce Kong #else
76f1b9cf07SJoyce Kong 		flags = dp->flags;
77f1b9cf07SJoyce Kong 		rte_io_rmb();
782c661d41SJoyce Kong #endif
792c661d41SJoyce Kong 	} else {
802c661d41SJoyce Kong 		flags = dp->flags;
81f0f5d844SPhil Yang 		rte_io_rmb();
822c661d41SJoyce Kong 	}
832c661d41SJoyce Kong 
842c661d41SJoyce Kong 	return flags;
852c661d41SJoyce Kong }
862c661d41SJoyce Kong 
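/**
 * Write the flags of a packed descriptor, making it available to the
 * device. The descriptor payload written beforehand is made visible
 * first (release semantics, or rte_io_wmb() for real devices).
 */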
876094557dSJoyce Kong static inline void
886094557dSJoyce Kong virtqueue_store_flags_packed(struct vring_packed_desc *dp,
896094557dSJoyce Kong 			      uint16_t flags, uint8_t weak_barriers)
906094557dSJoyce Kong {
916094557dSJoyce Kong 	if (weak_barriers) {
92f1b9cf07SJoyce Kong /* x86 prefers using rte_io_wmb over __atomic_store_n as it reports
936094557dSJoyce Kong  * a better perf (~1.5%), which comes from the branch saved by the compiler.
94f1b9cf07SJoyce Kong  * The if and else branches are identical on all platforms except Arm.
956094557dSJoyce Kong  */
96f1b9cf07SJoyce Kong #ifdef RTE_ARCH_ARM
976094557dSJoyce Kong 		__atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
98f1b9cf07SJoyce Kong #else
99f1b9cf07SJoyce Kong 		rte_io_wmb();
100f1b9cf07SJoyce Kong 		dp->flags = flags;
1016094557dSJoyce Kong #endif
1026094557dSJoyce Kong 	} else {
103f0f5d844SPhil Yang 		rte_io_wmb();
1046094557dSJoyce Kong 		dp->flags = flags;
1056094557dSJoyce Kong 	}
1066094557dSJoyce Kong }
107f1b9cf07SJoyce Kong 
1086c3169a3SBruce Richardson #ifdef RTE_PMD_PACKET_PREFETCH
1096c3169a3SBruce Richardson #define rte_packet_prefetch(p)  rte_prefetch1(p)
1106c3169a3SBruce Richardson #else
1116c3169a3SBruce Richardson #define rte_packet_prefetch(p)  do {} while(0)
1126c3169a3SBruce Richardson #endif
1136c3169a3SBruce Richardson 
1146c3169a3SBruce Richardson #define VIRTQUEUE_MAX_NAME_SZ 32
1156c3169a3SBruce Richardson 
116ba55c94aSMaxime Coquelin /**
117ba55c94aSMaxime Coquelin  * Return the IOVA (or virtual address in case of virtio-user) of mbuf
118ba55c94aSMaxime Coquelin  * data buffer.
119ba55c94aSMaxime Coquelin  *
120ba55c94aSMaxime Coquelin  * The address is first cast to the word size (sizeof(uintptr_t))
121ba55c94aSMaxime Coquelin  * before being cast to uint64_t. This makes it work with any
122ba55c94aSMaxime Coquelin  * combination of word size (64 bit and 32 bit) and virtio device
123ba55c94aSMaxime Coquelin  * (virtio-pci and virtio-user).
124ba55c94aSMaxime Coquelin  */
125ba55c94aSMaxime Coquelin #define VIRTIO_MBUF_ADDR(mb, vq) \
126ba55c94aSMaxime Coquelin 	((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->mbuf_addr_offset)))
127ba55c94aSMaxime Coquelin 
128ba55c94aSMaxime Coquelin /**
129ba55c94aSMaxime Coquelin  * Return the physical address (or virtual address in case of
130ba55c94aSMaxime Coquelin  * virtio-user) of mbuf data buffer, taking care of mbuf data offset
131ba55c94aSMaxime Coquelin  */
132ba55c94aSMaxime Coquelin #define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
133ba55c94aSMaxime Coquelin 	(VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
134ba55c94aSMaxime Coquelin 
1356c3169a3SBruce Richardson #define VTNET_SQ_RQ_QUEUE_IDX 0
1366c3169a3SBruce Richardson #define VTNET_SQ_TQ_QUEUE_IDX 1
1376c3169a3SBruce Richardson #define VTNET_SQ_CQ_QUEUE_IDX 2
1386c3169a3SBruce Richardson 
1396c3169a3SBruce Richardson enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
1406c3169a3SBruce Richardson /**
1416c3169a3SBruce Richardson  * The maximum virtqueue size is 2^15. Use that value as the
1426c3169a3SBruce Richardson  * end-of-descriptor-chain terminator since it will never be a valid index
1436c3169a3SBruce Richardson  * in the descriptor table. This is used to verify we are correctly
1446c3169a3SBruce Richardson  * handling vq_free_cnt.
1456c3169a3SBruce Richardson  */
1466c3169a3SBruce Richardson #define VQ_RING_DESC_CHAIN_END 32768
1476c3169a3SBruce Richardson 
1486c3169a3SBruce Richardson /**
1496c3169a3SBruce Richardson  * Control the RX mode, i.e. promiscuous, allmulti, etc.
1506c3169a3SBruce Richardson  * All commands require an "out" sg entry containing a 1 byte
1516c3169a3SBruce Richardson  * state value, zero = disable, non-zero = enable.  Commands
1526c3169a3SBruce Richardson  * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
1536c3169a3SBruce Richardson  * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
1546c3169a3SBruce Richardson  */
1556c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_RX              0
1566c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_RX_PROMISC      0
1576c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_RX_ALLMULTI     1
1586c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_RX_ALLUNI       2
1596c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_RX_NOMULTI      3
1606c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_RX_NOUNI        4
1616c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_RX_NOBCAST      5
1626c3169a3SBruce Richardson 
1636c3169a3SBruce Richardson /**
1646c3169a3SBruce Richardson  * Control the MAC
1656c3169a3SBruce Richardson  *
1666c3169a3SBruce Richardson  * The MAC filter table is managed by the hypervisor, the guest should
1676c3169a3SBruce Richardson  * assume the size is infinite.  Filtering should be considered
1686c3169a3SBruce Richardson  * non-perfect, i.e. based on hypervisor resources, the guest may
1696c3169a3SBruce Richardson  * receive packets from sources not specified in the filter list.
1706c3169a3SBruce Richardson  *
1716c3169a3SBruce Richardson  * In addition to the class/cmd header, the TABLE_SET command requires
1726c3169a3SBruce Richardson  * two out scatterlists.  Each contains a 4 byte count of entries followed
1736c3169a3SBruce Richardson  * by a concatenated byte stream of the ETH_ALEN MAC addresses.  The
1746c3169a3SBruce Richardson  * first sg list contains unicast addresses, the second is for multicast.
1756c3169a3SBruce Richardson  * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature
1766c3169a3SBruce Richardson  * is available.
1776c3169a3SBruce Richardson  *
1786c3169a3SBruce Richardson  * The ADDR_SET command requires one out scatterlist, which contains a
1796c3169a3SBruce Richardson  * 6-byte MAC address. This functionality is present if the
1806c3169a3SBruce Richardson  * VIRTIO_NET_F_CTRL_MAC_ADDR feature is available.
1816c3169a3SBruce Richardson  */
1826c3169a3SBruce Richardson struct virtio_net_ctrl_mac {
1836c3169a3SBruce Richardson 	uint32_t entries;
18435b2d13fSOlivier Matz 	uint8_t macs[][RTE_ETHER_ADDR_LEN];
185ef5baf34SThomas Monjalon } __rte_packed;
1866c3169a3SBruce Richardson 
1876c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_MAC    1
1886c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_MAC_TABLE_SET        0
1896c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_MAC_ADDR_SET         1
1906c3169a3SBruce Richardson 
1916c3169a3SBruce Richardson /**
1926c3169a3SBruce Richardson  * Control VLAN filtering
1936c3169a3SBruce Richardson  *
1946c3169a3SBruce Richardson  * The VLAN filter table is controlled via a simple ADD/DEL interface.
1956c3169a3SBruce Richardson  * VLAN IDs not added may be filtered by the hypervisor.  Del is the
1966c3169a3SBruce Richardson  * opposite of add.  Both commands expect an out entry containing a 2
1976c3169a3SBruce Richardson  * byte VLAN ID.  VLAN filtering is available with the
1986c3169a3SBruce Richardson  * VIRTIO_NET_F_CTRL_VLAN feature bit.
1996c3169a3SBruce Richardson  */
2006c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_VLAN     2
2016c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_VLAN_ADD 0
2026c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_VLAN_DEL 1
2036c3169a3SBruce Richardson 
2040c9d6620SMaxime Coquelin /**
2050c9d6620SMaxime Coquelin  * RSS control
2060c9d6620SMaxime Coquelin  *
2070c9d6620SMaxime Coquelin  * The RSS feature configuration message is sent by the driver when
2080c9d6620SMaxime Coquelin  * VIRTIO_NET_F_RSS has been negotiated. It provides the device with
2090c9d6620SMaxime Coquelin  * hash types to use, hash key and indirection table. In this
2100c9d6620SMaxime Coquelin  * implementation, the driver only supports fixed key length (40B)
2110c9d6620SMaxime Coquelin  * and indirection table size (128 entries).
2120c9d6620SMaxime Coquelin  */
2130c9d6620SMaxime Coquelin #define VIRTIO_NET_RSS_RETA_SIZE 128
2140c9d6620SMaxime Coquelin #define VIRTIO_NET_RSS_KEY_SIZE 40
2150c9d6620SMaxime Coquelin 
2160c9d6620SMaxime Coquelin struct virtio_net_ctrl_rss {
2170c9d6620SMaxime Coquelin 	uint32_t hash_types;
2180c9d6620SMaxime Coquelin 	uint16_t indirection_table_mask;
2190c9d6620SMaxime Coquelin 	uint16_t unclassified_queue;
2200c9d6620SMaxime Coquelin 	uint16_t indirection_table[VIRTIO_NET_RSS_RETA_SIZE];
2210c9d6620SMaxime Coquelin 	uint16_t max_tx_vq;
2220c9d6620SMaxime Coquelin 	uint8_t hash_key_length;
2230c9d6620SMaxime Coquelin 	uint8_t hash_key_data[VIRTIO_NET_RSS_KEY_SIZE];
2240c9d6620SMaxime Coquelin };
2250c9d6620SMaxime Coquelin 
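/*
 * Field semantics follow the virtio specification: hash_types is a
 * bitmask of the hash types to enable, indirection_table_mask is the
 * table size minus one, unclassified_queue receives packets for which
 * no hash was computed, and max_tx_vq tells the device how many
 * transmit virtqueues the driver may use.
 */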
2267365504fSXiao Wang /*
2277365504fSXiao Wang  * Control link announce acknowledgement
2287365504fSXiao Wang  *
2297365504fSXiao Wang  * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
230*7be78d02SJosh Soref  * the driver has received the notification; the device will clear the
2317365504fSXiao Wang  * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
2327365504fSXiao Wang  * this command.
2337365504fSXiao Wang  */
2347365504fSXiao Wang #define VIRTIO_NET_CTRL_ANNOUNCE     3
2357365504fSXiao Wang #define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0
2367365504fSXiao Wang 
2376c3169a3SBruce Richardson struct virtio_net_ctrl_hdr {
2386c3169a3SBruce Richardson 	uint8_t class;
2396c3169a3SBruce Richardson 	uint8_t cmd;
240ef5baf34SThomas Monjalon } __rte_packed;
2416c3169a3SBruce Richardson 
2426c3169a3SBruce Richardson typedef uint8_t virtio_net_ctrl_ack;
2436c3169a3SBruce Richardson 
2446c3169a3SBruce Richardson #define VIRTIO_NET_OK     0
2456c3169a3SBruce Richardson #define VIRTIO_NET_ERR    1
2466c3169a3SBruce Richardson 
2476c3169a3SBruce Richardson #define VIRTIO_MAX_CTRL_DATA 2048
2486c3169a3SBruce Richardson 
2496c3169a3SBruce Richardson struct virtio_pmd_ctrl {
2506c3169a3SBruce Richardson 	struct virtio_net_ctrl_hdr hdr;
2516c3169a3SBruce Richardson 	virtio_net_ctrl_ack status;
2526c3169a3SBruce Richardson 	uint8_t data[VIRTIO_MAX_CTRL_DATA];
2536c3169a3SBruce Richardson };
2546c3169a3SBruce Richardson 
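/*
 * Illustrative control-queue usage (a sketch only; the actual send
 * helper lives in the ethdev code, not in this header). A command is
 * built in a single virtio_pmd_ctrl buffer, the data length is passed
 * alongside it, and the device writes the one-byte status back:
 *
 *	struct virtio_pmd_ctrl ctrl;
 *
 *	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
 *	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
 *	ctrl.data[0] = 1;
 *
 * Here data[0] carries the one-byte state (non-zero = enable). After
 * the command completes, ctrl.status should equal VIRTIO_NET_OK.
 */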
25501ad44fdSHuawei Xie struct vq_desc_extra {
25601ad44fdSHuawei Xie 	void *cookie;
25701ad44fdSHuawei Xie 	uint16_t ndescs;
2584c3f5822SJens Freimann 	uint16_t next;
25901ad44fdSHuawei Xie };
26001ad44fdSHuawei Xie 
2613169550fSMaxime Coquelin #define virtnet_rxq_to_vq(rxvq) container_of(rxvq, struct virtqueue, rxq)
2623169550fSMaxime Coquelin #define virtnet_txq_to_vq(txvq) container_of(txvq, struct virtqueue, txq)
2633169550fSMaxime Coquelin #define virtnet_cq_to_vq(cvq) container_of(cvq, struct virtqueue, cq)
2643169550fSMaxime Coquelin 
2656c3169a3SBruce Richardson struct virtqueue {
2666c3169a3SBruce Richardson 	struct virtio_hw  *hw; /**< virtio_hw structure pointer. */
267dfd33aa4STiwei Bie 	union {
268dfd33aa4STiwei Bie 		struct {
269dfd33aa4STiwei Bie 			/**< vring keeping desc, used and avail */
270dfd33aa4STiwei Bie 			struct vring ring;
271dfd33aa4STiwei Bie 		} vq_split;
272dfd33aa4STiwei Bie 
273dfd33aa4STiwei Bie 		struct {
274dfd33aa4STiwei Bie 			/**< vring keeping descs and events */
275dfd33aa4STiwei Bie 			struct vring_packed ring;
2764c3f5822SJens Freimann 			bool used_wrap_counter;
2778e148e49STiwei Bie 			uint16_t cached_flags; /**< cached flags for descs */
2784c3f5822SJens Freimann 			uint16_t event_flags_shadow;
279dfd33aa4STiwei Bie 		} vq_packed;
280dfd33aa4STiwei Bie 	};
2818e148e49STiwei Bie 
282dfd33aa4STiwei Bie 	uint16_t vq_used_cons_idx; /**< last consumed descriptor */
28301ad44fdSHuawei Xie 	uint16_t vq_nentries;  /**< vring desc numbers */
28401ad44fdSHuawei Xie 	uint16_t vq_free_cnt;  /**< num of desc available */
28501ad44fdSHuawei Xie 	uint16_t vq_avail_idx; /**< sync until needed */
28601ad44fdSHuawei Xie 	uint16_t vq_free_thresh; /**< free threshold */
2876c3169a3SBruce Richardson 
28897bd5372SMaxime Coquelin 	/**
28997bd5372SMaxime Coquelin 	 * Head of the free chain in the descriptor table. If
29097bd5372SMaxime Coquelin 	 * there are no free descriptors, this will be set to
29197bd5372SMaxime Coquelin 	 * VQ_RING_DESC_CHAIN_END.
29297bd5372SMaxime Coquelin 	 */
29397bd5372SMaxime Coquelin 	uint16_t  vq_desc_head_idx;
29497bd5372SMaxime Coquelin 	uint16_t  vq_desc_tail_idx;
29597bd5372SMaxime Coquelin 	uint16_t  vq_queue_index;   /**< PCI queue index */
29697bd5372SMaxime Coquelin 
2976c3169a3SBruce Richardson 	void *vq_ring_virt_mem;  /**< linear address of vring*/
2986c3169a3SBruce Richardson 	unsigned int vq_ring_size;
299ba55c94aSMaxime Coquelin 	uint16_t mbuf_addr_offset;
30001ad44fdSHuawei Xie 
301905a2469SYuanhan Liu 	union {
302905a2469SYuanhan Liu 		struct virtnet_rx rxq;
303905a2469SYuanhan Liu 		struct virtnet_tx txq;
304905a2469SYuanhan Liu 		struct virtnet_ctl cq;
305905a2469SYuanhan Liu 	};
306905a2469SYuanhan Liu 
307df6e0a06SSantosh Shukla 	rte_iova_t vq_ring_mem; /**< physical address of vring,
308e8df94b8SJianfeng Tan 	                         * or virtual address for virtio_user. */
3096c3169a3SBruce Richardson 
3106ba1f63bSYuanhan Liu 	uint16_t  *notify_addr;
31101ad44fdSHuawei Xie 	struct rte_mbuf **sw_ring;  /**< RX software ring. */
31201ad44fdSHuawei Xie 	struct vq_desc_extra vq_descx[0];
3136c3169a3SBruce Richardson };
3146c3169a3SBruce Richardson 
315*7be78d02SJosh Soref /* If multiqueue is provided by host, then we support it. */
3166c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_MQ   4
3170c9d6620SMaxime Coquelin 
3186c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET        0
3190c9d6620SMaxime Coquelin #define VIRTIO_NET_CTRL_MQ_RSS_CONFIG          1
3200c9d6620SMaxime Coquelin 
3216c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN        1
3226c3169a3SBruce Richardson #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX        0x8000
3234a92b671SStephen Hemminger 
3246c3169a3SBruce Richardson /**
3256c3169a3SBruce Richardson  * This is the first element of the scatter-gather list.  If you don't
3266c3169a3SBruce Richardson  * specify GSO or CSUM features, you can simply ignore the header.
3276c3169a3SBruce Richardson  */
3286c3169a3SBruce Richardson struct virtio_net_hdr {
3296c3169a3SBruce Richardson #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1    /**< Use csum_start,csum_offset*/
33096cb6711SOlivier Matz #define VIRTIO_NET_HDR_F_DATA_VALID 2    /**< Checksum is valid */
3316c3169a3SBruce Richardson 	uint8_t flags;
3326c3169a3SBruce Richardson #define VIRTIO_NET_HDR_GSO_NONE     0    /**< Not a GSO frame */
3336c3169a3SBruce Richardson #define VIRTIO_NET_HDR_GSO_TCPV4    1    /**< GSO frame, IPv4 TCP (TSO) */
3346c3169a3SBruce Richardson #define VIRTIO_NET_HDR_GSO_UDP      3    /**< GSO frame, IPv4 UDP (UFO) */
3356c3169a3SBruce Richardson #define VIRTIO_NET_HDR_GSO_TCPV6    4    /**< GSO frame, IPv6 TCP */
3366c3169a3SBruce Richardson #define VIRTIO_NET_HDR_GSO_ECN      0x80 /**< TCP has ECN set */
3376c3169a3SBruce Richardson 	uint8_t gso_type;
3386c3169a3SBruce Richardson 	uint16_t hdr_len;     /**< Ethernet + IP + tcp/udp hdrs */
3396c3169a3SBruce Richardson 	uint16_t gso_size;    /**< Bytes to append to hdr_len per frame */
3406c3169a3SBruce Richardson 	uint16_t csum_start;  /**< Position to start checksumming from */
3416c3169a3SBruce Richardson 	uint16_t csum_offset; /**< Offset after that to place checksum */
3426c3169a3SBruce Richardson };
3436c3169a3SBruce Richardson 
3446c3169a3SBruce Richardson /**
3456c3169a3SBruce Richardson  * This is the version of the header to use when the MRG_RXBUF
3466c3169a3SBruce Richardson  * feature has been negotiated.
3476c3169a3SBruce Richardson  */
3486c3169a3SBruce Richardson struct virtio_net_hdr_mrg_rxbuf {
3496c3169a3SBruce Richardson 	struct   virtio_net_hdr hdr;
3506c3169a3SBruce Richardson 	uint16_t num_buffers; /**< Number of merged rx buffers */
3516c3169a3SBruce Richardson };
3526c3169a3SBruce Richardson 
3536dc5de3aSStephen Hemminger /* Region reserved to allow for transmit header and indirect ring */
3546dc5de3aSStephen Hemminger #define VIRTIO_MAX_TX_INDIRECT 8
3556dc5de3aSStephen Hemminger struct virtio_tx_region {
3566dc5de3aSStephen Hemminger 	struct virtio_net_hdr_mrg_rxbuf tx_hdr;
357381f39ebSMarvin Liu 	union {
358381f39ebSMarvin Liu 		struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT];
359381f39ebSMarvin Liu 		struct vring_packed_desc
360381f39ebSMarvin Liu 			tx_packed_indir[VIRTIO_MAX_TX_INDIRECT];
361381f39ebSMarvin Liu 	} __rte_aligned(16);
3626dc5de3aSStephen Hemminger };
3636dc5de3aSStephen Hemminger 
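/**
 * Check whether a packed descriptor has been used (processed) by the
 * device: its AVAIL and USED flag bits are equal and match the ring's
 * current used wrap counter.
 */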
364e9f4feb7SJens Freimann static inline int
3652923b8f9STiwei Bie desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
366e9f4feb7SJens Freimann {
367e9f4feb7SJens Freimann 	uint16_t used, avail, flags;
368e9f4feb7SJens Freimann 
3692c661d41SJoyce Kong 	flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers);
37012e9e70cSTiwei Bie 	used = !!(flags & VRING_PACKED_DESC_F_USED);
37112e9e70cSTiwei Bie 	avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
372e9f4feb7SJens Freimann 
373dfd33aa4STiwei Bie 	return avail == used && used == vq->vq_packed.used_wrap_counter;
374a4270ea4SJens Freimann }
375a4270ea4SJens Freimann 
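/* Initialize the packed ring descriptor ids and chain the shadow
 * vq_descx entries, terminating the chain with VQ_RING_DESC_CHAIN_END.
 */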
376e9f4feb7SJens Freimann static inline void
377e9f4feb7SJens Freimann vring_desc_init_packed(struct virtqueue *vq, int n)
378e9f4feb7SJens Freimann {
379e9f4feb7SJens Freimann 	int i;
380e9f4feb7SJens Freimann 	for (i = 0; i < n - 1; i++) {
3814cdc4d98STiwei Bie 		vq->vq_packed.ring.desc[i].id = i;
382e9f4feb7SJens Freimann 		vq->vq_descx[i].next = i + 1;
383e9f4feb7SJens Freimann 	}
3844cdc4d98STiwei Bie 	vq->vq_packed.ring.desc[i].id = i;
385e9f4feb7SJens Freimann 	vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
386e9f4feb7SJens Freimann }
387e9f4feb7SJens Freimann 
3886dc5de3aSStephen Hemminger /* Chain all the descriptors in the ring with an END */
3896dc5de3aSStephen Hemminger static inline void
390f803734bSJens Freimann vring_desc_init_split(struct vring_desc *dp, uint16_t n)
3916dc5de3aSStephen Hemminger {
3926dc5de3aSStephen Hemminger 	uint16_t i;
3936dc5de3aSStephen Hemminger 
3946dc5de3aSStephen Hemminger 	for (i = 0; i < n - 1; i++)
3956dc5de3aSStephen Hemminger 		dp[i].next = (uint16_t)(i + 1);
3966dc5de3aSStephen Hemminger 	dp[i].next = VQ_RING_DESC_CHAIN_END;
3976dc5de3aSStephen Hemminger }
3986dc5de3aSStephen Hemminger 
399381f39ebSMarvin Liu static inline void
400381f39ebSMarvin Liu vring_desc_init_indirect_packed(struct vring_packed_desc *dp, int n)
401381f39ebSMarvin Liu {
402381f39ebSMarvin Liu 	int i;
403381f39ebSMarvin Liu 	for (i = 0; i < n; i++) {
404381f39ebSMarvin Liu 		dp[i].id = (uint16_t)i;
405381f39ebSMarvin Liu 		dp[i].flags = VRING_DESC_F_WRITE;
406381f39ebSMarvin Liu 	}
407381f39ebSMarvin Liu }
408381f39ebSMarvin Liu 
4096c3169a3SBruce Richardson /**
41013cd890dSTiwei Bie  * Tell the backend not to interrupt us. Implementation for packed virtqueues.
4116c3169a3SBruce Richardson  */
412c056be23SJianfeng Tan static inline void
413e9f4feb7SJens Freimann virtqueue_disable_intr_packed(struct virtqueue *vq)
414e9f4feb7SJens Freimann {
415dfd33aa4STiwei Bie 	if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
416dfd33aa4STiwei Bie 		vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
4174cdc4d98STiwei Bie 		vq->vq_packed.ring.driver->desc_event_flags =
418dfd33aa4STiwei Bie 			vq->vq_packed.event_flags_shadow;
419e9f4feb7SJens Freimann 	}
420c68fee95STiwei Bie }
421e9f4feb7SJens Freimann 
422e9f4feb7SJens Freimann /**
42313cd890dSTiwei Bie  * Tell the backend not to interrupt us. Implementation for split virtqueues.
42413cd890dSTiwei Bie  */
42513cd890dSTiwei Bie static inline void
42613cd890dSTiwei Bie virtqueue_disable_intr_split(struct virtqueue *vq)
42713cd890dSTiwei Bie {
42813cd890dSTiwei Bie 	vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
42913cd890dSTiwei Bie }
43013cd890dSTiwei Bie 
43113cd890dSTiwei Bie /**
432e9f4feb7SJens Freimann  * Tell the backend not to interrupt us.
433e9f4feb7SJens Freimann  */
434e9f4feb7SJens Freimann static inline void
435c056be23SJianfeng Tan virtqueue_disable_intr(struct virtqueue *vq)
436c056be23SJianfeng Tan {
437b4f9a45aSMaxime Coquelin 	if (virtio_with_packed_queue(vq->hw))
438e9f4feb7SJens Freimann 		virtqueue_disable_intr_packed(vq);
439e9f4feb7SJens Freimann 	else
44013cd890dSTiwei Bie 		virtqueue_disable_intr_split(vq);
441c056be23SJianfeng Tan }
442c056be23SJianfeng Tan 
443c056be23SJianfeng Tan /**
444e9f4feb7SJens Freimann  * Tell the backend to interrupt us. Implementation for packed virtqueues.
445e9f4feb7SJens Freimann  */
446e9f4feb7SJens Freimann static inline void
447e9f4feb7SJens Freimann virtqueue_enable_intr_packed(struct virtqueue *vq)
448e9f4feb7SJens Freimann {
449dfd33aa4STiwei Bie 	if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
450dfd33aa4STiwei Bie 		vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
4514cdc4d98STiwei Bie 		vq->vq_packed.ring.driver->desc_event_flags =
452dfd33aa4STiwei Bie 			vq->vq_packed.event_flags_shadow;
453e9f4feb7SJens Freimann 	}
454e9f4feb7SJens Freimann }
455e9f4feb7SJens Freimann 
456e9f4feb7SJens Freimann /**
457e9f4feb7SJens Freimann  * Tell the backend to interrupt us. Implementation for split virtqueues.
458e9f4feb7SJens Freimann  */
459e9f4feb7SJens Freimann static inline void
460e9f4feb7SJens Freimann virtqueue_enable_intr_split(struct virtqueue *vq)
461e9f4feb7SJens Freimann {
462dfd33aa4STiwei Bie 	vq->vq_split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
463e9f4feb7SJens Freimann }
464e9f4feb7SJens Freimann 
465e9f4feb7SJens Freimann /**
466c056be23SJianfeng Tan  * Tell the backend to interrupt us.
467c056be23SJianfeng Tan  */
468c056be23SJianfeng Tan static inline void
469c056be23SJianfeng Tan virtqueue_enable_intr(struct virtqueue *vq)
470c056be23SJianfeng Tan {
471b4f9a45aSMaxime Coquelin 	if (virtio_with_packed_queue(vq->hw))
472e9f4feb7SJens Freimann 		virtqueue_enable_intr_packed(vq);
473e9f4feb7SJens Freimann 	else
474e9f4feb7SJens Freimann 		virtqueue_enable_intr_split(vq);
475c056be23SJianfeng Tan }
476c056be23SJianfeng Tan 
4776c3169a3SBruce Richardson /**
4786c3169a3SBruce Richardson  *  Dump virtqueue internal structures, for debug purpose only.
4796c3169a3SBruce Richardson  */
4806c3169a3SBruce Richardson void virtqueue_dump(struct virtqueue *vq);
4816c3169a3SBruce Richardson /**
4826c3169a3SBruce Richardson  *  Get all mbufs to be freed.
4836c3169a3SBruce Richardson  */
484727411f5SOlivier Matz struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
4856c3169a3SBruce Richardson 
486d8227497STiwei Bie /* Flush the elements in the used ring. */
487bcf55c93STiwei Bie void virtqueue_rxvq_flush(struct virtqueue *vq);
488d8227497STiwei Bie 
4896ebbf410SXuan Ding int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
4906ebbf410SXuan Ding 
4916ebbf410SXuan Ding int virtqueue_txvq_reset_packed(struct virtqueue *vq);
4926ebbf410SXuan Ding 
4936c3169a3SBruce Richardson static inline int
4946c3169a3SBruce Richardson virtqueue_full(const struct virtqueue *vq)
4956c3169a3SBruce Richardson {
4966c3169a3SBruce Richardson 	return vq->vq_free_cnt == 0;
4976c3169a3SBruce Richardson }
4986c3169a3SBruce Richardson 
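/*
 * Queue index to queue type mapping: even indexes are receive queues,
 * odd indexes are transmit queues, and index max_queue_pairs * 2 is the
 * control queue.
 */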
499e67ae1e2SOlivier Matz static inline int
500b5ba7ee4SMaxime Coquelin virtio_get_queue_type(struct virtio_hw *hw, uint16_t vq_idx)
501e67ae1e2SOlivier Matz {
502b5ba7ee4SMaxime Coquelin 	if (vq_idx == hw->max_queue_pairs * 2)
503e67ae1e2SOlivier Matz 		return VTNET_CQ;
504b5ba7ee4SMaxime Coquelin 	else if (vq_idx % 2 == 0)
505e67ae1e2SOlivier Matz 		return VTNET_RQ;
506e67ae1e2SOlivier Matz 	else
507e67ae1e2SOlivier Matz 		return VTNET_TQ;
508e67ae1e2SOlivier Matz }
509e67ae1e2SOlivier Matz 
510f0f5d844SPhil Yang /* virtqueue_nused has a load-acquire or rte_io_rmb inside */
511ea5207c1SJoyce Kong static inline uint16_t
512ea5207c1SJoyce Kong virtqueue_nused(const struct virtqueue *vq)
513ea5207c1SJoyce Kong {
514ea5207c1SJoyce Kong 	uint16_t idx;
515ea5207c1SJoyce Kong 
516ea5207c1SJoyce Kong 	if (vq->hw->weak_barriers) {
517ea5207c1SJoyce Kong 	/**
518ea5207c1SJoyce Kong 	 * x86 prefers using rte_smp_rmb over __atomic_load_n as it
519ea5207c1SJoyce Kong 	 * reports a slightly better perf, which comes from the branch
520ea5207c1SJoyce Kong 	 * saved by the compiler.
521f0f5d844SPhil Yang 	 * The if and else branches are identical with the smp and io
522ea5207c1SJoyce Kong 	 * barriers both defined as compiler barriers on x86.
523ea5207c1SJoyce Kong 	 */
524ea5207c1SJoyce Kong #ifdef RTE_ARCH_X86_64
525ea5207c1SJoyce Kong 		idx = vq->vq_split.ring.used->idx;
526ea5207c1SJoyce Kong 		rte_smp_rmb();
527ea5207c1SJoyce Kong #else
528ea5207c1SJoyce Kong 		idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx,
529ea5207c1SJoyce Kong 				__ATOMIC_ACQUIRE);
530ea5207c1SJoyce Kong #endif
531ea5207c1SJoyce Kong 	} else {
532ea5207c1SJoyce Kong 		idx = vq->vq_split.ring.used->idx;
533f0f5d844SPhil Yang 		rte_io_rmb();
534ea5207c1SJoyce Kong 	}
535ea5207c1SJoyce Kong 	return idx - vq->vq_used_cons_idx;
536ea5207c1SJoyce Kong }
5376c3169a3SBruce Richardson 
538d8227497STiwei Bie void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
539892dc798SJens Freimann void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
5407097ca1bSMarvin Liu void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
5417097ca1bSMarvin Liu 			  uint16_t num);
542d8227497STiwei Bie 
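/* Publish the shadow avail index to the split ring. The store is ordered
 * after the descriptor updates (release semantics, or rte_io_wmb() for
 * real devices) so the device never sees the index before the descriptors.
 */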
5436c3169a3SBruce Richardson static inline void
5446c3169a3SBruce Richardson vq_update_avail_idx(struct virtqueue *vq)
5456c3169a3SBruce Richardson {
5463fc1d87cSJoyce Kong 	if (vq->hw->weak_barriers) {
5473fc1d87cSJoyce Kong 	/* x86 prefers using rte_smp_wmb over __atomic_store_n as
5483fc1d87cSJoyce Kong 	 * it reports a slightly better perf, which comes from the
5493fc1d87cSJoyce Kong 	 * branch saved by the compiler.
5503fc1d87cSJoyce Kong 	 * The if and else branches are identical with the smp and
551f0f5d844SPhil Yang 	 * io barriers both defined as compiler barriers on x86.
5523fc1d87cSJoyce Kong 	 */
5533fc1d87cSJoyce Kong #ifdef RTE_ARCH_X86_64
5543fc1d87cSJoyce Kong 		rte_smp_wmb();
555dfd33aa4STiwei Bie 		vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
5563fc1d87cSJoyce Kong #else
5573fc1d87cSJoyce Kong 		__atomic_store_n(&vq->vq_split.ring.avail->idx,
5583fc1d87cSJoyce Kong 				 vq->vq_avail_idx, __ATOMIC_RELEASE);
5593fc1d87cSJoyce Kong #endif
5603fc1d87cSJoyce Kong 	} else {
561f0f5d844SPhil Yang 		rte_io_wmb();
5623fc1d87cSJoyce Kong 		vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
5633fc1d87cSJoyce Kong 	}
5646c3169a3SBruce Richardson }
5656c3169a3SBruce Richardson 
5666c3169a3SBruce Richardson static inline void
5676c3169a3SBruce Richardson vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
5686c3169a3SBruce Richardson {
5696c3169a3SBruce Richardson 	uint16_t avail_idx;
5706c3169a3SBruce Richardson 	/*
5716c3169a3SBruce Richardson 	 * Place the head of the descriptor chain into the next slot and make
5726c3169a3SBruce Richardson 	 * it usable to the host. The chain is made available now rather than
5736c3169a3SBruce Richardson 	 * deferring to virtqueue_notify() in the hopes that if the host is
5746c3169a3SBruce Richardson 	 * currently running on another CPU, we can keep it processing the new
5756c3169a3SBruce Richardson 	 * descriptor.
5766c3169a3SBruce Richardson 	 */
5776c3169a3SBruce Richardson 	avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
578dfd33aa4STiwei Bie 	if (unlikely(vq->vq_split.ring.avail->ring[avail_idx] != desc_idx))
579dfd33aa4STiwei Bie 		vq->vq_split.ring.avail->ring[avail_idx] = desc_idx;
5806c3169a3SBruce Richardson 	vq->vq_avail_idx++;
5816c3169a3SBruce Richardson }
5826c3169a3SBruce Richardson 
5836c3169a3SBruce Richardson static inline int
5846c3169a3SBruce Richardson virtqueue_kick_prepare(struct virtqueue *vq)
5856c3169a3SBruce Richardson {
586d21d05c7SIlya Maximets 	/*
587d21d05c7SIlya Maximets 	 * Ensure updated avail->idx is visible to vhost before reading
588d21d05c7SIlya Maximets 	 * the used->flags.
589d21d05c7SIlya Maximets 	 */
5909230ab8dSIlya Maximets 	virtio_mb(vq->hw->weak_barriers);
591dfd33aa4STiwei Bie 	return !(vq->vq_split.ring.used->flags & VRING_USED_F_NO_NOTIFY);
5926c3169a3SBruce Richardson }
5936c3169a3SBruce Richardson 
594892dc798SJens Freimann static inline int
595892dc798SJens Freimann virtqueue_kick_prepare_packed(struct virtqueue *vq)
596892dc798SJens Freimann {
597892dc798SJens Freimann 	uint16_t flags;
598892dc798SJens Freimann 
599d21d05c7SIlya Maximets 	/*
600d21d05c7SIlya Maximets 	 * Ensure updated data is visible to vhost before reading the flags.
601d21d05c7SIlya Maximets 	 */
6029230ab8dSIlya Maximets 	virtio_mb(vq->hw->weak_barriers);
6034cdc4d98STiwei Bie 	flags = vq->vq_packed.ring.device->desc_event_flags;
604892dc798SJens Freimann 
605892dc798SJens Freimann 	return flags != RING_EVENT_FLAGS_DISABLE;
606892dc798SJens Freimann }
607892dc798SJens Freimann 
608cc827f83SIlya Maximets /*
609cc827f83SIlya Maximets  * virtqueue_kick_prepare*() or the virtio_wmb() should be called
610cc827f83SIlya Maximets  * before this function to be sure that all the data is visible to vhost.
611cc827f83SIlya Maximets  */
6126c3169a3SBruce Richardson static inline void
6136c3169a3SBruce Richardson virtqueue_notify(struct virtqueue *vq)
6146c3169a3SBruce Richardson {
615f8b60756SMaxime Coquelin 	VIRTIO_OPS(vq->hw)->notify_queue(vq->hw, vq);
6166c3169a3SBruce Richardson }
6176c3169a3SBruce Richardson 
6186c3169a3SBruce Richardson #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
6196c3169a3SBruce Richardson #define VIRTQUEUE_DUMP(vq) do { \
6206c3169a3SBruce Richardson 	uint16_t used_idx, nused; \
621ea5207c1SJoyce Kong 	used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
622ea5207c1SJoyce Kong 				   __ATOMIC_RELAXED); \
6236c3169a3SBruce Richardson 	nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
624b4f9a45aSMaxime Coquelin 	if (virtio_with_packed_queue((vq)->hw)) { \
62556785a2dSJens Freimann 		PMD_INIT_LOG(DEBUG, \
62656785a2dSJens Freimann 		"VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
6278e148e49STiwei Bie 		" cached_flags=0x%x; used_wrap_counter=%d", \
62856785a2dSJens Freimann 		(vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
629dfd33aa4STiwei Bie 		(vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \
630dfd33aa4STiwei Bie 		(vq)->vq_packed.used_wrap_counter); \
63156785a2dSJens Freimann 		break; \
63256785a2dSJens Freimann 	} \
6336c3169a3SBruce Richardson 	PMD_INIT_LOG(DEBUG, \
6346c3169a3SBruce Richardson 	  "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
6356c3169a3SBruce Richardson 	  " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
6366c3169a3SBruce Richardson 	  " avail.flags=0x%x; used.flags=0x%x", \
637ea5207c1SJoyce Kong 	  (vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \
638ea5207c1SJoyce Kong 	  (vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \
639ea5207c1SJoyce Kong 	  __atomic_load_n(&(vq)->vq_split.ring.used->idx, __ATOMIC_RELAXED), \
640dfd33aa4STiwei Bie 	  (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
6416c3169a3SBruce Richardson } while (0)
6426c3169a3SBruce Richardson #else
6436c3169a3SBruce Richardson #define VIRTQUEUE_DUMP(vq) do { } while (0)
6446c3169a3SBruce Richardson #endif
6456c3169a3SBruce Richardson 
64657f90f89SMarvin Liu /* avoid the write operation when it is unnecessary, to lessen cache issues */
64757f90f89SMarvin Liu #define ASSIGN_UNLESS_EQUAL(var, val) do {	\
648a1412e05SVipul Ashri 	typeof(var) *const var_ = &(var);	\
649a1412e05SVipul Ashri 	typeof(val)  const val_ = (val);	\
650a1412e05SVipul Ashri 	if (*var_ != val_)			\
651a1412e05SVipul Ashri 		*var_ = val_;			\
65257f90f89SMarvin Liu } while (0)
65357f90f89SMarvin Liu 
65457f90f89SMarvin Liu #define virtqueue_clear_net_hdr(hdr) do {		\
65557f90f89SMarvin Liu 	typeof(hdr) hdr_ = (hdr);			\
65657f90f89SMarvin Liu 	ASSIGN_UNLESS_EQUAL((hdr_)->csum_start, 0);	\
65757f90f89SMarvin Liu 	ASSIGN_UNLESS_EQUAL((hdr_)->csum_offset, 0);	\
65857f90f89SMarvin Liu 	ASSIGN_UNLESS_EQUAL((hdr_)->flags, 0);		\
65957f90f89SMarvin Liu 	ASSIGN_UNLESS_EQUAL((hdr_)->gso_type, 0);	\
66057f90f89SMarvin Liu 	ASSIGN_UNLESS_EQUAL((hdr_)->gso_size, 0);	\
66157f90f89SMarvin Liu 	ASSIGN_UNLESS_EQUAL((hdr_)->hdr_len, 0);	\
66257f90f89SMarvin Liu } while (0)
66357f90f89SMarvin Liu 
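/* Fill the virtio net header from the mbuf Tx offload flags: checksum
 * start/offset for L4 checksum offload and the gso_* fields when TCP
 * segmentation offload is requested; unused fields are cleared.
 */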
66457f90f89SMarvin Liu static inline void
66585a4fa2fSDavid Marchand virtqueue_xmit_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *cookie)
66657f90f89SMarvin Liu {
667daa02b5cSOlivier Matz 	uint64_t csum_l4 = cookie->ol_flags & RTE_MBUF_F_TX_L4_MASK;
668daa02b5cSOlivier Matz 	uint16_t o_l23_len = (cookie->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
6696474b594SIvan Malov 			     cookie->outer_l2_len + cookie->outer_l3_len : 0;
67057f90f89SMarvin Liu 
671daa02b5cSOlivier Matz 	if (cookie->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
672daa02b5cSOlivier Matz 		csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;
673004d8e85SDavid Marchand 
674004d8e85SDavid Marchand 	switch (csum_l4) {
675daa02b5cSOlivier Matz 	case RTE_MBUF_F_TX_UDP_CKSUM:
6766474b594SIvan Malov 		hdr->csum_start = o_l23_len + cookie->l2_len + cookie->l3_len;
67785a4fa2fSDavid Marchand 		hdr->csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum);
67857f90f89SMarvin Liu 		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
67957f90f89SMarvin Liu 		break;
68057f90f89SMarvin Liu 
681daa02b5cSOlivier Matz 	case RTE_MBUF_F_TX_TCP_CKSUM:
6826474b594SIvan Malov 		hdr->csum_start = o_l23_len + cookie->l2_len + cookie->l3_len;
68357f90f89SMarvin Liu 		hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
68457f90f89SMarvin Liu 		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
68557f90f89SMarvin Liu 		break;
68657f90f89SMarvin Liu 
68757f90f89SMarvin Liu 	default:
68857f90f89SMarvin Liu 		ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
68957f90f89SMarvin Liu 		ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
69057f90f89SMarvin Liu 		ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
69157f90f89SMarvin Liu 		break;
69257f90f89SMarvin Liu 	}
69357f90f89SMarvin Liu 
69457f90f89SMarvin Liu 	/* TCP Segmentation Offload */
695daa02b5cSOlivier Matz 	if (cookie->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
696daa02b5cSOlivier Matz 		hdr->gso_type = (cookie->ol_flags & RTE_MBUF_F_TX_IPV6) ?
69757f90f89SMarvin Liu 			VIRTIO_NET_HDR_GSO_TCPV6 :
69857f90f89SMarvin Liu 			VIRTIO_NET_HDR_GSO_TCPV4;
69957f90f89SMarvin Liu 		hdr->gso_size = cookie->tso_segsz;
7006474b594SIvan Malov 		hdr->hdr_len = o_l23_len + cookie->l2_len + cookie->l3_len +
7016474b594SIvan Malov 			       cookie->l4_len;
70257f90f89SMarvin Liu 	} else {
70357f90f89SMarvin Liu 		ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
70457f90f89SMarvin Liu 		ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
70557f90f89SMarvin Liu 		ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
70657f90f89SMarvin Liu 	}
70757f90f89SMarvin Liu }
70857f90f89SMarvin Liu 
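/* Enqueue one mbuf chain on a packed Tx queue. The virtio net header is
 * either prepended in the mbuf headroom (can_push), referenced through an
 * indirect descriptor table (use_indirect) or given its own descriptor.
 * The head descriptor flags are written last, with release semantics, so
 * the device sees the whole chain at once.
 */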
70957f90f89SMarvin Liu static inline void
71057f90f89SMarvin Liu virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
711b473061bSMarvin Liu 			      uint16_t needed, int use_indirect, int can_push,
712b473061bSMarvin Liu 			      int in_order)
71357f90f89SMarvin Liu {
71457f90f89SMarvin Liu 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
71557f90f89SMarvin Liu 	struct vq_desc_extra *dxp;
7163169550fSMaxime Coquelin 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
71757f90f89SMarvin Liu 	struct vring_packed_desc *start_dp, *head_dp;
71857f90f89SMarvin Liu 	uint16_t idx, id, head_idx, head_flags;
71957f90f89SMarvin Liu 	int16_t head_size = vq->hw->vtnet_hdr_size;
72057f90f89SMarvin Liu 	struct virtio_net_hdr *hdr;
72157f90f89SMarvin Liu 	uint16_t prev;
72257f90f89SMarvin Liu 	bool prepend_header = false;
7238410c369SMarvin Liu 	uint16_t seg_num = cookie->nb_segs;
72457f90f89SMarvin Liu 
72557f90f89SMarvin Liu 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
72657f90f89SMarvin Liu 
72757f90f89SMarvin Liu 	dxp = &vq->vq_descx[id];
72857f90f89SMarvin Liu 	dxp->ndescs = needed;
72957f90f89SMarvin Liu 	dxp->cookie = cookie;
73057f90f89SMarvin Liu 
73157f90f89SMarvin Liu 	head_idx = vq->vq_avail_idx;
73257f90f89SMarvin Liu 	idx = head_idx;
73357f90f89SMarvin Liu 	prev = head_idx;
73457f90f89SMarvin Liu 	start_dp = vq->vq_packed.ring.desc;
73557f90f89SMarvin Liu 
73657f90f89SMarvin Liu 	head_dp = &vq->vq_packed.ring.desc[idx];
73757f90f89SMarvin Liu 	head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
73857f90f89SMarvin Liu 	head_flags |= vq->vq_packed.cached_flags;
73957f90f89SMarvin Liu 
74057f90f89SMarvin Liu 	if (can_push) {
74157f90f89SMarvin Liu 		/* prepend cannot fail, checked by caller */
74257f90f89SMarvin Liu 		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
74357f90f89SMarvin Liu 					      -head_size);
74457f90f89SMarvin Liu 		prepend_header = true;
74557f90f89SMarvin Liu 
74657f90f89SMarvin Liu 		/* if offload disabled, it is not zeroed below, do it now */
74757f90f89SMarvin Liu 		if (!vq->hw->has_tx_offload)
74857f90f89SMarvin Liu 			virtqueue_clear_net_hdr(hdr);
749b473061bSMarvin Liu 	} else if (use_indirect) {
750b473061bSMarvin Liu 		/* setup tx ring slot to point to indirect
751b473061bSMarvin Liu 		 * descriptor list stored in reserved region.
752b473061bSMarvin Liu 		 *
753b473061bSMarvin Liu 		 * the first slot in indirect ring is already preset
754b473061bSMarvin Liu 		 * to point to the header in reserved region
755b473061bSMarvin Liu 		 */
756b473061bSMarvin Liu 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
757b473061bSMarvin Liu 			RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
7588410c369SMarvin Liu 		start_dp[idx].len   = (seg_num + 1) *
759b473061bSMarvin Liu 			sizeof(struct vring_packed_desc);
760ad6f0194SXuan Ding 		/* Packed descriptor id needs to be restored when inorder. */
761ad6f0194SXuan Ding 		if (in_order)
762ad6f0194SXuan Ding 			start_dp[idx].id = idx;
763b473061bSMarvin Liu 		/* reset flags for indirect desc */
764b473061bSMarvin Liu 		head_flags = VRING_DESC_F_INDIRECT;
765b473061bSMarvin Liu 		head_flags |= vq->vq_packed.cached_flags;
766b473061bSMarvin Liu 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
767b473061bSMarvin Liu 
768b473061bSMarvin Liu 		/* loop below will fill in rest of the indirect elements */
769b473061bSMarvin Liu 		start_dp = txr[idx].tx_packed_indir;
770b473061bSMarvin Liu 		idx = 1;
77157f90f89SMarvin Liu 	} else {
77257f90f89SMarvin Liu 		/* setup first tx ring slot to point to header
77357f90f89SMarvin Liu 		 * stored in reserved region.
77457f90f89SMarvin Liu 		 */
77557f90f89SMarvin Liu 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
77657f90f89SMarvin Liu 			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
77757f90f89SMarvin Liu 		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
77857f90f89SMarvin Liu 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
77957f90f89SMarvin Liu 		idx++;
78057f90f89SMarvin Liu 		if (idx >= vq->vq_nentries) {
78157f90f89SMarvin Liu 			idx -= vq->vq_nentries;
78257f90f89SMarvin Liu 			vq->vq_packed.cached_flags ^=
78357f90f89SMarvin Liu 				VRING_PACKED_DESC_F_AVAIL_USED;
78457f90f89SMarvin Liu 		}
78557f90f89SMarvin Liu 	}
78657f90f89SMarvin Liu 
78785a4fa2fSDavid Marchand 	if (vq->hw->has_tx_offload)
78885a4fa2fSDavid Marchand 		virtqueue_xmit_offload(hdr, cookie);
78957f90f89SMarvin Liu 
79057f90f89SMarvin Liu 	do {
79157f90f89SMarvin Liu 		uint16_t flags;
79257f90f89SMarvin Liu 
793ba55c94aSMaxime Coquelin 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
79457f90f89SMarvin Liu 		start_dp[idx].len  = cookie->data_len;
79557f90f89SMarvin Liu 		if (prepend_header) {
79657f90f89SMarvin Liu 			start_dp[idx].addr -= head_size;
79757f90f89SMarvin Liu 			start_dp[idx].len += head_size;
79857f90f89SMarvin Liu 			prepend_header = false;
79957f90f89SMarvin Liu 		}
80057f90f89SMarvin Liu 
80157f90f89SMarvin Liu 		if (likely(idx != head_idx)) {
80257f90f89SMarvin Liu 			flags = cookie->next ? VRING_DESC_F_NEXT : 0;
80357f90f89SMarvin Liu 			flags |= vq->vq_packed.cached_flags;
80457f90f89SMarvin Liu 			start_dp[idx].flags = flags;
80557f90f89SMarvin Liu 		}
80657f90f89SMarvin Liu 		prev = idx;
80757f90f89SMarvin Liu 		idx++;
80857f90f89SMarvin Liu 		if (idx >= vq->vq_nentries) {
80957f90f89SMarvin Liu 			idx -= vq->vq_nentries;
81057f90f89SMarvin Liu 			vq->vq_packed.cached_flags ^=
81157f90f89SMarvin Liu 				VRING_PACKED_DESC_F_AVAIL_USED;
81257f90f89SMarvin Liu 		}
81357f90f89SMarvin Liu 	} while ((cookie = cookie->next) != NULL);
81457f90f89SMarvin Liu 
81557f90f89SMarvin Liu 	start_dp[prev].id = id;
81657f90f89SMarvin Liu 
817b473061bSMarvin Liu 	if (use_indirect) {
818b473061bSMarvin Liu 		idx = head_idx;
819b473061bSMarvin Liu 		if (++idx >= vq->vq_nentries) {
820b473061bSMarvin Liu 			idx -= vq->vq_nentries;
821b473061bSMarvin Liu 			vq->vq_packed.cached_flags ^=
822b473061bSMarvin Liu 				VRING_PACKED_DESC_F_AVAIL_USED;
823b473061bSMarvin Liu 		}
824b473061bSMarvin Liu 	}
825b473061bSMarvin Liu 
82657f90f89SMarvin Liu 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
82757f90f89SMarvin Liu 	vq->vq_avail_idx = idx;
82857f90f89SMarvin Liu 
82957f90f89SMarvin Liu 	if (!in_order) {
83057f90f89SMarvin Liu 		vq->vq_desc_head_idx = dxp->next;
83157f90f89SMarvin Liu 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
83257f90f89SMarvin Liu 			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
83357f90f89SMarvin Liu 	}
83457f90f89SMarvin Liu 
83557f90f89SMarvin Liu 	virtqueue_store_flags_packed(head_dp, head_flags,
83657f90f89SMarvin Liu 				     vq->hw->weak_barriers);
83757f90f89SMarvin Liu }
83857f90f89SMarvin Liu 
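/* Return the descriptor chain identified by id to the free list. */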
83957f90f89SMarvin Liu static void
84057f90f89SMarvin Liu vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
84157f90f89SMarvin Liu {
84257f90f89SMarvin Liu 	struct vq_desc_extra *dxp;
84357f90f89SMarvin Liu 
84457f90f89SMarvin Liu 	dxp = &vq->vq_descx[id];
84557f90f89SMarvin Liu 	vq->vq_free_cnt += dxp->ndescs;
84657f90f89SMarvin Liu 
84757f90f89SMarvin Liu 	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
84857f90f89SMarvin Liu 		vq->vq_desc_head_idx = id;
84957f90f89SMarvin Liu 	else
85057f90f89SMarvin Liu 		vq->vq_descx[vq->vq_desc_tail_idx].next = id;
85157f90f89SMarvin Liu 
85257f90f89SMarvin Liu 	vq->vq_desc_tail_idx = id;
85357f90f89SMarvin Liu 	dxp->next = VQ_RING_DESC_CHAIN_END;
85457f90f89SMarvin Liu }
85557f90f89SMarvin Liu 
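/* Reclaim up to num used descriptors from a packed Tx queue, assuming
 * in-order completion by the device.
 */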
85657f90f89SMarvin Liu static void
857e2ca43a3SIvan Ilchenko virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, uint16_t num)
85857f90f89SMarvin Liu {
85957f90f89SMarvin Liu 	uint16_t used_idx, id, curr_id, free_cnt = 0;
86057f90f89SMarvin Liu 	uint16_t size = vq->vq_nentries;
86157f90f89SMarvin Liu 	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
86257f90f89SMarvin Liu 	struct vq_desc_extra *dxp;
863e2ca43a3SIvan Ilchenko 	int nb = num;
86457f90f89SMarvin Liu 
86557f90f89SMarvin Liu 	used_idx = vq->vq_used_cons_idx;
866f0f5d844SPhil Yang 	/* desc_is_used has a load-acquire or rte_io_rmb inside
86757f90f89SMarvin Liu 	 * and waits for used descs in the virtqueue.
86857f90f89SMarvin Liu 	 */
869e2ca43a3SIvan Ilchenko 	while (nb > 0 && desc_is_used(&desc[used_idx], vq)) {
87057f90f89SMarvin Liu 		id = desc[used_idx].id;
87157f90f89SMarvin Liu 		do {
87257f90f89SMarvin Liu 			curr_id = used_idx;
87357f90f89SMarvin Liu 			dxp = &vq->vq_descx[used_idx];
87457f90f89SMarvin Liu 			used_idx += dxp->ndescs;
87557f90f89SMarvin Liu 			free_cnt += dxp->ndescs;
876e2ca43a3SIvan Ilchenko 			nb -= dxp->ndescs;
87757f90f89SMarvin Liu 			if (used_idx >= size) {
87857f90f89SMarvin Liu 				used_idx -= size;
87957f90f89SMarvin Liu 				vq->vq_packed.used_wrap_counter ^= 1;
88057f90f89SMarvin Liu 			}
88157f90f89SMarvin Liu 			if (dxp->cookie != NULL) {
88257f90f89SMarvin Liu 				rte_pktmbuf_free(dxp->cookie);
88357f90f89SMarvin Liu 				dxp->cookie = NULL;
88457f90f89SMarvin Liu 			}
88557f90f89SMarvin Liu 		} while (curr_id != id);
88657f90f89SMarvin Liu 	}
88757f90f89SMarvin Liu 	vq->vq_used_cons_idx = used_idx;
88857f90f89SMarvin Liu 	vq->vq_free_cnt += free_cnt;
88957f90f89SMarvin Liu }
89057f90f89SMarvin Liu 
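/* Reclaim up to num used descriptors from a packed Tx queue when the
 * device may complete them out of order.
 */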
89157f90f89SMarvin Liu static void
892e2ca43a3SIvan Ilchenko virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, uint16_t num)
89357f90f89SMarvin Liu {
89457f90f89SMarvin Liu 	uint16_t used_idx, id;
89557f90f89SMarvin Liu 	uint16_t size = vq->vq_nentries;
89657f90f89SMarvin Liu 	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
89757f90f89SMarvin Liu 	struct vq_desc_extra *dxp;
89857f90f89SMarvin Liu 
89957f90f89SMarvin Liu 	used_idx = vq->vq_used_cons_idx;
900f0f5d844SPhil Yang 	/* desc_is_used has a load-acquire or rte_io_rmb inside
90157f90f89SMarvin Liu 	 * and waits for used descs in the virtqueue.
90257f90f89SMarvin Liu 	 */
90357f90f89SMarvin Liu 	while (num-- && desc_is_used(&desc[used_idx], vq)) {
90457f90f89SMarvin Liu 		id = desc[used_idx].id;
90557f90f89SMarvin Liu 		dxp = &vq->vq_descx[id];
90657f90f89SMarvin Liu 		vq->vq_used_cons_idx += dxp->ndescs;
90757f90f89SMarvin Liu 		if (vq->vq_used_cons_idx >= size) {
90857f90f89SMarvin Liu 			vq->vq_used_cons_idx -= size;
90957f90f89SMarvin Liu 			vq->vq_packed.used_wrap_counter ^= 1;
91057f90f89SMarvin Liu 		}
91157f90f89SMarvin Liu 		vq_ring_free_id_packed(vq, id);
91257f90f89SMarvin Liu 		if (dxp->cookie != NULL) {
91357f90f89SMarvin Liu 			rte_pktmbuf_free(dxp->cookie);
91457f90f89SMarvin Liu 			dxp->cookie = NULL;
91557f90f89SMarvin Liu 		}
91657f90f89SMarvin Liu 		used_idx = vq->vq_used_cons_idx;
91757f90f89SMarvin Liu 	}
91857f90f89SMarvin Liu }
91957f90f89SMarvin Liu 
92057f90f89SMarvin Liu /* Cleanup from completed transmits. */
92157f90f89SMarvin Liu static inline void
922e2ca43a3SIvan Ilchenko virtio_xmit_cleanup_packed(struct virtqueue *vq, uint16_t num, int in_order)
92357f90f89SMarvin Liu {
92457f90f89SMarvin Liu 	if (in_order)
92557f90f89SMarvin Liu 		virtio_xmit_cleanup_inorder_packed(vq, num);
92657f90f89SMarvin Liu 	else
92757f90f89SMarvin Liu 		virtio_xmit_cleanup_normal_packed(vq, num);
92857f90f89SMarvin Liu }
92957f90f89SMarvin Liu 
93057f90f89SMarvin Liu static inline void
93157f90f89SMarvin Liu virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
93257f90f89SMarvin Liu {
93357f90f89SMarvin Liu 	uint16_t i, used_idx, desc_idx;
93457f90f89SMarvin Liu 	for (i = 0; i < num; i++) {
93557f90f89SMarvin Liu 		struct vring_used_elem *uep;
93657f90f89SMarvin Liu 		struct vq_desc_extra *dxp;
93757f90f89SMarvin Liu 
93857f90f89SMarvin Liu 		used_idx = (uint16_t)(vq->vq_used_cons_idx &
93957f90f89SMarvin Liu 				(vq->vq_nentries - 1));
94057f90f89SMarvin Liu 		uep = &vq->vq_split.ring.used->ring[used_idx];
94157f90f89SMarvin Liu 
94257f90f89SMarvin Liu 		desc_idx = (uint16_t)uep->id;
94357f90f89SMarvin Liu 		dxp = &vq->vq_descx[desc_idx];
94457f90f89SMarvin Liu 		vq->vq_used_cons_idx++;
94557f90f89SMarvin Liu 		vq_ring_free_chain(vq, desc_idx);
94657f90f89SMarvin Liu 
94757f90f89SMarvin Liu 		if (dxp->cookie != NULL) {
94857f90f89SMarvin Liu 			rte_pktmbuf_free(dxp->cookie);
94957f90f89SMarvin Liu 			dxp->cookie = NULL;
95057f90f89SMarvin Liu 		}
95157f90f89SMarvin Liu 	}
95257f90f89SMarvin Liu }
95357f90f89SMarvin Liu 
95457f90f89SMarvin Liu /* Cleanup from completed inorder transmits. */
95557f90f89SMarvin Liu static __rte_always_inline void
95657f90f89SMarvin Liu virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
95757f90f89SMarvin Liu {
95857f90f89SMarvin Liu 	uint16_t i, idx = vq->vq_used_cons_idx;
95957f90f89SMarvin Liu 	int16_t free_cnt = 0;
96057f90f89SMarvin Liu 	struct vq_desc_extra *dxp = NULL;
96157f90f89SMarvin Liu 
96257f90f89SMarvin Liu 	if (unlikely(num == 0))
96357f90f89SMarvin Liu 		return;
96457f90f89SMarvin Liu 
96557f90f89SMarvin Liu 	for (i = 0; i < num; i++) {
96657f90f89SMarvin Liu 		dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
96757f90f89SMarvin Liu 		free_cnt += dxp->ndescs;
96857f90f89SMarvin Liu 		if (dxp->cookie != NULL) {
96957f90f89SMarvin Liu 			rte_pktmbuf_free(dxp->cookie);
97057f90f89SMarvin Liu 			dxp->cookie = NULL;
97157f90f89SMarvin Liu 		}
97257f90f89SMarvin Liu 	}
97357f90f89SMarvin Liu 
97457f90f89SMarvin Liu 	vq->vq_free_cnt += free_cnt;
97557f90f89SMarvin Liu 	vq->vq_used_cons_idx = idx;
97657f90f89SMarvin Liu }
9776c3169a3SBruce Richardson #endif /* _VIRTQUEUE_H_ */
978