1*d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2*d30ea906Sjfb8856606 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
3*d30ea906Sjfb8856606 */
4*d30ea906Sjfb8856606
5*d30ea906Sjfb8856606 #ifndef _VIRTQUEUE_H_
6*d30ea906Sjfb8856606 #define _VIRTQUEUE_H_
7*d30ea906Sjfb8856606
8*d30ea906Sjfb8856606 #include <stdint.h>
9*d30ea906Sjfb8856606
10*d30ea906Sjfb8856606 #include <rte_atomic.h>
11*d30ea906Sjfb8856606 #include <rte_memory.h>
12*d30ea906Sjfb8856606 #include <rte_memzone.h>
13*d30ea906Sjfb8856606 #include <rte_mempool.h>
14*d30ea906Sjfb8856606
15*d30ea906Sjfb8856606 #include "virtio_pci.h"
16*d30ea906Sjfb8856606 #include "virtio_ring.h"
17*d30ea906Sjfb8856606 #include "virtio_logs.h"
18*d30ea906Sjfb8856606 #include "virtio_crypto.h"
19*d30ea906Sjfb8856606
20*d30ea906Sjfb8856606 struct rte_mbuf;
21*d30ea906Sjfb8856606
22*d30ea906Sjfb8856606 /*
23*d30ea906Sjfb8856606 * Per virtio_config.h in Linux.
24*d30ea906Sjfb8856606 * For virtio_pci on SMP, we don't need to order with respect to MMIO
25*d30ea906Sjfb8856606 * accesses through relaxed memory I/O windows, so smp_mb() et al are
26*d30ea906Sjfb8856606 * sufficient.
27*d30ea906Sjfb8856606 *
28*d30ea906Sjfb8856606 */
29*d30ea906Sjfb8856606 #define virtio_mb() rte_smp_mb()
30*d30ea906Sjfb8856606 #define virtio_rmb() rte_smp_rmb()
31*d30ea906Sjfb8856606 #define virtio_wmb() rte_smp_wmb()
32*d30ea906Sjfb8856606
/* Maximum length of a virtqueue name string. */
#define VIRTQUEUE_MAX_NAME_SZ 32

/* Virtqueue roles: data (request) queue vs. control queue. */
enum { VTCRYPTO_DATAQ = 0, VTCRYPTO_CTRLQ = 1 };

/**
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768
44*d30ea906Sjfb8856606
/**
 * Per-descriptor driver bookkeeping kept outside the device-visible
 * vring, indexed by descriptor-chain head.
 */
struct vq_desc_extra {
	void *crypto_op;  /**< enqueued crypto op — presumably returned to the caller on dequeue; confirm in the .c */
	void *cookie;     /**< opaque per-request data owned by the driver */
	uint16_t ndescs;  /**< number of descriptors in this chain */
};
50*d30ea906Sjfb8856606
51*d30ea906Sjfb8856606 struct virtqueue {
52*d30ea906Sjfb8856606 /**< virtio_crypto_hw structure pointer. */
53*d30ea906Sjfb8856606 struct virtio_crypto_hw *hw;
54*d30ea906Sjfb8856606 /**< mem zone to populate RX ring. */
55*d30ea906Sjfb8856606 const struct rte_memzone *mz;
56*d30ea906Sjfb8856606 /**< memzone to populate hdr and request. */
57*d30ea906Sjfb8856606 struct rte_mempool *mpool;
58*d30ea906Sjfb8856606 uint8_t dev_id; /**< Device identifier. */
59*d30ea906Sjfb8856606 uint16_t vq_queue_index; /**< PCI queue index */
60*d30ea906Sjfb8856606
61*d30ea906Sjfb8856606 void *vq_ring_virt_mem; /**< linear address of vring*/
62*d30ea906Sjfb8856606 unsigned int vq_ring_size;
63*d30ea906Sjfb8856606 phys_addr_t vq_ring_mem; /**< physical address of vring */
64*d30ea906Sjfb8856606
65*d30ea906Sjfb8856606 struct vring vq_ring; /**< vring keeping desc, used and avail */
66*d30ea906Sjfb8856606 uint16_t vq_free_cnt; /**< num of desc available */
67*d30ea906Sjfb8856606 uint16_t vq_nentries; /**< vring desc numbers */
68*d30ea906Sjfb8856606
69*d30ea906Sjfb8856606 /**
70*d30ea906Sjfb8856606 * Head of the free chain in the descriptor table. If
71*d30ea906Sjfb8856606 * there are no free descriptors, this will be set to
72*d30ea906Sjfb8856606 * VQ_RING_DESC_CHAIN_END.
73*d30ea906Sjfb8856606 */
74*d30ea906Sjfb8856606 uint16_t vq_desc_head_idx;
75*d30ea906Sjfb8856606 uint16_t vq_desc_tail_idx;
76*d30ea906Sjfb8856606 /**
77*d30ea906Sjfb8856606 * Last consumed descriptor in the used table,
78*d30ea906Sjfb8856606 * trails vq_ring.used->idx.
79*d30ea906Sjfb8856606 */
80*d30ea906Sjfb8856606 uint16_t vq_used_cons_idx;
81*d30ea906Sjfb8856606 uint16_t vq_avail_idx;
82*d30ea906Sjfb8856606
83*d30ea906Sjfb8856606 /* Statistics */
84*d30ea906Sjfb8856606 uint64_t packets_sent_total;
85*d30ea906Sjfb8856606 uint64_t packets_sent_failed;
86*d30ea906Sjfb8856606 uint64_t packets_received_total;
87*d30ea906Sjfb8856606 uint64_t packets_received_failed;
88*d30ea906Sjfb8856606
89*d30ea906Sjfb8856606 uint16_t *notify_addr;
90*d30ea906Sjfb8856606
91*d30ea906Sjfb8856606 struct vq_desc_extra vq_descx[0];
92*d30ea906Sjfb8856606 };
93*d30ea906Sjfb8856606
/**
 * Tell the backend not to interrupt us.
 */
void virtqueue_disable_intr(struct virtqueue *vq);

/**
 * Get all mbufs to be freed (reclaims buffers still left on the ring;
 * implementation lives in the .c file).
 *
 * NOTE(review): "detatch" is a historical misspelling of "detach";
 * it is kept here because renaming would break existing callers.
 */
void virtqueue_detatch_unused(struct virtqueue *vq);
103*d30ea906Sjfb8856606
104*d30ea906Sjfb8856606 static inline int
virtqueue_full(const struct virtqueue * vq)105*d30ea906Sjfb8856606 virtqueue_full(const struct virtqueue *vq)
106*d30ea906Sjfb8856606 {
107*d30ea906Sjfb8856606 return vq->vq_free_cnt == 0;
108*d30ea906Sjfb8856606 }
109*d30ea906Sjfb8856606
/**
 * Number of used-ring entries the device has published but the driver
 * has not consumed yet.  The uint16_t cast makes the modulo-2^16
 * difference correct even after the indices wrap.
 */
#define VIRTQUEUE_NUSED(vq) \
	((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
112*d30ea906Sjfb8856606
/**
 * Publish the driver's shadow avail index to the device-visible ring.
 * The write barrier orders all prior descriptor/avail-ring stores
 * before the index store, so the device never observes the new index
 * ahead of the data it covers.
 */
static inline void
vq_update_avail_idx(struct virtqueue *vq)
{
	virtio_wmb();
	vq->vq_ring.avail->idx = vq->vq_avail_idx;
}
119*d30ea906Sjfb8856606
120*d30ea906Sjfb8856606 static inline void
vq_update_avail_ring(struct virtqueue * vq,uint16_t desc_idx)121*d30ea906Sjfb8856606 vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
122*d30ea906Sjfb8856606 {
123*d30ea906Sjfb8856606 uint16_t avail_idx;
124*d30ea906Sjfb8856606 /*
125*d30ea906Sjfb8856606 * Place the head of the descriptor chain into the next slot and make
126*d30ea906Sjfb8856606 * it usable to the host. The chain is made available now rather than
127*d30ea906Sjfb8856606 * deferring to virtqueue_notify() in the hopes that if the host is
128*d30ea906Sjfb8856606 * currently running on another CPU, we can keep it processing the new
129*d30ea906Sjfb8856606 * descriptor.
130*d30ea906Sjfb8856606 */
131*d30ea906Sjfb8856606 avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
132*d30ea906Sjfb8856606 if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
133*d30ea906Sjfb8856606 vq->vq_ring.avail->ring[avail_idx] = desc_idx;
134*d30ea906Sjfb8856606 vq->vq_avail_idx++;
135*d30ea906Sjfb8856606 }
136*d30ea906Sjfb8856606
137*d30ea906Sjfb8856606 static inline int
virtqueue_kick_prepare(struct virtqueue * vq)138*d30ea906Sjfb8856606 virtqueue_kick_prepare(struct virtqueue *vq)
139*d30ea906Sjfb8856606 {
140*d30ea906Sjfb8856606 return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
141*d30ea906Sjfb8856606 }
142*d30ea906Sjfb8856606
/**
 * Kick the device to process this queue, through the transport's
 * notify_queue hook.
 */
static inline void
virtqueue_notify(struct virtqueue *vq)
{
	/*
	 * Ensure updated avail->idx is visible to host.
	 * For virtio on IA, the notification is through io port operation
	 * which is a serialization instruction itself.
	 */
	VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
}
153*d30ea906Sjfb8856606
/**
 * Dump virtqueue internal structures, for debug purpose only.
 * Logs size, free/used counts and the driver- and device-side
 * indices/flags at DEBUG level; no side effects on the queue.
 */
#define VIRTQUEUE_DUMP(vq) do { \
	uint16_t used_idx, nused; \
	used_idx = (vq)->vq_ring.used->idx; \
	nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
	VIRTIO_CRYPTO_INIT_LOG_DBG(\
	  "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
	  " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
	  " avail.flags=0x%x; used.flags=0x%x", \
	  (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
	  (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
	  (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
	  (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
} while (0)
170*d30ea906Sjfb8856606
171*d30ea906Sjfb8856606 #endif /* _VIRTQUEUE_H_ */
172