/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device-specific vhost library (vDPA)
 */

#include <stdbool.h>
#include <sys/queue.h>

#include <rte_class.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>

#include "rte_vdpa.h"
#include "rte_vdpa_dev.h"
#include "vhost.h"

/** Doubly linked list of vDPA devices. */
TAILQ_HEAD(vdpa_device_list, rte_vdpa_device);

static struct vdpa_device_list vdpa_device_list =
	TAILQ_HEAD_INITIALIZER(vdpa_device_list);
static rte_spinlock_t vdpa_device_list_lock = RTE_SPINLOCK_INITIALIZER;

/* Unsafe, needs to be called with vdpa_device_list_lock held */
static struct rte_vdpa_device *
__vdpa_find_device_by_name(const char *name)
{
	struct rte_vdpa_device *dev, *ret = NULL;

	if (name == NULL)
		return NULL;

	TAILQ_FOREACH(dev, &vdpa_device_list, next) {
		if (!strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN)) {
			ret = dev;
			break;
		}
	}

	return ret;
}

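/* Look up a registered vDPA device by its generic device name. */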
struct rte_vdpa_device *
rte_vdpa_find_device_by_name(const char *name)
{
	struct rte_vdpa_device *dev;

	rte_spinlock_lock(&vdpa_device_list_lock);
	dev = __vdpa_find_device_by_name(name);
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return dev;
}

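/* Return the generic rte_device backing a vDPA device, or NULL. */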
struct rte_device *
rte_vdpa_get_rte_device(struct rte_vdpa_device *vdpa_dev)
{
	if (vdpa_dev == NULL)
		return NULL;

	return vdpa_dev->device;
}

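/*
 * Register a vDPA device: validate that all mandatory driver callbacks are
 * implemented, then allocate the device and append it to the global list.
 */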
struct rte_vdpa_device *
rte_vdpa_register_device(struct rte_device *rte_dev,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;

	if (ops == NULL)
		return NULL;

	/* Check mandatory ops are implemented */
	if (!ops->get_queue_num || !ops->get_features ||
			!ops->get_protocol_features || !ops->dev_conf ||
			!ops->dev_close || !ops->set_vring_state ||
			!ops->set_features) {
		VHOST_LOG_CONFIG(ERR,
				"Some mandatory vDPA ops aren't implemented\n");
		return NULL;
	}

	rte_spinlock_lock(&vdpa_device_list_lock);
	/* Check the device hasn't been registered already */
	dev = __vdpa_find_device_by_name(rte_dev->name);
	if (dev) {
		dev = NULL;
		goto out_unlock;
	}

	dev = rte_zmalloc(NULL, sizeof(*dev), 0);
	if (!dev)
		goto out_unlock;

	dev->device = rte_dev;
	dev->ops = ops;
	TAILQ_INSERT_TAIL(&vdpa_device_list, dev, next);
out_unlock:
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return dev;
}

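/* Remove a vDPA device from the global list and free it; -1 if not found. */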
int
rte_vdpa_unregister_device(struct rte_vdpa_device *dev)
{
	struct rte_vdpa_device *cur_dev, *tmp_dev;
	int ret = -1;

	rte_spinlock_lock(&vdpa_device_list_lock);
	TAILQ_FOREACH_SAFE(cur_dev, &vdpa_device_list, next, tmp_dev) {
		if (dev != cur_dev)
			continue;

		TAILQ_REMOVE(&vdpa_device_list, dev, next);
		rte_free(dev);
		ret = 0;
		break;
	}
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return ret;
}

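/*
 * Relay used ring updates from the mediated split ring to the guest-visible
 * used ring, and log the guest pages written by the device so that dirty
 * page tracking (e.g. for live migration) stays correct.
 * Returns the number of used entries relayed, or -1 on error.
 */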
int
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	/* Only split rings are supported here */
	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	ret = (uint16_t)(idx_m - idx);

	while (idx != idx_m) {
		/* copy used entry, used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (unlikely(desc_id >= vq->size))
			return -1;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				return -1;

			/* Map the indirect descriptor table into host VA */
			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				return -1;

			if (unlikely(dlen < vq->desc[desc_id].len)) {
				/* Table not contiguous in host VA, copy it */
				idesc = vhost_alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					return -1;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* dirty page logging for DMA writable buffer */
		do {
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write_iova(dev, vq, desc.addr,
						desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

	rte_smp_wmb();
	vq->used->idx = idx_m;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}

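/* Query the driver for the number of queue pairs the device supports. */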
int
rte_vdpa_get_queue_num(struct rte_vdpa_device *dev, uint32_t *queue_num)
{
	if (dev == NULL || dev->ops == NULL || dev->ops->get_queue_num == NULL)
		return -1;

	return dev->ops->get_queue_num(dev, queue_num);
}

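/* Query the driver for the Virtio features supported by the device. */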
int
rte_vdpa_get_features(struct rte_vdpa_device *dev, uint64_t *features)
{
	if (dev == NULL || dev->ops == NULL || dev->ops->get_features == NULL)
		return -1;

	return dev->ops->get_features(dev, features);
}

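/* Query the driver for the vhost-user protocol features it supports. */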
int
rte_vdpa_get_protocol_features(struct rte_vdpa_device *dev, uint64_t *features)
{
	if (dev == NULL || dev->ops == NULL ||
			dev->ops->get_protocol_features == NULL)
		return -1;

	return dev->ops->get_protocol_features(dev, features);
}

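/* Retrieve the names of the device statistics; optional driver callback. */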
int
rte_vdpa_get_stats_names(struct rte_vdpa_device *dev,
		struct rte_vdpa_stat_name *stats_names,
		unsigned int size)
{
	if (!dev)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats_names, -ENOTSUP);

	return dev->ops->get_stats_names(dev, stats_names, size);
}

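/* Retrieve statistics for one virtqueue; optional driver callback. */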
int
rte_vdpa_get_stats(struct rte_vdpa_device *dev, uint16_t qid,
		struct rte_vdpa_stat *stats, unsigned int n)
{
	if (!dev || !stats || !n)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats, -ENOTSUP);

	return dev->ops->get_stats(dev, qid, stats, n);
}

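/* Reset statistics of one virtqueue; optional driver callback. */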
int
rte_vdpa_reset_stats(struct rte_vdpa_device *dev, uint16_t qid)
{
	if (!dev)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->reset_stats, -ENOTSUP);

	return dev->ops->reset_stats(dev, qid);
}

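/* Return 0 when the vDPA device is backed by the given rte_device. */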
static int
vdpa_dev_match(struct rte_vdpa_device *dev,
		const struct rte_device *rte_dev)
{
	if (dev->device == rte_dev)
		return 0;

	return -1;
}

/* Generic rte_vdpa_dev comparison function. */
typedef int (*rte_vdpa_cmp_t)(struct rte_vdpa_device *,
		const struct rte_device *rte_dev);

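/* Walk the device list, starting after 'start', and return the first match. */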
static struct rte_vdpa_device *
vdpa_find_device(const struct rte_vdpa_device *start, rte_vdpa_cmp_t cmp,
		struct rte_device *rte_dev)
{
	struct rte_vdpa_device *dev;

	rte_spinlock_lock(&vdpa_device_list_lock);
	if (start == NULL)
		dev = TAILQ_FIRST(&vdpa_device_list);
	else
		dev = TAILQ_NEXT(start, next);

	while (dev != NULL) {
		if (cmp(dev, rte_dev) == 0)
			break;

		dev = TAILQ_NEXT(dev, next);
	}
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return dev;
}

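/* Class iterator callback used by the EAL device iteration framework. */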
static void *
vdpa_dev_iterate(const void *start,
		const char *str,
		const struct rte_dev_iterator *it)
{
	struct rte_vdpa_device *vdpa_dev = NULL;

	RTE_SET_USED(str);

	vdpa_dev = vdpa_find_device(start, vdpa_dev_match, it->device);

	return vdpa_dev;
}

static struct rte_class rte_class_vdpa = {
	.dev_iterate = vdpa_dev_iterate,
};

RTE_REGISTER_CLASS(vdpa, rte_class_vdpa);