/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/major.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/socket.h>

#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <ethdev_vdev.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "virtio_ethdev.h"
#include "virtio_logs.h"
#include "virtio.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"
#include "virtio_user/vhost.h"

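/* Map a virtio_hw pointer back to the struct virtio_user_dev that embeds it */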
#define virtio_user_get_dev(hwp) container_of(hwp, struct virtio_user_dev, hw)

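/* Emulate reads of the virtio-net config space. Only the mac, status and
 * max_virtqueue_pairs fields are backed; reads at other offsets are ignored.
 */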
static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
			    void *dst, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (offset == offsetof(struct virtio_net_config, mac) &&
	    length == RTE_ETHER_ADDR_LEN) {
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			((uint8_t *)dst)[i] = dev->mac_addr[i];
		return;
	}

	if (offset == offsetof(struct virtio_net_config, status)) {
		virtio_user_dev_update_link_state(dev);

		*(uint16_t *)dst = dev->net_status;
	}

	if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
		*(uint16_t *)dst = dev->max_queue_pairs;
}

static void
virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
			     const void *src, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if ((offset == offsetof(struct virtio_net_config, mac)) &&
	    (length == RTE_ETHER_ADDR_LEN)) {
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			dev->mac_addr[i] = ((const uint8_t *)src)[i];
		virtio_user_dev_set_mac(dev);
		virtio_user_dev_get_mac(dev);
	} else {
		PMD_DRV_LOG(ERR, "unsupported offset=%zu, len=%d",
			    offset, length);
	}
}

static void
virtio_user_reset(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_stop_device(dev);
}

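/* Track virtio status transitions: negotiate features with the backend the
 * first time FEATURES_OK is set, start the backend device on DRIVER_OK, and
 * stop it again on reset (status 0).
 */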
static void
virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
	uint8_t old_status = dev->status;

	if (status & VIRTIO_CONFIG_STATUS_FEATURES_OK &&
	    ~old_status & VIRTIO_CONFIG_STATUS_FEATURES_OK)
		virtio_user_dev_set_features(dev);
	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_start_device(dev);
	else if (status == VIRTIO_CONFIG_STATUS_RESET)
		virtio_user_reset(hw);

	virtio_user_dev_set_status(dev, status);
}

static uint8_t
virtio_user_get_status(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	virtio_user_dev_update_status(dev);

	return dev->status;
}

static uint64_t
virtio_user_get_features(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* Mask out feature bits the PMD does not support */
	return (dev->device_features | dev->frontend_features) &
	       VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
}

static void
virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	dev->features = features & (dev->device_features | dev->frontend_features);
}

static int
virtio_user_features_ok(struct virtio_hw *hw __rte_unused)
{
	return 0;
}

static uint8_t
virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
{
	/* Rx queue interrupts and the config interrupt are delivered
	 * separately in virtio-user, so only report config changes here.
	 */
	return VIRTIO_ISR_CONFIG;
}

static uint16_t
virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
			   uint16_t vec __rte_unused)
{
	return 0;
}

static uint16_t
virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
			  struct virtqueue *vq __rte_unused,
			  uint16_t vec)
{
	/* pretend we have done that */
	return vec;
}

/* Return the queue size (i.e. the number of descriptors) of the specified
 * queue. This differs from VHOST_USER_GET_QUEUE_NUM, which returns the
 * maximum number of supported queues.
 */
static uint16_t
virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* Currently, all queues share the same queue size */
	return dev->queue_size;
}

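/* Lay out a packed virtqueue inside the ring memory already allocated by the
 * ethdev layer: the descriptor array comes first, followed by the driver and
 * device event suppression areas, the latter aligned to VIRTIO_VRING_ALIGN.
 * Both wrap counters start at 1, as the virtio spec requires.
 */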
static void
virtio_user_setup_queue_packed(struct virtqueue *vq,
			       struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	struct vring_packed *vring;
	uint64_t desc_addr;
	uint64_t avail_addr;
	uint64_t used_addr;
	uint16_t i;

	vring = &dev->packed_vrings[queue_idx];
	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries *
		sizeof(struct vring_packed_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr +
			sizeof(struct vring_packed_desc_event),
			VIRTIO_VRING_ALIGN);
	vring->num = vq->vq_nentries;
	vring->desc = (void *)(uintptr_t)desc_addr;
	vring->driver = (void *)(uintptr_t)avail_addr;
	vring->device = (void *)(uintptr_t)used_addr;
	dev->packed_queues[queue_idx].avail_wrap_counter = true;
	dev->packed_queues[queue_idx].used_wrap_counter = true;

	for (i = 0; i < vring->num; i++)
		vring->desc[i].flags = 0;
}

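/* Lay out a split virtqueue in the same ring memory: descriptor table first,
 * then the avail ring, then the used ring aligned to VIRTIO_VRING_ALIGN.
 */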
static void
virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	uint64_t desc_addr, avail_addr, used_addr;

	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_VRING_ALIGN);

	dev->vrings[queue_idx].num = vq->vq_nentries;
	dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
	dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
	dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
}

static int
virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (virtio_with_packed_queue(hw))
		virtio_user_setup_queue_packed(vq, dev);
	else
		virtio_user_setup_queue_split(vq, dev);

	return 0;
}

static void
virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	/* For legacy devices, writing 0 to the VIRTIO_PCI_QUEUE_PFN port
	 * makes QEMU stop the ioeventfds and reset the device status.
	 * For modern devices, zeroing the queue desc, avail and used
	 * addresses in the PCI bar triggers no further behavior in QEMU.
	 *
	 * Here we only care about what to deliver to the vhost-user or
	 * vhost-kernel backend, so just close the ioeventfds for now.
	 */
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	close(dev->callfds[vq->vq_queue_index]);
	close(dev->kickfds[vq->vq_queue_index]);
}

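/* Notify the backend that new buffers are available. The control queue is
 * emulated in-process, so its requests are handled directly; data queues
 * are kicked by writing to their eventfd.
 */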
static void
virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t buf = 1;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (hw->cvq && (virtnet_cq_to_vq(hw->cvq) == vq)) {
		if (virtio_with_packed_queue(vq->hw))
			virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
		else
			virtio_user_handle_cq(dev, vq->vq_queue_index);
		return;
	}

	if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
		PMD_DRV_LOG(ERR, "failed to kick backend: %s",
			    strerror(errno));
}

static int
virtio_user_dev_close(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	virtio_user_dev_uninit(dev);

	return 0;
}

const struct virtio_ops virtio_user_ops = {
	.read_dev_cfg	= virtio_user_read_dev_config,
	.write_dev_cfg	= virtio_user_write_dev_config,
	.get_status	= virtio_user_get_status,
	.set_status	= virtio_user_set_status,
	.get_features	= virtio_user_get_features,
	.set_features	= virtio_user_set_features,
	.features_ok	= virtio_user_features_ok,
	.get_isr	= virtio_user_get_isr,
	.set_config_irq	= virtio_user_set_config_irq,
	.set_queue_irq	= virtio_user_set_queue_irq,
	.get_queue_num	= virtio_user_get_queue_num,
	.setup_queue	= virtio_user_setup_queue,
	.del_queue	= virtio_user_del_queue,
	.notify_queue	= virtio_user_notify_queue,
	.dev_close	= virtio_user_dev_close,
};

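/* Device arguments accepted by the net_virtio_user vdev; see the
 * RTE_PMD_REGISTER_PARAM_STRING() declaration at the bottom of this file.
 */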
static const char *valid_args[] = {
#define VIRTIO_USER_ARG_QUEUES_NUM "queues"
	VIRTIO_USER_ARG_QUEUES_NUM,
#define VIRTIO_USER_ARG_CQ_NUM "cq"
	VIRTIO_USER_ARG_CQ_NUM,
#define VIRTIO_USER_ARG_MAC "mac"
	VIRTIO_USER_ARG_MAC,
#define VIRTIO_USER_ARG_PATH "path"
	VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE "queue_size"
	VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
	VIRTIO_USER_ARG_INTERFACE_NAME,
#define VIRTIO_USER_ARG_SERVER_MODE "server"
	VIRTIO_USER_ARG_SERVER_MODE,
#define VIRTIO_USER_ARG_MRG_RXBUF "mrg_rxbuf"
	VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER "in_order"
	VIRTIO_USER_ARG_IN_ORDER,
#define VIRTIO_USER_ARG_PACKED_VQ "packed_vq"
	VIRTIO_USER_ARG_PACKED_VQ,
#define VIRTIO_USER_ARG_SPEED "speed"
	VIRTIO_USER_ARG_SPEED,
#define VIRTIO_USER_ARG_VECTORIZED "vectorized"
	VIRTIO_USER_ARG_VECTORIZED,
	NULL
};

#define VIRTIO_USER_DEF_CQ_EN		0
#define VIRTIO_USER_DEF_Q_NUM		1
#define VIRTIO_USER_DEF_Q_SZ		256
#define VIRTIO_USER_DEF_SERVER_MODE	0

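/* kvargs callback: duplicate the value string; the caller owns the result
 * and must free() it.
 */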
static int
get_string_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}

static int
get_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	uint64_t integer = 0;
	if (!value || !extra_args)
		return -EINVAL;
	errno = 0;
	integer = strtoull(value, NULL, 0);
	/* extra_args keeps its default value; it is only replaced
	 * when the 'value' argument parses successfully.
	 */
	if (errno == 0)
		*(uint64_t *)extra_args = integer;
	return -errno;
}

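/* vhost-vdpa character devices use a dynamically allocated major number,
 * so look it up in /proc/devices.
 */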
static uint32_t
vdpa_dynamic_major_num(void)
{
	FILE *fp;
	char *line = NULL;
	size_t size = 0;
	char name[11];
	bool found = false;
	uint32_t num;

	fp = fopen("/proc/devices", "r");
	if (fp == NULL) {
		PMD_INIT_LOG(ERR, "Cannot open /proc/devices: %s",
			     strerror(errno));
		return UNNAMED_MAJOR;
	}

	while (getline(&line, &size, fp) > 0) {
		char *stripped = line + strspn(line, " ");
		if ((sscanf(stripped, "%u %10s", &num, name) == 2) &&
		    (strncmp(name, "vhost-vdpa", 10) == 0)) {
			found = true;
			break;
		}
	}
	free(line);
	fclose(fp);
	return found ? num : UNNAMED_MAJOR;
}

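/* Guess the backend type from the devargs path: a missing path or a UNIX
 * socket means vhost-user, a misc character device (e.g. /dev/vhost-net)
 * means vhost-kernel, and a character device with the vhost-vdpa dynamic
 * major means vhost-vdpa.
 */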
static enum virtio_user_backend_type
virtio_user_backend_type(const char *path)
{
	struct stat sb;

	if (stat(path, &sb) == -1) {
		if (errno == ENOENT)
			return VIRTIO_USER_BACKEND_VHOST_USER;

		PMD_INIT_LOG(ERR, "stat() failed: %s (%s)", path,
			     strerror(errno));
		return VIRTIO_USER_BACKEND_UNKNOWN;
	}

	if (S_ISSOCK(sb.st_mode)) {
		return VIRTIO_USER_BACKEND_VHOST_USER;
	} else if (S_ISCHR(sb.st_mode)) {
		if (major(sb.st_rdev) == MISC_MAJOR)
			return VIRTIO_USER_BACKEND_VHOST_KERNEL;
		if (major(sb.st_rdev) == vdpa_dynamic_major_num())
			return VIRTIO_USER_BACKEND_VHOST_VDPA;
	}
	return VIRTIO_USER_BACKEND_UNKNOWN;
}

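/* Allocate the ethdev with a struct virtio_user_dev as private data and wire
 * its embedded virtio_hw to virtio_user_ops. Link state change interrupts
 * are emulated; the vectorized and in-order datapaths are off by default.
 */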
static struct rte_eth_dev *
virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	struct rte_eth_dev_data *data;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*dev));
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
		return NULL;
	}

	data = eth_dev->data;
	dev = eth_dev->data->dev_private;
	hw = &dev->hw;

	hw->port_id = data->port_id;
	VIRTIO_OPS(hw) = &virtio_user_ops;

	hw->intr_lsc = 1;
	hw->use_vec_rx = 0;
	hw->use_vec_tx = 0;
	hw->use_inorder_rx = 0;
	hw->use_inorder_tx = 0;

	return eth_dev;
}

static void
virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
{
	rte_eth_dev_release_port(eth_dev);
}

/* Dev initialization routine. Invoked once for each virtio vdev at
 * EAL init time, see rte_bus_probe().
 * Returns 0 on success.
 */
static int
virtio_user_pmd_probe(struct rte_vdev_device *vdev)
{
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;
	enum virtio_user_backend_type backend_type = VIRTIO_USER_BACKEND_UNKNOWN;
	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
	uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
	uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
	uint64_t mrg_rxbuf = 1;
	uint64_t in_order = 1;
	uint64_t packed_vq = 0;
	uint64_t vectorized = 0;
	char *path = NULL;
	char *ifname = NULL;
	char *mac_addr = NULL;
	int ret = -1;

	RTE_BUILD_BUG_ON(offsetof(struct virtio_user_dev, hw) != 0);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const char *name = rte_vdev_device_name(vdev);
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_INIT_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}

		dev = eth_dev->data->dev_private;
		hw = &dev->hw;
		VIRTIO_OPS(hw) = &virtio_user_ops;

		if (eth_virtio_dev_init(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "eth_virtio_dev_init failed");
			rte_eth_dev_release_port(eth_dev);
			return -1;
		}

		eth_dev->dev_ops = &virtio_user_secondary_eth_dev_ops;
		eth_dev->device = &vdev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

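	/* Parse the devargs; "path" is mandatory and determines which
	 * backend (vhost-user, vhost-kernel or vhost-vdpa) is used.
	 */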
	kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_args);
	if (!kvlist) {
		PMD_INIT_LOG(ERR, "failed to parse device arguments");
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
				       &get_string_arg, &path) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_PATH);
			goto end;
		}
	} else {
		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
			     VIRTIO_USER_ARG_PATH);
		goto end;
	}

	backend_type = virtio_user_backend_type(path);
	if (backend_type == VIRTIO_USER_BACKEND_UNKNOWN) {
		PMD_INIT_LOG(ERR,
			     "unable to determine backend type for path %s",
			     path);
		goto end;
	}
	PMD_INIT_LOG(INFO, "Backend type detected: %s",
		     virtio_user_backend_strings[backend_type]);

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
		if (backend_type != VIRTIO_USER_BACKEND_VHOST_KERNEL) {
			PMD_INIT_LOG(ERR,
				     "arg %s applies only to the vhost-kernel backend",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}

		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
				       &get_string_arg, &ifname) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
				       &get_string_arg, &mac_addr) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_MAC);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
				       &get_integer_arg, &queue_size) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_QUEUE_SIZE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
				       &get_integer_arg, &queues) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_QUEUES_NUM);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
				       &get_integer_arg, &server_mode) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_SERVER_MODE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
				       &get_integer_arg, &cq) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_CQ_NUM);
			goto end;
		}
	} else if (queues > 1) {
		cq = 1;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ,
				       &get_integer_arg, &packed_vq) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_PACKED_VQ);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_VECTORIZED) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_VECTORIZED,
				       &get_integer_arg, &vectorized) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_VECTORIZED);
			goto end;
		}
	}

	if (queues > 1 && cq == 0) {
		PMD_INIT_LOG(ERR, "multi-queue requires a control queue");
		goto end;
	}

	if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
		PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
			     VIRTIO_USER_ARG_QUEUES_NUM, queues,
			     VIRTIO_MAX_VIRTQUEUE_PAIRS);
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
				       &get_integer_arg, &mrg_rxbuf) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_MRG_RXBUF);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER,
				       &get_integer_arg, &in_order) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_IN_ORDER);
			goto end;
		}
	}

	eth_dev = virtio_user_eth_dev_alloc(vdev);
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "virtio_user failed to alloc device");
		goto end;
	}

	dev = eth_dev->data->dev_private;
	hw = &dev->hw;
	if (virtio_user_dev_init(dev, path, queues, cq,
				 queue_size, mac_addr, &ifname, server_mode,
				 mrg_rxbuf, in_order, packed_vq, backend_type) < 0) {
		PMD_INIT_LOG(ERR, "virtio_user_dev_init failed");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	/*
	 * Virtio-user requires using virtual addresses for the descriptor
	 * buffers, regardless of what other devices require.
	 */
	hw->use_va = true;

	/* previously called by pci probing for physical dev */
	if (eth_virtio_dev_init(eth_dev) < 0) {
		PMD_INIT_LOG(ERR, "eth_virtio_dev_init failed");
		virtio_user_dev_uninit(dev);
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

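	/* Select the vectorized datapath if requested: packed rings need
	 * AVX512 or ARM support at build time; for split rings only the Rx
	 * path is vectorized here.
	 */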
	if (vectorized) {
		if (packed_vq) {
#if defined(CC_AVX512_SUPPORT) || defined(RTE_ARCH_ARM)
			hw->use_vec_rx = 1;
			hw->use_vec_tx = 1;
#else
			PMD_INIT_LOG(INFO,
				     "build environment does not support vectorized packed ring");
#endif
		} else {
			hw->use_vec_rx = 1;
		}
	}

	rte_eth_dev_probing_finish(eth_dev);
	ret = 0;

end:
	rte_kvargs_free(kvlist);
	free(path);
	free(mac_addr);
	free(ifname);
	return ret;
}

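/* Remove a virtio-user device: the primary process closes the port
 * (stopping the device and freeing its queues), while secondary processes
 * only release their local copy.
 */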
static int
virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Uninitializing %s", name);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	/* make sure the device is stopped, queues freed */
	return rte_eth_dev_close(eth_dev->data->port_id);
}

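/* Forward DMA map/unmap requests from the vdev bus to the backend; only
 * backends that implement the corresponding ops (e.g. vhost-vdpa) act on
 * them.
 */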
static int virtio_user_pmd_dma_map(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	dev = eth_dev->data->dev_private;

	if (dev->ops->dma_map)
		return dev->ops->dma_map(dev, addr, iova, len);

	return 0;
}


static int virtio_user_pmd_dma_unmap(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	dev = eth_dev->data->dev_private;

	if (dev->ops->dma_unmap)
		return dev->ops->dma_unmap(dev, addr, iova, len);

	return 0;
}

static struct rte_vdev_driver virtio_user_driver = {
	.probe = virtio_user_pmd_probe,
	.remove = virtio_user_pmd_remove,
	.dma_map = virtio_user_pmd_dma_map,
	.dma_unmap = virtio_user_pmd_dma_unmap,
};

RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
	"path=<path> "
	"mac=<mac addr> "
	"cq=<int> "
	"queue_size=<int> "
	"queues=<int> "
	"iface=<string> "
	"server=<0|1> "
	"mrg_rxbuf=<0|1> "
	"in_order=<0|1> "
	"packed_vq=<0|1> "
	"speed=<int> "
	"vectorized=<0|1>");