/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright(c) 2014 6WIND S.A.
 */

#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_compat.h>
#include <rte_bus.h>
#include <rte_class.h>
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>

#include "eal_private.h"
#include "hotplug_mp.h"

/**
 * The device event callback description.
 *
 * It contains the callback address registered by the user application,
 * the pointer to the callback parameters, and the device name.
 */
struct dev_event_callback {
	TAILQ_ENTRY(dev_event_callback) next; /**< Callbacks list */
	rte_dev_event_cb_fn cb_fn; /**< Callback address */
	void *cb_arg; /**< Callback parameter */
	char *dev_name; /**< Callback device name, NULL means all devices */
	uint32_t active; /**< Callback is executing */
};

/** @internal Structure to keep track of registered callbacks */
TAILQ_HEAD(dev_event_cb_list, dev_event_callback);

/* The device event callback list for all registered callbacks. */
static struct dev_event_cb_list dev_event_cbs;

/* spinlock for device callbacks */
static rte_spinlock_t dev_event_lock = RTE_SPINLOCK_INITIALIZER;

struct dev_next_ctx {
	struct rte_dev_iterator *it;
	const char *bus_str;
	const char *cls_str;
};

#define CTX(it, bus_str, cls_str) \
	(&(const struct dev_next_ctx){ \
		.it = it, \
		.bus_str = bus_str, \
		.cls_str = cls_str, \
	})

#define ITCTX(ptr) \
	(((struct dev_next_ctx *)(intptr_t)ptr)->it)

#define BUSCTX(ptr) \
	(((struct dev_next_ctx *)(intptr_t)ptr)->bus_str)

#define CLSCTX(ptr) \
	(((struct dev_next_ctx *)(intptr_t)ptr)->cls_str)
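
/*
 * Descriptive note (added for clarity): CTX() packs the iterator and the
 * sanitized bus/class strings into a temporary struct passed as the opaque
 * 'ctx' argument of rte_bus_find()/rte_class_find(); ITCTX(), BUSCTX() and
 * CLSCTX() unpack the same fields inside the comparison callbacks, e.g.:
 *
 *	it = ITCTX(ctx);
 *	bus_str = BUSCTX(ctx);
 */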

static int cmp_dev_name(const struct rte_device *dev, const void *_name)
{
	const char *name = _name;

	return strcmp(dev->name, name);
}

int
rte_dev_is_probed(const struct rte_device *dev)
{
	/* The field driver should be set only when the probe is successful. */
	return dev->driver != NULL;
}

/* helper function to build devargs, caller should free the memory */
static int
build_devargs(const char *busname, const char *devname,
	      const char *drvargs, char **devargs)
{
	int length;

	length = snprintf(NULL, 0, "%s:%s,%s", busname, devname, drvargs);
	if (length < 0)
		return -EINVAL;

	*devargs = malloc(length + 1);
	if (*devargs == NULL)
		return -ENOMEM;

	length = snprintf(*devargs, length + 1, "%s:%s,%s",
			busname, devname, drvargs);
	if (length < 0) {
		free(*devargs);
		return -EINVAL;
	}

	return 0;
}
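
/*
 * Illustrative sketch (not part of the library): with the "%s:%s,%s" format
 * above, a call such as
 *
 *	char *devargs;
 *	build_devargs("pci", "0000:01:00.0", "key=val", &devargs);
 *
 * leaves devargs pointing at "pci:0000:01:00.0,key=val". The bus name,
 * device name and driver arguments are assumptions chosen only for the
 * example; the caller must free() the string.
 */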

int
rte_eal_hotplug_add(const char *busname, const char *devname,
		const char *drvargs)
{
	char *devargs;
	int ret;

	ret = build_devargs(busname, devname, drvargs, &devargs);
	if (ret != 0)
		return ret;

	ret = rte_dev_probe(devargs);
	free(devargs);

	return ret;
}
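
/*
 * Usage sketch (illustrative only, the device name is an assumption):
 *
 *	int ret = rte_eal_hotplug_add("pci", "0000:01:00.0", "");
 *	if (ret < 0)
 *		printf("cannot attach device: %d\n", ret);
 *
 * This is equivalent to building the devargs string "pci:0000:01:00.0,"
 * and calling rte_dev_probe() on it, as done above.
 */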

/* probe device at local process. */
int
local_dev_probe(const char *devargs, struct rte_device **new_dev)
{
	struct rte_device *dev;
	struct rte_devargs *da;
	int ret;

	*new_dev = NULL;
	da = calloc(1, sizeof(*da));
	if (da == NULL)
		return -ENOMEM;

	ret = rte_devargs_parse(da, devargs);
	if (ret)
		goto err_devarg;

	if (da->bus->plug == NULL) {
		RTE_LOG(ERR, EAL, "Function plug not supported by bus (%s)\n",
			da->bus->name);
		ret = -ENOTSUP;
		goto err_devarg;
	}

	ret = rte_devargs_insert(&da);
	if (ret)
		goto err_devarg;

	/* the rte_devargs will be referenced in the matching rte_device */
	ret = da->bus->scan();
	if (ret)
		goto err_devarg;

	dev = da->bus->find_device(NULL, cmp_dev_name, da->name);
	if (dev == NULL) {
		RTE_LOG(ERR, EAL, "Cannot find device (%s)\n",
			da->name);
		ret = -ENODEV;
		goto err_devarg;
	}
	/* Since there is a matching device, it is now its responsibility
	 * to manage the devargs we've just inserted. From this point
	 * those devargs shouldn't be removed manually anymore.
	 */

	ret = dev->bus->plug(dev);
	if (ret > 0)
		ret = -ENOTSUP;

	if (ret && !rte_dev_is_probed(dev)) { /* if hasn't ever succeeded */
		RTE_LOG(ERR, EAL, "Driver cannot attach the device (%s)\n",
			dev->name);
		return ret;
	}

	*new_dev = dev;
	return ret;

err_devarg:
	if (rte_devargs_remove(da) != 0) {
		free(da->args);
		free(da);
	}
	return ret;
}

int
rte_dev_probe(const char *devargs)
{
	struct eal_dev_mp_req req;
	struct rte_device *dev;
	int ret;

	memset(&req, 0, sizeof(req));
	req.t = EAL_DEV_REQ_TYPE_ATTACH;
	strlcpy(req.devargs, devargs, EAL_DEV_MP_DEV_ARGS_MAX_LEN);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/**
		 * If in secondary process, just send IPC request to
		 * primary process.
		 */
		ret = eal_dev_hotplug_request_to_primary(&req);
		if (ret != 0) {
			RTE_LOG(ERR, EAL,
				"Failed to send hotplug request to primary\n");
			return -ENOMSG;
		}
		if (req.result != 0)
			RTE_LOG(ERR, EAL,
				"Failed to hotplug add device\n");
		return req.result;
	}

	/* attach a shared device from primary start from here: */

	/* primary attach the new device itself. */
	ret = local_dev_probe(devargs, &dev);

	if (ret != 0) {
		RTE_LOG(ERR, EAL,
			"Failed to attach device on primary process\n");

		/**
		 * It is possible that a secondary process failed to attach a
		 * device that the primary process already has from
		 * initialization, so for the -EEXIST case we still need to
		 * sync with the secondary processes.
		 */
		if (ret != -EEXIST)
			return ret;
	}

	/* primary send attach sync request to secondary. */
	ret = eal_dev_hotplug_request_to_secondary(&req);

	/* if any communication error, we need to rollback. */
	if (ret != 0) {
		RTE_LOG(ERR, EAL,
			"Failed to send hotplug add request to secondary\n");
		ret = -ENOMSG;
		goto rollback;
	}

	/**
	 * if any secondary failed to attach, we need to consider if rollback
	 * is necessary.
	 */
	if (req.result != 0) {
		RTE_LOG(ERR, EAL,
			"Failed to attach device on secondary process\n");
		ret = req.result;

		/* for -EEXIST, we don't need to rollback. */
		if (ret == -EEXIST)
			return ret;
		goto rollback;
	}

	return 0;

rollback:
	req.t = EAL_DEV_REQ_TYPE_ATTACH_ROLLBACK;

	/* primary send rollback request to secondary. */
	if (eal_dev_hotplug_request_to_secondary(&req) != 0)
		RTE_LOG(WARNING, EAL,
			"Failed to rollback device attach on secondary. "
			"Devices in secondary may not sync with primary\n");

	/* primary rollback itself. */
	if (local_dev_remove(dev) != 0)
		RTE_LOG(WARNING, EAL,
			"Failed to rollback device attach on primary. "
			"Devices in secondary may not sync with primary\n");

	return ret;
}
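
/*
 * Illustrative sketch (not part of the library): a primary or secondary
 * process can attach a device with a single devargs string, e.g.
 *
 *	if (rte_dev_probe("pci:0000:01:00.0,key=val") != 0)
 *		printf("hotplug add failed\n");
 *
 * The device name and arguments are assumptions for the example; the string
 * follows the "bus:device,args" layout built by build_devargs() above.
 */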

int
rte_eal_hotplug_remove(const char *busname, const char *devname)
{
	struct rte_device *dev;
	struct rte_bus *bus;

	bus = rte_bus_find_by_name(busname);
	if (bus == NULL) {
		RTE_LOG(ERR, EAL, "Cannot find bus (%s)\n", busname);
		return -ENOENT;
	}

	dev = bus->find_device(NULL, cmp_dev_name, devname);
	if (dev == NULL) {
		RTE_LOG(ERR, EAL, "Cannot find plugged device (%s)\n", devname);
		return -EINVAL;
	}

	return rte_dev_remove(dev);
}

/* remove device at local process. */
int
local_dev_remove(struct rte_device *dev)
{
	int ret;

	if (dev->bus->unplug == NULL) {
		RTE_LOG(ERR, EAL, "Function unplug not supported by bus (%s)\n",
			dev->bus->name);
		return -ENOTSUP;
	}

	ret = dev->bus->unplug(dev);
	if (ret) {
		RTE_LOG(ERR, EAL, "Driver cannot detach the device (%s)\n",
			dev->name);
		return (ret < 0) ? ret : -ENOENT;
	}

	return 0;
}

int
rte_dev_remove(struct rte_device *dev)
{
	struct eal_dev_mp_req req;
	char *devargs;
	int ret;

	if (!rte_dev_is_probed(dev)) {
		RTE_LOG(ERR, EAL, "Device is not probed\n");
		return -ENOENT;
	}

	ret = build_devargs(dev->bus->name, dev->name, "", &devargs);
	if (ret != 0)
		return ret;

	memset(&req, 0, sizeof(req));
	req.t = EAL_DEV_REQ_TYPE_DETACH;
	strlcpy(req.devargs, devargs, EAL_DEV_MP_DEV_ARGS_MAX_LEN);
	free(devargs);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/**
		 * If in secondary process, just send IPC request to
		 * primary process.
		 */
		ret = eal_dev_hotplug_request_to_primary(&req);
		if (ret != 0) {
			RTE_LOG(ERR, EAL,
				"Failed to send hotplug request to primary\n");
			return -ENOMSG;
		}
		if (req.result != 0)
			RTE_LOG(ERR, EAL,
				"Failed to hotplug remove device\n");
		return req.result;
	}

	/* detach a device from primary start from here: */

	/* primary send detach sync request to secondary */
	ret = eal_dev_hotplug_request_to_secondary(&req);

	/**
	 * If there is a communication error, we need to rollback, because
	 * some secondary processes may already have detached the device
	 * successfully.
	 */
	if (ret != 0) {
		RTE_LOG(ERR, EAL,
			"Failed to send device detach request to secondary\n");
		ret = -ENOMSG;
		goto rollback;
	}

	/**
	 * if any secondary failed to detach, we need to consider if rollback
	 * is necessary.
	 */
	if (req.result != 0) {
		RTE_LOG(ERR, EAL,
			"Failed to detach device on secondary process\n");
		ret = req.result;
		/**
		 * if -ENOENT, we don't need to rollback, since the device is
		 * already detached on the secondary process.
		 */
		if (ret != -ENOENT)
			goto rollback;
	}

	/* primary detach the device itself. */
	ret = local_dev_remove(dev);

	/* if primary failed, still need to consider if rollback is necessary */
	if (ret != 0) {
		RTE_LOG(ERR, EAL,
			"Failed to detach device on primary process\n");
		/* if -ENOENT, we don't need to rollback */
		if (ret == -ENOENT)
			return ret;
		goto rollback;
	}

	return 0;

rollback:
	req.t = EAL_DEV_REQ_TYPE_DETACH_ROLLBACK;

	/* primary send rollback request to secondary. */
	if (eal_dev_hotplug_request_to_secondary(&req) != 0)
		RTE_LOG(WARNING, EAL,
			"Failed to rollback device detach on secondary. "
			"Devices in secondary may not sync with primary\n");

	return ret;
}

int
rte_dev_event_callback_register(const char *device_name,
				rte_dev_event_cb_fn cb_fn,
				void *cb_arg)
{
	struct dev_event_callback *event_cb;
	int ret;

	if (!cb_fn)
		return -EINVAL;

	rte_spinlock_lock(&dev_event_lock);

	if (TAILQ_EMPTY(&dev_event_cbs))
		TAILQ_INIT(&dev_event_cbs);

	TAILQ_FOREACH(event_cb, &dev_event_cbs, next) {
		if (event_cb->cb_fn == cb_fn && event_cb->cb_arg == cb_arg) {
			if (device_name == NULL && event_cb->dev_name == NULL)
				break;
			if (device_name == NULL || event_cb->dev_name == NULL)
				continue;
			if (!strcmp(event_cb->dev_name, device_name))
				break;
		}
	}

	/* create a new callback. */
	if (event_cb == NULL) {
		event_cb = malloc(sizeof(struct dev_event_callback));
		if (event_cb != NULL) {
			event_cb->cb_fn = cb_fn;
			event_cb->cb_arg = cb_arg;
			event_cb->active = 0;
			if (!device_name) {
				event_cb->dev_name = NULL;
			} else {
				event_cb->dev_name = strdup(device_name);
				if (event_cb->dev_name == NULL) {
					ret = -ENOMEM;
					goto error;
				}
			}
			TAILQ_INSERT_TAIL(&dev_event_cbs, event_cb, next);
		} else {
			RTE_LOG(ERR, EAL,
				"Failed to allocate memory for device "
				"event callback.\n");
			ret = -ENOMEM;
			goto error;
		}
	} else {
		RTE_LOG(ERR, EAL,
			"The callback is already registered, no need "
			"to register again.\n");
		event_cb = NULL;
		ret = -EEXIST;
		goto error;
	}

	rte_spinlock_unlock(&dev_event_lock);
	return 0;
error:
	free(event_cb);
	rte_spinlock_unlock(&dev_event_lock);
	return ret;
}
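
/*
 * Usage sketch (illustrative only, the callback and device names are
 * assumptions): the callback must match rte_dev_event_cb_fn, which is
 * invoked below as cb_fn(device_name, event, cb_arg).
 *
 *	static void
 *	dev_event_cb(const char *name, enum rte_dev_event_type type,
 *		     void *arg)
 *	{
 *		printf("device %s event %d\n", name, type);
 *	}
 *
 *	// register for one device; NULL instead of the name would
 *	// match events from all devices
 *	rte_dev_event_callback_register("0000:01:00.0", dev_event_cb, NULL);
 */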

int
rte_dev_event_callback_unregister(const char *device_name,
				  rte_dev_event_cb_fn cb_fn,
				  void *cb_arg)
{
	int ret = 0;
	struct dev_event_callback *event_cb, *next;

	if (!cb_fn)
		return -EINVAL;

	rte_spinlock_lock(&dev_event_lock);
	/* walk through the callbacks and remove all that match. */
	for (event_cb = TAILQ_FIRST(&dev_event_cbs); event_cb != NULL;
	     event_cb = next) {

		next = TAILQ_NEXT(event_cb, next);

		if (device_name != NULL && event_cb->dev_name != NULL) {
			if (!strcmp(event_cb->dev_name, device_name)) {
				if (event_cb->cb_fn != cb_fn ||
				    (cb_arg != (void *)-1 &&
				    event_cb->cb_arg != cb_arg))
					continue;
			}
		} else if (device_name != NULL) {
			continue;
		}

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (event_cb->active == 0) {
			TAILQ_REMOVE(&dev_event_cbs, event_cb, next);
			free(event_cb->dev_name);
			free(event_cb);
			ret++;
		} else {
			ret = -EAGAIN;
			break;
		}
	}

	/* no matching callback was registered */
	if (ret == 0)
		ret = -ENOENT;

	rte_spinlock_unlock(&dev_event_lock);
	return ret;
}
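
/*
 * Descriptive note (added for clarity): in the name-matching branch above,
 * cb_arg == (void *)-1 acts as a wildcard, so the stored callback argument
 * is not compared and every registration of cb_fn for that device name is
 * removed, e.g.
 *
 *	// remove the registration from the sketch above, whatever
 *	// cb_arg it was registered with (names are assumptions)
 *	rte_dev_event_callback_unregister("0000:01:00.0", dev_event_cb,
 *					  (void *)-1);
 *
 * The return value is the number of callbacks removed, or a negative errno
 * value on failure.
 */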

void
rte_dev_event_callback_process(const char *device_name,
			       enum rte_dev_event_type event)
{
	struct dev_event_callback *cb_lst;

	if (device_name == NULL)
		return;

	rte_spinlock_lock(&dev_event_lock);

	TAILQ_FOREACH(cb_lst, &dev_event_cbs, next) {
		if (cb_lst->dev_name) {
			if (strcmp(cb_lst->dev_name, device_name))
				continue;
		}
		cb_lst->active = 1;
		rte_spinlock_unlock(&dev_event_lock);
		cb_lst->cb_fn(device_name, event,
				cb_lst->cb_arg);
		rte_spinlock_lock(&dev_event_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&dev_event_lock);
}

int
rte_dev_iterator_init(struct rte_dev_iterator *it,
		      const char *dev_str)
{
	struct rte_devargs devargs;
	struct rte_class *cls = NULL;
	struct rte_bus *bus = NULL;

	/* Having both bus_str and cls_str NULL is illegal,
	 * marking this iterator as invalid unless
	 * everything goes well.
	 */
	it->bus_str = NULL;
	it->cls_str = NULL;

	devargs.data = dev_str;
	if (rte_devargs_layers_parse(&devargs, dev_str))
		goto get_out;

	bus = devargs.bus;
	cls = devargs.cls;
	/* The string should have at least
	 * one layer specified.
	 */
	if (bus == NULL && cls == NULL) {
		RTE_LOG(ERR, EAL,
			"Either bus or class must be specified.\n");
		rte_errno = EINVAL;
		goto get_out;
	}
	if (bus != NULL && bus->dev_iterate == NULL) {
		RTE_LOG(ERR, EAL, "Bus %s not supported\n", bus->name);
		rte_errno = ENOTSUP;
		goto get_out;
	}
	if (cls != NULL && cls->dev_iterate == NULL) {
		RTE_LOG(ERR, EAL, "Class %s not supported\n", cls->name);
		rte_errno = ENOTSUP;
		goto get_out;
	}
	it->bus_str = devargs.bus_str;
	it->cls_str = devargs.cls_str;
	it->dev_str = dev_str;
	it->bus = bus;
	it->cls = cls;
	it->device = NULL;
	it->class_device = NULL;
get_out:
	return -rte_errno;
}

static char *
dev_str_sane_copy(const char *str)
{
	size_t end;
	char *copy;

	end = strcspn(str, ",/");
	if (str[end] == ',') {
		copy = strdup(&str[end + 1]);
	} else {
		/* '/' or '\0' */
		copy = strdup("");
	}
	if (copy == NULL) {
		rte_errno = ENOMEM;
	} else {
		char *slash;

		slash = strchr(copy, '/');
		if (slash != NULL)
			slash[0] = '\0';
	}
	return copy;
}
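
/*
 * Descriptive note (added for clarity): dev_str_sane_copy() keeps only the
 * property part of one layer, dropping the layer key and anything after a
 * '/'.  For example, assuming an input such as
 *
 *	dev_str_sane_copy("bus=pci,id=0/class=eth")
 *
 * the text up to and including the first ',' and the trailing "/..." are
 * stripped, so the returned copy is "id=0"; with no ',' before the '/' (or
 * the end of the string) an empty string is returned.
 */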

static int
class_next_dev_cmp(const struct rte_class *cls,
		   const void *ctx)
{
	struct rte_dev_iterator *it;
	const char *cls_str = NULL;
	void *dev;

	if (cls->dev_iterate == NULL)
		return 1;
	it = ITCTX(ctx);
	cls_str = CLSCTX(ctx);
	dev = it->class_device;
	/* it->cls_str != NULL means a class
	 * was specified in the devstr.
	 */
	if (it->cls_str != NULL && cls != it->cls)
		return 1;
	/* If an error occurred previously,
	 * no need to test further.
	 */
	if (rte_errno != 0)
		return -1;
	dev = cls->dev_iterate(dev, cls_str, it);
	it->class_device = dev;
	return dev == NULL;
}

static int
bus_next_dev_cmp(const struct rte_bus *bus,
		 const void *ctx)
{
	struct rte_device *dev = NULL;
	struct rte_class *cls = NULL;
	struct rte_dev_iterator *it;
	const char *bus_str = NULL;

	if (bus->dev_iterate == NULL)
		return 1;
	it = ITCTX(ctx);
	bus_str = BUSCTX(ctx);
	dev = it->device;
	/* it->bus_str != NULL means a bus
	 * was specified in the devstr.
	 */
	if (it->bus_str != NULL && bus != it->bus)
		return 1;
	/* If an error occurred previously,
	 * no need to test further.
	 */
	if (rte_errno != 0)
		return -1;
	if (it->cls_str == NULL) {
		dev = bus->dev_iterate(dev, bus_str, it);
		goto end;
	}
	/* cls_str != NULL */
	if (dev == NULL) {
next_dev_on_bus:
		dev = bus->dev_iterate(dev, bus_str, it);
		it->device = dev;
	}
	if (dev == NULL)
		return 1;
	if (it->cls != NULL)
		cls = TAILQ_PREV(it->cls, rte_class_list, next);
	cls = rte_class_find(cls, class_next_dev_cmp, ctx);
	if (cls != NULL) {
		it->cls = cls;
		goto end;
	}
	goto next_dev_on_bus;
end:
	it->device = dev;
	return dev == NULL;
}

struct rte_device *
rte_dev_iterator_next(struct rte_dev_iterator *it)
{
	struct rte_bus *bus = NULL;
	int old_errno = rte_errno;
	char *bus_str = NULL;
	char *cls_str = NULL;

	rte_errno = 0;
	if (it->bus_str == NULL && it->cls_str == NULL) {
		/* Invalid iterator. */
		rte_errno = EINVAL;
		return NULL;
	}
	if (it->bus != NULL)
		bus = TAILQ_PREV(it->bus, rte_bus_list, next);
	if (it->bus_str != NULL) {
		bus_str = dev_str_sane_copy(it->bus_str);
		if (bus_str == NULL)
			goto out;
	}
	if (it->cls_str != NULL) {
		cls_str = dev_str_sane_copy(it->cls_str);
		if (cls_str == NULL)
			goto out;
	}
	while ((bus = rte_bus_find(bus, bus_next_dev_cmp,
				   CTX(it, bus_str, cls_str)))) {
		if (it->device != NULL) {
			it->bus = bus;
			goto out;
		}
		if (it->bus_str != NULL ||
		    rte_errno != 0)
			break;
	}
	if (rte_errno == 0)
		rte_errno = old_errno;
out:
	free(bus_str);
	free(cls_str);
	return it->device;
}
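
/*
 * Usage sketch (illustrative only): iterating over every device matching a
 * device string, here assumed to be "bus=pci", by hand:
 *
 *	struct rte_dev_iterator it;
 *	struct rte_device *dev;
 *
 *	if (rte_dev_iterator_init(&it, "bus=pci") == 0)
 *		while ((dev = rte_dev_iterator_next(&it)) != NULL)
 *			printf("matched %s\n", dev->name);
 *
 * The RTE_DEV_FOREACH() helper in rte_dev.h wraps the same init/next
 * pattern.
 */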

int
rte_dev_dma_map(struct rte_device *dev, void *addr, uint64_t iova,
		size_t len)
{
	if (dev->bus->dma_map == NULL || len == 0) {
		rte_errno = ENOTSUP;
		return -1;
	}
	/* Memory must be registered through rte_extmem_* APIs */
	if (rte_mem_virt2memseg_list(addr) == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	return dev->bus->dma_map(dev, addr, iova, len);
}

int
rte_dev_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova,
		  size_t len)
{
	if (dev->bus->dma_unmap == NULL || len == 0) {
		rte_errno = ENOTSUP;
		return -1;
	}
	/* Memory must be registered through rte_extmem_* APIs */
	if (rte_mem_virt2memseg_list(addr) == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	return dev->bus->dma_unmap(dev, addr, iova, len);
}
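
/*
 * Illustrative sketch (assumptions: 'buf', 'iova' and 'len' describe an
 * external buffer already registered with rte_extmem_register(), and 'dev'
 * is a probed device whose bus implements dma_map):
 *
 *	if (rte_dev_dma_map(dev, buf, iova, len) < 0)
 *		printf("DMA map failed: %d\n", rte_errno);
 *
 * rte_dev_dma_unmap() takes the same arguments once the device is done with
 * the buffer.
 */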