/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 */

#include <inttypes.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>

#include "rte_dmadev.h"
#include "rte_dmadev_pmd.h"

static int16_t dma_devices_max;

struct rte_dma_fp_object *rte_dma_fp_objs;
static struct rte_dma_dev *rte_dma_devices;
static struct {
	/* Hold the dev_max information of the primary process. This field is
	 * set by the primary process and is read by the secondary process.
	 */
	int16_t dev_max;
	struct rte_dma_dev_data data[0];
} *dma_devices_shared_data;

RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
#define RTE_DMA_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, rte_dma_logtype, RTE_FMT("dma: " \
		RTE_FMT_HEAD(__VA_ARGS__,) "\n", RTE_FMT_TAIL(__VA_ARGS__,)))

int
rte_dma_dev_max(size_t dev_max)
{
	/* This function may be called before rte_eal_init(), so no rte library
	 * function can be called in this function.
	 */
	if (dev_max == 0 || dev_max > INT16_MAX)
		return -EINVAL;

	if (dma_devices_max > 0)
		return -EINVAL;

	dma_devices_max = dev_max;

	return 0;
}
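
/*
 * Illustrative use of rte_dma_dev_max() above (a sketch, not part of this
 * library): an application that needs more than RTE_DMADEV_DEFAULT_MAX
 * devices can raise the limit once, before rte_eal_init(), e.g.
 *
 *	rte_dma_dev_max(128);
 *	ret = rte_eal_init(argc, argv);
 *
 * The value must be in (0, INT16_MAX], and any call made after the limit
 * has already been fixed returns -EINVAL.
 */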

int16_t
rte_dma_next_dev(int16_t start_dev_id)
{
	int16_t dev_id = start_dev_id;
	while (dev_id < dma_devices_max && rte_dma_devices[dev_id].state == RTE_DMA_DEV_UNUSED)
		dev_id++;

	if (dev_id < dma_devices_max)
		return dev_id;

	return -1;
}
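
/*
 * rte_dma_next_dev() above returns the lowest in-use device id at or after
 * start_dev_id, or -1 when there is none. It is the helper behind device
 * iteration (for instance the RTE_DMA_FOREACH_DEV() macro in rte_dmadev.h).
 * A sketch of such a loop, for illustration only:
 *
 *	int16_t id;
 *	for (id = rte_dma_next_dev(0); id >= 0; id = rte_dma_next_dev(id + 1))
 *		rte_dma_dump(id, stdout);
 */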

static int
dma_check_name(const char *name)
{
	size_t name_len;

	if (name == NULL) {
		RTE_DMA_LOG(ERR, "Name can't be NULL");
		return -EINVAL;
	}

	name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_DMA_LOG(ERR, "Zero length DMA device name");
		return -EINVAL;
	}
	if (name_len >= RTE_DEV_NAME_MAX_LEN) {
		RTE_DMA_LOG(ERR, "DMA device name is too long");
		return -EINVAL;
	}

	return 0;
}

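/* A device slot is considered free when its entry in the shared-memory data
 * array still has an empty name; allocation in the primary process claims
 * the slot by writing the device name into that entry.
 */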
static int16_t
dma_find_free_id(void)
{
	int16_t i;

	if (rte_dma_devices == NULL || dma_devices_shared_data == NULL)
		return -1;

	for (i = 0; i < dma_devices_max; i++) {
		if (dma_devices_shared_data->data[i].dev_name[0] == '\0')
			return i;
	}

	return -1;
}

static struct rte_dma_dev *
dma_find_by_name(const char *name)
{
	int16_t i;

	if (rte_dma_devices == NULL)
		return NULL;

	for (i = 0; i < dma_devices_max; i++) {
		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
		    (!strcmp(name, rte_dma_devices[i].data->dev_name)))
			return &rte_dma_devices[i];
	}

	return NULL;
}

static void dma_fp_object_dummy(struct rte_dma_fp_object *obj);

static int
dma_fp_data_prepare(void)
{
	size_t size;
	void *ptr;
	int i;

	if (rte_dma_fp_objs != NULL)
		return 0;

	/* Fast-path objects must be cache-line aligned, but malloc() does not
	 * guarantee that alignment, so extra memory is allocated and the
	 * returned pointer is realigned.
	 * Note: posix_memalign()/aligned_alloc() are not used here because
	 * their availability depends on the libc version.
	 */
	size = dma_devices_max * sizeof(struct rte_dma_fp_object) +
		RTE_CACHE_LINE_SIZE;
	ptr = malloc(size);
	if (ptr == NULL)
		return -ENOMEM;
	memset(ptr, 0, size);

	rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
	for (i = 0; i < dma_devices_max; i++)
		dma_fp_object_dummy(&rte_dma_fp_objs[i]);

	return 0;
}

static int
dma_dev_data_prepare(void)
{
	size_t size;

	if (rte_dma_devices != NULL)
		return 0;

	size = dma_devices_max * sizeof(struct rte_dma_dev);
	rte_dma_devices = malloc(size);
	if (rte_dma_devices == NULL)
		return -ENOMEM;
	memset(rte_dma_devices, 0, size);

	return 0;
}

static int
dma_shared_data_prepare(void)
{
	const char *mz_name = "rte_dma_dev_data";
	const struct rte_memzone *mz;
	size_t size;

	if (dma_devices_shared_data != NULL)
		return 0;

	size = sizeof(*dma_devices_shared_data) +
		sizeof(struct rte_dma_dev_data) * dma_devices_max;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
	else
		mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	dma_devices_shared_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		memset(dma_devices_shared_data, 0, size);
		dma_devices_shared_data->dev_max = dma_devices_max;
	} else {
		dma_devices_max = dma_devices_shared_data->dev_max;
	}

	return 0;
}

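/* Prepare all per-process and shared state. The ordering differs by process
 * type: the primary sizes everything from dma_devices_max and publishes that
 * value in the shared memzone last, while a secondary looks up the memzone
 * first so it can adopt the primary's dev_max before sizing its own
 * fast-path and device arrays.
 */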
static int
dma_data_prepare(void)
{
	int ret;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (dma_devices_max == 0)
			dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
		ret = dma_fp_data_prepare();
		if (ret)
			return ret;
		ret = dma_dev_data_prepare();
		if (ret)
			return ret;
		ret = dma_shared_data_prepare();
		if (ret)
			return ret;
	} else {
		ret = dma_shared_data_prepare();
		if (ret)
			return ret;
		ret = dma_fp_data_prepare();
		if (ret)
			return ret;
		ret = dma_dev_data_prepare();
		if (ret)
			return ret;
	}

	return 0;
}

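/* Allocate a new device in the primary process: reject duplicate names,
 * allocate the driver's private data on the requested NUMA node, claim a
 * free slot in the shared data array and record the name, id and NUMA node
 * there so that secondary processes can attach to the device later.
 */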
static struct rte_dma_dev *
dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;
	void *dev_private;
	int16_t dev_id;
	int ret;

	ret = dma_data_prepare();
	if (ret < 0) {
		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
		return NULL;
	}

	dev = dma_find_by_name(name);
	if (dev != NULL) {
		RTE_DMA_LOG(ERR, "DMA device already allocated");
		return NULL;
	}

	dev_private = rte_zmalloc_socket(name, private_data_size,
					 RTE_CACHE_LINE_SIZE, numa_node);
	if (dev_private == NULL) {
		RTE_DMA_LOG(ERR, "Cannot allocate private data");
		return NULL;
	}

	dev_id = dma_find_free_id();
	if (dev_id < 0) {
		RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
		rte_free(dev_private);
		return NULL;
	}

	dev = &rte_dma_devices[dev_id];
	dev->data = &dma_devices_shared_data->data[dev_id];
	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
	dev->data->dev_id = dev_id;
	dev->data->numa_node = numa_node;
	dev->data->dev_private = dev_private;

	return dev;
}

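/* Attach to a device in a secondary process by looking up its name in the
 * shared data array; the device must already have been allocated (probed)
 * by the primary process.
 */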
static struct rte_dma_dev *
dma_attach_secondary(const char *name)
{
	struct rte_dma_dev *dev;
	int16_t i;
	int ret;

	ret = dma_data_prepare();
	if (ret < 0) {
		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
		return NULL;
	}

	for (i = 0; i < dma_devices_max; i++) {
		if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
			break;
	}
	if (i == dma_devices_max) {
		RTE_DMA_LOG(ERR,
			"Device %s is not driven by the primary process",
			name);
		return NULL;
	}

	dev = &rte_dma_devices[i];
	dev->data = &dma_devices_shared_data->data[i];

	return dev;
}

static struct rte_dma_dev *
dma_allocate(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		dev = dma_allocate_primary(name, numa_node, private_data_size);
	else
		dev = dma_attach_secondary(name);

	if (dev) {
		dev->fp_obj = &rte_dma_fp_objs[dev->data->dev_id];
		dma_fp_object_dummy(dev->fp_obj);
	}

	return dev;
}

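/* Release a device. Only the primary process owns the private data and the
 * shared slot, so only it frees and clears them; both process types reset
 * the fast-path object back to the dummy callbacks and zero the local
 * device structure.
 */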
static void
dma_release(struct rte_dma_dev *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(dev->data->dev_private);
		memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
	}

	dma_fp_object_dummy(dev->fp_obj);
	memset(dev, 0, sizeof(struct rte_dma_dev));
}

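/* Public entry point for drivers: a PMD probe routine typically allocates
 * its device here and then fills in dev->dev_ops, the fast-path callbacks
 * in dev->fp_obj and dev->device before exposing the device. A rough sketch
 * (hypothetical driver names, for illustration only):
 *
 *	static int my_dma_probe(struct rte_device *rte_dev)
 *	{
 *		struct rte_dma_dev *dev;
 *
 *		dev = rte_dma_pmd_allocate(rte_dev->name, rte_dev->numa_node,
 *					   sizeof(struct my_dma_private));
 *		if (dev == NULL)
 *			return -ENOMEM;
 *		dev->device = rte_dev;
 *		dev->dev_ops = &my_dma_ops;
 *		...
 *		return 0;
 *	}
 */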
struct rte_dma_dev *
rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0 || private_data_size == 0)
		return NULL;

	dev = dma_allocate(name, numa_node, private_data_size);
	if (dev == NULL)
		return NULL;

	dev->state = RTE_DMA_DEV_REGISTERED;

	return dev;
}

int
rte_dma_pmd_release(const char *name)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0)
		return -EINVAL;

	dev = dma_find_by_name(name);
	if (dev == NULL)
		return -EINVAL;

	if (dev->state == RTE_DMA_DEV_READY)
		return rte_dma_close(dev->data->dev_id);

	dma_release(dev);
	return 0;
}

int
rte_dma_get_dev_id_by_name(const char *name)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0)
		return -EINVAL;

	dev = dma_find_by_name(name);
	if (dev == NULL)
		return -EINVAL;

	return dev->data->dev_id;
}

bool
rte_dma_is_valid(int16_t dev_id)
{
	return (dev_id >= 0) && (dev_id < dma_devices_max) &&
		rte_dma_devices != NULL &&
		rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
}

uint16_t
rte_dma_count_avail(void)
{
	uint16_t count = 0;
	uint16_t i;

	if (rte_dma_devices == NULL)
		return count;

	for (i = 0; i < dma_devices_max; i++) {
		if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
			count++;
	}

	return count;
}

int
rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
	memset(dev_info, 0, sizeof(struct rte_dma_info));
	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
					    sizeof(struct rte_dma_info));
	if (ret != 0)
		return ret;

	dev_info->dev_name = dev->data->dev_name;
	dev_info->numa_node = dev->device->numa_node;
	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;

	return 0;
}

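/* Typical application-side setup sequence (a sketch based on the checks
 * below, not a complete example): query capabilities, configure the number
 * of vchans, set up each vchan, then start the device, e.g.
 *
 *	struct rte_dma_conf conf = { .nb_vchans = 1 };
 *	struct rte_dma_vchan_conf vconf = {
 *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
 *		.nb_desc = 128,	// must fall within min_desc/max_desc
 *	};
 *
 *	rte_dma_configure(dev_id, &conf);
 *	rte_dma_vchan_setup(dev_id, 0, &vconf);
 *	rte_dma_start(dev_id);
 */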
int
rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	int ret;

	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
		return -EINVAL;

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Device %d failed to get device info", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_vchans == 0) {
		RTE_DMA_LOG(ERR,
			"Device %d configured with zero vchans", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_vchans > dev_info.max_vchans) {
		RTE_DMA_LOG(ERR,
			"Device %d configured with too many vchans", dev_id);
		return -EINVAL;
	}
	if (dev_conf->enable_silent &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
		RTE_DMA_LOG(ERR, "Device %d doesn't support silent mode", dev_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
					     sizeof(struct rte_dma_conf));
	if (ret == 0)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(struct rte_dma_conf));

	return ret;
}

int
rte_dma_start(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (dev->data->dev_conf.nb_vchans == 0) {
		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->dev_start == NULL)
		goto mark_started;

	ret = (*dev->dev_ops->dev_start)(dev);
	if (ret != 0)
		return ret;

mark_started:
	dev->data->dev_started = 1;
	return 0;
}

int
rte_dma_stop(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (dev->data->dev_started == 0) {
		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->dev_stop == NULL)
		goto mark_stopped;

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret != 0)
		return ret;

mark_stopped:
	dev->data->dev_started = 0;
	return 0;
}

int
rte_dma_close(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped before closing", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	ret = (*dev->dev_ops->dev_close)(dev);
	if (ret == 0)
		dma_release(dev);

	return ret;
}

int
rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
		    const struct rte_dma_vchan_conf *conf)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	bool src_is_dev, dst_is_dev;
	int ret;

	if (!rte_dma_is_valid(dev_id) || conf == NULL)
		return -EINVAL;

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Device %d failed to get device info", dev_id);
		return -EINVAL;
	}
	if (dev->data->dev_conf.nb_vchans == 0) {
		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
		return -EINVAL;
	}
	if (vchan >= dev_info.nb_vchans) {
		RTE_DMA_LOG(ERR, "Device %d vchan out of range", dev_id);
		return -EINVAL;
	}
	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
		RTE_DMA_LOG(ERR, "Device %d direction invalid", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support mem2mem transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support mem2dev transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support dev2mem transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support dev2dev transfer", dev_id);
		return -EINVAL;
	}
	if (conf->nb_desc < dev_info.min_desc ||
	    conf->nb_desc > dev_info.max_desc) {
		RTE_DMA_LOG(ERR,
			"Device %d number of descriptors invalid", dev_id);
		return -EINVAL;
	}
	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
	if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
	    (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
		RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
		return -EINVAL;
	}
	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
	if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
	    (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
		RTE_DMA_LOG(ERR,
			"Device %d destination port type invalid", dev_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
					    sizeof(struct rte_dma_vchan_conf));
}

int
rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

	if (!rte_dma_is_valid(dev_id) || stats == NULL)
		return -EINVAL;

	if (vchan >= dev->data->dev_conf.nb_vchans &&
	    vchan != RTE_DMA_ALL_VCHAN) {
		RTE_DMA_LOG(ERR,
			"Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	memset(stats, 0, sizeof(struct rte_dma_stats));
	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
					  sizeof(struct rte_dma_stats));
}

int
rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (vchan >= dev->data->dev_conf.nb_vchans &&
	    vchan != RTE_DMA_ALL_VCHAN) {
		RTE_DMA_LOG(ERR,
			"Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
	return (*dev->dev_ops->stats_reset)(dev, vchan);
}

int
rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (vchan >= dev->data->dev_conf.nb_vchans) {
		RTE_DMA_LOG(ERR, "Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_status, -ENOTSUP);
	return (*dev->dev_ops->vchan_status)(dev, vchan, status);
}

static const char *
dma_capability_name(uint64_t capability)
{
	static const struct {
		uint64_t capability;
		const char *name;
	} capa_names[] = {
		{ RTE_DMA_CAPA_MEM_TO_MEM, "mem2mem" },
		{ RTE_DMA_CAPA_MEM_TO_DEV, "mem2dev" },
		{ RTE_DMA_CAPA_DEV_TO_MEM, "dev2mem" },
		{ RTE_DMA_CAPA_DEV_TO_DEV, "dev2dev" },
		{ RTE_DMA_CAPA_SVA, "sva" },
		{ RTE_DMA_CAPA_SILENT, "silent" },
		{ RTE_DMA_CAPA_HANDLES_ERRORS, "handles_errors" },
		{ RTE_DMA_CAPA_OPS_COPY, "copy" },
		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
		{ RTE_DMA_CAPA_OPS_FILL, "fill" },
	};

	const char *name = "unknown";
	uint32_t i;

	for (i = 0; i < RTE_DIM(capa_names); i++) {
		if (capability == capa_names[i].capability) {
			name = capa_names[i].name;
			break;
		}
	}

	return name;
}

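/* Decode a capability bitmask one bit at a time: __builtin_ctzll() yields
 * the index of the lowest set bit, which is translated to a name, printed
 * and cleared, until no bits remain.
 */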
static void
dma_dump_capability(FILE *f, uint64_t dev_capa)
{
	uint64_t capa;

	(void)fprintf(f, " dev_capa: 0x%" PRIx64 " -", dev_capa);
	while (dev_capa > 0) {
		capa = 1ull << __builtin_ctzll(dev_capa);
		(void)fprintf(f, " %s", dma_capability_name(capa));
		dev_capa &= ~capa;
	}
	(void)fprintf(f, "\n");
}

int
rte_dma_dump(int16_t dev_id, FILE *f)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	int ret;

	if (!rte_dma_is_valid(dev_id) || f == NULL)
		return -EINVAL;

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Device %d failed to get device info", dev_id);
		return -EINVAL;
	}

	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
		dev->data->dev_id,
		dev->data->dev_name,
		dev->data->dev_started ? "started" : "stopped");
	dma_dump_capability(f, dev_info.dev_capa);
	(void)fprintf(f, " max_vchans_supported: %u\n", dev_info.max_vchans);
	(void)fprintf(f, " nb_vchans_configured: %u\n", dev_info.nb_vchans);
	(void)fprintf(f, " silent_mode: %s\n",
		dev->data->dev_conf.enable_silent ? "on" : "off");

	if (dev->dev_ops->dev_dump != NULL)
		return (*dev->dev_ops->dev_dump)(dev, f);

	return 0;
}

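/* The dummy_* handlers below are installed in every fast-path object until
 * a driver provides real implementations (see dma_fp_object_dummy() at the
 * end of this file). Calling a data-path API on a device that is not fully
 * configured therefore fails with a log message instead of dereferencing a
 * NULL function pointer.
 */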
static int
dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	   __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst,
	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "copy is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	      __rte_unused const struct rte_dma_sge *src,
	      __rte_unused const struct rte_dma_sge *dst,
	      __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst,
	      __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	   __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst,
	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "fill is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan)
{
	RTE_DMA_LOG(ERR, "submit is not configured or not supported.");
	return -EINVAL;
}

static uint16_t
dummy_completed(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
		__rte_unused const uint16_t nb_cpls,
		__rte_unused uint16_t *last_idx, __rte_unused bool *has_error)
{
	RTE_DMA_LOG(ERR, "completed is not configured or not supported.");
	return 0;
}

static uint16_t
dummy_completed_status(__rte_unused void *dev_private,
		       __rte_unused uint16_t vchan,
		       __rte_unused const uint16_t nb_cpls,
		       __rte_unused uint16_t *last_idx,
		       __rte_unused enum rte_dma_status_code *status)
{
	RTE_DMA_LOG(ERR,
		"completed_status is not configured or not supported.");
	return 0;
}

static uint16_t
dummy_burst_capacity(__rte_unused const void *dev_private,
		     __rte_unused uint16_t vchan)
{
	RTE_DMA_LOG(ERR, "burst_capacity is not configured or not supported.");
	return 0;
}

static void
dma_fp_object_dummy(struct rte_dma_fp_object *obj)
{
	obj->dev_private = NULL;
	obj->copy = dummy_copy;
	obj->copy_sg = dummy_copy_sg;
	obj->fill = dummy_fill;
	obj->submit = dummy_submit;
	obj->completed = dummy_completed;
	obj->completed_status = dummy_completed_status;
	obj->burst_capacity = dummy_burst_capacity;
}