/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 NXP
 */

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_lcore.h>
#include <rte_bus_vdev.h>

#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include "skeleton_rawdev.h"

/* Set once a device instance has been created; only one instance is supported */
static uint16_t skeldev_init_once;

/** Rawdev Skeleton dummy driver name */
#define SKELETON_PMD_RAWDEV_NAME rawdev_skeleton

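/* Per-queue software buffer storage: since there is no real hardware,
 * enqueue stores the caller's buffer addresses here and dequeue hands
 * them back from the same slots.
 */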
struct queue_buffers {
	void *bufs[SKELETON_QUEUE_MAX_DEPTH];
};

static struct queue_buffers queue_buf[SKELETON_MAX_QUEUES] = {};
static void clear_queue_bufs(int queue_id);

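/* Fill the caller-provided struct skeleton_rawdev_conf with the current
 * device configuration; dev_info_size must match the size of that struct.
 */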
static int skeleton_rawdev_info_get(struct rte_rawdev *dev,
				    rte_rawdev_obj_t dev_info,
				    size_t dev_info_size)
{
	struct skeleton_rawdev *skeldev;
	struct skeleton_rawdev_conf *skeldev_conf;

	SKELETON_PMD_FUNC_TRACE();

	if (!dev_info || dev_info_size != sizeof(*skeldev_conf)) {
		SKELETON_PMD_ERR("Invalid request");
		return -EINVAL;
	}

	skeldev = skeleton_rawdev_get_priv(dev);

	skeldev_conf = dev_info;

	skeldev_conf->num_queues = skeldev->num_queues;
	skeldev_conf->capabilities = skeldev->capabilities;
	skeldev_conf->device_state = skeldev->device_state;
	skeldev_conf->firmware_state = skeldev->fw.firmware_state;

	return 0;
}

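/* Apply a caller-supplied struct skeleton_rawdev_conf. A minimal caller-side
 * sketch (illustrative only, assuming the rawdev API variant that carries the
 * driver-private config in rte_rawdev_info.dev_private and forwards its size):
 *
 *	struct skeleton_rawdev_conf conf = { .num_queues = 1 };
 *	struct rte_rawdev_info info = { .dev_private = &conf };
 *	rte_rawdev_configure(dev_id, &info, sizeof(conf));
 */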
static int skeleton_rawdev_configure(const struct rte_rawdev *dev,
				     rte_rawdev_obj_t config,
				     size_t config_size)
{
	struct skeleton_rawdev *skeldev;
	struct skeleton_rawdev_conf *skeldev_conf;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	if (config == NULL || config_size != sizeof(*skeldev_conf)) {
		SKELETON_PMD_ERR("Invalid configuration");
		return -EINVAL;
	}

	skeldev_conf = config;
	skeldev = skeleton_rawdev_get_priv(dev);

	if (skeldev_conf->num_queues <= SKELETON_MAX_QUEUES)
		skeldev->num_queues = skeldev_conf->num_queues;
	else
		return -EINVAL;

	skeldev->capabilities = skeldev_conf->capabilities;
	skeldev->num_queues = skeldev_conf->num_queues;

	return 0;
}

static int skeleton_rawdev_start(struct rte_rawdev *dev)
{
	int ret = 0;
	struct skeleton_rawdev *skeldev;
	enum skeleton_firmware_state fw_state;
	enum skeleton_device_state device_state;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);

	fw_state = skeldev->fw.firmware_state;
	device_state = skeldev->device_state;

	if (fw_state == SKELETON_FW_LOADED &&
	    device_state == SKELETON_DEV_STOPPED) {
		skeldev->device_state = SKELETON_DEV_RUNNING;
	} else {
		SKELETON_PMD_ERR("Device not ready for starting");
		ret = -EINVAL;
	}

	return ret;
}

static void skeleton_rawdev_stop(struct rte_rawdev *dev)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	if (dev) {
		skeldev = skeleton_rawdev_get_priv(dev);
		skeldev->device_state = SKELETON_DEV_STOPPED;
	}
}

static void
reset_queues(struct skeleton_rawdev *skeldev)
{
	int i;

	for (i = 0; i < SKELETON_MAX_QUEUES; i++) {
		skeldev->queues[i].depth = SKELETON_QUEUE_DEF_DEPTH;
		skeldev->queues[i].state = SKELETON_QUEUE_DETACH;
	}
}

static void
reset_attribute_table(struct skeleton_rawdev *skeldev)
{
	int i;

	for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
		if (skeldev->attr[i].name) {
			free(skeldev->attr[i].name);
			skeldev->attr[i].name = NULL;
		}
	}
}

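/* Release the device: queues and attributes are reset, and the firmware is
 * returned to SKELETON_FW_READY unless the device is still running.
 */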
static int skeleton_rawdev_close(struct rte_rawdev *dev)
{
	int ret = 0, i;
	struct skeleton_rawdev *skeldev;
	enum skeleton_firmware_state fw_state;
	enum skeleton_device_state device_state;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);

	fw_state = skeldev->fw.firmware_state;
	device_state = skeldev->device_state;

	reset_queues(skeldev);
	reset_attribute_table(skeldev);

	switch (fw_state) {
	case SKELETON_FW_LOADED:
		if (device_state == SKELETON_DEV_RUNNING) {
			SKELETON_PMD_ERR("Cannot close running device");
			ret = -EINVAL;
		} else {
			/* Probably call fw reset here */
			skeldev->fw.firmware_state = SKELETON_FW_READY;
		}
		break;
	case SKELETON_FW_READY:
		SKELETON_PMD_DEBUG("Device already in stopped state");
		break;
	case SKELETON_FW_ERROR:
	default:
		SKELETON_PMD_DEBUG("Device in impossible state");
		ret = -EINVAL;
		break;
	}

	/* Clear all allocated queues */
	for (i = 0; i < SKELETON_MAX_QUEUES; i++)
		clear_queue_bufs(i);

	return ret;
}

static int skeleton_rawdev_reset(struct rte_rawdev *dev)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);

	SKELETON_PMD_DEBUG("Resetting device");
	skeldev->fw.firmware_state = SKELETON_FW_READY;

	return 0;
}

static int skeleton_rawdev_queue_def_conf(struct rte_rawdev *dev,
					  uint16_t queue_id,
					  rte_rawdev_obj_t queue_conf,
					  size_t conf_size)
{
	struct skeleton_rawdev *skeldev;
	struct skeleton_rawdev_queue *skelq;

	SKELETON_PMD_FUNC_TRACE();

	if (!dev || !queue_conf ||
	    conf_size != sizeof(struct skeleton_rawdev_queue))
		return -EINVAL;

	skeldev = skeleton_rawdev_get_priv(dev);
	skelq = &skeldev->queues[queue_id];

	if (queue_id < SKELETON_MAX_QUEUES)
		rte_memcpy(queue_conf, skelq,
			   sizeof(struct skeleton_rawdev_queue));

	return 0;
}

static void
clear_queue_bufs(int queue_id)
{
	int i;

	/* Clear buffers for queue_id */
	for (i = 0; i < SKELETON_QUEUE_MAX_DEPTH; i++)
		queue_buf[queue_id].bufs[i] = NULL;
}

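/* Copy the caller's queue configuration into the slot for queue_id, provided
 * the queue index is within the configured count, and clear its buffer slots.
 */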
static int skeleton_rawdev_queue_setup(struct rte_rawdev *dev,
				       uint16_t queue_id,
				       rte_rawdev_obj_t queue_conf,
				       size_t conf_size)
{
	int ret = 0;
	struct skeleton_rawdev *skeldev;
	struct skeleton_rawdev_queue *q;

	SKELETON_PMD_FUNC_TRACE();

	if (!dev || !queue_conf ||
	    conf_size != sizeof(struct skeleton_rawdev_queue))
		return -EINVAL;

	skeldev = skeleton_rawdev_get_priv(dev);
	q = &skeldev->queues[queue_id];

	if (skeldev->num_queues > queue_id &&
	    q->depth < SKELETON_QUEUE_MAX_DEPTH) {
		rte_memcpy(q, queue_conf,
			   sizeof(struct skeleton_rawdev_queue));
		clear_queue_bufs(queue_id);
	} else {
		SKELETON_PMD_ERR("Invalid queue configuration");
		ret = -EINVAL;
	}

	return ret;
}

static int skeleton_rawdev_queue_release(struct rte_rawdev *dev,
					 uint16_t queue_id)
{
	int ret = 0;
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);

	if (skeldev->num_queues > queue_id) {
		skeldev->queues[queue_id].state = SKELETON_QUEUE_DETACH;
		skeldev->queues[queue_id].depth = SKELETON_QUEUE_DEF_DEPTH;
		clear_queue_bufs(queue_id);
	} else {
		SKELETON_PMD_ERR("Invalid queue configuration");
		ret = -EINVAL;
	}

	return ret;
}

static uint16_t skeleton_rawdev_queue_count(struct rte_rawdev *dev)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);
	return skeldev->num_queues;
}

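/* Attribute lookup by name. A minimal caller-side sketch (illustrative only;
 * the attribute name shown is arbitrary, assuming the generic rawdev
 * attribute API):
 *
 *	uint64_t value;
 *	rte_rawdev_set_attr(dev_id, "Test1", 100);
 *	rte_rawdev_get_attr(dev_id, "Test1", &value);
 */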
static int skeleton_rawdev_get_attr(struct rte_rawdev *dev,
				    const char *attr_name,
				    uint64_t *attr_value)
{
	int i;
	uint8_t done = 0;
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	if (!dev || !attr_name || !attr_value) {
		SKELETON_PMD_ERR("Invalid arguments for getting attributes");
		return -EINVAL;
	}

	skeldev = skeleton_rawdev_get_priv(dev);

	for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
		if (!skeldev->attr[i].name)
			continue;

		if (!strncmp(skeldev->attr[i].name, attr_name,
			     SKELETON_ATTRIBUTE_NAME_MAX)) {
			*attr_value = skeldev->attr[i].value;
			done = 1;
			SKELETON_PMD_DEBUG("Attribute (%s) Value (%" PRIu64 ")",
					   attr_name, *attr_value);
			break;
		}
	}

	if (done)
		return 0;

	/* Attribute not found */
	return -EINVAL;
}

static int skeleton_rawdev_set_attr(struct rte_rawdev *dev,
				    const char *attr_name,
				    const uint64_t attr_value)
{
	int i;
	uint8_t done = 0;
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	if (!dev || !attr_name) {
		SKELETON_PMD_ERR("Invalid arguments for setting attributes");
		return -EINVAL;
	}

	skeldev = skeleton_rawdev_get_priv(dev);

	/* Check if attribute already exists */
	for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
		if (!skeldev->attr[i].name)
			break;

		if (!strncmp(skeldev->attr[i].name, attr_name,
			     SKELETON_ATTRIBUTE_NAME_MAX)) {
			/* Update value */
			skeldev->attr[i].value = attr_value;
			done = 1;
			break;
		}
	}

	if (!done) {
		if (i < (SKELETON_MAX_ATTRIBUTES - 1)) {
			/* There is still space to insert one more */
			skeldev->attr[i].name = strdup(attr_name);
			if (!skeldev->attr[i].name)
				return -ENOMEM;

			skeldev->attr[i].value = attr_value;
			return 0;
		}
	}

	return -EINVAL;
}

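/* Store up to 'count' buffer pointers for the queue identified by 'context'.
 * A minimal caller-side sketch (illustrative only; the queue id is passed as
 * the opaque context):
 *
 *	int queue_id = 0;
 *	rte_rawdev_enqueue_buffers(dev_id, buffers, count, &queue_id);
 */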
static int skeleton_rawdev_enqueue_bufs(struct rte_rawdev *dev,
					struct rte_rawdev_buf **buffers,
					unsigned int count,
					rte_rawdev_obj_t context)
{
	unsigned int i;
	uint16_t q_id;
	RTE_SET_USED(dev);

	/* context is essentially the queue_id, transferred as an opaque
	 * object through the library layer. This can help complex
	 * implementations which require more information than just an
	 * integer - for example, a queue-pair.
	 */
	q_id = *((int *)context);

	for (i = 0; i < count; i++)
		queue_buf[q_id].bufs[i] = buffers[i]->buf_addr;

	return i;
}

static int skeleton_rawdev_dequeue_bufs(struct rte_rawdev *dev,
					struct rte_rawdev_buf **buffers,
					unsigned int count,
					rte_rawdev_obj_t context)
{
	unsigned int i;
	uint16_t q_id;
	RTE_SET_USED(dev);

	/* context is essentially the queue_id, transferred as an opaque
	 * object through the library layer. This can help complex
	 * implementations which require more information than just an
	 * integer - for example, a queue-pair.
	 */
	q_id = *((int *)context);

	for (i = 0; i < count; i++)
		buffers[i]->buf_addr = queue_buf[q_id].bufs[i];

	return i;
}

static int skeleton_rawdev_dump(struct rte_rawdev *dev, FILE *f)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(f);

	return 0;
}

static int skeleton_rawdev_firmware_status_get(struct rte_rawdev *dev,
					       rte_rawdev_obj_t status_info)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);

	if (status_info)
		memcpy(status_info, &skeldev->fw.firmware_state,
		       sizeof(enum skeleton_firmware_state));

	return 0;
}

static int skeleton_rawdev_firmware_version_get(
					struct rte_rawdev *dev,
					rte_rawdev_obj_t version_info)
{
	struct skeleton_rawdev *skeldev;
	struct skeleton_firmware_version_info *vi;

	SKELETON_PMD_FUNC_TRACE();

	skeldev = skeleton_rawdev_get_priv(dev);
	vi = version_info;

	vi->major = skeldev->fw.firmware_version.major;
	vi->minor = skeldev->fw.firmware_version.minor;
	vi->subrel = skeldev->fw.firmware_version.subrel;

	return 0;
}

static int skeleton_rawdev_firmware_load(struct rte_rawdev *dev,
					 rte_rawdev_obj_t firmware_buf)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	skeldev = skeleton_rawdev_get_priv(dev);

	/* firmware_buf is an mmap'd, possibly DMA-able, buffer. Being a
	 * dummy driver, all this does is check that firmware_buf is not
	 * NULL and set the firmware state.
	 */
	if (!firmware_buf)
		return -EINVAL;

	skeldev->fw.firmware_state = SKELETON_FW_LOADED;

	return 0;
}

static int skeleton_rawdev_firmware_unload(struct rte_rawdev *dev)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	skeldev = skeleton_rawdev_get_priv(dev);

	skeldev->fw.firmware_state = SKELETON_FW_READY;

	return 0;
}

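/* Callback table registered with the rawdev framework; the xstats callbacks
 * are intentionally left unimplemented in this skeleton.
 */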
static const struct rte_rawdev_ops skeleton_rawdev_ops = {
	.dev_info_get = skeleton_rawdev_info_get,
	.dev_configure = skeleton_rawdev_configure,
	.dev_start = skeleton_rawdev_start,
	.dev_stop = skeleton_rawdev_stop,
	.dev_close = skeleton_rawdev_close,
	.dev_reset = skeleton_rawdev_reset,

	.queue_def_conf = skeleton_rawdev_queue_def_conf,
	.queue_setup = skeleton_rawdev_queue_setup,
	.queue_release = skeleton_rawdev_queue_release,
	.queue_count = skeleton_rawdev_queue_count,

	.attr_get = skeleton_rawdev_get_attr,
	.attr_set = skeleton_rawdev_set_attr,

	.enqueue_bufs = skeleton_rawdev_enqueue_bufs,
	.dequeue_bufs = skeleton_rawdev_dequeue_bufs,

	.dump = skeleton_rawdev_dump,

	.xstats_get = NULL,
	.xstats_get_names = NULL,
	.xstats_get_by_name = NULL,
	.xstats_reset = NULL,

	.firmware_status_get = skeleton_rawdev_firmware_status_get,
	.firmware_version_get = skeleton_rawdev_firmware_version_get,
	.firmware_load = skeleton_rawdev_firmware_load,
	.firmware_unload = skeleton_rawdev_firmware_unload,

	.dev_selftest = test_rawdev_skeldev,
};

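/* Allocate the rawdev, wire up the ops table, and initialise default device,
 * firmware and queue state. Returns the new rawdev id on success.
 */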
static int
skeleton_rawdev_create(const char *name,
		       struct rte_vdev_device *vdev,
		       int socket_id)
{
	int ret = 0, i;
	struct rte_rawdev *rawdev = NULL;
	struct skeleton_rawdev *skeldev = NULL;

	if (!name) {
		SKELETON_PMD_ERR("Invalid name of the device!");
		ret = -EINVAL;
		goto cleanup;
	}

	/* Allocate device structure */
	rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct skeleton_rawdev),
					 socket_id);
	if (rawdev == NULL) {
		SKELETON_PMD_ERR("Unable to allocate rawdevice");
		ret = -EINVAL;
		goto cleanup;
	}

	ret = rawdev->dev_id; /* return the rawdev id of new device */

	rawdev->dev_ops = &skeleton_rawdev_ops;
	rawdev->device = &vdev->device;

	skeldev = skeleton_rawdev_get_priv(rawdev);

	skeldev->device_id = SKELETON_DEVICE_ID;
	skeldev->vendor_id = SKELETON_VENDOR_ID;
	skeldev->capabilities = SKELETON_DEFAULT_CAPA;

	memset(&skeldev->fw, 0, sizeof(struct skeleton_firmware));

	skeldev->fw.firmware_state = SKELETON_FW_READY;
	skeldev->fw.firmware_version.major = SKELETON_MAJOR_VER;
	skeldev->fw.firmware_version.minor = SKELETON_MINOR_VER;
	skeldev->fw.firmware_version.subrel = SKELETON_SUB_VER;

	skeldev->device_state = SKELETON_DEV_STOPPED;

	/* Reset/set to default queue configuration for this device */
	for (i = 0; i < SKELETON_MAX_QUEUES; i++) {
		skeldev->queues[i].state = SKELETON_QUEUE_DETACH;
		skeldev->queues[i].depth = SKELETON_QUEUE_DEF_DEPTH;
	}

	/* Clear all allocated queue buffers */
	for (i = 0; i < SKELETON_MAX_QUEUES; i++)
		clear_queue_bufs(i);

	return ret;

cleanup:
	if (rawdev)
		rte_rawdev_pmd_release(rawdev);

	return ret;
}

static int
skeleton_rawdev_destroy(const char *name)
{
	int ret;
	struct rte_rawdev *rdev;

	if (!name) {
		SKELETON_PMD_ERR("Invalid device name");
		return -EINVAL;
	}

	rdev = rte_rawdev_pmd_get_named_dev(name);
	if (!rdev) {
		SKELETON_PMD_ERR("Invalid device name (%s)", name);
		return -EINVAL;
	}

	/* rte_rawdev_close is called by pmd_release */
	ret = rte_rawdev_pmd_release(rdev);
	if (ret)
		SKELETON_PMD_DEBUG("Device cleanup failed");

	return 0;
}

static int
skeldev_get_selftest(const char *key __rte_unused,
		     const char *value,
		     void *opaque)
{
	int *flag = opaque;
	*flag = atoi(value);
	return 0;
}

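/* Parse the optional 'selftest' vdev argument and return its value (0 or 1),
 * or a negative number on a parse error. For example (assuming
 * SKELETON_SELFTEST_ARG expands to "selftest"):
 *
 *	--vdev="rawdev_skeleton,selftest=1"
 */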
static int
skeldev_parse_vdev_args(struct rte_vdev_device *vdev)
{
	int selftest = 0;
	const char *name;
	const char *params;

	static const char *const args[] = {
		SKELETON_SELFTEST_ARG,
		NULL
	};

	name = rte_vdev_device_name(vdev);

	params = rte_vdev_device_args(vdev);
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			SKELETON_PMD_INFO(
				"Ignoring unsupported params supplied '%s'",
				name);
		} else {
			int ret = rte_kvargs_process(kvlist,
					SKELETON_SELFTEST_ARG,
					skeldev_get_selftest, &selftest);
			if (ret != 0 || (selftest < 0 || selftest > 1)) {
				SKELETON_PMD_ERR("%s: Error in parsing args",
						 name);
				rte_kvargs_free(kvlist);
				ret = -1; /* enforce if selftest is invalid */
				return ret;
			}
		}

		rte_kvargs_free(kvlist);
	}

	return selftest;
}

static int
skeleton_rawdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;
	int selftest = 0, ret = 0;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	/* More than one instance is not supported */
	if (skeldev_init_once) {
		SKELETON_PMD_ERR("Multiple instances not supported for %s",
				 name);
		return -EINVAL;
	}

	SKELETON_PMD_INFO("Init %s on NUMA node %d", name, rte_socket_id());

	selftest = skeldev_parse_vdev_args(vdev);
	/* In case of an invalid argument, selftest != 1; ignore other values */

	ret = skeleton_rawdev_create(name, vdev, rte_socket_id());
	if (ret >= 0) {
		/* In case the command line argument for 'selftest' was passed;
		 * if invalid arguments were passed, execution continues but
		 * without selftest.
		 */
		if (selftest == 1)
			test_rawdev_skeldev(ret);
	}

	/* Device instance created; a second instance is not possible */
	skeldev_init_once = 1;

	return ret < 0 ? ret : 0;
}

static int
skeleton_rawdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	int ret;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -1;

	SKELETON_PMD_INFO("Closing %s on NUMA node %d", name, rte_socket_id());

	ret = skeleton_rawdev_destroy(name);
	if (!ret)
		skeldev_init_once = 0;

	return ret;
}

static struct rte_vdev_driver skeleton_pmd_drv = {
	.probe = skeleton_rawdev_probe,
	.remove = skeleton_rawdev_remove
};

RTE_PMD_REGISTER_VDEV(SKELETON_PMD_RAWDEV_NAME, skeleton_pmd_drv);
RTE_LOG_REGISTER(skeleton_pmd_logtype, rawdev.skeleton, INFO);