/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 NXP
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_lcore.h>
#include <rte_bus_vdev.h>

#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include "skeleton_rawdev.h"

/* Dynamic log type identifier */
int skeleton_pmd_logtype;

/* Count of instances */
static uint16_t skeldev_init_once;

/**< Rawdev Skeleton dummy driver name */
#define SKELETON_PMD_RAWDEV_NAME rawdev_skeleton

struct queue_buffers {
	void *bufs[SKELETON_QUEUE_MAX_DEPTH];
};

static struct queue_buffers queue_buf[SKELETON_MAX_QUEUES] = {};
static void clear_queue_bufs(int queue_id);

static void skeleton_rawdev_info_get(struct rte_rawdev *dev,
				     rte_rawdev_obj_t dev_info)
{
	struct skeleton_rawdev *skeldev;
	struct skeleton_rawdev_conf *skeldev_conf;

	SKELETON_PMD_FUNC_TRACE();

	if (!dev_info) {
		SKELETON_PMD_ERR("Invalid request");
		return;
	}

	skeldev = skeleton_rawdev_get_priv(dev);

	skeldev_conf = dev_info;

	skeldev_conf->num_queues = skeldev->num_queues;
	skeldev_conf->capabilities = skeldev->capabilities;
	skeldev_conf->device_state = skeldev->device_state;
	skeldev_conf->firmware_state = skeldev->fw.firmware_state;
}

static int skeleton_rawdev_configure(const struct rte_rawdev *dev,
				     rte_rawdev_obj_t config)
{
	struct skeleton_rawdev *skeldev;
	struct skeleton_rawdev_conf *skeldev_conf;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	if (!config) {
		SKELETON_PMD_ERR("Invalid configuration");
		return -EINVAL;
	}

	skeldev_conf = config;
	skeldev = skeleton_rawdev_get_priv(dev);

	if (skeldev_conf->num_queues <= SKELETON_MAX_QUEUES)
		skeldev->num_queues = skeldev_conf->num_queues;
	else
		return -EINVAL;

	skeldev->capabilities = skeldev_conf->capabilities;
	skeldev->num_queues = skeldev_conf->num_queues;

	return 0;
}

static int skeleton_rawdev_start(struct rte_rawdev *dev)
{
	int ret = 0;
	struct skeleton_rawdev *skeldev;
	enum skeleton_firmware_state fw_state;
	enum skeleton_device_state device_state;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);

	fw_state = skeldev->fw.firmware_state;
	device_state = skeldev->device_state;

	if (fw_state == SKELETON_FW_LOADED &&
	    device_state == SKELETON_DEV_STOPPED) {
		skeldev->device_state = SKELETON_DEV_RUNNING;
	} else {
		SKELETON_PMD_ERR("Device not ready for starting");
		ret = -EINVAL;
	}

	return ret;
}

static void skeleton_rawdev_stop(struct rte_rawdev *dev)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	if (dev) {
		skeldev = skeleton_rawdev_get_priv(dev);
		skeldev->device_state = SKELETON_DEV_STOPPED;
	}
}
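/* Illustrative note (not part of the original driver): an application drives
 * the state machine modelled above through the generic rawdev API. The
 * following is only a hedged sketch; exact rte_rawdev_* prototypes vary
 * slightly between DPDK releases, and "fw_buf" is a hypothetical firmware
 * buffer prepared by the caller.
 *
 *	uint16_t dev_id = rte_rawdev_get_dev_id("rawdev_skeleton");
 *
 *	rte_rawdev_firmware_load(dev_id, fw_buf);  // SKELETON_FW_READY -> SKELETON_FW_LOADED
 *	rte_rawdev_start(dev_id);                  // SKELETON_DEV_STOPPED -> SKELETON_DEV_RUNNING
 *	...
 *	rte_rawdev_stop(dev_id);                   // SKELETON_DEV_RUNNING -> SKELETON_DEV_STOPPED
 */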
static void
reset_queues(struct skeleton_rawdev *skeldev)
{
	int i;

	for (i = 0; i < SKELETON_MAX_QUEUES; i++) {
		skeldev->queues[i].depth = SKELETON_QUEUE_DEF_DEPTH;
		skeldev->queues[i].state = SKELETON_QUEUE_DETACH;
	}
}

static void
reset_attribute_table(struct skeleton_rawdev *skeldev)
{
	int i;

	for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
		if (skeldev->attr[i].name) {
			free(skeldev->attr[i].name);
			skeldev->attr[i].name = NULL;
		}
	}
}

static int skeleton_rawdev_close(struct rte_rawdev *dev)
{
	int ret = 0, i;
	struct skeleton_rawdev *skeldev;
	enum skeleton_firmware_state fw_state;
	enum skeleton_device_state device_state;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);

	fw_state = skeldev->fw.firmware_state;
	device_state = skeldev->device_state;

	reset_queues(skeldev);
	reset_attribute_table(skeldev);

	switch (fw_state) {
	case SKELETON_FW_LOADED:
		if (device_state == SKELETON_DEV_RUNNING) {
			SKELETON_PMD_ERR("Cannot close running device");
			ret = -EINVAL;
		} else {
			/* Probably call fw reset here */
			skeldev->fw.firmware_state = SKELETON_FW_READY;
		}
		break;
	case SKELETON_FW_READY:
		SKELETON_PMD_DEBUG("Device already in stopped state");
		break;
	case SKELETON_FW_ERROR:
	default:
		SKELETON_PMD_DEBUG("Device in impossible state");
		ret = -EINVAL;
		break;
	}

	/* Clear all allocated queues */
	for (i = 0; i < SKELETON_MAX_QUEUES; i++)
		clear_queue_bufs(i);

	return ret;
}

static int skeleton_rawdev_reset(struct rte_rawdev *dev)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);

	SKELETON_PMD_DEBUG("Resetting device");
	skeldev->fw.firmware_state = SKELETON_FW_READY;

	return 0;
}

static void skeleton_rawdev_queue_def_conf(struct rte_rawdev *dev,
					   uint16_t queue_id,
					   rte_rawdev_obj_t queue_conf)
{
	struct skeleton_rawdev *skeldev;
	struct skeleton_rawdev_queue *skelq;

	SKELETON_PMD_FUNC_TRACE();

	if (!dev || !queue_conf)
		return;

	skeldev = skeleton_rawdev_get_priv(dev);

	/* Validate the queue index before indexing into the queue table */
	if (queue_id < SKELETON_MAX_QUEUES) {
		skelq = &skeldev->queues[queue_id];
		rte_memcpy(queue_conf, skelq,
			   sizeof(struct skeleton_rawdev_queue));
	}
}

static void
clear_queue_bufs(int queue_id)
{
	int i;

	/* Clear buffers for queue_id */
	for (i = 0; i < SKELETON_QUEUE_MAX_DEPTH; i++)
		queue_buf[queue_id].bufs[i] = NULL;
}

static int skeleton_rawdev_queue_setup(struct rte_rawdev *dev,
				       uint16_t queue_id,
				       rte_rawdev_obj_t queue_conf)
{
	int ret = 0;
	struct skeleton_rawdev *skeldev;
	struct skeleton_rawdev_queue *q;

	SKELETON_PMD_FUNC_TRACE();

	if (!dev || !queue_conf)
		return -EINVAL;

	skeldev = skeleton_rawdev_get_priv(dev);
	q = &skeldev->queues[queue_id];

	if (skeldev->num_queues > queue_id &&
	    q->depth < SKELETON_QUEUE_MAX_DEPTH) {
		rte_memcpy(q, queue_conf,
			   sizeof(struct skeleton_rawdev_queue));
		clear_queue_bufs(queue_id);
	} else {
		SKELETON_PMD_ERR("Invalid queue configuration");
		ret = -EINVAL;
	}

	return ret;
}
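/* Illustrative note (not part of the original driver): the opaque queue_conf
 * object passed through the rawdev queue API is this PMD's
 * struct skeleton_rawdev_queue. A hedged sketch of a caller, assuming a valid
 * "dev_id", the SKELETON_QUEUE_ATTACH state from skeleton_rawdev.h, and
 * noting that the rte_rawdev_queue_* prototypes differ slightly between
 * DPDK releases:
 *
 *	struct skeleton_rawdev_queue qconf;
 *
 *	rte_rawdev_queue_conf_get(dev_id, 0, &qconf);  // fetch default conf
 *	qconf.state = SKELETON_QUEUE_ATTACH;
 *	rte_rawdev_queue_setup(dev_id, 0, &qconf);
 *	...
 *	rte_rawdev_queue_release(dev_id, 0);
 */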
static int skeleton_rawdev_queue_release(struct rte_rawdev *dev,
					 uint16_t queue_id)
{
	int ret = 0;
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);

	if (skeldev->num_queues > queue_id) {
		skeldev->queues[queue_id].state = SKELETON_QUEUE_DETACH;
		skeldev->queues[queue_id].depth = SKELETON_QUEUE_DEF_DEPTH;
		clear_queue_bufs(queue_id);
	} else {
		SKELETON_PMD_ERR("Invalid queue configuration");
		ret = -EINVAL;
	}

	return ret;
}

static uint16_t skeleton_rawdev_queue_count(struct rte_rawdev *dev)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);
	return skeldev->num_queues;
}

static int skeleton_rawdev_get_attr(struct rte_rawdev *dev,
				    const char *attr_name,
				    uint64_t *attr_value)
{
	int i;
	uint8_t done = 0;
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	if (!dev || !attr_name || !attr_value) {
		SKELETON_PMD_ERR("Invalid arguments for getting attributes");
		return -EINVAL;
	}

	skeldev = skeleton_rawdev_get_priv(dev);

	for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
		if (!skeldev->attr[i].name)
			continue;

		if (!strncmp(skeldev->attr[i].name, attr_name,
			     SKELETON_ATTRIBUTE_NAME_MAX)) {
			*attr_value = skeldev->attr[i].value;
			done = 1;
			SKELETON_PMD_DEBUG("Attribute (%s) Value (%" PRIu64 ")",
					   attr_name, *attr_value);
			break;
		}
	}

	if (done)
		return 0;

	/* Attribute not found */
	return -EINVAL;
}

static int skeleton_rawdev_set_attr(struct rte_rawdev *dev,
				    const char *attr_name,
				    const uint64_t attr_value)
{
	int i;
	uint8_t done = 0;
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	if (!dev || !attr_name) {
		SKELETON_PMD_ERR("Invalid arguments for setting attributes");
		return -EINVAL;
	}

	skeldev = skeleton_rawdev_get_priv(dev);

	/* Check if attribute already exists */
	for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
		if (!skeldev->attr[i].name)
			break;

		if (!strncmp(skeldev->attr[i].name, attr_name,
			     SKELETON_ATTRIBUTE_NAME_MAX)) {
			/* Update value of the existing attribute */
			skeldev->attr[i].value = attr_value;
			done = 1;
			break;
		}
	}

	/* In-place update of an existing attribute is a success */
	if (done)
		return 0;

	if (i < (SKELETON_MAX_ATTRIBUTES - 1)) {
		/* There is still space to insert one more */
		skeldev->attr[i].name = strdup(attr_name);
		if (!skeldev->attr[i].name)
			return -ENOMEM;

		skeldev->attr[i].value = attr_value;
		return 0;
	}

	return -EINVAL;
}
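/* Illustrative note (not part of the original driver): the enqueue/dequeue
 * callbacks below interpret the opaque "context" argument as the target
 * queue id. A hedged sketch of a caller, assuming a valid "dev_id" and an
 * application-owned buffer "app_buf"; the prototypes of
 * rte_rawdev_enqueue_buffers()/rte_rawdev_dequeue_buffers() may differ
 * slightly between DPDK releases:
 *
 *	int queue_id = 0;
 *	struct rte_rawdev_buf buf = { .buf_addr = app_buf };
 *	struct rte_rawdev_buf *bufs[1] = { &buf };
 *
 *	rte_rawdev_enqueue_buffers(dev_id, bufs, 1, &queue_id);
 *	rte_rawdev_dequeue_buffers(dev_id, bufs, 1, &queue_id);
 */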
static int skeleton_rawdev_enqueue_bufs(struct rte_rawdev *dev,
					struct rte_rawdev_buf **buffers,
					unsigned int count,
					rte_rawdev_obj_t context)
{
	unsigned int i;
	uint16_t q_id;
	RTE_SET_USED(dev);

	/* context is essentially the queue_id, transferred as an opaque
	 * object through the library layer. This can help in complex
	 * implementations which require more information than just an
	 * integer - for example, a queue-pair.
	 */
	q_id = *((int *)context);

	for (i = 0; i < count; i++)
		queue_buf[q_id].bufs[i] = buffers[i]->buf_addr;

	return i;
}

static int skeleton_rawdev_dequeue_bufs(struct rte_rawdev *dev,
					struct rte_rawdev_buf **buffers,
					unsigned int count,
					rte_rawdev_obj_t context)
{
	unsigned int i;
	uint16_t q_id;
	RTE_SET_USED(dev);

	/* context is essentially the queue_id, transferred as an opaque
	 * object through the library layer. This can help in complex
	 * implementations which require more information than just an
	 * integer - for example, a queue-pair.
	 */
	q_id = *((int *)context);

	for (i = 0; i < count; i++)
		buffers[i]->buf_addr = queue_buf[q_id].bufs[i];

	return i;
}

static int skeleton_rawdev_dump(struct rte_rawdev *dev, FILE *f)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(f);

	return 0;
}

static int skeleton_rawdev_firmware_status_get(struct rte_rawdev *dev,
					       rte_rawdev_obj_t status_info)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	/* Validate the device pointer before dereferencing it */
	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);

	if (status_info)
		memcpy(status_info, &skeldev->fw.firmware_state,
		       sizeof(enum skeleton_firmware_state));

	return 0;
}

static int skeleton_rawdev_firmware_version_get(
					struct rte_rawdev *dev,
					rte_rawdev_obj_t version_info)
{
	struct skeleton_rawdev *skeldev;
	struct skeleton_firmware_version_info *vi;

	SKELETON_PMD_FUNC_TRACE();

	skeldev = skeleton_rawdev_get_priv(dev);
	vi = version_info;

	vi->major = skeldev->fw.firmware_version.major;
	vi->minor = skeldev->fw.firmware_version.minor;
	vi->subrel = skeldev->fw.firmware_version.subrel;

	return 0;
}
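/* Illustrative note (not part of the original driver): the dummy firmware
 * handling below only flips the state recorded in skeldev->fw. Load moves
 * SKELETON_FW_READY to SKELETON_FW_LOADED, while unload, close and reset
 * return it to SKELETON_FW_READY; skeleton_rawdev_start() requires
 * SKELETON_FW_LOADED before it moves the device to SKELETON_DEV_RUNNING.
 */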
static int skeleton_rawdev_firmware_load(struct rte_rawdev *dev,
					 rte_rawdev_obj_t firmware_buf)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	skeldev = skeleton_rawdev_get_priv(dev);

	/* firmware_buf is an mmapped, possibly DMA'able, buffer. Being a
	 * dummy driver, all this does is check that firmware_buf is not
	 * NULL and set the firmware state.
	 */
	if (!firmware_buf)
		return -EINVAL;

	skeldev->fw.firmware_state = SKELETON_FW_LOADED;

	return 0;
}

static int skeleton_rawdev_firmware_unload(struct rte_rawdev *dev)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	skeldev = skeleton_rawdev_get_priv(dev);

	skeldev->fw.firmware_state = SKELETON_FW_READY;

	return 0;
}

static const struct rte_rawdev_ops skeleton_rawdev_ops = {
	.dev_info_get = skeleton_rawdev_info_get,
	.dev_configure = skeleton_rawdev_configure,
	.dev_start = skeleton_rawdev_start,
	.dev_stop = skeleton_rawdev_stop,
	.dev_close = skeleton_rawdev_close,
	.dev_reset = skeleton_rawdev_reset,

	.queue_def_conf = skeleton_rawdev_queue_def_conf,
	.queue_setup = skeleton_rawdev_queue_setup,
	.queue_release = skeleton_rawdev_queue_release,
	.queue_count = skeleton_rawdev_queue_count,

	.attr_get = skeleton_rawdev_get_attr,
	.attr_set = skeleton_rawdev_set_attr,

	.enqueue_bufs = skeleton_rawdev_enqueue_bufs,
	.dequeue_bufs = skeleton_rawdev_dequeue_bufs,

	.dump = skeleton_rawdev_dump,

	.xstats_get = NULL,
	.xstats_get_names = NULL,
	.xstats_get_by_name = NULL,
	.xstats_reset = NULL,

	.firmware_status_get = skeleton_rawdev_firmware_status_get,
	.firmware_version_get = skeleton_rawdev_firmware_version_get,
	.firmware_load = skeleton_rawdev_firmware_load,
	.firmware_unload = skeleton_rawdev_firmware_unload,

	.dev_selftest = test_rawdev_skeldev,
};

static int
skeleton_rawdev_create(const char *name,
		       struct rte_vdev_device *vdev,
		       int socket_id)
{
	int ret = 0, i;
	struct rte_rawdev *rawdev = NULL;
	struct skeleton_rawdev *skeldev = NULL;

	if (!name) {
		SKELETON_PMD_ERR("Invalid name of the device!");
		ret = -EINVAL;
		goto cleanup;
	}

	/* Allocate device structure */
	rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct skeleton_rawdev),
					 socket_id);
	if (rawdev == NULL) {
		SKELETON_PMD_ERR("Unable to allocate rawdevice");
		ret = -EINVAL;
		goto cleanup;
	}

	ret = rawdev->dev_id; /* return the rawdev id of new device */

	rawdev->dev_ops = &skeleton_rawdev_ops;
	rawdev->device = &vdev->device;

	skeldev = skeleton_rawdev_get_priv(rawdev);

	skeldev->device_id = SKELETON_DEVICE_ID;
	skeldev->vendor_id = SKELETON_VENDOR_ID;
	skeldev->capabilities = SKELETON_DEFAULT_CAPA;

	memset(&skeldev->fw, 0, sizeof(struct skeleton_firmware));

	skeldev->fw.firmware_state = SKELETON_FW_READY;
	skeldev->fw.firmware_version.major = SKELETON_MAJOR_VER;
	skeldev->fw.firmware_version.minor = SKELETON_MINOR_VER;
	skeldev->fw.firmware_version.subrel = SKELETON_SUB_VER;

	skeldev->device_state = SKELETON_DEV_STOPPED;

	/* Reset/set to default queue configuration for this device */
	for (i = 0; i < SKELETON_MAX_QUEUES; i++) {
		skeldev->queues[i].state = SKELETON_QUEUE_DETACH;
		skeldev->queues[i].depth = SKELETON_QUEUE_DEF_DEPTH;
	}

	/* Clear all allocated queue buffers */
	for (i = 0; i < SKELETON_MAX_QUEUES; i++)
		clear_queue_bufs(i);

	return ret;

cleanup:
	if (rawdev)
		rte_rawdev_pmd_release(rawdev);

	return ret;
}

static int
skeleton_rawdev_destroy(const char *name)
{
	int ret;
	struct rte_rawdev *rdev;

	if (!name) {
		SKELETON_PMD_ERR("Invalid device name");
		return -EINVAL;
	}

	rdev = rte_rawdev_pmd_get_named_dev(name);
	if (!rdev) {
		SKELETON_PMD_ERR("Invalid device name (%s)", name);
		return -EINVAL;
	}

	/* rte_rawdev_close is called by pmd_release */
	ret = rte_rawdev_pmd_release(rdev);
	if (ret)
		SKELETON_PMD_DEBUG("Device cleanup failed");

	return 0;
}
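/* Illustrative note (not part of the original driver): the vdev argument
 * handling below is exercised from the EAL command line. Assuming
 * SKELETON_SELFTEST_ARG expands to "selftest" (presumably defined in
 * skeleton_rawdev.h), an invocation would look roughly like:
 *
 *	./app --vdev="rawdev_skeleton,selftest=1"
 *
 * which probes the device and then runs test_rawdev_skeldev() on it.
 */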
static int
skeldev_get_selftest(const char *key __rte_unused,
		     const char *value,
		     void *opaque)
{
	int *flag = opaque;
	*flag = atoi(value);
	return 0;
}

static int
skeldev_parse_vdev_args(struct rte_vdev_device *vdev)
{
	int selftest = 0;
	const char *name;
	const char *params;

	static const char *const args[] = {
		SKELETON_SELFTEST_ARG,
		NULL
	};

	name = rte_vdev_device_name(vdev);

	params = rte_vdev_device_args(vdev);
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			SKELETON_PMD_INFO(
				"Ignoring unsupported params supplied '%s'",
				name);
		} else {
			int ret = rte_kvargs_process(kvlist,
					SKELETON_SELFTEST_ARG,
					skeldev_get_selftest, &selftest);
			if (ret != 0 || (selftest < 0 || selftest > 1)) {
				SKELETON_PMD_ERR("%s: Error in parsing args",
						 name);
				rte_kvargs_free(kvlist);
				ret = -1; /* enforce if selftest is invalid */
				return ret;
			}
		}

		rte_kvargs_free(kvlist);
	}

	return selftest;
}

static int
skeleton_rawdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;
	int selftest = 0, ret = 0;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	/* More than one instance is not supported */
	if (skeldev_init_once) {
		SKELETON_PMD_ERR("Multiple instance not supported for %s",
				 name);
		return -EINVAL;
	}

	SKELETON_PMD_INFO("Init %s on NUMA node %d", name, rte_socket_id());

	selftest = skeldev_parse_vdev_args(vdev);
	/* In case of invalid argument, selftest != 1; ignore other values */

	ret = skeleton_rawdev_create(name, vdev, rte_socket_id());
	if (ret >= 0) {
		/* In case command line argument for 'selftest' was passed;
		 * if invalid arguments were passed, execution continues but
		 * without selftest.
		 */
		if (selftest == 1)
			test_rawdev_skeldev(ret);
	}

	/* Device instance created; second instance not possible */
	skeldev_init_once = 1;

	return ret < 0 ? ret : 0;
}

static int
skeleton_rawdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	int ret;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -1;

	SKELETON_PMD_INFO("Closing %s on NUMA node %d", name, rte_socket_id());

	ret = skeleton_rawdev_destroy(name);
	if (!ret)
		skeldev_init_once = 0;

	return ret;
}

static struct rte_vdev_driver skeleton_pmd_drv = {
	.probe = skeleton_rawdev_probe,
	.remove = skeleton_rawdev_remove
};

RTE_PMD_REGISTER_VDEV(SKELETON_PMD_RAWDEV_NAME, skeleton_pmd_drv);

RTE_INIT(skeleton_pmd_init_log)
{
	skeleton_pmd_logtype = rte_log_register("rawdev.skeleton");
	if (skeleton_pmd_logtype >= 0)
		rte_log_set_level(skeleton_pmd_logtype, RTE_LOG_INFO);
}