/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include "rte_eth_ring.h"
#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>

#define ETH_RING_NUMA_NODE_ACTION_ARG	"nodeaction"
#define ETH_RING_ACTION_CREATE		"CREATE"
#define ETH_RING_ACTION_ATTACH		"ATTACH"
#define ETH_RING_INTERNAL_ARG		"internal"
#define ETH_RING_INTERNAL_ARG_MAX_LEN	19 /* "0x..16chars..\0" */

static const char *valid_arguments[] = {
	ETH_RING_NUMA_NODE_ACTION_ARG,
	ETH_RING_INTERNAL_ARG,
	NULL
};

struct ring_internal_args {
	struct rte_ring * const *rx_queues;
	const unsigned int nb_rx_queues;
	struct rte_ring * const *tx_queues;
	const unsigned int nb_tx_queues;
	const unsigned int numa_node;
	void *addr; /* self addr for sanity check */
};

enum dev_action {
	DEV_CREATE,
	DEV_ATTACH
};

struct ring_queue {
	struct rte_ring *rng;
	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

struct pmd_internals {
	unsigned int max_rx_queues;
	unsigned int max_tx_queues;

	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

	struct rte_ether_addr address;
	enum dev_action action;
};

static struct rte_eth_link pmd_link = {
	.link_speed = RTE_ETH_SPEED_NUM_10G,
	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
	.link_status = RTE_ETH_LINK_DOWN,
	.link_autoneg = RTE_ETH_LINK_FIXED,
};

RTE_LOG_REGISTER_DEFAULT(eth_ring_logtype, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_ring_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	/* only one thread may dequeue from a single-consumer ring,
	 * so the counter can be updated without an atomic operation
	 */
	if (r->rng->flags & RING_F_SC_DEQ)
		r->rx_pkts.cnt += nb_rx;
	else
		rte_atomic64_add(&(r->rx_pkts), nb_rx);
	return nb_rx;
}

static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	/* likewise, a single-producer ring needs no atomic update */
	if (r->rng->flags & RING_F_SP_ENQ)
		r->tx_pkts.cnt += nb_tx;
	else
		rte_atomic64_add(&(r->tx_pkts), nb_tx);
	return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
	return 0;
}

static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_started = 0;
	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
	return 0;
}

static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
	return 0;
}

static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
	return 0;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
	return 0;
}


static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
	dev_info->min_rx_bufsize = 0;

	return 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
		rx_total += stats->q_ipackets[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
		tx_total += stats->q_opackets[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		internal->rx_ring_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		internal->tx_ring_queues[i].tx_pkts.cnt = 0;

	return 0;
}

static void
eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
	uint32_t index __rte_unused)
{
}

static int
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
	struct rte_ether_addr *mac_addr __rte_unused,
	uint32_t index __rte_unused,
	uint32_t vmdq __rte_unused)
{
	return 0;
}

static int
eth_promiscuous_enable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_promiscuous_disable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_allmulticast_enable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_allmulticast_disable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = NULL;
	struct ring_queue *r = NULL;
	uint16_t i;
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = eth_dev_stop(dev);

	internals = dev->data->dev_private;
	if (internals->action == DEV_CREATE) {
		/*
		 * it is only necessary to delete the rings in rx_queues because
		 * they are the same ones used in tx_queues
		 */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			r = dev->data->rx_queues[i];
			rte_ring_free(r->rng);
		}
	}

	/* mac_addrs must not be freed alone because it is part of dev_private */
	dev->data->mac_addrs = NULL;

	return ret;
}

static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
	.promiscuous_enable = eth_promiscuous_enable,
	.promiscuous_disable = eth_promiscuous_disable,
	.allmulticast_enable = eth_allmulticast_enable,
	.allmulticast_disable = eth_allmulticast_disable,
};

static int
do_eth_dev_ring_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_ring * const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node, enum dev_action action,
		struct rte_eth_dev **eth_dev_p)
{
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	void **rx_queues_local = NULL;
	void **tx_queues_local = NULL;
	unsigned int i;

	PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u",
			numa_node);

	rx_queues_local = rte_calloc_socket(name, nb_rx_queues,
			sizeof(void *), 0, numa_node);
	if (rx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	tx_queues_local = rte_calloc_socket(name, nb_tx_queues,
			sizeof(void *), 0, numa_node);
	if (tx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		rte_errno = ENOSPC;
		goto error;
	}

	/* now put it all together
	 * - store EAL device in eth_dev,
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev_data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */

	eth_dev->device = &vdev->device;

	data = eth_dev->data;
	data->rx_queues = rx_queues_local;
	data->tx_queues = tx_queues_local;

	internals->action = action;
	internals->max_rx_queues = nb_rx_queues;
	internals->max_tx_queues = nb_tx_queues;
	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_ring_queues[i].rng = rx_queues[i];
		data->rx_queues[i] = &internals->rx_ring_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_ring_queues[i].rng = tx_queues[i];
		data->tx_queues[i] = &internals->tx_ring_queues[i];
	}

	data->dev_private = internals;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->address;
	data->promiscuous = 1;
	data->all_multicast = 1;
	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	eth_dev->dev_ops = &ops;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_ring_rx;
	eth_dev->tx_pkt_burst = eth_ring_tx;

	rte_eth_dev_probing_finish(eth_dev);
	*eth_dev_p = eth_dev;

	return data->port_id;

error:
	rte_free(rx_queues_local);
	rte_free(tx_queues_local);
	rte_free(internals);

	return -1;
}

int
rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node)
{
	struct ring_internal_args args = {
		.rx_queues = rx_queues,
		.nb_rx_queues = nb_rx_queues,
		.tx_queues = tx_queues,
		.nb_tx_queues = nb_tx_queues,
		.numa_node = numa_node,
		.addr = &args,
	};
	char args_str[32];
	char ring_name[RTE_RING_NAMESIZE];
	uint16_t port_id = RTE_MAX_ETHPORTS;
	int ret;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (tx_queues == NULL && nb_tx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
		rte_errno = EINVAL;
		return -1;
	}
	if (nb_tx_queues > RTE_PMD_RING_MAX_TX_RINGS) {
		rte_errno = EINVAL;
		return -1;
	}

	snprintf(args_str, sizeof(args_str), "%s=%p",
		ETH_RING_INTERNAL_ARG, &args);

	ret = snprintf(ring_name, sizeof(ring_name), "net_ring_%s", name);
	if (ret >= (int)sizeof(ring_name)) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}

	ret = rte_vdev_init(ring_name, args_str);
	if (ret) {
		rte_errno = EINVAL;
		return -1;
	}

	ret = rte_eth_dev_get_port_by_name(ring_name, &port_id);
	if (ret) {
		rte_errno = ENODEV;
		return -1;
	}

	return port_id;
}

int
rte_eth_from_ring(struct rte_ring *r)
{
	return rte_eth_from_rings(r->name, &r, 1, &r, 1,
			r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
}

static int
eth_dev_ring_create(const char *name,
		struct rte_vdev_device *vdev,
		const unsigned int numa_node,
		enum dev_action action, struct rte_eth_dev **eth_dev)
{
	/* rx and tx are named from the point of view of the first port;
	 * from the point of view of the second port they are swapped.
	 */
	struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
	unsigned int i;
	char rng_name[RTE_RING_NAMESIZE];
	unsigned int num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
			RTE_PMD_RING_MAX_TX_RINGS);

	for (i = 0; i < num_rings; i++) {
		int cc;

		cc = snprintf(rng_name, sizeof(rng_name),
				"ETH_RXTX%u_%s", i, name);
		if (cc >= (int)sizeof(rng_name)) {
			rte_errno = ENAMETOOLONG;
			return -1;
		}

		rxtx[i] = (action == DEV_CREATE) ?
			rte_ring_create(rng_name, 1024, numa_node,
				RING_F_SP_ENQ|RING_F_SC_DEQ) :
			rte_ring_lookup(rng_name);
		if (rxtx[i] == NULL)
			return -1;
	}

	if (do_eth_dev_ring_create(name, vdev, rxtx, num_rings, rxtx, num_rings,
			numa_node, action, eth_dev) < 0)
		return -1;

	return 0;
}

struct node_action_pair {
	char name[PATH_MAX];
	unsigned int node;
	enum dev_action action;
};

struct node_action_list {
	unsigned int total;
	unsigned int count;
	struct node_action_pair *list;
};

static int parse_kvlist(const char *key __rte_unused,
			const char *value, void *data)
{
	struct node_action_list *info = data;
	int ret;
	char *name;
	char *action;
	char *node;
	char *end;

	name = strdup(value);

	ret = -EINVAL;

	if (!name) {
		PMD_LOG(WARNING, "command line parameter is empty for ring pmd!");
		goto out;
	}

	node = strchr(name, ':');
	if (!node) {
		PMD_LOG(WARNING, "could not parse node value from %s",
			name);
		goto out;
	}

	*node = '\0';
	node++;

	action = strchr(node, ':');
	if (!action) {
		PMD_LOG(WARNING, "could not parse action value from %s",
			node);
		goto out;
	}

	*action = '\0';
	action++;

	/*
	 * Need to do some sanity checking here
	 */

	if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
		info->list[info->count].action = DEV_ATTACH;
	else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
		info->list[info->count].action = DEV_CREATE;
	else
		goto out;

	errno = 0;
	info->list[info->count].node = strtol(node, &end, 10);

	if ((errno != 0) || (*end != '\0')) {
		PMD_LOG(WARNING,
			"node value %s is unparseable as a number", node);
		goto out;
	}

	strlcpy(info->list[info->count].name, name,
		sizeof(info->list[info->count].name));

	info->count++;

	ret = 0;
out:
	free(name);
	return ret;
}

static int
parse_internal_args(const char *key __rte_unused, const char *value,
		void *data)
{
	struct ring_internal_args **internal_args = data;
	void *args;
	int ret, n;

	/* make sure 'value' is valid pointer length */
	if (strnlen(value, ETH_RING_INTERNAL_ARG_MAX_LEN) >=
			ETH_RING_INTERNAL_ARG_MAX_LEN) {
		PMD_LOG(ERR, "Error parsing internal args, argument is too long");
		return -1;
	}

	ret = sscanf(value, "%p%n", &args, &n);
	if (ret == 0 || (size_t)n != strlen(value)) {
		PMD_LOG(ERR, "Error parsing internal args");

		return -1;
	}

	*internal_args = args;

	if ((*internal_args)->addr != args)
		return -1;

	return 0;
}

static int
rte_pmd_ring_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	struct node_action_list *info = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct ring_internal_args *internal_args;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);

	PMD_LOG(INFO, "Initializing pmd_ring for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;

		eth_dev->rx_pkt_burst = eth_ring_rx;
		eth_dev->tx_pkt_burst = eth_ring_tx;

		rte_eth_dev_probing_finish(eth_dev);

		return 0;
	}

	if (params == NULL || params[0] == '\0') {
		ret = eth_dev_ring_create(name, dev, rte_socket_id(), DEV_CREATE,
				&eth_dev);
		if (ret == -1) {
			PMD_LOG(INFO,
				"Attach to pmd_ring for %s", name);
			ret = eth_dev_ring_create(name, dev, rte_socket_id(),
						  DEV_ATTACH, &eth_dev);
		}
	} else {
		kvlist = rte_kvargs_parse(params, valid_arguments);

		if (!kvlist) {
			PMD_LOG(INFO,
				"Ignoring unsupported parameters when creating rings-backed ethernet device");
			ret = eth_dev_ring_create(name, dev, rte_socket_id(),
						  DEV_CREATE, &eth_dev);
			if (ret == -1) {
				PMD_LOG(INFO,
					"Attach to pmd_ring for %s",
					name);
				ret = eth_dev_ring_create(name, dev, rte_socket_id(),
							  DEV_ATTACH, &eth_dev);
			}

			return ret;
		}

		if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
			ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
						 parse_internal_args,
						 &internal_args);
			if (ret < 0)
				goto out_free;

			ret = do_eth_dev_ring_create(name, dev,
						     internal_args->rx_queues,
						     internal_args->nb_rx_queues,
						     internal_args->tx_queues,
						     internal_args->nb_tx_queues,
						     internal_args->numa_node,
						     DEV_ATTACH,
						     &eth_dev);
			if (ret >= 0)
				ret = 0;
		} else {
			ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
			info = rte_zmalloc("struct node_action_list",
					   sizeof(struct node_action_list) +
					   (sizeof(struct node_action_pair) * ret),
					   0);
			if (!info)
				goto out_free;

			info->total = ret;
			info->list = (struct node_action_pair *)(info + 1);

			ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
						 parse_kvlist, info);

			if (ret < 0)
				goto out_free;

			for (info->count = 0; info->count < info->total; info->count++) {
				ret = eth_dev_ring_create(info->list[info->count].name,
							  dev,
							  info->list[info->count].node,
							  info->list[info->count].action,
							  &eth_dev);
				if ((ret == -1) &&
				    (info->list[info->count].action == DEV_CREATE)) {
					PMD_LOG(INFO,
						"Attach to pmd_ring for %s",
						name);
					ret = eth_dev_ring_create(name, dev,
							info->list[info->count].node,
							DEV_ATTACH,
							&eth_dev);
				}
			}
		}
	}

out_free:
	rte_kvargs_free(kvlist);
	rte_free(info);
	return ret;
}

static int
rte_pmd_ring_remove(struct rte_vdev_device *dev)
{
	const char *name = rte_vdev_device_name(dev);
	struct rte_eth_dev *eth_dev = NULL;

	PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name);

	if (name == NULL)
		return -EINVAL;

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);
	return 0;
}

static struct rte_vdev_driver pmd_ring_drv = {
	.probe = rte_pmd_ring_probe,
	.remove = rte_pmd_ring_remove,
};

RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
	ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");
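
/*
 * Usage sketch: an application can wrap an existing ring in an ethdev port
 * through the rte_eth_from_rings() helper above, e.g.
 *
 *	struct rte_ring *r = rte_ring_create("r0", 1024, rte_socket_id(),
 *			RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	int port_id = rte_eth_from_rings("r0", &r, 1, &r, 1, rte_socket_id());
 *
 * or instantiate the PMD from the EAL command line with the parameters
 * registered above, e.g.
 *
 *	--vdev=net_ring0,nodeaction=r0:0:CREATE
 *
 * The ring name "r0" and the vdev instance name "net_ring0" are placeholder
 * names chosen only for this example.
 */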