1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2017 Intel Corporation 3 */ 4 5 #include <sys/types.h> 6 #include <sys/queue.h> 7 #include <ctype.h> 8 #include <stdio.h> 9 #include <stdlib.h> 10 #include <string.h> 11 #include <stdarg.h> 12 #include <errno.h> 13 #include <stdbool.h> 14 #include <stdint.h> 15 #include <inttypes.h> 16 #include <netinet/in.h> 17 18 #include <rte_byteorder.h> 19 #include <rte_log.h> 20 #include <rte_debug.h> 21 #include <rte_interrupts.h> 22 #include <rte_memory.h> 23 #include <rte_memcpy.h> 24 #include <rte_memzone.h> 25 #include <rte_launch.h> 26 #include <rte_eal.h> 27 #include <rte_per_lcore.h> 28 #include <rte_lcore.h> 29 #include <rte_atomic.h> 30 #include <rte_branch_prediction.h> 31 #include <rte_common.h> 32 #include <rte_mempool.h> 33 #include <rte_malloc.h> 34 #include <rte_mbuf.h> 35 #include <rte_errno.h> 36 #include <rte_spinlock.h> 37 #include <rte_string_fns.h> 38 #include <rte_kvargs.h> 39 #include <rte_class.h> 40 41 #include "rte_ether.h" 42 #include "rte_ethdev.h" 43 #include "rte_ethdev_driver.h" 44 #include "ethdev_profile.h" 45 #include "ethdev_private.h" 46 47 int rte_eth_dev_logtype; 48 49 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data"; 50 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS]; 51 static uint16_t eth_dev_last_created_port; 52 53 /* spinlock for eth device callbacks */ 54 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER; 55 56 /* spinlock for add/remove rx callbacks */ 57 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER; 58 59 /* spinlock for add/remove tx callbacks */ 60 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER; 61 62 /* spinlock for shared data allocation */ 63 static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER; 64 65 /* store statistics names and its offset in stats structure */ 66 struct rte_eth_xstats_name_off { 67 char name[RTE_ETH_XSTATS_NAME_SIZE]; 68 unsigned offset; 69 }; 70 71 /* Shared memory between primary and secondary processes. */ 72 static struct { 73 uint64_t next_owner_id; 74 rte_spinlock_t ownership_lock; 75 struct rte_eth_dev_data data[RTE_MAX_ETHPORTS]; 76 } *rte_eth_dev_shared_data; 77 78 static const struct rte_eth_xstats_name_off rte_stats_strings[] = { 79 {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)}, 80 {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)}, 81 {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)}, 82 {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)}, 83 {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)}, 84 {"rx_errors", offsetof(struct rte_eth_stats, ierrors)}, 85 {"tx_errors", offsetof(struct rte_eth_stats, oerrors)}, 86 {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats, 87 rx_nombuf)}, 88 }; 89 90 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0])) 91 92 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = { 93 {"packets", offsetof(struct rte_eth_stats, q_ipackets)}, 94 {"bytes", offsetof(struct rte_eth_stats, q_ibytes)}, 95 {"errors", offsetof(struct rte_eth_stats, q_errors)}, 96 }; 97 98 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \ 99 sizeof(rte_rxq_stats_strings[0])) 100 101 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = { 102 {"packets", offsetof(struct rte_eth_stats, q_opackets)}, 103 {"bytes", offsetof(struct rte_eth_stats, q_obytes)}, 104 }; 105 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \ 106 sizeof(rte_txq_stats_strings[0])) 107 108 #define RTE_RX_OFFLOAD_BIT2STR(_name) \ 109 { DEV_RX_OFFLOAD_##_name, #_name } 110 111 static const struct { 112 uint64_t offload; 113 const char *name; 114 } rte_rx_offload_names[] = { 115 RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP), 116 RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM), 117 RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM), 118 RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM), 119 RTE_RX_OFFLOAD_BIT2STR(TCP_LRO), 120 RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP), 121 RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM), 122 RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP), 123 RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT), 124 RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER), 125 RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND), 126 RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME), 127 RTE_RX_OFFLOAD_BIT2STR(SCATTER), 128 RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP), 129 RTE_RX_OFFLOAD_BIT2STR(SECURITY), 130 RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC), 131 RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM), 132 RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM), 133 }; 134 135 #undef RTE_RX_OFFLOAD_BIT2STR 136 137 #define RTE_TX_OFFLOAD_BIT2STR(_name) \ 138 { DEV_TX_OFFLOAD_##_name, #_name } 139 140 static const struct { 141 uint64_t offload; 142 const char *name; 143 } rte_tx_offload_names[] = { 144 RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT), 145 RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM), 146 RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM), 147 RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM), 148 RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM), 149 RTE_TX_OFFLOAD_BIT2STR(TCP_TSO), 150 RTE_TX_OFFLOAD_BIT2STR(UDP_TSO), 151 RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM), 152 RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT), 153 RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO), 154 RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO), 155 RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO), 156 RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO), 157 RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT), 158 RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE), 159 RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS), 160 RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE), 161 RTE_TX_OFFLOAD_BIT2STR(SECURITY), 162 RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO), 163 RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO), 164 RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM), 165 RTE_TX_OFFLOAD_BIT2STR(MATCH_METADATA), 166 }; 167 168 #undef RTE_TX_OFFLOAD_BIT2STR 169 170 /** 171 * The user application callback description. 172 * 173 * It contains callback address to be registered by user application, 174 * the pointer to the parameters for callback, and the event type. 175 */ 176 struct rte_eth_dev_callback { 177 TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */ 178 rte_eth_dev_cb_fn cb_fn; /**< Callback address */ 179 void *cb_arg; /**< Parameter for callback */ 180 void *ret_param; /**< Return parameter */ 181 enum rte_eth_event_type event; /**< Interrupt event type */ 182 uint32_t active; /**< Callback is executing */ 183 }; 184 185 enum { 186 STAT_QMAP_TX = 0, 187 STAT_QMAP_RX 188 }; 189 190 int 191 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str) 192 { 193 int ret; 194 struct rte_devargs devargs = {.args = NULL}; 195 const char *bus_param_key; 196 char *bus_str = NULL; 197 char *cls_str = NULL; 198 int str_size; 199 200 memset(iter, 0, sizeof(*iter)); 201 202 /* 203 * The devargs string may use various syntaxes: 204 * - 0000:08:00.0,representor=[1-3] 205 * - pci:0000:06:00.0,representor=[0,5] 206 * - class=eth,mac=00:11:22:33:44:55 207 * A new syntax is in development (not yet supported): 208 * - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z 209 */ 210 211 /* 212 * Handle pure class filter (i.e. without any bus-level argument), 213 * from future new syntax. 214 * rte_devargs_parse() is not yet supporting the new syntax, 215 * that's why this simple case is temporarily parsed here. 216 */ 217 #define iter_anybus_str "class=eth," 218 if (strncmp(devargs_str, iter_anybus_str, 219 strlen(iter_anybus_str)) == 0) { 220 iter->cls_str = devargs_str + strlen(iter_anybus_str); 221 goto end; 222 } 223 224 /* Split bus, device and parameters. */ 225 ret = rte_devargs_parse(&devargs, devargs_str); 226 if (ret != 0) 227 goto error; 228 229 /* 230 * Assume parameters of old syntax can match only at ethdev level. 231 * Extra parameters will be ignored, thanks to "+" prefix. 232 */ 233 str_size = strlen(devargs.args) + 2; 234 cls_str = malloc(str_size); 235 if (cls_str == NULL) { 236 ret = -ENOMEM; 237 goto error; 238 } 239 ret = snprintf(cls_str, str_size, "+%s", devargs.args); 240 if (ret != str_size - 1) { 241 ret = -EINVAL; 242 goto error; 243 } 244 iter->cls_str = cls_str; 245 free(devargs.args); /* allocated by rte_devargs_parse() */ 246 devargs.args = NULL; 247 248 iter->bus = devargs.bus; 249 if (iter->bus->dev_iterate == NULL) { 250 ret = -ENOTSUP; 251 goto error; 252 } 253 254 /* Convert bus args to new syntax for use with new API dev_iterate. */ 255 if (strcmp(iter->bus->name, "vdev") == 0) { 256 bus_param_key = "name"; 257 } else if (strcmp(iter->bus->name, "pci") == 0) { 258 bus_param_key = "addr"; 259 } else { 260 ret = -ENOTSUP; 261 goto error; 262 } 263 str_size = strlen(bus_param_key) + strlen(devargs.name) + 2; 264 bus_str = malloc(str_size); 265 if (bus_str == NULL) { 266 ret = -ENOMEM; 267 goto error; 268 } 269 ret = snprintf(bus_str, str_size, "%s=%s", 270 bus_param_key, devargs.name); 271 if (ret != str_size - 1) { 272 ret = -EINVAL; 273 goto error; 274 } 275 iter->bus_str = bus_str; 276 277 end: 278 iter->cls = rte_class_find_by_name("eth"); 279 return 0; 280 281 error: 282 if (ret == -ENOTSUP) 283 RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n", 284 iter->bus->name); 285 free(devargs.args); 286 free(bus_str); 287 free(cls_str); 288 return ret; 289 } 290 291 uint16_t 292 rte_eth_iterator_next(struct rte_dev_iterator *iter) 293 { 294 if (iter->cls == NULL) /* invalid ethdev iterator */ 295 return RTE_MAX_ETHPORTS; 296 297 do { /* loop to try all matching rte_device */ 298 /* If not pure ethdev filter and */ 299 if (iter->bus != NULL && 300 /* not in middle of rte_eth_dev iteration, */ 301 iter->class_device == NULL) { 302 /* get next rte_device to try. */ 303 iter->device = iter->bus->dev_iterate( 304 iter->device, iter->bus_str, iter); 305 if (iter->device == NULL) 306 break; /* no more rte_device candidate */ 307 } 308 /* A device is matching bus part, need to check ethdev part. */ 309 iter->class_device = iter->cls->dev_iterate( 310 iter->class_device, iter->cls_str, iter); 311 if (iter->class_device != NULL) 312 return eth_dev_to_id(iter->class_device); /* match */ 313 } while (iter->bus != NULL); /* need to try next rte_device */ 314 315 /* No more ethdev port to iterate. */ 316 rte_eth_iterator_cleanup(iter); 317 return RTE_MAX_ETHPORTS; 318 } 319 320 void 321 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter) 322 { 323 if (iter->bus_str == NULL) 324 return; /* nothing to free in pure class filter */ 325 free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */ 326 free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */ 327 memset(iter, 0, sizeof(*iter)); 328 } 329 330 uint16_t 331 rte_eth_find_next(uint16_t port_id) 332 { 333 while (port_id < RTE_MAX_ETHPORTS && 334 rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED && 335 rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) 336 port_id++; 337 338 if (port_id >= RTE_MAX_ETHPORTS) 339 return RTE_MAX_ETHPORTS; 340 341 return port_id; 342 } 343 344 static void 345 rte_eth_dev_shared_data_prepare(void) 346 { 347 const unsigned flags = 0; 348 const struct rte_memzone *mz; 349 350 rte_spinlock_lock(&rte_eth_shared_data_lock); 351 352 if (rte_eth_dev_shared_data == NULL) { 353 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 354 /* Allocate port data and ownership shared memory. */ 355 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA, 356 sizeof(*rte_eth_dev_shared_data), 357 rte_socket_id(), flags); 358 } else 359 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA); 360 if (mz == NULL) 361 rte_panic("Cannot allocate ethdev shared data\n"); 362 363 rte_eth_dev_shared_data = mz->addr; 364 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 365 rte_eth_dev_shared_data->next_owner_id = 366 RTE_ETH_DEV_NO_OWNER + 1; 367 rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock); 368 memset(rte_eth_dev_shared_data->data, 0, 369 sizeof(rte_eth_dev_shared_data->data)); 370 } 371 } 372 373 rte_spinlock_unlock(&rte_eth_shared_data_lock); 374 } 375 376 static bool 377 is_allocated(const struct rte_eth_dev *ethdev) 378 { 379 return ethdev->data->name[0] != '\0'; 380 } 381 382 static struct rte_eth_dev * 383 _rte_eth_dev_allocated(const char *name) 384 { 385 unsigned i; 386 387 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 388 if (rte_eth_devices[i].data != NULL && 389 strcmp(rte_eth_devices[i].data->name, name) == 0) 390 return &rte_eth_devices[i]; 391 } 392 return NULL; 393 } 394 395 struct rte_eth_dev * 396 rte_eth_dev_allocated(const char *name) 397 { 398 struct rte_eth_dev *ethdev; 399 400 rte_eth_dev_shared_data_prepare(); 401 402 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 403 404 ethdev = _rte_eth_dev_allocated(name); 405 406 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 407 408 return ethdev; 409 } 410 411 static uint16_t 412 rte_eth_dev_find_free_port(void) 413 { 414 unsigned i; 415 416 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 417 /* Using shared name field to find a free port. */ 418 if (rte_eth_dev_shared_data->data[i].name[0] == '\0') { 419 RTE_ASSERT(rte_eth_devices[i].state == 420 RTE_ETH_DEV_UNUSED); 421 return i; 422 } 423 } 424 return RTE_MAX_ETHPORTS; 425 } 426 427 static struct rte_eth_dev * 428 eth_dev_get(uint16_t port_id) 429 { 430 struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id]; 431 432 eth_dev->data = &rte_eth_dev_shared_data->data[port_id]; 433 434 eth_dev_last_created_port = port_id; 435 436 return eth_dev; 437 } 438 439 struct rte_eth_dev * 440 rte_eth_dev_allocate(const char *name) 441 { 442 uint16_t port_id; 443 struct rte_eth_dev *eth_dev = NULL; 444 445 rte_eth_dev_shared_data_prepare(); 446 447 /* Synchronize port creation between primary and secondary threads. */ 448 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 449 450 if (_rte_eth_dev_allocated(name) != NULL) { 451 RTE_ETHDEV_LOG(ERR, 452 "Ethernet device with name %s already allocated\n", 453 name); 454 goto unlock; 455 } 456 457 port_id = rte_eth_dev_find_free_port(); 458 if (port_id == RTE_MAX_ETHPORTS) { 459 RTE_ETHDEV_LOG(ERR, 460 "Reached maximum number of Ethernet ports\n"); 461 goto unlock; 462 } 463 464 eth_dev = eth_dev_get(port_id); 465 snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name); 466 eth_dev->data->port_id = port_id; 467 eth_dev->data->mtu = ETHER_MTU; 468 469 unlock: 470 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 471 472 return eth_dev; 473 } 474 475 /* 476 * Attach to a port already registered by the primary process, which 477 * makes sure that the same device would have the same port id both 478 * in the primary and secondary process. 479 */ 480 struct rte_eth_dev * 481 rte_eth_dev_attach_secondary(const char *name) 482 { 483 uint16_t i; 484 struct rte_eth_dev *eth_dev = NULL; 485 486 rte_eth_dev_shared_data_prepare(); 487 488 /* Synchronize port attachment to primary port creation and release. */ 489 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 490 491 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 492 if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0) 493 break; 494 } 495 if (i == RTE_MAX_ETHPORTS) { 496 RTE_ETHDEV_LOG(ERR, 497 "Device %s is not driven by the primary process\n", 498 name); 499 } else { 500 eth_dev = eth_dev_get(i); 501 RTE_ASSERT(eth_dev->data->port_id == i); 502 } 503 504 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 505 return eth_dev; 506 } 507 508 int 509 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) 510 { 511 if (eth_dev == NULL) 512 return -EINVAL; 513 514 rte_eth_dev_shared_data_prepare(); 515 516 if (eth_dev->state != RTE_ETH_DEV_UNUSED) 517 _rte_eth_dev_callback_process(eth_dev, 518 RTE_ETH_EVENT_DESTROY, NULL); 519 520 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 521 522 eth_dev->state = RTE_ETH_DEV_UNUSED; 523 524 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 525 rte_free(eth_dev->data->rx_queues); 526 rte_free(eth_dev->data->tx_queues); 527 rte_free(eth_dev->data->mac_addrs); 528 rte_free(eth_dev->data->hash_mac_addrs); 529 rte_free(eth_dev->data->dev_private); 530 memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data)); 531 } 532 533 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 534 535 return 0; 536 } 537 538 int 539 rte_eth_dev_is_valid_port(uint16_t port_id) 540 { 541 if (port_id >= RTE_MAX_ETHPORTS || 542 (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)) 543 return 0; 544 else 545 return 1; 546 } 547 548 static int 549 rte_eth_is_valid_owner_id(uint64_t owner_id) 550 { 551 if (owner_id == RTE_ETH_DEV_NO_OWNER || 552 rte_eth_dev_shared_data->next_owner_id <= owner_id) 553 return 0; 554 return 1; 555 } 556 557 uint64_t 558 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id) 559 { 560 while (port_id < RTE_MAX_ETHPORTS && 561 ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED && 562 rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) || 563 rte_eth_devices[port_id].data->owner.id != owner_id)) 564 port_id++; 565 566 if (port_id >= RTE_MAX_ETHPORTS) 567 return RTE_MAX_ETHPORTS; 568 569 return port_id; 570 } 571 572 int __rte_experimental 573 rte_eth_dev_owner_new(uint64_t *owner_id) 574 { 575 rte_eth_dev_shared_data_prepare(); 576 577 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 578 579 *owner_id = rte_eth_dev_shared_data->next_owner_id++; 580 581 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 582 return 0; 583 } 584 585 static int 586 _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id, 587 const struct rte_eth_dev_owner *new_owner) 588 { 589 struct rte_eth_dev *ethdev = &rte_eth_devices[port_id]; 590 struct rte_eth_dev_owner *port_owner; 591 int sret; 592 593 if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) { 594 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n", 595 port_id); 596 return -ENODEV; 597 } 598 599 if (!rte_eth_is_valid_owner_id(new_owner->id) && 600 !rte_eth_is_valid_owner_id(old_owner_id)) { 601 RTE_ETHDEV_LOG(ERR, 602 "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n", 603 old_owner_id, new_owner->id); 604 return -EINVAL; 605 } 606 607 port_owner = &rte_eth_devices[port_id].data->owner; 608 if (port_owner->id != old_owner_id) { 609 RTE_ETHDEV_LOG(ERR, 610 "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n", 611 port_id, port_owner->name, port_owner->id); 612 return -EPERM; 613 } 614 615 sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s", 616 new_owner->name); 617 if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN) 618 RTE_ETHDEV_LOG(ERR, "Port %u owner name was truncated\n", 619 port_id); 620 621 port_owner->id = new_owner->id; 622 623 RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n", 624 port_id, new_owner->name, new_owner->id); 625 626 return 0; 627 } 628 629 int __rte_experimental 630 rte_eth_dev_owner_set(const uint16_t port_id, 631 const struct rte_eth_dev_owner *owner) 632 { 633 int ret; 634 635 rte_eth_dev_shared_data_prepare(); 636 637 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 638 639 ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner); 640 641 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 642 return ret; 643 } 644 645 int __rte_experimental 646 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id) 647 { 648 const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner) 649 {.id = RTE_ETH_DEV_NO_OWNER, .name = ""}; 650 int ret; 651 652 rte_eth_dev_shared_data_prepare(); 653 654 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 655 656 ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner); 657 658 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 659 return ret; 660 } 661 662 void __rte_experimental 663 rte_eth_dev_owner_delete(const uint64_t owner_id) 664 { 665 uint16_t port_id; 666 667 rte_eth_dev_shared_data_prepare(); 668 669 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 670 671 if (rte_eth_is_valid_owner_id(owner_id)) { 672 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) 673 if (rte_eth_devices[port_id].data->owner.id == owner_id) 674 memset(&rte_eth_devices[port_id].data->owner, 0, 675 sizeof(struct rte_eth_dev_owner)); 676 RTE_ETHDEV_LOG(NOTICE, 677 "All port owners owned by %016"PRIx64" identifier have removed\n", 678 owner_id); 679 } else { 680 RTE_ETHDEV_LOG(ERR, 681 "Invalid owner id=%016"PRIx64"\n", 682 owner_id); 683 } 684 685 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 686 } 687 688 int __rte_experimental 689 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner) 690 { 691 int ret = 0; 692 struct rte_eth_dev *ethdev = &rte_eth_devices[port_id]; 693 694 rte_eth_dev_shared_data_prepare(); 695 696 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 697 698 if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) { 699 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n", 700 port_id); 701 ret = -ENODEV; 702 } else { 703 rte_memcpy(owner, ðdev->data->owner, sizeof(*owner)); 704 } 705 706 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 707 return ret; 708 } 709 710 int 711 rte_eth_dev_socket_id(uint16_t port_id) 712 { 713 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 714 return rte_eth_devices[port_id].data->numa_node; 715 } 716 717 void * 718 rte_eth_dev_get_sec_ctx(uint16_t port_id) 719 { 720 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL); 721 return rte_eth_devices[port_id].security_ctx; 722 } 723 724 uint16_t 725 rte_eth_dev_count(void) 726 { 727 return rte_eth_dev_count_avail(); 728 } 729 730 uint16_t 731 rte_eth_dev_count_avail(void) 732 { 733 uint16_t p; 734 uint16_t count; 735 736 count = 0; 737 738 RTE_ETH_FOREACH_DEV(p) 739 count++; 740 741 return count; 742 } 743 744 uint16_t __rte_experimental 745 rte_eth_dev_count_total(void) 746 { 747 uint16_t port, count = 0; 748 749 for (port = 0; port < RTE_MAX_ETHPORTS; port++) 750 if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED) 751 count++; 752 753 return count; 754 } 755 756 int 757 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name) 758 { 759 char *tmp; 760 761 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 762 763 if (name == NULL) { 764 RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n"); 765 return -EINVAL; 766 } 767 768 /* shouldn't check 'rte_eth_devices[i].data', 769 * because it might be overwritten by VDEV PMD */ 770 tmp = rte_eth_dev_shared_data->data[port_id].name; 771 strcpy(name, tmp); 772 return 0; 773 } 774 775 int 776 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) 777 { 778 uint32_t pid; 779 780 if (name == NULL) { 781 RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n"); 782 return -EINVAL; 783 } 784 785 for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) { 786 if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED && 787 !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) { 788 *port_id = pid; 789 return 0; 790 } 791 } 792 793 return -ENODEV; 794 } 795 796 static int 797 eth_err(uint16_t port_id, int ret) 798 { 799 if (ret == 0) 800 return 0; 801 if (rte_eth_dev_is_removed(port_id)) 802 return -EIO; 803 return ret; 804 } 805 806 static int 807 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) 808 { 809 uint16_t old_nb_queues = dev->data->nb_rx_queues; 810 void **rxq; 811 unsigned i; 812 813 if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */ 814 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues", 815 sizeof(dev->data->rx_queues[0]) * nb_queues, 816 RTE_CACHE_LINE_SIZE); 817 if (dev->data->rx_queues == NULL) { 818 dev->data->nb_rx_queues = 0; 819 return -(ENOMEM); 820 } 821 } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */ 822 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP); 823 824 rxq = dev->data->rx_queues; 825 826 for (i = nb_queues; i < old_nb_queues; i++) 827 (*dev->dev_ops->rx_queue_release)(rxq[i]); 828 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues, 829 RTE_CACHE_LINE_SIZE); 830 if (rxq == NULL) 831 return -(ENOMEM); 832 if (nb_queues > old_nb_queues) { 833 uint16_t new_qs = nb_queues - old_nb_queues; 834 835 memset(rxq + old_nb_queues, 0, 836 sizeof(rxq[0]) * new_qs); 837 } 838 839 dev->data->rx_queues = rxq; 840 841 } else if (dev->data->rx_queues != NULL && nb_queues == 0) { 842 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP); 843 844 rxq = dev->data->rx_queues; 845 846 for (i = nb_queues; i < old_nb_queues; i++) 847 (*dev->dev_ops->rx_queue_release)(rxq[i]); 848 849 rte_free(dev->data->rx_queues); 850 dev->data->rx_queues = NULL; 851 } 852 dev->data->nb_rx_queues = nb_queues; 853 return 0; 854 } 855 856 int 857 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id) 858 { 859 struct rte_eth_dev *dev; 860 861 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 862 863 dev = &rte_eth_devices[port_id]; 864 if (!dev->data->dev_started) { 865 RTE_ETHDEV_LOG(ERR, 866 "Port %u must be started before start any queue\n", 867 port_id); 868 return -EINVAL; 869 } 870 871 if (rx_queue_id >= dev->data->nb_rx_queues) { 872 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id); 873 return -EINVAL; 874 } 875 876 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP); 877 878 if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 879 RTE_ETHDEV_LOG(INFO, 880 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 881 rx_queue_id, port_id); 882 return 0; 883 } 884 885 return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, 886 rx_queue_id)); 887 888 } 889 890 int 891 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id) 892 { 893 struct rte_eth_dev *dev; 894 895 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 896 897 dev = &rte_eth_devices[port_id]; 898 if (rx_queue_id >= dev->data->nb_rx_queues) { 899 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id); 900 return -EINVAL; 901 } 902 903 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP); 904 905 if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 906 RTE_ETHDEV_LOG(INFO, 907 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 908 rx_queue_id, port_id); 909 return 0; 910 } 911 912 return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id)); 913 914 } 915 916 int 917 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id) 918 { 919 struct rte_eth_dev *dev; 920 921 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 922 923 dev = &rte_eth_devices[port_id]; 924 if (!dev->data->dev_started) { 925 RTE_ETHDEV_LOG(ERR, 926 "Port %u must be started before start any queue\n", 927 port_id); 928 return -EINVAL; 929 } 930 931 if (tx_queue_id >= dev->data->nb_tx_queues) { 932 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id); 933 return -EINVAL; 934 } 935 936 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP); 937 938 if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 939 RTE_ETHDEV_LOG(INFO, 940 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 941 tx_queue_id, port_id); 942 return 0; 943 } 944 945 return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id)); 946 } 947 948 int 949 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id) 950 { 951 struct rte_eth_dev *dev; 952 953 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 954 955 dev = &rte_eth_devices[port_id]; 956 if (tx_queue_id >= dev->data->nb_tx_queues) { 957 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id); 958 return -EINVAL; 959 } 960 961 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP); 962 963 if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 964 RTE_ETHDEV_LOG(INFO, 965 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 966 tx_queue_id, port_id); 967 return 0; 968 } 969 970 return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id)); 971 972 } 973 974 static int 975 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) 976 { 977 uint16_t old_nb_queues = dev->data->nb_tx_queues; 978 void **txq; 979 unsigned i; 980 981 if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */ 982 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues", 983 sizeof(dev->data->tx_queues[0]) * nb_queues, 984 RTE_CACHE_LINE_SIZE); 985 if (dev->data->tx_queues == NULL) { 986 dev->data->nb_tx_queues = 0; 987 return -(ENOMEM); 988 } 989 } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */ 990 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP); 991 992 txq = dev->data->tx_queues; 993 994 for (i = nb_queues; i < old_nb_queues; i++) 995 (*dev->dev_ops->tx_queue_release)(txq[i]); 996 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues, 997 RTE_CACHE_LINE_SIZE); 998 if (txq == NULL) 999 return -ENOMEM; 1000 if (nb_queues > old_nb_queues) { 1001 uint16_t new_qs = nb_queues - old_nb_queues; 1002 1003 memset(txq + old_nb_queues, 0, 1004 sizeof(txq[0]) * new_qs); 1005 } 1006 1007 dev->data->tx_queues = txq; 1008 1009 } else if (dev->data->tx_queues != NULL && nb_queues == 0) { 1010 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP); 1011 1012 txq = dev->data->tx_queues; 1013 1014 for (i = nb_queues; i < old_nb_queues; i++) 1015 (*dev->dev_ops->tx_queue_release)(txq[i]); 1016 1017 rte_free(dev->data->tx_queues); 1018 dev->data->tx_queues = NULL; 1019 } 1020 dev->data->nb_tx_queues = nb_queues; 1021 return 0; 1022 } 1023 1024 uint32_t 1025 rte_eth_speed_bitflag(uint32_t speed, int duplex) 1026 { 1027 switch (speed) { 1028 case ETH_SPEED_NUM_10M: 1029 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD; 1030 case ETH_SPEED_NUM_100M: 1031 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD; 1032 case ETH_SPEED_NUM_1G: 1033 return ETH_LINK_SPEED_1G; 1034 case ETH_SPEED_NUM_2_5G: 1035 return ETH_LINK_SPEED_2_5G; 1036 case ETH_SPEED_NUM_5G: 1037 return ETH_LINK_SPEED_5G; 1038 case ETH_SPEED_NUM_10G: 1039 return ETH_LINK_SPEED_10G; 1040 case ETH_SPEED_NUM_20G: 1041 return ETH_LINK_SPEED_20G; 1042 case ETH_SPEED_NUM_25G: 1043 return ETH_LINK_SPEED_25G; 1044 case ETH_SPEED_NUM_40G: 1045 return ETH_LINK_SPEED_40G; 1046 case ETH_SPEED_NUM_50G: 1047 return ETH_LINK_SPEED_50G; 1048 case ETH_SPEED_NUM_56G: 1049 return ETH_LINK_SPEED_56G; 1050 case ETH_SPEED_NUM_100G: 1051 return ETH_LINK_SPEED_100G; 1052 default: 1053 return 0; 1054 } 1055 } 1056 1057 const char * 1058 rte_eth_dev_rx_offload_name(uint64_t offload) 1059 { 1060 const char *name = "UNKNOWN"; 1061 unsigned int i; 1062 1063 for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) { 1064 if (offload == rte_rx_offload_names[i].offload) { 1065 name = rte_rx_offload_names[i].name; 1066 break; 1067 } 1068 } 1069 1070 return name; 1071 } 1072 1073 const char * 1074 rte_eth_dev_tx_offload_name(uint64_t offload) 1075 { 1076 const char *name = "UNKNOWN"; 1077 unsigned int i; 1078 1079 for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) { 1080 if (offload == rte_tx_offload_names[i].offload) { 1081 name = rte_tx_offload_names[i].name; 1082 break; 1083 } 1084 } 1085 1086 return name; 1087 } 1088 1089 int 1090 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, 1091 const struct rte_eth_conf *dev_conf) 1092 { 1093 struct rte_eth_dev *dev; 1094 struct rte_eth_dev_info dev_info; 1095 struct rte_eth_conf orig_conf; 1096 int diag; 1097 int ret; 1098 1099 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1100 1101 dev = &rte_eth_devices[port_id]; 1102 1103 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 1104 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP); 1105 1106 if (dev->data->dev_started) { 1107 RTE_ETHDEV_LOG(ERR, 1108 "Port %u must be stopped to allow configuration\n", 1109 port_id); 1110 return -EBUSY; 1111 } 1112 1113 /* Store original config, as rollback required on failure */ 1114 memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf)); 1115 1116 /* 1117 * Copy the dev_conf parameter into the dev structure. 1118 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get 1119 */ 1120 memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf)); 1121 1122 rte_eth_dev_info_get(port_id, &dev_info); 1123 1124 /* If number of queues specified by application for both Rx and Tx is 1125 * zero, use driver preferred values. This cannot be done individually 1126 * as it is valid for either Tx or Rx (but not both) to be zero. 1127 * If driver does not provide any preferred valued, fall back on 1128 * EAL defaults. 1129 */ 1130 if (nb_rx_q == 0 && nb_tx_q == 0) { 1131 nb_rx_q = dev_info.default_rxportconf.nb_queues; 1132 if (nb_rx_q == 0) 1133 nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES; 1134 nb_tx_q = dev_info.default_txportconf.nb_queues; 1135 if (nb_tx_q == 0) 1136 nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES; 1137 } 1138 1139 if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) { 1140 RTE_ETHDEV_LOG(ERR, 1141 "Number of RX queues requested (%u) is greater than max supported(%d)\n", 1142 nb_rx_q, RTE_MAX_QUEUES_PER_PORT); 1143 ret = -EINVAL; 1144 goto rollback; 1145 } 1146 1147 if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) { 1148 RTE_ETHDEV_LOG(ERR, 1149 "Number of TX queues requested (%u) is greater than max supported(%d)\n", 1150 nb_tx_q, RTE_MAX_QUEUES_PER_PORT); 1151 ret = -EINVAL; 1152 goto rollback; 1153 } 1154 1155 /* 1156 * Check that the numbers of RX and TX queues are not greater 1157 * than the maximum number of RX and TX queues supported by the 1158 * configured device. 1159 */ 1160 if (nb_rx_q > dev_info.max_rx_queues) { 1161 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n", 1162 port_id, nb_rx_q, dev_info.max_rx_queues); 1163 ret = -EINVAL; 1164 goto rollback; 1165 } 1166 1167 if (nb_tx_q > dev_info.max_tx_queues) { 1168 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n", 1169 port_id, nb_tx_q, dev_info.max_tx_queues); 1170 ret = -EINVAL; 1171 goto rollback; 1172 } 1173 1174 /* Check that the device supports requested interrupts */ 1175 if ((dev_conf->intr_conf.lsc == 1) && 1176 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { 1177 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n", 1178 dev->device->driver->name); 1179 ret = -EINVAL; 1180 goto rollback; 1181 } 1182 if ((dev_conf->intr_conf.rmv == 1) && 1183 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { 1184 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n", 1185 dev->device->driver->name); 1186 ret = -EINVAL; 1187 goto rollback; 1188 } 1189 1190 /* 1191 * If jumbo frames are enabled, check that the maximum RX packet 1192 * length is supported by the configured device. 1193 */ 1194 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 1195 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) { 1196 RTE_ETHDEV_LOG(ERR, 1197 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n", 1198 port_id, dev_conf->rxmode.max_rx_pkt_len, 1199 dev_info.max_rx_pktlen); 1200 ret = -EINVAL; 1201 goto rollback; 1202 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) { 1203 RTE_ETHDEV_LOG(ERR, 1204 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n", 1205 port_id, dev_conf->rxmode.max_rx_pkt_len, 1206 (unsigned)ETHER_MIN_LEN); 1207 ret = -EINVAL; 1208 goto rollback; 1209 } 1210 } else { 1211 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN || 1212 dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN) 1213 /* Use default value */ 1214 dev->data->dev_conf.rxmode.max_rx_pkt_len = 1215 ETHER_MAX_LEN; 1216 } 1217 1218 /* Any requested offloading must be within its device capabilities */ 1219 if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) != 1220 dev_conf->rxmode.offloads) { 1221 RTE_ETHDEV_LOG(ERR, 1222 "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads " 1223 "capabilities 0x%"PRIx64" in %s()\n", 1224 port_id, dev_conf->rxmode.offloads, 1225 dev_info.rx_offload_capa, 1226 __func__); 1227 ret = -EINVAL; 1228 goto rollback; 1229 } 1230 if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) != 1231 dev_conf->txmode.offloads) { 1232 RTE_ETHDEV_LOG(ERR, 1233 "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads " 1234 "capabilities 0x%"PRIx64" in %s()\n", 1235 port_id, dev_conf->txmode.offloads, 1236 dev_info.tx_offload_capa, 1237 __func__); 1238 ret = -EINVAL; 1239 goto rollback; 1240 } 1241 1242 /* Check that device supports requested rss hash functions. */ 1243 if ((dev_info.flow_type_rss_offloads | 1244 dev_conf->rx_adv_conf.rss_conf.rss_hf) != 1245 dev_info.flow_type_rss_offloads) { 1246 RTE_ETHDEV_LOG(ERR, 1247 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 1248 port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf, 1249 dev_info.flow_type_rss_offloads); 1250 ret = -EINVAL; 1251 goto rollback; 1252 } 1253 1254 /* 1255 * Setup new number of RX/TX queues and reconfigure device. 1256 */ 1257 diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q); 1258 if (diag != 0) { 1259 RTE_ETHDEV_LOG(ERR, 1260 "Port%u rte_eth_dev_rx_queue_config = %d\n", 1261 port_id, diag); 1262 ret = diag; 1263 goto rollback; 1264 } 1265 1266 diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q); 1267 if (diag != 0) { 1268 RTE_ETHDEV_LOG(ERR, 1269 "Port%u rte_eth_dev_tx_queue_config = %d\n", 1270 port_id, diag); 1271 rte_eth_dev_rx_queue_config(dev, 0); 1272 ret = diag; 1273 goto rollback; 1274 } 1275 1276 diag = (*dev->dev_ops->dev_configure)(dev); 1277 if (diag != 0) { 1278 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n", 1279 port_id, diag); 1280 rte_eth_dev_rx_queue_config(dev, 0); 1281 rte_eth_dev_tx_queue_config(dev, 0); 1282 ret = eth_err(port_id, diag); 1283 goto rollback; 1284 } 1285 1286 /* Initialize Rx profiling if enabled at compilation time. */ 1287 diag = __rte_eth_dev_profile_init(port_id, dev); 1288 if (diag != 0) { 1289 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n", 1290 port_id, diag); 1291 rte_eth_dev_rx_queue_config(dev, 0); 1292 rte_eth_dev_tx_queue_config(dev, 0); 1293 ret = eth_err(port_id, diag); 1294 goto rollback; 1295 } 1296 1297 return 0; 1298 1299 rollback: 1300 memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf)); 1301 1302 return ret; 1303 } 1304 1305 void 1306 _rte_eth_dev_reset(struct rte_eth_dev *dev) 1307 { 1308 if (dev->data->dev_started) { 1309 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n", 1310 dev->data->port_id); 1311 return; 1312 } 1313 1314 rte_eth_dev_rx_queue_config(dev, 0); 1315 rte_eth_dev_tx_queue_config(dev, 0); 1316 1317 memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf)); 1318 } 1319 1320 static void 1321 rte_eth_dev_mac_restore(struct rte_eth_dev *dev, 1322 struct rte_eth_dev_info *dev_info) 1323 { 1324 struct ether_addr *addr; 1325 uint16_t i; 1326 uint32_t pool = 0; 1327 uint64_t pool_mask; 1328 1329 /* replay MAC address configuration including default MAC */ 1330 addr = &dev->data->mac_addrs[0]; 1331 if (*dev->dev_ops->mac_addr_set != NULL) 1332 (*dev->dev_ops->mac_addr_set)(dev, addr); 1333 else if (*dev->dev_ops->mac_addr_add != NULL) 1334 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); 1335 1336 if (*dev->dev_ops->mac_addr_add != NULL) { 1337 for (i = 1; i < dev_info->max_mac_addrs; i++) { 1338 addr = &dev->data->mac_addrs[i]; 1339 1340 /* skip zero address */ 1341 if (is_zero_ether_addr(addr)) 1342 continue; 1343 1344 pool = 0; 1345 pool_mask = dev->data->mac_pool_sel[i]; 1346 1347 do { 1348 if (pool_mask & 1ULL) 1349 (*dev->dev_ops->mac_addr_add)(dev, 1350 addr, i, pool); 1351 pool_mask >>= 1; 1352 pool++; 1353 } while (pool_mask); 1354 } 1355 } 1356 } 1357 1358 static void 1359 rte_eth_dev_config_restore(struct rte_eth_dev *dev, 1360 struct rte_eth_dev_info *dev_info, uint16_t port_id) 1361 { 1362 if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)) 1363 rte_eth_dev_mac_restore(dev, dev_info); 1364 1365 /* replay promiscuous configuration */ 1366 if (rte_eth_promiscuous_get(port_id) == 1) 1367 rte_eth_promiscuous_enable(port_id); 1368 else if (rte_eth_promiscuous_get(port_id) == 0) 1369 rte_eth_promiscuous_disable(port_id); 1370 1371 /* replay all multicast configuration */ 1372 if (rte_eth_allmulticast_get(port_id) == 1) 1373 rte_eth_allmulticast_enable(port_id); 1374 else if (rte_eth_allmulticast_get(port_id) == 0) 1375 rte_eth_allmulticast_disable(port_id); 1376 } 1377 1378 int 1379 rte_eth_dev_start(uint16_t port_id) 1380 { 1381 struct rte_eth_dev *dev; 1382 struct rte_eth_dev_info dev_info; 1383 int diag; 1384 1385 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1386 1387 dev = &rte_eth_devices[port_id]; 1388 1389 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP); 1390 1391 if (dev->data->dev_started != 0) { 1392 RTE_ETHDEV_LOG(INFO, 1393 "Device with port_id=%"PRIu16" already started\n", 1394 port_id); 1395 return 0; 1396 } 1397 1398 rte_eth_dev_info_get(port_id, &dev_info); 1399 1400 /* Lets restore MAC now if device does not support live change */ 1401 if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR) 1402 rte_eth_dev_mac_restore(dev, &dev_info); 1403 1404 diag = (*dev->dev_ops->dev_start)(dev); 1405 if (diag == 0) 1406 dev->data->dev_started = 1; 1407 else 1408 return eth_err(port_id, diag); 1409 1410 rte_eth_dev_config_restore(dev, &dev_info, port_id); 1411 1412 if (dev->data->dev_conf.intr_conf.lsc == 0) { 1413 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 1414 (*dev->dev_ops->link_update)(dev, 0); 1415 } 1416 return 0; 1417 } 1418 1419 void 1420 rte_eth_dev_stop(uint16_t port_id) 1421 { 1422 struct rte_eth_dev *dev; 1423 1424 RTE_ETH_VALID_PORTID_OR_RET(port_id); 1425 dev = &rte_eth_devices[port_id]; 1426 1427 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop); 1428 1429 if (dev->data->dev_started == 0) { 1430 RTE_ETHDEV_LOG(INFO, 1431 "Device with port_id=%"PRIu16" already stopped\n", 1432 port_id); 1433 return; 1434 } 1435 1436 dev->data->dev_started = 0; 1437 (*dev->dev_ops->dev_stop)(dev); 1438 } 1439 1440 int 1441 rte_eth_dev_set_link_up(uint16_t port_id) 1442 { 1443 struct rte_eth_dev *dev; 1444 1445 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1446 1447 dev = &rte_eth_devices[port_id]; 1448 1449 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP); 1450 return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev)); 1451 } 1452 1453 int 1454 rte_eth_dev_set_link_down(uint16_t port_id) 1455 { 1456 struct rte_eth_dev *dev; 1457 1458 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1459 1460 dev = &rte_eth_devices[port_id]; 1461 1462 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP); 1463 return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev)); 1464 } 1465 1466 void 1467 rte_eth_dev_close(uint16_t port_id) 1468 { 1469 struct rte_eth_dev *dev; 1470 1471 RTE_ETH_VALID_PORTID_OR_RET(port_id); 1472 dev = &rte_eth_devices[port_id]; 1473 1474 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close); 1475 dev->data->dev_started = 0; 1476 (*dev->dev_ops->dev_close)(dev); 1477 1478 /* check behaviour flag - temporary for PMD migration */ 1479 if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) { 1480 /* new behaviour: send event + reset state + free all data */ 1481 rte_eth_dev_release_port(dev); 1482 return; 1483 } 1484 RTE_ETHDEV_LOG(DEBUG, "Port closing is using an old behaviour.\n" 1485 "The driver %s should migrate to the new behaviour.\n", 1486 dev->device->driver->name); 1487 /* old behaviour: only free queue arrays */ 1488 dev->data->nb_rx_queues = 0; 1489 rte_free(dev->data->rx_queues); 1490 dev->data->rx_queues = NULL; 1491 dev->data->nb_tx_queues = 0; 1492 rte_free(dev->data->tx_queues); 1493 dev->data->tx_queues = NULL; 1494 } 1495 1496 int 1497 rte_eth_dev_reset(uint16_t port_id) 1498 { 1499 struct rte_eth_dev *dev; 1500 int ret; 1501 1502 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1503 dev = &rte_eth_devices[port_id]; 1504 1505 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP); 1506 1507 rte_eth_dev_stop(port_id); 1508 ret = dev->dev_ops->dev_reset(dev); 1509 1510 return eth_err(port_id, ret); 1511 } 1512 1513 int __rte_experimental 1514 rte_eth_dev_is_removed(uint16_t port_id) 1515 { 1516 struct rte_eth_dev *dev; 1517 int ret; 1518 1519 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); 1520 1521 dev = &rte_eth_devices[port_id]; 1522 1523 if (dev->state == RTE_ETH_DEV_REMOVED) 1524 return 1; 1525 1526 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0); 1527 1528 ret = dev->dev_ops->is_removed(dev); 1529 if (ret != 0) 1530 /* Device is physically removed. */ 1531 dev->state = RTE_ETH_DEV_REMOVED; 1532 1533 return ret; 1534 } 1535 1536 int 1537 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 1538 uint16_t nb_rx_desc, unsigned int socket_id, 1539 const struct rte_eth_rxconf *rx_conf, 1540 struct rte_mempool *mp) 1541 { 1542 int ret; 1543 uint32_t mbp_buf_size; 1544 struct rte_eth_dev *dev; 1545 struct rte_eth_dev_info dev_info; 1546 struct rte_eth_rxconf local_conf; 1547 void **rxq; 1548 1549 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1550 1551 dev = &rte_eth_devices[port_id]; 1552 if (rx_queue_id >= dev->data->nb_rx_queues) { 1553 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id); 1554 return -EINVAL; 1555 } 1556 1557 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 1558 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP); 1559 1560 /* 1561 * Check the size of the mbuf data buffer. 1562 * This value must be provided in the private data of the memory pool. 1563 * First check that the memory pool has a valid private data. 1564 */ 1565 rte_eth_dev_info_get(port_id, &dev_info); 1566 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) { 1567 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n", 1568 mp->name, (int)mp->private_data_size, 1569 (int)sizeof(struct rte_pktmbuf_pool_private)); 1570 return -ENOSPC; 1571 } 1572 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 1573 1574 if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) { 1575 RTE_ETHDEV_LOG(ERR, 1576 "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n", 1577 mp->name, (int)mbp_buf_size, 1578 (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize), 1579 (int)RTE_PKTMBUF_HEADROOM, 1580 (int)dev_info.min_rx_bufsize); 1581 return -EINVAL; 1582 } 1583 1584 /* Use default specified by driver, if nb_rx_desc is zero */ 1585 if (nb_rx_desc == 0) { 1586 nb_rx_desc = dev_info.default_rxportconf.ring_size; 1587 /* If driver default is also zero, fall back on EAL default */ 1588 if (nb_rx_desc == 0) 1589 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 1590 } 1591 1592 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 1593 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 1594 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 1595 1596 RTE_ETHDEV_LOG(ERR, 1597 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, = %hu, and a product of %hu\n", 1598 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 1599 dev_info.rx_desc_lim.nb_min, 1600 dev_info.rx_desc_lim.nb_align); 1601 return -EINVAL; 1602 } 1603 1604 if (dev->data->dev_started && 1605 !(dev_info.dev_capa & 1606 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 1607 return -EBUSY; 1608 1609 if (dev->data->dev_started && 1610 (dev->data->rx_queue_state[rx_queue_id] != 1611 RTE_ETH_QUEUE_STATE_STOPPED)) 1612 return -EBUSY; 1613 1614 rxq = dev->data->rx_queues; 1615 if (rxq[rx_queue_id]) { 1616 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, 1617 -ENOTSUP); 1618 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]); 1619 rxq[rx_queue_id] = NULL; 1620 } 1621 1622 if (rx_conf == NULL) 1623 rx_conf = &dev_info.default_rxconf; 1624 1625 local_conf = *rx_conf; 1626 1627 /* 1628 * If an offloading has already been enabled in 1629 * rte_eth_dev_configure(), it has been enabled on all queues, 1630 * so there is no need to enable it in this queue again. 1631 * The local_conf.offloads input to underlying PMD only carries 1632 * those offloadings which are only enabled on this queue and 1633 * not enabled on all queues. 1634 */ 1635 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 1636 1637 /* 1638 * New added offloadings for this queue are those not enabled in 1639 * rte_eth_dev_configure() and they must be per-queue type. 1640 * A pure per-port offloading can't be enabled on a queue while 1641 * disabled on another queue. A pure per-port offloading can't 1642 * be enabled for any queue as new added one if it hasn't been 1643 * enabled in rte_eth_dev_configure(). 1644 */ 1645 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 1646 local_conf.offloads) { 1647 RTE_ETHDEV_LOG(ERR, 1648 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 1649 "within pre-queue offload capabilities 0x%"PRIx64" in %s()\n", 1650 port_id, rx_queue_id, local_conf.offloads, 1651 dev_info.rx_queue_offload_capa, 1652 __func__); 1653 return -EINVAL; 1654 } 1655 1656 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 1657 socket_id, &local_conf, mp); 1658 if (!ret) { 1659 if (!dev->data->min_rx_buf_size || 1660 dev->data->min_rx_buf_size > mbp_buf_size) 1661 dev->data->min_rx_buf_size = mbp_buf_size; 1662 } 1663 1664 return eth_err(port_id, ret); 1665 } 1666 1667 int 1668 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 1669 uint16_t nb_tx_desc, unsigned int socket_id, 1670 const struct rte_eth_txconf *tx_conf) 1671 { 1672 struct rte_eth_dev *dev; 1673 struct rte_eth_dev_info dev_info; 1674 struct rte_eth_txconf local_conf; 1675 void **txq; 1676 1677 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1678 1679 dev = &rte_eth_devices[port_id]; 1680 if (tx_queue_id >= dev->data->nb_tx_queues) { 1681 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id); 1682 return -EINVAL; 1683 } 1684 1685 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 1686 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP); 1687 1688 rte_eth_dev_info_get(port_id, &dev_info); 1689 1690 /* Use default specified by driver, if nb_tx_desc is zero */ 1691 if (nb_tx_desc == 0) { 1692 nb_tx_desc = dev_info.default_txportconf.ring_size; 1693 /* If driver default is zero, fall back on EAL default */ 1694 if (nb_tx_desc == 0) 1695 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 1696 } 1697 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 1698 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 1699 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 1700 RTE_ETHDEV_LOG(ERR, 1701 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, = %hu, and a product of %hu\n", 1702 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 1703 dev_info.tx_desc_lim.nb_min, 1704 dev_info.tx_desc_lim.nb_align); 1705 return -EINVAL; 1706 } 1707 1708 if (dev->data->dev_started && 1709 !(dev_info.dev_capa & 1710 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 1711 return -EBUSY; 1712 1713 if (dev->data->dev_started && 1714 (dev->data->tx_queue_state[tx_queue_id] != 1715 RTE_ETH_QUEUE_STATE_STOPPED)) 1716 return -EBUSY; 1717 1718 txq = dev->data->tx_queues; 1719 if (txq[tx_queue_id]) { 1720 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, 1721 -ENOTSUP); 1722 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]); 1723 txq[tx_queue_id] = NULL; 1724 } 1725 1726 if (tx_conf == NULL) 1727 tx_conf = &dev_info.default_txconf; 1728 1729 local_conf = *tx_conf; 1730 1731 /* 1732 * If an offloading has already been enabled in 1733 * rte_eth_dev_configure(), it has been enabled on all queues, 1734 * so there is no need to enable it in this queue again. 1735 * The local_conf.offloads input to underlying PMD only carries 1736 * those offloadings which are only enabled on this queue and 1737 * not enabled on all queues. 1738 */ 1739 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 1740 1741 /* 1742 * New added offloadings for this queue are those not enabled in 1743 * rte_eth_dev_configure() and they must be per-queue type. 1744 * A pure per-port offloading can't be enabled on a queue while 1745 * disabled on another queue. A pure per-port offloading can't 1746 * be enabled for any queue as new added one if it hasn't been 1747 * enabled in rte_eth_dev_configure(). 1748 */ 1749 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 1750 local_conf.offloads) { 1751 RTE_ETHDEV_LOG(ERR, 1752 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 1753 "within pre-queue offload capabilities 0x%"PRIx64" in %s()\n", 1754 port_id, tx_queue_id, local_conf.offloads, 1755 dev_info.tx_queue_offload_capa, 1756 __func__); 1757 return -EINVAL; 1758 } 1759 1760 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 1761 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 1762 } 1763 1764 void 1765 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 1766 void *userdata __rte_unused) 1767 { 1768 unsigned i; 1769 1770 for (i = 0; i < unsent; i++) 1771 rte_pktmbuf_free(pkts[i]); 1772 } 1773 1774 void 1775 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 1776 void *userdata) 1777 { 1778 uint64_t *count = userdata; 1779 unsigned i; 1780 1781 for (i = 0; i < unsent; i++) 1782 rte_pktmbuf_free(pkts[i]); 1783 1784 *count += unsent; 1785 } 1786 1787 int 1788 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 1789 buffer_tx_error_fn cbfn, void *userdata) 1790 { 1791 buffer->error_callback = cbfn; 1792 buffer->error_userdata = userdata; 1793 return 0; 1794 } 1795 1796 int 1797 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 1798 { 1799 int ret = 0; 1800 1801 if (buffer == NULL) 1802 return -EINVAL; 1803 1804 buffer->size = size; 1805 if (buffer->error_callback == NULL) { 1806 ret = rte_eth_tx_buffer_set_err_callback( 1807 buffer, rte_eth_tx_buffer_drop_callback, NULL); 1808 } 1809 1810 return ret; 1811 } 1812 1813 int 1814 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 1815 { 1816 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 1817 int ret; 1818 1819 /* Validate Input Data. Bail if not valid or not supported. */ 1820 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1821 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 1822 1823 /* Call driver to free pending mbufs. */ 1824 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 1825 free_cnt); 1826 return eth_err(port_id, ret); 1827 } 1828 1829 void 1830 rte_eth_promiscuous_enable(uint16_t port_id) 1831 { 1832 struct rte_eth_dev *dev; 1833 1834 RTE_ETH_VALID_PORTID_OR_RET(port_id); 1835 dev = &rte_eth_devices[port_id]; 1836 1837 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable); 1838 (*dev->dev_ops->promiscuous_enable)(dev); 1839 dev->data->promiscuous = 1; 1840 } 1841 1842 void 1843 rte_eth_promiscuous_disable(uint16_t port_id) 1844 { 1845 struct rte_eth_dev *dev; 1846 1847 RTE_ETH_VALID_PORTID_OR_RET(port_id); 1848 dev = &rte_eth_devices[port_id]; 1849 1850 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable); 1851 dev->data->promiscuous = 0; 1852 (*dev->dev_ops->promiscuous_disable)(dev); 1853 } 1854 1855 int 1856 rte_eth_promiscuous_get(uint16_t port_id) 1857 { 1858 struct rte_eth_dev *dev; 1859 1860 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1861 1862 dev = &rte_eth_devices[port_id]; 1863 return dev->data->promiscuous; 1864 } 1865 1866 void 1867 rte_eth_allmulticast_enable(uint16_t port_id) 1868 { 1869 struct rte_eth_dev *dev; 1870 1871 RTE_ETH_VALID_PORTID_OR_RET(port_id); 1872 dev = &rte_eth_devices[port_id]; 1873 1874 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable); 1875 (*dev->dev_ops->allmulticast_enable)(dev); 1876 dev->data->all_multicast = 1; 1877 } 1878 1879 void 1880 rte_eth_allmulticast_disable(uint16_t port_id) 1881 { 1882 struct rte_eth_dev *dev; 1883 1884 RTE_ETH_VALID_PORTID_OR_RET(port_id); 1885 dev = &rte_eth_devices[port_id]; 1886 1887 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable); 1888 dev->data->all_multicast = 0; 1889 (*dev->dev_ops->allmulticast_disable)(dev); 1890 } 1891 1892 int 1893 rte_eth_allmulticast_get(uint16_t port_id) 1894 { 1895 struct rte_eth_dev *dev; 1896 1897 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1898 1899 dev = &rte_eth_devices[port_id]; 1900 return dev->data->all_multicast; 1901 } 1902 1903 void 1904 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 1905 { 1906 struct rte_eth_dev *dev; 1907 1908 RTE_ETH_VALID_PORTID_OR_RET(port_id); 1909 dev = &rte_eth_devices[port_id]; 1910 1911 if (dev->data->dev_conf.intr_conf.lsc && 1912 dev->data->dev_started) 1913 rte_eth_linkstatus_get(dev, eth_link); 1914 else { 1915 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update); 1916 (*dev->dev_ops->link_update)(dev, 1); 1917 *eth_link = dev->data->dev_link; 1918 } 1919 } 1920 1921 void 1922 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 1923 { 1924 struct rte_eth_dev *dev; 1925 1926 RTE_ETH_VALID_PORTID_OR_RET(port_id); 1927 dev = &rte_eth_devices[port_id]; 1928 1929 if (dev->data->dev_conf.intr_conf.lsc && 1930 dev->data->dev_started) 1931 rte_eth_linkstatus_get(dev, eth_link); 1932 else { 1933 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update); 1934 (*dev->dev_ops->link_update)(dev, 0); 1935 *eth_link = dev->data->dev_link; 1936 } 1937 } 1938 1939 int 1940 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 1941 { 1942 struct rte_eth_dev *dev; 1943 1944 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1945 1946 dev = &rte_eth_devices[port_id]; 1947 memset(stats, 0, sizeof(*stats)); 1948 1949 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 1950 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 1951 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 1952 } 1953 1954 int 1955 rte_eth_stats_reset(uint16_t port_id) 1956 { 1957 struct rte_eth_dev *dev; 1958 1959 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1960 dev = &rte_eth_devices[port_id]; 1961 1962 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 1963 (*dev->dev_ops->stats_reset)(dev); 1964 dev->data->rx_mbuf_alloc_failed = 0; 1965 1966 return 0; 1967 } 1968 1969 static inline int 1970 get_xstats_basic_count(struct rte_eth_dev *dev) 1971 { 1972 uint16_t nb_rxqs, nb_txqs; 1973 int count; 1974 1975 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 1976 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 1977 1978 count = RTE_NB_STATS; 1979 count += nb_rxqs * RTE_NB_RXQ_STATS; 1980 count += nb_txqs * RTE_NB_TXQ_STATS; 1981 1982 return count; 1983 } 1984 1985 static int 1986 get_xstats_count(uint16_t port_id) 1987 { 1988 struct rte_eth_dev *dev; 1989 int count; 1990 1991 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1992 dev = &rte_eth_devices[port_id]; 1993 if (dev->dev_ops->xstats_get_names_by_id != NULL) { 1994 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL, 1995 NULL, 0); 1996 if (count < 0) 1997 return eth_err(port_id, count); 1998 } 1999 if (dev->dev_ops->xstats_get_names != NULL) { 2000 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2001 if (count < 0) 2002 return eth_err(port_id, count); 2003 } else 2004 count = 0; 2005 2006 2007 count += get_xstats_basic_count(dev); 2008 2009 return count; 2010 } 2011 2012 int 2013 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2014 uint64_t *id) 2015 { 2016 int cnt_xstats, idx_xstat; 2017 2018 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2019 2020 if (!id) { 2021 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n"); 2022 return -ENOMEM; 2023 } 2024 2025 if (!xstat_name) { 2026 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n"); 2027 return -ENOMEM; 2028 } 2029 2030 /* Get count */ 2031 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2032 if (cnt_xstats < 0) { 2033 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2034 return -ENODEV; 2035 } 2036 2037 /* Get id-name lookup table */ 2038 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2039 2040 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2041 port_id, xstats_names, cnt_xstats, NULL)) { 2042 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2043 return -1; 2044 } 2045 2046 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2047 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2048 *id = idx_xstat; 2049 return 0; 2050 }; 2051 } 2052 2053 return -EINVAL; 2054 } 2055 2056 /* retrieve basic stats names */ 2057 static int 2058 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev, 2059 struct rte_eth_xstat_name *xstats_names) 2060 { 2061 int cnt_used_entries = 0; 2062 uint32_t idx, id_queue; 2063 uint16_t num_q; 2064 2065 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2066 snprintf(xstats_names[cnt_used_entries].name, 2067 sizeof(xstats_names[0].name), 2068 "%s", rte_stats_strings[idx].name); 2069 cnt_used_entries++; 2070 } 2071 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2072 for (id_queue = 0; id_queue < num_q; id_queue++) { 2073 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2074 snprintf(xstats_names[cnt_used_entries].name, 2075 sizeof(xstats_names[0].name), 2076 "rx_q%u%s", 2077 id_queue, rte_rxq_stats_strings[idx].name); 2078 cnt_used_entries++; 2079 } 2080 2081 } 2082 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2083 for (id_queue = 0; id_queue < num_q; id_queue++) { 2084 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 2085 snprintf(xstats_names[cnt_used_entries].name, 2086 sizeof(xstats_names[0].name), 2087 "tx_q%u%s", 2088 id_queue, rte_txq_stats_strings[idx].name); 2089 cnt_used_entries++; 2090 } 2091 } 2092 return cnt_used_entries; 2093 } 2094 2095 /* retrieve ethdev extended statistics names */ 2096 int 2097 rte_eth_xstats_get_names_by_id(uint16_t port_id, 2098 struct rte_eth_xstat_name *xstats_names, unsigned int size, 2099 uint64_t *ids) 2100 { 2101 struct rte_eth_xstat_name *xstats_names_copy; 2102 unsigned int no_basic_stat_requested = 1; 2103 unsigned int no_ext_stat_requested = 1; 2104 unsigned int expected_entries; 2105 unsigned int basic_count; 2106 struct rte_eth_dev *dev; 2107 unsigned int i; 2108 int ret; 2109 2110 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2111 dev = &rte_eth_devices[port_id]; 2112 2113 basic_count = get_xstats_basic_count(dev); 2114 ret = get_xstats_count(port_id); 2115 if (ret < 0) 2116 return ret; 2117 expected_entries = (unsigned int)ret; 2118 2119 /* Return max number of stats if no ids given */ 2120 if (!ids) { 2121 if (!xstats_names) 2122 return expected_entries; 2123 else if (xstats_names && size < expected_entries) 2124 return expected_entries; 2125 } 2126 2127 if (ids && !xstats_names) 2128 return -EINVAL; 2129 2130 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 2131 uint64_t ids_copy[size]; 2132 2133 for (i = 0; i < size; i++) { 2134 if (ids[i] < basic_count) { 2135 no_basic_stat_requested = 0; 2136 break; 2137 } 2138 2139 /* 2140 * Convert ids to xstats ids that PMD knows. 2141 * ids known by user are basic + extended stats. 2142 */ 2143 ids_copy[i] = ids[i] - basic_count; 2144 } 2145 2146 if (no_basic_stat_requested) 2147 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 2148 xstats_names, ids_copy, size); 2149 } 2150 2151 /* Retrieve all stats */ 2152 if (!ids) { 2153 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 2154 expected_entries); 2155 if (num_stats < 0 || num_stats > (int)expected_entries) 2156 return num_stats; 2157 else 2158 return expected_entries; 2159 } 2160 2161 xstats_names_copy = calloc(expected_entries, 2162 sizeof(struct rte_eth_xstat_name)); 2163 2164 if (!xstats_names_copy) { 2165 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 2166 return -ENOMEM; 2167 } 2168 2169 if (ids) { 2170 for (i = 0; i < size; i++) { 2171 if (ids[i] >= basic_count) { 2172 no_ext_stat_requested = 0; 2173 break; 2174 } 2175 } 2176 } 2177 2178 /* Fill xstats_names_copy structure */ 2179 if (ids && no_ext_stat_requested) { 2180 rte_eth_basic_stats_get_names(dev, xstats_names_copy); 2181 } else { 2182 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 2183 expected_entries); 2184 if (ret < 0) { 2185 free(xstats_names_copy); 2186 return ret; 2187 } 2188 } 2189 2190 /* Filter stats */ 2191 for (i = 0; i < size; i++) { 2192 if (ids[i] >= expected_entries) { 2193 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 2194 free(xstats_names_copy); 2195 return -1; 2196 } 2197 xstats_names[i] = xstats_names_copy[ids[i]]; 2198 } 2199 2200 free(xstats_names_copy); 2201 return size; 2202 } 2203 2204 int 2205 rte_eth_xstats_get_names(uint16_t port_id, 2206 struct rte_eth_xstat_name *xstats_names, 2207 unsigned int size) 2208 { 2209 struct rte_eth_dev *dev; 2210 int cnt_used_entries; 2211 int cnt_expected_entries; 2212 int cnt_driver_entries; 2213 2214 cnt_expected_entries = get_xstats_count(port_id); 2215 if (xstats_names == NULL || cnt_expected_entries < 0 || 2216 (int)size < cnt_expected_entries) 2217 return cnt_expected_entries; 2218 2219 /* port_id checked in get_xstats_count() */ 2220 dev = &rte_eth_devices[port_id]; 2221 2222 cnt_used_entries = rte_eth_basic_stats_get_names( 2223 dev, xstats_names); 2224 2225 if (dev->dev_ops->xstats_get_names != NULL) { 2226 /* If there are any driver-specific xstats, append them 2227 * to end of list. 2228 */ 2229 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 2230 dev, 2231 xstats_names + cnt_used_entries, 2232 size - cnt_used_entries); 2233 if (cnt_driver_entries < 0) 2234 return eth_err(port_id, cnt_driver_entries); 2235 cnt_used_entries += cnt_driver_entries; 2236 } 2237 2238 return cnt_used_entries; 2239 } 2240 2241 2242 static int 2243 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 2244 { 2245 struct rte_eth_dev *dev; 2246 struct rte_eth_stats eth_stats; 2247 unsigned int count = 0, i, q; 2248 uint64_t val, *stats_ptr; 2249 uint16_t nb_rxqs, nb_txqs; 2250 int ret; 2251 2252 ret = rte_eth_stats_get(port_id, ð_stats); 2253 if (ret < 0) 2254 return ret; 2255 2256 dev = &rte_eth_devices[port_id]; 2257 2258 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2259 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2260 2261 /* global stats */ 2262 for (i = 0; i < RTE_NB_STATS; i++) { 2263 stats_ptr = RTE_PTR_ADD(ð_stats, 2264 rte_stats_strings[i].offset); 2265 val = *stats_ptr; 2266 xstats[count++].value = val; 2267 } 2268 2269 /* per-rxq stats */ 2270 for (q = 0; q < nb_rxqs; q++) { 2271 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 2272 stats_ptr = RTE_PTR_ADD(ð_stats, 2273 rte_rxq_stats_strings[i].offset + 2274 q * sizeof(uint64_t)); 2275 val = *stats_ptr; 2276 xstats[count++].value = val; 2277 } 2278 } 2279 2280 /* per-txq stats */ 2281 for (q = 0; q < nb_txqs; q++) { 2282 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 2283 stats_ptr = RTE_PTR_ADD(ð_stats, 2284 rte_txq_stats_strings[i].offset + 2285 q * sizeof(uint64_t)); 2286 val = *stats_ptr; 2287 xstats[count++].value = val; 2288 } 2289 } 2290 return count; 2291 } 2292 2293 /* retrieve ethdev extended statistics */ 2294 int 2295 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 2296 uint64_t *values, unsigned int size) 2297 { 2298 unsigned int no_basic_stat_requested = 1; 2299 unsigned int no_ext_stat_requested = 1; 2300 unsigned int num_xstats_filled; 2301 unsigned int basic_count; 2302 uint16_t expected_entries; 2303 struct rte_eth_dev *dev; 2304 unsigned int i; 2305 int ret; 2306 2307 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2308 ret = get_xstats_count(port_id); 2309 if (ret < 0) 2310 return ret; 2311 expected_entries = (uint16_t)ret; 2312 struct rte_eth_xstat xstats[expected_entries]; 2313 dev = &rte_eth_devices[port_id]; 2314 basic_count = get_xstats_basic_count(dev); 2315 2316 /* Return max number of stats if no ids given */ 2317 if (!ids) { 2318 if (!values) 2319 return expected_entries; 2320 else if (values && size < expected_entries) 2321 return expected_entries; 2322 } 2323 2324 if (ids && !values) 2325 return -EINVAL; 2326 2327 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 2328 unsigned int basic_count = get_xstats_basic_count(dev); 2329 uint64_t ids_copy[size]; 2330 2331 for (i = 0; i < size; i++) { 2332 if (ids[i] < basic_count) { 2333 no_basic_stat_requested = 0; 2334 break; 2335 } 2336 2337 /* 2338 * Convert ids to xstats ids that PMD knows. 2339 * ids known by user are basic + extended stats. 2340 */ 2341 ids_copy[i] = ids[i] - basic_count; 2342 } 2343 2344 if (no_basic_stat_requested) 2345 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 2346 values, size); 2347 } 2348 2349 if (ids) { 2350 for (i = 0; i < size; i++) { 2351 if (ids[i] >= basic_count) { 2352 no_ext_stat_requested = 0; 2353 break; 2354 } 2355 } 2356 } 2357 2358 /* Fill the xstats structure */ 2359 if (ids && no_ext_stat_requested) 2360 ret = rte_eth_basic_stats_get(port_id, xstats); 2361 else 2362 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 2363 2364 if (ret < 0) 2365 return ret; 2366 num_xstats_filled = (unsigned int)ret; 2367 2368 /* Return all stats */ 2369 if (!ids) { 2370 for (i = 0; i < num_xstats_filled; i++) 2371 values[i] = xstats[i].value; 2372 return expected_entries; 2373 } 2374 2375 /* Filter stats */ 2376 for (i = 0; i < size; i++) { 2377 if (ids[i] >= expected_entries) { 2378 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 2379 return -1; 2380 } 2381 values[i] = xstats[ids[i]].value; 2382 } 2383 return size; 2384 } 2385 2386 int 2387 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 2388 unsigned int n) 2389 { 2390 struct rte_eth_dev *dev; 2391 unsigned int count = 0, i; 2392 signed int xcount = 0; 2393 uint16_t nb_rxqs, nb_txqs; 2394 int ret; 2395 2396 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 2397 2398 dev = &rte_eth_devices[port_id]; 2399 2400 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2401 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2402 2403 /* Return generic statistics */ 2404 count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) + 2405 (nb_txqs * RTE_NB_TXQ_STATS); 2406 2407 /* implemented by the driver */ 2408 if (dev->dev_ops->xstats_get != NULL) { 2409 /* Retrieve the xstats from the driver at the end of the 2410 * xstats struct. 2411 */ 2412 xcount = (*dev->dev_ops->xstats_get)(dev, 2413 xstats ? xstats + count : NULL, 2414 (n > count) ? n - count : 0); 2415 2416 if (xcount < 0) 2417 return eth_err(port_id, xcount); 2418 } 2419 2420 if (n < count + xcount || xstats == NULL) 2421 return count + xcount; 2422 2423 /* now fill the xstats structure */ 2424 ret = rte_eth_basic_stats_get(port_id, xstats); 2425 if (ret < 0) 2426 return ret; 2427 count = ret; 2428 2429 for (i = 0; i < count; i++) 2430 xstats[i].id = i; 2431 /* add an offset to driver-specific stats */ 2432 for ( ; i < count + xcount; i++) 2433 xstats[i].id += count; 2434 2435 return count + xcount; 2436 } 2437 2438 /* reset ethdev extended statistics */ 2439 void 2440 rte_eth_xstats_reset(uint16_t port_id) 2441 { 2442 struct rte_eth_dev *dev; 2443 2444 RTE_ETH_VALID_PORTID_OR_RET(port_id); 2445 dev = &rte_eth_devices[port_id]; 2446 2447 /* implemented by the driver */ 2448 if (dev->dev_ops->xstats_reset != NULL) { 2449 (*dev->dev_ops->xstats_reset)(dev); 2450 return; 2451 } 2452 2453 /* fallback to default */ 2454 rte_eth_stats_reset(port_id); 2455 } 2456 2457 static int 2458 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx, 2459 uint8_t is_rx) 2460 { 2461 struct rte_eth_dev *dev; 2462 2463 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2464 2465 dev = &rte_eth_devices[port_id]; 2466 2467 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 2468 2469 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 2470 return -EINVAL; 2471 2472 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 2473 return -EINVAL; 2474 2475 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 2476 return -EINVAL; 2477 2478 return (*dev->dev_ops->queue_stats_mapping_set) 2479 (dev, queue_id, stat_idx, is_rx); 2480 } 2481 2482 2483 int 2484 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 2485 uint8_t stat_idx) 2486 { 2487 return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id, 2488 stat_idx, STAT_QMAP_TX)); 2489 } 2490 2491 2492 int 2493 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 2494 uint8_t stat_idx) 2495 { 2496 return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id, 2497 stat_idx, STAT_QMAP_RX)); 2498 } 2499 2500 int 2501 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 2502 { 2503 struct rte_eth_dev *dev; 2504 2505 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2506 dev = &rte_eth_devices[port_id]; 2507 2508 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 2509 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 2510 fw_version, fw_size)); 2511 } 2512 2513 void 2514 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 2515 { 2516 struct rte_eth_dev *dev; 2517 const struct rte_eth_desc_lim lim = { 2518 .nb_max = UINT16_MAX, 2519 .nb_min = 0, 2520 .nb_align = 1, 2521 }; 2522 2523 RTE_ETH_VALID_PORTID_OR_RET(port_id); 2524 dev = &rte_eth_devices[port_id]; 2525 2526 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 2527 dev_info->rx_desc_lim = lim; 2528 dev_info->tx_desc_lim = lim; 2529 dev_info->device = dev->device; 2530 2531 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get); 2532 (*dev->dev_ops->dev_infos_get)(dev, dev_info); 2533 dev_info->driver_name = dev->device->driver->name; 2534 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 2535 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 2536 2537 dev_info->dev_flags = &dev->data->dev_flags; 2538 } 2539 2540 int 2541 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 2542 uint32_t *ptypes, int num) 2543 { 2544 int i, j; 2545 struct rte_eth_dev *dev; 2546 const uint32_t *all_ptypes; 2547 2548 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2549 dev = &rte_eth_devices[port_id]; 2550 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 2551 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 2552 2553 if (!all_ptypes) 2554 return 0; 2555 2556 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 2557 if (all_ptypes[i] & ptype_mask) { 2558 if (j < num) 2559 ptypes[j] = all_ptypes[i]; 2560 j++; 2561 } 2562 2563 return j; 2564 } 2565 2566 void 2567 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr) 2568 { 2569 struct rte_eth_dev *dev; 2570 2571 RTE_ETH_VALID_PORTID_OR_RET(port_id); 2572 dev = &rte_eth_devices[port_id]; 2573 ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 2574 } 2575 2576 2577 int 2578 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 2579 { 2580 struct rte_eth_dev *dev; 2581 2582 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2583 2584 dev = &rte_eth_devices[port_id]; 2585 *mtu = dev->data->mtu; 2586 return 0; 2587 } 2588 2589 int 2590 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 2591 { 2592 int ret; 2593 struct rte_eth_dev *dev; 2594 2595 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2596 dev = &rte_eth_devices[port_id]; 2597 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 2598 2599 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 2600 if (!ret) 2601 dev->data->mtu = mtu; 2602 2603 return eth_err(port_id, ret); 2604 } 2605 2606 int 2607 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 2608 { 2609 struct rte_eth_dev *dev; 2610 int ret; 2611 2612 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2613 dev = &rte_eth_devices[port_id]; 2614 if (!(dev->data->dev_conf.rxmode.offloads & 2615 DEV_RX_OFFLOAD_VLAN_FILTER)) { 2616 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n", 2617 port_id); 2618 return -ENOSYS; 2619 } 2620 2621 if (vlan_id > 4095) { 2622 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 2623 port_id, vlan_id); 2624 return -EINVAL; 2625 } 2626 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 2627 2628 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 2629 if (ret == 0) { 2630 struct rte_vlan_filter_conf *vfc; 2631 int vidx; 2632 int vbit; 2633 2634 vfc = &dev->data->vlan_filter_conf; 2635 vidx = vlan_id / 64; 2636 vbit = vlan_id % 64; 2637 2638 if (on) 2639 vfc->ids[vidx] |= UINT64_C(1) << vbit; 2640 else 2641 vfc->ids[vidx] &= ~(UINT64_C(1) << vbit); 2642 } 2643 2644 return eth_err(port_id, ret); 2645 } 2646 2647 int 2648 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 2649 int on) 2650 { 2651 struct rte_eth_dev *dev; 2652 2653 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2654 dev = &rte_eth_devices[port_id]; 2655 if (rx_queue_id >= dev->data->nb_rx_queues) { 2656 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 2657 return -EINVAL; 2658 } 2659 2660 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 2661 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 2662 2663 return 0; 2664 } 2665 2666 int 2667 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 2668 enum rte_vlan_type vlan_type, 2669 uint16_t tpid) 2670 { 2671 struct rte_eth_dev *dev; 2672 2673 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2674 dev = &rte_eth_devices[port_id]; 2675 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 2676 2677 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 2678 tpid)); 2679 } 2680 2681 int 2682 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 2683 { 2684 struct rte_eth_dev *dev; 2685 int ret = 0; 2686 int mask = 0; 2687 int cur, org = 0; 2688 uint64_t orig_offloads; 2689 2690 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2691 dev = &rte_eth_devices[port_id]; 2692 2693 /* save original values in case of failure */ 2694 orig_offloads = dev->data->dev_conf.rxmode.offloads; 2695 2696 /*check which option changed by application*/ 2697 cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD); 2698 org = !!(dev->data->dev_conf.rxmode.offloads & 2699 DEV_RX_OFFLOAD_VLAN_STRIP); 2700 if (cur != org) { 2701 if (cur) 2702 dev->data->dev_conf.rxmode.offloads |= 2703 DEV_RX_OFFLOAD_VLAN_STRIP; 2704 else 2705 dev->data->dev_conf.rxmode.offloads &= 2706 ~DEV_RX_OFFLOAD_VLAN_STRIP; 2707 mask |= ETH_VLAN_STRIP_MASK; 2708 } 2709 2710 cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD); 2711 org = !!(dev->data->dev_conf.rxmode.offloads & 2712 DEV_RX_OFFLOAD_VLAN_FILTER); 2713 if (cur != org) { 2714 if (cur) 2715 dev->data->dev_conf.rxmode.offloads |= 2716 DEV_RX_OFFLOAD_VLAN_FILTER; 2717 else 2718 dev->data->dev_conf.rxmode.offloads &= 2719 ~DEV_RX_OFFLOAD_VLAN_FILTER; 2720 mask |= ETH_VLAN_FILTER_MASK; 2721 } 2722 2723 cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD); 2724 org = !!(dev->data->dev_conf.rxmode.offloads & 2725 DEV_RX_OFFLOAD_VLAN_EXTEND); 2726 if (cur != org) { 2727 if (cur) 2728 dev->data->dev_conf.rxmode.offloads |= 2729 DEV_RX_OFFLOAD_VLAN_EXTEND; 2730 else 2731 dev->data->dev_conf.rxmode.offloads &= 2732 ~DEV_RX_OFFLOAD_VLAN_EXTEND; 2733 mask |= ETH_VLAN_EXTEND_MASK; 2734 } 2735 2736 /*no change*/ 2737 if (mask == 0) 2738 return ret; 2739 2740 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 2741 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 2742 if (ret) { 2743 /* hit an error restore original values */ 2744 dev->data->dev_conf.rxmode.offloads = orig_offloads; 2745 } 2746 2747 return eth_err(port_id, ret); 2748 } 2749 2750 int 2751 rte_eth_dev_get_vlan_offload(uint16_t port_id) 2752 { 2753 struct rte_eth_dev *dev; 2754 int ret = 0; 2755 2756 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2757 dev = &rte_eth_devices[port_id]; 2758 2759 if (dev->data->dev_conf.rxmode.offloads & 2760 DEV_RX_OFFLOAD_VLAN_STRIP) 2761 ret |= ETH_VLAN_STRIP_OFFLOAD; 2762 2763 if (dev->data->dev_conf.rxmode.offloads & 2764 DEV_RX_OFFLOAD_VLAN_FILTER) 2765 ret |= ETH_VLAN_FILTER_OFFLOAD; 2766 2767 if (dev->data->dev_conf.rxmode.offloads & 2768 DEV_RX_OFFLOAD_VLAN_EXTEND) 2769 ret |= ETH_VLAN_EXTEND_OFFLOAD; 2770 2771 return ret; 2772 } 2773 2774 int 2775 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 2776 { 2777 struct rte_eth_dev *dev; 2778 2779 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2780 dev = &rte_eth_devices[port_id]; 2781 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 2782 2783 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 2784 } 2785 2786 int 2787 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 2788 { 2789 struct rte_eth_dev *dev; 2790 2791 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2792 dev = &rte_eth_devices[port_id]; 2793 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 2794 memset(fc_conf, 0, sizeof(*fc_conf)); 2795 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 2796 } 2797 2798 int 2799 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 2800 { 2801 struct rte_eth_dev *dev; 2802 2803 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2804 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 2805 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 2806 return -EINVAL; 2807 } 2808 2809 dev = &rte_eth_devices[port_id]; 2810 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 2811 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 2812 } 2813 2814 int 2815 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 2816 struct rte_eth_pfc_conf *pfc_conf) 2817 { 2818 struct rte_eth_dev *dev; 2819 2820 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2821 if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) { 2822 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 2823 return -EINVAL; 2824 } 2825 2826 dev = &rte_eth_devices[port_id]; 2827 /* High water, low water validation are device specific */ 2828 if (*dev->dev_ops->priority_flow_ctrl_set) 2829 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 2830 (dev, pfc_conf)); 2831 return -ENOTSUP; 2832 } 2833 2834 static int 2835 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 2836 uint16_t reta_size) 2837 { 2838 uint16_t i, num; 2839 2840 if (!reta_conf) 2841 return -EINVAL; 2842 2843 num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE; 2844 for (i = 0; i < num; i++) { 2845 if (reta_conf[i].mask) 2846 return 0; 2847 } 2848 2849 return -EINVAL; 2850 } 2851 2852 static int 2853 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 2854 uint16_t reta_size, 2855 uint16_t max_rxq) 2856 { 2857 uint16_t i, idx, shift; 2858 2859 if (!reta_conf) 2860 return -EINVAL; 2861 2862 if (max_rxq == 0) { 2863 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 2864 return -EINVAL; 2865 } 2866 2867 for (i = 0; i < reta_size; i++) { 2868 idx = i / RTE_RETA_GROUP_SIZE; 2869 shift = i % RTE_RETA_GROUP_SIZE; 2870 if ((reta_conf[idx].mask & (1ULL << shift)) && 2871 (reta_conf[idx].reta[shift] >= max_rxq)) { 2872 RTE_ETHDEV_LOG(ERR, 2873 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 2874 idx, shift, 2875 reta_conf[idx].reta[shift], max_rxq); 2876 return -EINVAL; 2877 } 2878 } 2879 2880 return 0; 2881 } 2882 2883 int 2884 rte_eth_dev_rss_reta_update(uint16_t port_id, 2885 struct rte_eth_rss_reta_entry64 *reta_conf, 2886 uint16_t reta_size) 2887 { 2888 struct rte_eth_dev *dev; 2889 int ret; 2890 2891 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2892 /* Check mask bits */ 2893 ret = rte_eth_check_reta_mask(reta_conf, reta_size); 2894 if (ret < 0) 2895 return ret; 2896 2897 dev = &rte_eth_devices[port_id]; 2898 2899 /* Check entry value */ 2900 ret = rte_eth_check_reta_entry(reta_conf, reta_size, 2901 dev->data->nb_rx_queues); 2902 if (ret < 0) 2903 return ret; 2904 2905 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 2906 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 2907 reta_size)); 2908 } 2909 2910 int 2911 rte_eth_dev_rss_reta_query(uint16_t port_id, 2912 struct rte_eth_rss_reta_entry64 *reta_conf, 2913 uint16_t reta_size) 2914 { 2915 struct rte_eth_dev *dev; 2916 int ret; 2917 2918 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2919 2920 /* Check mask bits */ 2921 ret = rte_eth_check_reta_mask(reta_conf, reta_size); 2922 if (ret < 0) 2923 return ret; 2924 2925 dev = &rte_eth_devices[port_id]; 2926 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 2927 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 2928 reta_size)); 2929 } 2930 2931 int 2932 rte_eth_dev_rss_hash_update(uint16_t port_id, 2933 struct rte_eth_rss_conf *rss_conf) 2934 { 2935 struct rte_eth_dev *dev; 2936 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 2937 2938 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2939 dev = &rte_eth_devices[port_id]; 2940 rte_eth_dev_info_get(port_id, &dev_info); 2941 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 2942 dev_info.flow_type_rss_offloads) { 2943 RTE_ETHDEV_LOG(ERR, 2944 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 2945 port_id, rss_conf->rss_hf, 2946 dev_info.flow_type_rss_offloads); 2947 return -EINVAL; 2948 } 2949 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 2950 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 2951 rss_conf)); 2952 } 2953 2954 int 2955 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 2956 struct rte_eth_rss_conf *rss_conf) 2957 { 2958 struct rte_eth_dev *dev; 2959 2960 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2961 dev = &rte_eth_devices[port_id]; 2962 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 2963 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 2964 rss_conf)); 2965 } 2966 2967 int 2968 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 2969 struct rte_eth_udp_tunnel *udp_tunnel) 2970 { 2971 struct rte_eth_dev *dev; 2972 2973 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2974 if (udp_tunnel == NULL) { 2975 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n"); 2976 return -EINVAL; 2977 } 2978 2979 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 2980 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 2981 return -EINVAL; 2982 } 2983 2984 dev = &rte_eth_devices[port_id]; 2985 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 2986 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 2987 udp_tunnel)); 2988 } 2989 2990 int 2991 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 2992 struct rte_eth_udp_tunnel *udp_tunnel) 2993 { 2994 struct rte_eth_dev *dev; 2995 2996 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2997 dev = &rte_eth_devices[port_id]; 2998 2999 if (udp_tunnel == NULL) { 3000 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n"); 3001 return -EINVAL; 3002 } 3003 3004 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 3005 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 3006 return -EINVAL; 3007 } 3008 3009 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 3010 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 3011 udp_tunnel)); 3012 } 3013 3014 int 3015 rte_eth_led_on(uint16_t port_id) 3016 { 3017 struct rte_eth_dev *dev; 3018 3019 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3020 dev = &rte_eth_devices[port_id]; 3021 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 3022 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 3023 } 3024 3025 int 3026 rte_eth_led_off(uint16_t port_id) 3027 { 3028 struct rte_eth_dev *dev; 3029 3030 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3031 dev = &rte_eth_devices[port_id]; 3032 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 3033 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 3034 } 3035 3036 /* 3037 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 3038 * an empty spot. 3039 */ 3040 static int 3041 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr) 3042 { 3043 struct rte_eth_dev_info dev_info; 3044 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 3045 unsigned i; 3046 3047 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3048 rte_eth_dev_info_get(port_id, &dev_info); 3049 3050 for (i = 0; i < dev_info.max_mac_addrs; i++) 3051 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0) 3052 return i; 3053 3054 return -1; 3055 } 3056 3057 static const struct ether_addr null_mac_addr; 3058 3059 int 3060 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr, 3061 uint32_t pool) 3062 { 3063 struct rte_eth_dev *dev; 3064 int index; 3065 uint64_t pool_mask; 3066 int ret; 3067 3068 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3069 dev = &rte_eth_devices[port_id]; 3070 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 3071 3072 if (is_zero_ether_addr(addr)) { 3073 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 3074 port_id); 3075 return -EINVAL; 3076 } 3077 if (pool >= ETH_64_POOLS) { 3078 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1); 3079 return -EINVAL; 3080 } 3081 3082 index = get_mac_addr_index(port_id, addr); 3083 if (index < 0) { 3084 index = get_mac_addr_index(port_id, &null_mac_addr); 3085 if (index < 0) { 3086 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 3087 port_id); 3088 return -ENOSPC; 3089 } 3090 } else { 3091 pool_mask = dev->data->mac_pool_sel[index]; 3092 3093 /* Check if both MAC address and pool is already there, and do nothing */ 3094 if (pool_mask & (1ULL << pool)) 3095 return 0; 3096 } 3097 3098 /* Update NIC */ 3099 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 3100 3101 if (ret == 0) { 3102 /* Update address in NIC data structure */ 3103 ether_addr_copy(addr, &dev->data->mac_addrs[index]); 3104 3105 /* Update pool bitmap in NIC data structure */ 3106 dev->data->mac_pool_sel[index] |= (1ULL << pool); 3107 } 3108 3109 return eth_err(port_id, ret); 3110 } 3111 3112 int 3113 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr) 3114 { 3115 struct rte_eth_dev *dev; 3116 int index; 3117 3118 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3119 dev = &rte_eth_devices[port_id]; 3120 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); 3121 3122 index = get_mac_addr_index(port_id, addr); 3123 if (index == 0) { 3124 RTE_ETHDEV_LOG(ERR, 3125 "Port %u: Cannot remove default MAC address\n", 3126 port_id); 3127 return -EADDRINUSE; 3128 } else if (index < 0) 3129 return 0; /* Do nothing if address wasn't found */ 3130 3131 /* Update NIC */ 3132 (*dev->dev_ops->mac_addr_remove)(dev, index); 3133 3134 /* Update address in NIC data structure */ 3135 ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 3136 3137 /* reset pool bitmap */ 3138 dev->data->mac_pool_sel[index] = 0; 3139 3140 return 0; 3141 } 3142 3143 int 3144 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr) 3145 { 3146 struct rte_eth_dev *dev; 3147 int ret; 3148 3149 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3150 3151 if (!is_valid_assigned_ether_addr(addr)) 3152 return -EINVAL; 3153 3154 dev = &rte_eth_devices[port_id]; 3155 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 3156 3157 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 3158 if (ret < 0) 3159 return ret; 3160 3161 /* Update default address in NIC data structure */ 3162 ether_addr_copy(addr, &dev->data->mac_addrs[0]); 3163 3164 return 0; 3165 } 3166 3167 3168 /* 3169 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 3170 * an empty spot. 3171 */ 3172 static int 3173 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr) 3174 { 3175 struct rte_eth_dev_info dev_info; 3176 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 3177 unsigned i; 3178 3179 rte_eth_dev_info_get(port_id, &dev_info); 3180 if (!dev->data->hash_mac_addrs) 3181 return -1; 3182 3183 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 3184 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 3185 ETHER_ADDR_LEN) == 0) 3186 return i; 3187 3188 return -1; 3189 } 3190 3191 int 3192 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr, 3193 uint8_t on) 3194 { 3195 int index; 3196 int ret; 3197 struct rte_eth_dev *dev; 3198 3199 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3200 3201 dev = &rte_eth_devices[port_id]; 3202 if (is_zero_ether_addr(addr)) { 3203 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 3204 port_id); 3205 return -EINVAL; 3206 } 3207 3208 index = get_hash_mac_addr_index(port_id, addr); 3209 /* Check if it's already there, and do nothing */ 3210 if ((index >= 0) && on) 3211 return 0; 3212 3213 if (index < 0) { 3214 if (!on) { 3215 RTE_ETHDEV_LOG(ERR, 3216 "Port %u: the MAC address was not set in UTA\n", 3217 port_id); 3218 return -EINVAL; 3219 } 3220 3221 index = get_hash_mac_addr_index(port_id, &null_mac_addr); 3222 if (index < 0) { 3223 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 3224 port_id); 3225 return -ENOSPC; 3226 } 3227 } 3228 3229 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 3230 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 3231 if (ret == 0) { 3232 /* Update address in NIC data structure */ 3233 if (on) 3234 ether_addr_copy(addr, 3235 &dev->data->hash_mac_addrs[index]); 3236 else 3237 ether_addr_copy(&null_mac_addr, 3238 &dev->data->hash_mac_addrs[index]); 3239 } 3240 3241 return eth_err(port_id, ret); 3242 } 3243 3244 int 3245 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 3246 { 3247 struct rte_eth_dev *dev; 3248 3249 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3250 3251 dev = &rte_eth_devices[port_id]; 3252 3253 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 3254 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 3255 on)); 3256 } 3257 3258 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 3259 uint16_t tx_rate) 3260 { 3261 struct rte_eth_dev *dev; 3262 struct rte_eth_dev_info dev_info; 3263 struct rte_eth_link link; 3264 3265 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3266 3267 dev = &rte_eth_devices[port_id]; 3268 rte_eth_dev_info_get(port_id, &dev_info); 3269 link = dev->data->dev_link; 3270 3271 if (queue_idx > dev_info.max_tx_queues) { 3272 RTE_ETHDEV_LOG(ERR, 3273 "Set queue rate limit:port %u: invalid queue id=%u\n", 3274 port_id, queue_idx); 3275 return -EINVAL; 3276 } 3277 3278 if (tx_rate > link.link_speed) { 3279 RTE_ETHDEV_LOG(ERR, 3280 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 3281 tx_rate, link.link_speed); 3282 return -EINVAL; 3283 } 3284 3285 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); 3286 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 3287 queue_idx, tx_rate)); 3288 } 3289 3290 int 3291 rte_eth_mirror_rule_set(uint16_t port_id, 3292 struct rte_eth_mirror_conf *mirror_conf, 3293 uint8_t rule_id, uint8_t on) 3294 { 3295 struct rte_eth_dev *dev; 3296 3297 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3298 if (mirror_conf->rule_type == 0) { 3299 RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n"); 3300 return -EINVAL; 3301 } 3302 3303 if (mirror_conf->dst_pool >= ETH_64_POOLS) { 3304 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n", 3305 ETH_64_POOLS - 1); 3306 return -EINVAL; 3307 } 3308 3309 if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP | 3310 ETH_MIRROR_VIRTUAL_POOL_DOWN)) && 3311 (mirror_conf->pool_mask == 0)) { 3312 RTE_ETHDEV_LOG(ERR, 3313 "Invalid mirror pool, pool mask can not be 0\n"); 3314 return -EINVAL; 3315 } 3316 3317 if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) && 3318 mirror_conf->vlan.vlan_mask == 0) { 3319 RTE_ETHDEV_LOG(ERR, 3320 "Invalid vlan mask, vlan mask can not be 0\n"); 3321 return -EINVAL; 3322 } 3323 3324 dev = &rte_eth_devices[port_id]; 3325 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP); 3326 3327 return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev, 3328 mirror_conf, rule_id, on)); 3329 } 3330 3331 int 3332 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id) 3333 { 3334 struct rte_eth_dev *dev; 3335 3336 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3337 3338 dev = &rte_eth_devices[port_id]; 3339 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP); 3340 3341 return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev, 3342 rule_id)); 3343 } 3344 3345 RTE_INIT(eth_dev_init_cb_lists) 3346 { 3347 int i; 3348 3349 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 3350 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 3351 } 3352 3353 int 3354 rte_eth_dev_callback_register(uint16_t port_id, 3355 enum rte_eth_event_type event, 3356 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 3357 { 3358 struct rte_eth_dev *dev; 3359 struct rte_eth_dev_callback *user_cb; 3360 uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */ 3361 uint16_t last_port; 3362 3363 if (!cb_fn) 3364 return -EINVAL; 3365 3366 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 3367 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 3368 return -EINVAL; 3369 } 3370 3371 if (port_id == RTE_ETH_ALL) { 3372 next_port = 0; 3373 last_port = RTE_MAX_ETHPORTS - 1; 3374 } else { 3375 next_port = last_port = port_id; 3376 } 3377 3378 rte_spinlock_lock(&rte_eth_dev_cb_lock); 3379 3380 do { 3381 dev = &rte_eth_devices[next_port]; 3382 3383 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 3384 if (user_cb->cb_fn == cb_fn && 3385 user_cb->cb_arg == cb_arg && 3386 user_cb->event == event) { 3387 break; 3388 } 3389 } 3390 3391 /* create a new callback. */ 3392 if (user_cb == NULL) { 3393 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 3394 sizeof(struct rte_eth_dev_callback), 0); 3395 if (user_cb != NULL) { 3396 user_cb->cb_fn = cb_fn; 3397 user_cb->cb_arg = cb_arg; 3398 user_cb->event = event; 3399 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 3400 user_cb, next); 3401 } else { 3402 rte_spinlock_unlock(&rte_eth_dev_cb_lock); 3403 rte_eth_dev_callback_unregister(port_id, event, 3404 cb_fn, cb_arg); 3405 return -ENOMEM; 3406 } 3407 3408 } 3409 } while (++next_port <= last_port); 3410 3411 rte_spinlock_unlock(&rte_eth_dev_cb_lock); 3412 return 0; 3413 } 3414 3415 int 3416 rte_eth_dev_callback_unregister(uint16_t port_id, 3417 enum rte_eth_event_type event, 3418 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 3419 { 3420 int ret; 3421 struct rte_eth_dev *dev; 3422 struct rte_eth_dev_callback *cb, *next; 3423 uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */ 3424 uint16_t last_port; 3425 3426 if (!cb_fn) 3427 return -EINVAL; 3428 3429 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 3430 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 3431 return -EINVAL; 3432 } 3433 3434 if (port_id == RTE_ETH_ALL) { 3435 next_port = 0; 3436 last_port = RTE_MAX_ETHPORTS - 1; 3437 } else { 3438 next_port = last_port = port_id; 3439 } 3440 3441 rte_spinlock_lock(&rte_eth_dev_cb_lock); 3442 3443 do { 3444 dev = &rte_eth_devices[next_port]; 3445 ret = 0; 3446 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 3447 cb = next) { 3448 3449 next = TAILQ_NEXT(cb, next); 3450 3451 if (cb->cb_fn != cb_fn || cb->event != event || 3452 (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 3453 continue; 3454 3455 /* 3456 * if this callback is not executing right now, 3457 * then remove it. 3458 */ 3459 if (cb->active == 0) { 3460 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 3461 rte_free(cb); 3462 } else { 3463 ret = -EAGAIN; 3464 } 3465 } 3466 } while (++next_port <= last_port); 3467 3468 rte_spinlock_unlock(&rte_eth_dev_cb_lock); 3469 return ret; 3470 } 3471 3472 int 3473 _rte_eth_dev_callback_process(struct rte_eth_dev *dev, 3474 enum rte_eth_event_type event, void *ret_param) 3475 { 3476 struct rte_eth_dev_callback *cb_lst; 3477 struct rte_eth_dev_callback dev_cb; 3478 int rc = 0; 3479 3480 rte_spinlock_lock(&rte_eth_dev_cb_lock); 3481 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { 3482 if (cb_lst->cb_fn == NULL || cb_lst->event != event) 3483 continue; 3484 dev_cb = *cb_lst; 3485 cb_lst->active = 1; 3486 if (ret_param != NULL) 3487 dev_cb.ret_param = ret_param; 3488 3489 rte_spinlock_unlock(&rte_eth_dev_cb_lock); 3490 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, 3491 dev_cb.cb_arg, dev_cb.ret_param); 3492 rte_spinlock_lock(&rte_eth_dev_cb_lock); 3493 cb_lst->active = 0; 3494 } 3495 rte_spinlock_unlock(&rte_eth_dev_cb_lock); 3496 return rc; 3497 } 3498 3499 void 3500 rte_eth_dev_probing_finish(struct rte_eth_dev *dev) 3501 { 3502 if (dev == NULL) 3503 return; 3504 3505 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); 3506 3507 dev->state = RTE_ETH_DEV_ATTACHED; 3508 } 3509 3510 int 3511 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 3512 { 3513 uint32_t vec; 3514 struct rte_eth_dev *dev; 3515 struct rte_intr_handle *intr_handle; 3516 uint16_t qid; 3517 int rc; 3518 3519 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3520 3521 dev = &rte_eth_devices[port_id]; 3522 3523 if (!dev->intr_handle) { 3524 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 3525 return -ENOTSUP; 3526 } 3527 3528 intr_handle = dev->intr_handle; 3529 if (!intr_handle->intr_vec) { 3530 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 3531 return -EPERM; 3532 } 3533 3534 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 3535 vec = intr_handle->intr_vec[qid]; 3536 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 3537 if (rc && rc != -EEXIST) { 3538 RTE_ETHDEV_LOG(ERR, 3539 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 3540 port_id, qid, op, epfd, vec); 3541 } 3542 } 3543 3544 return 0; 3545 } 3546 3547 int __rte_experimental 3548 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 3549 { 3550 struct rte_intr_handle *intr_handle; 3551 struct rte_eth_dev *dev; 3552 unsigned int efd_idx; 3553 uint32_t vec; 3554 int fd; 3555 3556 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 3557 3558 dev = &rte_eth_devices[port_id]; 3559 3560 if (queue_id >= dev->data->nb_rx_queues) { 3561 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 3562 return -1; 3563 } 3564 3565 if (!dev->intr_handle) { 3566 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 3567 return -1; 3568 } 3569 3570 intr_handle = dev->intr_handle; 3571 if (!intr_handle->intr_vec) { 3572 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 3573 return -1; 3574 } 3575 3576 vec = intr_handle->intr_vec[queue_id]; 3577 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 3578 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 3579 fd = intr_handle->efds[efd_idx]; 3580 3581 return fd; 3582 } 3583 3584 const struct rte_memzone * 3585 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, 3586 uint16_t queue_id, size_t size, unsigned align, 3587 int socket_id) 3588 { 3589 char z_name[RTE_MEMZONE_NAMESIZE]; 3590 const struct rte_memzone *mz; 3591 3592 snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s", 3593 dev->data->port_id, queue_id, ring_name); 3594 3595 mz = rte_memzone_lookup(z_name); 3596 if (mz) 3597 return mz; 3598 3599 return rte_memzone_reserve_aligned(z_name, size, socket_id, 3600 RTE_MEMZONE_IOVA_CONTIG, align); 3601 } 3602 3603 int __rte_experimental 3604 rte_eth_dev_create(struct rte_device *device, const char *name, 3605 size_t priv_data_size, 3606 ethdev_bus_specific_init ethdev_bus_specific_init, 3607 void *bus_init_params, 3608 ethdev_init_t ethdev_init, void *init_params) 3609 { 3610 struct rte_eth_dev *ethdev; 3611 int retval; 3612 3613 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); 3614 3615 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 3616 ethdev = rte_eth_dev_allocate(name); 3617 if (!ethdev) 3618 return -ENODEV; 3619 3620 if (priv_data_size) { 3621 ethdev->data->dev_private = rte_zmalloc_socket( 3622 name, priv_data_size, RTE_CACHE_LINE_SIZE, 3623 device->numa_node); 3624 3625 if (!ethdev->data->dev_private) { 3626 RTE_LOG(ERR, EAL, "failed to allocate private data"); 3627 retval = -ENOMEM; 3628 goto probe_failed; 3629 } 3630 } 3631 } else { 3632 ethdev = rte_eth_dev_attach_secondary(name); 3633 if (!ethdev) { 3634 RTE_LOG(ERR, EAL, "secondary process attach failed, " 3635 "ethdev doesn't exist"); 3636 return -ENODEV; 3637 } 3638 } 3639 3640 ethdev->device = device; 3641 3642 if (ethdev_bus_specific_init) { 3643 retval = ethdev_bus_specific_init(ethdev, bus_init_params); 3644 if (retval) { 3645 RTE_LOG(ERR, EAL, 3646 "ethdev bus specific initialisation failed"); 3647 goto probe_failed; 3648 } 3649 } 3650 3651 retval = ethdev_init(ethdev, init_params); 3652 if (retval) { 3653 RTE_LOG(ERR, EAL, "ethdev initialisation failed"); 3654 goto probe_failed; 3655 } 3656 3657 rte_eth_dev_probing_finish(ethdev); 3658 3659 return retval; 3660 3661 probe_failed: 3662 rte_eth_dev_release_port(ethdev); 3663 return retval; 3664 } 3665 3666 int __rte_experimental 3667 rte_eth_dev_destroy(struct rte_eth_dev *ethdev, 3668 ethdev_uninit_t ethdev_uninit) 3669 { 3670 int ret; 3671 3672 ethdev = rte_eth_dev_allocated(ethdev->data->name); 3673 if (!ethdev) 3674 return -ENODEV; 3675 3676 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL); 3677 3678 ret = ethdev_uninit(ethdev); 3679 if (ret) 3680 return ret; 3681 3682 return rte_eth_dev_release_port(ethdev); 3683 } 3684 3685 int 3686 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 3687 int epfd, int op, void *data) 3688 { 3689 uint32_t vec; 3690 struct rte_eth_dev *dev; 3691 struct rte_intr_handle *intr_handle; 3692 int rc; 3693 3694 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3695 3696 dev = &rte_eth_devices[port_id]; 3697 if (queue_id >= dev->data->nb_rx_queues) { 3698 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 3699 return -EINVAL; 3700 } 3701 3702 if (!dev->intr_handle) { 3703 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 3704 return -ENOTSUP; 3705 } 3706 3707 intr_handle = dev->intr_handle; 3708 if (!intr_handle->intr_vec) { 3709 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 3710 return -EPERM; 3711 } 3712 3713 vec = intr_handle->intr_vec[queue_id]; 3714 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 3715 if (rc && rc != -EEXIST) { 3716 RTE_ETHDEV_LOG(ERR, 3717 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 3718 port_id, queue_id, op, epfd, vec); 3719 return rc; 3720 } 3721 3722 return 0; 3723 } 3724 3725 int 3726 rte_eth_dev_rx_intr_enable(uint16_t port_id, 3727 uint16_t queue_id) 3728 { 3729 struct rte_eth_dev *dev; 3730 3731 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3732 3733 dev = &rte_eth_devices[port_id]; 3734 3735 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 3736 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, 3737 queue_id)); 3738 } 3739 3740 int 3741 rte_eth_dev_rx_intr_disable(uint16_t port_id, 3742 uint16_t queue_id) 3743 { 3744 struct rte_eth_dev *dev; 3745 3746 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3747 3748 dev = &rte_eth_devices[port_id]; 3749 3750 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 3751 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, 3752 queue_id)); 3753 } 3754 3755 3756 int 3757 rte_eth_dev_filter_supported(uint16_t port_id, 3758 enum rte_filter_type filter_type) 3759 { 3760 struct rte_eth_dev *dev; 3761 3762 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3763 3764 dev = &rte_eth_devices[port_id]; 3765 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP); 3766 return (*dev->dev_ops->filter_ctrl)(dev, filter_type, 3767 RTE_ETH_FILTER_NOP, NULL); 3768 } 3769 3770 int 3771 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type, 3772 enum rte_filter_op filter_op, void *arg) 3773 { 3774 struct rte_eth_dev *dev; 3775 3776 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3777 3778 dev = &rte_eth_devices[port_id]; 3779 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP); 3780 return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type, 3781 filter_op, arg)); 3782 } 3783 3784 const struct rte_eth_rxtx_callback * 3785 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 3786 rte_rx_callback_fn fn, void *user_param) 3787 { 3788 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3789 rte_errno = ENOTSUP; 3790 return NULL; 3791 #endif 3792 /* check input parameters */ 3793 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 3794 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 3795 rte_errno = EINVAL; 3796 return NULL; 3797 } 3798 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 3799 3800 if (cb == NULL) { 3801 rte_errno = ENOMEM; 3802 return NULL; 3803 } 3804 3805 cb->fn.rx = fn; 3806 cb->param = user_param; 3807 3808 rte_spinlock_lock(&rte_eth_rx_cb_lock); 3809 /* Add the callbacks in fifo order. */ 3810 struct rte_eth_rxtx_callback *tail = 3811 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 3812 3813 if (!tail) { 3814 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb; 3815 3816 } else { 3817 while (tail->next) 3818 tail = tail->next; 3819 tail->next = cb; 3820 } 3821 rte_spinlock_unlock(&rte_eth_rx_cb_lock); 3822 3823 return cb; 3824 } 3825 3826 const struct rte_eth_rxtx_callback * 3827 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 3828 rte_rx_callback_fn fn, void *user_param) 3829 { 3830 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3831 rte_errno = ENOTSUP; 3832 return NULL; 3833 #endif 3834 /* check input parameters */ 3835 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 3836 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 3837 rte_errno = EINVAL; 3838 return NULL; 3839 } 3840 3841 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 3842 3843 if (cb == NULL) { 3844 rte_errno = ENOMEM; 3845 return NULL; 3846 } 3847 3848 cb->fn.rx = fn; 3849 cb->param = user_param; 3850 3851 rte_spinlock_lock(&rte_eth_rx_cb_lock); 3852 /* Add the callbacks at fisrt position*/ 3853 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 3854 rte_smp_wmb(); 3855 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb; 3856 rte_spinlock_unlock(&rte_eth_rx_cb_lock); 3857 3858 return cb; 3859 } 3860 3861 const struct rte_eth_rxtx_callback * 3862 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 3863 rte_tx_callback_fn fn, void *user_param) 3864 { 3865 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3866 rte_errno = ENOTSUP; 3867 return NULL; 3868 #endif 3869 /* check input parameters */ 3870 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 3871 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 3872 rte_errno = EINVAL; 3873 return NULL; 3874 } 3875 3876 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 3877 3878 if (cb == NULL) { 3879 rte_errno = ENOMEM; 3880 return NULL; 3881 } 3882 3883 cb->fn.tx = fn; 3884 cb->param = user_param; 3885 3886 rte_spinlock_lock(&rte_eth_tx_cb_lock); 3887 /* Add the callbacks in fifo order. */ 3888 struct rte_eth_rxtx_callback *tail = 3889 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 3890 3891 if (!tail) { 3892 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb; 3893 3894 } else { 3895 while (tail->next) 3896 tail = tail->next; 3897 tail->next = cb; 3898 } 3899 rte_spinlock_unlock(&rte_eth_tx_cb_lock); 3900 3901 return cb; 3902 } 3903 3904 int 3905 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 3906 const struct rte_eth_rxtx_callback *user_cb) 3907 { 3908 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3909 return -ENOTSUP; 3910 #endif 3911 /* Check input parameters. */ 3912 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 3913 if (user_cb == NULL || 3914 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 3915 return -EINVAL; 3916 3917 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 3918 struct rte_eth_rxtx_callback *cb; 3919 struct rte_eth_rxtx_callback **prev_cb; 3920 int ret = -EINVAL; 3921 3922 rte_spinlock_lock(&rte_eth_rx_cb_lock); 3923 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 3924 for (; *prev_cb != NULL; prev_cb = &cb->next) { 3925 cb = *prev_cb; 3926 if (cb == user_cb) { 3927 /* Remove the user cb from the callback list. */ 3928 *prev_cb = cb->next; 3929 ret = 0; 3930 break; 3931 } 3932 } 3933 rte_spinlock_unlock(&rte_eth_rx_cb_lock); 3934 3935 return ret; 3936 } 3937 3938 int 3939 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 3940 const struct rte_eth_rxtx_callback *user_cb) 3941 { 3942 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3943 return -ENOTSUP; 3944 #endif 3945 /* Check input parameters. */ 3946 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 3947 if (user_cb == NULL || 3948 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 3949 return -EINVAL; 3950 3951 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 3952 int ret = -EINVAL; 3953 struct rte_eth_rxtx_callback *cb; 3954 struct rte_eth_rxtx_callback **prev_cb; 3955 3956 rte_spinlock_lock(&rte_eth_tx_cb_lock); 3957 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 3958 for (; *prev_cb != NULL; prev_cb = &cb->next) { 3959 cb = *prev_cb; 3960 if (cb == user_cb) { 3961 /* Remove the user cb from the callback list. */ 3962 *prev_cb = cb->next; 3963 ret = 0; 3964 break; 3965 } 3966 } 3967 rte_spinlock_unlock(&rte_eth_tx_cb_lock); 3968 3969 return ret; 3970 } 3971 3972 int 3973 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 3974 struct rte_eth_rxq_info *qinfo) 3975 { 3976 struct rte_eth_dev *dev; 3977 3978 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3979 3980 if (qinfo == NULL) 3981 return -EINVAL; 3982 3983 dev = &rte_eth_devices[port_id]; 3984 if (queue_id >= dev->data->nb_rx_queues) { 3985 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 3986 return -EINVAL; 3987 } 3988 3989 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 3990 3991 memset(qinfo, 0, sizeof(*qinfo)); 3992 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 3993 return 0; 3994 } 3995 3996 int 3997 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 3998 struct rte_eth_txq_info *qinfo) 3999 { 4000 struct rte_eth_dev *dev; 4001 4002 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4003 4004 if (qinfo == NULL) 4005 return -EINVAL; 4006 4007 dev = &rte_eth_devices[port_id]; 4008 if (queue_id >= dev->data->nb_tx_queues) { 4009 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); 4010 return -EINVAL; 4011 } 4012 4013 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 4014 4015 memset(qinfo, 0, sizeof(*qinfo)); 4016 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 4017 4018 return 0; 4019 } 4020 4021 int 4022 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 4023 struct ether_addr *mc_addr_set, 4024 uint32_t nb_mc_addr) 4025 { 4026 struct rte_eth_dev *dev; 4027 4028 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4029 4030 dev = &rte_eth_devices[port_id]; 4031 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 4032 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 4033 mc_addr_set, nb_mc_addr)); 4034 } 4035 4036 int 4037 rte_eth_timesync_enable(uint16_t port_id) 4038 { 4039 struct rte_eth_dev *dev; 4040 4041 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4042 dev = &rte_eth_devices[port_id]; 4043 4044 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 4045 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 4046 } 4047 4048 int 4049 rte_eth_timesync_disable(uint16_t port_id) 4050 { 4051 struct rte_eth_dev *dev; 4052 4053 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4054 dev = &rte_eth_devices[port_id]; 4055 4056 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 4057 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 4058 } 4059 4060 int 4061 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 4062 uint32_t flags) 4063 { 4064 struct rte_eth_dev *dev; 4065 4066 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4067 dev = &rte_eth_devices[port_id]; 4068 4069 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 4070 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 4071 (dev, timestamp, flags)); 4072 } 4073 4074 int 4075 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 4076 struct timespec *timestamp) 4077 { 4078 struct rte_eth_dev *dev; 4079 4080 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4081 dev = &rte_eth_devices[port_id]; 4082 4083 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 4084 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 4085 (dev, timestamp)); 4086 } 4087 4088 int 4089 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 4090 { 4091 struct rte_eth_dev *dev; 4092 4093 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4094 dev = &rte_eth_devices[port_id]; 4095 4096 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 4097 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, 4098 delta)); 4099 } 4100 4101 int 4102 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 4103 { 4104 struct rte_eth_dev *dev; 4105 4106 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4107 dev = &rte_eth_devices[port_id]; 4108 4109 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 4110 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 4111 timestamp)); 4112 } 4113 4114 int 4115 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 4116 { 4117 struct rte_eth_dev *dev; 4118 4119 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4120 dev = &rte_eth_devices[port_id]; 4121 4122 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 4123 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 4124 timestamp)); 4125 } 4126 4127 int 4128 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 4129 { 4130 struct rte_eth_dev *dev; 4131 4132 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4133 4134 dev = &rte_eth_devices[port_id]; 4135 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 4136 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 4137 } 4138 4139 int 4140 rte_eth_dev_get_eeprom_length(uint16_t port_id) 4141 { 4142 struct rte_eth_dev *dev; 4143 4144 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4145 4146 dev = &rte_eth_devices[port_id]; 4147 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 4148 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 4149 } 4150 4151 int 4152 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 4153 { 4154 struct rte_eth_dev *dev; 4155 4156 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4157 4158 dev = &rte_eth_devices[port_id]; 4159 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 4160 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 4161 } 4162 4163 int 4164 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 4165 { 4166 struct rte_eth_dev *dev; 4167 4168 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4169 4170 dev = &rte_eth_devices[port_id]; 4171 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 4172 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 4173 } 4174 4175 int __rte_experimental 4176 rte_eth_dev_get_module_info(uint16_t port_id, 4177 struct rte_eth_dev_module_info *modinfo) 4178 { 4179 struct rte_eth_dev *dev; 4180 4181 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4182 4183 dev = &rte_eth_devices[port_id]; 4184 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 4185 return (*dev->dev_ops->get_module_info)(dev, modinfo); 4186 } 4187 4188 int __rte_experimental 4189 rte_eth_dev_get_module_eeprom(uint16_t port_id, 4190 struct rte_dev_eeprom_info *info) 4191 { 4192 struct rte_eth_dev *dev; 4193 4194 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4195 4196 dev = &rte_eth_devices[port_id]; 4197 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 4198 return (*dev->dev_ops->get_module_eeprom)(dev, info); 4199 } 4200 4201 int 4202 rte_eth_dev_get_dcb_info(uint16_t port_id, 4203 struct rte_eth_dcb_info *dcb_info) 4204 { 4205 struct rte_eth_dev *dev; 4206 4207 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4208 4209 dev = &rte_eth_devices[port_id]; 4210 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 4211 4212 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 4213 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 4214 } 4215 4216 int 4217 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id, 4218 struct rte_eth_l2_tunnel_conf *l2_tunnel) 4219 { 4220 struct rte_eth_dev *dev; 4221 4222 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4223 if (l2_tunnel == NULL) { 4224 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n"); 4225 return -EINVAL; 4226 } 4227 4228 if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) { 4229 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4230 return -EINVAL; 4231 } 4232 4233 dev = &rte_eth_devices[port_id]; 4234 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf, 4235 -ENOTSUP); 4236 return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, 4237 l2_tunnel)); 4238 } 4239 4240 int 4241 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id, 4242 struct rte_eth_l2_tunnel_conf *l2_tunnel, 4243 uint32_t mask, 4244 uint8_t en) 4245 { 4246 struct rte_eth_dev *dev; 4247 4248 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4249 4250 if (l2_tunnel == NULL) { 4251 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n"); 4252 return -EINVAL; 4253 } 4254 4255 if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) { 4256 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4257 return -EINVAL; 4258 } 4259 4260 if (mask == 0) { 4261 RTE_ETHDEV_LOG(ERR, "Mask should have a value\n"); 4262 return -EINVAL; 4263 } 4264 4265 dev = &rte_eth_devices[port_id]; 4266 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set, 4267 -ENOTSUP); 4268 return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev, 4269 l2_tunnel, mask, en)); 4270 } 4271 4272 static void 4273 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc, 4274 const struct rte_eth_desc_lim *desc_lim) 4275 { 4276 if (desc_lim->nb_align != 0) 4277 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 4278 4279 if (desc_lim->nb_max != 0) 4280 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 4281 4282 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 4283 } 4284 4285 int 4286 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 4287 uint16_t *nb_rx_desc, 4288 uint16_t *nb_tx_desc) 4289 { 4290 struct rte_eth_dev *dev; 4291 struct rte_eth_dev_info dev_info; 4292 4293 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4294 4295 dev = &rte_eth_devices[port_id]; 4296 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 4297 4298 rte_eth_dev_info_get(port_id, &dev_info); 4299 4300 if (nb_rx_desc != NULL) 4301 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 4302 4303 if (nb_tx_desc != NULL) 4304 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 4305 4306 return 0; 4307 } 4308 4309 int 4310 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 4311 { 4312 struct rte_eth_dev *dev; 4313 4314 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4315 4316 if (pool == NULL) 4317 return -EINVAL; 4318 4319 dev = &rte_eth_devices[port_id]; 4320 4321 if (*dev->dev_ops->pool_ops_supported == NULL) 4322 return 1; /* all pools are supported */ 4323 4324 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 4325 } 4326 4327 /** 4328 * A set of values to describe the possible states of a switch domain. 4329 */ 4330 enum rte_eth_switch_domain_state { 4331 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, 4332 RTE_ETH_SWITCH_DOMAIN_ALLOCATED 4333 }; 4334 4335 /** 4336 * Array of switch domains available for allocation. Array is sized to 4337 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than 4338 * ethdev ports in a single process. 4339 */ 4340 static struct rte_eth_dev_switch { 4341 enum rte_eth_switch_domain_state state; 4342 } rte_eth_switch_domains[RTE_MAX_ETHPORTS]; 4343 4344 int __rte_experimental 4345 rte_eth_switch_domain_alloc(uint16_t *domain_id) 4346 { 4347 unsigned int i; 4348 4349 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 4350 4351 for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1; 4352 i < RTE_MAX_ETHPORTS; i++) { 4353 if (rte_eth_switch_domains[i].state == 4354 RTE_ETH_SWITCH_DOMAIN_UNUSED) { 4355 rte_eth_switch_domains[i].state = 4356 RTE_ETH_SWITCH_DOMAIN_ALLOCATED; 4357 *domain_id = i; 4358 return 0; 4359 } 4360 } 4361 4362 return -ENOSPC; 4363 } 4364 4365 int __rte_experimental 4366 rte_eth_switch_domain_free(uint16_t domain_id) 4367 { 4368 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || 4369 domain_id >= RTE_MAX_ETHPORTS) 4370 return -EINVAL; 4371 4372 if (rte_eth_switch_domains[domain_id].state != 4373 RTE_ETH_SWITCH_DOMAIN_ALLOCATED) 4374 return -EINVAL; 4375 4376 rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED; 4377 4378 return 0; 4379 } 4380 4381 static int 4382 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) 4383 { 4384 int state; 4385 struct rte_kvargs_pair *pair; 4386 char *letter; 4387 4388 arglist->str = strdup(str_in); 4389 if (arglist->str == NULL) 4390 return -ENOMEM; 4391 4392 letter = arglist->str; 4393 state = 0; 4394 arglist->count = 0; 4395 pair = &arglist->pairs[0]; 4396 while (1) { 4397 switch (state) { 4398 case 0: /* Initial */ 4399 if (*letter == '=') 4400 return -EINVAL; 4401 else if (*letter == '\0') 4402 return 0; 4403 4404 state = 1; 4405 pair->key = letter; 4406 /* fall-thru */ 4407 4408 case 1: /* Parsing key */ 4409 if (*letter == '=') { 4410 *letter = '\0'; 4411 pair->value = letter + 1; 4412 state = 2; 4413 } else if (*letter == ',' || *letter == '\0') 4414 return -EINVAL; 4415 break; 4416 4417 4418 case 2: /* Parsing value */ 4419 if (*letter == '[') 4420 state = 3; 4421 else if (*letter == ',') { 4422 *letter = '\0'; 4423 arglist->count++; 4424 pair = &arglist->pairs[arglist->count]; 4425 state = 0; 4426 } else if (*letter == '\0') { 4427 letter--; 4428 arglist->count++; 4429 pair = &arglist->pairs[arglist->count]; 4430 state = 0; 4431 } 4432 break; 4433 4434 case 3: /* Parsing list */ 4435 if (*letter == ']') 4436 state = 2; 4437 else if (*letter == '\0') 4438 return -EINVAL; 4439 break; 4440 } 4441 letter++; 4442 } 4443 } 4444 4445 int __rte_experimental 4446 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) 4447 { 4448 struct rte_kvargs args; 4449 struct rte_kvargs_pair *pair; 4450 unsigned int i; 4451 int result = 0; 4452 4453 memset(eth_da, 0, sizeof(*eth_da)); 4454 4455 result = rte_eth_devargs_tokenise(&args, dargs); 4456 if (result < 0) 4457 goto parse_cleanup; 4458 4459 for (i = 0; i < args.count; i++) { 4460 pair = &args.pairs[i]; 4461 if (strcmp("representor", pair->key) == 0) { 4462 result = rte_eth_devargs_parse_list(pair->value, 4463 rte_eth_devargs_parse_representor_ports, 4464 eth_da); 4465 if (result < 0) 4466 goto parse_cleanup; 4467 } 4468 } 4469 4470 parse_cleanup: 4471 if (args.str) 4472 free(args.str); 4473 4474 return result; 4475 } 4476 4477 RTE_INIT(ethdev_init_log) 4478 { 4479 rte_eth_dev_logtype = rte_log_register("lib.ethdev"); 4480 if (rte_eth_dev_logtype >= 0) 4481 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO); 4482 } 4483