/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

int rte_eth_dev_logtype;

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and its offset in stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /	\
		sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /	\
		sizeof(rte_txq_stats_strings[0]))

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(MATCH_METADATA),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs = {.args = NULL};
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	memset(iter, 0, sizeof(*iter));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 * A new syntax is in development (not yet supported):
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() is not yet supporting the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;
	free(devargs.args); /* allocated by rte_devargs_parse() */
	devargs.args = NULL;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if (strcmp(iter->bus->name, "vdev") == 0) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n",
				iter->bus->name);
	free(devargs.args);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
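
/*
 * Illustrative usage of the iterator API above (a sketch, not part of the
 * library itself): initialize an iterator from a devargs-style string and
 * walk the matching ports. rte_eth_iterator_next() performs the cleanup
 * itself once iteration is exhausted, so rte_eth_iterator_cleanup() is only
 * needed when the loop is abandoned early. should_stop() stands for a
 * hypothetical application-side condition.
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") != 0)
 *		return;
 *	for (port_id = rte_eth_iterator_next(&iter);
 *	     port_id != RTE_MAX_ETHPORTS;
 *	     port_id = rte_eth_iterator_next(&iter)) {
 *		if (should_stop(port_id)) {
 *			rte_eth_iterator_cleanup(&iter);
 *			break;
 *		}
 *	}
 */
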
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
			rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

static void
rte_eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&rte_eth_shared_data_lock);

	if (rte_eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*rte_eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		rte_eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
			memset(rte_eth_dev_shared_data->data, 0,
			       sizeof(rte_eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ethdev = _rte_eth_dev_allocated(name);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (_rte_eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = ETHER_MTU;

unlock:
	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port id both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	rte_eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		_rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    rte_eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
			rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
			rte_eth_devices[port_id].data->owner.id != owner_id))
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

int __rte_experimental
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	*owner_id = rte_eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;
	int sret;

	if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (!rte_eth_is_valid_owner_id(new_owner->id) &&
	    !rte_eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
			new_owner->name);
	if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
		RTE_ETHDEV_LOG(ERR, "Port %u owner name was truncated\n",
			port_id);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int __rte_experimental
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

int __rte_experimental
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

void __rte_experimental
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (rte_eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
			if (rte_eth_devices[port_id].data->owner.id == owner_id)
				memset(&rte_eth_devices[port_id].data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			       "Invalid owner id=%016"PRIx64"\n",
			       owner_id);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
}

int __rte_experimental
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	int ret = 0;
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		ret = -ENODEV;
	} else {
		rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}
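
/*
 * Illustrative usage of the port ownership API above (a sketch, not part of
 * the library itself): a component takes a fresh owner id, claims a port so
 * that other components skip it, and drops the claim when done. The owner
 * name "my_component" is only an example.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_component" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) != 0)
 *		return;
 *	if (rte_eth_dev_owner_set(port_id, &owner) != 0)
 *		return;
 *	... use the port exclusively ...
 *	rte_eth_dev_owner_unset(port_id, owner.id);
 */
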
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count(void)
{
	return rte_eth_dev_count_avail();
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t __rte_experimental
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	for (port = 0; port < RTE_MAX_ETHPORTS; port++)
		if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
			count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = rte_eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint32_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
		return -EINVAL;
	}

	for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
		if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
		    !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}
	}

	return -ENODEV;
}

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before start any queue\n",
			port_id);
		return -EINVAL;
	}

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
							     rx_queue_id));

}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before start any queue\n",
			port_id);
		return -EINVAL;
	}

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
			       sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	default:
		return 0;
	}
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
		if (offload == rte_rx_offload_names[i].offload) {
			name = rte_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
		if (offload == rte_tx_offload_names[i].offload) {
			name = rte_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	rte_eth_dev_info_get(port_id, &dev_info);

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred valued, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				dev_info.max_rx_pktlen);
			ret = -EINVAL;
			goto rollback;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			ret = -EINVAL;
			goto rollback;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u rte_eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u rte_eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		ret = eth_err(port_id, diag);
		goto rollback;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		ret = eth_err(port_id, diag);
		goto rollback;
	}

	return 0;

rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));

	return ret;
}
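
/*
 * Illustrative configuration sequence built on rte_eth_dev_configure() (a
 * sketch, not part of the library itself): configure one Rx and one Tx queue
 * with default settings, set the queues up with driver-default descriptor
 * counts (0), then start the port. "mbuf_pool" stands for a hypothetical,
 * previously created packet mbuf pool. rte_eth_rx_queue_setup(),
 * rte_eth_tx_queue_setup() and rte_eth_dev_start() are defined later in this
 * file.
 *
 *	struct rte_eth_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
 *		return;
 *	if (rte_eth_rx_queue_setup(port_id, 0, 0,
 *			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool) != 0)
 *		return;
 *	if (rte_eth_tx_queue_setup(port_id, 0, 0,
 *			rte_eth_dev_socket_id(port_id), NULL) != 0)
 *		return;
 *	if (rte_eth_dev_start(port_id) != 0)
 *		return;
 */
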
void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	rte_eth_dev_rx_queue_config(dev, 0);
	rte_eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info)
{
	struct ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static void
rte_eth_dev_config_restore(struct rte_eth_dev *dev,
			   struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		rte_eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Lets restore MAC now if device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		rte_eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	rte_eth_dev_config_restore(dev, &dev_info, port_id);

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}

void
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

void
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);

	/* check behaviour flag - temporary for PMD migration */
	if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) {
		/* new behaviour: send event + reset state + free all data */
		rte_eth_dev_release_port(dev);
		return;
	}
	RTE_ETHDEV_LOG(DEBUG, "Port closing is using an old behaviour.\n"
			"The driver %s should migrate to the new behaviour.\n",
			dev->device->driver->name);
	/* old behaviour: only free queue arrays */
	dev->data->nb_rx_queues = 0;
	rte_free(dev->data->rx_queues);
	dev->data->rx_queues = NULL;
	dev->data->nb_tx_queues = 0;
	rte_free(dev->data->tx_queues);
	dev->data->tx_queues = NULL;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	rte_eth_dev_stop(port_id);
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int __rte_experimental
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);

	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;
	void **rxq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
			mp->name, (int)mp->private_data_size,
			(int)sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		RTE_ETHDEV_LOG(ERR,
			"%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
			mp->name, (int)mbp_buf_size,
			(int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
			(int)RTE_PKTMBUF_HEADROOM,
			(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}

	/* Use default specified by driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If driver default is also zero, fall back on EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
		(dev->data->rx_queue_state[rx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	rxq = dev->data->rx_queues;
	if (rxq[rx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
		rxq[rx_queue_id] = NULL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, rx_queue_id, local_conf.offloads,
			dev_info.rx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, &local_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return eth_err(port_id, ret);
}

int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	void **txq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Use default specified by driver, if nb_tx_desc is zero */
	if (nb_tx_desc == 0) {
		nb_tx_desc = dev_info.default_txportconf.ring_size;
		/* If driver default is zero, fall back on EAL default */
		if (nb_tx_desc == 0)
			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	}
	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
			dev_info.tx_desc_lim.nb_min,
			dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
		(dev->data->tx_queue_state[tx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	txq = dev->data->tx_queues;
	if (txq[tx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
		txq[tx_queue_id] = NULL;
	}

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	local_conf = *tx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, tx_queue_id, local_conf.offloads,
			dev_info.tx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata __rte_unused)
{
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata)
{
	uint64_t *count = userdata;
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);

	*count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn cbfn, void *userdata)
{
	buffer->error_callback = cbfn;
	buffer->error_userdata = userdata;
	return 0;
}

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
	int ret = 0;

	if (buffer == NULL)
		return -EINVAL;

	buffer->size = size;
	if (buffer->error_callback == NULL) {
		ret = rte_eth_tx_buffer_set_err_callback(
			buffer, rte_eth_tx_buffer_drop_callback, NULL);
	}

	return ret;
}
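
/*
 * Illustrative usage of the Tx buffering helpers above (a sketch, not part of
 * the library itself): allocate a buffer for up to 32 packets, count packets
 * dropped on flush failure, then buffer and flush packets. The
 * RTE_ETH_TX_BUFFER_SIZE() macro and the rte_eth_tx_buffer()/
 * rte_eth_tx_buffer_flush() inline helpers are declared in rte_ethdev.h;
 * "mbuf" stands for a hypothetical packet to transmit.
 *
 *	static uint64_t dropped;
 *	struct rte_eth_dev_tx_buffer *buffer;
 *
 *	buffer = rte_zmalloc("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *	if (buffer == NULL)
 *		return;
 *	rte_eth_tx_buffer_init(buffer, 32);
 *	rte_eth_tx_buffer_set_err_callback(buffer,
 *			rte_eth_tx_buffer_count_callback, &dropped);
 *	...
 *	rte_eth_tx_buffer(port_id, 0, buffer, mbuf);
 *	rte_eth_tx_buffer_flush(port_id, 0, buffer);
 */
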
int
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	/* Validate Input Data. Bail if not valid or not supported. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);

	/* Call driver to free pending mbufs. */
	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
					       free_cnt);
	return eth_err(port_id, ret);
}

void
rte_eth_promiscuous_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
	(*dev->dev_ops->promiscuous_enable)(dev);
	dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
	dev->data->promiscuous = 0;
	(*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
	(*dev->dev_ops->allmulticast_enable)(dev);
	dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
	dev->data->all_multicast = 0;
	(*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->all_multicast;
}

void
rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc &&
	    dev->data->dev_started)
		rte_eth_linkstatus_get(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}

void
rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc &&
	    dev->data->dev_started)
		rte_eth_linkstatus_get(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}

int
rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
}
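
/*
 * Illustrative basic statistics polling (a sketch, not part of the library
 * itself), using only fields listed in rte_stats_strings above:
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64 "\n",
 *			stats.ipackets, stats.opackets, stats.imissed);
 *	rte_eth_stats_reset(port_id);
 */
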
rte_eth_dev *dev; 1955 1956 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1957 dev = &rte_eth_devices[port_id]; 1958 1959 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 1960 (*dev->dev_ops->stats_reset)(dev); 1961 dev->data->rx_mbuf_alloc_failed = 0; 1962 1963 return 0; 1964 } 1965 1966 static inline int 1967 get_xstats_basic_count(struct rte_eth_dev *dev) 1968 { 1969 uint16_t nb_rxqs, nb_txqs; 1970 int count; 1971 1972 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 1973 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 1974 1975 count = RTE_NB_STATS; 1976 count += nb_rxqs * RTE_NB_RXQ_STATS; 1977 count += nb_txqs * RTE_NB_TXQ_STATS; 1978 1979 return count; 1980 } 1981 1982 static int 1983 get_xstats_count(uint16_t port_id) 1984 { 1985 struct rte_eth_dev *dev; 1986 int count; 1987 1988 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1989 dev = &rte_eth_devices[port_id]; 1990 if (dev->dev_ops->xstats_get_names_by_id != NULL) { 1991 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL, 1992 NULL, 0); 1993 if (count < 0) 1994 return eth_err(port_id, count); 1995 } 1996 if (dev->dev_ops->xstats_get_names != NULL) { 1997 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 1998 if (count < 0) 1999 return eth_err(port_id, count); 2000 } else 2001 count = 0; 2002 2003 2004 count += get_xstats_basic_count(dev); 2005 2006 return count; 2007 } 2008 2009 int 2010 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2011 uint64_t *id) 2012 { 2013 int cnt_xstats, idx_xstat; 2014 2015 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2016 2017 if (!id) { 2018 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n"); 2019 return -ENOMEM; 2020 } 2021 2022 if (!xstat_name) { 2023 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n"); 2024 return -ENOMEM; 2025 } 2026 2027 /* Get count */ 2028 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2029 if (cnt_xstats < 0) { 2030 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2031 return -ENODEV; 2032 } 2033 2034 /* Get id-name lookup table */ 2035 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2036 2037 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2038 port_id, xstats_names, cnt_xstats, NULL)) { 2039 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2040 return -1; 2041 } 2042 2043 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2044 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2045 *id = idx_xstat; 2046 return 0; 2047 }; 2048 } 2049 2050 return -EINVAL; 2051 } 2052 2053 /* retrieve basic stats names */ 2054 static int 2055 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev, 2056 struct rte_eth_xstat_name *xstats_names) 2057 { 2058 int cnt_used_entries = 0; 2059 uint32_t idx, id_queue; 2060 uint16_t num_q; 2061 2062 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2063 snprintf(xstats_names[cnt_used_entries].name, 2064 sizeof(xstats_names[0].name), 2065 "%s", rte_stats_strings[idx].name); 2066 cnt_used_entries++; 2067 } 2068 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2069 for (id_queue = 0; id_queue < num_q; id_queue++) { 2070 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2071 snprintf(xstats_names[cnt_used_entries].name, 2072 sizeof(xstats_names[0].name), 2073 "rx_q%u%s", 2074 id_queue, rte_rxq_stats_strings[idx].name); 2075 cnt_used_entries++; 2076 } 2077 2078 } 2079 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2080 for (id_queue = 0; id_queue < num_q; id_queue++) { 
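/* Tx queue counters use the same "tx_q<queue><name>" naming scheme as the Rx loop above. */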
2081 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 2082 snprintf(xstats_names[cnt_used_entries].name, 2083 sizeof(xstats_names[0].name), 2084 "tx_q%u%s", 2085 id_queue, rte_txq_stats_strings[idx].name); 2086 cnt_used_entries++; 2087 } 2088 } 2089 return cnt_used_entries; 2090 } 2091 2092 /* retrieve ethdev extended statistics names */ 2093 int 2094 rte_eth_xstats_get_names_by_id(uint16_t port_id, 2095 struct rte_eth_xstat_name *xstats_names, unsigned int size, 2096 uint64_t *ids) 2097 { 2098 struct rte_eth_xstat_name *xstats_names_copy; 2099 unsigned int no_basic_stat_requested = 1; 2100 unsigned int no_ext_stat_requested = 1; 2101 unsigned int expected_entries; 2102 unsigned int basic_count; 2103 struct rte_eth_dev *dev; 2104 unsigned int i; 2105 int ret; 2106 2107 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2108 dev = &rte_eth_devices[port_id]; 2109 2110 basic_count = get_xstats_basic_count(dev); 2111 ret = get_xstats_count(port_id); 2112 if (ret < 0) 2113 return ret; 2114 expected_entries = (unsigned int)ret; 2115 2116 /* Return max number of stats if no ids given */ 2117 if (!ids) { 2118 if (!xstats_names) 2119 return expected_entries; 2120 else if (xstats_names && size < expected_entries) 2121 return expected_entries; 2122 } 2123 2124 if (ids && !xstats_names) 2125 return -EINVAL; 2126 2127 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 2128 uint64_t ids_copy[size]; 2129 2130 for (i = 0; i < size; i++) { 2131 if (ids[i] < basic_count) { 2132 no_basic_stat_requested = 0; 2133 break; 2134 } 2135 2136 /* 2137 * Convert ids to xstats ids that PMD knows. 2138 * ids known by user are basic + extended stats. 2139 */ 2140 ids_copy[i] = ids[i] - basic_count; 2141 } 2142 2143 if (no_basic_stat_requested) 2144 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 2145 xstats_names, ids_copy, size); 2146 } 2147 2148 /* Retrieve all stats */ 2149 if (!ids) { 2150 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 2151 expected_entries); 2152 if (num_stats < 0 || num_stats > (int)expected_entries) 2153 return num_stats; 2154 else 2155 return expected_entries; 2156 } 2157 2158 xstats_names_copy = calloc(expected_entries, 2159 sizeof(struct rte_eth_xstat_name)); 2160 2161 if (!xstats_names_copy) { 2162 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 2163 return -ENOMEM; 2164 } 2165 2166 if (ids) { 2167 for (i = 0; i < size; i++) { 2168 if (ids[i] >= basic_count) { 2169 no_ext_stat_requested = 0; 2170 break; 2171 } 2172 } 2173 } 2174 2175 /* Fill xstats_names_copy structure */ 2176 if (ids && no_ext_stat_requested) { 2177 rte_eth_basic_stats_get_names(dev, xstats_names_copy); 2178 } else { 2179 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 2180 expected_entries); 2181 if (ret < 0) { 2182 free(xstats_names_copy); 2183 return ret; 2184 } 2185 } 2186 2187 /* Filter stats */ 2188 for (i = 0; i < size; i++) { 2189 if (ids[i] >= expected_entries) { 2190 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 2191 free(xstats_names_copy); 2192 return -1; 2193 } 2194 xstats_names[i] = xstats_names_copy[ids[i]]; 2195 } 2196 2197 free(xstats_names_copy); 2198 return size; 2199 } 2200 2201 int 2202 rte_eth_xstats_get_names(uint16_t port_id, 2203 struct rte_eth_xstat_name *xstats_names, 2204 unsigned int size) 2205 { 2206 struct rte_eth_dev *dev; 2207 int cnt_used_entries; 2208 int cnt_expected_entries; 2209 int cnt_driver_entries; 2210 2211 cnt_expected_entries = get_xstats_count(port_id); 2212 if (xstats_names == NULL || cnt_expected_entries < 0 || 2213 (int)size < 
cnt_expected_entries) 2214 return cnt_expected_entries; 2215 2216 /* port_id checked in get_xstats_count() */ 2217 dev = &rte_eth_devices[port_id]; 2218 2219 cnt_used_entries = rte_eth_basic_stats_get_names( 2220 dev, xstats_names); 2221 2222 if (dev->dev_ops->xstats_get_names != NULL) { 2223 /* If there are any driver-specific xstats, append them 2224 * to end of list. 2225 */ 2226 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 2227 dev, 2228 xstats_names + cnt_used_entries, 2229 size - cnt_used_entries); 2230 if (cnt_driver_entries < 0) 2231 return eth_err(port_id, cnt_driver_entries); 2232 cnt_used_entries += cnt_driver_entries; 2233 } 2234 2235 return cnt_used_entries; 2236 } 2237 2238 2239 static int 2240 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 2241 { 2242 struct rte_eth_dev *dev; 2243 struct rte_eth_stats eth_stats; 2244 unsigned int count = 0, i, q; 2245 uint64_t val, *stats_ptr; 2246 uint16_t nb_rxqs, nb_txqs; 2247 int ret; 2248 2249 ret = rte_eth_stats_get(port_id, &eth_stats); 2250 if (ret < 0) 2251 return ret; 2252 2253 dev = &rte_eth_devices[port_id]; 2254 2255 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2256 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2257 2258 /* global stats */ 2259 for (i = 0; i < RTE_NB_STATS; i++) { 2260 stats_ptr = RTE_PTR_ADD(&eth_stats, 2261 rte_stats_strings[i].offset); 2262 val = *stats_ptr; 2263 xstats[count++].value = val; 2264 } 2265 2266 /* per-rxq stats */ 2267 for (q = 0; q < nb_rxqs; q++) { 2268 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 2269 stats_ptr = RTE_PTR_ADD(&eth_stats, 2270 rte_rxq_stats_strings[i].offset + 2271 q * sizeof(uint64_t)); 2272 val = *stats_ptr; 2273 xstats[count++].value = val; 2274 } 2275 } 2276 2277 /* per-txq stats */ 2278 for (q = 0; q < nb_txqs; q++) { 2279 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 2280 stats_ptr = RTE_PTR_ADD(&eth_stats, 2281 rte_txq_stats_strings[i].offset + 2282 q * sizeof(uint64_t)); 2283 val = *stats_ptr; 2284 xstats[count++].value = val; 2285 } 2286 } 2287 return count; 2288 } 2289 2290 /* retrieve ethdev extended statistics */ 2291 int 2292 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 2293 uint64_t *values, unsigned int size) 2294 { 2295 unsigned int no_basic_stat_requested = 1; 2296 unsigned int no_ext_stat_requested = 1; 2297 unsigned int num_xstats_filled; 2298 unsigned int basic_count; 2299 uint16_t expected_entries; 2300 struct rte_eth_dev *dev; 2301 unsigned int i; 2302 int ret; 2303 2304 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2305 ret = get_xstats_count(port_id); 2306 if (ret < 0) 2307 return ret; 2308 expected_entries = (uint16_t)ret; 2309 struct rte_eth_xstat xstats[expected_entries]; 2310 dev = &rte_eth_devices[port_id]; 2311 basic_count = get_xstats_basic_count(dev); 2312 2313 /* Return max number of stats if no ids given */ 2314 if (!ids) { 2315 if (!values) 2316 return expected_entries; 2317 else if (values && size < expected_entries) 2318 return expected_entries; 2319 } 2320 2321 if (ids && !values) 2322 return -EINVAL; 2323 2324 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 2325 unsigned int basic_count = get_xstats_basic_count(dev); 2326 uint64_t ids_copy[size]; 2327 2328 for (i = 0; i < size; i++) { 2329 if (ids[i] < basic_count) { 2330 no_basic_stat_requested = 0; 2331 break; 2332 } 2333 2334 /* 2335 * Convert ids to xstats ids that PMD knows. 2336 * ids known by user are basic + extended stats.
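 * For example, if the basic count is 8, a user-visible id of 10 maps to driver xstats id 2.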
2337 */ 2338 ids_copy[i] = ids[i] - basic_count; 2339 } 2340 2341 if (no_basic_stat_requested) 2342 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 2343 values, size); 2344 } 2345 2346 if (ids) { 2347 for (i = 0; i < size; i++) { 2348 if (ids[i] >= basic_count) { 2349 no_ext_stat_requested = 0; 2350 break; 2351 } 2352 } 2353 } 2354 2355 /* Fill the xstats structure */ 2356 if (ids && no_ext_stat_requested) 2357 ret = rte_eth_basic_stats_get(port_id, xstats); 2358 else 2359 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 2360 2361 if (ret < 0) 2362 return ret; 2363 num_xstats_filled = (unsigned int)ret; 2364 2365 /* Return all stats */ 2366 if (!ids) { 2367 for (i = 0; i < num_xstats_filled; i++) 2368 values[i] = xstats[i].value; 2369 return expected_entries; 2370 } 2371 2372 /* Filter stats */ 2373 for (i = 0; i < size; i++) { 2374 if (ids[i] >= expected_entries) { 2375 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 2376 return -1; 2377 } 2378 values[i] = xstats[ids[i]].value; 2379 } 2380 return size; 2381 } 2382 2383 int 2384 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 2385 unsigned int n) 2386 { 2387 struct rte_eth_dev *dev; 2388 unsigned int count = 0, i; 2389 signed int xcount = 0; 2390 uint16_t nb_rxqs, nb_txqs; 2391 int ret; 2392 2393 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 2394 2395 dev = &rte_eth_devices[port_id]; 2396 2397 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2398 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2399 2400 /* Return generic statistics */ 2401 count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) + 2402 (nb_txqs * RTE_NB_TXQ_STATS); 2403 2404 /* implemented by the driver */ 2405 if (dev->dev_ops->xstats_get != NULL) { 2406 /* Retrieve the xstats from the driver at the end of the 2407 * xstats struct. 2408 */ 2409 xcount = (*dev->dev_ops->xstats_get)(dev, 2410 xstats ? xstats + count : NULL, 2411 (n > count) ? 
n - count : 0); 2412 2413 if (xcount < 0) 2414 return eth_err(port_id, xcount); 2415 } 2416 2417 if (n < count + xcount || xstats == NULL) 2418 return count + xcount; 2419 2420 /* now fill the xstats structure */ 2421 ret = rte_eth_basic_stats_get(port_id, xstats); 2422 if (ret < 0) 2423 return ret; 2424 count = ret; 2425 2426 for (i = 0; i < count; i++) 2427 xstats[i].id = i; 2428 /* add an offset to driver-specific stats */ 2429 for ( ; i < count + xcount; i++) 2430 xstats[i].id += count; 2431 2432 return count + xcount; 2433 } 2434 2435 /* reset ethdev extended statistics */ 2436 void 2437 rte_eth_xstats_reset(uint16_t port_id) 2438 { 2439 struct rte_eth_dev *dev; 2440 2441 RTE_ETH_VALID_PORTID_OR_RET(port_id); 2442 dev = &rte_eth_devices[port_id]; 2443 2444 /* implemented by the driver */ 2445 if (dev->dev_ops->xstats_reset != NULL) { 2446 (*dev->dev_ops->xstats_reset)(dev); 2447 return; 2448 } 2449 2450 /* fallback to default */ 2451 rte_eth_stats_reset(port_id); 2452 } 2453 2454 static int 2455 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx, 2456 uint8_t is_rx) 2457 { 2458 struct rte_eth_dev *dev; 2459 2460 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2461 2462 dev = &rte_eth_devices[port_id]; 2463 2464 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 2465 2466 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 2467 return -EINVAL; 2468 2469 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 2470 return -EINVAL; 2471 2472 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 2473 return -EINVAL; 2474 2475 return (*dev->dev_ops->queue_stats_mapping_set) 2476 (dev, queue_id, stat_idx, is_rx); 2477 } 2478 2479 2480 int 2481 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 2482 uint8_t stat_idx) 2483 { 2484 return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id, 2485 stat_idx, STAT_QMAP_TX)); 2486 } 2487 2488 2489 int 2490 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 2491 uint8_t stat_idx) 2492 { 2493 return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id, 2494 stat_idx, STAT_QMAP_RX)); 2495 } 2496 2497 int 2498 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 2499 { 2500 struct rte_eth_dev *dev; 2501 2502 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2503 dev = &rte_eth_devices[port_id]; 2504 2505 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 2506 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 2507 fw_version, fw_size)); 2508 } 2509 2510 void 2511 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 2512 { 2513 struct rte_eth_dev *dev; 2514 const struct rte_eth_desc_lim lim = { 2515 .nb_max = UINT16_MAX, 2516 .nb_min = 0, 2517 .nb_align = 1, 2518 }; 2519 2520 RTE_ETH_VALID_PORTID_OR_RET(port_id); 2521 dev = &rte_eth_devices[port_id]; 2522 2523 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 2524 dev_info->rx_desc_lim = lim; 2525 dev_info->tx_desc_lim = lim; 2526 dev_info->device = dev->device; 2527 2528 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get); 2529 (*dev->dev_ops->dev_infos_get)(dev, dev_info); 2530 dev_info->driver_name = dev->device->driver->name; 2531 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 2532 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 2533 2534 dev_info->dev_flags = &dev->data->dev_flags; 2535 } 2536 2537 int 2538 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 2539 uint32_t *ptypes, int 
num) 2540 { 2541 int i, j; 2542 struct rte_eth_dev *dev; 2543 const uint32_t *all_ptypes; 2544 2545 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2546 dev = &rte_eth_devices[port_id]; 2547 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 2548 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 2549 2550 if (!all_ptypes) 2551 return 0; 2552 2553 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 2554 if (all_ptypes[i] & ptype_mask) { 2555 if (j < num) 2556 ptypes[j] = all_ptypes[i]; 2557 j++; 2558 } 2559 2560 return j; 2561 } 2562 2563 void 2564 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr) 2565 { 2566 struct rte_eth_dev *dev; 2567 2568 RTE_ETH_VALID_PORTID_OR_RET(port_id); 2569 dev = &rte_eth_devices[port_id]; 2570 ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 2571 } 2572 2573 2574 int 2575 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 2576 { 2577 struct rte_eth_dev *dev; 2578 2579 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2580 2581 dev = &rte_eth_devices[port_id]; 2582 *mtu = dev->data->mtu; 2583 return 0; 2584 } 2585 2586 int 2587 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 2588 { 2589 int ret; 2590 struct rte_eth_dev *dev; 2591 2592 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2593 dev = &rte_eth_devices[port_id]; 2594 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 2595 2596 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 2597 if (!ret) 2598 dev->data->mtu = mtu; 2599 2600 return eth_err(port_id, ret); 2601 } 2602 2603 int 2604 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 2605 { 2606 struct rte_eth_dev *dev; 2607 int ret; 2608 2609 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2610 dev = &rte_eth_devices[port_id]; 2611 if (!(dev->data->dev_conf.rxmode.offloads & 2612 DEV_RX_OFFLOAD_VLAN_FILTER)) { 2613 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n", 2614 port_id); 2615 return -ENOSYS; 2616 } 2617 2618 if (vlan_id > 4095) { 2619 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 2620 port_id, vlan_id); 2621 return -EINVAL; 2622 } 2623 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 2624 2625 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 2626 if (ret == 0) { 2627 struct rte_vlan_filter_conf *vfc; 2628 int vidx; 2629 int vbit; 2630 2631 vfc = &dev->data->vlan_filter_conf; 2632 vidx = vlan_id / 64; 2633 vbit = vlan_id % 64; 2634 2635 if (on) 2636 vfc->ids[vidx] |= UINT64_C(1) << vbit; 2637 else 2638 vfc->ids[vidx] &= ~(UINT64_C(1) << vbit); 2639 } 2640 2641 return eth_err(port_id, ret); 2642 } 2643 2644 int 2645 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 2646 int on) 2647 { 2648 struct rte_eth_dev *dev; 2649 2650 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2651 dev = &rte_eth_devices[port_id]; 2652 if (rx_queue_id >= dev->data->nb_rx_queues) { 2653 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 2654 return -EINVAL; 2655 } 2656 2657 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 2658 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 2659 2660 return 0; 2661 } 2662 2663 int 2664 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 2665 enum rte_vlan_type vlan_type, 2666 uint16_t tpid) 2667 { 2668 struct rte_eth_dev *dev; 2669 2670 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2671 dev = &rte_eth_devices[port_id]; 2672 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 2673 2674 return eth_err(port_id, 
(*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 2675 tpid)); 2676 } 2677 2678 int 2679 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 2680 { 2681 struct rte_eth_dev *dev; 2682 int ret = 0; 2683 int mask = 0; 2684 int cur, org = 0; 2685 uint64_t orig_offloads; 2686 2687 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2688 dev = &rte_eth_devices[port_id]; 2689 2690 /* save original values in case of failure */ 2691 orig_offloads = dev->data->dev_conf.rxmode.offloads; 2692 2693 /*check which option changed by application*/ 2694 cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD); 2695 org = !!(dev->data->dev_conf.rxmode.offloads & 2696 DEV_RX_OFFLOAD_VLAN_STRIP); 2697 if (cur != org) { 2698 if (cur) 2699 dev->data->dev_conf.rxmode.offloads |= 2700 DEV_RX_OFFLOAD_VLAN_STRIP; 2701 else 2702 dev->data->dev_conf.rxmode.offloads &= 2703 ~DEV_RX_OFFLOAD_VLAN_STRIP; 2704 mask |= ETH_VLAN_STRIP_MASK; 2705 } 2706 2707 cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD); 2708 org = !!(dev->data->dev_conf.rxmode.offloads & 2709 DEV_RX_OFFLOAD_VLAN_FILTER); 2710 if (cur != org) { 2711 if (cur) 2712 dev->data->dev_conf.rxmode.offloads |= 2713 DEV_RX_OFFLOAD_VLAN_FILTER; 2714 else 2715 dev->data->dev_conf.rxmode.offloads &= 2716 ~DEV_RX_OFFLOAD_VLAN_FILTER; 2717 mask |= ETH_VLAN_FILTER_MASK; 2718 } 2719 2720 cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD); 2721 org = !!(dev->data->dev_conf.rxmode.offloads & 2722 DEV_RX_OFFLOAD_VLAN_EXTEND); 2723 if (cur != org) { 2724 if (cur) 2725 dev->data->dev_conf.rxmode.offloads |= 2726 DEV_RX_OFFLOAD_VLAN_EXTEND; 2727 else 2728 dev->data->dev_conf.rxmode.offloads &= 2729 ~DEV_RX_OFFLOAD_VLAN_EXTEND; 2730 mask |= ETH_VLAN_EXTEND_MASK; 2731 } 2732 2733 /*no change*/ 2734 if (mask == 0) 2735 return ret; 2736 2737 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 2738 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 2739 if (ret) { 2740 /* hit an error restore original values */ 2741 dev->data->dev_conf.rxmode.offloads = orig_offloads; 2742 } 2743 2744 return eth_err(port_id, ret); 2745 } 2746 2747 int 2748 rte_eth_dev_get_vlan_offload(uint16_t port_id) 2749 { 2750 struct rte_eth_dev *dev; 2751 int ret = 0; 2752 2753 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2754 dev = &rte_eth_devices[port_id]; 2755 2756 if (dev->data->dev_conf.rxmode.offloads & 2757 DEV_RX_OFFLOAD_VLAN_STRIP) 2758 ret |= ETH_VLAN_STRIP_OFFLOAD; 2759 2760 if (dev->data->dev_conf.rxmode.offloads & 2761 DEV_RX_OFFLOAD_VLAN_FILTER) 2762 ret |= ETH_VLAN_FILTER_OFFLOAD; 2763 2764 if (dev->data->dev_conf.rxmode.offloads & 2765 DEV_RX_OFFLOAD_VLAN_EXTEND) 2766 ret |= ETH_VLAN_EXTEND_OFFLOAD; 2767 2768 return ret; 2769 } 2770 2771 int 2772 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 2773 { 2774 struct rte_eth_dev *dev; 2775 2776 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2777 dev = &rte_eth_devices[port_id]; 2778 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 2779 2780 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 2781 } 2782 2783 int 2784 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 2785 { 2786 struct rte_eth_dev *dev; 2787 2788 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2789 dev = &rte_eth_devices[port_id]; 2790 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 2791 memset(fc_conf, 0, sizeof(*fc_conf)); 2792 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 2793 } 2794 2795 int 2796 rte_eth_dev_flow_ctrl_set(uint16_t 
port_id, struct rte_eth_fc_conf *fc_conf) 2797 { 2798 struct rte_eth_dev *dev; 2799 2800 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2801 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 2802 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 2803 return -EINVAL; 2804 } 2805 2806 dev = &rte_eth_devices[port_id]; 2807 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 2808 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 2809 } 2810 2811 int 2812 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 2813 struct rte_eth_pfc_conf *pfc_conf) 2814 { 2815 struct rte_eth_dev *dev; 2816 2817 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2818 if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) { 2819 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 2820 return -EINVAL; 2821 } 2822 2823 dev = &rte_eth_devices[port_id]; 2824 /* High water, low water validation are device specific */ 2825 if (*dev->dev_ops->priority_flow_ctrl_set) 2826 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 2827 (dev, pfc_conf)); 2828 return -ENOTSUP; 2829 } 2830 2831 static int 2832 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 2833 uint16_t reta_size) 2834 { 2835 uint16_t i, num; 2836 2837 if (!reta_conf) 2838 return -EINVAL; 2839 2840 num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE; 2841 for (i = 0; i < num; i++) { 2842 if (reta_conf[i].mask) 2843 return 0; 2844 } 2845 2846 return -EINVAL; 2847 } 2848 2849 static int 2850 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 2851 uint16_t reta_size, 2852 uint16_t max_rxq) 2853 { 2854 uint16_t i, idx, shift; 2855 2856 if (!reta_conf) 2857 return -EINVAL; 2858 2859 if (max_rxq == 0) { 2860 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 2861 return -EINVAL; 2862 } 2863 2864 for (i = 0; i < reta_size; i++) { 2865 idx = i / RTE_RETA_GROUP_SIZE; 2866 shift = i % RTE_RETA_GROUP_SIZE; 2867 if ((reta_conf[idx].mask & (1ULL << shift)) && 2868 (reta_conf[idx].reta[shift] >= max_rxq)) { 2869 RTE_ETHDEV_LOG(ERR, 2870 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 2871 idx, shift, 2872 reta_conf[idx].reta[shift], max_rxq); 2873 return -EINVAL; 2874 } 2875 } 2876 2877 return 0; 2878 } 2879 2880 int 2881 rte_eth_dev_rss_reta_update(uint16_t port_id, 2882 struct rte_eth_rss_reta_entry64 *reta_conf, 2883 uint16_t reta_size) 2884 { 2885 struct rte_eth_dev *dev; 2886 int ret; 2887 2888 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2889 /* Check mask bits */ 2890 ret = rte_eth_check_reta_mask(reta_conf, reta_size); 2891 if (ret < 0) 2892 return ret; 2893 2894 dev = &rte_eth_devices[port_id]; 2895 2896 /* Check entry value */ 2897 ret = rte_eth_check_reta_entry(reta_conf, reta_size, 2898 dev->data->nb_rx_queues); 2899 if (ret < 0) 2900 return ret; 2901 2902 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 2903 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 2904 reta_size)); 2905 } 2906 2907 int 2908 rte_eth_dev_rss_reta_query(uint16_t port_id, 2909 struct rte_eth_rss_reta_entry64 *reta_conf, 2910 uint16_t reta_size) 2911 { 2912 struct rte_eth_dev *dev; 2913 int ret; 2914 2915 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2916 2917 /* Check mask bits */ 2918 ret = rte_eth_check_reta_mask(reta_conf, reta_size); 2919 if (ret < 0) 2920 return ret; 2921 2922 dev = &rte_eth_devices[port_id]; 2923 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 2924 return 
eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 2925 reta_size)); 2926 } 2927 2928 int 2929 rte_eth_dev_rss_hash_update(uint16_t port_id, 2930 struct rte_eth_rss_conf *rss_conf) 2931 { 2932 struct rte_eth_dev *dev; 2933 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 2934 2935 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2936 dev = &rte_eth_devices[port_id]; 2937 rte_eth_dev_info_get(port_id, &dev_info); 2938 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 2939 dev_info.flow_type_rss_offloads) { 2940 RTE_ETHDEV_LOG(ERR, 2941 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 2942 port_id, rss_conf->rss_hf, 2943 dev_info.flow_type_rss_offloads); 2944 return -EINVAL; 2945 } 2946 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 2947 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 2948 rss_conf)); 2949 } 2950 2951 int 2952 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 2953 struct rte_eth_rss_conf *rss_conf) 2954 { 2955 struct rte_eth_dev *dev; 2956 2957 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2958 dev = &rte_eth_devices[port_id]; 2959 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 2960 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 2961 rss_conf)); 2962 } 2963 2964 int 2965 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 2966 struct rte_eth_udp_tunnel *udp_tunnel) 2967 { 2968 struct rte_eth_dev *dev; 2969 2970 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2971 if (udp_tunnel == NULL) { 2972 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n"); 2973 return -EINVAL; 2974 } 2975 2976 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 2977 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 2978 return -EINVAL; 2979 } 2980 2981 dev = &rte_eth_devices[port_id]; 2982 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 2983 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 2984 udp_tunnel)); 2985 } 2986 2987 int 2988 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 2989 struct rte_eth_udp_tunnel *udp_tunnel) 2990 { 2991 struct rte_eth_dev *dev; 2992 2993 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2994 dev = &rte_eth_devices[port_id]; 2995 2996 if (udp_tunnel == NULL) { 2997 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n"); 2998 return -EINVAL; 2999 } 3000 3001 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 3002 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 3003 return -EINVAL; 3004 } 3005 3006 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 3007 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 3008 udp_tunnel)); 3009 } 3010 3011 int 3012 rte_eth_led_on(uint16_t port_id) 3013 { 3014 struct rte_eth_dev *dev; 3015 3016 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3017 dev = &rte_eth_devices[port_id]; 3018 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 3019 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 3020 } 3021 3022 int 3023 rte_eth_led_off(uint16_t port_id) 3024 { 3025 struct rte_eth_dev *dev; 3026 3027 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3028 dev = &rte_eth_devices[port_id]; 3029 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 3030 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 3031 } 3032 3033 /* 3034 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 3035 * an empty spot. 
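 * Returns -1 if the address is not present in the array.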
3036 */ 3037 static int 3038 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr) 3039 { 3040 struct rte_eth_dev_info dev_info; 3041 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 3042 unsigned i; 3043 3044 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3045 rte_eth_dev_info_get(port_id, &dev_info); 3046 3047 for (i = 0; i < dev_info.max_mac_addrs; i++) 3048 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0) 3049 return i; 3050 3051 return -1; 3052 } 3053 3054 static const struct ether_addr null_mac_addr; 3055 3056 int 3057 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr, 3058 uint32_t pool) 3059 { 3060 struct rte_eth_dev *dev; 3061 int index; 3062 uint64_t pool_mask; 3063 int ret; 3064 3065 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3066 dev = &rte_eth_devices[port_id]; 3067 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 3068 3069 if (is_zero_ether_addr(addr)) { 3070 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 3071 port_id); 3072 return -EINVAL; 3073 } 3074 if (pool >= ETH_64_POOLS) { 3075 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1); 3076 return -EINVAL; 3077 } 3078 3079 index = get_mac_addr_index(port_id, addr); 3080 if (index < 0) { 3081 index = get_mac_addr_index(port_id, &null_mac_addr); 3082 if (index < 0) { 3083 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 3084 port_id); 3085 return -ENOSPC; 3086 } 3087 } else { 3088 pool_mask = dev->data->mac_pool_sel[index]; 3089 3090 /* Check if both MAC address and pool is already there, and do nothing */ 3091 if (pool_mask & (1ULL << pool)) 3092 return 0; 3093 } 3094 3095 /* Update NIC */ 3096 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 3097 3098 if (ret == 0) { 3099 /* Update address in NIC data structure */ 3100 ether_addr_copy(addr, &dev->data->mac_addrs[index]); 3101 3102 /* Update pool bitmap in NIC data structure */ 3103 dev->data->mac_pool_sel[index] |= (1ULL << pool); 3104 } 3105 3106 return eth_err(port_id, ret); 3107 } 3108 3109 int 3110 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr) 3111 { 3112 struct rte_eth_dev *dev; 3113 int index; 3114 3115 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3116 dev = &rte_eth_devices[port_id]; 3117 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); 3118 3119 index = get_mac_addr_index(port_id, addr); 3120 if (index == 0) { 3121 RTE_ETHDEV_LOG(ERR, 3122 "Port %u: Cannot remove default MAC address\n", 3123 port_id); 3124 return -EADDRINUSE; 3125 } else if (index < 0) 3126 return 0; /* Do nothing if address wasn't found */ 3127 3128 /* Update NIC */ 3129 (*dev->dev_ops->mac_addr_remove)(dev, index); 3130 3131 /* Update address in NIC data structure */ 3132 ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 3133 3134 /* reset pool bitmap */ 3135 dev->data->mac_pool_sel[index] = 0; 3136 3137 return 0; 3138 } 3139 3140 int 3141 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr) 3142 { 3143 struct rte_eth_dev *dev; 3144 int ret; 3145 3146 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3147 3148 if (!is_valid_assigned_ether_addr(addr)) 3149 return -EINVAL; 3150 3151 dev = &rte_eth_devices[port_id]; 3152 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 3153 3154 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 3155 if (ret < 0) 3156 return ret; 3157 3158 /* Update default address in NIC data structure */ 3159 ether_addr_copy(addr, &dev->data->mac_addrs[0]); 3160 
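/* Keep slot 0 of mac_addrs in sync: it always holds the port's default MAC address. */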
3161 return 0; 3162 } 3163 3164 3165 /* 3166 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 3167 * an empty spot. 3168 */ 3169 static int 3170 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr) 3171 { 3172 struct rte_eth_dev_info dev_info; 3173 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 3174 unsigned i; 3175 3176 rte_eth_dev_info_get(port_id, &dev_info); 3177 if (!dev->data->hash_mac_addrs) 3178 return -1; 3179 3180 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 3181 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 3182 ETHER_ADDR_LEN) == 0) 3183 return i; 3184 3185 return -1; 3186 } 3187 3188 int 3189 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr, 3190 uint8_t on) 3191 { 3192 int index; 3193 int ret; 3194 struct rte_eth_dev *dev; 3195 3196 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3197 3198 dev = &rte_eth_devices[port_id]; 3199 if (is_zero_ether_addr(addr)) { 3200 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 3201 port_id); 3202 return -EINVAL; 3203 } 3204 3205 index = get_hash_mac_addr_index(port_id, addr); 3206 /* Check if it's already there, and do nothing */ 3207 if ((index >= 0) && on) 3208 return 0; 3209 3210 if (index < 0) { 3211 if (!on) { 3212 RTE_ETHDEV_LOG(ERR, 3213 "Port %u: the MAC address was not set in UTA\n", 3214 port_id); 3215 return -EINVAL; 3216 } 3217 3218 index = get_hash_mac_addr_index(port_id, &null_mac_addr); 3219 if (index < 0) { 3220 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 3221 port_id); 3222 return -ENOSPC; 3223 } 3224 } 3225 3226 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 3227 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 3228 if (ret == 0) { 3229 /* Update address in NIC data structure */ 3230 if (on) 3231 ether_addr_copy(addr, 3232 &dev->data->hash_mac_addrs[index]); 3233 else 3234 ether_addr_copy(&null_mac_addr, 3235 &dev->data->hash_mac_addrs[index]); 3236 } 3237 3238 return eth_err(port_id, ret); 3239 } 3240 3241 int 3242 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 3243 { 3244 struct rte_eth_dev *dev; 3245 3246 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3247 3248 dev = &rte_eth_devices[port_id]; 3249 3250 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 3251 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 3252 on)); 3253 } 3254 3255 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 3256 uint16_t tx_rate) 3257 { 3258 struct rte_eth_dev *dev; 3259 struct rte_eth_dev_info dev_info; 3260 struct rte_eth_link link; 3261 3262 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3263 3264 dev = &rte_eth_devices[port_id]; 3265 rte_eth_dev_info_get(port_id, &dev_info); 3266 link = dev->data->dev_link; 3267 3268 if (queue_idx > dev_info.max_tx_queues) { 3269 RTE_ETHDEV_LOG(ERR, 3270 "Set queue rate limit:port %u: invalid queue id=%u\n", 3271 port_id, queue_idx); 3272 return -EINVAL; 3273 } 3274 3275 if (tx_rate > link.link_speed) { 3276 RTE_ETHDEV_LOG(ERR, 3277 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 3278 tx_rate, link.link_speed); 3279 return -EINVAL; 3280 } 3281 3282 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); 3283 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 3284 queue_idx, tx_rate)); 3285 } 3286 3287 int 3288 rte_eth_mirror_rule_set(uint16_t port_id, 3289 struct rte_eth_mirror_conf *mirror_conf, 3290 uint8_t rule_id, uint8_t 
on) 3291 { 3292 struct rte_eth_dev *dev; 3293 3294 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3295 if (mirror_conf->rule_type == 0) { 3296 RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n"); 3297 return -EINVAL; 3298 } 3299 3300 if (mirror_conf->dst_pool >= ETH_64_POOLS) { 3301 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n", 3302 ETH_64_POOLS - 1); 3303 return -EINVAL; 3304 } 3305 3306 if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP | 3307 ETH_MIRROR_VIRTUAL_POOL_DOWN)) && 3308 (mirror_conf->pool_mask == 0)) { 3309 RTE_ETHDEV_LOG(ERR, 3310 "Invalid mirror pool, pool mask can not be 0\n"); 3311 return -EINVAL; 3312 } 3313 3314 if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) && 3315 mirror_conf->vlan.vlan_mask == 0) { 3316 RTE_ETHDEV_LOG(ERR, 3317 "Invalid vlan mask, vlan mask can not be 0\n"); 3318 return -EINVAL; 3319 } 3320 3321 dev = &rte_eth_devices[port_id]; 3322 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP); 3323 3324 return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev, 3325 mirror_conf, rule_id, on)); 3326 } 3327 3328 int 3329 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id) 3330 { 3331 struct rte_eth_dev *dev; 3332 3333 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3334 3335 dev = &rte_eth_devices[port_id]; 3336 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP); 3337 3338 return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev, 3339 rule_id)); 3340 } 3341 3342 RTE_INIT(eth_dev_init_cb_lists) 3343 { 3344 int i; 3345 3346 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 3347 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 3348 } 3349 3350 int 3351 rte_eth_dev_callback_register(uint16_t port_id, 3352 enum rte_eth_event_type event, 3353 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 3354 { 3355 struct rte_eth_dev *dev; 3356 struct rte_eth_dev_callback *user_cb; 3357 uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */ 3358 uint16_t last_port; 3359 3360 if (!cb_fn) 3361 return -EINVAL; 3362 3363 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 3364 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 3365 return -EINVAL; 3366 } 3367 3368 if (port_id == RTE_ETH_ALL) { 3369 next_port = 0; 3370 last_port = RTE_MAX_ETHPORTS - 1; 3371 } else { 3372 next_port = last_port = port_id; 3373 } 3374 3375 rte_spinlock_lock(&rte_eth_dev_cb_lock); 3376 3377 do { 3378 dev = &rte_eth_devices[next_port]; 3379 3380 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 3381 if (user_cb->cb_fn == cb_fn && 3382 user_cb->cb_arg == cb_arg && 3383 user_cb->event == event) { 3384 break; 3385 } 3386 } 3387 3388 /* create a new callback. 
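A NULL user_cb at this point means no registered entry matched this (cb_fn, cb_arg, event) tuple.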
*/ 3389 if (user_cb == NULL) { 3390 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 3391 sizeof(struct rte_eth_dev_callback), 0); 3392 if (user_cb != NULL) { 3393 user_cb->cb_fn = cb_fn; 3394 user_cb->cb_arg = cb_arg; 3395 user_cb->event = event; 3396 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 3397 user_cb, next); 3398 } else { 3399 rte_spinlock_unlock(&rte_eth_dev_cb_lock); 3400 rte_eth_dev_callback_unregister(port_id, event, 3401 cb_fn, cb_arg); 3402 return -ENOMEM; 3403 } 3404 3405 } 3406 } while (++next_port <= last_port); 3407 3408 rte_spinlock_unlock(&rte_eth_dev_cb_lock); 3409 return 0; 3410 } 3411 3412 int 3413 rte_eth_dev_callback_unregister(uint16_t port_id, 3414 enum rte_eth_event_type event, 3415 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 3416 { 3417 int ret; 3418 struct rte_eth_dev *dev; 3419 struct rte_eth_dev_callback *cb, *next; 3420 uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */ 3421 uint16_t last_port; 3422 3423 if (!cb_fn) 3424 return -EINVAL; 3425 3426 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 3427 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 3428 return -EINVAL; 3429 } 3430 3431 if (port_id == RTE_ETH_ALL) { 3432 next_port = 0; 3433 last_port = RTE_MAX_ETHPORTS - 1; 3434 } else { 3435 next_port = last_port = port_id; 3436 } 3437 3438 rte_spinlock_lock(&rte_eth_dev_cb_lock); 3439 3440 do { 3441 dev = &rte_eth_devices[next_port]; 3442 ret = 0; 3443 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 3444 cb = next) { 3445 3446 next = TAILQ_NEXT(cb, next); 3447 3448 if (cb->cb_fn != cb_fn || cb->event != event || 3449 (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 3450 continue; 3451 3452 /* 3453 * if this callback is not executing right now, 3454 * then remove it. 3455 */ 3456 if (cb->active == 0) { 3457 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 3458 rte_free(cb); 3459 } else { 3460 ret = -EAGAIN; 3461 } 3462 } 3463 } while (++next_port <= last_port); 3464 3465 rte_spinlock_unlock(&rte_eth_dev_cb_lock); 3466 return ret; 3467 } 3468 3469 int 3470 _rte_eth_dev_callback_process(struct rte_eth_dev *dev, 3471 enum rte_eth_event_type event, void *ret_param) 3472 { 3473 struct rte_eth_dev_callback *cb_lst; 3474 struct rte_eth_dev_callback dev_cb; 3475 int rc = 0; 3476 3477 rte_spinlock_lock(&rte_eth_dev_cb_lock); 3478 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { 3479 if (cb_lst->cb_fn == NULL || cb_lst->event != event) 3480 continue; 3481 dev_cb = *cb_lst; 3482 cb_lst->active = 1; 3483 if (ret_param != NULL) 3484 dev_cb.ret_param = ret_param; 3485 3486 rte_spinlock_unlock(&rte_eth_dev_cb_lock); 3487 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, 3488 dev_cb.cb_arg, dev_cb.ret_param); 3489 rte_spinlock_lock(&rte_eth_dev_cb_lock); 3490 cb_lst->active = 0; 3491 } 3492 rte_spinlock_unlock(&rte_eth_dev_cb_lock); 3493 return rc; 3494 } 3495 3496 void 3497 rte_eth_dev_probing_finish(struct rte_eth_dev *dev) 3498 { 3499 if (dev == NULL) 3500 return; 3501 3502 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); 3503 3504 dev->state = RTE_ETH_DEV_ATTACHED; 3505 } 3506 3507 int 3508 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 3509 { 3510 uint32_t vec; 3511 struct rte_eth_dev *dev; 3512 struct rte_intr_handle *intr_handle; 3513 uint16_t qid; 3514 int rc; 3515 3516 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3517 3518 dev = &rte_eth_devices[port_id]; 3519 3520 if (!dev->intr_handle) { 3521 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 3522 return -ENOTSUP; 3523 } 3524 3525 
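/* Resolve the interrupt handle, then apply the epoll control operation to every Rx queue's interrupt vector. */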
intr_handle = dev->intr_handle; 3526 if (!intr_handle->intr_vec) { 3527 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 3528 return -EPERM; 3529 } 3530 3531 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 3532 vec = intr_handle->intr_vec[qid]; 3533 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 3534 if (rc && rc != -EEXIST) { 3535 RTE_ETHDEV_LOG(ERR, 3536 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 3537 port_id, qid, op, epfd, vec); 3538 } 3539 } 3540 3541 return 0; 3542 } 3543 3544 int __rte_experimental 3545 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 3546 { 3547 struct rte_intr_handle *intr_handle; 3548 struct rte_eth_dev *dev; 3549 unsigned int efd_idx; 3550 uint32_t vec; 3551 int fd; 3552 3553 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 3554 3555 dev = &rte_eth_devices[port_id]; 3556 3557 if (queue_id >= dev->data->nb_rx_queues) { 3558 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 3559 return -1; 3560 } 3561 3562 if (!dev->intr_handle) { 3563 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 3564 return -1; 3565 } 3566 3567 intr_handle = dev->intr_handle; 3568 if (!intr_handle->intr_vec) { 3569 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 3570 return -1; 3571 } 3572 3573 vec = intr_handle->intr_vec[queue_id]; 3574 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 3575 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 3576 fd = intr_handle->efds[efd_idx]; 3577 3578 return fd; 3579 } 3580 3581 const struct rte_memzone * 3582 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, 3583 uint16_t queue_id, size_t size, unsigned align, 3584 int socket_id) 3585 { 3586 char z_name[RTE_MEMZONE_NAMESIZE]; 3587 const struct rte_memzone *mz; 3588 3589 snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s", 3590 dev->data->port_id, queue_id, ring_name); 3591 3592 mz = rte_memzone_lookup(z_name); 3593 if (mz) 3594 return mz; 3595 3596 return rte_memzone_reserve_aligned(z_name, size, socket_id, 3597 RTE_MEMZONE_IOVA_CONTIG, align); 3598 } 3599 3600 int __rte_experimental 3601 rte_eth_dev_create(struct rte_device *device, const char *name, 3602 size_t priv_data_size, 3603 ethdev_bus_specific_init ethdev_bus_specific_init, 3604 void *bus_init_params, 3605 ethdev_init_t ethdev_init, void *init_params) 3606 { 3607 struct rte_eth_dev *ethdev; 3608 int retval; 3609 3610 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); 3611 3612 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 3613 ethdev = rte_eth_dev_allocate(name); 3614 if (!ethdev) 3615 return -ENODEV; 3616 3617 if (priv_data_size) { 3618 ethdev->data->dev_private = rte_zmalloc_socket( 3619 name, priv_data_size, RTE_CACHE_LINE_SIZE, 3620 device->numa_node); 3621 3622 if (!ethdev->data->dev_private) { 3623 RTE_LOG(ERR, EAL, "failed to allocate private data"); 3624 retval = -ENOMEM; 3625 goto probe_failed; 3626 } 3627 } 3628 } else { 3629 ethdev = rte_eth_dev_attach_secondary(name); 3630 if (!ethdev) { 3631 RTE_LOG(ERR, EAL, "secondary process attach failed, " 3632 "ethdev doesn't exist"); 3633 return -ENODEV; 3634 } 3635 } 3636 3637 ethdev->device = device; 3638 3639 if (ethdev_bus_specific_init) { 3640 retval = ethdev_bus_specific_init(ethdev, bus_init_params); 3641 if (retval) { 3642 RTE_LOG(ERR, EAL, 3643 "ethdev bus specific initialisation failed"); 3644 goto probe_failed; 3645 } 3646 } 3647 3648 retval = ethdev_init(ethdev, init_params); 3649 if (retval) { 3650 RTE_LOG(ERR, EAL, "ethdev initialisation failed"); 3651 goto probe_failed; 3652 } 3653 3654 rte_eth_dev_probing_finish(ethdev); 
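/* The port is now ATTACHED and the RTE_ETH_EVENT_NEW notification has been delivered to registered callbacks. */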
3655 3656 return retval; 3657 3658 probe_failed: 3659 rte_eth_dev_release_port(ethdev); 3660 return retval; 3661 } 3662 3663 int __rte_experimental 3664 rte_eth_dev_destroy(struct rte_eth_dev *ethdev, 3665 ethdev_uninit_t ethdev_uninit) 3666 { 3667 int ret; 3668 3669 ethdev = rte_eth_dev_allocated(ethdev->data->name); 3670 if (!ethdev) 3671 return -ENODEV; 3672 3673 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL); 3674 3675 ret = ethdev_uninit(ethdev); 3676 if (ret) 3677 return ret; 3678 3679 return rte_eth_dev_release_port(ethdev); 3680 } 3681 3682 int 3683 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 3684 int epfd, int op, void *data) 3685 { 3686 uint32_t vec; 3687 struct rte_eth_dev *dev; 3688 struct rte_intr_handle *intr_handle; 3689 int rc; 3690 3691 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3692 3693 dev = &rte_eth_devices[port_id]; 3694 if (queue_id >= dev->data->nb_rx_queues) { 3695 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 3696 return -EINVAL; 3697 } 3698 3699 if (!dev->intr_handle) { 3700 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 3701 return -ENOTSUP; 3702 } 3703 3704 intr_handle = dev->intr_handle; 3705 if (!intr_handle->intr_vec) { 3706 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 3707 return -EPERM; 3708 } 3709 3710 vec = intr_handle->intr_vec[queue_id]; 3711 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 3712 if (rc && rc != -EEXIST) { 3713 RTE_ETHDEV_LOG(ERR, 3714 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 3715 port_id, queue_id, op, epfd, vec); 3716 return rc; 3717 } 3718 3719 return 0; 3720 } 3721 3722 int 3723 rte_eth_dev_rx_intr_enable(uint16_t port_id, 3724 uint16_t queue_id) 3725 { 3726 struct rte_eth_dev *dev; 3727 3728 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3729 3730 dev = &rte_eth_devices[port_id]; 3731 3732 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 3733 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, 3734 queue_id)); 3735 } 3736 3737 int 3738 rte_eth_dev_rx_intr_disable(uint16_t port_id, 3739 uint16_t queue_id) 3740 { 3741 struct rte_eth_dev *dev; 3742 3743 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3744 3745 dev = &rte_eth_devices[port_id]; 3746 3747 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 3748 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, 3749 queue_id)); 3750 } 3751 3752 3753 int 3754 rte_eth_dev_filter_supported(uint16_t port_id, 3755 enum rte_filter_type filter_type) 3756 { 3757 struct rte_eth_dev *dev; 3758 3759 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3760 3761 dev = &rte_eth_devices[port_id]; 3762 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP); 3763 return (*dev->dev_ops->filter_ctrl)(dev, filter_type, 3764 RTE_ETH_FILTER_NOP, NULL); 3765 } 3766 3767 int 3768 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type, 3769 enum rte_filter_op filter_op, void *arg) 3770 { 3771 struct rte_eth_dev *dev; 3772 3773 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3774 3775 dev = &rte_eth_devices[port_id]; 3776 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP); 3777 return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type, 3778 filter_op, arg)); 3779 } 3780 3781 const struct rte_eth_rxtx_callback * 3782 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 3783 rte_rx_callback_fn fn, void *user_param) 3784 { 3785 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3786 rte_errno = ENOTSUP; 3787 return NULL; 3788 #endif 
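/*
 * Illustrative usage sketch (not part of this file; the callback and counter
 * names are hypothetical). It shows a per-queue Rx callback matching
 * rte_rx_callback_fn that counts received packets and keeps the whole burst:
 *
 *	static uint16_t
 *	count_rx_cb(uint16_t port, uint16_t queue, struct rte_mbuf **pkts,
 *		uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		RTE_SET_USED(port);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*counter += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	uint64_t rx_count = 0;
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_rx_cb, &rx_count);
 *	if (cb == NULL)
 *		... check rte_errno (ENOTSUP, EINVAL or ENOMEM) ...
 */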
3789 /* check input parameters */ 3790 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 3791 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 3792 rte_errno = EINVAL; 3793 return NULL; 3794 } 3795 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 3796 3797 if (cb == NULL) { 3798 rte_errno = ENOMEM; 3799 return NULL; 3800 } 3801 3802 cb->fn.rx = fn; 3803 cb->param = user_param; 3804 3805 rte_spinlock_lock(&rte_eth_rx_cb_lock); 3806 /* Add the callbacks in fifo order. */ 3807 struct rte_eth_rxtx_callback *tail = 3808 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 3809 3810 if (!tail) { 3811 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb; 3812 3813 } else { 3814 while (tail->next) 3815 tail = tail->next; 3816 tail->next = cb; 3817 } 3818 rte_spinlock_unlock(&rte_eth_rx_cb_lock); 3819 3820 return cb; 3821 } 3822 3823 const struct rte_eth_rxtx_callback * 3824 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 3825 rte_rx_callback_fn fn, void *user_param) 3826 { 3827 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3828 rte_errno = ENOTSUP; 3829 return NULL; 3830 #endif 3831 /* check input parameters */ 3832 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 3833 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 3834 rte_errno = EINVAL; 3835 return NULL; 3836 } 3837 3838 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 3839 3840 if (cb == NULL) { 3841 rte_errno = ENOMEM; 3842 return NULL; 3843 } 3844 3845 cb->fn.rx = fn; 3846 cb->param = user_param; 3847 3848 rte_spinlock_lock(&rte_eth_rx_cb_lock); 3849 /* Add the callbacks at first position */ 3850 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 3851 rte_smp_wmb(); 3852 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb; 3853 rte_spinlock_unlock(&rte_eth_rx_cb_lock); 3854 3855 return cb; 3856 } 3857 3858 const struct rte_eth_rxtx_callback * 3859 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 3860 rte_tx_callback_fn fn, void *user_param) 3861 { 3862 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3863 rte_errno = ENOTSUP; 3864 return NULL; 3865 #endif 3866 /* check input parameters */ 3867 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 3868 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 3869 rte_errno = EINVAL; 3870 return NULL; 3871 } 3872 3873 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 3874 3875 if (cb == NULL) { 3876 rte_errno = ENOMEM; 3877 return NULL; 3878 } 3879 3880 cb->fn.tx = fn; 3881 cb->param = user_param; 3882 3883 rte_spinlock_lock(&rte_eth_tx_cb_lock); 3884 /* Add the callbacks in fifo order. */ 3885 struct rte_eth_rxtx_callback *tail = 3886 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 3887 3888 if (!tail) { 3889 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb; 3890 3891 } else { 3892 while (tail->next) 3893 tail = tail->next; 3894 tail->next = cb; 3895 } 3896 rte_spinlock_unlock(&rte_eth_tx_cb_lock); 3897 3898 return cb; 3899 } 3900 3901 int 3902 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 3903 const struct rte_eth_rxtx_callback *user_cb) 3904 { 3905 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3906 return -ENOTSUP; 3907 #endif 3908 /* Check input parameters.
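Note that removal only unlinks the callback; its memory is not freed here, so the caller must make sure no in-flight Rx burst still references it before releasing it.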
*/ 3909 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 3910 if (user_cb == NULL || 3911 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 3912 return -EINVAL; 3913 3914 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 3915 struct rte_eth_rxtx_callback *cb; 3916 struct rte_eth_rxtx_callback **prev_cb; 3917 int ret = -EINVAL; 3918 3919 rte_spinlock_lock(&rte_eth_rx_cb_lock); 3920 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 3921 for (; *prev_cb != NULL; prev_cb = &cb->next) { 3922 cb = *prev_cb; 3923 if (cb == user_cb) { 3924 /* Remove the user cb from the callback list. */ 3925 *prev_cb = cb->next; 3926 ret = 0; 3927 break; 3928 } 3929 } 3930 rte_spinlock_unlock(&rte_eth_rx_cb_lock); 3931 3932 return ret; 3933 } 3934 3935 int 3936 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 3937 const struct rte_eth_rxtx_callback *user_cb) 3938 { 3939 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 3940 return -ENOTSUP; 3941 #endif 3942 /* Check input parameters. */ 3943 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 3944 if (user_cb == NULL || 3945 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 3946 return -EINVAL; 3947 3948 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 3949 int ret = -EINVAL; 3950 struct rte_eth_rxtx_callback *cb; 3951 struct rte_eth_rxtx_callback **prev_cb; 3952 3953 rte_spinlock_lock(&rte_eth_tx_cb_lock); 3954 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 3955 for (; *prev_cb != NULL; prev_cb = &cb->next) { 3956 cb = *prev_cb; 3957 if (cb == user_cb) { 3958 /* Remove the user cb from the callback list. */ 3959 *prev_cb = cb->next; 3960 ret = 0; 3961 break; 3962 } 3963 } 3964 rte_spinlock_unlock(&rte_eth_tx_cb_lock); 3965 3966 return ret; 3967 } 3968 3969 int 3970 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 3971 struct rte_eth_rxq_info *qinfo) 3972 { 3973 struct rte_eth_dev *dev; 3974 3975 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3976 3977 if (qinfo == NULL) 3978 return -EINVAL; 3979 3980 dev = &rte_eth_devices[port_id]; 3981 if (queue_id >= dev->data->nb_rx_queues) { 3982 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 3983 return -EINVAL; 3984 } 3985 3986 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 3987 3988 memset(qinfo, 0, sizeof(*qinfo)); 3989 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 3990 return 0; 3991 } 3992 3993 int 3994 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 3995 struct rte_eth_txq_info *qinfo) 3996 { 3997 struct rte_eth_dev *dev; 3998 3999 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4000 4001 if (qinfo == NULL) 4002 return -EINVAL; 4003 4004 dev = &rte_eth_devices[port_id]; 4005 if (queue_id >= dev->data->nb_tx_queues) { 4006 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); 4007 return -EINVAL; 4008 } 4009 4010 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 4011 4012 memset(qinfo, 0, sizeof(*qinfo)); 4013 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 4014 4015 return 0; 4016 } 4017 4018 int 4019 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 4020 struct ether_addr *mc_addr_set, 4021 uint32_t nb_mc_addr) 4022 { 4023 struct rte_eth_dev *dev; 4024 4025 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4026 4027 dev = &rte_eth_devices[port_id]; 4028 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 4029 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 4030 mc_addr_set, nb_mc_addr)); 4031 } 4032 4033 int 4034 rte_eth_timesync_enable(uint16_t port_id) 4035 { 4036 struct rte_eth_dev 
int
rte_eth_timesync_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
}

int
rte_eth_timesync_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
}

int
rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
				(dev, timestamp, flags));
}

int
rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
				   struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
				(dev, timestamp));
}

int
rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
								      delta));
}

int
rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
								    timestamp));
}

int
rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
								     timestamp));
}

int
rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
}

int
rte_eth_dev_get_eeprom_length(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
}
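/*
 * Illustrative EEPROM dump sketch (application-side; the buffer
 * handling is hypothetical and assumes struct rte_dev_eeprom_info
 * carries a data pointer plus offset/length fields as declared in
 * rte_dev_info.h): size the buffer with rte_eth_dev_get_eeprom_length()
 * and then read the whole EEPROM with rte_eth_dev_get_eeprom().
 *
 *	int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *	if (len > 0) {
 *		struct rte_dev_eeprom_info info = {
 *			.data = malloc(len),
 *			.offset = 0,
 *			.length = (uint32_t)len,
 *		};
 *
 *		if (info.data != NULL &&
 *				rte_eth_dev_get_eeprom(port_id, &info) == 0)
 *			... inspect info.data ...
 *		free(info.data);
 *	}
 */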
int
rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
}

int
rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}

int __rte_experimental
rte_eth_dev_get_module_info(uint16_t port_id,
			    struct rte_eth_dev_module_info *modinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
	return (*dev->dev_ops->get_module_info)(dev, modinfo);
}

int __rte_experimental
rte_eth_dev_get_module_eeprom(uint16_t port_id,
			      struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
	return (*dev->dev_ops->get_module_eeprom)(dev, info);
}

int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
}

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (l2_tunnel == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
				-ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
								l2_tunnel));
}

int
rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (l2_tunnel == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
		return -EINVAL;
	}

	if (mask == 0) {
		RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
				-ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
							l2_tunnel, mask, en));
}
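/*
 * The helper below clamps a requested descriptor count to the limits
 * advertised by the driver: round up to the ring alignment, cap at the
 * maximum and raise to the minimum. As a worked example (hypothetical
 * limits): a request of 1000 descriptors with nb_align = 32,
 * nb_min = 64 and nb_max = 4096 is rounded up to 1024, which already
 * satisfies both the minimum and the maximum, so *nb_desc ends up
 * as 1024.
 */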
static void
rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
			   const struct rte_eth_desc_lim *desc_lim)
{
	if (desc_lim->nb_align != 0)
		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}

int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	if (nb_rx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	return 0;
}

int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (pool == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->pool_ops_supported == NULL)
		return 1; /* all pools are supported */

	return (*dev->dev_ops->pool_ops_supported)(dev, pool);
}
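/*
 * Illustrative note on rte_eth_dev_pool_ops_supported() above
 * (application-side, hypothetical ops name): a negative return value
 * means the port cannot use the given mempool ops, while a
 * non-negative value means it can, matching the fallback of 1 returned
 * when a driver implements no check at all.
 *
 *	if (rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc") < 0)
 *		... pick a different mempool ops name for this port ...
 */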
/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. Array is sized to
 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains
 * than ethdev ports in a single process.
 */
static struct rte_eth_dev_switch {
	enum rte_eth_switch_domain_state state;
} rte_eth_switch_domains[RTE_MAX_ETHPORTS];

int __rte_experimental
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
	unsigned int i;

	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
		i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_switch_domains[i].state ==
			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
			rte_eth_switch_domains[i].state =
				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
			*domain_id = i;
			return 0;
		}
	}

	return -ENOSPC;
}

int __rte_experimental
rte_eth_switch_domain_free(uint16_t domain_id)
{
	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
		domain_id >= RTE_MAX_ETHPORTS)
		return -EINVAL;

	if (rte_eth_switch_domains[domain_id].state !=
		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
		return -EINVAL;

	rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;

	return 0;
}

static int
rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
	int state;
	struct rte_kvargs_pair *pair;
	char *letter;

	arglist->str = strdup(str_in);
	if (arglist->str == NULL)
		return -ENOMEM;

	letter = arglist->str;
	state = 0;
	arglist->count = 0;
	pair = &arglist->pairs[0];
	while (1) {
		switch (state) {
		case 0: /* Initial */
			if (*letter == '=')
				return -EINVAL;
			else if (*letter == '\0')
				return 0;

			state = 1;
			pair->key = letter;
			/* fall-thru */

		case 1: /* Parsing key */
			if (*letter == '=') {
				*letter = '\0';
				pair->value = letter + 1;
				state = 2;
			} else if (*letter == ',' || *letter == '\0')
				return -EINVAL;
			break;

		case 2: /* Parsing value */
			if (*letter == '[')
				state = 3;
			else if (*letter == ',') {
				*letter = '\0';
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			} else if (*letter == '\0') {
				letter--;
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			}
			break;

		case 3: /* Parsing list */
			if (*letter == ']')
				state = 2;
			else if (*letter == '\0')
				return -EINVAL;
			break;
		}
		letter++;
	}
}

int __rte_experimental
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
	struct rte_kvargs args;
	struct rte_kvargs_pair *pair;
	unsigned int i;
	int result = 0;

	memset(eth_da, 0, sizeof(*eth_da));

	result = rte_eth_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			result = rte_eth_devargs_parse_list(pair->value,
				rte_eth_devargs_parse_representor_ports,
				eth_da);
			if (result < 0)
				goto parse_cleanup;
		}
	}

parse_cleanup:
	if (args.str)
		free(args.str);

	return result;
}

RTE_INIT(ethdev_init_log)
{
	rte_eth_dev_logtype = rte_log_register("lib.ethdev");
	if (rte_eth_dev_logtype >= 0)
		rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
}
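/*
 * Illustrative note on the devargs helpers above (hypothetical input
 * string): given "representor=[0,2-4]", rte_eth_devargs_tokenise()
 * produces a single key/value pair ("representor" -> "[0,2-4]"), and
 * rte_eth_devargs_parse() then expands the bracketed list through
 * rte_eth_devargs_parse_representor_ports() so that the representor
 * port list in *eth_da contains ports 0, 2, 3 and 4.
 */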