/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Shared memory between primary and secondary processes.
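 * Descriptive note (based on eth_dev_shared_data_prepare() below): the primary
 * process reserves this memzone under MZ_RTE_ETH_DEV_DATA, while secondary
 * processes only look it up, so all processes see the same port data and
 * ownership state.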
 */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user application,
 * the pointer to the parameters for the callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate.
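	 * For example (illustrative names): a PCI device "0000:06:00.0" becomes
	 * the bus string "addr=0000:06:00.0", while a vdev such as "net_tap0"
	 * becomes "name=net_tap0".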
	 */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
	    (strcmp(iter->bus->name, "fslmc") == 0) ||
	    (strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not a pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in the middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
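 *
 * Usage sketch (mirrors rte_eth_dev_count_total() below):
 *     uint16_t p, total = 0;
 *     RTE_ETH_FOREACH_VALID_DEV(p)
 *             total++;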
367 */ 368 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \ 369 for (port_id = rte_eth_find_next(0); \ 370 port_id < RTE_MAX_ETHPORTS; \ 371 port_id = rte_eth_find_next(port_id + 1)) 372 373 uint16_t 374 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent) 375 { 376 port_id = rte_eth_find_next(port_id); 377 while (port_id < RTE_MAX_ETHPORTS && 378 rte_eth_devices[port_id].device != parent) 379 port_id = rte_eth_find_next(port_id + 1); 380 381 return port_id; 382 } 383 384 uint16_t 385 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id) 386 { 387 RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS); 388 return rte_eth_find_next_of(port_id, 389 rte_eth_devices[ref_port_id].device); 390 } 391 392 static void 393 eth_dev_shared_data_prepare(void) 394 { 395 const unsigned flags = 0; 396 const struct rte_memzone *mz; 397 398 rte_spinlock_lock(ð_dev_shared_data_lock); 399 400 if (eth_dev_shared_data == NULL) { 401 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 402 /* Allocate port data and ownership shared memory. */ 403 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA, 404 sizeof(*eth_dev_shared_data), 405 rte_socket_id(), flags); 406 } else 407 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA); 408 if (mz == NULL) 409 rte_panic("Cannot allocate ethdev shared data\n"); 410 411 eth_dev_shared_data = mz->addr; 412 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 413 eth_dev_shared_data->next_owner_id = 414 RTE_ETH_DEV_NO_OWNER + 1; 415 rte_spinlock_init(ð_dev_shared_data->ownership_lock); 416 memset(eth_dev_shared_data->data, 0, 417 sizeof(eth_dev_shared_data->data)); 418 } 419 } 420 421 rte_spinlock_unlock(ð_dev_shared_data_lock); 422 } 423 424 static bool 425 eth_dev_is_allocated(const struct rte_eth_dev *ethdev) 426 { 427 return ethdev->data->name[0] != '\0'; 428 } 429 430 static struct rte_eth_dev * 431 eth_dev_allocated(const char *name) 432 { 433 uint16_t i; 434 435 RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX); 436 437 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 438 if (rte_eth_devices[i].data != NULL && 439 strcmp(rte_eth_devices[i].data->name, name) == 0) 440 return &rte_eth_devices[i]; 441 } 442 return NULL; 443 } 444 445 struct rte_eth_dev * 446 rte_eth_dev_allocated(const char *name) 447 { 448 struct rte_eth_dev *ethdev; 449 450 eth_dev_shared_data_prepare(); 451 452 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 453 454 ethdev = eth_dev_allocated(name); 455 456 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 457 458 return ethdev; 459 } 460 461 static uint16_t 462 eth_dev_find_free_port(void) 463 { 464 uint16_t i; 465 466 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 467 /* Using shared name field to find a free port. 
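		 * (An empty name in eth_dev_shared_data->data[] means the slot
		 * is unused in every process, which keeps port ids consistent
		 * between primary and secondary.)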
*/ 468 if (eth_dev_shared_data->data[i].name[0] == '\0') { 469 RTE_ASSERT(rte_eth_devices[i].state == 470 RTE_ETH_DEV_UNUSED); 471 return i; 472 } 473 } 474 return RTE_MAX_ETHPORTS; 475 } 476 477 static struct rte_eth_dev * 478 eth_dev_get(uint16_t port_id) 479 { 480 struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id]; 481 482 eth_dev->data = ð_dev_shared_data->data[port_id]; 483 484 return eth_dev; 485 } 486 487 struct rte_eth_dev * 488 rte_eth_dev_allocate(const char *name) 489 { 490 uint16_t port_id; 491 struct rte_eth_dev *eth_dev = NULL; 492 size_t name_len; 493 494 name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN); 495 if (name_len == 0) { 496 RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n"); 497 return NULL; 498 } 499 500 if (name_len >= RTE_ETH_NAME_MAX_LEN) { 501 RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n"); 502 return NULL; 503 } 504 505 eth_dev_shared_data_prepare(); 506 507 /* Synchronize port creation between primary and secondary threads. */ 508 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 509 510 if (eth_dev_allocated(name) != NULL) { 511 RTE_ETHDEV_LOG(ERR, 512 "Ethernet device with name %s already allocated\n", 513 name); 514 goto unlock; 515 } 516 517 port_id = eth_dev_find_free_port(); 518 if (port_id == RTE_MAX_ETHPORTS) { 519 RTE_ETHDEV_LOG(ERR, 520 "Reached maximum number of Ethernet ports\n"); 521 goto unlock; 522 } 523 524 eth_dev = eth_dev_get(port_id); 525 strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name)); 526 eth_dev->data->port_id = port_id; 527 eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS; 528 eth_dev->data->mtu = RTE_ETHER_MTU; 529 pthread_mutex_init(ð_dev->data->flow_ops_mutex, NULL); 530 531 unlock: 532 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 533 534 return eth_dev; 535 } 536 537 /* 538 * Attach to a port already registered by the primary process, which 539 * makes sure that the same device would have the same port id both 540 * in the primary and secondary process. 541 */ 542 struct rte_eth_dev * 543 rte_eth_dev_attach_secondary(const char *name) 544 { 545 uint16_t i; 546 struct rte_eth_dev *eth_dev = NULL; 547 548 eth_dev_shared_data_prepare(); 549 550 /* Synchronize port attachment to primary port creation and release. 
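	 * (Both the primary and secondary processes serialize on the
	 * ownership_lock stored in the shared memzone.)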
*/ 551 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 552 553 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 554 if (strcmp(eth_dev_shared_data->data[i].name, name) == 0) 555 break; 556 } 557 if (i == RTE_MAX_ETHPORTS) { 558 RTE_ETHDEV_LOG(ERR, 559 "Device %s is not driven by the primary process\n", 560 name); 561 } else { 562 eth_dev = eth_dev_get(i); 563 RTE_ASSERT(eth_dev->data->port_id == i); 564 } 565 566 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 567 return eth_dev; 568 } 569 570 int 571 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) 572 { 573 if (eth_dev == NULL) 574 return -EINVAL; 575 576 eth_dev_shared_data_prepare(); 577 578 if (eth_dev->state != RTE_ETH_DEV_UNUSED) 579 rte_eth_dev_callback_process(eth_dev, 580 RTE_ETH_EVENT_DESTROY, NULL); 581 582 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 583 584 eth_dev->state = RTE_ETH_DEV_UNUSED; 585 eth_dev->device = NULL; 586 eth_dev->process_private = NULL; 587 eth_dev->intr_handle = NULL; 588 eth_dev->rx_pkt_burst = NULL; 589 eth_dev->tx_pkt_burst = NULL; 590 eth_dev->tx_pkt_prepare = NULL; 591 eth_dev->rx_queue_count = NULL; 592 eth_dev->rx_descriptor_status = NULL; 593 eth_dev->tx_descriptor_status = NULL; 594 eth_dev->dev_ops = NULL; 595 596 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 597 rte_free(eth_dev->data->rx_queues); 598 rte_free(eth_dev->data->tx_queues); 599 rte_free(eth_dev->data->mac_addrs); 600 rte_free(eth_dev->data->hash_mac_addrs); 601 rte_free(eth_dev->data->dev_private); 602 pthread_mutex_destroy(ð_dev->data->flow_ops_mutex); 603 memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data)); 604 } 605 606 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 607 608 return 0; 609 } 610 611 int 612 rte_eth_dev_is_valid_port(uint16_t port_id) 613 { 614 if (port_id >= RTE_MAX_ETHPORTS || 615 (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)) 616 return 0; 617 else 618 return 1; 619 } 620 621 static int 622 eth_is_valid_owner_id(uint64_t owner_id) 623 { 624 if (owner_id == RTE_ETH_DEV_NO_OWNER || 625 eth_dev_shared_data->next_owner_id <= owner_id) 626 return 0; 627 return 1; 628 } 629 630 uint64_t 631 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id) 632 { 633 port_id = rte_eth_find_next(port_id); 634 while (port_id < RTE_MAX_ETHPORTS && 635 rte_eth_devices[port_id].data->owner.id != owner_id) 636 port_id = rte_eth_find_next(port_id + 1); 637 638 return port_id; 639 } 640 641 int 642 rte_eth_dev_owner_new(uint64_t *owner_id) 643 { 644 if (owner_id == NULL) { 645 RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n"); 646 return -EINVAL; 647 } 648 649 eth_dev_shared_data_prepare(); 650 651 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 652 653 *owner_id = eth_dev_shared_data->next_owner_id++; 654 655 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 656 return 0; 657 } 658 659 static int 660 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id, 661 const struct rte_eth_dev_owner *new_owner) 662 { 663 struct rte_eth_dev *ethdev = &rte_eth_devices[port_id]; 664 struct rte_eth_dev_owner *port_owner; 665 666 if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) { 667 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n", 668 port_id); 669 return -ENODEV; 670 } 671 672 if (new_owner == NULL) { 673 RTE_ETHDEV_LOG(ERR, 674 "Cannot set ethdev port %u owner from NULL owner\n", 675 port_id); 676 return -EINVAL; 677 } 678 679 if (!eth_is_valid_owner_id(new_owner->id) && 680 !eth_is_valid_owner_id(old_owner_id)) { 681 
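		/* Neither the new owner id nor the current owner id is a valid allocated id. */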
RTE_ETHDEV_LOG(ERR, 682 "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n", 683 old_owner_id, new_owner->id); 684 return -EINVAL; 685 } 686 687 port_owner = &rte_eth_devices[port_id].data->owner; 688 if (port_owner->id != old_owner_id) { 689 RTE_ETHDEV_LOG(ERR, 690 "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n", 691 port_id, port_owner->name, port_owner->id); 692 return -EPERM; 693 } 694 695 /* can not truncate (same structure) */ 696 strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN); 697 698 port_owner->id = new_owner->id; 699 700 RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n", 701 port_id, new_owner->name, new_owner->id); 702 703 return 0; 704 } 705 706 int 707 rte_eth_dev_owner_set(const uint16_t port_id, 708 const struct rte_eth_dev_owner *owner) 709 { 710 int ret; 711 712 eth_dev_shared_data_prepare(); 713 714 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 715 716 ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner); 717 718 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 719 return ret; 720 } 721 722 int 723 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id) 724 { 725 const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner) 726 {.id = RTE_ETH_DEV_NO_OWNER, .name = ""}; 727 int ret; 728 729 eth_dev_shared_data_prepare(); 730 731 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 732 733 ret = eth_dev_owner_set(port_id, owner_id, &new_owner); 734 735 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 736 return ret; 737 } 738 739 int 740 rte_eth_dev_owner_delete(const uint64_t owner_id) 741 { 742 uint16_t port_id; 743 int ret = 0; 744 745 eth_dev_shared_data_prepare(); 746 747 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 748 749 if (eth_is_valid_owner_id(owner_id)) { 750 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) 751 if (rte_eth_devices[port_id].data->owner.id == owner_id) 752 memset(&rte_eth_devices[port_id].data->owner, 0, 753 sizeof(struct rte_eth_dev_owner)); 754 RTE_ETHDEV_LOG(NOTICE, 755 "All port owners owned by %016"PRIx64" identifier have removed\n", 756 owner_id); 757 } else { 758 RTE_ETHDEV_LOG(ERR, 759 "Invalid owner id=%016"PRIx64"\n", 760 owner_id); 761 ret = -EINVAL; 762 } 763 764 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 765 766 return ret; 767 } 768 769 int 770 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner) 771 { 772 struct rte_eth_dev *ethdev; 773 774 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 775 ethdev = &rte_eth_devices[port_id]; 776 777 if (!eth_dev_is_allocated(ethdev)) { 778 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n", 779 port_id); 780 return -ENODEV; 781 } 782 783 if (owner == NULL) { 784 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n", 785 port_id); 786 return -EINVAL; 787 } 788 789 eth_dev_shared_data_prepare(); 790 791 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 792 rte_memcpy(owner, ðdev->data->owner, sizeof(*owner)); 793 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 794 795 return 0; 796 } 797 798 int 799 rte_eth_dev_socket_id(uint16_t port_id) 800 { 801 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 802 return rte_eth_devices[port_id].data->numa_node; 803 } 804 805 void * 806 rte_eth_dev_get_sec_ctx(uint16_t port_id) 807 { 808 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL); 809 return rte_eth_devices[port_id].security_ctx; 810 } 811 812 uint16_t 813 rte_eth_dev_count_avail(void) 814 { 815 uint16_t p; 816 
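	/* RTE_ETH_FOREACH_DEV below visits only ports that are not owned by another entity. */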
uint16_t count; 817 818 count = 0; 819 820 RTE_ETH_FOREACH_DEV(p) 821 count++; 822 823 return count; 824 } 825 826 uint16_t 827 rte_eth_dev_count_total(void) 828 { 829 uint16_t port, count = 0; 830 831 RTE_ETH_FOREACH_VALID_DEV(port) 832 count++; 833 834 return count; 835 } 836 837 int 838 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name) 839 { 840 char *tmp; 841 842 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 843 844 if (name == NULL) { 845 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n", 846 port_id); 847 return -EINVAL; 848 } 849 850 /* shouldn't check 'rte_eth_devices[i].data', 851 * because it might be overwritten by VDEV PMD */ 852 tmp = eth_dev_shared_data->data[port_id].name; 853 strcpy(name, tmp); 854 return 0; 855 } 856 857 int 858 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) 859 { 860 uint16_t pid; 861 862 if (name == NULL) { 863 RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name"); 864 return -EINVAL; 865 } 866 867 if (port_id == NULL) { 868 RTE_ETHDEV_LOG(ERR, 869 "Cannot get port ID to NULL for %s\n", name); 870 return -EINVAL; 871 } 872 873 RTE_ETH_FOREACH_VALID_DEV(pid) 874 if (!strcmp(name, eth_dev_shared_data->data[pid].name)) { 875 *port_id = pid; 876 return 0; 877 } 878 879 return -ENODEV; 880 } 881 882 static int 883 eth_err(uint16_t port_id, int ret) 884 { 885 if (ret == 0) 886 return 0; 887 if (rte_eth_dev_is_removed(port_id)) 888 return -EIO; 889 return ret; 890 } 891 892 static void 893 eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid) 894 { 895 void **rxq = dev->data->rx_queues; 896 897 if (rxq[qid] == NULL) 898 return; 899 900 if (dev->dev_ops->rx_queue_release != NULL) 901 (*dev->dev_ops->rx_queue_release)(dev, qid); 902 rxq[qid] = NULL; 903 } 904 905 static void 906 eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid) 907 { 908 void **txq = dev->data->tx_queues; 909 910 if (txq[qid] == NULL) 911 return; 912 913 if (dev->dev_ops->tx_queue_release != NULL) 914 (*dev->dev_ops->tx_queue_release)(dev, qid); 915 txq[qid] = NULL; 916 } 917 918 static int 919 eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) 920 { 921 uint16_t old_nb_queues = dev->data->nb_rx_queues; 922 unsigned i; 923 924 if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */ 925 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues", 926 sizeof(dev->data->rx_queues[0]) * 927 RTE_MAX_QUEUES_PER_PORT, 928 RTE_CACHE_LINE_SIZE); 929 if (dev->data->rx_queues == NULL) { 930 dev->data->nb_rx_queues = 0; 931 return -(ENOMEM); 932 } 933 } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */ 934 for (i = nb_queues; i < old_nb_queues; i++) 935 eth_dev_rxq_release(dev, i); 936 937 } else if (dev->data->rx_queues != NULL && nb_queues == 0) { 938 for (i = nb_queues; i < old_nb_queues; i++) 939 eth_dev_rxq_release(dev, i); 940 941 rte_free(dev->data->rx_queues); 942 dev->data->rx_queues = NULL; 943 } 944 dev->data->nb_rx_queues = nb_queues; 945 return 0; 946 } 947 948 static int 949 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id) 950 { 951 uint16_t port_id; 952 953 if (rx_queue_id >= dev->data->nb_rx_queues) { 954 port_id = dev->data->port_id; 955 RTE_ETHDEV_LOG(ERR, 956 "Invalid Rx queue_id=%u of device with port_id=%u\n", 957 rx_queue_id, port_id); 958 return -EINVAL; 959 } 960 961 if (dev->data->rx_queues[rx_queue_id] == NULL) { 962 port_id = dev->data->port_id; 963 RTE_ETHDEV_LOG(ERR, 964 "Queue %u of device with port_id=%u has not been 
setup\n", 965 rx_queue_id, port_id); 966 return -EINVAL; 967 } 968 969 return 0; 970 } 971 972 static int 973 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id) 974 { 975 uint16_t port_id; 976 977 if (tx_queue_id >= dev->data->nb_tx_queues) { 978 port_id = dev->data->port_id; 979 RTE_ETHDEV_LOG(ERR, 980 "Invalid Tx queue_id=%u of device with port_id=%u\n", 981 tx_queue_id, port_id); 982 return -EINVAL; 983 } 984 985 if (dev->data->tx_queues[tx_queue_id] == NULL) { 986 port_id = dev->data->port_id; 987 RTE_ETHDEV_LOG(ERR, 988 "Queue %u of device with port_id=%u has not been setup\n", 989 tx_queue_id, port_id); 990 return -EINVAL; 991 } 992 993 return 0; 994 } 995 996 int 997 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id) 998 { 999 struct rte_eth_dev *dev; 1000 int ret; 1001 1002 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1003 dev = &rte_eth_devices[port_id]; 1004 1005 if (!dev->data->dev_started) { 1006 RTE_ETHDEV_LOG(ERR, 1007 "Port %u must be started before start any queue\n", 1008 port_id); 1009 return -EINVAL; 1010 } 1011 1012 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 1013 if (ret != 0) 1014 return ret; 1015 1016 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP); 1017 1018 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 1019 RTE_ETHDEV_LOG(INFO, 1020 "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1021 rx_queue_id, port_id); 1022 return -EINVAL; 1023 } 1024 1025 if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 1026 RTE_ETHDEV_LOG(INFO, 1027 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 1028 rx_queue_id, port_id); 1029 return 0; 1030 } 1031 1032 return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id)); 1033 } 1034 1035 int 1036 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id) 1037 { 1038 struct rte_eth_dev *dev; 1039 int ret; 1040 1041 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1042 dev = &rte_eth_devices[port_id]; 1043 1044 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 1045 if (ret != 0) 1046 return ret; 1047 1048 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP); 1049 1050 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 1051 RTE_ETHDEV_LOG(INFO, 1052 "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1053 rx_queue_id, port_id); 1054 return -EINVAL; 1055 } 1056 1057 if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 1058 RTE_ETHDEV_LOG(INFO, 1059 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 1060 rx_queue_id, port_id); 1061 return 0; 1062 } 1063 1064 return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id)); 1065 } 1066 1067 int 1068 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id) 1069 { 1070 struct rte_eth_dev *dev; 1071 int ret; 1072 1073 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1074 dev = &rte_eth_devices[port_id]; 1075 1076 if (!dev->data->dev_started) { 1077 RTE_ETHDEV_LOG(ERR, 1078 "Port %u must be started before start any queue\n", 1079 port_id); 1080 return -EINVAL; 1081 } 1082 1083 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 1084 if (ret != 0) 1085 return ret; 1086 1087 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP); 1088 1089 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 1090 RTE_ETHDEV_LOG(INFO, 1091 "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1092 
tx_queue_id, port_id); 1093 return -EINVAL; 1094 } 1095 1096 if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 1097 RTE_ETHDEV_LOG(INFO, 1098 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 1099 tx_queue_id, port_id); 1100 return 0; 1101 } 1102 1103 return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id)); 1104 } 1105 1106 int 1107 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id) 1108 { 1109 struct rte_eth_dev *dev; 1110 int ret; 1111 1112 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1113 dev = &rte_eth_devices[port_id]; 1114 1115 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 1116 if (ret != 0) 1117 return ret; 1118 1119 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP); 1120 1121 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 1122 RTE_ETHDEV_LOG(INFO, 1123 "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1124 tx_queue_id, port_id); 1125 return -EINVAL; 1126 } 1127 1128 if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 1129 RTE_ETHDEV_LOG(INFO, 1130 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 1131 tx_queue_id, port_id); 1132 return 0; 1133 } 1134 1135 return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id)); 1136 } 1137 1138 static int 1139 eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) 1140 { 1141 uint16_t old_nb_queues = dev->data->nb_tx_queues; 1142 unsigned i; 1143 1144 if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */ 1145 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues", 1146 sizeof(dev->data->tx_queues[0]) * 1147 RTE_MAX_QUEUES_PER_PORT, 1148 RTE_CACHE_LINE_SIZE); 1149 if (dev->data->tx_queues == NULL) { 1150 dev->data->nb_tx_queues = 0; 1151 return -(ENOMEM); 1152 } 1153 } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */ 1154 for (i = nb_queues; i < old_nb_queues; i++) 1155 eth_dev_txq_release(dev, i); 1156 1157 } else if (dev->data->tx_queues != NULL && nb_queues == 0) { 1158 for (i = nb_queues; i < old_nb_queues; i++) 1159 eth_dev_txq_release(dev, i); 1160 1161 rte_free(dev->data->tx_queues); 1162 dev->data->tx_queues = NULL; 1163 } 1164 dev->data->nb_tx_queues = nb_queues; 1165 return 0; 1166 } 1167 1168 uint32_t 1169 rte_eth_speed_bitflag(uint32_t speed, int duplex) 1170 { 1171 switch (speed) { 1172 case ETH_SPEED_NUM_10M: 1173 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD; 1174 case ETH_SPEED_NUM_100M: 1175 return duplex ? 
ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD; 1176 case ETH_SPEED_NUM_1G: 1177 return ETH_LINK_SPEED_1G; 1178 case ETH_SPEED_NUM_2_5G: 1179 return ETH_LINK_SPEED_2_5G; 1180 case ETH_SPEED_NUM_5G: 1181 return ETH_LINK_SPEED_5G; 1182 case ETH_SPEED_NUM_10G: 1183 return ETH_LINK_SPEED_10G; 1184 case ETH_SPEED_NUM_20G: 1185 return ETH_LINK_SPEED_20G; 1186 case ETH_SPEED_NUM_25G: 1187 return ETH_LINK_SPEED_25G; 1188 case ETH_SPEED_NUM_40G: 1189 return ETH_LINK_SPEED_40G; 1190 case ETH_SPEED_NUM_50G: 1191 return ETH_LINK_SPEED_50G; 1192 case ETH_SPEED_NUM_56G: 1193 return ETH_LINK_SPEED_56G; 1194 case ETH_SPEED_NUM_100G: 1195 return ETH_LINK_SPEED_100G; 1196 case ETH_SPEED_NUM_200G: 1197 return ETH_LINK_SPEED_200G; 1198 default: 1199 return 0; 1200 } 1201 } 1202 1203 const char * 1204 rte_eth_dev_rx_offload_name(uint64_t offload) 1205 { 1206 const char *name = "UNKNOWN"; 1207 unsigned int i; 1208 1209 for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) { 1210 if (offload == eth_dev_rx_offload_names[i].offload) { 1211 name = eth_dev_rx_offload_names[i].name; 1212 break; 1213 } 1214 } 1215 1216 return name; 1217 } 1218 1219 const char * 1220 rte_eth_dev_tx_offload_name(uint64_t offload) 1221 { 1222 const char *name = "UNKNOWN"; 1223 unsigned int i; 1224 1225 for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) { 1226 if (offload == eth_dev_tx_offload_names[i].offload) { 1227 name = eth_dev_tx_offload_names[i].name; 1228 break; 1229 } 1230 } 1231 1232 return name; 1233 } 1234 1235 static inline int 1236 eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size, 1237 uint32_t max_rx_pkt_len, uint32_t dev_info_size) 1238 { 1239 int ret = 0; 1240 1241 if (dev_info_size == 0) { 1242 if (config_size != max_rx_pkt_len) { 1243 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size" 1244 " %u != %u is not allowed\n", 1245 port_id, config_size, max_rx_pkt_len); 1246 ret = -EINVAL; 1247 } 1248 } else if (config_size > dev_info_size) { 1249 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1250 "> max allowed value %u\n", port_id, config_size, 1251 dev_info_size); 1252 ret = -EINVAL; 1253 } else if (config_size < RTE_ETHER_MIN_LEN) { 1254 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1255 "< min allowed value %u\n", port_id, config_size, 1256 (unsigned int)RTE_ETHER_MIN_LEN); 1257 ret = -EINVAL; 1258 } 1259 return ret; 1260 } 1261 1262 /* 1263 * Validate offloads that are requested through rte_eth_dev_configure against 1264 * the offloads successfully set by the ethernet device. 1265 * 1266 * @param port_id 1267 * The port identifier of the Ethernet device. 1268 * @param req_offloads 1269 * The offloads that have been requested through `rte_eth_dev_configure`. 1270 * @param set_offloads 1271 * The offloads successfully set by the ethernet device. 1272 * @param offload_type 1273 * The offload type i.e. Rx/Tx string. 1274 * @param offload_name 1275 * The function that prints the offload name. 1276 * @return 1277 * - (0) if validation successful. 1278 * - (-EINVAL) if requested offload has been silently disabled. 1279 * 1280 */ 1281 static int 1282 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads, 1283 uint64_t set_offloads, const char *offload_type, 1284 const char *(*offload_name)(uint64_t)) 1285 { 1286 uint64_t offloads_diff = req_offloads ^ set_offloads; 1287 uint64_t offload; 1288 int ret = 0; 1289 1290 while (offloads_diff != 0) { 1291 /* Check if any offload is requested but not enabled. 
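		 * (i.e. the lowest set bit of offloads_diff is present in
		 * req_offloads but absent from set_offloads)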
*/ 1292 offload = 1ULL << __builtin_ctzll(offloads_diff); 1293 if (offload & req_offloads) { 1294 RTE_ETHDEV_LOG(ERR, 1295 "Port %u failed to enable %s offload %s\n", 1296 port_id, offload_type, offload_name(offload)); 1297 ret = -EINVAL; 1298 } 1299 1300 /* Check if offload couldn't be disabled. */ 1301 if (offload & set_offloads) { 1302 RTE_ETHDEV_LOG(DEBUG, 1303 "Port %u %s offload %s is not requested but enabled\n", 1304 port_id, offload_type, offload_name(offload)); 1305 } 1306 1307 offloads_diff &= ~offload; 1308 } 1309 1310 return ret; 1311 } 1312 1313 int 1314 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, 1315 const struct rte_eth_conf *dev_conf) 1316 { 1317 struct rte_eth_dev *dev; 1318 struct rte_eth_dev_info dev_info; 1319 struct rte_eth_conf orig_conf; 1320 uint16_t overhead_len; 1321 int diag; 1322 int ret; 1323 uint16_t old_mtu; 1324 1325 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1326 dev = &rte_eth_devices[port_id]; 1327 1328 if (dev_conf == NULL) { 1329 RTE_ETHDEV_LOG(ERR, 1330 "Cannot configure ethdev port %u from NULL config\n", 1331 port_id); 1332 return -EINVAL; 1333 } 1334 1335 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP); 1336 1337 if (dev->data->dev_started) { 1338 RTE_ETHDEV_LOG(ERR, 1339 "Port %u must be stopped to allow configuration\n", 1340 port_id); 1341 return -EBUSY; 1342 } 1343 1344 /* 1345 * Ensure that "dev_configured" is always 0 each time prepare to do 1346 * dev_configure() to avoid any non-anticipated behaviour. 1347 * And set to 1 when dev_configure() is executed successfully. 1348 */ 1349 dev->data->dev_configured = 0; 1350 1351 /* Store original config, as rollback required on failure */ 1352 memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf)); 1353 1354 /* 1355 * Copy the dev_conf parameter into the dev structure. 1356 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get 1357 */ 1358 if (dev_conf != &dev->data->dev_conf) 1359 memcpy(&dev->data->dev_conf, dev_conf, 1360 sizeof(dev->data->dev_conf)); 1361 1362 /* Backup mtu for rollback */ 1363 old_mtu = dev->data->mtu; 1364 1365 ret = rte_eth_dev_info_get(port_id, &dev_info); 1366 if (ret != 0) 1367 goto rollback; 1368 1369 /* Get the real Ethernet overhead length */ 1370 if (dev_info.max_mtu != UINT16_MAX && 1371 dev_info.max_rx_pktlen > dev_info.max_mtu) 1372 overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu; 1373 else 1374 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1375 1376 /* If number of queues specified by application for both Rx and Tx is 1377 * zero, use driver preferred values. This cannot be done individually 1378 * as it is valid for either Tx or Rx (but not both) to be zero. 1379 * If driver does not provide any preferred valued, fall back on 1380 * EAL defaults. 
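	 * For example, nb_rx_q == 0 && nb_tx_q == 0 with no driver preference
	 * resolves to RTE_ETH_DEV_FALLBACK_RX_NBQUEUES and
	 * RTE_ETH_DEV_FALLBACK_TX_NBQUEUES.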
1381 */ 1382 if (nb_rx_q == 0 && nb_tx_q == 0) { 1383 nb_rx_q = dev_info.default_rxportconf.nb_queues; 1384 if (nb_rx_q == 0) 1385 nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES; 1386 nb_tx_q = dev_info.default_txportconf.nb_queues; 1387 if (nb_tx_q == 0) 1388 nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES; 1389 } 1390 1391 if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) { 1392 RTE_ETHDEV_LOG(ERR, 1393 "Number of RX queues requested (%u) is greater than max supported(%d)\n", 1394 nb_rx_q, RTE_MAX_QUEUES_PER_PORT); 1395 ret = -EINVAL; 1396 goto rollback; 1397 } 1398 1399 if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) { 1400 RTE_ETHDEV_LOG(ERR, 1401 "Number of TX queues requested (%u) is greater than max supported(%d)\n", 1402 nb_tx_q, RTE_MAX_QUEUES_PER_PORT); 1403 ret = -EINVAL; 1404 goto rollback; 1405 } 1406 1407 /* 1408 * Check that the numbers of RX and TX queues are not greater 1409 * than the maximum number of RX and TX queues supported by the 1410 * configured device. 1411 */ 1412 if (nb_rx_q > dev_info.max_rx_queues) { 1413 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n", 1414 port_id, nb_rx_q, dev_info.max_rx_queues); 1415 ret = -EINVAL; 1416 goto rollback; 1417 } 1418 1419 if (nb_tx_q > dev_info.max_tx_queues) { 1420 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n", 1421 port_id, nb_tx_q, dev_info.max_tx_queues); 1422 ret = -EINVAL; 1423 goto rollback; 1424 } 1425 1426 /* Check that the device supports requested interrupts */ 1427 if ((dev_conf->intr_conf.lsc == 1) && 1428 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { 1429 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n", 1430 dev->device->driver->name); 1431 ret = -EINVAL; 1432 goto rollback; 1433 } 1434 if ((dev_conf->intr_conf.rmv == 1) && 1435 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { 1436 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n", 1437 dev->device->driver->name); 1438 ret = -EINVAL; 1439 goto rollback; 1440 } 1441 1442 /* 1443 * If jumbo frames are enabled, check that the maximum RX packet 1444 * length is supported by the configured device. 1445 */ 1446 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 1447 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) { 1448 RTE_ETHDEV_LOG(ERR, 1449 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n", 1450 port_id, dev_conf->rxmode.max_rx_pkt_len, 1451 dev_info.max_rx_pktlen); 1452 ret = -EINVAL; 1453 goto rollback; 1454 } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) { 1455 RTE_ETHDEV_LOG(ERR, 1456 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n", 1457 port_id, dev_conf->rxmode.max_rx_pkt_len, 1458 (unsigned int)RTE_ETHER_MIN_LEN); 1459 ret = -EINVAL; 1460 goto rollback; 1461 } 1462 1463 /* Scale the MTU size to adapt max_rx_pkt_len */ 1464 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len - 1465 overhead_len; 1466 } else { 1467 uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len; 1468 if (pktlen < RTE_ETHER_MIN_MTU + overhead_len || 1469 pktlen > RTE_ETHER_MTU + overhead_len) 1470 /* Use default value */ 1471 dev->data->dev_conf.rxmode.max_rx_pkt_len = 1472 RTE_ETHER_MTU + overhead_len; 1473 } 1474 1475 /* 1476 * If LRO is enabled, check that the maximum aggregated packet 1477 * size is supported by the configured device. 
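	 * (a max_lro_pkt_size of 0 is interpreted below as "use max_rx_pkt_len")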
1478 */ 1479 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) { 1480 if (dev_conf->rxmode.max_lro_pkt_size == 0) 1481 dev->data->dev_conf.rxmode.max_lro_pkt_size = 1482 dev->data->dev_conf.rxmode.max_rx_pkt_len; 1483 ret = eth_dev_check_lro_pkt_size(port_id, 1484 dev->data->dev_conf.rxmode.max_lro_pkt_size, 1485 dev->data->dev_conf.rxmode.max_rx_pkt_len, 1486 dev_info.max_lro_pkt_size); 1487 if (ret != 0) 1488 goto rollback; 1489 } 1490 1491 /* Any requested offloading must be within its device capabilities */ 1492 if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) != 1493 dev_conf->rxmode.offloads) { 1494 RTE_ETHDEV_LOG(ERR, 1495 "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads " 1496 "capabilities 0x%"PRIx64" in %s()\n", 1497 port_id, dev_conf->rxmode.offloads, 1498 dev_info.rx_offload_capa, 1499 __func__); 1500 ret = -EINVAL; 1501 goto rollback; 1502 } 1503 if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) != 1504 dev_conf->txmode.offloads) { 1505 RTE_ETHDEV_LOG(ERR, 1506 "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads " 1507 "capabilities 0x%"PRIx64" in %s()\n", 1508 port_id, dev_conf->txmode.offloads, 1509 dev_info.tx_offload_capa, 1510 __func__); 1511 ret = -EINVAL; 1512 goto rollback; 1513 } 1514 1515 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = 1516 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf); 1517 1518 /* Check that device supports requested rss hash functions. */ 1519 if ((dev_info.flow_type_rss_offloads | 1520 dev_conf->rx_adv_conf.rss_conf.rss_hf) != 1521 dev_info.flow_type_rss_offloads) { 1522 RTE_ETHDEV_LOG(ERR, 1523 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 1524 port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf, 1525 dev_info.flow_type_rss_offloads); 1526 ret = -EINVAL; 1527 goto rollback; 1528 } 1529 1530 /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */ 1531 if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) && 1532 (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) { 1533 RTE_ETHDEV_LOG(ERR, 1534 "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n", 1535 port_id, 1536 rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH)); 1537 ret = -EINVAL; 1538 goto rollback; 1539 } 1540 1541 /* 1542 * Setup new number of RX/TX queues and reconfigure device. 1543 */ 1544 diag = eth_dev_rx_queue_config(dev, nb_rx_q); 1545 if (diag != 0) { 1546 RTE_ETHDEV_LOG(ERR, 1547 "Port%u eth_dev_rx_queue_config = %d\n", 1548 port_id, diag); 1549 ret = diag; 1550 goto rollback; 1551 } 1552 1553 diag = eth_dev_tx_queue_config(dev, nb_tx_q); 1554 if (diag != 0) { 1555 RTE_ETHDEV_LOG(ERR, 1556 "Port%u eth_dev_tx_queue_config = %d\n", 1557 port_id, diag); 1558 eth_dev_rx_queue_config(dev, 0); 1559 ret = diag; 1560 goto rollback; 1561 } 1562 1563 diag = (*dev->dev_ops->dev_configure)(dev); 1564 if (diag != 0) { 1565 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n", 1566 port_id, diag); 1567 ret = eth_err(port_id, diag); 1568 goto reset_queues; 1569 } 1570 1571 /* Initialize Rx profiling if enabled at compilation time. */ 1572 diag = __rte_eth_dev_profile_init(port_id, dev); 1573 if (diag != 0) { 1574 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n", 1575 port_id, diag); 1576 ret = eth_err(port_id, diag); 1577 goto reset_queues; 1578 } 1579 1580 /* Validate Rx offloads. 
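	 * (compare the offloads requested in dev_conf with those the PMD left
	 * enabled after dev_configure; see eth_dev_validate_offloads() above)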
*/ 1581 diag = eth_dev_validate_offloads(port_id, 1582 dev_conf->rxmode.offloads, 1583 dev->data->dev_conf.rxmode.offloads, "Rx", 1584 rte_eth_dev_rx_offload_name); 1585 if (diag != 0) { 1586 ret = diag; 1587 goto reset_queues; 1588 } 1589 1590 /* Validate Tx offloads. */ 1591 diag = eth_dev_validate_offloads(port_id, 1592 dev_conf->txmode.offloads, 1593 dev->data->dev_conf.txmode.offloads, "Tx", 1594 rte_eth_dev_tx_offload_name); 1595 if (diag != 0) { 1596 ret = diag; 1597 goto reset_queues; 1598 } 1599 1600 dev->data->dev_configured = 1; 1601 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0); 1602 return 0; 1603 reset_queues: 1604 eth_dev_rx_queue_config(dev, 0); 1605 eth_dev_tx_queue_config(dev, 0); 1606 rollback: 1607 memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf)); 1608 if (old_mtu != dev->data->mtu) 1609 dev->data->mtu = old_mtu; 1610 1611 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret); 1612 return ret; 1613 } 1614 1615 void 1616 rte_eth_dev_internal_reset(struct rte_eth_dev *dev) 1617 { 1618 if (dev->data->dev_started) { 1619 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n", 1620 dev->data->port_id); 1621 return; 1622 } 1623 1624 eth_dev_rx_queue_config(dev, 0); 1625 eth_dev_tx_queue_config(dev, 0); 1626 1627 memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf)); 1628 } 1629 1630 static void 1631 eth_dev_mac_restore(struct rte_eth_dev *dev, 1632 struct rte_eth_dev_info *dev_info) 1633 { 1634 struct rte_ether_addr *addr; 1635 uint16_t i; 1636 uint32_t pool = 0; 1637 uint64_t pool_mask; 1638 1639 /* replay MAC address configuration including default MAC */ 1640 addr = &dev->data->mac_addrs[0]; 1641 if (*dev->dev_ops->mac_addr_set != NULL) 1642 (*dev->dev_ops->mac_addr_set)(dev, addr); 1643 else if (*dev->dev_ops->mac_addr_add != NULL) 1644 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); 1645 1646 if (*dev->dev_ops->mac_addr_add != NULL) { 1647 for (i = 1; i < dev_info->max_mac_addrs; i++) { 1648 addr = &dev->data->mac_addrs[i]; 1649 1650 /* skip zero address */ 1651 if (rte_is_zero_ether_addr(addr)) 1652 continue; 1653 1654 pool = 0; 1655 pool_mask = dev->data->mac_pool_sel[i]; 1656 1657 do { 1658 if (pool_mask & 1ULL) 1659 (*dev->dev_ops->mac_addr_add)(dev, 1660 addr, i, pool); 1661 pool_mask >>= 1; 1662 pool++; 1663 } while (pool_mask); 1664 } 1665 } 1666 } 1667 1668 static int 1669 eth_dev_config_restore(struct rte_eth_dev *dev, 1670 struct rte_eth_dev_info *dev_info, uint16_t port_id) 1671 { 1672 int ret; 1673 1674 if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)) 1675 eth_dev_mac_restore(dev, dev_info); 1676 1677 /* replay promiscuous configuration */ 1678 /* 1679 * use callbacks directly since we don't need port_id check and 1680 * would like to bypass the same value set 1681 */ 1682 if (rte_eth_promiscuous_get(port_id) == 1 && 1683 *dev->dev_ops->promiscuous_enable != NULL) { 1684 ret = eth_err(port_id, 1685 (*dev->dev_ops->promiscuous_enable)(dev)); 1686 if (ret != 0 && ret != -ENOTSUP) { 1687 RTE_ETHDEV_LOG(ERR, 1688 "Failed to enable promiscuous mode for device (port %u): %s\n", 1689 port_id, rte_strerror(-ret)); 1690 return ret; 1691 } 1692 } else if (rte_eth_promiscuous_get(port_id) == 0 && 1693 *dev->dev_ops->promiscuous_disable != NULL) { 1694 ret = eth_err(port_id, 1695 (*dev->dev_ops->promiscuous_disable)(dev)); 1696 if (ret != 0 && ret != -ENOTSUP) { 1697 RTE_ETHDEV_LOG(ERR, 1698 "Failed to disable promiscuous mode for device (port %u): %s\n", 1699 port_id, 
rte_strerror(-ret)); 1700 return ret; 1701 } 1702 } 1703 1704 /* replay all multicast configuration */ 1705 /* 1706 * use callbacks directly since we don't need port_id check and 1707 * would like to bypass the same value set 1708 */ 1709 if (rte_eth_allmulticast_get(port_id) == 1 && 1710 *dev->dev_ops->allmulticast_enable != NULL) { 1711 ret = eth_err(port_id, 1712 (*dev->dev_ops->allmulticast_enable)(dev)); 1713 if (ret != 0 && ret != -ENOTSUP) { 1714 RTE_ETHDEV_LOG(ERR, 1715 "Failed to enable allmulticast mode for device (port %u): %s\n", 1716 port_id, rte_strerror(-ret)); 1717 return ret; 1718 } 1719 } else if (rte_eth_allmulticast_get(port_id) == 0 && 1720 *dev->dev_ops->allmulticast_disable != NULL) { 1721 ret = eth_err(port_id, 1722 (*dev->dev_ops->allmulticast_disable)(dev)); 1723 if (ret != 0 && ret != -ENOTSUP) { 1724 RTE_ETHDEV_LOG(ERR, 1725 "Failed to disable allmulticast mode for device (port %u): %s\n", 1726 port_id, rte_strerror(-ret)); 1727 return ret; 1728 } 1729 } 1730 1731 return 0; 1732 } 1733 1734 int 1735 rte_eth_dev_start(uint16_t port_id) 1736 { 1737 struct rte_eth_dev *dev; 1738 struct rte_eth_dev_info dev_info; 1739 int diag; 1740 int ret, ret_stop; 1741 1742 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1743 dev = &rte_eth_devices[port_id]; 1744 1745 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP); 1746 1747 if (dev->data->dev_configured == 0) { 1748 RTE_ETHDEV_LOG(INFO, 1749 "Device with port_id=%"PRIu16" is not configured.\n", 1750 port_id); 1751 return -EINVAL; 1752 } 1753 1754 if (dev->data->dev_started != 0) { 1755 RTE_ETHDEV_LOG(INFO, 1756 "Device with port_id=%"PRIu16" already started\n", 1757 port_id); 1758 return 0; 1759 } 1760 1761 ret = rte_eth_dev_info_get(port_id, &dev_info); 1762 if (ret != 0) 1763 return ret; 1764 1765 /* Lets restore MAC now if device does not support live change */ 1766 if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR) 1767 eth_dev_mac_restore(dev, &dev_info); 1768 1769 diag = (*dev->dev_ops->dev_start)(dev); 1770 if (diag == 0) 1771 dev->data->dev_started = 1; 1772 else 1773 return eth_err(port_id, diag); 1774 1775 ret = eth_dev_config_restore(dev, &dev_info, port_id); 1776 if (ret != 0) { 1777 RTE_ETHDEV_LOG(ERR, 1778 "Error during restoring configuration for device (port %u): %s\n", 1779 port_id, rte_strerror(-ret)); 1780 ret_stop = rte_eth_dev_stop(port_id); 1781 if (ret_stop != 0) { 1782 RTE_ETHDEV_LOG(ERR, 1783 "Failed to stop device (port %u): %s\n", 1784 port_id, rte_strerror(-ret_stop)); 1785 } 1786 1787 return ret; 1788 } 1789 1790 if (dev->data->dev_conf.intr_conf.lsc == 0) { 1791 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 1792 (*dev->dev_ops->link_update)(dev, 0); 1793 } 1794 1795 rte_ethdev_trace_start(port_id); 1796 return 0; 1797 } 1798 1799 int 1800 rte_eth_dev_stop(uint16_t port_id) 1801 { 1802 struct rte_eth_dev *dev; 1803 int ret; 1804 1805 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1806 dev = &rte_eth_devices[port_id]; 1807 1808 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP); 1809 1810 if (dev->data->dev_started == 0) { 1811 RTE_ETHDEV_LOG(INFO, 1812 "Device with port_id=%"PRIu16" already stopped\n", 1813 port_id); 1814 return 0; 1815 } 1816 1817 dev->data->dev_started = 0; 1818 ret = (*dev->dev_ops->dev_stop)(dev); 1819 rte_ethdev_trace_stop(port_id, ret); 1820 1821 return ret; 1822 } 1823 1824 int 1825 rte_eth_dev_set_link_up(uint16_t port_id) 1826 { 1827 struct rte_eth_dev *dev; 1828 1829 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1830 
dev = &rte_eth_devices[port_id]; 1831 1832 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP); 1833 return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev)); 1834 } 1835 1836 int 1837 rte_eth_dev_set_link_down(uint16_t port_id) 1838 { 1839 struct rte_eth_dev *dev; 1840 1841 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1842 dev = &rte_eth_devices[port_id]; 1843 1844 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP); 1845 return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev)); 1846 } 1847 1848 int 1849 rte_eth_dev_close(uint16_t port_id) 1850 { 1851 struct rte_eth_dev *dev; 1852 int firsterr, binerr; 1853 int *lasterr = &firsterr; 1854 1855 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1856 dev = &rte_eth_devices[port_id]; 1857 1858 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP); 1859 *lasterr = (*dev->dev_ops->dev_close)(dev); 1860 if (*lasterr != 0) 1861 lasterr = &binerr; 1862 1863 rte_ethdev_trace_close(port_id); 1864 *lasterr = rte_eth_dev_release_port(dev); 1865 1866 return firsterr; 1867 } 1868 1869 int 1870 rte_eth_dev_reset(uint16_t port_id) 1871 { 1872 struct rte_eth_dev *dev; 1873 int ret; 1874 1875 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1876 dev = &rte_eth_devices[port_id]; 1877 1878 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP); 1879 1880 ret = rte_eth_dev_stop(port_id); 1881 if (ret != 0) { 1882 RTE_ETHDEV_LOG(ERR, 1883 "Failed to stop device (port %u) before reset: %s - ignore\n", 1884 port_id, rte_strerror(-ret)); 1885 } 1886 ret = dev->dev_ops->dev_reset(dev); 1887 1888 return eth_err(port_id, ret); 1889 } 1890 1891 int 1892 rte_eth_dev_is_removed(uint16_t port_id) 1893 { 1894 struct rte_eth_dev *dev; 1895 int ret; 1896 1897 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); 1898 dev = &rte_eth_devices[port_id]; 1899 1900 if (dev->state == RTE_ETH_DEV_REMOVED) 1901 return 1; 1902 1903 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0); 1904 1905 ret = dev->dev_ops->is_removed(dev); 1906 if (ret != 0) 1907 /* Device is physically removed. */ 1908 dev->state = RTE_ETH_DEV_REMOVED; 1909 1910 return ret; 1911 } 1912 1913 static int 1914 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg, 1915 uint16_t n_seg, uint32_t *mbp_buf_size, 1916 const struct rte_eth_dev_info *dev_info) 1917 { 1918 const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa; 1919 struct rte_mempool *mp_first; 1920 uint32_t offset_mask; 1921 uint16_t seg_idx; 1922 1923 if (n_seg > seg_capa->max_nseg) { 1924 RTE_ETHDEV_LOG(ERR, 1925 "Requested Rx segments %u exceed supported %u\n", 1926 n_seg, seg_capa->max_nseg); 1927 return -EINVAL; 1928 } 1929 /* 1930 * Check the sizes and offsets against buffer sizes 1931 * for each segment specified in extended configuration. 
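	 * Note: the first segment's offset is additionally increased by
	 * RTE_PKTMBUF_HEADROOM below before the buffer-size check.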
1932 */ 1933 mp_first = rx_seg[0].mp; 1934 offset_mask = (1u << seg_capa->offset_align_log2) - 1; 1935 for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { 1936 struct rte_mempool *mpl = rx_seg[seg_idx].mp; 1937 uint32_t length = rx_seg[seg_idx].length; 1938 uint32_t offset = rx_seg[seg_idx].offset; 1939 1940 if (mpl == NULL) { 1941 RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); 1942 return -EINVAL; 1943 } 1944 if (seg_idx != 0 && mp_first != mpl && 1945 seg_capa->multi_pools == 0) { 1946 RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n"); 1947 return -ENOTSUP; 1948 } 1949 if (offset != 0) { 1950 if (seg_capa->offset_allowed == 0) { 1951 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n"); 1952 return -ENOTSUP; 1953 } 1954 if (offset & offset_mask) { 1955 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n", 1956 offset, 1957 seg_capa->offset_align_log2); 1958 return -EINVAL; 1959 } 1960 } 1961 if (mpl->private_data_size < 1962 sizeof(struct rte_pktmbuf_pool_private)) { 1963 RTE_ETHDEV_LOG(ERR, 1964 "%s private_data_size %u < %u\n", 1965 mpl->name, mpl->private_data_size, 1966 (unsigned int)sizeof 1967 (struct rte_pktmbuf_pool_private)); 1968 return -ENOSPC; 1969 } 1970 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; 1971 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); 1972 length = length != 0 ? length : *mbp_buf_size; 1973 if (*mbp_buf_size < length + offset) { 1974 RTE_ETHDEV_LOG(ERR, 1975 "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n", 1976 mpl->name, *mbp_buf_size, 1977 length + offset, length, offset); 1978 return -EINVAL; 1979 } 1980 } 1981 return 0; 1982 } 1983 1984 int 1985 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 1986 uint16_t nb_rx_desc, unsigned int socket_id, 1987 const struct rte_eth_rxconf *rx_conf, 1988 struct rte_mempool *mp) 1989 { 1990 int ret; 1991 uint32_t mbp_buf_size; 1992 struct rte_eth_dev *dev; 1993 struct rte_eth_dev_info dev_info; 1994 struct rte_eth_rxconf local_conf; 1995 1996 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1997 dev = &rte_eth_devices[port_id]; 1998 1999 if (rx_queue_id >= dev->data->nb_rx_queues) { 2000 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id); 2001 return -EINVAL; 2002 } 2003 2004 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP); 2005 2006 ret = rte_eth_dev_info_get(port_id, &dev_info); 2007 if (ret != 0) 2008 return ret; 2009 2010 if (mp != NULL) { 2011 /* Single pool configuration check. */ 2012 if (rx_conf != NULL && rx_conf->rx_nseg != 0) { 2013 RTE_ETHDEV_LOG(ERR, 2014 "Ambiguous segment configuration\n"); 2015 return -EINVAL; 2016 } 2017 /* 2018 * Check the size of the mbuf data buffer, this value 2019 * must be provided in the private data of the memory pool. 2020 * First check that the memory pool(s) has a valid private data. 
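		 * (private_data_size must cover struct rte_pktmbuf_pool_private,
		 * as set up by e.g. rte_pktmbuf_pool_create())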
2021 */ 2022 if (mp->private_data_size < 2023 sizeof(struct rte_pktmbuf_pool_private)) { 2024 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 2025 mp->name, mp->private_data_size, 2026 (unsigned int) 2027 sizeof(struct rte_pktmbuf_pool_private)); 2028 return -ENOSPC; 2029 } 2030 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2031 if (mbp_buf_size < dev_info.min_rx_bufsize + 2032 RTE_PKTMBUF_HEADROOM) { 2033 RTE_ETHDEV_LOG(ERR, 2034 "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", 2035 mp->name, mbp_buf_size, 2036 RTE_PKTMBUF_HEADROOM + 2037 dev_info.min_rx_bufsize, 2038 RTE_PKTMBUF_HEADROOM, 2039 dev_info.min_rx_bufsize); 2040 return -EINVAL; 2041 } 2042 } else { 2043 const struct rte_eth_rxseg_split *rx_seg; 2044 uint16_t n_seg; 2045 2046 /* Extended multi-segment configuration check. */ 2047 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) { 2048 RTE_ETHDEV_LOG(ERR, 2049 "Memory pool is null and no extended configuration provided\n"); 2050 return -EINVAL; 2051 } 2052 2053 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2054 n_seg = rx_conf->rx_nseg; 2055 2056 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2057 ret = rte_eth_rx_queue_check_split(rx_seg, n_seg, 2058 &mbp_buf_size, 2059 &dev_info); 2060 if (ret != 0) 2061 return ret; 2062 } else { 2063 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2064 return -EINVAL; 2065 } 2066 } 2067 2068 /* Use default specified by driver, if nb_rx_desc is zero */ 2069 if (nb_rx_desc == 0) { 2070 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2071 /* If driver default is also zero, fall back on EAL default */ 2072 if (nb_rx_desc == 0) 2073 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2074 } 2075 2076 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2077 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2078 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2079 2080 RTE_ETHDEV_LOG(ERR, 2081 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2082 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2083 dev_info.rx_desc_lim.nb_min, 2084 dev_info.rx_desc_lim.nb_align); 2085 return -EINVAL; 2086 } 2087 2088 if (dev->data->dev_started && 2089 !(dev_info.dev_capa & 2090 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2091 return -EBUSY; 2092 2093 if (dev->data->dev_started && 2094 (dev->data->rx_queue_state[rx_queue_id] != 2095 RTE_ETH_QUEUE_STATE_STOPPED)) 2096 return -EBUSY; 2097 2098 eth_dev_rxq_release(dev, rx_queue_id); 2099 2100 if (rx_conf == NULL) 2101 rx_conf = &dev_info.default_rxconf; 2102 2103 local_conf = *rx_conf; 2104 2105 /* 2106 * If an offloading has already been enabled in 2107 * rte_eth_dev_configure(), it has been enabled on all queues, 2108 * so there is no need to enable it in this queue again. 2109 * The local_conf.offloads input to underlying PMD only carries 2110 * those offloadings which are only enabled on this queue and 2111 * not enabled on all queues. 2112 */ 2113 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2114 2115 /* 2116 * New added offloadings for this queue are those not enabled in 2117 * rte_eth_dev_configure() and they must be per-queue type. 2118 * A pure per-port offloading can't be enabled on a queue while 2119 * disabled on another queue. A pure per-port offloading can't 2120 * be enabled for any queue as new added one if it hasn't been 2121 * enabled in rte_eth_dev_configure(). 
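 * (Editor's note, illustrative only: a port-wide offload such as
 * DEV_RX_OFFLOAD_VLAN_STRIP is typically set in
 * rte_eth_conf.rxmode.offloads before rte_eth_dev_configure(), while a
 * queue-only offload is passed here through rte_eth_rxconf.offloads and
 * must appear in dev_info.rx_queue_offload_capa, e.g., assuming the PMD
 * reports it as a per-queue capability:
 *
 *	struct rte_eth_rxconf rxq_conf = dev_info.default_rxconf;
 *	rxq_conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
 *
 * The check below enforces exactly that relationship.)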
2122 */ 2123 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2124 local_conf.offloads) { 2125 RTE_ETHDEV_LOG(ERR, 2126 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2127 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2128 port_id, rx_queue_id, local_conf.offloads, 2129 dev_info.rx_queue_offload_capa, 2130 __func__); 2131 return -EINVAL; 2132 } 2133 2134 /* 2135 * If LRO is enabled, check that the maximum aggregated packet 2136 * size is supported by the configured device. 2137 */ 2138 if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) { 2139 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2140 dev->data->dev_conf.rxmode.max_lro_pkt_size = 2141 dev->data->dev_conf.rxmode.max_rx_pkt_len; 2142 int ret = eth_dev_check_lro_pkt_size(port_id, 2143 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2144 dev->data->dev_conf.rxmode.max_rx_pkt_len, 2145 dev_info.max_lro_pkt_size); 2146 if (ret != 0) 2147 return ret; 2148 } 2149 2150 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2151 socket_id, &local_conf, mp); 2152 if (!ret) { 2153 if (!dev->data->min_rx_buf_size || 2154 dev->data->min_rx_buf_size > mbp_buf_size) 2155 dev->data->min_rx_buf_size = mbp_buf_size; 2156 } 2157 2158 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2159 rx_conf, ret); 2160 return eth_err(port_id, ret); 2161 } 2162 2163 int 2164 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2165 uint16_t nb_rx_desc, 2166 const struct rte_eth_hairpin_conf *conf) 2167 { 2168 int ret; 2169 struct rte_eth_dev *dev; 2170 struct rte_eth_hairpin_cap cap; 2171 int i; 2172 int count; 2173 2174 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2175 dev = &rte_eth_devices[port_id]; 2176 2177 if (rx_queue_id >= dev->data->nb_rx_queues) { 2178 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id); 2179 return -EINVAL; 2180 } 2181 2182 if (conf == NULL) { 2183 RTE_ETHDEV_LOG(ERR, 2184 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2185 port_id); 2186 return -EINVAL; 2187 } 2188 2189 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2190 if (ret != 0) 2191 return ret; 2192 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, 2193 -ENOTSUP); 2194 /* if nb_rx_desc is zero use max number of desc from the driver. 
 */
2195 	if (nb_rx_desc == 0)
2196 		nb_rx_desc = cap.max_nb_desc;
2197 	if (nb_rx_desc > cap.max_nb_desc) {
2198 		RTE_ETHDEV_LOG(ERR,
2199 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
2200 			nb_rx_desc, cap.max_nb_desc);
2201 		return -EINVAL;
2202 	}
2203 	if (conf->peer_count > cap.max_rx_2_tx) {
2204 		RTE_ETHDEV_LOG(ERR,
2205 			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
2206 			conf->peer_count, cap.max_rx_2_tx);
2207 		return -EINVAL;
2208 	}
2209 	if (conf->peer_count == 0) {
2210 		RTE_ETHDEV_LOG(ERR,
2211 			"Invalid value for number of peers for Rx queue(=%u), should be: > 0",
2212 			conf->peer_count);
2213 		return -EINVAL;
2214 	}
2215 	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2216 	     cap.max_nb_queues != UINT16_MAX; i++) {
2217 		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2218 			count++;
2219 	}
2220 	if (count > cap.max_nb_queues) {
2221 		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d",
2222 			cap.max_nb_queues);
2223 		return -EINVAL;
2224 	}
2225 	if (dev->data->dev_started)
2226 		return -EBUSY;
2227 	eth_dev_rxq_release(dev, rx_queue_id);
2228 	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2229 						      nb_rx_desc, conf);
2230 	if (ret == 0)
2231 		dev->data->rx_queue_state[rx_queue_id] =
2232 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2233 	return eth_err(port_id, ret);
2234 }
2235 
2236 int
2237 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2238 		       uint16_t nb_tx_desc, unsigned int socket_id,
2239 		       const struct rte_eth_txconf *tx_conf)
2240 {
2241 	struct rte_eth_dev *dev;
2242 	struct rte_eth_dev_info dev_info;
2243 	struct rte_eth_txconf local_conf;
2244 	int ret;
2245 
2246 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2247 	dev = &rte_eth_devices[port_id];
2248 
2249 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2250 		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2251 		return -EINVAL;
2252 	}
2253 
2254 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2255 
2256 	ret = rte_eth_dev_info_get(port_id, &dev_info);
2257 	if (ret != 0)
2258 		return ret;
2259 
2260 	/* Use default specified by driver, if nb_tx_desc is zero */
2261 	if (nb_tx_desc == 0) {
2262 		nb_tx_desc = dev_info.default_txportconf.ring_size;
2263 		/* If driver default is zero, fall back on EAL default */
2264 		if (nb_tx_desc == 0)
2265 			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2266 	}
2267 	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2268 	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2269 	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2270 		RTE_ETHDEV_LOG(ERR,
2271 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2272 			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2273 			dev_info.tx_desc_lim.nb_min,
2274 			dev_info.tx_desc_lim.nb_align);
2275 		return -EINVAL;
2276 	}
2277 
2278 	if (dev->data->dev_started &&
2279 	    !(dev_info.dev_capa &
2280 	      RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2281 		return -EBUSY;
2282 
2283 	if (dev->data->dev_started &&
2284 	    (dev->data->tx_queue_state[tx_queue_id] !=
2285 	     RTE_ETH_QUEUE_STATE_STOPPED))
2286 		return -EBUSY;
2287 
2288 	eth_dev_txq_release(dev, tx_queue_id);
2289 
2290 	if (tx_conf == NULL)
2291 		tx_conf = &dev_info.default_txconf;
2292 
2293 	local_conf = *tx_conf;
2294 
2295 	/*
2296 	 * If an offloading has already been enabled in
2297 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2298 	 * so there is no need to enable it in this queue again.
2299 	 * The local_conf.offloads input to underlying PMD only carries
2300 	 * those offloadings which are only enabled on this queue and
2301 	 * not enabled on all queues.
2302 	 */
2303 	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2304 
2305 	/*
2306 	 * New added offloadings for this queue are those not enabled in
2307 	 * rte_eth_dev_configure() and they must be per-queue type.
2308 	 * A pure per-port offloading can't be enabled on a queue while
2309 	 * disabled on another queue. A pure per-port offloading can't
2310 	 * be enabled for any queue as new added one if it hasn't been
2311 	 * enabled in rte_eth_dev_configure().
2312 	 */
2313 	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2314 	     local_conf.offloads) {
2315 		RTE_ETHDEV_LOG(ERR,
2316 			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2317 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2318 			port_id, tx_queue_id, local_conf.offloads,
2319 			dev_info.tx_queue_offload_capa,
2320 			__func__);
2321 		return -EINVAL;
2322 	}
2323 
2324 	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2325 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2326 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2327 }
2328 
2329 int
2330 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2331 			       uint16_t nb_tx_desc,
2332 			       const struct rte_eth_hairpin_conf *conf)
2333 {
2334 	struct rte_eth_dev *dev;
2335 	struct rte_eth_hairpin_cap cap;
2336 	int i;
2337 	int count;
2338 	int ret;
2339 
2340 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2341 	dev = &rte_eth_devices[port_id];
2342 
2343 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2344 		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2345 		return -EINVAL;
2346 	}
2347 
2348 	if (conf == NULL) {
2349 		RTE_ETHDEV_LOG(ERR,
2350 			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2351 			port_id);
2352 		return -EINVAL;
2353 	}
2354 
2355 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2356 	if (ret != 0)
2357 		return ret;
2358 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2359 				-ENOTSUP);
2360 	/* if nb_tx_desc is zero use max number of desc from the driver. */
2361 	if (nb_tx_desc == 0)
2362 		nb_tx_desc = cap.max_nb_desc;
2363 	if (nb_tx_desc > cap.max_nb_desc) {
2364 		RTE_ETHDEV_LOG(ERR,
2365 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2366 			nb_tx_desc, cap.max_nb_desc);
2367 		return -EINVAL;
2368 	}
2369 	if (conf->peer_count > cap.max_tx_2_rx) {
2370 		RTE_ETHDEV_LOG(ERR,
2371 			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
2372 			conf->peer_count, cap.max_tx_2_rx);
2373 		return -EINVAL;
2374 	}
2375 	if (conf->peer_count == 0) {
2376 		RTE_ETHDEV_LOG(ERR,
2377 			"Invalid value for number of peers for Tx queue(=%u), should be: > 0",
2378 			conf->peer_count);
2379 		return -EINVAL;
2380 	}
2381 	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2382 	     cap.max_nb_queues != UINT16_MAX; i++) {
2383 		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2384 			count++;
2385 	}
2386 	if (count > cap.max_nb_queues) {
2387 		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d",
2388 			cap.max_nb_queues);
2389 		return -EINVAL;
2390 	}
2391 	if (dev->data->dev_started)
2392 		return -EBUSY;
2393 	eth_dev_txq_release(dev, tx_queue_id);
2394 	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2395 		(dev, tx_queue_id, nb_tx_desc, conf);
2396 	if (ret == 0)
2397 		dev->data->tx_queue_state[tx_queue_id] =
2398 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2399 	return eth_err(port_id, ret);
2400 }
2401 
2402 int
2403 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2404 {
2405 	struct rte_eth_dev *dev;
2406 	int ret;
2407 
2408 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2409 	dev = &rte_eth_devices[tx_port];
2410 
2411 	if (dev->data->dev_started == 0) {
2412 		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2413 		return -EBUSY;
2414 	}
2415 
2416 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2417 	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2418 	if (ret != 0)
2419 		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2420 			       " to Rx %d (%d - all ports)\n",
2421 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2422 
2423 	return ret;
2424 }
2425 
2426 int
2427 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2428 {
2429 	struct rte_eth_dev *dev;
2430 	int ret;
2431 
2432 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2433 	dev = &rte_eth_devices[tx_port];
2434 
2435 	if (dev->data->dev_started == 0) {
2436 		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2437 		return -EBUSY;
2438 	}
2439 
2440 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2441 	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2442 	if (ret != 0)
2443 		RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2444 			       " from Rx %d (%d - all ports)\n",
2445 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2446 
2447 	return ret;
2448 }
2449 
2450 int
2451 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2452 			       size_t len, uint32_t direction)
2453 {
2454 	struct rte_eth_dev *dev;
2455 	int ret;
2456 
2457 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2458 	dev = &rte_eth_devices[port_id];
2459 
2460 	if (peer_ports == NULL) {
2461 		RTE_ETHDEV_LOG(ERR,
2462 			"Cannot get ethdev port %u hairpin peer ports to NULL\n",
2463 			port_id);
2464 		return -EINVAL;
2465 	}
2466 
2467 	if (len == 0) {
2468 		RTE_ETHDEV_LOG(ERR,
2469 			"Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2470 			port_id);
2471 		return -EINVAL;
2472 	}
2473 
2474 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2475 				-ENOTSUP);
2476 
2477 	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2478 						      len, direction);
2479 	if (ret < 0)
2480
RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2481 port_id, direction ? "Rx" : "Tx"); 2482 2483 return ret; 2484 } 2485 2486 void 2487 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2488 void *userdata __rte_unused) 2489 { 2490 rte_pktmbuf_free_bulk(pkts, unsent); 2491 } 2492 2493 void 2494 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2495 void *userdata) 2496 { 2497 uint64_t *count = userdata; 2498 2499 rte_pktmbuf_free_bulk(pkts, unsent); 2500 *count += unsent; 2501 } 2502 2503 int 2504 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2505 buffer_tx_error_fn cbfn, void *userdata) 2506 { 2507 if (buffer == NULL) { 2508 RTE_ETHDEV_LOG(ERR, 2509 "Cannot set Tx buffer error callback to NULL buffer\n"); 2510 return -EINVAL; 2511 } 2512 2513 buffer->error_callback = cbfn; 2514 buffer->error_userdata = userdata; 2515 return 0; 2516 } 2517 2518 int 2519 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2520 { 2521 int ret = 0; 2522 2523 if (buffer == NULL) { 2524 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2525 return -EINVAL; 2526 } 2527 2528 buffer->size = size; 2529 if (buffer->error_callback == NULL) { 2530 ret = rte_eth_tx_buffer_set_err_callback( 2531 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2532 } 2533 2534 return ret; 2535 } 2536 2537 int 2538 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2539 { 2540 struct rte_eth_dev *dev; 2541 int ret; 2542 2543 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2544 dev = &rte_eth_devices[port_id]; 2545 2546 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2547 2548 /* Call driver to free pending mbufs. */ 2549 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2550 free_cnt); 2551 return eth_err(port_id, ret); 2552 } 2553 2554 int 2555 rte_eth_promiscuous_enable(uint16_t port_id) 2556 { 2557 struct rte_eth_dev *dev; 2558 int diag = 0; 2559 2560 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2561 dev = &rte_eth_devices[port_id]; 2562 2563 if (dev->data->promiscuous == 1) 2564 return 0; 2565 2566 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2567 2568 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2569 dev->data->promiscuous = (diag == 0) ? 
1 : 0; 2570 2571 return eth_err(port_id, diag); 2572 } 2573 2574 int 2575 rte_eth_promiscuous_disable(uint16_t port_id) 2576 { 2577 struct rte_eth_dev *dev; 2578 int diag = 0; 2579 2580 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2581 dev = &rte_eth_devices[port_id]; 2582 2583 if (dev->data->promiscuous == 0) 2584 return 0; 2585 2586 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2587 2588 dev->data->promiscuous = 0; 2589 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2590 if (diag != 0) 2591 dev->data->promiscuous = 1; 2592 2593 return eth_err(port_id, diag); 2594 } 2595 2596 int 2597 rte_eth_promiscuous_get(uint16_t port_id) 2598 { 2599 struct rte_eth_dev *dev; 2600 2601 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2602 dev = &rte_eth_devices[port_id]; 2603 2604 return dev->data->promiscuous; 2605 } 2606 2607 int 2608 rte_eth_allmulticast_enable(uint16_t port_id) 2609 { 2610 struct rte_eth_dev *dev; 2611 int diag; 2612 2613 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2614 dev = &rte_eth_devices[port_id]; 2615 2616 if (dev->data->all_multicast == 1) 2617 return 0; 2618 2619 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2620 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2621 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2622 2623 return eth_err(port_id, diag); 2624 } 2625 2626 int 2627 rte_eth_allmulticast_disable(uint16_t port_id) 2628 { 2629 struct rte_eth_dev *dev; 2630 int diag; 2631 2632 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2633 dev = &rte_eth_devices[port_id]; 2634 2635 if (dev->data->all_multicast == 0) 2636 return 0; 2637 2638 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2639 dev->data->all_multicast = 0; 2640 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2641 if (diag != 0) 2642 dev->data->all_multicast = 1; 2643 2644 return eth_err(port_id, diag); 2645 } 2646 2647 int 2648 rte_eth_allmulticast_get(uint16_t port_id) 2649 { 2650 struct rte_eth_dev *dev; 2651 2652 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2653 dev = &rte_eth_devices[port_id]; 2654 2655 return dev->data->all_multicast; 2656 } 2657 2658 int 2659 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2660 { 2661 struct rte_eth_dev *dev; 2662 2663 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2664 dev = &rte_eth_devices[port_id]; 2665 2666 if (eth_link == NULL) { 2667 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2668 port_id); 2669 return -EINVAL; 2670 } 2671 2672 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2673 rte_eth_linkstatus_get(dev, eth_link); 2674 else { 2675 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2676 (*dev->dev_ops->link_update)(dev, 1); 2677 *eth_link = dev->data->dev_link; 2678 } 2679 2680 return 0; 2681 } 2682 2683 int 2684 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2685 { 2686 struct rte_eth_dev *dev; 2687 2688 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2689 dev = &rte_eth_devices[port_id]; 2690 2691 if (eth_link == NULL) { 2692 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2693 port_id); 2694 return -EINVAL; 2695 } 2696 2697 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2698 rte_eth_linkstatus_get(dev, eth_link); 2699 else { 2700 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2701 (*dev->dev_ops->link_update)(dev, 0); 2702 *eth_link = dev->data->dev_link; 2703 } 2704 2705 return 0; 2706 } 2707 2708 const 
char * 2709 rte_eth_link_speed_to_str(uint32_t link_speed) 2710 { 2711 switch (link_speed) { 2712 case ETH_SPEED_NUM_NONE: return "None"; 2713 case ETH_SPEED_NUM_10M: return "10 Mbps"; 2714 case ETH_SPEED_NUM_100M: return "100 Mbps"; 2715 case ETH_SPEED_NUM_1G: return "1 Gbps"; 2716 case ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2717 case ETH_SPEED_NUM_5G: return "5 Gbps"; 2718 case ETH_SPEED_NUM_10G: return "10 Gbps"; 2719 case ETH_SPEED_NUM_20G: return "20 Gbps"; 2720 case ETH_SPEED_NUM_25G: return "25 Gbps"; 2721 case ETH_SPEED_NUM_40G: return "40 Gbps"; 2722 case ETH_SPEED_NUM_50G: return "50 Gbps"; 2723 case ETH_SPEED_NUM_56G: return "56 Gbps"; 2724 case ETH_SPEED_NUM_100G: return "100 Gbps"; 2725 case ETH_SPEED_NUM_200G: return "200 Gbps"; 2726 case ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2727 default: return "Invalid"; 2728 } 2729 } 2730 2731 int 2732 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2733 { 2734 if (str == NULL) { 2735 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2736 return -EINVAL; 2737 } 2738 2739 if (len == 0) { 2740 RTE_ETHDEV_LOG(ERR, 2741 "Cannot convert link to string with zero size\n"); 2742 return -EINVAL; 2743 } 2744 2745 if (eth_link == NULL) { 2746 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2747 return -EINVAL; 2748 } 2749 2750 if (eth_link->link_status == ETH_LINK_DOWN) 2751 return snprintf(str, len, "Link down"); 2752 else 2753 return snprintf(str, len, "Link up at %s %s %s", 2754 rte_eth_link_speed_to_str(eth_link->link_speed), 2755 (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ? 2756 "FDX" : "HDX", 2757 (eth_link->link_autoneg == ETH_LINK_AUTONEG) ? 2758 "Autoneg" : "Fixed"); 2759 } 2760 2761 int 2762 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2763 { 2764 struct rte_eth_dev *dev; 2765 2766 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2767 dev = &rte_eth_devices[port_id]; 2768 2769 if (stats == NULL) { 2770 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2771 port_id); 2772 return -EINVAL; 2773 } 2774 2775 memset(stats, 0, sizeof(*stats)); 2776 2777 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2778 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2779 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2780 } 2781 2782 int 2783 rte_eth_stats_reset(uint16_t port_id) 2784 { 2785 struct rte_eth_dev *dev; 2786 int ret; 2787 2788 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2789 dev = &rte_eth_devices[port_id]; 2790 2791 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2792 ret = (*dev->dev_ops->stats_reset)(dev); 2793 if (ret != 0) 2794 return eth_err(port_id, ret); 2795 2796 dev->data->rx_mbuf_alloc_failed = 0; 2797 2798 return 0; 2799 } 2800 2801 static inline int 2802 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2803 { 2804 uint16_t nb_rxqs, nb_txqs; 2805 int count; 2806 2807 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2808 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2809 2810 count = RTE_NB_STATS; 2811 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2812 count += nb_rxqs * RTE_NB_RXQ_STATS; 2813 count += nb_txqs * RTE_NB_TXQ_STATS; 2814 } 2815 2816 return count; 2817 } 2818 2819 static int 2820 eth_dev_get_xstats_count(uint16_t port_id) 2821 { 2822 struct rte_eth_dev *dev; 2823 int count; 2824 2825 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2826 dev = &rte_eth_devices[port_id]; 2827 if 
(dev->dev_ops->xstats_get_names != NULL) { 2828 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2829 if (count < 0) 2830 return eth_err(port_id, count); 2831 } else 2832 count = 0; 2833 2834 2835 count += eth_dev_get_xstats_basic_count(dev); 2836 2837 return count; 2838 } 2839 2840 int 2841 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2842 uint64_t *id) 2843 { 2844 int cnt_xstats, idx_xstat; 2845 2846 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2847 2848 if (xstat_name == NULL) { 2849 RTE_ETHDEV_LOG(ERR, 2850 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2851 port_id); 2852 return -ENOMEM; 2853 } 2854 2855 if (id == NULL) { 2856 RTE_ETHDEV_LOG(ERR, 2857 "Cannot get ethdev port %u xstats ID to NULL\n", 2858 port_id); 2859 return -ENOMEM; 2860 } 2861 2862 /* Get count */ 2863 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2864 if (cnt_xstats < 0) { 2865 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2866 return -ENODEV; 2867 } 2868 2869 /* Get id-name lookup table */ 2870 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2871 2872 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2873 port_id, xstats_names, cnt_xstats, NULL)) { 2874 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2875 return -1; 2876 } 2877 2878 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2879 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2880 *id = idx_xstat; 2881 return 0; 2882 }; 2883 } 2884 2885 return -EINVAL; 2886 } 2887 2888 /* retrieve basic stats names */ 2889 static int 2890 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2891 struct rte_eth_xstat_name *xstats_names) 2892 { 2893 int cnt_used_entries = 0; 2894 uint32_t idx, id_queue; 2895 uint16_t num_q; 2896 2897 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2898 strlcpy(xstats_names[cnt_used_entries].name, 2899 eth_dev_stats_strings[idx].name, 2900 sizeof(xstats_names[0].name)); 2901 cnt_used_entries++; 2902 } 2903 2904 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2905 return cnt_used_entries; 2906 2907 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2908 for (id_queue = 0; id_queue < num_q; id_queue++) { 2909 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2910 snprintf(xstats_names[cnt_used_entries].name, 2911 sizeof(xstats_names[0].name), 2912 "rx_q%u_%s", 2913 id_queue, eth_dev_rxq_stats_strings[idx].name); 2914 cnt_used_entries++; 2915 } 2916 2917 } 2918 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2919 for (id_queue = 0; id_queue < num_q; id_queue++) { 2920 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 2921 snprintf(xstats_names[cnt_used_entries].name, 2922 sizeof(xstats_names[0].name), 2923 "tx_q%u_%s", 2924 id_queue, eth_dev_txq_stats_strings[idx].name); 2925 cnt_used_entries++; 2926 } 2927 } 2928 return cnt_used_entries; 2929 } 2930 2931 /* retrieve ethdev extended statistics names */ 2932 int 2933 rte_eth_xstats_get_names_by_id(uint16_t port_id, 2934 struct rte_eth_xstat_name *xstats_names, unsigned int size, 2935 uint64_t *ids) 2936 { 2937 struct rte_eth_xstat_name *xstats_names_copy; 2938 unsigned int no_basic_stat_requested = 1; 2939 unsigned int no_ext_stat_requested = 1; 2940 unsigned int expected_entries; 2941 unsigned int basic_count; 2942 struct rte_eth_dev *dev; 2943 unsigned int i; 2944 int ret; 2945 2946 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2947 dev = &rte_eth_devices[port_id]; 2948 2949 basic_count = eth_dev_get_xstats_basic_count(dev); 2950 
ret = eth_dev_get_xstats_count(port_id); 2951 if (ret < 0) 2952 return ret; 2953 expected_entries = (unsigned int)ret; 2954 2955 /* Return max number of stats if no ids given */ 2956 if (!ids) { 2957 if (!xstats_names) 2958 return expected_entries; 2959 else if (xstats_names && size < expected_entries) 2960 return expected_entries; 2961 } 2962 2963 if (ids && !xstats_names) 2964 return -EINVAL; 2965 2966 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 2967 uint64_t ids_copy[size]; 2968 2969 for (i = 0; i < size; i++) { 2970 if (ids[i] < basic_count) { 2971 no_basic_stat_requested = 0; 2972 break; 2973 } 2974 2975 /* 2976 * Convert ids to xstats ids that PMD knows. 2977 * ids known by user are basic + extended stats. 2978 */ 2979 ids_copy[i] = ids[i] - basic_count; 2980 } 2981 2982 if (no_basic_stat_requested) 2983 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 2984 ids_copy, xstats_names, size); 2985 } 2986 2987 /* Retrieve all stats */ 2988 if (!ids) { 2989 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 2990 expected_entries); 2991 if (num_stats < 0 || num_stats > (int)expected_entries) 2992 return num_stats; 2993 else 2994 return expected_entries; 2995 } 2996 2997 xstats_names_copy = calloc(expected_entries, 2998 sizeof(struct rte_eth_xstat_name)); 2999 3000 if (!xstats_names_copy) { 3001 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3002 return -ENOMEM; 3003 } 3004 3005 if (ids) { 3006 for (i = 0; i < size; i++) { 3007 if (ids[i] >= basic_count) { 3008 no_ext_stat_requested = 0; 3009 break; 3010 } 3011 } 3012 } 3013 3014 /* Fill xstats_names_copy structure */ 3015 if (ids && no_ext_stat_requested) { 3016 eth_basic_stats_get_names(dev, xstats_names_copy); 3017 } else { 3018 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3019 expected_entries); 3020 if (ret < 0) { 3021 free(xstats_names_copy); 3022 return ret; 3023 } 3024 } 3025 3026 /* Filter stats */ 3027 for (i = 0; i < size; i++) { 3028 if (ids[i] >= expected_entries) { 3029 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3030 free(xstats_names_copy); 3031 return -1; 3032 } 3033 xstats_names[i] = xstats_names_copy[ids[i]]; 3034 } 3035 3036 free(xstats_names_copy); 3037 return size; 3038 } 3039 3040 int 3041 rte_eth_xstats_get_names(uint16_t port_id, 3042 struct rte_eth_xstat_name *xstats_names, 3043 unsigned int size) 3044 { 3045 struct rte_eth_dev *dev; 3046 int cnt_used_entries; 3047 int cnt_expected_entries; 3048 int cnt_driver_entries; 3049 3050 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3051 if (xstats_names == NULL || cnt_expected_entries < 0 || 3052 (int)size < cnt_expected_entries) 3053 return cnt_expected_entries; 3054 3055 /* port_id checked in eth_dev_get_xstats_count() */ 3056 dev = &rte_eth_devices[port_id]; 3057 3058 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3059 3060 if (dev->dev_ops->xstats_get_names != NULL) { 3061 /* If there are any driver-specific xstats, append them 3062 * to end of list. 
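 * (Editor's note, illustrative only: a typical application retrieval
 * sequence, with hypothetical variable names, is to size the arrays
 * first and then fetch names and values with matching indexes:
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *	struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, vals, n);
 *
 * Basic stats come first and driver-specific ones follow, as appended
 * here.)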
3063 	 */
3064 		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3065 			dev,
3066 			xstats_names + cnt_used_entries,
3067 			size - cnt_used_entries);
3068 		if (cnt_driver_entries < 0)
3069 			return eth_err(port_id, cnt_driver_entries);
3070 		cnt_used_entries += cnt_driver_entries;
3071 	}
3072 
3073 	return cnt_used_entries;
3074 }
3075 
3076 
3077 static int
3078 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
3079 {
3080 	struct rte_eth_dev *dev;
3081 	struct rte_eth_stats eth_stats;
3082 	unsigned int count = 0, i, q;
3083 	uint64_t val, *stats_ptr;
3084 	uint16_t nb_rxqs, nb_txqs;
3085 	int ret;
3086 
3087 	ret = rte_eth_stats_get(port_id, &eth_stats);
3088 	if (ret < 0)
3089 		return ret;
3090 
3091 	dev = &rte_eth_devices[port_id];
3092 
3093 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3094 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3095 
3096 	/* global stats */
3097 	for (i = 0; i < RTE_NB_STATS; i++) {
3098 		stats_ptr = RTE_PTR_ADD(&eth_stats,
3099 					eth_dev_stats_strings[i].offset);
3100 		val = *stats_ptr;
3101 		xstats[count++].value = val;
3102 	}
3103 
3104 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3105 		return count;
3106 
3107 	/* per-rxq stats */
3108 	for (q = 0; q < nb_rxqs; q++) {
3109 		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3110 			stats_ptr = RTE_PTR_ADD(&eth_stats,
3111 					eth_dev_rxq_stats_strings[i].offset +
3112 					q * sizeof(uint64_t));
3113 			val = *stats_ptr;
3114 			xstats[count++].value = val;
3115 		}
3116 	}
3117 
3118 	/* per-txq stats */
3119 	for (q = 0; q < nb_txqs; q++) {
3120 		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3121 			stats_ptr = RTE_PTR_ADD(&eth_stats,
3122 					eth_dev_txq_stats_strings[i].offset +
3123 					q * sizeof(uint64_t));
3124 			val = *stats_ptr;
3125 			xstats[count++].value = val;
3126 		}
3127 	}
3128 	return count;
3129 }
3130 
3131 /* retrieve ethdev extended statistics */
3132 int
3133 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3134 			 uint64_t *values, unsigned int size)
3135 {
3136 	unsigned int no_basic_stat_requested = 1;
3137 	unsigned int no_ext_stat_requested = 1;
3138 	unsigned int num_xstats_filled;
3139 	unsigned int basic_count;
3140 	uint16_t expected_entries;
3141 	struct rte_eth_dev *dev;
3142 	unsigned int i;
3143 	int ret;
3144 
3145 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3146 	dev = &rte_eth_devices[port_id];
3147 
3148 	ret = eth_dev_get_xstats_count(port_id);
3149 	if (ret < 0)
3150 		return ret;
3151 	expected_entries = (uint16_t)ret;
3152 	struct rte_eth_xstat xstats[expected_entries];
3153 	basic_count = eth_dev_get_xstats_basic_count(dev);
3154 
3155 	/* Return max number of stats if no ids given */
3156 	if (!ids) {
3157 		if (!values)
3158 			return expected_entries;
3159 		else if (values && size < expected_entries)
3160 			return expected_entries;
3161 	}
3162 
3163 	if (ids && !values)
3164 		return -EINVAL;
3165 
3166 	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3167 		unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
3168 		uint64_t ids_copy[size];
3169 
3170 		for (i = 0; i < size; i++) {
3171 			if (ids[i] < basic_count) {
3172 				no_basic_stat_requested = 0;
3173 				break;
3174 			}
3175 
3176 			/*
3177 			 * Convert ids to xstats ids that PMD knows.
3178 			 * ids known by user are basic + extended stats.
3179 */ 3180 ids_copy[i] = ids[i] - basic_count; 3181 } 3182 3183 if (no_basic_stat_requested) 3184 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3185 values, size); 3186 } 3187 3188 if (ids) { 3189 for (i = 0; i < size; i++) { 3190 if (ids[i] >= basic_count) { 3191 no_ext_stat_requested = 0; 3192 break; 3193 } 3194 } 3195 } 3196 3197 /* Fill the xstats structure */ 3198 if (ids && no_ext_stat_requested) 3199 ret = eth_basic_stats_get(port_id, xstats); 3200 else 3201 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3202 3203 if (ret < 0) 3204 return ret; 3205 num_xstats_filled = (unsigned int)ret; 3206 3207 /* Return all stats */ 3208 if (!ids) { 3209 for (i = 0; i < num_xstats_filled; i++) 3210 values[i] = xstats[i].value; 3211 return expected_entries; 3212 } 3213 3214 /* Filter stats */ 3215 for (i = 0; i < size; i++) { 3216 if (ids[i] >= expected_entries) { 3217 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3218 return -1; 3219 } 3220 values[i] = xstats[ids[i]].value; 3221 } 3222 return size; 3223 } 3224 3225 int 3226 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3227 unsigned int n) 3228 { 3229 struct rte_eth_dev *dev; 3230 unsigned int count = 0, i; 3231 signed int xcount = 0; 3232 uint16_t nb_rxqs, nb_txqs; 3233 int ret; 3234 3235 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3236 dev = &rte_eth_devices[port_id]; 3237 3238 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3239 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3240 3241 /* Return generic statistics */ 3242 count = RTE_NB_STATS; 3243 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) 3244 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS); 3245 3246 /* implemented by the driver */ 3247 if (dev->dev_ops->xstats_get != NULL) { 3248 /* Retrieve the xstats from the driver at the end of the 3249 * xstats struct. 3250 */ 3251 xcount = (*dev->dev_ops->xstats_get)(dev, 3252 xstats ? xstats + count : NULL, 3253 (n > count) ? 
n - count : 0); 3254 3255 if (xcount < 0) 3256 return eth_err(port_id, xcount); 3257 } 3258 3259 if (n < count + xcount || xstats == NULL) 3260 return count + xcount; 3261 3262 /* now fill the xstats structure */ 3263 ret = eth_basic_stats_get(port_id, xstats); 3264 if (ret < 0) 3265 return ret; 3266 count = ret; 3267 3268 for (i = 0; i < count; i++) 3269 xstats[i].id = i; 3270 /* add an offset to driver-specific stats */ 3271 for ( ; i < count + xcount; i++) 3272 xstats[i].id += count; 3273 3274 return count + xcount; 3275 } 3276 3277 /* reset ethdev extended statistics */ 3278 int 3279 rte_eth_xstats_reset(uint16_t port_id) 3280 { 3281 struct rte_eth_dev *dev; 3282 3283 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3284 dev = &rte_eth_devices[port_id]; 3285 3286 /* implemented by the driver */ 3287 if (dev->dev_ops->xstats_reset != NULL) 3288 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3289 3290 /* fallback to default */ 3291 return rte_eth_stats_reset(port_id); 3292 } 3293 3294 static int 3295 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3296 uint8_t stat_idx, uint8_t is_rx) 3297 { 3298 struct rte_eth_dev *dev; 3299 3300 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3301 dev = &rte_eth_devices[port_id]; 3302 3303 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3304 return -EINVAL; 3305 3306 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3307 return -EINVAL; 3308 3309 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3310 return -EINVAL; 3311 3312 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 3313 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3314 } 3315 3316 int 3317 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3318 uint8_t stat_idx) 3319 { 3320 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3321 tx_queue_id, 3322 stat_idx, STAT_QMAP_TX)); 3323 } 3324 3325 int 3326 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3327 uint8_t stat_idx) 3328 { 3329 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3330 rx_queue_id, 3331 stat_idx, STAT_QMAP_RX)); 3332 } 3333 3334 int 3335 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3336 { 3337 struct rte_eth_dev *dev; 3338 3339 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3340 dev = &rte_eth_devices[port_id]; 3341 3342 if (fw_version == NULL && fw_size > 0) { 3343 RTE_ETHDEV_LOG(ERR, 3344 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3345 port_id); 3346 return -EINVAL; 3347 } 3348 3349 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 3350 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3351 fw_version, fw_size)); 3352 } 3353 3354 int 3355 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3356 { 3357 struct rte_eth_dev *dev; 3358 const struct rte_eth_desc_lim lim = { 3359 .nb_max = UINT16_MAX, 3360 .nb_min = 0, 3361 .nb_align = 1, 3362 .nb_seg_max = UINT16_MAX, 3363 .nb_mtu_seg_max = UINT16_MAX, 3364 }; 3365 int diag; 3366 3367 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3368 dev = &rte_eth_devices[port_id]; 3369 3370 if (dev_info == NULL) { 3371 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3372 port_id); 3373 return -EINVAL; 3374 } 3375 3376 /* 3377 * Init dev_info before port_id check since caller does not have 3378 * return status and does not know if get is successful or not. 
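 * (Editor's note, illustrative only: callers should still check the
 * return value; a hypothetical configuration sequence might be
 *
 *	struct rte_eth_dev_info info;
 *	if (rte_eth_dev_info_get(port_id, &info) != 0)
 *		return;
 *	nb_rxq = RTE_MIN(nb_rxq, info.max_rx_queues);
 *
 * before calling rte_eth_dev_configure().)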
3379 */ 3380 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3381 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3382 3383 dev_info->rx_desc_lim = lim; 3384 dev_info->tx_desc_lim = lim; 3385 dev_info->device = dev->device; 3386 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3387 dev_info->max_mtu = UINT16_MAX; 3388 3389 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 3390 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3391 if (diag != 0) { 3392 /* Cleanup already filled in device information */ 3393 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3394 return eth_err(port_id, diag); 3395 } 3396 3397 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3398 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3399 RTE_MAX_QUEUES_PER_PORT); 3400 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3401 RTE_MAX_QUEUES_PER_PORT); 3402 3403 dev_info->driver_name = dev->device->driver->name; 3404 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3405 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3406 3407 dev_info->dev_flags = &dev->data->dev_flags; 3408 3409 return 0; 3410 } 3411 3412 int 3413 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3414 uint32_t *ptypes, int num) 3415 { 3416 int i, j; 3417 struct rte_eth_dev *dev; 3418 const uint32_t *all_ptypes; 3419 3420 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3421 dev = &rte_eth_devices[port_id]; 3422 3423 if (ptypes == NULL && num > 0) { 3424 RTE_ETHDEV_LOG(ERR, 3425 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3426 port_id); 3427 return -EINVAL; 3428 } 3429 3430 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3431 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3432 3433 if (!all_ptypes) 3434 return 0; 3435 3436 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3437 if (all_ptypes[i] & ptype_mask) { 3438 if (j < num) 3439 ptypes[j] = all_ptypes[i]; 3440 j++; 3441 } 3442 3443 return j; 3444 } 3445 3446 int 3447 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3448 uint32_t *set_ptypes, unsigned int num) 3449 { 3450 const uint32_t valid_ptype_masks[] = { 3451 RTE_PTYPE_L2_MASK, 3452 RTE_PTYPE_L3_MASK, 3453 RTE_PTYPE_L4_MASK, 3454 RTE_PTYPE_TUNNEL_MASK, 3455 RTE_PTYPE_INNER_L2_MASK, 3456 RTE_PTYPE_INNER_L3_MASK, 3457 RTE_PTYPE_INNER_L4_MASK, 3458 }; 3459 const uint32_t *all_ptypes; 3460 struct rte_eth_dev *dev; 3461 uint32_t unused_mask; 3462 unsigned int i, j; 3463 int ret; 3464 3465 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3466 dev = &rte_eth_devices[port_id]; 3467 3468 if (num > 0 && set_ptypes == NULL) { 3469 RTE_ETHDEV_LOG(ERR, 3470 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3471 port_id); 3472 return -EINVAL; 3473 } 3474 3475 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3476 *dev->dev_ops->dev_ptypes_set == NULL) { 3477 ret = 0; 3478 goto ptype_unknown; 3479 } 3480 3481 if (ptype_mask == 0) { 3482 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3483 ptype_mask); 3484 goto ptype_unknown; 3485 } 3486 3487 unused_mask = ptype_mask; 3488 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3489 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3490 if (mask && mask != valid_ptype_masks[i]) { 3491 ret = -EINVAL; 3492 goto ptype_unknown; 3493 } 3494 unused_mask &= ~valid_ptype_masks[i]; 3495 } 3496 3497 if (unused_mask) { 3498 ret = -EINVAL; 3499 goto ptype_unknown; 3500 } 3501 
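	/*
	 * Editor's note (descriptive, illustrative only): at this point
	 * ptype_mask has been validated to contain only whole groups such
	 * as RTE_PTYPE_L3_MASK or RTE_PTYPE_L4_MASK. For example, a caller
	 * interested only in L3/L4 classification might have passed
	 * RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK together with a small
	 * set_ptypes[] array to learn which of those the PMD will report.
	 */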
3502 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3503 if (all_ptypes == NULL) { 3504 ret = 0; 3505 goto ptype_unknown; 3506 } 3507 3508 /* 3509 * Accommodate as many set_ptypes as possible. If the supplied 3510 * set_ptypes array is insufficient fill it partially. 3511 */ 3512 for (i = 0, j = 0; set_ptypes != NULL && 3513 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3514 if (ptype_mask & all_ptypes[i]) { 3515 if (j < num - 1) { 3516 set_ptypes[j] = all_ptypes[i]; 3517 j++; 3518 continue; 3519 } 3520 break; 3521 } 3522 } 3523 3524 if (set_ptypes != NULL && j < num) 3525 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3526 3527 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3528 3529 ptype_unknown: 3530 if (num > 0) 3531 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3532 3533 return ret; 3534 } 3535 3536 int 3537 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3538 { 3539 struct rte_eth_dev *dev; 3540 3541 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3542 dev = &rte_eth_devices[port_id]; 3543 3544 if (mac_addr == NULL) { 3545 RTE_ETHDEV_LOG(ERR, 3546 "Cannot get ethdev port %u MAC address to NULL\n", 3547 port_id); 3548 return -EINVAL; 3549 } 3550 3551 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3552 3553 return 0; 3554 } 3555 3556 int 3557 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3558 { 3559 struct rte_eth_dev *dev; 3560 3561 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3562 dev = &rte_eth_devices[port_id]; 3563 3564 if (mtu == NULL) { 3565 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3566 port_id); 3567 return -EINVAL; 3568 } 3569 3570 *mtu = dev->data->mtu; 3571 return 0; 3572 } 3573 3574 int 3575 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3576 { 3577 int ret; 3578 struct rte_eth_dev_info dev_info; 3579 struct rte_eth_dev *dev; 3580 3581 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3582 dev = &rte_eth_devices[port_id]; 3583 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3584 3585 /* 3586 * Check if the device supports dev_infos_get, if it does not 3587 * skip min_mtu/max_mtu validation here as this requires values 3588 * that are populated within the call to rte_eth_dev_info_get() 3589 * which relies on dev->dev_ops->dev_infos_get. 
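 * (Editor's note, illustrative only: for a standard Ethernet port an
 * MTU of RTE_ETHER_MTU (1500) corresponds to a maximum frame length of
 * 1518 bytes once the 14-byte Ethernet header and 4-byte CRC are added,
 * which is typically how a driver derives its min_mtu/max_mtu bounds.)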
3590 */ 3591 if (*dev->dev_ops->dev_infos_get != NULL) { 3592 ret = rte_eth_dev_info_get(port_id, &dev_info); 3593 if (ret != 0) 3594 return ret; 3595 3596 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu) 3597 return -EINVAL; 3598 } 3599 3600 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3601 if (!ret) 3602 dev->data->mtu = mtu; 3603 3604 return eth_err(port_id, ret); 3605 } 3606 3607 int 3608 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3609 { 3610 struct rte_eth_dev *dev; 3611 int ret; 3612 3613 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3614 dev = &rte_eth_devices[port_id]; 3615 3616 if (!(dev->data->dev_conf.rxmode.offloads & 3617 DEV_RX_OFFLOAD_VLAN_FILTER)) { 3618 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n", 3619 port_id); 3620 return -ENOSYS; 3621 } 3622 3623 if (vlan_id > 4095) { 3624 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3625 port_id, vlan_id); 3626 return -EINVAL; 3627 } 3628 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3629 3630 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3631 if (ret == 0) { 3632 struct rte_vlan_filter_conf *vfc; 3633 int vidx; 3634 int vbit; 3635 3636 vfc = &dev->data->vlan_filter_conf; 3637 vidx = vlan_id / 64; 3638 vbit = vlan_id % 64; 3639 3640 if (on) 3641 vfc->ids[vidx] |= UINT64_C(1) << vbit; 3642 else 3643 vfc->ids[vidx] &= ~(UINT64_C(1) << vbit); 3644 } 3645 3646 return eth_err(port_id, ret); 3647 } 3648 3649 int 3650 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3651 int on) 3652 { 3653 struct rte_eth_dev *dev; 3654 3655 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3656 dev = &rte_eth_devices[port_id]; 3657 3658 if (rx_queue_id >= dev->data->nb_rx_queues) { 3659 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3660 return -EINVAL; 3661 } 3662 3663 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3664 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3665 3666 return 0; 3667 } 3668 3669 int 3670 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3671 enum rte_vlan_type vlan_type, 3672 uint16_t tpid) 3673 { 3674 struct rte_eth_dev *dev; 3675 3676 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3677 dev = &rte_eth_devices[port_id]; 3678 3679 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3680 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3681 tpid)); 3682 } 3683 3684 int 3685 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3686 { 3687 struct rte_eth_dev_info dev_info; 3688 struct rte_eth_dev *dev; 3689 int ret = 0; 3690 int mask = 0; 3691 int cur, org = 0; 3692 uint64_t orig_offloads; 3693 uint64_t dev_offloads; 3694 uint64_t new_offloads; 3695 3696 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3697 dev = &rte_eth_devices[port_id]; 3698 3699 /* save original values in case of failure */ 3700 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3701 dev_offloads = orig_offloads; 3702 3703 /* check which option changed by application */ 3704 cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD); 3705 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP); 3706 if (cur != org) { 3707 if (cur) 3708 dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 3709 else 3710 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3711 mask |= ETH_VLAN_STRIP_MASK; 3712 } 3713 3714 cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD); 3715 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER); 3716 if (cur != org) { 3717 if (cur) 3718 dev_offloads |= 
DEV_RX_OFFLOAD_VLAN_FILTER; 3719 else 3720 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3721 mask |= ETH_VLAN_FILTER_MASK; 3722 } 3723 3724 cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD); 3725 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND); 3726 if (cur != org) { 3727 if (cur) 3728 dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 3729 else 3730 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3731 mask |= ETH_VLAN_EXTEND_MASK; 3732 } 3733 3734 cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD); 3735 org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP); 3736 if (cur != org) { 3737 if (cur) 3738 dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3739 else 3740 dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3741 mask |= ETH_QINQ_STRIP_MASK; 3742 } 3743 3744 /*no change*/ 3745 if (mask == 0) 3746 return ret; 3747 3748 ret = rte_eth_dev_info_get(port_id, &dev_info); 3749 if (ret != 0) 3750 return ret; 3751 3752 /* Rx VLAN offloading must be within its device capabilities */ 3753 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3754 new_offloads = dev_offloads & ~orig_offloads; 3755 RTE_ETHDEV_LOG(ERR, 3756 "Ethdev port_id=%u requested new added VLAN offloads " 3757 "0x%" PRIx64 " must be within Rx offloads capabilities " 3758 "0x%" PRIx64 " in %s()\n", 3759 port_id, new_offloads, dev_info.rx_offload_capa, 3760 __func__); 3761 return -EINVAL; 3762 } 3763 3764 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3765 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3766 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3767 if (ret) { 3768 /* hit an error restore original values */ 3769 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3770 } 3771 3772 return eth_err(port_id, ret); 3773 } 3774 3775 int 3776 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3777 { 3778 struct rte_eth_dev *dev; 3779 uint64_t *dev_offloads; 3780 int ret = 0; 3781 3782 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3783 dev = &rte_eth_devices[port_id]; 3784 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3785 3786 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 3787 ret |= ETH_VLAN_STRIP_OFFLOAD; 3788 3789 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 3790 ret |= ETH_VLAN_FILTER_OFFLOAD; 3791 3792 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 3793 ret |= ETH_VLAN_EXTEND_OFFLOAD; 3794 3795 if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP) 3796 ret |= ETH_QINQ_STRIP_OFFLOAD; 3797 3798 return ret; 3799 } 3800 3801 int 3802 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3803 { 3804 struct rte_eth_dev *dev; 3805 3806 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3807 dev = &rte_eth_devices[port_id]; 3808 3809 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3810 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3811 } 3812 3813 int 3814 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3815 { 3816 struct rte_eth_dev *dev; 3817 3818 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3819 dev = &rte_eth_devices[port_id]; 3820 3821 if (fc_conf == NULL) { 3822 RTE_ETHDEV_LOG(ERR, 3823 "Cannot get ethdev port %u flow control config to NULL\n", 3824 port_id); 3825 return -EINVAL; 3826 } 3827 3828 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3829 memset(fc_conf, 0, sizeof(*fc_conf)); 3830 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3831 } 3832 3833 int 3834 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3835 { 3836 struct rte_eth_dev 
*dev; 3837 3838 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3839 dev = &rte_eth_devices[port_id]; 3840 3841 if (fc_conf == NULL) { 3842 RTE_ETHDEV_LOG(ERR, 3843 "Cannot set ethdev port %u flow control from NULL config\n", 3844 port_id); 3845 return -EINVAL; 3846 } 3847 3848 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 3849 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 3850 return -EINVAL; 3851 } 3852 3853 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 3854 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 3855 } 3856 3857 int 3858 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 3859 struct rte_eth_pfc_conf *pfc_conf) 3860 { 3861 struct rte_eth_dev *dev; 3862 3863 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3864 dev = &rte_eth_devices[port_id]; 3865 3866 if (pfc_conf == NULL) { 3867 RTE_ETHDEV_LOG(ERR, 3868 "Cannot set ethdev port %u priority flow control from NULL config\n", 3869 port_id); 3870 return -EINVAL; 3871 } 3872 3873 if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) { 3874 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 3875 return -EINVAL; 3876 } 3877 3878 /* High water, low water validation are device specific */ 3879 if (*dev->dev_ops->priority_flow_ctrl_set) 3880 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 3881 (dev, pfc_conf)); 3882 return -ENOTSUP; 3883 } 3884 3885 static int 3886 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 3887 uint16_t reta_size) 3888 { 3889 uint16_t i, num; 3890 3891 num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE; 3892 for (i = 0; i < num; i++) { 3893 if (reta_conf[i].mask) 3894 return 0; 3895 } 3896 3897 return -EINVAL; 3898 } 3899 3900 static int 3901 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 3902 uint16_t reta_size, 3903 uint16_t max_rxq) 3904 { 3905 uint16_t i, idx, shift; 3906 3907 if (max_rxq == 0) { 3908 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 3909 return -EINVAL; 3910 } 3911 3912 for (i = 0; i < reta_size; i++) { 3913 idx = i / RTE_RETA_GROUP_SIZE; 3914 shift = i % RTE_RETA_GROUP_SIZE; 3915 if ((reta_conf[idx].mask & (1ULL << shift)) && 3916 (reta_conf[idx].reta[shift] >= max_rxq)) { 3917 RTE_ETHDEV_LOG(ERR, 3918 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 3919 idx, shift, 3920 reta_conf[idx].reta[shift], max_rxq); 3921 return -EINVAL; 3922 } 3923 } 3924 3925 return 0; 3926 } 3927 3928 int 3929 rte_eth_dev_rss_reta_update(uint16_t port_id, 3930 struct rte_eth_rss_reta_entry64 *reta_conf, 3931 uint16_t reta_size) 3932 { 3933 struct rte_eth_dev *dev; 3934 int ret; 3935 3936 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3937 dev = &rte_eth_devices[port_id]; 3938 3939 if (reta_conf == NULL) { 3940 RTE_ETHDEV_LOG(ERR, 3941 "Cannot update ethdev port %u RSS RETA to NULL\n", 3942 port_id); 3943 return -EINVAL; 3944 } 3945 3946 if (reta_size == 0) { 3947 RTE_ETHDEV_LOG(ERR, 3948 "Cannot update ethdev port %u RSS RETA with zero size\n", 3949 port_id); 3950 return -EINVAL; 3951 } 3952 3953 /* Check mask bits */ 3954 ret = eth_check_reta_mask(reta_conf, reta_size); 3955 if (ret < 0) 3956 return ret; 3957 3958 /* Check entry value */ 3959 ret = eth_check_reta_entry(reta_conf, reta_size, 3960 dev->data->nb_rx_queues); 3961 if (ret < 0) 3962 return ret; 3963 3964 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 3965 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 3966 reta_size)); 3967 } 3968 
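/*
 * Editor's note (illustrative sketch, not part of the library): an
 * application typically fills the RETA by setting the mask bit and the
 * target queue for every entry, e.g. spreading reta_size entries
 * round-robin over nb_rx_queues (both are values the application already
 * obtained or configured, and reta_size is assumed to be a multiple of
 * RTE_RETA_GROUP_SIZE here):
 *
 *	struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_RETA_GROUP_SIZE];
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < reta_size; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_rx_queues;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 */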
3969 int 3970 rte_eth_dev_rss_reta_query(uint16_t port_id, 3971 struct rte_eth_rss_reta_entry64 *reta_conf, 3972 uint16_t reta_size) 3973 { 3974 struct rte_eth_dev *dev; 3975 int ret; 3976 3977 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3978 dev = &rte_eth_devices[port_id]; 3979 3980 if (reta_conf == NULL) { 3981 RTE_ETHDEV_LOG(ERR, 3982 "Cannot query ethdev port %u RSS RETA from NULL config\n", 3983 port_id); 3984 return -EINVAL; 3985 } 3986 3987 /* Check mask bits */ 3988 ret = eth_check_reta_mask(reta_conf, reta_size); 3989 if (ret < 0) 3990 return ret; 3991 3992 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 3993 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 3994 reta_size)); 3995 } 3996 3997 int 3998 rte_eth_dev_rss_hash_update(uint16_t port_id, 3999 struct rte_eth_rss_conf *rss_conf) 4000 { 4001 struct rte_eth_dev *dev; 4002 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4003 int ret; 4004 4005 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4006 dev = &rte_eth_devices[port_id]; 4007 4008 if (rss_conf == NULL) { 4009 RTE_ETHDEV_LOG(ERR, 4010 "Cannot update ethdev port %u RSS hash from NULL config\n", 4011 port_id); 4012 return -EINVAL; 4013 } 4014 4015 ret = rte_eth_dev_info_get(port_id, &dev_info); 4016 if (ret != 0) 4017 return ret; 4018 4019 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4020 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4021 dev_info.flow_type_rss_offloads) { 4022 RTE_ETHDEV_LOG(ERR, 4023 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4024 port_id, rss_conf->rss_hf, 4025 dev_info.flow_type_rss_offloads); 4026 return -EINVAL; 4027 } 4028 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 4029 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4030 rss_conf)); 4031 } 4032 4033 int 4034 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4035 struct rte_eth_rss_conf *rss_conf) 4036 { 4037 struct rte_eth_dev *dev; 4038 4039 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4040 dev = &rte_eth_devices[port_id]; 4041 4042 if (rss_conf == NULL) { 4043 RTE_ETHDEV_LOG(ERR, 4044 "Cannot get ethdev port %u RSS hash config to NULL\n", 4045 port_id); 4046 return -EINVAL; 4047 } 4048 4049 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 4050 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4051 rss_conf)); 4052 } 4053 4054 int 4055 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4056 struct rte_eth_udp_tunnel *udp_tunnel) 4057 { 4058 struct rte_eth_dev *dev; 4059 4060 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4061 dev = &rte_eth_devices[port_id]; 4062 4063 if (udp_tunnel == NULL) { 4064 RTE_ETHDEV_LOG(ERR, 4065 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4066 port_id); 4067 return -EINVAL; 4068 } 4069 4070 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4071 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4072 return -EINVAL; 4073 } 4074 4075 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 4076 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4077 udp_tunnel)); 4078 } 4079 4080 int 4081 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4082 struct rte_eth_udp_tunnel *udp_tunnel) 4083 { 4084 struct rte_eth_dev *dev; 4085 4086 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4087 dev = &rte_eth_devices[port_id]; 4088 4089 if (udp_tunnel == NULL) { 4090 RTE_ETHDEV_LOG(ERR, 4091 "Cannot delete ethdev port %u UDP 
tunnel port from NULL UDP tunnel\n", 4092 port_id); 4093 return -EINVAL; 4094 } 4095 4096 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4097 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4098 return -EINVAL; 4099 } 4100 4101 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 4102 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4103 udp_tunnel)); 4104 } 4105 4106 int 4107 rte_eth_led_on(uint16_t port_id) 4108 { 4109 struct rte_eth_dev *dev; 4110 4111 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4112 dev = &rte_eth_devices[port_id]; 4113 4114 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 4115 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4116 } 4117 4118 int 4119 rte_eth_led_off(uint16_t port_id) 4120 { 4121 struct rte_eth_dev *dev; 4122 4123 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4124 dev = &rte_eth_devices[port_id]; 4125 4126 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 4127 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4128 } 4129 4130 int 4131 rte_eth_fec_get_capability(uint16_t port_id, 4132 struct rte_eth_fec_capa *speed_fec_capa, 4133 unsigned int num) 4134 { 4135 struct rte_eth_dev *dev; 4136 int ret; 4137 4138 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4139 dev = &rte_eth_devices[port_id]; 4140 4141 if (speed_fec_capa == NULL && num > 0) { 4142 RTE_ETHDEV_LOG(ERR, 4143 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4144 port_id); 4145 return -EINVAL; 4146 } 4147 4148 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); 4149 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4150 4151 return ret; 4152 } 4153 4154 int 4155 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4156 { 4157 struct rte_eth_dev *dev; 4158 4159 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4160 dev = &rte_eth_devices[port_id]; 4161 4162 if (fec_capa == NULL) { 4163 RTE_ETHDEV_LOG(ERR, 4164 "Cannot get ethdev port %u current FEC mode to NULL\n", 4165 port_id); 4166 return -EINVAL; 4167 } 4168 4169 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); 4170 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4171 } 4172 4173 int 4174 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4175 { 4176 struct rte_eth_dev *dev; 4177 4178 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4179 dev = &rte_eth_devices[port_id]; 4180 4181 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); 4182 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4183 } 4184 4185 /* 4186 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4187 * an empty spot. 
4188 */ 4189 static int 4190 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4191 { 4192 struct rte_eth_dev_info dev_info; 4193 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4194 unsigned i; 4195 int ret; 4196 4197 ret = rte_eth_dev_info_get(port_id, &dev_info); 4198 if (ret != 0) 4199 return -1; 4200 4201 for (i = 0; i < dev_info.max_mac_addrs; i++) 4202 if (memcmp(addr, &dev->data->mac_addrs[i], 4203 RTE_ETHER_ADDR_LEN) == 0) 4204 return i; 4205 4206 return -1; 4207 } 4208 4209 static const struct rte_ether_addr null_mac_addr; 4210 4211 int 4212 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4213 uint32_t pool) 4214 { 4215 struct rte_eth_dev *dev; 4216 int index; 4217 uint64_t pool_mask; 4218 int ret; 4219 4220 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4221 dev = &rte_eth_devices[port_id]; 4222 4223 if (addr == NULL) { 4224 RTE_ETHDEV_LOG(ERR, 4225 "Cannot add ethdev port %u MAC address from NULL address\n", 4226 port_id); 4227 return -EINVAL; 4228 } 4229 4230 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 4231 4232 if (rte_is_zero_ether_addr(addr)) { 4233 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4234 port_id); 4235 return -EINVAL; 4236 } 4237 if (pool >= ETH_64_POOLS) { 4238 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1); 4239 return -EINVAL; 4240 } 4241 4242 index = eth_dev_get_mac_addr_index(port_id, addr); 4243 if (index < 0) { 4244 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4245 if (index < 0) { 4246 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4247 port_id); 4248 return -ENOSPC; 4249 } 4250 } else { 4251 pool_mask = dev->data->mac_pool_sel[index]; 4252 4253 /* Check if both MAC address and pool is already there, and do nothing */ 4254 if (pool_mask & (1ULL << pool)) 4255 return 0; 4256 } 4257 4258 /* Update NIC */ 4259 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4260 4261 if (ret == 0) { 4262 /* Update address in NIC data structure */ 4263 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4264 4265 /* Update pool bitmap in NIC data structure */ 4266 dev->data->mac_pool_sel[index] |= (1ULL << pool); 4267 } 4268 4269 return eth_err(port_id, ret); 4270 } 4271 4272 int 4273 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4274 { 4275 struct rte_eth_dev *dev; 4276 int index; 4277 4278 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4279 dev = &rte_eth_devices[port_id]; 4280 4281 if (addr == NULL) { 4282 RTE_ETHDEV_LOG(ERR, 4283 "Cannot remove ethdev port %u MAC address from NULL address\n", 4284 port_id); 4285 return -EINVAL; 4286 } 4287 4288 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); 4289 4290 index = eth_dev_get_mac_addr_index(port_id, addr); 4291 if (index == 0) { 4292 RTE_ETHDEV_LOG(ERR, 4293 "Port %u: Cannot remove default MAC address\n", 4294 port_id); 4295 return -EADDRINUSE; 4296 } else if (index < 0) 4297 return 0; /* Do nothing if address wasn't found */ 4298 4299 /* Update NIC */ 4300 (*dev->dev_ops->mac_addr_remove)(dev, index); 4301 4302 /* Update address in NIC data structure */ 4303 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4304 4305 /* reset pool bitmap */ 4306 dev->data->mac_pool_sel[index] = 0; 4307 4308 return 0; 4309 } 4310 4311 int 4312 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4313 { 4314 struct rte_eth_dev *dev; 4315 int ret; 4316 4317 
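	/*
	 * Validate the port and the new address before touching the driver;
	 * on success, slot 0 of dev->data->mac_addrs (the default address)
	 * is updated below to stay in sync with the hardware.
	 */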
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4318 dev = &rte_eth_devices[port_id]; 4319 4320 if (addr == NULL) { 4321 RTE_ETHDEV_LOG(ERR, 4322 "Cannot set ethdev port %u default MAC address from NULL address\n", 4323 port_id); 4324 return -EINVAL; 4325 } 4326 4327 if (!rte_is_valid_assigned_ether_addr(addr)) 4328 return -EINVAL; 4329 4330 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 4331 4332 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4333 if (ret < 0) 4334 return ret; 4335 4336 /* Update default address in NIC data structure */ 4337 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4338 4339 return 0; 4340 } 4341 4342 4343 /* 4344 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4345 * an empty spot. 4346 */ 4347 static int 4348 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4349 const struct rte_ether_addr *addr) 4350 { 4351 struct rte_eth_dev_info dev_info; 4352 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4353 unsigned i; 4354 int ret; 4355 4356 ret = rte_eth_dev_info_get(port_id, &dev_info); 4357 if (ret != 0) 4358 return -1; 4359 4360 if (!dev->data->hash_mac_addrs) 4361 return -1; 4362 4363 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4364 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4365 RTE_ETHER_ADDR_LEN) == 0) 4366 return i; 4367 4368 return -1; 4369 } 4370 4371 int 4372 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4373 uint8_t on) 4374 { 4375 int index; 4376 int ret; 4377 struct rte_eth_dev *dev; 4378 4379 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4380 dev = &rte_eth_devices[port_id]; 4381 4382 if (addr == NULL) { 4383 RTE_ETHDEV_LOG(ERR, 4384 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4385 port_id); 4386 return -EINVAL; 4387 } 4388 4389 if (rte_is_zero_ether_addr(addr)) { 4390 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4391 port_id); 4392 return -EINVAL; 4393 } 4394 4395 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4396 /* Check if it's already there, and do nothing */ 4397 if ((index >= 0) && on) 4398 return 0; 4399 4400 if (index < 0) { 4401 if (!on) { 4402 RTE_ETHDEV_LOG(ERR, 4403 "Port %u: the MAC address was not set in UTA\n", 4404 port_id); 4405 return -EINVAL; 4406 } 4407 4408 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4409 if (index < 0) { 4410 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4411 port_id); 4412 return -ENOSPC; 4413 } 4414 } 4415 4416 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 4417 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4418 if (ret == 0) { 4419 /* Update address in NIC data structure */ 4420 if (on) 4421 rte_ether_addr_copy(addr, 4422 &dev->data->hash_mac_addrs[index]); 4423 else 4424 rte_ether_addr_copy(&null_mac_addr, 4425 &dev->data->hash_mac_addrs[index]); 4426 } 4427 4428 return eth_err(port_id, ret); 4429 } 4430 4431 int 4432 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4433 { 4434 struct rte_eth_dev *dev; 4435 4436 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4437 dev = &rte_eth_devices[port_id]; 4438 4439 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 4440 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4441 on)); 4442 } 4443 4444 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4445 uint16_t tx_rate) 4446 { 4447 struct rte_eth_dev *dev; 4448 struct rte_eth_dev_info dev_info; 4449 struct rte_eth_link 
link; 4450 int ret; 4451 4452 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4453 dev = &rte_eth_devices[port_id]; 4454 4455 ret = rte_eth_dev_info_get(port_id, &dev_info); 4456 if (ret != 0) 4457 return ret; 4458 4459 link = dev->data->dev_link; 4460 4461 if (queue_idx > dev_info.max_tx_queues) { 4462 RTE_ETHDEV_LOG(ERR, 4463 "Set queue rate limit:port %u: invalid queue id=%u\n", 4464 port_id, queue_idx); 4465 return -EINVAL; 4466 } 4467 4468 if (tx_rate > link.link_speed) { 4469 RTE_ETHDEV_LOG(ERR, 4470 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 4471 tx_rate, link.link_speed); 4472 return -EINVAL; 4473 } 4474 4475 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); 4476 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 4477 queue_idx, tx_rate)); 4478 } 4479 4480 RTE_INIT(eth_dev_init_cb_lists) 4481 { 4482 uint16_t i; 4483 4484 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 4485 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 4486 } 4487 4488 int 4489 rte_eth_dev_callback_register(uint16_t port_id, 4490 enum rte_eth_event_type event, 4491 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4492 { 4493 struct rte_eth_dev *dev; 4494 struct rte_eth_dev_callback *user_cb; 4495 uint16_t next_port; 4496 uint16_t last_port; 4497 4498 if (cb_fn == NULL) { 4499 RTE_ETHDEV_LOG(ERR, 4500 "Cannot register ethdev port %u callback from NULL\n", 4501 port_id); 4502 return -EINVAL; 4503 } 4504 4505 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4506 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4507 return -EINVAL; 4508 } 4509 4510 if (port_id == RTE_ETH_ALL) { 4511 next_port = 0; 4512 last_port = RTE_MAX_ETHPORTS - 1; 4513 } else { 4514 next_port = last_port = port_id; 4515 } 4516 4517 rte_spinlock_lock(ð_dev_cb_lock); 4518 4519 do { 4520 dev = &rte_eth_devices[next_port]; 4521 4522 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 4523 if (user_cb->cb_fn == cb_fn && 4524 user_cb->cb_arg == cb_arg && 4525 user_cb->event == event) { 4526 break; 4527 } 4528 } 4529 4530 /* create a new callback. 
*/ 4531 if (user_cb == NULL) { 4532 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 4533 sizeof(struct rte_eth_dev_callback), 0); 4534 if (user_cb != NULL) { 4535 user_cb->cb_fn = cb_fn; 4536 user_cb->cb_arg = cb_arg; 4537 user_cb->event = event; 4538 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 4539 user_cb, next); 4540 } else { 4541 rte_spinlock_unlock(ð_dev_cb_lock); 4542 rte_eth_dev_callback_unregister(port_id, event, 4543 cb_fn, cb_arg); 4544 return -ENOMEM; 4545 } 4546 4547 } 4548 } while (++next_port <= last_port); 4549 4550 rte_spinlock_unlock(ð_dev_cb_lock); 4551 return 0; 4552 } 4553 4554 int 4555 rte_eth_dev_callback_unregister(uint16_t port_id, 4556 enum rte_eth_event_type event, 4557 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4558 { 4559 int ret; 4560 struct rte_eth_dev *dev; 4561 struct rte_eth_dev_callback *cb, *next; 4562 uint16_t next_port; 4563 uint16_t last_port; 4564 4565 if (cb_fn == NULL) { 4566 RTE_ETHDEV_LOG(ERR, 4567 "Cannot unregister ethdev port %u callback from NULL\n", 4568 port_id); 4569 return -EINVAL; 4570 } 4571 4572 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4573 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4574 return -EINVAL; 4575 } 4576 4577 if (port_id == RTE_ETH_ALL) { 4578 next_port = 0; 4579 last_port = RTE_MAX_ETHPORTS - 1; 4580 } else { 4581 next_port = last_port = port_id; 4582 } 4583 4584 rte_spinlock_lock(ð_dev_cb_lock); 4585 4586 do { 4587 dev = &rte_eth_devices[next_port]; 4588 ret = 0; 4589 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 4590 cb = next) { 4591 4592 next = TAILQ_NEXT(cb, next); 4593 4594 if (cb->cb_fn != cb_fn || cb->event != event || 4595 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 4596 continue; 4597 4598 /* 4599 * if this callback is not executing right now, 4600 * then remove it. 
4601 */ 4602 if (cb->active == 0) { 4603 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 4604 rte_free(cb); 4605 } else { 4606 ret = -EAGAIN; 4607 } 4608 } 4609 } while (++next_port <= last_port); 4610 4611 rte_spinlock_unlock(ð_dev_cb_lock); 4612 return ret; 4613 } 4614 4615 int 4616 rte_eth_dev_callback_process(struct rte_eth_dev *dev, 4617 enum rte_eth_event_type event, void *ret_param) 4618 { 4619 struct rte_eth_dev_callback *cb_lst; 4620 struct rte_eth_dev_callback dev_cb; 4621 int rc = 0; 4622 4623 rte_spinlock_lock(ð_dev_cb_lock); 4624 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { 4625 if (cb_lst->cb_fn == NULL || cb_lst->event != event) 4626 continue; 4627 dev_cb = *cb_lst; 4628 cb_lst->active = 1; 4629 if (ret_param != NULL) 4630 dev_cb.ret_param = ret_param; 4631 4632 rte_spinlock_unlock(ð_dev_cb_lock); 4633 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, 4634 dev_cb.cb_arg, dev_cb.ret_param); 4635 rte_spinlock_lock(ð_dev_cb_lock); 4636 cb_lst->active = 0; 4637 } 4638 rte_spinlock_unlock(ð_dev_cb_lock); 4639 return rc; 4640 } 4641 4642 void 4643 rte_eth_dev_probing_finish(struct rte_eth_dev *dev) 4644 { 4645 if (dev == NULL) 4646 return; 4647 4648 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); 4649 4650 dev->state = RTE_ETH_DEV_ATTACHED; 4651 } 4652 4653 int 4654 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4655 { 4656 uint32_t vec; 4657 struct rte_eth_dev *dev; 4658 struct rte_intr_handle *intr_handle; 4659 uint16_t qid; 4660 int rc; 4661 4662 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4663 dev = &rte_eth_devices[port_id]; 4664 4665 if (!dev->intr_handle) { 4666 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4667 return -ENOTSUP; 4668 } 4669 4670 intr_handle = dev->intr_handle; 4671 if (!intr_handle->intr_vec) { 4672 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4673 return -EPERM; 4674 } 4675 4676 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4677 vec = intr_handle->intr_vec[qid]; 4678 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4679 if (rc && rc != -EEXIST) { 4680 RTE_ETHDEV_LOG(ERR, 4681 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 4682 port_id, qid, op, epfd, vec); 4683 } 4684 } 4685 4686 return 0; 4687 } 4688 4689 int 4690 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4691 { 4692 struct rte_intr_handle *intr_handle; 4693 struct rte_eth_dev *dev; 4694 unsigned int efd_idx; 4695 uint32_t vec; 4696 int fd; 4697 4698 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4699 dev = &rte_eth_devices[port_id]; 4700 4701 if (queue_id >= dev->data->nb_rx_queues) { 4702 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 4703 return -1; 4704 } 4705 4706 if (!dev->intr_handle) { 4707 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4708 return -1; 4709 } 4710 4711 intr_handle = dev->intr_handle; 4712 if (!intr_handle->intr_vec) { 4713 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4714 return -1; 4715 } 4716 4717 vec = intr_handle->intr_vec[queue_id]; 4718 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
4719 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 4720 fd = intr_handle->efds[efd_idx]; 4721 4722 return fd; 4723 } 4724 4725 static inline int 4726 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id, 4727 const char *ring_name) 4728 { 4729 return snprintf(name, len, "eth_p%d_q%d_%s", 4730 port_id, queue_id, ring_name); 4731 } 4732 4733 const struct rte_memzone * 4734 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, 4735 uint16_t queue_id, size_t size, unsigned align, 4736 int socket_id) 4737 { 4738 char z_name[RTE_MEMZONE_NAMESIZE]; 4739 const struct rte_memzone *mz; 4740 int rc; 4741 4742 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4743 queue_id, ring_name); 4744 if (rc >= RTE_MEMZONE_NAMESIZE) { 4745 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4746 rte_errno = ENAMETOOLONG; 4747 return NULL; 4748 } 4749 4750 mz = rte_memzone_lookup(z_name); 4751 if (mz) { 4752 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) || 4753 size > mz->len || 4754 ((uintptr_t)mz->addr & (align - 1)) != 0) { 4755 RTE_ETHDEV_LOG(ERR, 4756 "memzone %s does not justify the requested attributes\n", 4757 mz->name); 4758 return NULL; 4759 } 4760 4761 return mz; 4762 } 4763 4764 return rte_memzone_reserve_aligned(z_name, size, socket_id, 4765 RTE_MEMZONE_IOVA_CONTIG, align); 4766 } 4767 4768 int 4769 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name, 4770 uint16_t queue_id) 4771 { 4772 char z_name[RTE_MEMZONE_NAMESIZE]; 4773 const struct rte_memzone *mz; 4774 int rc = 0; 4775 4776 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4777 queue_id, ring_name); 4778 if (rc >= RTE_MEMZONE_NAMESIZE) { 4779 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4780 return -ENAMETOOLONG; 4781 } 4782 4783 mz = rte_memzone_lookup(z_name); 4784 if (mz) 4785 rc = rte_memzone_free(mz); 4786 else 4787 rc = -ENOENT; 4788 4789 return rc; 4790 } 4791 4792 int 4793 rte_eth_dev_create(struct rte_device *device, const char *name, 4794 size_t priv_data_size, 4795 ethdev_bus_specific_init ethdev_bus_specific_init, 4796 void *bus_init_params, 4797 ethdev_init_t ethdev_init, void *init_params) 4798 { 4799 struct rte_eth_dev *ethdev; 4800 int retval; 4801 4802 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); 4803 4804 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4805 ethdev = rte_eth_dev_allocate(name); 4806 if (!ethdev) 4807 return -ENODEV; 4808 4809 if (priv_data_size) { 4810 ethdev->data->dev_private = rte_zmalloc_socket( 4811 name, priv_data_size, RTE_CACHE_LINE_SIZE, 4812 device->numa_node); 4813 4814 if (!ethdev->data->dev_private) { 4815 RTE_ETHDEV_LOG(ERR, 4816 "failed to allocate private data\n"); 4817 retval = -ENOMEM; 4818 goto probe_failed; 4819 } 4820 } 4821 } else { 4822 ethdev = rte_eth_dev_attach_secondary(name); 4823 if (!ethdev) { 4824 RTE_ETHDEV_LOG(ERR, 4825 "secondary process attach failed, ethdev doesn't exist\n"); 4826 return -ENODEV; 4827 } 4828 } 4829 4830 ethdev->device = device; 4831 4832 if (ethdev_bus_specific_init) { 4833 retval = ethdev_bus_specific_init(ethdev, bus_init_params); 4834 if (retval) { 4835 RTE_ETHDEV_LOG(ERR, 4836 "ethdev bus specific initialisation failed\n"); 4837 goto probe_failed; 4838 } 4839 } 4840 4841 retval = ethdev_init(ethdev, init_params); 4842 if (retval) { 4843 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n"); 4844 goto probe_failed; 4845 } 4846 4847 rte_eth_dev_probing_finish(ethdev); 4848 4849 return retval; 4850 4851 probe_failed: 4852 
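	/*
	 * Shared error path for rte_eth_dev_create(): release the port so a
	 * failed probe does not leak the ethdev slot or the per-port private
	 * data allocated earlier in this function.
	 */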
rte_eth_dev_release_port(ethdev); 4853 return retval; 4854 } 4855 4856 int 4857 rte_eth_dev_destroy(struct rte_eth_dev *ethdev, 4858 ethdev_uninit_t ethdev_uninit) 4859 { 4860 int ret; 4861 4862 ethdev = rte_eth_dev_allocated(ethdev->data->name); 4863 if (!ethdev) 4864 return -ENODEV; 4865 4866 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL); 4867 4868 ret = ethdev_uninit(ethdev); 4869 if (ret) 4870 return ret; 4871 4872 return rte_eth_dev_release_port(ethdev); 4873 } 4874 4875 int 4876 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 4877 int epfd, int op, void *data) 4878 { 4879 uint32_t vec; 4880 struct rte_eth_dev *dev; 4881 struct rte_intr_handle *intr_handle; 4882 int rc; 4883 4884 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4885 dev = &rte_eth_devices[port_id]; 4886 4887 if (queue_id >= dev->data->nb_rx_queues) { 4888 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 4889 return -EINVAL; 4890 } 4891 4892 if (!dev->intr_handle) { 4893 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4894 return -ENOTSUP; 4895 } 4896 4897 intr_handle = dev->intr_handle; 4898 if (!intr_handle->intr_vec) { 4899 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4900 return -EPERM; 4901 } 4902 4903 vec = intr_handle->intr_vec[queue_id]; 4904 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4905 if (rc && rc != -EEXIST) { 4906 RTE_ETHDEV_LOG(ERR, 4907 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 4908 port_id, queue_id, op, epfd, vec); 4909 return rc; 4910 } 4911 4912 return 0; 4913 } 4914 4915 int 4916 rte_eth_dev_rx_intr_enable(uint16_t port_id, 4917 uint16_t queue_id) 4918 { 4919 struct rte_eth_dev *dev; 4920 int ret; 4921 4922 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4923 dev = &rte_eth_devices[port_id]; 4924 4925 ret = eth_dev_validate_rx_queue(dev, queue_id); 4926 if (ret != 0) 4927 return ret; 4928 4929 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 4930 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 4931 } 4932 4933 int 4934 rte_eth_dev_rx_intr_disable(uint16_t port_id, 4935 uint16_t queue_id) 4936 { 4937 struct rte_eth_dev *dev; 4938 int ret; 4939 4940 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4941 dev = &rte_eth_devices[port_id]; 4942 4943 ret = eth_dev_validate_rx_queue(dev, queue_id); 4944 if (ret != 0) 4945 return ret; 4946 4947 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 4948 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 4949 } 4950 4951 4952 const struct rte_eth_rxtx_callback * 4953 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 4954 rte_rx_callback_fn fn, void *user_param) 4955 { 4956 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4957 rte_errno = ENOTSUP; 4958 return NULL; 4959 #endif 4960 struct rte_eth_dev *dev; 4961 4962 /* check input parameters */ 4963 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 4964 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 4965 rte_errno = EINVAL; 4966 return NULL; 4967 } 4968 dev = &rte_eth_devices[port_id]; 4969 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 4970 rte_errno = EINVAL; 4971 return NULL; 4972 } 4973 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 4974 4975 if (cb == NULL) { 4976 rte_errno = ENOMEM; 4977 return NULL; 4978 } 4979 4980 cb->fn.rx = fn; 4981 cb->param = user_param; 4982 4983 rte_spinlock_lock(ð_dev_rx_cb_lock); 4984 /* Add the callbacks in fifo order. 
*/ 4985 struct rte_eth_rxtx_callback *tail = 4986 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 4987 4988 if (!tail) { 4989 /* Stores to cb->fn and cb->param should complete before 4990 * cb is visible to data plane. 4991 */ 4992 __atomic_store_n( 4993 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 4994 cb, __ATOMIC_RELEASE); 4995 4996 } else { 4997 while (tail->next) 4998 tail = tail->next; 4999 /* Stores to cb->fn and cb->param should complete before 5000 * cb is visible to data plane. 5001 */ 5002 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5003 } 5004 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5005 5006 return cb; 5007 } 5008 5009 const struct rte_eth_rxtx_callback * 5010 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5011 rte_rx_callback_fn fn, void *user_param) 5012 { 5013 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5014 rte_errno = ENOTSUP; 5015 return NULL; 5016 #endif 5017 /* check input parameters */ 5018 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5019 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5020 rte_errno = EINVAL; 5021 return NULL; 5022 } 5023 5024 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5025 5026 if (cb == NULL) { 5027 rte_errno = ENOMEM; 5028 return NULL; 5029 } 5030 5031 cb->fn.rx = fn; 5032 cb->param = user_param; 5033 5034 rte_spinlock_lock(ð_dev_rx_cb_lock); 5035 /* Add the callbacks at first position */ 5036 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5037 /* Stores to cb->fn, cb->param and cb->next should complete before 5038 * cb is visible to data plane threads. 5039 */ 5040 __atomic_store_n( 5041 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5042 cb, __ATOMIC_RELEASE); 5043 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5044 5045 return cb; 5046 } 5047 5048 const struct rte_eth_rxtx_callback * 5049 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5050 rte_tx_callback_fn fn, void *user_param) 5051 { 5052 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5053 rte_errno = ENOTSUP; 5054 return NULL; 5055 #endif 5056 struct rte_eth_dev *dev; 5057 5058 /* check input parameters */ 5059 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5060 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5061 rte_errno = EINVAL; 5062 return NULL; 5063 } 5064 5065 dev = &rte_eth_devices[port_id]; 5066 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5067 rte_errno = EINVAL; 5068 return NULL; 5069 } 5070 5071 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5072 5073 if (cb == NULL) { 5074 rte_errno = ENOMEM; 5075 return NULL; 5076 } 5077 5078 cb->fn.tx = fn; 5079 cb->param = user_param; 5080 5081 rte_spinlock_lock(ð_dev_tx_cb_lock); 5082 /* Add the callbacks in fifo order. */ 5083 struct rte_eth_rxtx_callback *tail = 5084 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5085 5086 if (!tail) { 5087 /* Stores to cb->fn and cb->param should complete before 5088 * cb is visible to data plane. 5089 */ 5090 __atomic_store_n( 5091 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5092 cb, __ATOMIC_RELEASE); 5093 5094 } else { 5095 while (tail->next) 5096 tail = tail->next; 5097 /* Stores to cb->fn and cb->param should complete before 5098 * cb is visible to data plane. 
5099 */ 5100 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5101 } 5102 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5103 5104 return cb; 5105 } 5106 5107 int 5108 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5109 const struct rte_eth_rxtx_callback *user_cb) 5110 { 5111 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5112 return -ENOTSUP; 5113 #endif 5114 /* Check input parameters. */ 5115 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5116 if (user_cb == NULL || 5117 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5118 return -EINVAL; 5119 5120 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5121 struct rte_eth_rxtx_callback *cb; 5122 struct rte_eth_rxtx_callback **prev_cb; 5123 int ret = -EINVAL; 5124 5125 rte_spinlock_lock(ð_dev_rx_cb_lock); 5126 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5127 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5128 cb = *prev_cb; 5129 if (cb == user_cb) { 5130 /* Remove the user cb from the callback list. */ 5131 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5132 ret = 0; 5133 break; 5134 } 5135 } 5136 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5137 5138 return ret; 5139 } 5140 5141 int 5142 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5143 const struct rte_eth_rxtx_callback *user_cb) 5144 { 5145 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5146 return -ENOTSUP; 5147 #endif 5148 /* Check input parameters. */ 5149 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5150 if (user_cb == NULL || 5151 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5152 return -EINVAL; 5153 5154 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5155 int ret = -EINVAL; 5156 struct rte_eth_rxtx_callback *cb; 5157 struct rte_eth_rxtx_callback **prev_cb; 5158 5159 rte_spinlock_lock(ð_dev_tx_cb_lock); 5160 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5161 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5162 cb = *prev_cb; 5163 if (cb == user_cb) { 5164 /* Remove the user cb from the callback list. 
*/ 5165 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5166 ret = 0; 5167 break; 5168 } 5169 } 5170 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5171 5172 return ret; 5173 } 5174 5175 int 5176 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5177 struct rte_eth_rxq_info *qinfo) 5178 { 5179 struct rte_eth_dev *dev; 5180 5181 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5182 dev = &rte_eth_devices[port_id]; 5183 5184 if (queue_id >= dev->data->nb_rx_queues) { 5185 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 5186 return -EINVAL; 5187 } 5188 5189 if (qinfo == NULL) { 5190 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5191 port_id, queue_id); 5192 return -EINVAL; 5193 } 5194 5195 if (dev->data->rx_queues == NULL || 5196 dev->data->rx_queues[queue_id] == NULL) { 5197 RTE_ETHDEV_LOG(ERR, 5198 "Rx queue %"PRIu16" of device with port_id=%" 5199 PRIu16" has not been setup\n", 5200 queue_id, port_id); 5201 return -EINVAL; 5202 } 5203 5204 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5205 RTE_ETHDEV_LOG(INFO, 5206 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5207 queue_id, port_id); 5208 return -EINVAL; 5209 } 5210 5211 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 5212 5213 memset(qinfo, 0, sizeof(*qinfo)); 5214 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5215 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5216 5217 return 0; 5218 } 5219 5220 int 5221 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5222 struct rte_eth_txq_info *qinfo) 5223 { 5224 struct rte_eth_dev *dev; 5225 5226 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5227 dev = &rte_eth_devices[port_id]; 5228 5229 if (queue_id >= dev->data->nb_tx_queues) { 5230 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); 5231 return -EINVAL; 5232 } 5233 5234 if (qinfo == NULL) { 5235 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5236 port_id, queue_id); 5237 return -EINVAL; 5238 } 5239 5240 if (dev->data->tx_queues == NULL || 5241 dev->data->tx_queues[queue_id] == NULL) { 5242 RTE_ETHDEV_LOG(ERR, 5243 "Tx queue %"PRIu16" of device with port_id=%" 5244 PRIu16" has not been setup\n", 5245 queue_id, port_id); 5246 return -EINVAL; 5247 } 5248 5249 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5250 RTE_ETHDEV_LOG(INFO, 5251 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5252 queue_id, port_id); 5253 return -EINVAL; 5254 } 5255 5256 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 5257 5258 memset(qinfo, 0, sizeof(*qinfo)); 5259 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5260 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5261 5262 return 0; 5263 } 5264 5265 int 5266 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5267 struct rte_eth_burst_mode *mode) 5268 { 5269 struct rte_eth_dev *dev; 5270 5271 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5272 dev = &rte_eth_devices[port_id]; 5273 5274 if (queue_id >= dev->data->nb_rx_queues) { 5275 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 5276 return -EINVAL; 5277 } 5278 5279 if (mode == NULL) { 5280 RTE_ETHDEV_LOG(ERR, 5281 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5282 port_id, queue_id); 5283 return -EINVAL; 5284 } 5285 5286 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); 5287 memset(mode, 0, sizeof(*mode)); 5288 return eth_err(port_id, 5289 
dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5290 } 5291 5292 int 5293 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5294 struct rte_eth_burst_mode *mode) 5295 { 5296 struct rte_eth_dev *dev; 5297 5298 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5299 dev = &rte_eth_devices[port_id]; 5300 5301 if (queue_id >= dev->data->nb_tx_queues) { 5302 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); 5303 return -EINVAL; 5304 } 5305 5306 if (mode == NULL) { 5307 RTE_ETHDEV_LOG(ERR, 5308 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5309 port_id, queue_id); 5310 return -EINVAL; 5311 } 5312 5313 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 5314 memset(mode, 0, sizeof(*mode)); 5315 return eth_err(port_id, 5316 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5317 } 5318 5319 int 5320 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5321 struct rte_power_monitor_cond *pmc) 5322 { 5323 struct rte_eth_dev *dev; 5324 5325 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5326 dev = &rte_eth_devices[port_id]; 5327 5328 if (queue_id >= dev->data->nb_rx_queues) { 5329 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5330 return -EINVAL; 5331 } 5332 5333 if (pmc == NULL) { 5334 RTE_ETHDEV_LOG(ERR, 5335 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5336 port_id, queue_id); 5337 return -EINVAL; 5338 } 5339 5340 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); 5341 return eth_err(port_id, 5342 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5343 } 5344 5345 int 5346 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5347 struct rte_ether_addr *mc_addr_set, 5348 uint32_t nb_mc_addr) 5349 { 5350 struct rte_eth_dev *dev; 5351 5352 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5353 dev = &rte_eth_devices[port_id]; 5354 5355 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 5356 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5357 mc_addr_set, nb_mc_addr)); 5358 } 5359 5360 int 5361 rte_eth_timesync_enable(uint16_t port_id) 5362 { 5363 struct rte_eth_dev *dev; 5364 5365 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5366 dev = &rte_eth_devices[port_id]; 5367 5368 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 5369 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5370 } 5371 5372 int 5373 rte_eth_timesync_disable(uint16_t port_id) 5374 { 5375 struct rte_eth_dev *dev; 5376 5377 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5378 dev = &rte_eth_devices[port_id]; 5379 5380 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 5381 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5382 } 5383 5384 int 5385 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5386 uint32_t flags) 5387 { 5388 struct rte_eth_dev *dev; 5389 5390 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5391 dev = &rte_eth_devices[port_id]; 5392 5393 if (timestamp == NULL) { 5394 RTE_ETHDEV_LOG(ERR, 5395 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5396 port_id); 5397 return -EINVAL; 5398 } 5399 5400 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 5401 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5402 (dev, timestamp, flags)); 5403 } 5404 5405 int 5406 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5407 struct timespec *timestamp) 5408 { 5409 struct rte_eth_dev *dev; 5410 5411 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5412 dev = &rte_eth_devices[port_id]; 5413 5414 if (timestamp == NULL) { 5415 RTE_ETHDEV_LOG(ERR, 5416 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5417 port_id); 5418 return -EINVAL; 5419 } 5420 5421 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 5422 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5423 (dev, timestamp)); 5424 } 5425 5426 int 5427 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5428 { 5429 struct rte_eth_dev *dev; 5430 5431 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5432 dev = &rte_eth_devices[port_id]; 5433 5434 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 5435 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5436 } 5437 5438 int 5439 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5440 { 5441 struct rte_eth_dev *dev; 5442 5443 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5444 dev = &rte_eth_devices[port_id]; 5445 5446 if (timestamp == NULL) { 5447 RTE_ETHDEV_LOG(ERR, 5448 "Cannot read ethdev port %u timesync time to NULL\n", 5449 port_id); 5450 return -EINVAL; 5451 } 5452 5453 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 5454 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5455 timestamp)); 5456 } 5457 5458 int 5459 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5460 { 5461 struct rte_eth_dev *dev; 5462 5463 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5464 dev = &rte_eth_devices[port_id]; 5465 5466 if (timestamp == NULL) { 5467 RTE_ETHDEV_LOG(ERR, 5468 "Cannot write ethdev port %u timesync from NULL time\n", 5469 port_id); 5470 return -EINVAL; 5471 } 5472 5473 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 5474 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5475 timestamp)); 5476 } 5477 5478 int 5479 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5480 { 5481 struct rte_eth_dev *dev; 5482 5483 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5484 dev = &rte_eth_devices[port_id]; 5485 5486 if (clock == NULL) { 5487 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5488 port_id); 5489 return -EINVAL; 5490 } 5491 5492 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 5493 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5494 } 5495 5496 int 5497 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5498 { 5499 struct rte_eth_dev *dev; 5500 5501 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5502 dev = &rte_eth_devices[port_id]; 5503 5504 if (info == NULL) { 5505 RTE_ETHDEV_LOG(ERR, 5506 "Cannot get ethdev port %u register info to NULL\n", 5507 port_id); 5508 return -EINVAL; 5509 } 5510 5511 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 5512 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5513 } 5514 5515 int 5516 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5517 { 5518 struct rte_eth_dev *dev; 5519 5520 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5521 dev = &rte_eth_devices[port_id]; 5522 5523 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 5524 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5525 } 5526 5527 int 5528 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5529 { 5530 struct rte_eth_dev *dev; 5531 5532 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
5533 dev = &rte_eth_devices[port_id]; 5534 5535 if (info == NULL) { 5536 RTE_ETHDEV_LOG(ERR, 5537 "Cannot get ethdev port %u EEPROM info to NULL\n", 5538 port_id); 5539 return -EINVAL; 5540 } 5541 5542 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 5543 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5544 } 5545 5546 int 5547 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5548 { 5549 struct rte_eth_dev *dev; 5550 5551 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5552 dev = &rte_eth_devices[port_id]; 5553 5554 if (info == NULL) { 5555 RTE_ETHDEV_LOG(ERR, 5556 "Cannot set ethdev port %u EEPROM from NULL info\n", 5557 port_id); 5558 return -EINVAL; 5559 } 5560 5561 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 5562 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5563 } 5564 5565 int 5566 rte_eth_dev_get_module_info(uint16_t port_id, 5567 struct rte_eth_dev_module_info *modinfo) 5568 { 5569 struct rte_eth_dev *dev; 5570 5571 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5572 dev = &rte_eth_devices[port_id]; 5573 5574 if (modinfo == NULL) { 5575 RTE_ETHDEV_LOG(ERR, 5576 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5577 port_id); 5578 return -EINVAL; 5579 } 5580 5581 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 5582 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5583 } 5584 5585 int 5586 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5587 struct rte_dev_eeprom_info *info) 5588 { 5589 struct rte_eth_dev *dev; 5590 5591 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5592 dev = &rte_eth_devices[port_id]; 5593 5594 if (info == NULL) { 5595 RTE_ETHDEV_LOG(ERR, 5596 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5597 port_id); 5598 return -EINVAL; 5599 } 5600 5601 if (info->data == NULL) { 5602 RTE_ETHDEV_LOG(ERR, 5603 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5604 port_id); 5605 return -EINVAL; 5606 } 5607 5608 if (info->length == 0) { 5609 RTE_ETHDEV_LOG(ERR, 5610 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5611 port_id); 5612 return -EINVAL; 5613 } 5614 5615 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 5616 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5617 } 5618 5619 int 5620 rte_eth_dev_get_dcb_info(uint16_t port_id, 5621 struct rte_eth_dcb_info *dcb_info) 5622 { 5623 struct rte_eth_dev *dev; 5624 5625 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5626 dev = &rte_eth_devices[port_id]; 5627 5628 if (dcb_info == NULL) { 5629 RTE_ETHDEV_LOG(ERR, 5630 "Cannot get ethdev port %u DCB info to NULL\n", 5631 port_id); 5632 return -EINVAL; 5633 } 5634 5635 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5636 5637 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 5638 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5639 } 5640 5641 static void 5642 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5643 const struct rte_eth_desc_lim *desc_lim) 5644 { 5645 if (desc_lim->nb_align != 0) 5646 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5647 5648 if (desc_lim->nb_max != 0) 5649 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5650 5651 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5652 } 5653 5654 int 5655 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5656 uint16_t *nb_rx_desc, 5657 uint16_t *nb_tx_desc) 5658 { 5659 struct rte_eth_dev_info dev_info; 5660 int ret; 5661 5662 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
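	/*
	 * Descriptor limits come from the driver via rte_eth_dev_info_get();
	 * eth_dev_adjust_nb_desc() (defined above) then aligns each requested
	 * count to nb_align and clamps it into [nb_min, nb_max].
	 */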
5663 5664 ret = rte_eth_dev_info_get(port_id, &dev_info); 5665 if (ret != 0) 5666 return ret; 5667 5668 if (nb_rx_desc != NULL) 5669 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5670 5671 if (nb_tx_desc != NULL) 5672 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5673 5674 return 0; 5675 } 5676 5677 int 5678 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5679 struct rte_eth_hairpin_cap *cap) 5680 { 5681 struct rte_eth_dev *dev; 5682 5683 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5684 dev = &rte_eth_devices[port_id]; 5685 5686 if (cap == NULL) { 5687 RTE_ETHDEV_LOG(ERR, 5688 "Cannot get ethdev port %u hairpin capability to NULL\n", 5689 port_id); 5690 return -EINVAL; 5691 } 5692 5693 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 5694 memset(cap, 0, sizeof(*cap)); 5695 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5696 } 5697 5698 int 5699 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5700 { 5701 if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5702 return 1; 5703 return 0; 5704 } 5705 5706 int 5707 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5708 { 5709 if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5710 return 1; 5711 return 0; 5712 } 5713 5714 int 5715 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5716 { 5717 struct rte_eth_dev *dev; 5718 5719 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5720 dev = &rte_eth_devices[port_id]; 5721 5722 if (pool == NULL) { 5723 RTE_ETHDEV_LOG(ERR, 5724 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5725 port_id); 5726 return -EINVAL; 5727 } 5728 5729 if (*dev->dev_ops->pool_ops_supported == NULL) 5730 return 1; /* all pools are supported */ 5731 5732 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5733 } 5734 5735 /** 5736 * A set of values to describe the possible states of a switch domain. 5737 */ 5738 enum rte_eth_switch_domain_state { 5739 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, 5740 RTE_ETH_SWITCH_DOMAIN_ALLOCATED 5741 }; 5742 5743 /** 5744 * Array of switch domains available for allocation. Array is sized to 5745 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than 5746 * ethdev ports in a single process. 
5747 */ 5748 static struct rte_eth_dev_switch { 5749 enum rte_eth_switch_domain_state state; 5750 } eth_dev_switch_domains[RTE_MAX_ETHPORTS]; 5751 5752 int 5753 rte_eth_switch_domain_alloc(uint16_t *domain_id) 5754 { 5755 uint16_t i; 5756 5757 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 5758 5759 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 5760 if (eth_dev_switch_domains[i].state == 5761 RTE_ETH_SWITCH_DOMAIN_UNUSED) { 5762 eth_dev_switch_domains[i].state = 5763 RTE_ETH_SWITCH_DOMAIN_ALLOCATED; 5764 *domain_id = i; 5765 return 0; 5766 } 5767 } 5768 5769 return -ENOSPC; 5770 } 5771 5772 int 5773 rte_eth_switch_domain_free(uint16_t domain_id) 5774 { 5775 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || 5776 domain_id >= RTE_MAX_ETHPORTS) 5777 return -EINVAL; 5778 5779 if (eth_dev_switch_domains[domain_id].state != 5780 RTE_ETH_SWITCH_DOMAIN_ALLOCATED) 5781 return -EINVAL; 5782 5783 eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED; 5784 5785 return 0; 5786 } 5787 5788 static int 5789 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) 5790 { 5791 int state; 5792 struct rte_kvargs_pair *pair; 5793 char *letter; 5794 5795 arglist->str = strdup(str_in); 5796 if (arglist->str == NULL) 5797 return -ENOMEM; 5798 5799 letter = arglist->str; 5800 state = 0; 5801 arglist->count = 0; 5802 pair = &arglist->pairs[0]; 5803 while (1) { 5804 switch (state) { 5805 case 0: /* Initial */ 5806 if (*letter == '=') 5807 return -EINVAL; 5808 else if (*letter == '\0') 5809 return 0; 5810 5811 state = 1; 5812 pair->key = letter; 5813 /* fall-thru */ 5814 5815 case 1: /* Parsing key */ 5816 if (*letter == '=') { 5817 *letter = '\0'; 5818 pair->value = letter + 1; 5819 state = 2; 5820 } else if (*letter == ',' || *letter == '\0') 5821 return -EINVAL; 5822 break; 5823 5824 5825 case 2: /* Parsing value */ 5826 if (*letter == '[') 5827 state = 3; 5828 else if (*letter == ',') { 5829 *letter = '\0'; 5830 arglist->count++; 5831 pair = &arglist->pairs[arglist->count]; 5832 state = 0; 5833 } else if (*letter == '\0') { 5834 letter--; 5835 arglist->count++; 5836 pair = &arglist->pairs[arglist->count]; 5837 state = 0; 5838 } 5839 break; 5840 5841 case 3: /* Parsing list */ 5842 if (*letter == ']') 5843 state = 2; 5844 else if (*letter == '\0') 5845 return -EINVAL; 5846 break; 5847 } 5848 letter++; 5849 } 5850 } 5851 5852 int 5853 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) 5854 { 5855 struct rte_kvargs args; 5856 struct rte_kvargs_pair *pair; 5857 unsigned int i; 5858 int result = 0; 5859 5860 memset(eth_da, 0, sizeof(*eth_da)); 5861 5862 result = eth_dev_devargs_tokenise(&args, dargs); 5863 if (result < 0) 5864 goto parse_cleanup; 5865 5866 for (i = 0; i < args.count; i++) { 5867 pair = &args.pairs[i]; 5868 if (strcmp("representor", pair->key) == 0) { 5869 if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) { 5870 RTE_LOG(ERR, EAL, "duplicated representor key: %s\n", 5871 dargs); 5872 result = -1; 5873 goto parse_cleanup; 5874 } 5875 result = rte_eth_devargs_parse_representor_ports( 5876 pair->value, eth_da); 5877 if (result < 0) 5878 goto parse_cleanup; 5879 } 5880 } 5881 5882 parse_cleanup: 5883 if (args.str) 5884 free(args.str); 5885 5886 return result; 5887 } 5888 5889 int 5890 rte_eth_representor_id_get(uint16_t port_id, 5891 enum rte_eth_representor_type type, 5892 int controller, int pf, int representor_port, 5893 uint16_t *repr_id) 5894 { 5895 int ret, n, count; 5896 uint32_t i; 5897 struct rte_eth_representor_info *info = NULL; 5898 size_t 
size; 5899 5900 if (type == RTE_ETH_REPRESENTOR_NONE) 5901 return 0; 5902 if (repr_id == NULL) 5903 return -EINVAL; 5904 5905 /* Get PMD representor range info. */ 5906 ret = rte_eth_representor_info_get(port_id, NULL); 5907 if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF && 5908 controller == -1 && pf == -1) { 5909 /* Direct mapping for legacy VF representor. */ 5910 *repr_id = representor_port; 5911 return 0; 5912 } else if (ret < 0) { 5913 return ret; 5914 } 5915 n = ret; 5916 size = sizeof(*info) + n * sizeof(info->ranges[0]); 5917 info = calloc(1, size); 5918 if (info == NULL) 5919 return -ENOMEM; 5920 info->nb_ranges_alloc = n; 5921 ret = rte_eth_representor_info_get(port_id, info); 5922 if (ret < 0) 5923 goto out; 5924 5925 /* Default controller and pf to caller. */ 5926 if (controller == -1) 5927 controller = info->controller; 5928 if (pf == -1) 5929 pf = info->pf; 5930 5931 /* Locate representor ID. */ 5932 ret = -ENOENT; 5933 for (i = 0; i < info->nb_ranges; ++i) { 5934 if (info->ranges[i].type != type) 5935 continue; 5936 if (info->ranges[i].controller != controller) 5937 continue; 5938 if (info->ranges[i].id_end < info->ranges[i].id_base) { 5939 RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n", 5940 port_id, info->ranges[i].id_base, 5941 info->ranges[i].id_end, i); 5942 continue; 5943 5944 } 5945 count = info->ranges[i].id_end - info->ranges[i].id_base + 1; 5946 switch (info->ranges[i].type) { 5947 case RTE_ETH_REPRESENTOR_PF: 5948 if (pf < info->ranges[i].pf || 5949 pf >= info->ranges[i].pf + count) 5950 continue; 5951 *repr_id = info->ranges[i].id_base + 5952 (pf - info->ranges[i].pf); 5953 ret = 0; 5954 goto out; 5955 case RTE_ETH_REPRESENTOR_VF: 5956 if (info->ranges[i].pf != pf) 5957 continue; 5958 if (representor_port < info->ranges[i].vf || 5959 representor_port >= info->ranges[i].vf + count) 5960 continue; 5961 *repr_id = info->ranges[i].id_base + 5962 (representor_port - info->ranges[i].vf); 5963 ret = 0; 5964 goto out; 5965 case RTE_ETH_REPRESENTOR_SF: 5966 if (info->ranges[i].pf != pf) 5967 continue; 5968 if (representor_port < info->ranges[i].sf || 5969 representor_port >= info->ranges[i].sf + count) 5970 continue; 5971 *repr_id = info->ranges[i].id_base + 5972 (representor_port - info->ranges[i].sf); 5973 ret = 0; 5974 goto out; 5975 default: 5976 break; 5977 } 5978 } 5979 out: 5980 free(info); 5981 return ret; 5982 } 5983 5984 static int 5985 eth_dev_handle_port_list(const char *cmd __rte_unused, 5986 const char *params __rte_unused, 5987 struct rte_tel_data *d) 5988 { 5989 int port_id; 5990 5991 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 5992 RTE_ETH_FOREACH_DEV(port_id) 5993 rte_tel_data_add_array_int(d, port_id); 5994 return 0; 5995 } 5996 5997 static void 5998 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 5999 const char *stat_name) 6000 { 6001 int q; 6002 struct rte_tel_data *q_data = rte_tel_data_alloc(); 6003 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL); 6004 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 6005 rte_tel_data_add_array_u64(q_data, q_stats[q]); 6006 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 6007 } 6008 6009 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s) 6010 6011 static int 6012 eth_dev_handle_port_stats(const char *cmd __rte_unused, 6013 const char *params, 6014 struct rte_tel_data *d) 6015 { 6016 struct rte_eth_stats stats; 6017 int port_id, ret; 6018 6019 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6020 
return -1; 6021 6022 port_id = atoi(params); 6023 if (!rte_eth_dev_is_valid_port(port_id)) 6024 return -1; 6025 6026 ret = rte_eth_stats_get(port_id, &stats); 6027 if (ret < 0) 6028 return -1; 6029 6030 rte_tel_data_start_dict(d); 6031 ADD_DICT_STAT(stats, ipackets); 6032 ADD_DICT_STAT(stats, opackets); 6033 ADD_DICT_STAT(stats, ibytes); 6034 ADD_DICT_STAT(stats, obytes); 6035 ADD_DICT_STAT(stats, imissed); 6036 ADD_DICT_STAT(stats, ierrors); 6037 ADD_DICT_STAT(stats, oerrors); 6038 ADD_DICT_STAT(stats, rx_nombuf); 6039 eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets"); 6040 eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets"); 6041 eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes"); 6042 eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes"); 6043 eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors"); 6044 6045 return 0; 6046 } 6047 6048 static int 6049 eth_dev_handle_port_xstats(const char *cmd __rte_unused, 6050 const char *params, 6051 struct rte_tel_data *d) 6052 { 6053 struct rte_eth_xstat *eth_xstats; 6054 struct rte_eth_xstat_name *xstat_names; 6055 int port_id, num_xstats; 6056 int i, ret; 6057 char *end_param; 6058 6059 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6060 return -1; 6061 6062 port_id = strtoul(params, &end_param, 0); 6063 if (*end_param != '\0') 6064 RTE_ETHDEV_LOG(NOTICE, 6065 "Extra parameters passed to ethdev telemetry command, ignoring"); 6066 if (!rte_eth_dev_is_valid_port(port_id)) 6067 return -1; 6068 6069 num_xstats = rte_eth_xstats_get(port_id, NULL, 0); 6070 if (num_xstats < 0) 6071 return -1; 6072 6073 /* use one malloc for both names and stats */ 6074 eth_xstats = malloc((sizeof(struct rte_eth_xstat) + 6075 sizeof(struct rte_eth_xstat_name)) * num_xstats); 6076 if (eth_xstats == NULL) 6077 return -1; 6078 xstat_names = (void *)ð_xstats[num_xstats]; 6079 6080 ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats); 6081 if (ret < 0 || ret > num_xstats) { 6082 free(eth_xstats); 6083 return -1; 6084 } 6085 6086 ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats); 6087 if (ret < 0 || ret > num_xstats) { 6088 free(eth_xstats); 6089 return -1; 6090 } 6091 6092 rte_tel_data_start_dict(d); 6093 for (i = 0; i < num_xstats; i++) 6094 rte_tel_data_add_dict_u64(d, xstat_names[i].name, 6095 eth_xstats[i].value); 6096 return 0; 6097 } 6098 6099 static int 6100 eth_dev_handle_port_link_status(const char *cmd __rte_unused, 6101 const char *params, 6102 struct rte_tel_data *d) 6103 { 6104 static const char *status_str = "status"; 6105 int ret, port_id; 6106 struct rte_eth_link link; 6107 char *end_param; 6108 6109 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6110 return -1; 6111 6112 port_id = strtoul(params, &end_param, 0); 6113 if (*end_param != '\0') 6114 RTE_ETHDEV_LOG(NOTICE, 6115 "Extra parameters passed to ethdev telemetry command, ignoring"); 6116 if (!rte_eth_dev_is_valid_port(port_id)) 6117 return -1; 6118 6119 ret = rte_eth_link_get_nowait(port_id, &link); 6120 if (ret < 0) 6121 return -1; 6122 6123 rte_tel_data_start_dict(d); 6124 if (!link.link_status) { 6125 rte_tel_data_add_dict_string(d, status_str, "DOWN"); 6126 return 0; 6127 } 6128 rte_tel_data_add_dict_string(d, status_str, "UP"); 6129 rte_tel_data_add_dict_u64(d, "speed", link.link_speed); 6130 rte_tel_data_add_dict_string(d, "duplex", 6131 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
6132 "full-duplex" : "half-duplex"); 6133 return 0; 6134 } 6135 6136 int 6137 rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue, 6138 struct rte_hairpin_peer_info *cur_info, 6139 struct rte_hairpin_peer_info *peer_info, 6140 uint32_t direction) 6141 { 6142 struct rte_eth_dev *dev; 6143 6144 /* Current queue information is not mandatory. */ 6145 if (peer_info == NULL) 6146 return -EINVAL; 6147 6148 /* No need to check the validity again. */ 6149 dev = &rte_eth_devices[peer_port]; 6150 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update, 6151 -ENOTSUP); 6152 6153 return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue, 6154 cur_info, peer_info, direction); 6155 } 6156 6157 int 6158 rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue, 6159 struct rte_hairpin_peer_info *peer_info, 6160 uint32_t direction) 6161 { 6162 struct rte_eth_dev *dev; 6163 6164 if (peer_info == NULL) 6165 return -EINVAL; 6166 6167 /* No need to check the validity again. */ 6168 dev = &rte_eth_devices[cur_port]; 6169 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind, 6170 -ENOTSUP); 6171 6172 return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue, 6173 peer_info, direction); 6174 } 6175 6176 int 6177 rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue, 6178 uint32_t direction) 6179 { 6180 struct rte_eth_dev *dev; 6181 6182 /* No need to check the validity again. */ 6183 dev = &rte_eth_devices[cur_port]; 6184 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind, 6185 -ENOTSUP); 6186 6187 return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue, 6188 direction); 6189 } 6190 6191 int 6192 rte_eth_representor_info_get(uint16_t port_id, 6193 struct rte_eth_representor_info *info) 6194 { 6195 struct rte_eth_dev *dev; 6196 6197 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6198 dev = &rte_eth_devices[port_id]; 6199 6200 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP); 6201 return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info)); 6202 } 6203 6204 int 6205 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features) 6206 { 6207 struct rte_eth_dev *dev; 6208 6209 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6210 dev = &rte_eth_devices[port_id]; 6211 6212 if (dev->data->dev_configured != 0) { 6213 RTE_ETHDEV_LOG(ERR, 6214 "The port (id=%"PRIu16") is already configured\n", 6215 port_id); 6216 return -EBUSY; 6217 } 6218 6219 if (features == NULL) { 6220 RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n"); 6221 return -EINVAL; 6222 } 6223 6224 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP); 6225 return eth_err(port_id, 6226 (*dev->dev_ops->rx_metadata_negotiate)(dev, features)); 6227 } 6228 6229 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO); 6230 6231 RTE_INIT(ethdev_init_telemetry) 6232 { 6233 rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list, 6234 "Returns list of available ethdev ports. Takes no parameters"); 6235 rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats, 6236 "Returns the common stats for a port. Parameters: int port_id"); 6237 rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats, 6238 "Returns the extended stats for a port. Parameters: int port_id"); 6239 rte_telemetry_register_cmd("/ethdev/link_status", 6240 eth_dev_handle_port_link_status, 6241 "Returns the link status for a port. Parameters: int port_id"); 6242 } 6243