/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats, rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user application,
 * the pointer to the parameters for the callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from the future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax,
	 * which is why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of the old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to the "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
			(strcmp(iter->bus->name, "fslmc") == 0) ||
			(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not a pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in the middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device matches the bus part, need to check the ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}
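/*
 * Illustrative usage sketch (hypothetical application code, not part of this
 * file): walking the ports that match a devargs string with the iterator API
 * above. The devargs value is only an example; the
 * RTE_ETH_FOREACH_MATCHING_DEV() macro in rte_ethdev.h wraps the same loop.
 *
 *	struct rte_dev_iterator iterator;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iterator, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (port_id = rte_eth_iterator_next(&iterator);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&iterator))
 *			printf("matched port %u\n", port_id);
 *	}
 *
 * rte_eth_iterator_cleanup() only needs to be called explicitly if the loop
 * is abandoned before rte_eth_iterator_next() returns RTE_MAX_ETHPORTS.
 */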
/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static void
eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&eth_dev_shared_data_lock);

	if (eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
			memset(eth_dev_shared_data->data, 0,
			       sizeof(eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ethdev = eth_dev_allocated(name);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using the shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary processes. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device has the same port ID in both the
 * primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		  const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* cannot truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
			if (rte_eth_devices[port_id].data->owner.id == owner_id)
				memset(&rte_eth_devices[port_id].data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner id=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}
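/*
 * Illustrative usage sketch (hypothetical application code, not part of this
 * file): take ownership of a port so that other application components skip
 * it in RTE_ETH_FOREACH_DEV(), then release it again. "my_app" and port_id
 * are placeholders.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		... use the port exclusively ...
 *		rte_eth_dev_owner_unset(port_id, owner.id);
 *	}
 */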
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **rxq = dev->data->rx_queues;

	if (rxq[qid] == NULL)
		return;

	if (dev->dev_ops->rx_queue_release != NULL)
		(*dev->dev_ops->rx_queue_release)(dev, qid);
	rxq[qid] = NULL;
}

static void
eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **txq = dev->data->tx_queues;

	if (txq[qid] == NULL)
		return;

	if (dev->dev_ops->tx_queue_release != NULL)
		(*dev->dev_ops->tx_queue_release)(dev, qid);
	txq[qid] = NULL;
}

static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Rx queue_id=%u of device with port_id=%u\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been setup\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Tx queue_id=%u of device with port_id=%u\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been setup\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}
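/*
 * Illustrative sketch (hypothetical application code, not part of this file):
 * the runtime queue start/stop API above is typically paired with a
 * deferred-start queue, i.e. a queue that is set up before
 * rte_eth_dev_start() but only started on demand. dev_info, port_id and
 * mbuf_pool are placeholders.
 *
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.rx_deferred_start = 1;
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), &rxconf, mbuf_pool);
 *	rte_eth_dev_start(port_id);
 *	...
 *	rte_eth_dev_rx_queue_start(port_id, 0);
 */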
int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	case ETH_SPEED_NUM_200G:
		return ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				" %u != %u is not allowed\n",
				port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"> max allowed value %u\n", port_id, config_size,
			dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"< min allowed value %u\n", port_id, config_size,
			(unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}
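/*
 * Illustrative sketch (hypothetical application code, not part of this file):
 * the helper above validates the maximum LRO aggregation size an application
 * may request at configure time. The 9000-byte value and port_id are
 * placeholders.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 *	conf.rxmode.max_lro_pkt_size = 9000;
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */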
/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if a requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = 1ULL << __builtin_ctzll(offloads_diff);
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	uint16_t overhead_len;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to do
	 * dev_configure() to avoid any non-anticipated behaviour.
	 * It is set to 1 when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* Get the real Ethernet overhead length */
	if (dev_info.max_mtu != UINT16_MAX &&
	    dev_info.max_rx_pktlen > dev_info.max_mtu)
		overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* If the number of queues specified by the application for both Rx and
	 * Tx is zero, use driver preferred values. This cannot be done
	 * individually as it is valid for either Tx or Rx (but not both) to be
	 * zero. If the driver does not provide any preferred values, fall back
	 * on EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				dev_info.max_rx_pktlen);
			ret = -EINVAL;
			goto rollback;
		} else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				(unsigned int)RTE_ETHER_MIN_LEN);
			ret = -EINVAL;
			goto rollback;
		}

		/* Scale the MTU size to adapt max_rx_pkt_len */
		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
				overhead_len;
	} else {
		uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len;
		if (pktlen < RTE_ETHER_MIN_MTU + overhead_len ||
		    pktlen > RTE_ETHER_MTU + overhead_len)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
					RTE_ETHER_MTU + overhead_len;
	}

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size =
				dev->data->dev_conf.rxmode.max_rx_pkt_len;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that the device supports the requested RSS hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		    struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore the MAC now if the device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}
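/*
 * Illustrative sketch (hypothetical application code, not part of this file):
 * the usual port bring-up sequence that ends in rte_eth_dev_start() above.
 * Queue counts, descriptor numbers and mbuf_pool are placeholders.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 */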
int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	dev->data->dev_started = 0;
	ret = (*dev->dev_ops->dev_stop)(dev);
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			"Requested Rx segments %u exceed supported %u\n",
			n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = (1u << seg_capa->offset_align_log2) - 1;
	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			return -EINVAL;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			return -ENOTSUP;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				return -ENOTSUP;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					offset,
					seg_capa->offset_align_log2);
				return -EINVAL;
			}
		}
		if (mpl->private_data_size <
				sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR,
				"%s private_data_size %u < %u\n",
				mpl->name, mpl->private_data_size,
				(unsigned int)sizeof
					(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		length = length != 0 ? length : *mbp_buf_size;
		if (*mbp_buf_size < length + offset) {
			RTE_ETHDEV_LOG(ERR,
				"%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
				mpl->name, *mbp_buf_size,
				length + offset, length, offset);
			return -EINVAL;
		}
	}
	return 0;
}

int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (mp != NULL) {
		/* Single pool configuration check. */
		if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Ambiguous segment configuration\n");
			return -EINVAL;
		}
		/*
		 * Check the size of the mbuf data buffer, this value
		 * must be provided in the private data of the memory pool.
		 * First check that the memory pool has valid private data.
		 */
2032 */ 2033 if (mp->private_data_size < 2034 sizeof(struct rte_pktmbuf_pool_private)) { 2035 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 2036 mp->name, mp->private_data_size, 2037 (unsigned int) 2038 sizeof(struct rte_pktmbuf_pool_private)); 2039 return -ENOSPC; 2040 } 2041 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2042 if (mbp_buf_size < dev_info.min_rx_bufsize + 2043 RTE_PKTMBUF_HEADROOM) { 2044 RTE_ETHDEV_LOG(ERR, 2045 "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", 2046 mp->name, mbp_buf_size, 2047 RTE_PKTMBUF_HEADROOM + 2048 dev_info.min_rx_bufsize, 2049 RTE_PKTMBUF_HEADROOM, 2050 dev_info.min_rx_bufsize); 2051 return -EINVAL; 2052 } 2053 } else { 2054 const struct rte_eth_rxseg_split *rx_seg; 2055 uint16_t n_seg; 2056 2057 /* Extended multi-segment configuration check. */ 2058 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) { 2059 RTE_ETHDEV_LOG(ERR, 2060 "Memory pool is null and no extended configuration provided\n"); 2061 return -EINVAL; 2062 } 2063 2064 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2065 n_seg = rx_conf->rx_nseg; 2066 2067 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2068 ret = rte_eth_rx_queue_check_split(rx_seg, n_seg, 2069 &mbp_buf_size, 2070 &dev_info); 2071 if (ret != 0) 2072 return ret; 2073 } else { 2074 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2075 return -EINVAL; 2076 } 2077 } 2078 2079 /* Use default specified by driver, if nb_rx_desc is zero */ 2080 if (nb_rx_desc == 0) { 2081 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2082 /* If driver default is also zero, fall back on EAL default */ 2083 if (nb_rx_desc == 0) 2084 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2085 } 2086 2087 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2088 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2089 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2090 2091 RTE_ETHDEV_LOG(ERR, 2092 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2093 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2094 dev_info.rx_desc_lim.nb_min, 2095 dev_info.rx_desc_lim.nb_align); 2096 return -EINVAL; 2097 } 2098 2099 if (dev->data->dev_started && 2100 !(dev_info.dev_capa & 2101 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2102 return -EBUSY; 2103 2104 if (dev->data->dev_started && 2105 (dev->data->rx_queue_state[rx_queue_id] != 2106 RTE_ETH_QUEUE_STATE_STOPPED)) 2107 return -EBUSY; 2108 2109 eth_dev_rxq_release(dev, rx_queue_id); 2110 2111 if (rx_conf == NULL) 2112 rx_conf = &dev_info.default_rxconf; 2113 2114 local_conf = *rx_conf; 2115 2116 /* 2117 * If an offloading has already been enabled in 2118 * rte_eth_dev_configure(), it has been enabled on all queues, 2119 * so there is no need to enable it in this queue again. 2120 * The local_conf.offloads input to underlying PMD only carries 2121 * those offloadings which are only enabled on this queue and 2122 * not enabled on all queues. 2123 */ 2124 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2125 2126 /* 2127 * New added offloadings for this queue are those not enabled in 2128 * rte_eth_dev_configure() and they must be per-queue type. 2129 * A pure per-port offloading can't be enabled on a queue while 2130 * disabled on another queue. A pure per-port offloading can't 2131 * be enabled for any queue as new added one if it hasn't been 2132 * enabled in rte_eth_dev_configure(). 
2133 */ 2134 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2135 local_conf.offloads) { 2136 RTE_ETHDEV_LOG(ERR, 2137 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2138 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2139 port_id, rx_queue_id, local_conf.offloads, 2140 dev_info.rx_queue_offload_capa, 2141 __func__); 2142 return -EINVAL; 2143 } 2144 2145 /* 2146 * If LRO is enabled, check that the maximum aggregated packet 2147 * size is supported by the configured device. 2148 */ 2149 if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) { 2150 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2151 dev->data->dev_conf.rxmode.max_lro_pkt_size = 2152 dev->data->dev_conf.rxmode.max_rx_pkt_len; 2153 int ret = eth_dev_check_lro_pkt_size(port_id, 2154 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2155 dev->data->dev_conf.rxmode.max_rx_pkt_len, 2156 dev_info.max_lro_pkt_size); 2157 if (ret != 0) 2158 return ret; 2159 } 2160 2161 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2162 socket_id, &local_conf, mp); 2163 if (!ret) { 2164 if (!dev->data->min_rx_buf_size || 2165 dev->data->min_rx_buf_size > mbp_buf_size) 2166 dev->data->min_rx_buf_size = mbp_buf_size; 2167 } 2168 2169 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2170 rx_conf, ret); 2171 return eth_err(port_id, ret); 2172 } 2173 2174 int 2175 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2176 uint16_t nb_rx_desc, 2177 const struct rte_eth_hairpin_conf *conf) 2178 { 2179 int ret; 2180 struct rte_eth_dev *dev; 2181 struct rte_eth_hairpin_cap cap; 2182 int i; 2183 int count; 2184 2185 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2186 dev = &rte_eth_devices[port_id]; 2187 2188 if (rx_queue_id >= dev->data->nb_rx_queues) { 2189 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id); 2190 return -EINVAL; 2191 } 2192 2193 if (conf == NULL) { 2194 RTE_ETHDEV_LOG(ERR, 2195 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2196 port_id); 2197 return -EINVAL; 2198 } 2199 2200 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2201 if (ret != 0) 2202 return ret; 2203 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, 2204 -ENOTSUP); 2205 /* if nb_rx_desc is zero use max number of desc from the driver. 
*/ 2206 if (nb_rx_desc == 0) 2207 nb_rx_desc = cap.max_nb_desc; 2208 if (nb_rx_desc > cap.max_nb_desc) { 2209 RTE_ETHDEV_LOG(ERR, 2210 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu", 2211 nb_rx_desc, cap.max_nb_desc); 2212 return -EINVAL; 2213 } 2214 if (conf->peer_count > cap.max_rx_2_tx) { 2215 RTE_ETHDEV_LOG(ERR, 2216 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu", 2217 conf->peer_count, cap.max_rx_2_tx); 2218 return -EINVAL; 2219 } 2220 if (conf->peer_count == 0) { 2221 RTE_ETHDEV_LOG(ERR, 2222 "Invalid value for number of peers for Rx queue(=%u), should be: > 0", 2223 conf->peer_count); 2224 return -EINVAL; 2225 } 2226 for (i = 0, count = 0; i < dev->data->nb_rx_queues && 2227 cap.max_nb_queues != UINT16_MAX; i++) { 2228 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) 2229 count++; 2230 } 2231 if (count > cap.max_nb_queues) { 2232 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d", 2233 cap.max_nb_queues); 2234 return -EINVAL; 2235 } 2236 if (dev->data->dev_started) 2237 return -EBUSY; 2238 eth_dev_rxq_release(dev, rx_queue_id); 2239 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, 2240 nb_rx_desc, conf); 2241 if (ret == 0) 2242 dev->data->rx_queue_state[rx_queue_id] = 2243 RTE_ETH_QUEUE_STATE_HAIRPIN; 2244 return eth_err(port_id, ret); 2245 } 2246 2247 int 2248 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2249 uint16_t nb_tx_desc, unsigned int socket_id, 2250 const struct rte_eth_txconf *tx_conf) 2251 { 2252 struct rte_eth_dev *dev; 2253 struct rte_eth_dev_info dev_info; 2254 struct rte_eth_txconf local_conf; 2255 int ret; 2256 2257 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2258 dev = &rte_eth_devices[port_id]; 2259 2260 if (tx_queue_id >= dev->data->nb_tx_queues) { 2261 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id); 2262 return -EINVAL; 2263 } 2264 2265 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP); 2266 2267 ret = rte_eth_dev_info_get(port_id, &dev_info); 2268 if (ret != 0) 2269 return ret; 2270 2271 /* Use default specified by driver, if nb_tx_desc is zero */ 2272 if (nb_tx_desc == 0) { 2273 nb_tx_desc = dev_info.default_txportconf.ring_size; 2274 /* If driver default is zero, fall back on EAL default */ 2275 if (nb_tx_desc == 0) 2276 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2277 } 2278 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2279 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2280 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2281 RTE_ETHDEV_LOG(ERR, 2282 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2283 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2284 dev_info.tx_desc_lim.nb_min, 2285 dev_info.tx_desc_lim.nb_align); 2286 return -EINVAL; 2287 } 2288 2289 if (dev->data->dev_started && 2290 !(dev_info.dev_capa & 2291 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2292 return -EBUSY; 2293 2294 if (dev->data->dev_started && 2295 (dev->data->tx_queue_state[tx_queue_id] != 2296 RTE_ETH_QUEUE_STATE_STOPPED)) 2297 return -EBUSY; 2298 2299 eth_dev_txq_release(dev, tx_queue_id); 2300 2301 if (tx_conf == NULL) 2302 tx_conf = &dev_info.default_txconf; 2303 2304 local_conf = *tx_conf; 2305 2306 /* 2307 * If an offloading has already been enabled in 2308 * rte_eth_dev_configure(), it has been enabled on all queues, 2309 * so there is no need to enable it in this queue again.
2310 * The local_conf.offloads input to underlying PMD only carries 2311 * those offloadings which are only enabled on this queue and 2312 * not enabled on all queues. 2313 */ 2314 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 2315 2316 /* 2317 * New added offloadings for this queue are those not enabled in 2318 * rte_eth_dev_configure() and they must be per-queue type. 2319 * A pure per-port offloading can't be enabled on a queue while 2320 * disabled on another queue. A pure per-port offloading can't 2321 * be enabled for any queue as new added one if it hasn't been 2322 * enabled in rte_eth_dev_configure(). 2323 */ 2324 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 2325 local_conf.offloads) { 2326 RTE_ETHDEV_LOG(ERR, 2327 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2328 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2329 port_id, tx_queue_id, local_conf.offloads, 2330 dev_info.tx_queue_offload_capa, 2331 __func__); 2332 return -EINVAL; 2333 } 2334 2335 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf); 2336 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 2337 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 2338 } 2339 2340 int 2341 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2342 uint16_t nb_tx_desc, 2343 const struct rte_eth_hairpin_conf *conf) 2344 { 2345 struct rte_eth_dev *dev; 2346 struct rte_eth_hairpin_cap cap; 2347 int i; 2348 int count; 2349 int ret; 2350 2351 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2352 dev = &rte_eth_devices[port_id]; 2353 2354 if (tx_queue_id >= dev->data->nb_tx_queues) { 2355 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id); 2356 return -EINVAL; 2357 } 2358 2359 if (conf == NULL) { 2360 RTE_ETHDEV_LOG(ERR, 2361 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n", 2362 port_id); 2363 return -EINVAL; 2364 } 2365 2366 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2367 if (ret != 0) 2368 return ret; 2369 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup, 2370 -ENOTSUP); 2371 /* if nb_tx_desc is zero use max number of desc from the driver.
*/ 2372 if (nb_tx_desc == 0) 2373 nb_tx_desc = cap.max_nb_desc; 2374 if (nb_tx_desc > cap.max_nb_desc) { 2375 RTE_ETHDEV_LOG(ERR, 2376 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu", 2377 nb_tx_desc, cap.max_nb_desc); 2378 return -EINVAL; 2379 } 2380 if (conf->peer_count > cap.max_tx_2_rx) { 2381 RTE_ETHDEV_LOG(ERR, 2382 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu", 2383 conf->peer_count, cap.max_tx_2_rx); 2384 return -EINVAL; 2385 } 2386 if (conf->peer_count == 0) { 2387 RTE_ETHDEV_LOG(ERR, 2388 "Invalid value for number of peers for Tx queue(=%u), should be: > 0", 2389 conf->peer_count); 2390 return -EINVAL; 2391 } 2392 for (i = 0, count = 0; i < dev->data->nb_tx_queues && 2393 cap.max_nb_queues != UINT16_MAX; i++) { 2394 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) 2395 count++; 2396 } 2397 if (count > cap.max_nb_queues) { 2398 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d", 2399 cap.max_nb_queues); 2400 return -EINVAL; 2401 } 2402 if (dev->data->dev_started) 2403 return -EBUSY; 2404 eth_dev_txq_release(dev, tx_queue_id); 2405 ret = (*dev->dev_ops->tx_hairpin_queue_setup) 2406 (dev, tx_queue_id, nb_tx_desc, conf); 2407 if (ret == 0) 2408 dev->data->tx_queue_state[tx_queue_id] = 2409 RTE_ETH_QUEUE_STATE_HAIRPIN; 2410 return eth_err(port_id, ret); 2411 } 2412 2413 int 2414 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port) 2415 { 2416 struct rte_eth_dev *dev; 2417 int ret; 2418 2419 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2420 dev = &rte_eth_devices[tx_port]; 2421 2422 if (dev->data->dev_started == 0) { 2423 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port); 2424 return -EBUSY; 2425 } 2426 2427 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP); 2428 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port); 2429 if (ret != 0) 2430 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d" 2431 " to Rx %d (%d - all ports)\n", 2432 tx_port, rx_port, RTE_MAX_ETHPORTS); 2433 2434 return ret; 2435 } 2436 2437 int 2438 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port) 2439 { 2440 struct rte_eth_dev *dev; 2441 int ret; 2442 2443 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2444 dev = &rte_eth_devices[tx_port]; 2445 2446 if (dev->data->dev_started == 0) { 2447 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port); 2448 return -EBUSY; 2449 } 2450 2451 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP); 2452 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port); 2453 if (ret != 0) 2454 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2455 " from Rx %d (%d - all ports)\n", 2456 tx_port, rx_port, RTE_MAX_ETHPORTS); 2457 2458 return ret; 2459 } 2460 2461 int 2462 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2463 size_t len, uint32_t direction) 2464 { 2465 struct rte_eth_dev *dev; 2466 int ret; 2467 2468 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2469 dev = &rte_eth_devices[port_id]; 2470 2471 if (peer_ports == NULL) { 2472 RTE_ETHDEV_LOG(ERR, 2473 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2474 port_id); 2475 return -EINVAL; 2476 } 2477 2478 if (len == 0) { 2479 RTE_ETHDEV_LOG(ERR, 2480 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2481 port_id); 2482 return -EINVAL; 2483 } 2484 2485 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports, 2486 -ENOTSUP); 2487 2488 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2489 len, direction); 2490 if (ret < 0) 2491
RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2492 port_id, direction ? "Rx" : "Tx"); 2493 2494 return ret; 2495 } 2496 2497 void 2498 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2499 void *userdata __rte_unused) 2500 { 2501 rte_pktmbuf_free_bulk(pkts, unsent); 2502 } 2503 2504 void 2505 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2506 void *userdata) 2507 { 2508 uint64_t *count = userdata; 2509 2510 rte_pktmbuf_free_bulk(pkts, unsent); 2511 *count += unsent; 2512 } 2513 2514 int 2515 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2516 buffer_tx_error_fn cbfn, void *userdata) 2517 { 2518 if (buffer == NULL) { 2519 RTE_ETHDEV_LOG(ERR, 2520 "Cannot set Tx buffer error callback to NULL buffer\n"); 2521 return -EINVAL; 2522 } 2523 2524 buffer->error_callback = cbfn; 2525 buffer->error_userdata = userdata; 2526 return 0; 2527 } 2528 2529 int 2530 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2531 { 2532 int ret = 0; 2533 2534 if (buffer == NULL) { 2535 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2536 return -EINVAL; 2537 } 2538 2539 buffer->size = size; 2540 if (buffer->error_callback == NULL) { 2541 ret = rte_eth_tx_buffer_set_err_callback( 2542 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2543 } 2544 2545 return ret; 2546 } 2547 2548 int 2549 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2550 { 2551 struct rte_eth_dev *dev; 2552 int ret; 2553 2554 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2555 dev = &rte_eth_devices[port_id]; 2556 2557 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2558 2559 /* Call driver to free pending mbufs. */ 2560 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2561 free_cnt); 2562 return eth_err(port_id, ret); 2563 } 2564 2565 int 2566 rte_eth_promiscuous_enable(uint16_t port_id) 2567 { 2568 struct rte_eth_dev *dev; 2569 int diag = 0; 2570 2571 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2572 dev = &rte_eth_devices[port_id]; 2573 2574 if (dev->data->promiscuous == 1) 2575 return 0; 2576 2577 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2578 2579 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2580 dev->data->promiscuous = (diag == 0) ? 
1 : 0; 2581 2582 return eth_err(port_id, diag); 2583 } 2584 2585 int 2586 rte_eth_promiscuous_disable(uint16_t port_id) 2587 { 2588 struct rte_eth_dev *dev; 2589 int diag = 0; 2590 2591 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2592 dev = &rte_eth_devices[port_id]; 2593 2594 if (dev->data->promiscuous == 0) 2595 return 0; 2596 2597 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2598 2599 dev->data->promiscuous = 0; 2600 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2601 if (diag != 0) 2602 dev->data->promiscuous = 1; 2603 2604 return eth_err(port_id, diag); 2605 } 2606 2607 int 2608 rte_eth_promiscuous_get(uint16_t port_id) 2609 { 2610 struct rte_eth_dev *dev; 2611 2612 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2613 dev = &rte_eth_devices[port_id]; 2614 2615 return dev->data->promiscuous; 2616 } 2617 2618 int 2619 rte_eth_allmulticast_enable(uint16_t port_id) 2620 { 2621 struct rte_eth_dev *dev; 2622 int diag; 2623 2624 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2625 dev = &rte_eth_devices[port_id]; 2626 2627 if (dev->data->all_multicast == 1) 2628 return 0; 2629 2630 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2631 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2632 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2633 2634 return eth_err(port_id, diag); 2635 } 2636 2637 int 2638 rte_eth_allmulticast_disable(uint16_t port_id) 2639 { 2640 struct rte_eth_dev *dev; 2641 int diag; 2642 2643 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2644 dev = &rte_eth_devices[port_id]; 2645 2646 if (dev->data->all_multicast == 0) 2647 return 0; 2648 2649 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2650 dev->data->all_multicast = 0; 2651 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2652 if (diag != 0) 2653 dev->data->all_multicast = 1; 2654 2655 return eth_err(port_id, diag); 2656 } 2657 2658 int 2659 rte_eth_allmulticast_get(uint16_t port_id) 2660 { 2661 struct rte_eth_dev *dev; 2662 2663 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2664 dev = &rte_eth_devices[port_id]; 2665 2666 return dev->data->all_multicast; 2667 } 2668 2669 int 2670 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2671 { 2672 struct rte_eth_dev *dev; 2673 2674 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2675 dev = &rte_eth_devices[port_id]; 2676 2677 if (eth_link == NULL) { 2678 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2679 port_id); 2680 return -EINVAL; 2681 } 2682 2683 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2684 rte_eth_linkstatus_get(dev, eth_link); 2685 else { 2686 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2687 (*dev->dev_ops->link_update)(dev, 1); 2688 *eth_link = dev->data->dev_link; 2689 } 2690 2691 return 0; 2692 } 2693 2694 int 2695 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2696 { 2697 struct rte_eth_dev *dev; 2698 2699 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2700 dev = &rte_eth_devices[port_id]; 2701 2702 if (eth_link == NULL) { 2703 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2704 port_id); 2705 return -EINVAL; 2706 } 2707 2708 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2709 rte_eth_linkstatus_get(dev, eth_link); 2710 else { 2711 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2712 (*dev->dev_ops->link_update)(dev, 0); 2713 *eth_link = dev->data->dev_link; 2714 } 2715 2716 return 0; 2717 } 2718 2719 const 
char * 2720 rte_eth_link_speed_to_str(uint32_t link_speed) 2721 { 2722 switch (link_speed) { 2723 case ETH_SPEED_NUM_NONE: return "None"; 2724 case ETH_SPEED_NUM_10M: return "10 Mbps"; 2725 case ETH_SPEED_NUM_100M: return "100 Mbps"; 2726 case ETH_SPEED_NUM_1G: return "1 Gbps"; 2727 case ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2728 case ETH_SPEED_NUM_5G: return "5 Gbps"; 2729 case ETH_SPEED_NUM_10G: return "10 Gbps"; 2730 case ETH_SPEED_NUM_20G: return "20 Gbps"; 2731 case ETH_SPEED_NUM_25G: return "25 Gbps"; 2732 case ETH_SPEED_NUM_40G: return "40 Gbps"; 2733 case ETH_SPEED_NUM_50G: return "50 Gbps"; 2734 case ETH_SPEED_NUM_56G: return "56 Gbps"; 2735 case ETH_SPEED_NUM_100G: return "100 Gbps"; 2736 case ETH_SPEED_NUM_200G: return "200 Gbps"; 2737 case ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2738 default: return "Invalid"; 2739 } 2740 } 2741 2742 int 2743 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2744 { 2745 if (str == NULL) { 2746 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2747 return -EINVAL; 2748 } 2749 2750 if (len == 0) { 2751 RTE_ETHDEV_LOG(ERR, 2752 "Cannot convert link to string with zero size\n"); 2753 return -EINVAL; 2754 } 2755 2756 if (eth_link == NULL) { 2757 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2758 return -EINVAL; 2759 } 2760 2761 if (eth_link->link_status == ETH_LINK_DOWN) 2762 return snprintf(str, len, "Link down"); 2763 else 2764 return snprintf(str, len, "Link up at %s %s %s", 2765 rte_eth_link_speed_to_str(eth_link->link_speed), 2766 (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ? 2767 "FDX" : "HDX", 2768 (eth_link->link_autoneg == ETH_LINK_AUTONEG) ? 2769 "Autoneg" : "Fixed"); 2770 } 2771 2772 int 2773 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2774 { 2775 struct rte_eth_dev *dev; 2776 2777 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2778 dev = &rte_eth_devices[port_id]; 2779 2780 if (stats == NULL) { 2781 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2782 port_id); 2783 return -EINVAL; 2784 } 2785 2786 memset(stats, 0, sizeof(*stats)); 2787 2788 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2789 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2790 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2791 } 2792 2793 int 2794 rte_eth_stats_reset(uint16_t port_id) 2795 { 2796 struct rte_eth_dev *dev; 2797 int ret; 2798 2799 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2800 dev = &rte_eth_devices[port_id]; 2801 2802 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2803 ret = (*dev->dev_ops->stats_reset)(dev); 2804 if (ret != 0) 2805 return eth_err(port_id, ret); 2806 2807 dev->data->rx_mbuf_alloc_failed = 0; 2808 2809 return 0; 2810 } 2811 2812 static inline int 2813 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2814 { 2815 uint16_t nb_rxqs, nb_txqs; 2816 int count; 2817 2818 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2819 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2820 2821 count = RTE_NB_STATS; 2822 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2823 count += nb_rxqs * RTE_NB_RXQ_STATS; 2824 count += nb_txqs * RTE_NB_TXQ_STATS; 2825 } 2826 2827 return count; 2828 } 2829 2830 static int 2831 eth_dev_get_xstats_count(uint16_t port_id) 2832 { 2833 struct rte_eth_dev *dev; 2834 int count; 2835 2836 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2837 dev = &rte_eth_devices[port_id]; 2838 if 
(dev->dev_ops->xstats_get_names != NULL) { 2839 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2840 if (count < 0) 2841 return eth_err(port_id, count); 2842 } else 2843 count = 0; 2844 2845 2846 count += eth_dev_get_xstats_basic_count(dev); 2847 2848 return count; 2849 } 2850 2851 int 2852 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2853 uint64_t *id) 2854 { 2855 int cnt_xstats, idx_xstat; 2856 2857 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2858 2859 if (xstat_name == NULL) { 2860 RTE_ETHDEV_LOG(ERR, 2861 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2862 port_id); 2863 return -ENOMEM; 2864 } 2865 2866 if (id == NULL) { 2867 RTE_ETHDEV_LOG(ERR, 2868 "Cannot get ethdev port %u xstats ID to NULL\n", 2869 port_id); 2870 return -ENOMEM; 2871 } 2872 2873 /* Get count */ 2874 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2875 if (cnt_xstats < 0) { 2876 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2877 return -ENODEV; 2878 } 2879 2880 /* Get id-name lookup table */ 2881 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2882 2883 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2884 port_id, xstats_names, cnt_xstats, NULL)) { 2885 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2886 return -1; 2887 } 2888 2889 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2890 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2891 *id = idx_xstat; 2892 return 0; 2893 }; 2894 } 2895 2896 return -EINVAL; 2897 } 2898 2899 /* retrieve basic stats names */ 2900 static int 2901 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2902 struct rte_eth_xstat_name *xstats_names) 2903 { 2904 int cnt_used_entries = 0; 2905 uint32_t idx, id_queue; 2906 uint16_t num_q; 2907 2908 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2909 strlcpy(xstats_names[cnt_used_entries].name, 2910 eth_dev_stats_strings[idx].name, 2911 sizeof(xstats_names[0].name)); 2912 cnt_used_entries++; 2913 } 2914 2915 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2916 return cnt_used_entries; 2917 2918 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2919 for (id_queue = 0; id_queue < num_q; id_queue++) { 2920 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2921 snprintf(xstats_names[cnt_used_entries].name, 2922 sizeof(xstats_names[0].name), 2923 "rx_q%u_%s", 2924 id_queue, eth_dev_rxq_stats_strings[idx].name); 2925 cnt_used_entries++; 2926 } 2927 2928 } 2929 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2930 for (id_queue = 0; id_queue < num_q; id_queue++) { 2931 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 2932 snprintf(xstats_names[cnt_used_entries].name, 2933 sizeof(xstats_names[0].name), 2934 "tx_q%u_%s", 2935 id_queue, eth_dev_txq_stats_strings[idx].name); 2936 cnt_used_entries++; 2937 } 2938 } 2939 return cnt_used_entries; 2940 } 2941 2942 /* retrieve ethdev extended statistics names */ 2943 int 2944 rte_eth_xstats_get_names_by_id(uint16_t port_id, 2945 struct rte_eth_xstat_name *xstats_names, unsigned int size, 2946 uint64_t *ids) 2947 { 2948 struct rte_eth_xstat_name *xstats_names_copy; 2949 unsigned int no_basic_stat_requested = 1; 2950 unsigned int no_ext_stat_requested = 1; 2951 unsigned int expected_entries; 2952 unsigned int basic_count; 2953 struct rte_eth_dev *dev; 2954 unsigned int i; 2955 int ret; 2956 2957 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2958 dev = &rte_eth_devices[port_id]; 2959 2960 basic_count = eth_dev_get_xstats_basic_count(dev); 2961 
ret = eth_dev_get_xstats_count(port_id); 2962 if (ret < 0) 2963 return ret; 2964 expected_entries = (unsigned int)ret; 2965 2966 /* Return max number of stats if no ids given */ 2967 if (!ids) { 2968 if (!xstats_names) 2969 return expected_entries; 2970 else if (xstats_names && size < expected_entries) 2971 return expected_entries; 2972 } 2973 2974 if (ids && !xstats_names) 2975 return -EINVAL; 2976 2977 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 2978 uint64_t ids_copy[size]; 2979 2980 for (i = 0; i < size; i++) { 2981 if (ids[i] < basic_count) { 2982 no_basic_stat_requested = 0; 2983 break; 2984 } 2985 2986 /* 2987 * Convert ids to xstats ids that PMD knows. 2988 * ids known by user are basic + extended stats. 2989 */ 2990 ids_copy[i] = ids[i] - basic_count; 2991 } 2992 2993 if (no_basic_stat_requested) 2994 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 2995 ids_copy, xstats_names, size); 2996 } 2997 2998 /* Retrieve all stats */ 2999 if (!ids) { 3000 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3001 expected_entries); 3002 if (num_stats < 0 || num_stats > (int)expected_entries) 3003 return num_stats; 3004 else 3005 return expected_entries; 3006 } 3007 3008 xstats_names_copy = calloc(expected_entries, 3009 sizeof(struct rte_eth_xstat_name)); 3010 3011 if (!xstats_names_copy) { 3012 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3013 return -ENOMEM; 3014 } 3015 3016 if (ids) { 3017 for (i = 0; i < size; i++) { 3018 if (ids[i] >= basic_count) { 3019 no_ext_stat_requested = 0; 3020 break; 3021 } 3022 } 3023 } 3024 3025 /* Fill xstats_names_copy structure */ 3026 if (ids && no_ext_stat_requested) { 3027 eth_basic_stats_get_names(dev, xstats_names_copy); 3028 } else { 3029 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3030 expected_entries); 3031 if (ret < 0) { 3032 free(xstats_names_copy); 3033 return ret; 3034 } 3035 } 3036 3037 /* Filter stats */ 3038 for (i = 0; i < size; i++) { 3039 if (ids[i] >= expected_entries) { 3040 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3041 free(xstats_names_copy); 3042 return -1; 3043 } 3044 xstats_names[i] = xstats_names_copy[ids[i]]; 3045 } 3046 3047 free(xstats_names_copy); 3048 return size; 3049 } 3050 3051 int 3052 rte_eth_xstats_get_names(uint16_t port_id, 3053 struct rte_eth_xstat_name *xstats_names, 3054 unsigned int size) 3055 { 3056 struct rte_eth_dev *dev; 3057 int cnt_used_entries; 3058 int cnt_expected_entries; 3059 int cnt_driver_entries; 3060 3061 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3062 if (xstats_names == NULL || cnt_expected_entries < 0 || 3063 (int)size < cnt_expected_entries) 3064 return cnt_expected_entries; 3065 3066 /* port_id checked in eth_dev_get_xstats_count() */ 3067 dev = &rte_eth_devices[port_id]; 3068 3069 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3070 3071 if (dev->dev_ops->xstats_get_names != NULL) { 3072 /* If there are any driver-specific xstats, append them 3073 * to end of list. 
3074 */ 3075 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 3076 dev, 3077 xstats_names + cnt_used_entries, 3078 size - cnt_used_entries); 3079 if (cnt_driver_entries < 0) 3080 return eth_err(port_id, cnt_driver_entries); 3081 cnt_used_entries += cnt_driver_entries; 3082 } 3083 3084 return cnt_used_entries; 3085 } 3086 3087 3088 static int 3089 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 3090 { 3091 struct rte_eth_dev *dev; 3092 struct rte_eth_stats eth_stats; 3093 unsigned int count = 0, i, q; 3094 uint64_t val, *stats_ptr; 3095 uint16_t nb_rxqs, nb_txqs; 3096 int ret; 3097 3098 ret = rte_eth_stats_get(port_id, &eth_stats); 3099 if (ret < 0) 3100 return ret; 3101 3102 dev = &rte_eth_devices[port_id]; 3103 3104 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3105 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3106 3107 /* global stats */ 3108 for (i = 0; i < RTE_NB_STATS; i++) { 3109 stats_ptr = RTE_PTR_ADD(&eth_stats, 3110 eth_dev_stats_strings[i].offset); 3111 val = *stats_ptr; 3112 xstats[count++].value = val; 3113 } 3114 3115 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3116 return count; 3117 3118 /* per-rxq stats */ 3119 for (q = 0; q < nb_rxqs; q++) { 3120 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 3121 stats_ptr = RTE_PTR_ADD(&eth_stats, 3122 eth_dev_rxq_stats_strings[i].offset + 3123 q * sizeof(uint64_t)); 3124 val = *stats_ptr; 3125 xstats[count++].value = val; 3126 } 3127 } 3128 3129 /* per-txq stats */ 3130 for (q = 0; q < nb_txqs; q++) { 3131 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 3132 stats_ptr = RTE_PTR_ADD(&eth_stats, 3133 eth_dev_txq_stats_strings[i].offset + 3134 q * sizeof(uint64_t)); 3135 val = *stats_ptr; 3136 xstats[count++].value = val; 3137 } 3138 } 3139 return count; 3140 } 3141 3142 /* retrieve ethdev extended statistics */ 3143 int 3144 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 3145 uint64_t *values, unsigned int size) 3146 { 3147 unsigned int no_basic_stat_requested = 1; 3148 unsigned int no_ext_stat_requested = 1; 3149 unsigned int num_xstats_filled; 3150 unsigned int basic_count; 3151 uint16_t expected_entries; 3152 struct rte_eth_dev *dev; 3153 unsigned int i; 3154 int ret; 3155 3156 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3157 dev = &rte_eth_devices[port_id]; 3158 3159 ret = eth_dev_get_xstats_count(port_id); 3160 if (ret < 0) 3161 return ret; 3162 expected_entries = (uint16_t)ret; 3163 struct rte_eth_xstat xstats[expected_entries]; 3164 basic_count = eth_dev_get_xstats_basic_count(dev); 3165 3166 /* Return max number of stats if no ids given */ 3167 if (!ids) { 3168 if (!values) 3169 return expected_entries; 3170 else if (values && size < expected_entries) 3171 return expected_entries; 3172 } 3173 3174 if (ids && !values) 3175 return -EINVAL; 3176 3177 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 3178 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); 3179 uint64_t ids_copy[size]; 3180 3181 for (i = 0; i < size; i++) { 3182 if (ids[i] < basic_count) { 3183 no_basic_stat_requested = 0; 3184 break; 3185 } 3186 3187 /* 3188 * Convert ids to xstats ids that PMD knows. 3189 * ids known by user are basic + extended stats.
3190 */ 3191 ids_copy[i] = ids[i] - basic_count; 3192 } 3193 3194 if (no_basic_stat_requested) 3195 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3196 values, size); 3197 } 3198 3199 if (ids) { 3200 for (i = 0; i < size; i++) { 3201 if (ids[i] >= basic_count) { 3202 no_ext_stat_requested = 0; 3203 break; 3204 } 3205 } 3206 } 3207 3208 /* Fill the xstats structure */ 3209 if (ids && no_ext_stat_requested) 3210 ret = eth_basic_stats_get(port_id, xstats); 3211 else 3212 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3213 3214 if (ret < 0) 3215 return ret; 3216 num_xstats_filled = (unsigned int)ret; 3217 3218 /* Return all stats */ 3219 if (!ids) { 3220 for (i = 0; i < num_xstats_filled; i++) 3221 values[i] = xstats[i].value; 3222 return expected_entries; 3223 } 3224 3225 /* Filter stats */ 3226 for (i = 0; i < size; i++) { 3227 if (ids[i] >= expected_entries) { 3228 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3229 return -1; 3230 } 3231 values[i] = xstats[ids[i]].value; 3232 } 3233 return size; 3234 } 3235 3236 int 3237 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3238 unsigned int n) 3239 { 3240 struct rte_eth_dev *dev; 3241 unsigned int count = 0, i; 3242 signed int xcount = 0; 3243 uint16_t nb_rxqs, nb_txqs; 3244 int ret; 3245 3246 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3247 dev = &rte_eth_devices[port_id]; 3248 3249 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3250 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3251 3252 /* Return generic statistics */ 3253 count = RTE_NB_STATS; 3254 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) 3255 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS); 3256 3257 /* implemented by the driver */ 3258 if (dev->dev_ops->xstats_get != NULL) { 3259 /* Retrieve the xstats from the driver at the end of the 3260 * xstats struct. 3261 */ 3262 xcount = (*dev->dev_ops->xstats_get)(dev, 3263 xstats ? xstats + count : NULL, 3264 (n > count) ? 
n - count : 0); 3265 3266 if (xcount < 0) 3267 return eth_err(port_id, xcount); 3268 } 3269 3270 if (n < count + xcount || xstats == NULL) 3271 return count + xcount; 3272 3273 /* now fill the xstats structure */ 3274 ret = eth_basic_stats_get(port_id, xstats); 3275 if (ret < 0) 3276 return ret; 3277 count = ret; 3278 3279 for (i = 0; i < count; i++) 3280 xstats[i].id = i; 3281 /* add an offset to driver-specific stats */ 3282 for ( ; i < count + xcount; i++) 3283 xstats[i].id += count; 3284 3285 return count + xcount; 3286 } 3287 3288 /* reset ethdev extended statistics */ 3289 int 3290 rte_eth_xstats_reset(uint16_t port_id) 3291 { 3292 struct rte_eth_dev *dev; 3293 3294 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3295 dev = &rte_eth_devices[port_id]; 3296 3297 /* implemented by the driver */ 3298 if (dev->dev_ops->xstats_reset != NULL) 3299 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3300 3301 /* fallback to default */ 3302 return rte_eth_stats_reset(port_id); 3303 } 3304 3305 static int 3306 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3307 uint8_t stat_idx, uint8_t is_rx) 3308 { 3309 struct rte_eth_dev *dev; 3310 3311 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3312 dev = &rte_eth_devices[port_id]; 3313 3314 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3315 return -EINVAL; 3316 3317 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3318 return -EINVAL; 3319 3320 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3321 return -EINVAL; 3322 3323 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 3324 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3325 } 3326 3327 int 3328 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3329 uint8_t stat_idx) 3330 { 3331 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3332 tx_queue_id, 3333 stat_idx, STAT_QMAP_TX)); 3334 } 3335 3336 int 3337 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3338 uint8_t stat_idx) 3339 { 3340 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3341 rx_queue_id, 3342 stat_idx, STAT_QMAP_RX)); 3343 } 3344 3345 int 3346 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3347 { 3348 struct rte_eth_dev *dev; 3349 3350 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3351 dev = &rte_eth_devices[port_id]; 3352 3353 if (fw_version == NULL && fw_size > 0) { 3354 RTE_ETHDEV_LOG(ERR, 3355 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3356 port_id); 3357 return -EINVAL; 3358 } 3359 3360 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 3361 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3362 fw_version, fw_size)); 3363 } 3364 3365 int 3366 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3367 { 3368 struct rte_eth_dev *dev; 3369 const struct rte_eth_desc_lim lim = { 3370 .nb_max = UINT16_MAX, 3371 .nb_min = 0, 3372 .nb_align = 1, 3373 .nb_seg_max = UINT16_MAX, 3374 .nb_mtu_seg_max = UINT16_MAX, 3375 }; 3376 int diag; 3377 3378 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3379 dev = &rte_eth_devices[port_id]; 3380 3381 if (dev_info == NULL) { 3382 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3383 port_id); 3384 return -EINVAL; 3385 } 3386 3387 /* 3388 * Init dev_info before port_id check since caller does not have 3389 * return status and does not know if get is successful or not. 
3390 */ 3391 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3392 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3393 3394 dev_info->rx_desc_lim = lim; 3395 dev_info->tx_desc_lim = lim; 3396 dev_info->device = dev->device; 3397 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3398 dev_info->max_mtu = UINT16_MAX; 3399 3400 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 3401 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3402 if (diag != 0) { 3403 /* Cleanup already filled in device information */ 3404 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3405 return eth_err(port_id, diag); 3406 } 3407 3408 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3409 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3410 RTE_MAX_QUEUES_PER_PORT); 3411 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3412 RTE_MAX_QUEUES_PER_PORT); 3413 3414 dev_info->driver_name = dev->device->driver->name; 3415 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3416 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3417 3418 dev_info->dev_flags = &dev->data->dev_flags; 3419 3420 return 0; 3421 } 3422 3423 int 3424 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3425 { 3426 struct rte_eth_dev *dev; 3427 3428 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3429 dev = &rte_eth_devices[port_id]; 3430 3431 if (dev_conf == NULL) { 3432 RTE_ETHDEV_LOG(ERR, 3433 "Cannot get ethdev port %u configuration to NULL\n", 3434 port_id); 3435 return -EINVAL; 3436 } 3437 3438 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3439 3440 return 0; 3441 } 3442 3443 int 3444 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3445 uint32_t *ptypes, int num) 3446 { 3447 int i, j; 3448 struct rte_eth_dev *dev; 3449 const uint32_t *all_ptypes; 3450 3451 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3452 dev = &rte_eth_devices[port_id]; 3453 3454 if (ptypes == NULL && num > 0) { 3455 RTE_ETHDEV_LOG(ERR, 3456 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3457 port_id); 3458 return -EINVAL; 3459 } 3460 3461 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3462 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3463 3464 if (!all_ptypes) 3465 return 0; 3466 3467 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3468 if (all_ptypes[i] & ptype_mask) { 3469 if (j < num) 3470 ptypes[j] = all_ptypes[i]; 3471 j++; 3472 } 3473 3474 return j; 3475 } 3476 3477 int 3478 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3479 uint32_t *set_ptypes, unsigned int num) 3480 { 3481 const uint32_t valid_ptype_masks[] = { 3482 RTE_PTYPE_L2_MASK, 3483 RTE_PTYPE_L3_MASK, 3484 RTE_PTYPE_L4_MASK, 3485 RTE_PTYPE_TUNNEL_MASK, 3486 RTE_PTYPE_INNER_L2_MASK, 3487 RTE_PTYPE_INNER_L3_MASK, 3488 RTE_PTYPE_INNER_L4_MASK, 3489 }; 3490 const uint32_t *all_ptypes; 3491 struct rte_eth_dev *dev; 3492 uint32_t unused_mask; 3493 unsigned int i, j; 3494 int ret; 3495 3496 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3497 dev = &rte_eth_devices[port_id]; 3498 3499 if (num > 0 && set_ptypes == NULL) { 3500 RTE_ETHDEV_LOG(ERR, 3501 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3502 port_id); 3503 return -EINVAL; 3504 } 3505 3506 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3507 *dev->dev_ops->dev_ptypes_set == NULL) { 3508 ret = 0; 3509 goto ptype_unknown; 3510 } 3511 3512 if (ptype_mask == 
0) { 3513 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3514 ptype_mask); 3515 goto ptype_unknown; 3516 } 3517 3518 unused_mask = ptype_mask; 3519 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3520 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3521 if (mask && mask != valid_ptype_masks[i]) { 3522 ret = -EINVAL; 3523 goto ptype_unknown; 3524 } 3525 unused_mask &= ~valid_ptype_masks[i]; 3526 } 3527 3528 if (unused_mask) { 3529 ret = -EINVAL; 3530 goto ptype_unknown; 3531 } 3532 3533 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3534 if (all_ptypes == NULL) { 3535 ret = 0; 3536 goto ptype_unknown; 3537 } 3538 3539 /* 3540 * Accommodate as many set_ptypes as possible. If the supplied 3541 * set_ptypes array is insufficient fill it partially. 3542 */ 3543 for (i = 0, j = 0; set_ptypes != NULL && 3544 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3545 if (ptype_mask & all_ptypes[i]) { 3546 if (j < num - 1) { 3547 set_ptypes[j] = all_ptypes[i]; 3548 j++; 3549 continue; 3550 } 3551 break; 3552 } 3553 } 3554 3555 if (set_ptypes != NULL && j < num) 3556 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3557 3558 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3559 3560 ptype_unknown: 3561 if (num > 0) 3562 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3563 3564 return ret; 3565 } 3566 3567 int 3568 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3569 unsigned int num) 3570 { 3571 int32_t ret; 3572 struct rte_eth_dev *dev; 3573 struct rte_eth_dev_info dev_info; 3574 3575 if (ma == NULL) { 3576 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3577 return -EINVAL; 3578 } 3579 3580 /* will check for us that port_id is a valid one */ 3581 ret = rte_eth_dev_info_get(port_id, &dev_info); 3582 if (ret != 0) 3583 return ret; 3584 3585 dev = &rte_eth_devices[port_id]; 3586 num = RTE_MIN(dev_info.max_mac_addrs, num); 3587 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3588 3589 return num; 3590 } 3591 3592 int 3593 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3594 { 3595 struct rte_eth_dev *dev; 3596 3597 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3598 dev = &rte_eth_devices[port_id]; 3599 3600 if (mac_addr == NULL) { 3601 RTE_ETHDEV_LOG(ERR, 3602 "Cannot get ethdev port %u MAC address to NULL\n", 3603 port_id); 3604 return -EINVAL; 3605 } 3606 3607 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3608 3609 return 0; 3610 } 3611 3612 int 3613 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3614 { 3615 struct rte_eth_dev *dev; 3616 3617 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3618 dev = &rte_eth_devices[port_id]; 3619 3620 if (mtu == NULL) { 3621 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3622 port_id); 3623 return -EINVAL; 3624 } 3625 3626 *mtu = dev->data->mtu; 3627 return 0; 3628 } 3629 3630 int 3631 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3632 { 3633 int ret; 3634 struct rte_eth_dev_info dev_info; 3635 struct rte_eth_dev *dev; 3636 3637 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3638 dev = &rte_eth_devices[port_id]; 3639 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3640 3641 /* 3642 * Check if the device supports dev_infos_get, if it does not 3643 * skip min_mtu/max_mtu validation here as this requires values 3644 * that are populated within the call to rte_eth_dev_info_get() 3645 * which relies on dev->dev_ops->dev_infos_get. 
3646 */ 3647 if (*dev->dev_ops->dev_infos_get != NULL) { 3648 ret = rte_eth_dev_info_get(port_id, &dev_info); 3649 if (ret != 0) 3650 return ret; 3651 3652 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu) 3653 return -EINVAL; 3654 } 3655 3656 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3657 if (!ret) 3658 dev->data->mtu = mtu; 3659 3660 return eth_err(port_id, ret); 3661 } 3662 3663 int 3664 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3665 { 3666 struct rte_eth_dev *dev; 3667 int ret; 3668 3669 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3670 dev = &rte_eth_devices[port_id]; 3671 3672 if (!(dev->data->dev_conf.rxmode.offloads & 3673 DEV_RX_OFFLOAD_VLAN_FILTER)) { 3674 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n", 3675 port_id); 3676 return -ENOSYS; 3677 } 3678 3679 if (vlan_id > 4095) { 3680 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3681 port_id, vlan_id); 3682 return -EINVAL; 3683 } 3684 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3685 3686 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3687 if (ret == 0) { 3688 struct rte_vlan_filter_conf *vfc; 3689 int vidx; 3690 int vbit; 3691 3692 vfc = &dev->data->vlan_filter_conf; 3693 vidx = vlan_id / 64; 3694 vbit = vlan_id % 64; 3695 3696 if (on) 3697 vfc->ids[vidx] |= UINT64_C(1) << vbit; 3698 else 3699 vfc->ids[vidx] &= ~(UINT64_C(1) << vbit); 3700 } 3701 3702 return eth_err(port_id, ret); 3703 } 3704 3705 int 3706 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3707 int on) 3708 { 3709 struct rte_eth_dev *dev; 3710 3711 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3712 dev = &rte_eth_devices[port_id]; 3713 3714 if (rx_queue_id >= dev->data->nb_rx_queues) { 3715 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3716 return -EINVAL; 3717 } 3718 3719 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3720 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3721 3722 return 0; 3723 } 3724 3725 int 3726 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3727 enum rte_vlan_type vlan_type, 3728 uint16_t tpid) 3729 { 3730 struct rte_eth_dev *dev; 3731 3732 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3733 dev = &rte_eth_devices[port_id]; 3734 3735 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3736 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3737 tpid)); 3738 } 3739 3740 int 3741 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3742 { 3743 struct rte_eth_dev_info dev_info; 3744 struct rte_eth_dev *dev; 3745 int ret = 0; 3746 int mask = 0; 3747 int cur, org = 0; 3748 uint64_t orig_offloads; 3749 uint64_t dev_offloads; 3750 uint64_t new_offloads; 3751 3752 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3753 dev = &rte_eth_devices[port_id]; 3754 3755 /* save original values in case of failure */ 3756 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3757 dev_offloads = orig_offloads; 3758 3759 /* check which option changed by application */ 3760 cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD); 3761 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP); 3762 if (cur != org) { 3763 if (cur) 3764 dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 3765 else 3766 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3767 mask |= ETH_VLAN_STRIP_MASK; 3768 } 3769 3770 cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD); 3771 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER); 3772 if (cur != org) { 3773 if (cur) 3774 dev_offloads |= 
DEV_RX_OFFLOAD_VLAN_FILTER; 3775 else 3776 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3777 mask |= ETH_VLAN_FILTER_MASK; 3778 } 3779 3780 cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD); 3781 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND); 3782 if (cur != org) { 3783 if (cur) 3784 dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 3785 else 3786 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3787 mask |= ETH_VLAN_EXTEND_MASK; 3788 } 3789 3790 cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD); 3791 org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP); 3792 if (cur != org) { 3793 if (cur) 3794 dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3795 else 3796 dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3797 mask |= ETH_QINQ_STRIP_MASK; 3798 } 3799 3800 /*no change*/ 3801 if (mask == 0) 3802 return ret; 3803 3804 ret = rte_eth_dev_info_get(port_id, &dev_info); 3805 if (ret != 0) 3806 return ret; 3807 3808 /* Rx VLAN offloading must be within its device capabilities */ 3809 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3810 new_offloads = dev_offloads & ~orig_offloads; 3811 RTE_ETHDEV_LOG(ERR, 3812 "Ethdev port_id=%u requested new added VLAN offloads " 3813 "0x%" PRIx64 " must be within Rx offloads capabilities " 3814 "0x%" PRIx64 " in %s()\n", 3815 port_id, new_offloads, dev_info.rx_offload_capa, 3816 __func__); 3817 return -EINVAL; 3818 } 3819 3820 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3821 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3822 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3823 if (ret) { 3824 /* hit an error restore original values */ 3825 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3826 } 3827 3828 return eth_err(port_id, ret); 3829 } 3830 3831 int 3832 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3833 { 3834 struct rte_eth_dev *dev; 3835 uint64_t *dev_offloads; 3836 int ret = 0; 3837 3838 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3839 dev = &rte_eth_devices[port_id]; 3840 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3841 3842 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 3843 ret |= ETH_VLAN_STRIP_OFFLOAD; 3844 3845 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 3846 ret |= ETH_VLAN_FILTER_OFFLOAD; 3847 3848 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 3849 ret |= ETH_VLAN_EXTEND_OFFLOAD; 3850 3851 if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP) 3852 ret |= ETH_QINQ_STRIP_OFFLOAD; 3853 3854 return ret; 3855 } 3856 3857 int 3858 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3859 { 3860 struct rte_eth_dev *dev; 3861 3862 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3863 dev = &rte_eth_devices[port_id]; 3864 3865 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3866 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3867 } 3868 3869 int 3870 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3871 { 3872 struct rte_eth_dev *dev; 3873 3874 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3875 dev = &rte_eth_devices[port_id]; 3876 3877 if (fc_conf == NULL) { 3878 RTE_ETHDEV_LOG(ERR, 3879 "Cannot get ethdev port %u flow control config to NULL\n", 3880 port_id); 3881 return -EINVAL; 3882 } 3883 3884 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3885 memset(fc_conf, 0, sizeof(*fc_conf)); 3886 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3887 } 3888 3889 int 3890 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3891 { 3892 struct rte_eth_dev 
*dev; 3893 3894 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3895 dev = &rte_eth_devices[port_id]; 3896 3897 if (fc_conf == NULL) { 3898 RTE_ETHDEV_LOG(ERR, 3899 "Cannot set ethdev port %u flow control from NULL config\n", 3900 port_id); 3901 return -EINVAL; 3902 } 3903 3904 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 3905 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 3906 return -EINVAL; 3907 } 3908 3909 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 3910 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 3911 } 3912 3913 int 3914 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 3915 struct rte_eth_pfc_conf *pfc_conf) 3916 { 3917 struct rte_eth_dev *dev; 3918 3919 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3920 dev = &rte_eth_devices[port_id]; 3921 3922 if (pfc_conf == NULL) { 3923 RTE_ETHDEV_LOG(ERR, 3924 "Cannot set ethdev port %u priority flow control from NULL config\n", 3925 port_id); 3926 return -EINVAL; 3927 } 3928 3929 if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) { 3930 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 3931 return -EINVAL; 3932 } 3933 3934 /* High water, low water validation are device specific */ 3935 if (*dev->dev_ops->priority_flow_ctrl_set) 3936 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 3937 (dev, pfc_conf)); 3938 return -ENOTSUP; 3939 } 3940 3941 static int 3942 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 3943 uint16_t reta_size) 3944 { 3945 uint16_t i, num; 3946 3947 num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE; 3948 for (i = 0; i < num; i++) { 3949 if (reta_conf[i].mask) 3950 return 0; 3951 } 3952 3953 return -EINVAL; 3954 } 3955 3956 static int 3957 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 3958 uint16_t reta_size, 3959 uint16_t max_rxq) 3960 { 3961 uint16_t i, idx, shift; 3962 3963 if (max_rxq == 0) { 3964 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 3965 return -EINVAL; 3966 } 3967 3968 for (i = 0; i < reta_size; i++) { 3969 idx = i / RTE_RETA_GROUP_SIZE; 3970 shift = i % RTE_RETA_GROUP_SIZE; 3971 if ((reta_conf[idx].mask & (1ULL << shift)) && 3972 (reta_conf[idx].reta[shift] >= max_rxq)) { 3973 RTE_ETHDEV_LOG(ERR, 3974 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 3975 idx, shift, 3976 reta_conf[idx].reta[shift], max_rxq); 3977 return -EINVAL; 3978 } 3979 } 3980 3981 return 0; 3982 } 3983 3984 int 3985 rte_eth_dev_rss_reta_update(uint16_t port_id, 3986 struct rte_eth_rss_reta_entry64 *reta_conf, 3987 uint16_t reta_size) 3988 { 3989 struct rte_eth_dev *dev; 3990 int ret; 3991 3992 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3993 dev = &rte_eth_devices[port_id]; 3994 3995 if (reta_conf == NULL) { 3996 RTE_ETHDEV_LOG(ERR, 3997 "Cannot update ethdev port %u RSS RETA to NULL\n", 3998 port_id); 3999 return -EINVAL; 4000 } 4001 4002 if (reta_size == 0) { 4003 RTE_ETHDEV_LOG(ERR, 4004 "Cannot update ethdev port %u RSS RETA with zero size\n", 4005 port_id); 4006 return -EINVAL; 4007 } 4008 4009 /* Check mask bits */ 4010 ret = eth_check_reta_mask(reta_conf, reta_size); 4011 if (ret < 0) 4012 return ret; 4013 4014 /* Check entry value */ 4015 ret = eth_check_reta_entry(reta_conf, reta_size, 4016 dev->data->nb_rx_queues); 4017 if (ret < 0) 4018 return ret; 4019 4020 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 4021 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4022 reta_size)); 4023 } 4024 
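/*
 * Illustrative sketch (editor's addition, not part of the ethdev library):
 * how a caller is expected to fill the RETA group/mask layout that
 * eth_check_reta_mask() and eth_check_reta_entry() above validate. Each
 * rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE indirection entries;
 * a bit set in 'mask' marks the matching reta[] slot as valid for the
 * update. The round-robin spreading policy and the guard macro
 * RTE_ETHDEV_RETA_EXAMPLE are assumptions for illustration only, so this
 * block is never compiled as part of the library.
 */
#ifdef RTE_ETHDEV_RETA_EXAMPLE
static int
example_rss_reta_round_robin(uint16_t port_id, uint16_t nb_rx_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
						  RTE_RETA_GROUP_SIZE];
	struct rte_eth_dev_info dev_info;
	uint16_t i, idx, shift;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	if (nb_rx_queues == 0 || dev_info.reta_size == 0 ||
	    dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < dev_info.reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		/* mark this entry as valid and spread queues round-robin */
		reta_conf[idx].mask |= UINT64_C(1) << shift;
		reta_conf[idx].reta[shift] = i % nb_rx_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
					   dev_info.reta_size);
}
#endif /* RTE_ETHDEV_RETA_EXAMPLE */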
4025 int 4026 rte_eth_dev_rss_reta_query(uint16_t port_id, 4027 struct rte_eth_rss_reta_entry64 *reta_conf, 4028 uint16_t reta_size) 4029 { 4030 struct rte_eth_dev *dev; 4031 int ret; 4032 4033 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4034 dev = &rte_eth_devices[port_id]; 4035 4036 if (reta_conf == NULL) { 4037 RTE_ETHDEV_LOG(ERR, 4038 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4039 port_id); 4040 return -EINVAL; 4041 } 4042 4043 /* Check mask bits */ 4044 ret = eth_check_reta_mask(reta_conf, reta_size); 4045 if (ret < 0) 4046 return ret; 4047 4048 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 4049 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4050 reta_size)); 4051 } 4052 4053 int 4054 rte_eth_dev_rss_hash_update(uint16_t port_id, 4055 struct rte_eth_rss_conf *rss_conf) 4056 { 4057 struct rte_eth_dev *dev; 4058 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4059 int ret; 4060 4061 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4062 dev = &rte_eth_devices[port_id]; 4063 4064 if (rss_conf == NULL) { 4065 RTE_ETHDEV_LOG(ERR, 4066 "Cannot update ethdev port %u RSS hash from NULL config\n", 4067 port_id); 4068 return -EINVAL; 4069 } 4070 4071 ret = rte_eth_dev_info_get(port_id, &dev_info); 4072 if (ret != 0) 4073 return ret; 4074 4075 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4076 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4077 dev_info.flow_type_rss_offloads) { 4078 RTE_ETHDEV_LOG(ERR, 4079 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4080 port_id, rss_conf->rss_hf, 4081 dev_info.flow_type_rss_offloads); 4082 return -EINVAL; 4083 } 4084 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 4085 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4086 rss_conf)); 4087 } 4088 4089 int 4090 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4091 struct rte_eth_rss_conf *rss_conf) 4092 { 4093 struct rte_eth_dev *dev; 4094 4095 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4096 dev = &rte_eth_devices[port_id]; 4097 4098 if (rss_conf == NULL) { 4099 RTE_ETHDEV_LOG(ERR, 4100 "Cannot get ethdev port %u RSS hash config to NULL\n", 4101 port_id); 4102 return -EINVAL; 4103 } 4104 4105 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 4106 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4107 rss_conf)); 4108 } 4109 4110 int 4111 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4112 struct rte_eth_udp_tunnel *udp_tunnel) 4113 { 4114 struct rte_eth_dev *dev; 4115 4116 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4117 dev = &rte_eth_devices[port_id]; 4118 4119 if (udp_tunnel == NULL) { 4120 RTE_ETHDEV_LOG(ERR, 4121 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4122 port_id); 4123 return -EINVAL; 4124 } 4125 4126 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4127 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4128 return -EINVAL; 4129 } 4130 4131 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 4132 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4133 udp_tunnel)); 4134 } 4135 4136 int 4137 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4138 struct rte_eth_udp_tunnel *udp_tunnel) 4139 { 4140 struct rte_eth_dev *dev; 4141 4142 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4143 dev = &rte_eth_devices[port_id]; 4144 4145 if (udp_tunnel == NULL) { 4146 RTE_ETHDEV_LOG(ERR, 4147 "Cannot delete ethdev port %u UDP 
tunnel port from NULL UDP tunnel\n", 4148 port_id); 4149 return -EINVAL; 4150 } 4151 4152 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4153 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4154 return -EINVAL; 4155 } 4156 4157 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 4158 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4159 udp_tunnel)); 4160 } 4161 4162 int 4163 rte_eth_led_on(uint16_t port_id) 4164 { 4165 struct rte_eth_dev *dev; 4166 4167 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4168 dev = &rte_eth_devices[port_id]; 4169 4170 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 4171 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4172 } 4173 4174 int 4175 rte_eth_led_off(uint16_t port_id) 4176 { 4177 struct rte_eth_dev *dev; 4178 4179 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4180 dev = &rte_eth_devices[port_id]; 4181 4182 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 4183 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4184 } 4185 4186 int 4187 rte_eth_fec_get_capability(uint16_t port_id, 4188 struct rte_eth_fec_capa *speed_fec_capa, 4189 unsigned int num) 4190 { 4191 struct rte_eth_dev *dev; 4192 int ret; 4193 4194 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4195 dev = &rte_eth_devices[port_id]; 4196 4197 if (speed_fec_capa == NULL && num > 0) { 4198 RTE_ETHDEV_LOG(ERR, 4199 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4200 port_id); 4201 return -EINVAL; 4202 } 4203 4204 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); 4205 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4206 4207 return ret; 4208 } 4209 4210 int 4211 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4212 { 4213 struct rte_eth_dev *dev; 4214 4215 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4216 dev = &rte_eth_devices[port_id]; 4217 4218 if (fec_capa == NULL) { 4219 RTE_ETHDEV_LOG(ERR, 4220 "Cannot get ethdev port %u current FEC mode to NULL\n", 4221 port_id); 4222 return -EINVAL; 4223 } 4224 4225 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); 4226 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4227 } 4228 4229 int 4230 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4231 { 4232 struct rte_eth_dev *dev; 4233 4234 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4235 dev = &rte_eth_devices[port_id]; 4236 4237 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); 4238 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4239 } 4240 4241 /* 4242 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4243 * an empty spot. 
4244 */ 4245 static int 4246 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4247 { 4248 struct rte_eth_dev_info dev_info; 4249 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4250 unsigned i; 4251 int ret; 4252 4253 ret = rte_eth_dev_info_get(port_id, &dev_info); 4254 if (ret != 0) 4255 return -1; 4256 4257 for (i = 0; i < dev_info.max_mac_addrs; i++) 4258 if (memcmp(addr, &dev->data->mac_addrs[i], 4259 RTE_ETHER_ADDR_LEN) == 0) 4260 return i; 4261 4262 return -1; 4263 } 4264 4265 static const struct rte_ether_addr null_mac_addr; 4266 4267 int 4268 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4269 uint32_t pool) 4270 { 4271 struct rte_eth_dev *dev; 4272 int index; 4273 uint64_t pool_mask; 4274 int ret; 4275 4276 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4277 dev = &rte_eth_devices[port_id]; 4278 4279 if (addr == NULL) { 4280 RTE_ETHDEV_LOG(ERR, 4281 "Cannot add ethdev port %u MAC address from NULL address\n", 4282 port_id); 4283 return -EINVAL; 4284 } 4285 4286 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 4287 4288 if (rte_is_zero_ether_addr(addr)) { 4289 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4290 port_id); 4291 return -EINVAL; 4292 } 4293 if (pool >= ETH_64_POOLS) { 4294 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1); 4295 return -EINVAL; 4296 } 4297 4298 index = eth_dev_get_mac_addr_index(port_id, addr); 4299 if (index < 0) { 4300 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4301 if (index < 0) { 4302 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4303 port_id); 4304 return -ENOSPC; 4305 } 4306 } else { 4307 pool_mask = dev->data->mac_pool_sel[index]; 4308 4309 /* Check if both MAC address and pool is already there, and do nothing */ 4310 if (pool_mask & (1ULL << pool)) 4311 return 0; 4312 } 4313 4314 /* Update NIC */ 4315 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4316 4317 if (ret == 0) { 4318 /* Update address in NIC data structure */ 4319 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4320 4321 /* Update pool bitmap in NIC data structure */ 4322 dev->data->mac_pool_sel[index] |= (1ULL << pool); 4323 } 4324 4325 return eth_err(port_id, ret); 4326 } 4327 4328 int 4329 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4330 { 4331 struct rte_eth_dev *dev; 4332 int index; 4333 4334 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4335 dev = &rte_eth_devices[port_id]; 4336 4337 if (addr == NULL) { 4338 RTE_ETHDEV_LOG(ERR, 4339 "Cannot remove ethdev port %u MAC address from NULL address\n", 4340 port_id); 4341 return -EINVAL; 4342 } 4343 4344 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); 4345 4346 index = eth_dev_get_mac_addr_index(port_id, addr); 4347 if (index == 0) { 4348 RTE_ETHDEV_LOG(ERR, 4349 "Port %u: Cannot remove default MAC address\n", 4350 port_id); 4351 return -EADDRINUSE; 4352 } else if (index < 0) 4353 return 0; /* Do nothing if address wasn't found */ 4354 4355 /* Update NIC */ 4356 (*dev->dev_ops->mac_addr_remove)(dev, index); 4357 4358 /* Update address in NIC data structure */ 4359 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4360 4361 /* reset pool bitmap */ 4362 dev->data->mac_pool_sel[index] = 0; 4363 4364 return 0; 4365 } 4366 4367 int 4368 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4369 { 4370 struct rte_eth_dev *dev; 4371 int ret; 4372 4373 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4374 dev = &rte_eth_devices[port_id]; 4375 4376 if (addr == NULL) { 4377 RTE_ETHDEV_LOG(ERR, 4378 "Cannot set ethdev port %u default MAC address from NULL address\n", 4379 port_id); 4380 return -EINVAL; 4381 } 4382 4383 if (!rte_is_valid_assigned_ether_addr(addr)) 4384 return -EINVAL; 4385 4386 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 4387 4388 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4389 if (ret < 0) 4390 return ret; 4391 4392 /* Update default address in NIC data structure */ 4393 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4394 4395 return 0; 4396 } 4397 4398 4399 /* 4400 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4401 * an empty spot. 4402 */ 4403 static int 4404 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4405 const struct rte_ether_addr *addr) 4406 { 4407 struct rte_eth_dev_info dev_info; 4408 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4409 unsigned i; 4410 int ret; 4411 4412 ret = rte_eth_dev_info_get(port_id, &dev_info); 4413 if (ret != 0) 4414 return -1; 4415 4416 if (!dev->data->hash_mac_addrs) 4417 return -1; 4418 4419 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4420 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4421 RTE_ETHER_ADDR_LEN) == 0) 4422 return i; 4423 4424 return -1; 4425 } 4426 4427 int 4428 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4429 uint8_t on) 4430 { 4431 int index; 4432 int ret; 4433 struct rte_eth_dev *dev; 4434 4435 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4436 dev = &rte_eth_devices[port_id]; 4437 4438 if (addr == NULL) { 4439 RTE_ETHDEV_LOG(ERR, 4440 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4441 port_id); 4442 return -EINVAL; 4443 } 4444 4445 if (rte_is_zero_ether_addr(addr)) { 4446 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4447 port_id); 4448 return -EINVAL; 4449 } 4450 4451 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4452 /* Check if it's already there, and do nothing */ 4453 if ((index >= 0) && on) 4454 return 0; 4455 4456 if (index < 0) { 4457 if (!on) { 4458 RTE_ETHDEV_LOG(ERR, 4459 "Port %u: the MAC address was not set in UTA\n", 4460 port_id); 4461 return -EINVAL; 4462 } 4463 4464 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4465 if (index < 0) { 4466 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4467 port_id); 4468 return -ENOSPC; 4469 } 4470 } 4471 4472 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 4473 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4474 if (ret == 0) { 4475 /* Update address in NIC data structure */ 4476 if (on) 4477 rte_ether_addr_copy(addr, 4478 &dev->data->hash_mac_addrs[index]); 4479 else 4480 rte_ether_addr_copy(&null_mac_addr, 4481 &dev->data->hash_mac_addrs[index]); 4482 } 4483 4484 return eth_err(port_id, ret); 4485 } 4486 4487 int 4488 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4489 { 4490 struct rte_eth_dev *dev; 4491 4492 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4493 dev = &rte_eth_devices[port_id]; 4494 4495 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 4496 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4497 on)); 4498 } 4499 4500 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4501 uint16_t tx_rate) 4502 { 4503 struct rte_eth_dev *dev; 4504 struct rte_eth_dev_info dev_info; 4505 struct rte_eth_link 
link;
4506 int ret;
4507
4508 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4509 dev = &rte_eth_devices[port_id];
4510
4511 ret = rte_eth_dev_info_get(port_id, &dev_info);
4512 if (ret != 0)
4513 return ret;
4514
4515 link = dev->data->dev_link;
4516
4517 if (queue_idx > dev_info.max_tx_queues) {
4518 RTE_ETHDEV_LOG(ERR,
4519 "Set queue rate limit:port %u: invalid queue id=%u\n",
4520 port_id, queue_idx);
4521 return -EINVAL;
4522 }
4523
4524 if (tx_rate > link.link_speed) {
4525 RTE_ETHDEV_LOG(ERR,
4526 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n",
4527 tx_rate, link.link_speed);
4528 return -EINVAL;
4529 }
4530
4531 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4532 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4533 queue_idx, tx_rate));
4534 }
4535
4536 RTE_INIT(eth_dev_init_fp_ops)
4537 {
4538 uint32_t i;
4539
4540 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
4541 eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
4542 }
4543
4544 RTE_INIT(eth_dev_init_cb_lists)
4545 {
4546 uint16_t i;
4547
4548 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4549 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4550 }
4551
4552 int
4553 rte_eth_dev_callback_register(uint16_t port_id,
4554 enum rte_eth_event_type event,
4555 rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4556 {
4557 struct rte_eth_dev *dev;
4558 struct rte_eth_dev_callback *user_cb;
4559 uint16_t next_port;
4560 uint16_t last_port;
4561
4562 if (cb_fn == NULL) {
4563 RTE_ETHDEV_LOG(ERR,
4564 "Cannot register ethdev port %u callback from NULL\n",
4565 port_id);
4566 return -EINVAL;
4567 }
4568
4569 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4570 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4571 return -EINVAL;
4572 }
4573
4574 if (port_id == RTE_ETH_ALL) {
4575 next_port = 0;
4576 last_port = RTE_MAX_ETHPORTS - 1;
4577 } else {
4578 next_port = last_port = port_id;
4579 }
4580
4581 rte_spinlock_lock(&eth_dev_cb_lock);
4582
4583 do {
4584 dev = &rte_eth_devices[next_port];
4585
4586 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4587 if (user_cb->cb_fn == cb_fn &&
4588 user_cb->cb_arg == cb_arg &&
4589 user_cb->event == event) {
4590 break;
4591 }
4592 }
4593
4594 /* create a new callback. */
4595 if (user_cb == NULL) {
4596 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4597 sizeof(struct rte_eth_dev_callback), 0);
4598 if (user_cb != NULL) {
4599 user_cb->cb_fn = cb_fn;
4600 user_cb->cb_arg = cb_arg;
4601 user_cb->event = event;
4602 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4603 user_cb, next);
4604 } else {
4605 rte_spinlock_unlock(&eth_dev_cb_lock);
4606 rte_eth_dev_callback_unregister(port_id, event,
4607 cb_fn, cb_arg);
4608 return -ENOMEM;
4609 }
4610
4611 }
4612 } while (++next_port <= last_port);
4613
4614 rte_spinlock_unlock(&eth_dev_cb_lock);
4615 return 0;
4616 }
4617
4618 int
4619 rte_eth_dev_callback_unregister(uint16_t port_id,
4620 enum rte_eth_event_type event,
4621 rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4622 {
4623 int ret;
4624 struct rte_eth_dev *dev;
4625 struct rte_eth_dev_callback *cb, *next;
4626 uint16_t next_port;
4627 uint16_t last_port;
4628
4629 if (cb_fn == NULL) {
4630 RTE_ETHDEV_LOG(ERR,
4631 "Cannot unregister ethdev port %u callback from NULL\n",
4632 port_id);
4633 return -EINVAL;
4634 }
4635
4636 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4637 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4638 return -EINVAL;
4639 }
4640
4641 if (port_id == RTE_ETH_ALL) {
4642 next_port = 0;
4643 last_port = RTE_MAX_ETHPORTS - 1;
4644 } else {
4645 next_port = last_port = port_id;
4646 }
4647
4648 rte_spinlock_lock(&eth_dev_cb_lock);
4649
4650 do {
4651 dev = &rte_eth_devices[next_port];
4652 ret = 0;
4653 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4654 cb = next) {
4655
4656 next = TAILQ_NEXT(cb, next);
4657
4658 if (cb->cb_fn != cb_fn || cb->event != event ||
4659 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4660 continue;
4661
4662 /*
4663 * if this callback is not executing right now,
4664 * then remove it.
4665 */
4666 if (cb->active == 0) {
4667 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4668 rte_free(cb);
4669 } else {
4670 ret = -EAGAIN;
4671 }
4672 }
4673 } while (++next_port <= last_port);
4674
4675 rte_spinlock_unlock(&eth_dev_cb_lock);
4676 return ret;
4677 }
4678
4679 int
4680 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4681 enum rte_eth_event_type event, void *ret_param)
4682 {
4683 struct rte_eth_dev_callback *cb_lst;
4684 struct rte_eth_dev_callback dev_cb;
4685 int rc = 0;
4686
4687 rte_spinlock_lock(&eth_dev_cb_lock);
4688 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4689 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4690 continue;
4691 dev_cb = *cb_lst;
4692 cb_lst->active = 1;
4693 if (ret_param != NULL)
4694 dev_cb.ret_param = ret_param;
4695
4696 rte_spinlock_unlock(&eth_dev_cb_lock);
4697 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4698 dev_cb.cb_arg, dev_cb.ret_param);
4699 rte_spinlock_lock(&eth_dev_cb_lock);
4700 cb_lst->active = 0;
4701 }
4702 rte_spinlock_unlock(&eth_dev_cb_lock);
4703 return rc;
4704 }
4705
4706 void
4707 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4708 {
4709 if (dev == NULL)
4710 return;
4711
4712 /*
4713 * for secondary process, at that point we expect device
4714 * to be already 'usable', so shared data and all function pointers
4715 * for fast-path devops have to be setup properly inside rte_eth_dev.
4716 */ 4717 if (rte_eal_process_type() == RTE_PROC_SECONDARY) 4718 eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev); 4719 4720 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); 4721 4722 dev->state = RTE_ETH_DEV_ATTACHED; 4723 } 4724 4725 int 4726 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4727 { 4728 uint32_t vec; 4729 struct rte_eth_dev *dev; 4730 struct rte_intr_handle *intr_handle; 4731 uint16_t qid; 4732 int rc; 4733 4734 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4735 dev = &rte_eth_devices[port_id]; 4736 4737 if (!dev->intr_handle) { 4738 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4739 return -ENOTSUP; 4740 } 4741 4742 intr_handle = dev->intr_handle; 4743 if (!intr_handle->intr_vec) { 4744 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4745 return -EPERM; 4746 } 4747 4748 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4749 vec = intr_handle->intr_vec[qid]; 4750 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4751 if (rc && rc != -EEXIST) { 4752 RTE_ETHDEV_LOG(ERR, 4753 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 4754 port_id, qid, op, epfd, vec); 4755 } 4756 } 4757 4758 return 0; 4759 } 4760 4761 int 4762 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4763 { 4764 struct rte_intr_handle *intr_handle; 4765 struct rte_eth_dev *dev; 4766 unsigned int efd_idx; 4767 uint32_t vec; 4768 int fd; 4769 4770 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4771 dev = &rte_eth_devices[port_id]; 4772 4773 if (queue_id >= dev->data->nb_rx_queues) { 4774 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 4775 return -1; 4776 } 4777 4778 if (!dev->intr_handle) { 4779 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4780 return -1; 4781 } 4782 4783 intr_handle = dev->intr_handle; 4784 if (!intr_handle->intr_vec) { 4785 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4786 return -1; 4787 } 4788 4789 vec = intr_handle->intr_vec[queue_id]; 4790 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
4791 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 4792 fd = intr_handle->efds[efd_idx]; 4793 4794 return fd; 4795 } 4796 4797 static inline int 4798 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id, 4799 const char *ring_name) 4800 { 4801 return snprintf(name, len, "eth_p%d_q%d_%s", 4802 port_id, queue_id, ring_name); 4803 } 4804 4805 const struct rte_memzone * 4806 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, 4807 uint16_t queue_id, size_t size, unsigned align, 4808 int socket_id) 4809 { 4810 char z_name[RTE_MEMZONE_NAMESIZE]; 4811 const struct rte_memzone *mz; 4812 int rc; 4813 4814 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4815 queue_id, ring_name); 4816 if (rc >= RTE_MEMZONE_NAMESIZE) { 4817 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4818 rte_errno = ENAMETOOLONG; 4819 return NULL; 4820 } 4821 4822 mz = rte_memzone_lookup(z_name); 4823 if (mz) { 4824 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) || 4825 size > mz->len || 4826 ((uintptr_t)mz->addr & (align - 1)) != 0) { 4827 RTE_ETHDEV_LOG(ERR, 4828 "memzone %s does not justify the requested attributes\n", 4829 mz->name); 4830 return NULL; 4831 } 4832 4833 return mz; 4834 } 4835 4836 return rte_memzone_reserve_aligned(z_name, size, socket_id, 4837 RTE_MEMZONE_IOVA_CONTIG, align); 4838 } 4839 4840 int 4841 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name, 4842 uint16_t queue_id) 4843 { 4844 char z_name[RTE_MEMZONE_NAMESIZE]; 4845 const struct rte_memzone *mz; 4846 int rc = 0; 4847 4848 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4849 queue_id, ring_name); 4850 if (rc >= RTE_MEMZONE_NAMESIZE) { 4851 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4852 return -ENAMETOOLONG; 4853 } 4854 4855 mz = rte_memzone_lookup(z_name); 4856 if (mz) 4857 rc = rte_memzone_free(mz); 4858 else 4859 rc = -ENOENT; 4860 4861 return rc; 4862 } 4863 4864 int 4865 rte_eth_dev_create(struct rte_device *device, const char *name, 4866 size_t priv_data_size, 4867 ethdev_bus_specific_init ethdev_bus_specific_init, 4868 void *bus_init_params, 4869 ethdev_init_t ethdev_init, void *init_params) 4870 { 4871 struct rte_eth_dev *ethdev; 4872 int retval; 4873 4874 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); 4875 4876 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4877 ethdev = rte_eth_dev_allocate(name); 4878 if (!ethdev) 4879 return -ENODEV; 4880 4881 if (priv_data_size) { 4882 ethdev->data->dev_private = rte_zmalloc_socket( 4883 name, priv_data_size, RTE_CACHE_LINE_SIZE, 4884 device->numa_node); 4885 4886 if (!ethdev->data->dev_private) { 4887 RTE_ETHDEV_LOG(ERR, 4888 "failed to allocate private data\n"); 4889 retval = -ENOMEM; 4890 goto probe_failed; 4891 } 4892 } 4893 } else { 4894 ethdev = rte_eth_dev_attach_secondary(name); 4895 if (!ethdev) { 4896 RTE_ETHDEV_LOG(ERR, 4897 "secondary process attach failed, ethdev doesn't exist\n"); 4898 return -ENODEV; 4899 } 4900 } 4901 4902 ethdev->device = device; 4903 4904 if (ethdev_bus_specific_init) { 4905 retval = ethdev_bus_specific_init(ethdev, bus_init_params); 4906 if (retval) { 4907 RTE_ETHDEV_LOG(ERR, 4908 "ethdev bus specific initialisation failed\n"); 4909 goto probe_failed; 4910 } 4911 } 4912 4913 retval = ethdev_init(ethdev, init_params); 4914 if (retval) { 4915 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n"); 4916 goto probe_failed; 4917 } 4918 4919 rte_eth_dev_probing_finish(ethdev); 4920 4921 return retval; 4922 4923 probe_failed: 4924 
rte_eth_dev_release_port(ethdev);
4925 return retval;
4926 }
4927
4928 int
4929 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4930 ethdev_uninit_t ethdev_uninit)
4931 {
4932 int ret;
4933
4934 ethdev = rte_eth_dev_allocated(ethdev->data->name);
4935 if (!ethdev)
4936 return -ENODEV;
4937
4938 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4939
4940 ret = ethdev_uninit(ethdev);
4941 if (ret)
4942 return ret;
4943
4944 return rte_eth_dev_release_port(ethdev);
4945 }
4946
4947 int
4948 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4949 int epfd, int op, void *data)
4950 {
4951 uint32_t vec;
4952 struct rte_eth_dev *dev;
4953 struct rte_intr_handle *intr_handle;
4954 int rc;
4955
4956 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4957 dev = &rte_eth_devices[port_id];
4958
4959 if (queue_id >= dev->data->nb_rx_queues) {
4960 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4961 return -EINVAL;
4962 }
4963
4964 if (!dev->intr_handle) {
4965 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4966 return -ENOTSUP;
4967 }
4968
4969 intr_handle = dev->intr_handle;
4970 if (!intr_handle->intr_vec) {
4971 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4972 return -EPERM;
4973 }
4974
4975 vec = intr_handle->intr_vec[queue_id];
4976 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4977 if (rc && rc != -EEXIST) {
4978 RTE_ETHDEV_LOG(ERR,
4979 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4980 port_id, queue_id, op, epfd, vec);
4981 return rc;
4982 }
4983
4984 return 0;
4985 }
4986
4987 int
4988 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4989 uint16_t queue_id)
4990 {
4991 struct rte_eth_dev *dev;
4992 int ret;
4993
4994 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4995 dev = &rte_eth_devices[port_id];
4996
4997 ret = eth_dev_validate_rx_queue(dev, queue_id);
4998 if (ret != 0)
4999 return ret;
5000
5001 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
5002 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
5003 }
5004
5005 int
5006 rte_eth_dev_rx_intr_disable(uint16_t port_id,
5007 uint16_t queue_id)
5008 {
5009 struct rte_eth_dev *dev;
5010 int ret;
5011
5012 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5013 dev = &rte_eth_devices[port_id];
5014
5015 ret = eth_dev_validate_rx_queue(dev, queue_id);
5016 if (ret != 0)
5017 return ret;
5018
5019 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
5020 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
5021 }
5022
5023
5024 const struct rte_eth_rxtx_callback *
5025 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
5026 rte_rx_callback_fn fn, void *user_param)
5027 {
5028 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5029 rte_errno = ENOTSUP;
5030 return NULL;
5031 #endif
5032 struct rte_eth_dev *dev;
5033
5034 /* check input parameters */
5035 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5036 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5037 rte_errno = EINVAL;
5038 return NULL;
5039 }
5040 dev = &rte_eth_devices[port_id];
5041 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5042 rte_errno = EINVAL;
5043 return NULL;
5044 }
5045 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5046
5047 if (cb == NULL) {
5048 rte_errno = ENOMEM;
5049 return NULL;
5050 }
5051
5052 cb->fn.rx = fn;
5053 cb->param = user_param;
5054
5055 rte_spinlock_lock(&eth_dev_rx_cb_lock);
5056 /* Add the callbacks in fifo order. */
5057 struct rte_eth_rxtx_callback *tail =
5058 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5059
5060 if (!tail) {
5061 /* Stores to cb->fn and cb->param should complete before
5062 * cb is visible to data plane.
5063 */
5064 __atomic_store_n(
5065 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5066 cb, __ATOMIC_RELEASE);
5067
5068 } else {
5069 while (tail->next)
5070 tail = tail->next;
5071 /* Stores to cb->fn and cb->param should complete before
5072 * cb is visible to data plane.
5073 */
5074 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5075 }
5076 rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5077
5078 return cb;
5079 }
5080
5081 const struct rte_eth_rxtx_callback *
5082 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
5083 rte_rx_callback_fn fn, void *user_param)
5084 {
5085 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5086 rte_errno = ENOTSUP;
5087 return NULL;
5088 #endif
5089 /* check input parameters */
5090 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5091 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5092 rte_errno = EINVAL;
5093 return NULL;
5094 }
5095
5096 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5097
5098 if (cb == NULL) {
5099 rte_errno = ENOMEM;
5100 return NULL;
5101 }
5102
5103 cb->fn.rx = fn;
5104 cb->param = user_param;
5105
5106 rte_spinlock_lock(&eth_dev_rx_cb_lock);
5107 /* Add the callbacks at first position */
5108 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5109 /* Stores to cb->fn, cb->param and cb->next should complete before
5110 * cb is visible to data plane threads.
5111 */
5112 __atomic_store_n(
5113 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5114 cb, __ATOMIC_RELEASE);
5115 rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5116
5117 return cb;
5118 }
5119
5120 const struct rte_eth_rxtx_callback *
5121 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
5122 rte_tx_callback_fn fn, void *user_param)
5123 {
5124 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5125 rte_errno = ENOTSUP;
5126 return NULL;
5127 #endif
5128 struct rte_eth_dev *dev;
5129
5130 /* check input parameters */
5131 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5132 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
5133 rte_errno = EINVAL;
5134 return NULL;
5135 }
5136
5137 dev = &rte_eth_devices[port_id];
5138 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5139 rte_errno = EINVAL;
5140 return NULL;
5141 }
5142
5143 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5144
5145 if (cb == NULL) {
5146 rte_errno = ENOMEM;
5147 return NULL;
5148 }
5149
5150 cb->fn.tx = fn;
5151 cb->param = user_param;
5152
5153 rte_spinlock_lock(&eth_dev_tx_cb_lock);
5154 /* Add the callbacks in fifo order. */
5155 struct rte_eth_rxtx_callback *tail =
5156 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
5157
5158 if (!tail) {
5159 /* Stores to cb->fn and cb->param should complete before
5160 * cb is visible to data plane.
5161 */
5162 __atomic_store_n(
5163 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
5164 cb, __ATOMIC_RELEASE);
5165
5166 } else {
5167 while (tail->next)
5168 tail = tail->next;
5169 /* Stores to cb->fn and cb->param should complete before
5170 * cb is visible to data plane.
5171 */
5172 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5173 }
5174 rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5175
5176 return cb;
5177 }
5178
5179 int
5180 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5181 const struct rte_eth_rxtx_callback *user_cb)
5182 {
5183 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5184 return -ENOTSUP;
5185 #endif
5186 /* Check input parameters. */
5187 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5188 if (user_cb == NULL ||
5189 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
5190 return -EINVAL;
5191
5192 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5193 struct rte_eth_rxtx_callback *cb;
5194 struct rte_eth_rxtx_callback **prev_cb;
5195 int ret = -EINVAL;
5196
5197 rte_spinlock_lock(&eth_dev_rx_cb_lock);
5198 prev_cb = &dev->post_rx_burst_cbs[queue_id];
5199 for (; *prev_cb != NULL; prev_cb = &cb->next) {
5200 cb = *prev_cb;
5201 if (cb == user_cb) {
5202 /* Remove the user cb from the callback list. */
5203 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5204 ret = 0;
5205 break;
5206 }
5207 }
5208 rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5209
5210 return ret;
5211 }
5212
5213 int
5214 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5215 const struct rte_eth_rxtx_callback *user_cb)
5216 {
5217 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5218 return -ENOTSUP;
5219 #endif
5220 /* Check input parameters. */
5221 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5222 if (user_cb == NULL ||
5223 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5224 return -EINVAL;
5225
5226 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5227 int ret = -EINVAL;
5228 struct rte_eth_rxtx_callback *cb;
5229 struct rte_eth_rxtx_callback **prev_cb;
5230
5231 rte_spinlock_lock(&eth_dev_tx_cb_lock);
5232 prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5233 for (; *prev_cb != NULL; prev_cb = &cb->next) {
5234 cb = *prev_cb;
5235 if (cb == user_cb) {
5236 /* Remove the user cb from the callback list. */
5237 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5238 ret = 0;
5239 break;
5240 }
5241 }
5242 rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5243
5244 return ret;
5245 }
5246
5247 int
5248 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5249 struct rte_eth_rxq_info *qinfo)
5250 {
5251 struct rte_eth_dev *dev;
5252
5253 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5254 dev = &rte_eth_devices[port_id];
5255
5256 if (queue_id >= dev->data->nb_rx_queues) {
5257 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5258 return -EINVAL;
5259 }
5260
5261 if (qinfo == NULL) {
5262 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
5263 port_id, queue_id);
5264 return -EINVAL;
5265 }
5266
5267 if (dev->data->rx_queues == NULL ||
5268 dev->data->rx_queues[queue_id] == NULL) {
5269 RTE_ETHDEV_LOG(ERR,
5270 "Rx queue %"PRIu16" of device with port_id=%"
5271 PRIu16" has not been setup\n",
5272 queue_id, port_id);
5273 return -EINVAL;
5274 }
5275
5276 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5277 RTE_ETHDEV_LOG(INFO,
5278 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5279 queue_id, port_id);
5280 return -EINVAL;
5281 }
5282
5283 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
5284
5285 memset(qinfo, 0, sizeof(*qinfo));
5286 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5287 qinfo->queue_state = dev->data->rx_queue_state[queue_id];
5288
5289 return 0;
5290 }
5291
5292 int
5293 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5294 struct rte_eth_txq_info *qinfo)
5295 {
5296 struct rte_eth_dev *dev;
5297
5298 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5299 dev = &rte_eth_devices[port_id];
5300
5301 if (queue_id >= dev->data->nb_tx_queues) {
5302 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5303 return -EINVAL;
5304 }
5305
5306 if (qinfo == NULL) {
5307 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
5308 port_id, queue_id);
5309 return -EINVAL;
5310 }
5311
5312 if (dev->data->tx_queues == NULL ||
5313 dev->data->tx_queues[queue_id] == NULL) {
5314 RTE_ETHDEV_LOG(ERR,
5315 "Tx queue %"PRIu16" of device with port_id=%"
5316 PRIu16" has not been setup\n",
5317 queue_id, port_id);
5318 return -EINVAL;
5319 }
5320
5321 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5322 RTE_ETHDEV_LOG(INFO,
5323 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5324 queue_id, port_id);
5325 return -EINVAL;
5326 }
5327
5328 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5329
5330 memset(qinfo, 0, sizeof(*qinfo));
5331 dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5332 qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5333
5334 return 0;
5335 }
5336
5337 int
5338 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5339 struct rte_eth_burst_mode *mode)
5340 {
5341 struct rte_eth_dev *dev;
5342
5343 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5344 dev = &rte_eth_devices[port_id];
5345
5346 if (queue_id >= dev->data->nb_rx_queues) {
5347 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5348 return -EINVAL;
5349 }
5350
5351 if (mode == NULL) {
5352 RTE_ETHDEV_LOG(ERR,
5353 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5354 port_id, queue_id);
5355 return -EINVAL;
5356 }
5357
5358 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5359 memset(mode, 0, sizeof(*mode));
5360 return eth_err(port_id,
5361
dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5362 } 5363 5364 int 5365 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5366 struct rte_eth_burst_mode *mode) 5367 { 5368 struct rte_eth_dev *dev; 5369 5370 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5371 dev = &rte_eth_devices[port_id]; 5372 5373 if (queue_id >= dev->data->nb_tx_queues) { 5374 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); 5375 return -EINVAL; 5376 } 5377 5378 if (mode == NULL) { 5379 RTE_ETHDEV_LOG(ERR, 5380 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5381 port_id, queue_id); 5382 return -EINVAL; 5383 } 5384 5385 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 5386 memset(mode, 0, sizeof(*mode)); 5387 return eth_err(port_id, 5388 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5389 } 5390 5391 int 5392 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5393 struct rte_power_monitor_cond *pmc) 5394 { 5395 struct rte_eth_dev *dev; 5396 5397 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5398 dev = &rte_eth_devices[port_id]; 5399 5400 if (queue_id >= dev->data->nb_rx_queues) { 5401 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5402 return -EINVAL; 5403 } 5404 5405 if (pmc == NULL) { 5406 RTE_ETHDEV_LOG(ERR, 5407 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5408 port_id, queue_id); 5409 return -EINVAL; 5410 } 5411 5412 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); 5413 return eth_err(port_id, 5414 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5415 } 5416 5417 int 5418 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5419 struct rte_ether_addr *mc_addr_set, 5420 uint32_t nb_mc_addr) 5421 { 5422 struct rte_eth_dev *dev; 5423 5424 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5425 dev = &rte_eth_devices[port_id]; 5426 5427 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 5428 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5429 mc_addr_set, nb_mc_addr)); 5430 } 5431 5432 int 5433 rte_eth_timesync_enable(uint16_t port_id) 5434 { 5435 struct rte_eth_dev *dev; 5436 5437 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5438 dev = &rte_eth_devices[port_id]; 5439 5440 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 5441 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5442 } 5443 5444 int 5445 rte_eth_timesync_disable(uint16_t port_id) 5446 { 5447 struct rte_eth_dev *dev; 5448 5449 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5450 dev = &rte_eth_devices[port_id]; 5451 5452 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 5453 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5454 } 5455 5456 int 5457 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5458 uint32_t flags) 5459 { 5460 struct rte_eth_dev *dev; 5461 5462 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5463 dev = &rte_eth_devices[port_id]; 5464 5465 if (timestamp == NULL) { 5466 RTE_ETHDEV_LOG(ERR, 5467 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5468 port_id); 5469 return -EINVAL; 5470 } 5471 5472 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 5473 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5474 (dev, timestamp, flags)); 5475 } 5476 5477 int 5478 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5479 struct timespec *timestamp) 5480 { 5481 struct rte_eth_dev *dev; 5482 5483 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5484 dev = &rte_eth_devices[port_id]; 5485 5486 if (timestamp == NULL) { 5487 RTE_ETHDEV_LOG(ERR, 5488 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5489 port_id); 5490 return -EINVAL; 5491 } 5492 5493 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 5494 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5495 (dev, timestamp)); 5496 } 5497 5498 int 5499 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5500 { 5501 struct rte_eth_dev *dev; 5502 5503 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5504 dev = &rte_eth_devices[port_id]; 5505 5506 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 5507 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5508 } 5509 5510 int 5511 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5512 { 5513 struct rte_eth_dev *dev; 5514 5515 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5516 dev = &rte_eth_devices[port_id]; 5517 5518 if (timestamp == NULL) { 5519 RTE_ETHDEV_LOG(ERR, 5520 "Cannot read ethdev port %u timesync time to NULL\n", 5521 port_id); 5522 return -EINVAL; 5523 } 5524 5525 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 5526 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5527 timestamp)); 5528 } 5529 5530 int 5531 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5532 { 5533 struct rte_eth_dev *dev; 5534 5535 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5536 dev = &rte_eth_devices[port_id]; 5537 5538 if (timestamp == NULL) { 5539 RTE_ETHDEV_LOG(ERR, 5540 "Cannot write ethdev port %u timesync from NULL time\n", 5541 port_id); 5542 return -EINVAL; 5543 } 5544 5545 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 5546 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5547 timestamp)); 5548 } 5549 5550 int 5551 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5552 { 5553 struct rte_eth_dev *dev; 5554 5555 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5556 dev = &rte_eth_devices[port_id]; 5557 5558 if (clock == NULL) { 5559 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5560 port_id); 5561 return -EINVAL; 5562 } 5563 5564 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 5565 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5566 } 5567 5568 int 5569 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5570 { 5571 struct rte_eth_dev *dev; 5572 5573 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5574 dev = &rte_eth_devices[port_id]; 5575 5576 if (info == NULL) { 5577 RTE_ETHDEV_LOG(ERR, 5578 "Cannot get ethdev port %u register info to NULL\n", 5579 port_id); 5580 return -EINVAL; 5581 } 5582 5583 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 5584 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5585 } 5586 5587 int 5588 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5589 { 5590 struct rte_eth_dev *dev; 5591 5592 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5593 dev = &rte_eth_devices[port_id]; 5594 5595 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 5596 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5597 } 5598 5599 int 5600 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5601 { 5602 struct rte_eth_dev *dev; 5603 5604 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
5605 dev = &rte_eth_devices[port_id]; 5606 5607 if (info == NULL) { 5608 RTE_ETHDEV_LOG(ERR, 5609 "Cannot get ethdev port %u EEPROM info to NULL\n", 5610 port_id); 5611 return -EINVAL; 5612 } 5613 5614 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 5615 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5616 } 5617 5618 int 5619 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5620 { 5621 struct rte_eth_dev *dev; 5622 5623 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5624 dev = &rte_eth_devices[port_id]; 5625 5626 if (info == NULL) { 5627 RTE_ETHDEV_LOG(ERR, 5628 "Cannot set ethdev port %u EEPROM from NULL info\n", 5629 port_id); 5630 return -EINVAL; 5631 } 5632 5633 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 5634 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5635 } 5636 5637 int 5638 rte_eth_dev_get_module_info(uint16_t port_id, 5639 struct rte_eth_dev_module_info *modinfo) 5640 { 5641 struct rte_eth_dev *dev; 5642 5643 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5644 dev = &rte_eth_devices[port_id]; 5645 5646 if (modinfo == NULL) { 5647 RTE_ETHDEV_LOG(ERR, 5648 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5649 port_id); 5650 return -EINVAL; 5651 } 5652 5653 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 5654 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5655 } 5656 5657 int 5658 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5659 struct rte_dev_eeprom_info *info) 5660 { 5661 struct rte_eth_dev *dev; 5662 5663 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5664 dev = &rte_eth_devices[port_id]; 5665 5666 if (info == NULL) { 5667 RTE_ETHDEV_LOG(ERR, 5668 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5669 port_id); 5670 return -EINVAL; 5671 } 5672 5673 if (info->data == NULL) { 5674 RTE_ETHDEV_LOG(ERR, 5675 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5676 port_id); 5677 return -EINVAL; 5678 } 5679 5680 if (info->length == 0) { 5681 RTE_ETHDEV_LOG(ERR, 5682 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5683 port_id); 5684 return -EINVAL; 5685 } 5686 5687 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 5688 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5689 } 5690 5691 int 5692 rte_eth_dev_get_dcb_info(uint16_t port_id, 5693 struct rte_eth_dcb_info *dcb_info) 5694 { 5695 struct rte_eth_dev *dev; 5696 5697 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5698 dev = &rte_eth_devices[port_id]; 5699 5700 if (dcb_info == NULL) { 5701 RTE_ETHDEV_LOG(ERR, 5702 "Cannot get ethdev port %u DCB info to NULL\n", 5703 port_id); 5704 return -EINVAL; 5705 } 5706 5707 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5708 5709 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 5710 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5711 } 5712 5713 static void 5714 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5715 const struct rte_eth_desc_lim *desc_lim) 5716 { 5717 if (desc_lim->nb_align != 0) 5718 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5719 5720 if (desc_lim->nb_max != 0) 5721 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5722 5723 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5724 } 5725 5726 int 5727 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5728 uint16_t *nb_rx_desc, 5729 uint16_t *nb_tx_desc) 5730 { 5731 struct rte_eth_dev_info dev_info; 5732 int ret; 5733 5734 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
5735 5736 ret = rte_eth_dev_info_get(port_id, &dev_info); 5737 if (ret != 0) 5738 return ret; 5739 5740 if (nb_rx_desc != NULL) 5741 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5742 5743 if (nb_tx_desc != NULL) 5744 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5745 5746 return 0; 5747 } 5748 5749 int 5750 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5751 struct rte_eth_hairpin_cap *cap) 5752 { 5753 struct rte_eth_dev *dev; 5754 5755 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5756 dev = &rte_eth_devices[port_id]; 5757 5758 if (cap == NULL) { 5759 RTE_ETHDEV_LOG(ERR, 5760 "Cannot get ethdev port %u hairpin capability to NULL\n", 5761 port_id); 5762 return -EINVAL; 5763 } 5764 5765 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 5766 memset(cap, 0, sizeof(*cap)); 5767 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5768 } 5769 5770 int 5771 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5772 { 5773 if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5774 return 1; 5775 return 0; 5776 } 5777 5778 int 5779 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5780 { 5781 if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5782 return 1; 5783 return 0; 5784 } 5785 5786 int 5787 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5788 { 5789 struct rte_eth_dev *dev; 5790 5791 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5792 dev = &rte_eth_devices[port_id]; 5793 5794 if (pool == NULL) { 5795 RTE_ETHDEV_LOG(ERR, 5796 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5797 port_id); 5798 return -EINVAL; 5799 } 5800 5801 if (*dev->dev_ops->pool_ops_supported == NULL) 5802 return 1; /* all pools are supported */ 5803 5804 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5805 } 5806 5807 /** 5808 * A set of values to describe the possible states of a switch domain. 5809 */ 5810 enum rte_eth_switch_domain_state { 5811 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, 5812 RTE_ETH_SWITCH_DOMAIN_ALLOCATED 5813 }; 5814 5815 /** 5816 * Array of switch domains available for allocation. Array is sized to 5817 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than 5818 * ethdev ports in a single process. 
5819 */ 5820 static struct rte_eth_dev_switch { 5821 enum rte_eth_switch_domain_state state; 5822 } eth_dev_switch_domains[RTE_MAX_ETHPORTS]; 5823 5824 int 5825 rte_eth_switch_domain_alloc(uint16_t *domain_id) 5826 { 5827 uint16_t i; 5828 5829 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 5830 5831 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 5832 if (eth_dev_switch_domains[i].state == 5833 RTE_ETH_SWITCH_DOMAIN_UNUSED) { 5834 eth_dev_switch_domains[i].state = 5835 RTE_ETH_SWITCH_DOMAIN_ALLOCATED; 5836 *domain_id = i; 5837 return 0; 5838 } 5839 } 5840 5841 return -ENOSPC; 5842 } 5843 5844 int 5845 rte_eth_switch_domain_free(uint16_t domain_id) 5846 { 5847 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || 5848 domain_id >= RTE_MAX_ETHPORTS) 5849 return -EINVAL; 5850 5851 if (eth_dev_switch_domains[domain_id].state != 5852 RTE_ETH_SWITCH_DOMAIN_ALLOCATED) 5853 return -EINVAL; 5854 5855 eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED; 5856 5857 return 0; 5858 } 5859 5860 static int 5861 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) 5862 { 5863 int state; 5864 struct rte_kvargs_pair *pair; 5865 char *letter; 5866 5867 arglist->str = strdup(str_in); 5868 if (arglist->str == NULL) 5869 return -ENOMEM; 5870 5871 letter = arglist->str; 5872 state = 0; 5873 arglist->count = 0; 5874 pair = &arglist->pairs[0]; 5875 while (1) { 5876 switch (state) { 5877 case 0: /* Initial */ 5878 if (*letter == '=') 5879 return -EINVAL; 5880 else if (*letter == '\0') 5881 return 0; 5882 5883 state = 1; 5884 pair->key = letter; 5885 /* fall-thru */ 5886 5887 case 1: /* Parsing key */ 5888 if (*letter == '=') { 5889 *letter = '\0'; 5890 pair->value = letter + 1; 5891 state = 2; 5892 } else if (*letter == ',' || *letter == '\0') 5893 return -EINVAL; 5894 break; 5895 5896 5897 case 2: /* Parsing value */ 5898 if (*letter == '[') 5899 state = 3; 5900 else if (*letter == ',') { 5901 *letter = '\0'; 5902 arglist->count++; 5903 pair = &arglist->pairs[arglist->count]; 5904 state = 0; 5905 } else if (*letter == '\0') { 5906 letter--; 5907 arglist->count++; 5908 pair = &arglist->pairs[arglist->count]; 5909 state = 0; 5910 } 5911 break; 5912 5913 case 3: /* Parsing list */ 5914 if (*letter == ']') 5915 state = 2; 5916 else if (*letter == '\0') 5917 return -EINVAL; 5918 break; 5919 } 5920 letter++; 5921 } 5922 } 5923 5924 int 5925 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) 5926 { 5927 struct rte_kvargs args; 5928 struct rte_kvargs_pair *pair; 5929 unsigned int i; 5930 int result = 0; 5931 5932 memset(eth_da, 0, sizeof(*eth_da)); 5933 5934 result = eth_dev_devargs_tokenise(&args, dargs); 5935 if (result < 0) 5936 goto parse_cleanup; 5937 5938 for (i = 0; i < args.count; i++) { 5939 pair = &args.pairs[i]; 5940 if (strcmp("representor", pair->key) == 0) { 5941 if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) { 5942 RTE_LOG(ERR, EAL, "duplicated representor key: %s\n", 5943 dargs); 5944 result = -1; 5945 goto parse_cleanup; 5946 } 5947 result = rte_eth_devargs_parse_representor_ports( 5948 pair->value, eth_da); 5949 if (result < 0) 5950 goto parse_cleanup; 5951 } 5952 } 5953 5954 parse_cleanup: 5955 if (args.str) 5956 free(args.str); 5957 5958 return result; 5959 } 5960 5961 int 5962 rte_eth_representor_id_get(uint16_t port_id, 5963 enum rte_eth_representor_type type, 5964 int controller, int pf, int representor_port, 5965 uint16_t *repr_id) 5966 { 5967 int ret, n, count; 5968 uint32_t i; 5969 struct rte_eth_representor_info *info = NULL; 5970 size_t 
size; 5971 5972 if (type == RTE_ETH_REPRESENTOR_NONE) 5973 return 0; 5974 if (repr_id == NULL) 5975 return -EINVAL; 5976 5977 /* Get PMD representor range info. */ 5978 ret = rte_eth_representor_info_get(port_id, NULL); 5979 if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF && 5980 controller == -1 && pf == -1) { 5981 /* Direct mapping for legacy VF representor. */ 5982 *repr_id = representor_port; 5983 return 0; 5984 } else if (ret < 0) { 5985 return ret; 5986 } 5987 n = ret; 5988 size = sizeof(*info) + n * sizeof(info->ranges[0]); 5989 info = calloc(1, size); 5990 if (info == NULL) 5991 return -ENOMEM; 5992 info->nb_ranges_alloc = n; 5993 ret = rte_eth_representor_info_get(port_id, info); 5994 if (ret < 0) 5995 goto out; 5996 5997 /* Default controller and pf to caller. */ 5998 if (controller == -1) 5999 controller = info->controller; 6000 if (pf == -1) 6001 pf = info->pf; 6002 6003 /* Locate representor ID. */ 6004 ret = -ENOENT; 6005 for (i = 0; i < info->nb_ranges; ++i) { 6006 if (info->ranges[i].type != type) 6007 continue; 6008 if (info->ranges[i].controller != controller) 6009 continue; 6010 if (info->ranges[i].id_end < info->ranges[i].id_base) { 6011 RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n", 6012 port_id, info->ranges[i].id_base, 6013 info->ranges[i].id_end, i); 6014 continue; 6015 6016 } 6017 count = info->ranges[i].id_end - info->ranges[i].id_base + 1; 6018 switch (info->ranges[i].type) { 6019 case RTE_ETH_REPRESENTOR_PF: 6020 if (pf < info->ranges[i].pf || 6021 pf >= info->ranges[i].pf + count) 6022 continue; 6023 *repr_id = info->ranges[i].id_base + 6024 (pf - info->ranges[i].pf); 6025 ret = 0; 6026 goto out; 6027 case RTE_ETH_REPRESENTOR_VF: 6028 if (info->ranges[i].pf != pf) 6029 continue; 6030 if (representor_port < info->ranges[i].vf || 6031 representor_port >= info->ranges[i].vf + count) 6032 continue; 6033 *repr_id = info->ranges[i].id_base + 6034 (representor_port - info->ranges[i].vf); 6035 ret = 0; 6036 goto out; 6037 case RTE_ETH_REPRESENTOR_SF: 6038 if (info->ranges[i].pf != pf) 6039 continue; 6040 if (representor_port < info->ranges[i].sf || 6041 representor_port >= info->ranges[i].sf + count) 6042 continue; 6043 *repr_id = info->ranges[i].id_base + 6044 (representor_port - info->ranges[i].sf); 6045 ret = 0; 6046 goto out; 6047 default: 6048 break; 6049 } 6050 } 6051 out: 6052 free(info); 6053 return ret; 6054 } 6055 6056 static int 6057 eth_dev_handle_port_list(const char *cmd __rte_unused, 6058 const char *params __rte_unused, 6059 struct rte_tel_data *d) 6060 { 6061 int port_id; 6062 6063 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 6064 RTE_ETH_FOREACH_DEV(port_id) 6065 rte_tel_data_add_array_int(d, port_id); 6066 return 0; 6067 } 6068 6069 static void 6070 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 6071 const char *stat_name) 6072 { 6073 int q; 6074 struct rte_tel_data *q_data = rte_tel_data_alloc(); 6075 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL); 6076 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 6077 rte_tel_data_add_array_u64(q_data, q_stats[q]); 6078 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 6079 } 6080 6081 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s) 6082 6083 static int 6084 eth_dev_handle_port_stats(const char *cmd __rte_unused, 6085 const char *params, 6086 struct rte_tel_data *d) 6087 { 6088 struct rte_eth_stats stats; 6089 int port_id, ret; 6090 6091 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6092 
return -1; 6093 6094 port_id = atoi(params); 6095 if (!rte_eth_dev_is_valid_port(port_id)) 6096 return -1; 6097 6098 ret = rte_eth_stats_get(port_id, &stats); 6099 if (ret < 0) 6100 return -1; 6101 6102 rte_tel_data_start_dict(d); 6103 ADD_DICT_STAT(stats, ipackets); 6104 ADD_DICT_STAT(stats, opackets); 6105 ADD_DICT_STAT(stats, ibytes); 6106 ADD_DICT_STAT(stats, obytes); 6107 ADD_DICT_STAT(stats, imissed); 6108 ADD_DICT_STAT(stats, ierrors); 6109 ADD_DICT_STAT(stats, oerrors); 6110 ADD_DICT_STAT(stats, rx_nombuf); 6111 eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets"); 6112 eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets"); 6113 eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes"); 6114 eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes"); 6115 eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors"); 6116 6117 return 0; 6118 } 6119 6120 static int 6121 eth_dev_handle_port_xstats(const char *cmd __rte_unused, 6122 const char *params, 6123 struct rte_tel_data *d) 6124 { 6125 struct rte_eth_xstat *eth_xstats; 6126 struct rte_eth_xstat_name *xstat_names; 6127 int port_id, num_xstats; 6128 int i, ret; 6129 char *end_param; 6130 6131 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6132 return -1; 6133 6134 port_id = strtoul(params, &end_param, 0); 6135 if (*end_param != '\0') 6136 RTE_ETHDEV_LOG(NOTICE, 6137 "Extra parameters passed to ethdev telemetry command, ignoring"); 6138 if (!rte_eth_dev_is_valid_port(port_id)) 6139 return -1; 6140 6141 num_xstats = rte_eth_xstats_get(port_id, NULL, 0); 6142 if (num_xstats < 0) 6143 return -1; 6144 6145 /* use one malloc for both names and stats */ 6146 eth_xstats = malloc((sizeof(struct rte_eth_xstat) + 6147 sizeof(struct rte_eth_xstat_name)) * num_xstats); 6148 if (eth_xstats == NULL) 6149 return -1; 6150 xstat_names = (void *)ð_xstats[num_xstats]; 6151 6152 ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats); 6153 if (ret < 0 || ret > num_xstats) { 6154 free(eth_xstats); 6155 return -1; 6156 } 6157 6158 ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats); 6159 if (ret < 0 || ret > num_xstats) { 6160 free(eth_xstats); 6161 return -1; 6162 } 6163 6164 rte_tel_data_start_dict(d); 6165 for (i = 0; i < num_xstats; i++) 6166 rte_tel_data_add_dict_u64(d, xstat_names[i].name, 6167 eth_xstats[i].value); 6168 return 0; 6169 } 6170 6171 static int 6172 eth_dev_handle_port_link_status(const char *cmd __rte_unused, 6173 const char *params, 6174 struct rte_tel_data *d) 6175 { 6176 static const char *status_str = "status"; 6177 int ret, port_id; 6178 struct rte_eth_link link; 6179 char *end_param; 6180 6181 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6182 return -1; 6183 6184 port_id = strtoul(params, &end_param, 0); 6185 if (*end_param != '\0') 6186 RTE_ETHDEV_LOG(NOTICE, 6187 "Extra parameters passed to ethdev telemetry command, ignoring"); 6188 if (!rte_eth_dev_is_valid_port(port_id)) 6189 return -1; 6190 6191 ret = rte_eth_link_get_nowait(port_id, &link); 6192 if (ret < 0) 6193 return -1; 6194 6195 rte_tel_data_start_dict(d); 6196 if (!link.link_status) { 6197 rte_tel_data_add_dict_string(d, status_str, "DOWN"); 6198 return 0; 6199 } 6200 rte_tel_data_add_dict_string(d, status_str, "UP"); 6201 rte_tel_data_add_dict_u64(d, "speed", link.link_speed); 6202 rte_tel_data_add_dict_string(d, "duplex", 6203 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
6204 "full-duplex" : "half-duplex"); 6205 return 0; 6206 } 6207 6208 static int 6209 eth_dev_handle_port_info(const char *cmd __rte_unused, 6210 const char *params, 6211 struct rte_tel_data *d) 6212 { 6213 struct rte_tel_data *rxq_state, *txq_state; 6214 char mac_addr[RTE_ETHER_ADDR_LEN]; 6215 struct rte_eth_dev *eth_dev; 6216 char *end_param; 6217 int port_id, i; 6218 6219 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6220 return -1; 6221 6222 port_id = strtoul(params, &end_param, 0); 6223 if (*end_param != '\0') 6224 RTE_ETHDEV_LOG(NOTICE, 6225 "Extra parameters passed to ethdev telemetry command, ignoring"); 6226 6227 if (!rte_eth_dev_is_valid_port(port_id)) 6228 return -EINVAL; 6229 6230 eth_dev = &rte_eth_devices[port_id]; 6231 if (!eth_dev) 6232 return -EINVAL; 6233 6234 rxq_state = rte_tel_data_alloc(); 6235 if (!rxq_state) 6236 return -ENOMEM; 6237 6238 txq_state = rte_tel_data_alloc(); 6239 if (!txq_state) 6240 return -ENOMEM; 6241 6242 rte_tel_data_start_dict(d); 6243 rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); 6244 rte_tel_data_add_dict_int(d, "state", eth_dev->state); 6245 rte_tel_data_add_dict_int(d, "nb_rx_queues", 6246 eth_dev->data->nb_rx_queues); 6247 rte_tel_data_add_dict_int(d, "nb_tx_queues", 6248 eth_dev->data->nb_tx_queues); 6249 rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); 6250 rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); 6251 rte_tel_data_add_dict_int(d, "rx_mbuf_size_min", 6252 eth_dev->data->min_rx_buf_size); 6253 rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail", 6254 eth_dev->data->rx_mbuf_alloc_failed); 6255 snprintf(mac_addr, RTE_ETHER_ADDR_LEN, "%02x:%02x:%02x:%02x:%02x:%02x", 6256 eth_dev->data->mac_addrs->addr_bytes[0], 6257 eth_dev->data->mac_addrs->addr_bytes[1], 6258 eth_dev->data->mac_addrs->addr_bytes[2], 6259 eth_dev->data->mac_addrs->addr_bytes[3], 6260 eth_dev->data->mac_addrs->addr_bytes[4], 6261 eth_dev->data->mac_addrs->addr_bytes[5]); 6262 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); 6263 rte_tel_data_add_dict_int(d, "promiscuous", 6264 eth_dev->data->promiscuous); 6265 rte_tel_data_add_dict_int(d, "scattered_rx", 6266 eth_dev->data->scattered_rx); 6267 rte_tel_data_add_dict_int(d, "all_multicast", 6268 eth_dev->data->all_multicast); 6269 rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started); 6270 rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro); 6271 rte_tel_data_add_dict_int(d, "dev_configured", 6272 eth_dev->data->dev_configured); 6273 6274 rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL); 6275 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 6276 rte_tel_data_add_array_int(rxq_state, 6277 eth_dev->data->rx_queue_state[i]); 6278 6279 rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL); 6280 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 6281 rte_tel_data_add_array_int(txq_state, 6282 eth_dev->data->tx_queue_state[i]); 6283 6284 rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); 6285 rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); 6286 rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); 6287 rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags); 6288 rte_tel_data_add_dict_int(d, "rx_offloads", 6289 eth_dev->data->dev_conf.rxmode.offloads); 6290 rte_tel_data_add_dict_int(d, "tx_offloads", 6291 eth_dev->data->dev_conf.txmode.offloads); 6292 rte_tel_data_add_dict_int(d, "ethdev_rss_hf", 6293 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); 6294 6295 return 0; 6296 } 6297 
int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
		struct rte_hairpin_peer_info *cur_info,
		struct rte_hairpin_peer_info *peer_info,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory, but peer info is. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
		struct rte_hairpin_peer_info *peer_info,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (id=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
	return eth_err(port_id,
		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}
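/*
 * Illustrative application-side use of rte_eth_rx_metadata_negotiate(): the
 * negotiation must happen before the port is configured, and the bitmask is
 * updated in place to reflect what the driver can actually deliver. The flag
 * names come from rte_ethdev.h; "mark_delivery_unavailable" and the configure
 * arguments are hypothetical application code, shown only as a sketch:
 *
 *   uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *                       RTE_ETH_RX_METADATA_USER_MARK;
 *
 *   ret = rte_eth_rx_metadata_negotiate(port_id, &features);
 *   if (ret == 0 && (features & RTE_ETH_RX_METADATA_USER_MARK) == 0)
 *       mark_delivery_unavailable = true;
 *   ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */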
Parameters: int port_id"); 6404 rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info, 6405 "Returns the device info for a port. Parameters: int port_id"); 6406 } 6407