/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
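
/*
 * A minimal sketch of how the name/offset tables above are meant to be
 * consumed: any basic counter can be read generically from struct
 * rte_eth_stats by adding the recorded offset to the structure address
 * (port_id is assumed to be a valid, started port):
 *
 *	struct rte_eth_stats stats;
 *	unsigned int i;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0) {
 *		for (i = 0; i < RTE_NB_STATS; i++) {
 *			uint64_t value = *(const uint64_t *)
 *				((const char *)&stats +
 *				 eth_dev_stats_strings[i].offset);
 *			printf("%s: %" PRIu64 "\n",
 *			       eth_dev_stats_strings[i].name, value);
 *		}
 *	}
 */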

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() is not yet supporting the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
			(strcmp(iter->bus->name, "fslmc") == 0) ||
			(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
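
/*
 * A minimal usage sketch for the iterator API above, matching every port
 * that carries a given MAC address (the devargs string is only an example;
 * RTE_ETH_FOREACH_MATCHING_DEV() in rte_ethdev.h wraps the same calls):
 *
 *	struct rte_dev_iterator iterator;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iterator,
 *			"class=eth,mac=00:11:22:33:44:55") != 0)
 *		return;
 *	for (port_id = rte_eth_iterator_next(&iterator);
 *	     port_id != RTE_MAX_ETHPORTS;
 *	     port_id = rte_eth_iterator_next(&iterator))
 *		printf("matched port %u\n", port_id);
 *
 * rte_eth_iterator_cleanup() is only required when the loop is left before
 * the iterator is exhausted; rte_eth_iterator_next() cleans up on its own
 * once it runs out of ports.
 */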

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static void
eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&eth_dev_shared_data_lock);

	if (eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
			memset(eth_dev_shared_data->data, 0,
			       sizeof(eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ethdev = eth_dev_allocated(name);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return eth_dev;
}
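
/*
 * Sketch of how a PMD probe path typically chooses between
 * rte_eth_dev_allocate() above and rte_eth_dev_attach_secondary() below,
 * depending on the process type (the vdev handle and names here are
 * placeholders, not a real PMD):
 *
 *	struct rte_eth_dev *eth_dev;
 *	const char *name = rte_vdev_device_name(vdev);
 *
 *	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *		eth_dev = rte_eth_dev_allocate(name);
 *	else
 *		eth_dev = rte_eth_dev_attach_secondary(name);
 *	if (eth_dev == NULL)
 *		return -ENODEV;
 */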

/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port id both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		  const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
			if (rte_eth_devices[port_id].data->owner.id == owner_id)
				memset(&rte_eth_devices[port_id].data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner id=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}
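
/*
 * Sketch of the intended ownership flow for an application component that
 * wants exclusive control over a port (error handling omitted; the owner
 * name is just an example):
 *
 *	struct rte_eth_dev_owner owner = { .name = "my-app" };
 *
 *	rte_eth_dev_owner_new(&owner.id);
 *	rte_eth_dev_owner_set(port_id, &owner);
 *	...
 *	rte_eth_dev_owner_unset(port_id, owner.id);
 *
 * rte_eth_dev_owner_delete(owner.id) releases every port still owned by
 * that identifier at once.
 */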

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **rxq = dev->data->rx_queues;

	if (rxq[qid] == NULL)
		return;

	if (dev->dev_ops->rx_queue_release != NULL)
		(*dev->dev_ops->rx_queue_release)(dev, qid);
	rxq[qid] = NULL;
}

static void
eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **txq = dev->data->tx_queues;

	if (txq[qid] == NULL)
		return;

	if (dev->dev_ops->tx_queue_release != NULL)
		(*dev->dev_ops->tx_queue_release)(dev, qid);
	txq[qid] = NULL;
}

static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
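
/*
 * The per-queue start/stop calls above are mostly useful together with
 * deferred-start queues. A minimal sketch (queue_id, nb_rxd, socket_id and
 * mbuf_pool are placeholders; the port must already be configured):
 *
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.rx_deferred_start = 1;
 *	rte_eth_rx_queue_setup(port_id, queue_id, nb_rxd, socket_id,
 *			&rxconf, mbuf_pool);
 *	rte_eth_dev_start(port_id);
 *	... the queue stays stopped until it is started explicitly ...
 *	rte_eth_dev_rx_queue_start(port_id, queue_id);
 */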

static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	case ETH_SPEED_NUM_200G:
		return ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				       " %u != %u is not allowed\n",
				       port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "> max allowed value %u\n", port_id, config_size,
			       dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "< min allowed value %u\n", port_id, config_size,
			       (unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = 1ULL << __builtin_ctzll(offloads_diff);
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}
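
/*
 * Worked example for the helper above: a device reporting
 * max_rx_pktlen = 9618 and max_mtu = 9600 yields an overhead of
 * 9618 - 9600 = 18 bytes, while a device that does not report max_mtu
 * (UINT16_MAX) falls back to RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN,
 * i.e. 14 + 4 = 18 bytes as well.
 */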

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	uint32_t max_rx_pktlen;
	uint32_t overhead_len;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time prepare to do
	 * dev_configure() to avoid any non-anticipated behaviour.
	 * And set to 1 when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* Get the real Ethernet overhead length */
	overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
			dev_info.max_mtu);

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the maximum RX packet length is supported by the
	 * configured device.
	 */
	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
	max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
	if (max_rx_pktlen > dev_info.max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u max_rx_pktlen %u > max valid value %u\n",
			port_id, max_rx_pktlen, dev_info.max_rx_pktlen);
		ret = -EINVAL;
		goto rollback;
	} else if (max_rx_pktlen < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u max_rx_pktlen %u < min valid value %u\n",
			port_id, max_rx_pktlen, RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		    struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore MAC now if device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}
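
/*
 * Typical bring-up order around rte_eth_dev_start(), sketched with
 * placeholder sizes (one Rx and one Tx queue, 1024 descriptors each,
 * driver defaults everywhere else; mbuf_pool is assumed to exist):
 *
 *	struct rte_eth_conf port_conf = {0};
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), NULL);
 *	rte_eth_dev_start(port_id);
 */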

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	dev->data->dev_started = 0;
	ret = (*dev->dev_ops->dev_stop)(dev);
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			       "Requested Rx segments %u exceed supported %u\n",
			       n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = (1u << seg_capa->offset_align_log2) - 1;
	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			return -EINVAL;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			return -ENOTSUP;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				return -ENOTSUP;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					       offset,
					       seg_capa->offset_align_log2);
				return -EINVAL;
			}
		}
		if (mpl->private_data_size <
			sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR,
				       "%s private_data_size %u < %u\n",
				       mpl->name, mpl->private_data_size,
				       (unsigned int)sizeof
					(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		length = length != 0 ? length : *mbp_buf_size;
		if (*mbp_buf_size < length + offset) {
			RTE_ETHDEV_LOG(ERR,
				       "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
				       mpl->name, *mbp_buf_size,
				       length + offset, length, offset);
			return -EINVAL;
		}
	}
	return 0;
}
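
/*
 * The checks above validate the Rx segment array an application passes when
 * it requests buffer split. A minimal sketch of such a request (hdr_pool,
 * data_pool, nb_rxd and socket_id are placeholders), handing the segment
 * descriptions to rte_eth_rx_queue_setup() below with a NULL mempool:
 *
 *	union rte_eth_rxseg segs[2];
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	memset(segs, 0, sizeof(segs));
 *	segs[0].split.mp = hdr_pool;
 *	segs[0].split.length = 64;
 *	segs[1].split.mp = data_pool;
 *	segs[1].split.length = 0;	(length 0 means: use the pool buffer size)
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	rxconf.rx_seg = segs;
 *	rxconf.rx_nseg = RTE_DIM(segs);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, &rxconf, NULL);
 */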
2031 */ 2032 if (mp->private_data_size < 2033 sizeof(struct rte_pktmbuf_pool_private)) { 2034 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 2035 mp->name, mp->private_data_size, 2036 (unsigned int) 2037 sizeof(struct rte_pktmbuf_pool_private)); 2038 return -ENOSPC; 2039 } 2040 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2041 if (mbp_buf_size < dev_info.min_rx_bufsize + 2042 RTE_PKTMBUF_HEADROOM) { 2043 RTE_ETHDEV_LOG(ERR, 2044 "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", 2045 mp->name, mbp_buf_size, 2046 RTE_PKTMBUF_HEADROOM + 2047 dev_info.min_rx_bufsize, 2048 RTE_PKTMBUF_HEADROOM, 2049 dev_info.min_rx_bufsize); 2050 return -EINVAL; 2051 } 2052 } else { 2053 const struct rte_eth_rxseg_split *rx_seg; 2054 uint16_t n_seg; 2055 2056 /* Extended multi-segment configuration check. */ 2057 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) { 2058 RTE_ETHDEV_LOG(ERR, 2059 "Memory pool is null and no extended configuration provided\n"); 2060 return -EINVAL; 2061 } 2062 2063 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2064 n_seg = rx_conf->rx_nseg; 2065 2066 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2067 ret = rte_eth_rx_queue_check_split(rx_seg, n_seg, 2068 &mbp_buf_size, 2069 &dev_info); 2070 if (ret != 0) 2071 return ret; 2072 } else { 2073 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2074 return -EINVAL; 2075 } 2076 } 2077 2078 /* Use default specified by driver, if nb_rx_desc is zero */ 2079 if (nb_rx_desc == 0) { 2080 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2081 /* If driver default is also zero, fall back on EAL default */ 2082 if (nb_rx_desc == 0) 2083 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2084 } 2085 2086 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2087 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2088 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2089 2090 RTE_ETHDEV_LOG(ERR, 2091 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2092 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2093 dev_info.rx_desc_lim.nb_min, 2094 dev_info.rx_desc_lim.nb_align); 2095 return -EINVAL; 2096 } 2097 2098 if (dev->data->dev_started && 2099 !(dev_info.dev_capa & 2100 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2101 return -EBUSY; 2102 2103 if (dev->data->dev_started && 2104 (dev->data->rx_queue_state[rx_queue_id] != 2105 RTE_ETH_QUEUE_STATE_STOPPED)) 2106 return -EBUSY; 2107 2108 eth_dev_rxq_release(dev, rx_queue_id); 2109 2110 if (rx_conf == NULL) 2111 rx_conf = &dev_info.default_rxconf; 2112 2113 local_conf = *rx_conf; 2114 2115 /* 2116 * If an offloading has already been enabled in 2117 * rte_eth_dev_configure(), it has been enabled on all queues, 2118 * so there is no need to enable it in this queue again. 2119 * The local_conf.offloads input to underlying PMD only carries 2120 * those offloadings which are only enabled on this queue and 2121 * not enabled on all queues. 2122 */ 2123 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2124 2125 /* 2126 * New added offloadings for this queue are those not enabled in 2127 * rte_eth_dev_configure() and they must be per-queue type. 2128 * A pure per-port offloading can't be enabled on a queue while 2129 * disabled on another queue. A pure per-port offloading can't 2130 * be enabled for any queue as new added one if it hasn't been 2131 * enabled in rte_eth_dev_configure(). 
2132 */ 2133 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2134 local_conf.offloads) { 2135 RTE_ETHDEV_LOG(ERR, 2136 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2137 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2138 port_id, rx_queue_id, local_conf.offloads, 2139 dev_info.rx_queue_offload_capa, 2140 __func__); 2141 return -EINVAL; 2142 } 2143 2144 /* 2145 * If LRO is enabled, check that the maximum aggregated packet 2146 * size is supported by the configured device. 2147 */ 2148 /* Get the real Ethernet overhead length */ 2149 if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) { 2150 uint32_t overhead_len; 2151 uint32_t max_rx_pktlen; 2152 int ret; 2153 2154 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2155 dev_info.max_mtu); 2156 max_rx_pktlen = dev->data->mtu + overhead_len; 2157 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2158 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2159 ret = eth_dev_check_lro_pkt_size(port_id, 2160 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2161 max_rx_pktlen, 2162 dev_info.max_lro_pkt_size); 2163 if (ret != 0) 2164 return ret; 2165 } 2166 2167 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2168 socket_id, &local_conf, mp); 2169 if (!ret) { 2170 if (!dev->data->min_rx_buf_size || 2171 dev->data->min_rx_buf_size > mbp_buf_size) 2172 dev->data->min_rx_buf_size = mbp_buf_size; 2173 } 2174 2175 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2176 rx_conf, ret); 2177 return eth_err(port_id, ret); 2178 } 2179 2180 int 2181 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2182 uint16_t nb_rx_desc, 2183 const struct rte_eth_hairpin_conf *conf) 2184 { 2185 int ret; 2186 struct rte_eth_dev *dev; 2187 struct rte_eth_hairpin_cap cap; 2188 int i; 2189 int count; 2190 2191 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2192 dev = &rte_eth_devices[port_id]; 2193 2194 if (rx_queue_id >= dev->data->nb_rx_queues) { 2195 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id); 2196 return -EINVAL; 2197 } 2198 2199 if (conf == NULL) { 2200 RTE_ETHDEV_LOG(ERR, 2201 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2202 port_id); 2203 return -EINVAL; 2204 } 2205 2206 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2207 if (ret != 0) 2208 return ret; 2209 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, 2210 -ENOTSUP); 2211 /* if nb_rx_desc is zero use max number of desc from the driver. 
*/ 2212 if (nb_rx_desc == 0) 2213 nb_rx_desc = cap.max_nb_desc; 2214 if (nb_rx_desc > cap.max_nb_desc) { 2215 RTE_ETHDEV_LOG(ERR, 2216 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu", 2217 nb_rx_desc, cap.max_nb_desc); 2218 return -EINVAL; 2219 } 2220 if (conf->peer_count > cap.max_rx_2_tx) { 2221 RTE_ETHDEV_LOG(ERR, 2222 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu", 2223 conf->peer_count, cap.max_rx_2_tx); 2224 return -EINVAL; 2225 } 2226 if (conf->peer_count == 0) { 2227 RTE_ETHDEV_LOG(ERR, 2228 "Invalid value for number of peers for Rx queue(=%u), should be: > 0", 2229 conf->peer_count); 2230 return -EINVAL; 2231 } 2232 for (i = 0, count = 0; i < dev->data->nb_rx_queues && 2233 cap.max_nb_queues != UINT16_MAX; i++) { 2234 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) 2235 count++; 2236 } 2237 if (count > cap.max_nb_queues) { 2238 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d", 2239 cap.max_nb_queues); 2240 return -EINVAL; 2241 } 2242 if (dev->data->dev_started) 2243 return -EBUSY; 2244 eth_dev_rxq_release(dev, rx_queue_id); 2245 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, 2246 nb_rx_desc, conf); 2247 if (ret == 0) 2248 dev->data->rx_queue_state[rx_queue_id] = 2249 RTE_ETH_QUEUE_STATE_HAIRPIN; 2250 return eth_err(port_id, ret); 2251 } 2252 2253 int 2254 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2255 uint16_t nb_tx_desc, unsigned int socket_id, 2256 const struct rte_eth_txconf *tx_conf) 2257 { 2258 struct rte_eth_dev *dev; 2259 struct rte_eth_dev_info dev_info; 2260 struct rte_eth_txconf local_conf; 2261 int ret; 2262 2263 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2264 dev = &rte_eth_devices[port_id]; 2265 2266 if (tx_queue_id >= dev->data->nb_tx_queues) { 2267 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id); 2268 return -EINVAL; 2269 } 2270 2271 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP); 2272 2273 ret = rte_eth_dev_info_get(port_id, &dev_info); 2274 if (ret != 0) 2275 return ret; 2276 2277 /* Use default specified by driver, if nb_tx_desc is zero */ 2278 if (nb_tx_desc == 0) { 2279 nb_tx_desc = dev_info.default_txportconf.ring_size; 2280 /* If driver default is zero, fall back on EAL default */ 2281 if (nb_tx_desc == 0) 2282 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2283 } 2284 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2285 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2286 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2287 RTE_ETHDEV_LOG(ERR, 2288 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2289 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2290 dev_info.tx_desc_lim.nb_min, 2291 dev_info.tx_desc_lim.nb_align); 2292 return -EINVAL; 2293 } 2294 2295 if (dev->data->dev_started && 2296 !(dev_info.dev_capa & 2297 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2298 return -EBUSY; 2299 2300 if (dev->data->dev_started && 2301 (dev->data->tx_queue_state[tx_queue_id] != 2302 RTE_ETH_QUEUE_STATE_STOPPED)) 2303 return -EBUSY; 2304 2305 eth_dev_txq_release(dev, tx_queue_id); 2306 2307 if (tx_conf == NULL) 2308 tx_conf = &dev_info.default_txconf; 2309 2310 local_conf = *tx_conf; 2311 2312 /* 2313 * If an offloading has already been enabled in 2314 * rte_eth_dev_configure(), it has been enabled on all queues, 2315 * so there is no need to enable it in this queue again.
2316 * The local_conf.offloads input to underlying PMD only carries 2317 * those offloadings which are only enabled on this queue and 2318 * not enabled on all queues. 2319 */ 2320 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 2321 2322 /* 2323 * New added offloadings for this queue are those not enabled in 2324 * rte_eth_dev_configure() and they must be per-queue type. 2325 * A pure per-port offloading can't be enabled on a queue while 2326 * disabled on another queue. A pure per-port offloading can't 2327 * be enabled for any queue as new added one if it hasn't been 2328 * enabled in rte_eth_dev_configure(). 2329 */ 2330 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 2331 local_conf.offloads) { 2332 RTE_ETHDEV_LOG(ERR, 2333 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2334 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2335 port_id, tx_queue_id, local_conf.offloads, 2336 dev_info.tx_queue_offload_capa, 2337 __func__); 2338 return -EINVAL; 2339 } 2340 2341 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf); 2342 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 2343 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 2344 } 2345 2346 int 2347 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2348 uint16_t nb_tx_desc, 2349 const struct rte_eth_hairpin_conf *conf) 2350 { 2351 struct rte_eth_dev *dev; 2352 struct rte_eth_hairpin_cap cap; 2353 int i; 2354 int count; 2355 int ret; 2356 2357 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2358 dev = &rte_eth_devices[port_id]; 2359 2360 if (tx_queue_id >= dev->data->nb_tx_queues) { 2361 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id); 2362 return -EINVAL; 2363 } 2364 2365 if (conf == NULL) { 2366 RTE_ETHDEV_LOG(ERR, 2367 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n", 2368 port_id); 2369 return -EINVAL; 2370 } 2371 2372 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2373 if (ret != 0) 2374 return ret; 2375 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup, 2376 -ENOTSUP); 2377 /* if nb_tx_desc is zero use max number of desc from the driver.
*/ 2378 if (nb_tx_desc == 0) 2379 nb_tx_desc = cap.max_nb_desc; 2380 if (nb_tx_desc > cap.max_nb_desc) { 2381 RTE_ETHDEV_LOG(ERR, 2382 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu", 2383 nb_tx_desc, cap.max_nb_desc); 2384 return -EINVAL; 2385 } 2386 if (conf->peer_count > cap.max_tx_2_rx) { 2387 RTE_ETHDEV_LOG(ERR, 2388 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu", 2389 conf->peer_count, cap.max_tx_2_rx); 2390 return -EINVAL; 2391 } 2392 if (conf->peer_count == 0) { 2393 RTE_ETHDEV_LOG(ERR, 2394 "Invalid value for number of peers for Tx queue(=%u), should be: > 0", 2395 conf->peer_count); 2396 return -EINVAL; 2397 } 2398 for (i = 0, count = 0; i < dev->data->nb_tx_queues && 2399 cap.max_nb_queues != UINT16_MAX; i++) { 2400 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) 2401 count++; 2402 } 2403 if (count > cap.max_nb_queues) { 2404 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d", 2405 cap.max_nb_queues); 2406 return -EINVAL; 2407 } 2408 if (dev->data->dev_started) 2409 return -EBUSY; 2410 eth_dev_txq_release(dev, tx_queue_id); 2411 ret = (*dev->dev_ops->tx_hairpin_queue_setup) 2412 (dev, tx_queue_id, nb_tx_desc, conf); 2413 if (ret == 0) 2414 dev->data->tx_queue_state[tx_queue_id] = 2415 RTE_ETH_QUEUE_STATE_HAIRPIN; 2416 return eth_err(port_id, ret); 2417 } 2418 2419 int 2420 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port) 2421 { 2422 struct rte_eth_dev *dev; 2423 int ret; 2424 2425 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2426 dev = &rte_eth_devices[tx_port]; 2427 2428 if (dev->data->dev_started == 0) { 2429 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port); 2430 return -EBUSY; 2431 } 2432 2433 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP); 2434 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port); 2435 if (ret != 0) 2436 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d" 2437 " to Rx %d (%d - all ports)\n", 2438 tx_port, rx_port, RTE_MAX_ETHPORTS); 2439 2440 return ret; 2441 } 2442 2443 int 2444 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port) 2445 { 2446 struct rte_eth_dev *dev; 2447 int ret; 2448 2449 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2450 dev = &rte_eth_devices[tx_port]; 2451 2452 if (dev->data->dev_started == 0) { 2453 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port); 2454 return -EBUSY; 2455 } 2456 2457 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP); 2458 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port); 2459 if (ret != 0) 2460 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2461 " from Rx %d (%d - all ports)\n", 2462 tx_port, rx_port, RTE_MAX_ETHPORTS); 2463 2464 return ret; 2465 } 2466 2467 int 2468 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2469 size_t len, uint32_t direction) 2470 { 2471 struct rte_eth_dev *dev; 2472 int ret; 2473 2474 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2475 dev = &rte_eth_devices[port_id]; 2476 2477 if (peer_ports == NULL) { 2478 RTE_ETHDEV_LOG(ERR, 2479 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2480 port_id); 2481 return -EINVAL; 2482 } 2483 2484 if (len == 0) { 2485 RTE_ETHDEV_LOG(ERR, 2486 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2487 port_id); 2488 return -EINVAL; 2489 } 2490 2491 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports, 2492 -ENOTSUP); 2493 2494 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2495 len, direction); 2496 if (ret < 0) 2497
RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2498 port_id, direction ? "Rx" : "Tx"); 2499 2500 return ret; 2501 } 2502 2503 void 2504 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2505 void *userdata __rte_unused) 2506 { 2507 rte_pktmbuf_free_bulk(pkts, unsent); 2508 } 2509 2510 void 2511 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2512 void *userdata) 2513 { 2514 uint64_t *count = userdata; 2515 2516 rte_pktmbuf_free_bulk(pkts, unsent); 2517 *count += unsent; 2518 } 2519 2520 int 2521 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2522 buffer_tx_error_fn cbfn, void *userdata) 2523 { 2524 if (buffer == NULL) { 2525 RTE_ETHDEV_LOG(ERR, 2526 "Cannot set Tx buffer error callback to NULL buffer\n"); 2527 return -EINVAL; 2528 } 2529 2530 buffer->error_callback = cbfn; 2531 buffer->error_userdata = userdata; 2532 return 0; 2533 } 2534 2535 int 2536 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2537 { 2538 int ret = 0; 2539 2540 if (buffer == NULL) { 2541 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2542 return -EINVAL; 2543 } 2544 2545 buffer->size = size; 2546 if (buffer->error_callback == NULL) { 2547 ret = rte_eth_tx_buffer_set_err_callback( 2548 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2549 } 2550 2551 return ret; 2552 } 2553 2554 int 2555 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2556 { 2557 struct rte_eth_dev *dev; 2558 int ret; 2559 2560 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2561 dev = &rte_eth_devices[port_id]; 2562 2563 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2564 2565 /* Call driver to free pending mbufs. */ 2566 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2567 free_cnt); 2568 return eth_err(port_id, ret); 2569 } 2570 2571 int 2572 rte_eth_promiscuous_enable(uint16_t port_id) 2573 { 2574 struct rte_eth_dev *dev; 2575 int diag = 0; 2576 2577 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2578 dev = &rte_eth_devices[port_id]; 2579 2580 if (dev->data->promiscuous == 1) 2581 return 0; 2582 2583 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2584 2585 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2586 dev->data->promiscuous = (diag == 0) ? 
1 : 0; 2587 2588 return eth_err(port_id, diag); 2589 } 2590 2591 int 2592 rte_eth_promiscuous_disable(uint16_t port_id) 2593 { 2594 struct rte_eth_dev *dev; 2595 int diag = 0; 2596 2597 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2598 dev = &rte_eth_devices[port_id]; 2599 2600 if (dev->data->promiscuous == 0) 2601 return 0; 2602 2603 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2604 2605 dev->data->promiscuous = 0; 2606 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2607 if (diag != 0) 2608 dev->data->promiscuous = 1; 2609 2610 return eth_err(port_id, diag); 2611 } 2612 2613 int 2614 rte_eth_promiscuous_get(uint16_t port_id) 2615 { 2616 struct rte_eth_dev *dev; 2617 2618 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2619 dev = &rte_eth_devices[port_id]; 2620 2621 return dev->data->promiscuous; 2622 } 2623 2624 int 2625 rte_eth_allmulticast_enable(uint16_t port_id) 2626 { 2627 struct rte_eth_dev *dev; 2628 int diag; 2629 2630 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2631 dev = &rte_eth_devices[port_id]; 2632 2633 if (dev->data->all_multicast == 1) 2634 return 0; 2635 2636 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2637 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2638 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2639 2640 return eth_err(port_id, diag); 2641 } 2642 2643 int 2644 rte_eth_allmulticast_disable(uint16_t port_id) 2645 { 2646 struct rte_eth_dev *dev; 2647 int diag; 2648 2649 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2650 dev = &rte_eth_devices[port_id]; 2651 2652 if (dev->data->all_multicast == 0) 2653 return 0; 2654 2655 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2656 dev->data->all_multicast = 0; 2657 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2658 if (diag != 0) 2659 dev->data->all_multicast = 1; 2660 2661 return eth_err(port_id, diag); 2662 } 2663 2664 int 2665 rte_eth_allmulticast_get(uint16_t port_id) 2666 { 2667 struct rte_eth_dev *dev; 2668 2669 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2670 dev = &rte_eth_devices[port_id]; 2671 2672 return dev->data->all_multicast; 2673 } 2674 2675 int 2676 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2677 { 2678 struct rte_eth_dev *dev; 2679 2680 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2681 dev = &rte_eth_devices[port_id]; 2682 2683 if (eth_link == NULL) { 2684 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2685 port_id); 2686 return -EINVAL; 2687 } 2688 2689 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2690 rte_eth_linkstatus_get(dev, eth_link); 2691 else { 2692 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2693 (*dev->dev_ops->link_update)(dev, 1); 2694 *eth_link = dev->data->dev_link; 2695 } 2696 2697 return 0; 2698 } 2699 2700 int 2701 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2702 { 2703 struct rte_eth_dev *dev; 2704 2705 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2706 dev = &rte_eth_devices[port_id]; 2707 2708 if (eth_link == NULL) { 2709 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2710 port_id); 2711 return -EINVAL; 2712 } 2713 2714 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2715 rte_eth_linkstatus_get(dev, eth_link); 2716 else { 2717 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2718 (*dev->dev_ops->link_update)(dev, 0); 2719 *eth_link = dev->data->dev_link; 2720 } 2721 2722 return 0; 2723 } 2724 2725 const 
char * 2726 rte_eth_link_speed_to_str(uint32_t link_speed) 2727 { 2728 switch (link_speed) { 2729 case ETH_SPEED_NUM_NONE: return "None"; 2730 case ETH_SPEED_NUM_10M: return "10 Mbps"; 2731 case ETH_SPEED_NUM_100M: return "100 Mbps"; 2732 case ETH_SPEED_NUM_1G: return "1 Gbps"; 2733 case ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2734 case ETH_SPEED_NUM_5G: return "5 Gbps"; 2735 case ETH_SPEED_NUM_10G: return "10 Gbps"; 2736 case ETH_SPEED_NUM_20G: return "20 Gbps"; 2737 case ETH_SPEED_NUM_25G: return "25 Gbps"; 2738 case ETH_SPEED_NUM_40G: return "40 Gbps"; 2739 case ETH_SPEED_NUM_50G: return "50 Gbps"; 2740 case ETH_SPEED_NUM_56G: return "56 Gbps"; 2741 case ETH_SPEED_NUM_100G: return "100 Gbps"; 2742 case ETH_SPEED_NUM_200G: return "200 Gbps"; 2743 case ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2744 default: return "Invalid"; 2745 } 2746 } 2747 2748 int 2749 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2750 { 2751 if (str == NULL) { 2752 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2753 return -EINVAL; 2754 } 2755 2756 if (len == 0) { 2757 RTE_ETHDEV_LOG(ERR, 2758 "Cannot convert link to string with zero size\n"); 2759 return -EINVAL; 2760 } 2761 2762 if (eth_link == NULL) { 2763 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2764 return -EINVAL; 2765 } 2766 2767 if (eth_link->link_status == ETH_LINK_DOWN) 2768 return snprintf(str, len, "Link down"); 2769 else 2770 return snprintf(str, len, "Link up at %s %s %s", 2771 rte_eth_link_speed_to_str(eth_link->link_speed), 2772 (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ? 2773 "FDX" : "HDX", 2774 (eth_link->link_autoneg == ETH_LINK_AUTONEG) ? 2775 "Autoneg" : "Fixed"); 2776 } 2777 2778 int 2779 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2780 { 2781 struct rte_eth_dev *dev; 2782 2783 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2784 dev = &rte_eth_devices[port_id]; 2785 2786 if (stats == NULL) { 2787 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2788 port_id); 2789 return -EINVAL; 2790 } 2791 2792 memset(stats, 0, sizeof(*stats)); 2793 2794 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2795 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2796 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2797 } 2798 2799 int 2800 rte_eth_stats_reset(uint16_t port_id) 2801 { 2802 struct rte_eth_dev *dev; 2803 int ret; 2804 2805 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2806 dev = &rte_eth_devices[port_id]; 2807 2808 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2809 ret = (*dev->dev_ops->stats_reset)(dev); 2810 if (ret != 0) 2811 return eth_err(port_id, ret); 2812 2813 dev->data->rx_mbuf_alloc_failed = 0; 2814 2815 return 0; 2816 } 2817 2818 static inline int 2819 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2820 { 2821 uint16_t nb_rxqs, nb_txqs; 2822 int count; 2823 2824 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2825 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2826 2827 count = RTE_NB_STATS; 2828 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2829 count += nb_rxqs * RTE_NB_RXQ_STATS; 2830 count += nb_txqs * RTE_NB_TXQ_STATS; 2831 } 2832 2833 return count; 2834 } 2835 2836 static int 2837 eth_dev_get_xstats_count(uint16_t port_id) 2838 { 2839 struct rte_eth_dev *dev; 2840 int count; 2841 2842 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2843 dev = &rte_eth_devices[port_id]; 2844 if 
(dev->dev_ops->xstats_get_names != NULL) { 2845 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2846 if (count < 0) 2847 return eth_err(port_id, count); 2848 } else 2849 count = 0; 2850 2851 2852 count += eth_dev_get_xstats_basic_count(dev); 2853 2854 return count; 2855 } 2856 2857 int 2858 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2859 uint64_t *id) 2860 { 2861 int cnt_xstats, idx_xstat; 2862 2863 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2864 2865 if (xstat_name == NULL) { 2866 RTE_ETHDEV_LOG(ERR, 2867 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2868 port_id); 2869 return -ENOMEM; 2870 } 2871 2872 if (id == NULL) { 2873 RTE_ETHDEV_LOG(ERR, 2874 "Cannot get ethdev port %u xstats ID to NULL\n", 2875 port_id); 2876 return -ENOMEM; 2877 } 2878 2879 /* Get count */ 2880 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2881 if (cnt_xstats < 0) { 2882 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2883 return -ENODEV; 2884 } 2885 2886 /* Get id-name lookup table */ 2887 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2888 2889 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2890 port_id, xstats_names, cnt_xstats, NULL)) { 2891 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2892 return -1; 2893 } 2894 2895 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2896 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2897 *id = idx_xstat; 2898 return 0; 2899 }; 2900 } 2901 2902 return -EINVAL; 2903 } 2904 2905 /* retrieve basic stats names */ 2906 static int 2907 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2908 struct rte_eth_xstat_name *xstats_names) 2909 { 2910 int cnt_used_entries = 0; 2911 uint32_t idx, id_queue; 2912 uint16_t num_q; 2913 2914 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2915 strlcpy(xstats_names[cnt_used_entries].name, 2916 eth_dev_stats_strings[idx].name, 2917 sizeof(xstats_names[0].name)); 2918 cnt_used_entries++; 2919 } 2920 2921 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2922 return cnt_used_entries; 2923 2924 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2925 for (id_queue = 0; id_queue < num_q; id_queue++) { 2926 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2927 snprintf(xstats_names[cnt_used_entries].name, 2928 sizeof(xstats_names[0].name), 2929 "rx_q%u_%s", 2930 id_queue, eth_dev_rxq_stats_strings[idx].name); 2931 cnt_used_entries++; 2932 } 2933 2934 } 2935 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2936 for (id_queue = 0; id_queue < num_q; id_queue++) { 2937 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 2938 snprintf(xstats_names[cnt_used_entries].name, 2939 sizeof(xstats_names[0].name), 2940 "tx_q%u_%s", 2941 id_queue, eth_dev_txq_stats_strings[idx].name); 2942 cnt_used_entries++; 2943 } 2944 } 2945 return cnt_used_entries; 2946 } 2947 2948 /* retrieve ethdev extended statistics names */ 2949 int 2950 rte_eth_xstats_get_names_by_id(uint16_t port_id, 2951 struct rte_eth_xstat_name *xstats_names, unsigned int size, 2952 uint64_t *ids) 2953 { 2954 struct rte_eth_xstat_name *xstats_names_copy; 2955 unsigned int no_basic_stat_requested = 1; 2956 unsigned int no_ext_stat_requested = 1; 2957 unsigned int expected_entries; 2958 unsigned int basic_count; 2959 struct rte_eth_dev *dev; 2960 unsigned int i; 2961 int ret; 2962 2963 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2964 dev = &rte_eth_devices[port_id]; 2965 2966 basic_count = eth_dev_get_xstats_basic_count(dev); 2967 
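	/*
	 * The xstats id space exposed to the application is the generic
	 * (basic) stats first, followed by any driver-specific xstats.
	 * Ids passed to the PMD callback below are therefore rebased by
	 * basic_count, e.g. with 8 basic stats an application id of 10
	 * maps to driver xstat index 2.
	 */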
ret = eth_dev_get_xstats_count(port_id); 2968 if (ret < 0) 2969 return ret; 2970 expected_entries = (unsigned int)ret; 2971 2972 /* Return max number of stats if no ids given */ 2973 if (!ids) { 2974 if (!xstats_names) 2975 return expected_entries; 2976 else if (xstats_names && size < expected_entries) 2977 return expected_entries; 2978 } 2979 2980 if (ids && !xstats_names) 2981 return -EINVAL; 2982 2983 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 2984 uint64_t ids_copy[size]; 2985 2986 for (i = 0; i < size; i++) { 2987 if (ids[i] < basic_count) { 2988 no_basic_stat_requested = 0; 2989 break; 2990 } 2991 2992 /* 2993 * Convert ids to xstats ids that PMD knows. 2994 * ids known by user are basic + extended stats. 2995 */ 2996 ids_copy[i] = ids[i] - basic_count; 2997 } 2998 2999 if (no_basic_stat_requested) 3000 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3001 ids_copy, xstats_names, size); 3002 } 3003 3004 /* Retrieve all stats */ 3005 if (!ids) { 3006 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3007 expected_entries); 3008 if (num_stats < 0 || num_stats > (int)expected_entries) 3009 return num_stats; 3010 else 3011 return expected_entries; 3012 } 3013 3014 xstats_names_copy = calloc(expected_entries, 3015 sizeof(struct rte_eth_xstat_name)); 3016 3017 if (!xstats_names_copy) { 3018 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3019 return -ENOMEM; 3020 } 3021 3022 if (ids) { 3023 for (i = 0; i < size; i++) { 3024 if (ids[i] >= basic_count) { 3025 no_ext_stat_requested = 0; 3026 break; 3027 } 3028 } 3029 } 3030 3031 /* Fill xstats_names_copy structure */ 3032 if (ids && no_ext_stat_requested) { 3033 eth_basic_stats_get_names(dev, xstats_names_copy); 3034 } else { 3035 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3036 expected_entries); 3037 if (ret < 0) { 3038 free(xstats_names_copy); 3039 return ret; 3040 } 3041 } 3042 3043 /* Filter stats */ 3044 for (i = 0; i < size; i++) { 3045 if (ids[i] >= expected_entries) { 3046 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3047 free(xstats_names_copy); 3048 return -1; 3049 } 3050 xstats_names[i] = xstats_names_copy[ids[i]]; 3051 } 3052 3053 free(xstats_names_copy); 3054 return size; 3055 } 3056 3057 int 3058 rte_eth_xstats_get_names(uint16_t port_id, 3059 struct rte_eth_xstat_name *xstats_names, 3060 unsigned int size) 3061 { 3062 struct rte_eth_dev *dev; 3063 int cnt_used_entries; 3064 int cnt_expected_entries; 3065 int cnt_driver_entries; 3066 3067 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3068 if (xstats_names == NULL || cnt_expected_entries < 0 || 3069 (int)size < cnt_expected_entries) 3070 return cnt_expected_entries; 3071 3072 /* port_id checked in eth_dev_get_xstats_count() */ 3073 dev = &rte_eth_devices[port_id]; 3074 3075 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3076 3077 if (dev->dev_ops->xstats_get_names != NULL) { 3078 /* If there are any driver-specific xstats, append them 3079 * to end of list. 
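	 * This keeps the name array in the same order and at the same
	 * indexes as the value array later produced by rte_eth_xstats_get().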
3080 */ 3081 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 3082 dev, 3083 xstats_names + cnt_used_entries, 3084 size - cnt_used_entries); 3085 if (cnt_driver_entries < 0) 3086 return eth_err(port_id, cnt_driver_entries); 3087 cnt_used_entries += cnt_driver_entries; 3088 } 3089 3090 return cnt_used_entries; 3091 } 3092 3093 3094 static int 3095 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 3096 { 3097 struct rte_eth_dev *dev; 3098 struct rte_eth_stats eth_stats; 3099 unsigned int count = 0, i, q; 3100 uint64_t val, *stats_ptr; 3101 uint16_t nb_rxqs, nb_txqs; 3102 int ret; 3103 3104 ret = rte_eth_stats_get(port_id, ð_stats); 3105 if (ret < 0) 3106 return ret; 3107 3108 dev = &rte_eth_devices[port_id]; 3109 3110 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3111 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3112 3113 /* global stats */ 3114 for (i = 0; i < RTE_NB_STATS; i++) { 3115 stats_ptr = RTE_PTR_ADD(ð_stats, 3116 eth_dev_stats_strings[i].offset); 3117 val = *stats_ptr; 3118 xstats[count++].value = val; 3119 } 3120 3121 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3122 return count; 3123 3124 /* per-rxq stats */ 3125 for (q = 0; q < nb_rxqs; q++) { 3126 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 3127 stats_ptr = RTE_PTR_ADD(ð_stats, 3128 eth_dev_rxq_stats_strings[i].offset + 3129 q * sizeof(uint64_t)); 3130 val = *stats_ptr; 3131 xstats[count++].value = val; 3132 } 3133 } 3134 3135 /* per-txq stats */ 3136 for (q = 0; q < nb_txqs; q++) { 3137 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 3138 stats_ptr = RTE_PTR_ADD(ð_stats, 3139 eth_dev_txq_stats_strings[i].offset + 3140 q * sizeof(uint64_t)); 3141 val = *stats_ptr; 3142 xstats[count++].value = val; 3143 } 3144 } 3145 return count; 3146 } 3147 3148 /* retrieve ethdev extended statistics */ 3149 int 3150 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 3151 uint64_t *values, unsigned int size) 3152 { 3153 unsigned int no_basic_stat_requested = 1; 3154 unsigned int no_ext_stat_requested = 1; 3155 unsigned int num_xstats_filled; 3156 unsigned int basic_count; 3157 uint16_t expected_entries; 3158 struct rte_eth_dev *dev; 3159 unsigned int i; 3160 int ret; 3161 3162 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3163 dev = &rte_eth_devices[port_id]; 3164 3165 ret = eth_dev_get_xstats_count(port_id); 3166 if (ret < 0) 3167 return ret; 3168 expected_entries = (uint16_t)ret; 3169 struct rte_eth_xstat xstats[expected_entries]; 3170 basic_count = eth_dev_get_xstats_basic_count(dev); 3171 3172 /* Return max number of stats if no ids given */ 3173 if (!ids) { 3174 if (!values) 3175 return expected_entries; 3176 else if (values && size < expected_entries) 3177 return expected_entries; 3178 } 3179 3180 if (ids && !values) 3181 return -EINVAL; 3182 3183 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 3184 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); 3185 uint64_t ids_copy[size]; 3186 3187 for (i = 0; i < size; i++) { 3188 if (ids[i] < basic_count) { 3189 no_basic_stat_requested = 0; 3190 break; 3191 } 3192 3193 /* 3194 * Convert ids to xstats ids that PMD knows. 3195 * ids known by user are basic + extended stats. 
3196 */ 3197 ids_copy[i] = ids[i] - basic_count; 3198 } 3199 3200 if (no_basic_stat_requested) 3201 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3202 values, size); 3203 } 3204 3205 if (ids) { 3206 for (i = 0; i < size; i++) { 3207 if (ids[i] >= basic_count) { 3208 no_ext_stat_requested = 0; 3209 break; 3210 } 3211 } 3212 } 3213 3214 /* Fill the xstats structure */ 3215 if (ids && no_ext_stat_requested) 3216 ret = eth_basic_stats_get(port_id, xstats); 3217 else 3218 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3219 3220 if (ret < 0) 3221 return ret; 3222 num_xstats_filled = (unsigned int)ret; 3223 3224 /* Return all stats */ 3225 if (!ids) { 3226 for (i = 0; i < num_xstats_filled; i++) 3227 values[i] = xstats[i].value; 3228 return expected_entries; 3229 } 3230 3231 /* Filter stats */ 3232 for (i = 0; i < size; i++) { 3233 if (ids[i] >= expected_entries) { 3234 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3235 return -1; 3236 } 3237 values[i] = xstats[ids[i]].value; 3238 } 3239 return size; 3240 } 3241 3242 int 3243 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3244 unsigned int n) 3245 { 3246 struct rte_eth_dev *dev; 3247 unsigned int count = 0, i; 3248 signed int xcount = 0; 3249 uint16_t nb_rxqs, nb_txqs; 3250 int ret; 3251 3252 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3253 dev = &rte_eth_devices[port_id]; 3254 3255 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3256 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3257 3258 /* Return generic statistics */ 3259 count = RTE_NB_STATS; 3260 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) 3261 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS); 3262 3263 /* implemented by the driver */ 3264 if (dev->dev_ops->xstats_get != NULL) { 3265 /* Retrieve the xstats from the driver at the end of the 3266 * xstats struct. 3267 */ 3268 xcount = (*dev->dev_ops->xstats_get)(dev, 3269 xstats ? xstats + count : NULL, 3270 (n > count) ? 
n - count : 0); 3271 3272 if (xcount < 0) 3273 return eth_err(port_id, xcount); 3274 } 3275 3276 if (n < count + xcount || xstats == NULL) 3277 return count + xcount; 3278 3279 /* now fill the xstats structure */ 3280 ret = eth_basic_stats_get(port_id, xstats); 3281 if (ret < 0) 3282 return ret; 3283 count = ret; 3284 3285 for (i = 0; i < count; i++) 3286 xstats[i].id = i; 3287 /* add an offset to driver-specific stats */ 3288 for ( ; i < count + xcount; i++) 3289 xstats[i].id += count; 3290 3291 return count + xcount; 3292 } 3293 3294 /* reset ethdev extended statistics */ 3295 int 3296 rte_eth_xstats_reset(uint16_t port_id) 3297 { 3298 struct rte_eth_dev *dev; 3299 3300 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3301 dev = &rte_eth_devices[port_id]; 3302 3303 /* implemented by the driver */ 3304 if (dev->dev_ops->xstats_reset != NULL) 3305 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3306 3307 /* fallback to default */ 3308 return rte_eth_stats_reset(port_id); 3309 } 3310 3311 static int 3312 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3313 uint8_t stat_idx, uint8_t is_rx) 3314 { 3315 struct rte_eth_dev *dev; 3316 3317 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3318 dev = &rte_eth_devices[port_id]; 3319 3320 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3321 return -EINVAL; 3322 3323 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3324 return -EINVAL; 3325 3326 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3327 return -EINVAL; 3328 3329 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 3330 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3331 } 3332 3333 int 3334 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3335 uint8_t stat_idx) 3336 { 3337 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3338 tx_queue_id, 3339 stat_idx, STAT_QMAP_TX)); 3340 } 3341 3342 int 3343 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3344 uint8_t stat_idx) 3345 { 3346 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3347 rx_queue_id, 3348 stat_idx, STAT_QMAP_RX)); 3349 } 3350 3351 int 3352 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3353 { 3354 struct rte_eth_dev *dev; 3355 3356 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3357 dev = &rte_eth_devices[port_id]; 3358 3359 if (fw_version == NULL && fw_size > 0) { 3360 RTE_ETHDEV_LOG(ERR, 3361 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3362 port_id); 3363 return -EINVAL; 3364 } 3365 3366 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 3367 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3368 fw_version, fw_size)); 3369 } 3370 3371 int 3372 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3373 { 3374 struct rte_eth_dev *dev; 3375 const struct rte_eth_desc_lim lim = { 3376 .nb_max = UINT16_MAX, 3377 .nb_min = 0, 3378 .nb_align = 1, 3379 .nb_seg_max = UINT16_MAX, 3380 .nb_mtu_seg_max = UINT16_MAX, 3381 }; 3382 int diag; 3383 3384 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3385 dev = &rte_eth_devices[port_id]; 3386 3387 if (dev_info == NULL) { 3388 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3389 port_id); 3390 return -EINVAL; 3391 } 3392 3393 /* 3394 * Init dev_info before port_id check since caller does not have 3395 * return status and does not know if get is successful or not. 
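	 * It is also cleared again below if the driver callback fails,
	 * so the caller never sees partially filled information.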
3396 */ 3397 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3398 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3399 3400 dev_info->rx_desc_lim = lim; 3401 dev_info->tx_desc_lim = lim; 3402 dev_info->device = dev->device; 3403 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3404 dev_info->max_mtu = UINT16_MAX; 3405 3406 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 3407 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3408 if (diag != 0) { 3409 /* Cleanup already filled in device information */ 3410 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3411 return eth_err(port_id, diag); 3412 } 3413 3414 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3415 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3416 RTE_MAX_QUEUES_PER_PORT); 3417 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3418 RTE_MAX_QUEUES_PER_PORT); 3419 3420 dev_info->driver_name = dev->device->driver->name; 3421 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3422 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3423 3424 dev_info->dev_flags = &dev->data->dev_flags; 3425 3426 return 0; 3427 } 3428 3429 int 3430 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3431 { 3432 struct rte_eth_dev *dev; 3433 3434 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3435 dev = &rte_eth_devices[port_id]; 3436 3437 if (dev_conf == NULL) { 3438 RTE_ETHDEV_LOG(ERR, 3439 "Cannot get ethdev port %u configuration to NULL\n", 3440 port_id); 3441 return -EINVAL; 3442 } 3443 3444 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3445 3446 return 0; 3447 } 3448 3449 int 3450 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3451 uint32_t *ptypes, int num) 3452 { 3453 int i, j; 3454 struct rte_eth_dev *dev; 3455 const uint32_t *all_ptypes; 3456 3457 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3458 dev = &rte_eth_devices[port_id]; 3459 3460 if (ptypes == NULL && num > 0) { 3461 RTE_ETHDEV_LOG(ERR, 3462 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3463 port_id); 3464 return -EINVAL; 3465 } 3466 3467 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3468 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3469 3470 if (!all_ptypes) 3471 return 0; 3472 3473 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3474 if (all_ptypes[i] & ptype_mask) { 3475 if (j < num) 3476 ptypes[j] = all_ptypes[i]; 3477 j++; 3478 } 3479 3480 return j; 3481 } 3482 3483 int 3484 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3485 uint32_t *set_ptypes, unsigned int num) 3486 { 3487 const uint32_t valid_ptype_masks[] = { 3488 RTE_PTYPE_L2_MASK, 3489 RTE_PTYPE_L3_MASK, 3490 RTE_PTYPE_L4_MASK, 3491 RTE_PTYPE_TUNNEL_MASK, 3492 RTE_PTYPE_INNER_L2_MASK, 3493 RTE_PTYPE_INNER_L3_MASK, 3494 RTE_PTYPE_INNER_L4_MASK, 3495 }; 3496 const uint32_t *all_ptypes; 3497 struct rte_eth_dev *dev; 3498 uint32_t unused_mask; 3499 unsigned int i, j; 3500 int ret; 3501 3502 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3503 dev = &rte_eth_devices[port_id]; 3504 3505 if (num > 0 && set_ptypes == NULL) { 3506 RTE_ETHDEV_LOG(ERR, 3507 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3508 port_id); 3509 return -EINVAL; 3510 } 3511 3512 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3513 *dev->dev_ops->dev_ptypes_set == NULL) { 3514 ret = 0; 3515 goto ptype_unknown; 3516 } 3517 3518 if (ptype_mask == 
0) { 3519 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3520 ptype_mask); 3521 goto ptype_unknown; 3522 } 3523 3524 unused_mask = ptype_mask; 3525 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3526 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3527 if (mask && mask != valid_ptype_masks[i]) { 3528 ret = -EINVAL; 3529 goto ptype_unknown; 3530 } 3531 unused_mask &= ~valid_ptype_masks[i]; 3532 } 3533 3534 if (unused_mask) { 3535 ret = -EINVAL; 3536 goto ptype_unknown; 3537 } 3538 3539 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3540 if (all_ptypes == NULL) { 3541 ret = 0; 3542 goto ptype_unknown; 3543 } 3544 3545 /* 3546 * Accommodate as many set_ptypes as possible. If the supplied 3547 * set_ptypes array is insufficient fill it partially. 3548 */ 3549 for (i = 0, j = 0; set_ptypes != NULL && 3550 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3551 if (ptype_mask & all_ptypes[i]) { 3552 if (j < num - 1) { 3553 set_ptypes[j] = all_ptypes[i]; 3554 j++; 3555 continue; 3556 } 3557 break; 3558 } 3559 } 3560 3561 if (set_ptypes != NULL && j < num) 3562 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3563 3564 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3565 3566 ptype_unknown: 3567 if (num > 0) 3568 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3569 3570 return ret; 3571 } 3572 3573 int 3574 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3575 unsigned int num) 3576 { 3577 int32_t ret; 3578 struct rte_eth_dev *dev; 3579 struct rte_eth_dev_info dev_info; 3580 3581 if (ma == NULL) { 3582 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3583 return -EINVAL; 3584 } 3585 3586 /* will check for us that port_id is a valid one */ 3587 ret = rte_eth_dev_info_get(port_id, &dev_info); 3588 if (ret != 0) 3589 return ret; 3590 3591 dev = &rte_eth_devices[port_id]; 3592 num = RTE_MIN(dev_info.max_mac_addrs, num); 3593 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3594 3595 return num; 3596 } 3597 3598 int 3599 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3600 { 3601 struct rte_eth_dev *dev; 3602 3603 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3604 dev = &rte_eth_devices[port_id]; 3605 3606 if (mac_addr == NULL) { 3607 RTE_ETHDEV_LOG(ERR, 3608 "Cannot get ethdev port %u MAC address to NULL\n", 3609 port_id); 3610 return -EINVAL; 3611 } 3612 3613 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3614 3615 return 0; 3616 } 3617 3618 int 3619 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3620 { 3621 struct rte_eth_dev *dev; 3622 3623 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3624 dev = &rte_eth_devices[port_id]; 3625 3626 if (mtu == NULL) { 3627 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3628 port_id); 3629 return -EINVAL; 3630 } 3631 3632 *mtu = dev->data->mtu; 3633 return 0; 3634 } 3635 3636 int 3637 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3638 { 3639 int ret; 3640 struct rte_eth_dev_info dev_info; 3641 struct rte_eth_dev *dev; 3642 3643 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3644 dev = &rte_eth_devices[port_id]; 3645 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3646 3647 /* 3648 * Check if the device supports dev_infos_get, if it does not 3649 * skip min_mtu/max_mtu validation here as this requires values 3650 * that are populated within the call to rte_eth_dev_info_get() 3651 * which relies on dev->dev_ops->dev_infos_get. 
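	 * When the information is available, the check below compares
	 * mtu + overhead_len against max_rx_pktlen; the overhead comes from
	 * eth_dev_get_overhead_len(), i.e. the driver's max_rx_pktlen/max_mtu
	 * difference when reported, or Ethernet header plus CRC otherwise.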
3652 */ 3653 if (*dev->dev_ops->dev_infos_get != NULL) { 3654 uint16_t overhead_len; 3655 uint32_t frame_size; 3656 3657 ret = rte_eth_dev_info_get(port_id, &dev_info); 3658 if (ret != 0) 3659 return ret; 3660 3661 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu) 3662 return -EINVAL; 3663 3664 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 3665 dev_info.max_mtu); 3666 frame_size = mtu + overhead_len; 3667 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) 3668 return -EINVAL; 3669 } 3670 3671 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3672 if (ret == 0) 3673 dev->data->mtu = mtu; 3674 3675 return eth_err(port_id, ret); 3676 } 3677 3678 int 3679 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3680 { 3681 struct rte_eth_dev *dev; 3682 int ret; 3683 3684 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3685 dev = &rte_eth_devices[port_id]; 3686 3687 if (!(dev->data->dev_conf.rxmode.offloads & 3688 DEV_RX_OFFLOAD_VLAN_FILTER)) { 3689 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n", 3690 port_id); 3691 return -ENOSYS; 3692 } 3693 3694 if (vlan_id > 4095) { 3695 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3696 port_id, vlan_id); 3697 return -EINVAL; 3698 } 3699 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3700 3701 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3702 if (ret == 0) { 3703 struct rte_vlan_filter_conf *vfc; 3704 int vidx; 3705 int vbit; 3706 3707 vfc = &dev->data->vlan_filter_conf; 3708 vidx = vlan_id / 64; 3709 vbit = vlan_id % 64; 3710 3711 if (on) 3712 vfc->ids[vidx] |= UINT64_C(1) << vbit; 3713 else 3714 vfc->ids[vidx] &= ~(UINT64_C(1) << vbit); 3715 } 3716 3717 return eth_err(port_id, ret); 3718 } 3719 3720 int 3721 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3722 int on) 3723 { 3724 struct rte_eth_dev *dev; 3725 3726 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3727 dev = &rte_eth_devices[port_id]; 3728 3729 if (rx_queue_id >= dev->data->nb_rx_queues) { 3730 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3731 return -EINVAL; 3732 } 3733 3734 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3735 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3736 3737 return 0; 3738 } 3739 3740 int 3741 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3742 enum rte_vlan_type vlan_type, 3743 uint16_t tpid) 3744 { 3745 struct rte_eth_dev *dev; 3746 3747 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3748 dev = &rte_eth_devices[port_id]; 3749 3750 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3751 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3752 tpid)); 3753 } 3754 3755 int 3756 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3757 { 3758 struct rte_eth_dev_info dev_info; 3759 struct rte_eth_dev *dev; 3760 int ret = 0; 3761 int mask = 0; 3762 int cur, org = 0; 3763 uint64_t orig_offloads; 3764 uint64_t dev_offloads; 3765 uint64_t new_offloads; 3766 3767 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3768 dev = &rte_eth_devices[port_id]; 3769 3770 /* save original values in case of failure */ 3771 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3772 dev_offloads = orig_offloads; 3773 3774 /* check which option changed by application */ 3775 cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD); 3776 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP); 3777 if (cur != org) { 3778 if (cur) 3779 dev_offloads |= 
DEV_RX_OFFLOAD_VLAN_STRIP; 3780 else 3781 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3782 mask |= ETH_VLAN_STRIP_MASK; 3783 } 3784 3785 cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD); 3786 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER); 3787 if (cur != org) { 3788 if (cur) 3789 dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 3790 else 3791 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3792 mask |= ETH_VLAN_FILTER_MASK; 3793 } 3794 3795 cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD); 3796 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND); 3797 if (cur != org) { 3798 if (cur) 3799 dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 3800 else 3801 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3802 mask |= ETH_VLAN_EXTEND_MASK; 3803 } 3804 3805 cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD); 3806 org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP); 3807 if (cur != org) { 3808 if (cur) 3809 dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3810 else 3811 dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3812 mask |= ETH_QINQ_STRIP_MASK; 3813 } 3814 3815 /*no change*/ 3816 if (mask == 0) 3817 return ret; 3818 3819 ret = rte_eth_dev_info_get(port_id, &dev_info); 3820 if (ret != 0) 3821 return ret; 3822 3823 /* Rx VLAN offloading must be within its device capabilities */ 3824 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3825 new_offloads = dev_offloads & ~orig_offloads; 3826 RTE_ETHDEV_LOG(ERR, 3827 "Ethdev port_id=%u requested new added VLAN offloads " 3828 "0x%" PRIx64 " must be within Rx offloads capabilities " 3829 "0x%" PRIx64 " in %s()\n", 3830 port_id, new_offloads, dev_info.rx_offload_capa, 3831 __func__); 3832 return -EINVAL; 3833 } 3834 3835 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3836 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3837 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3838 if (ret) { 3839 /* hit an error restore original values */ 3840 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3841 } 3842 3843 return eth_err(port_id, ret); 3844 } 3845 3846 int 3847 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3848 { 3849 struct rte_eth_dev *dev; 3850 uint64_t *dev_offloads; 3851 int ret = 0; 3852 3853 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3854 dev = &rte_eth_devices[port_id]; 3855 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3856 3857 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 3858 ret |= ETH_VLAN_STRIP_OFFLOAD; 3859 3860 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 3861 ret |= ETH_VLAN_FILTER_OFFLOAD; 3862 3863 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 3864 ret |= ETH_VLAN_EXTEND_OFFLOAD; 3865 3866 if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP) 3867 ret |= ETH_QINQ_STRIP_OFFLOAD; 3868 3869 return ret; 3870 } 3871 3872 int 3873 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3874 { 3875 struct rte_eth_dev *dev; 3876 3877 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3878 dev = &rte_eth_devices[port_id]; 3879 3880 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3881 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3882 } 3883 3884 int 3885 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3886 { 3887 struct rte_eth_dev *dev; 3888 3889 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3890 dev = &rte_eth_devices[port_id]; 3891 3892 if (fc_conf == NULL) { 3893 RTE_ETHDEV_LOG(ERR, 3894 "Cannot get ethdev port %u flow control config to NULL\n", 3895 port_id); 3896 return -EINVAL; 3897 } 3898 3899 
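	/*
	 * fc_conf is zeroed before calling the driver so that any field the
	 * PMD does not fill in is reported back to the caller as zero.
	 */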
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3900 memset(fc_conf, 0, sizeof(*fc_conf)); 3901 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3902 } 3903 3904 int 3905 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3906 { 3907 struct rte_eth_dev *dev; 3908 3909 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3910 dev = &rte_eth_devices[port_id]; 3911 3912 if (fc_conf == NULL) { 3913 RTE_ETHDEV_LOG(ERR, 3914 "Cannot set ethdev port %u flow control from NULL config\n", 3915 port_id); 3916 return -EINVAL; 3917 } 3918 3919 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 3920 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 3921 return -EINVAL; 3922 } 3923 3924 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 3925 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 3926 } 3927 3928 int 3929 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 3930 struct rte_eth_pfc_conf *pfc_conf) 3931 { 3932 struct rte_eth_dev *dev; 3933 3934 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3935 dev = &rte_eth_devices[port_id]; 3936 3937 if (pfc_conf == NULL) { 3938 RTE_ETHDEV_LOG(ERR, 3939 "Cannot set ethdev port %u priority flow control from NULL config\n", 3940 port_id); 3941 return -EINVAL; 3942 } 3943 3944 if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) { 3945 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 3946 return -EINVAL; 3947 } 3948 3949 /* High water, low water validation are device specific */ 3950 if (*dev->dev_ops->priority_flow_ctrl_set) 3951 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 3952 (dev, pfc_conf)); 3953 return -ENOTSUP; 3954 } 3955 3956 static int 3957 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 3958 uint16_t reta_size) 3959 { 3960 uint16_t i, num; 3961 3962 num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE; 3963 for (i = 0; i < num; i++) { 3964 if (reta_conf[i].mask) 3965 return 0; 3966 } 3967 3968 return -EINVAL; 3969 } 3970 3971 static int 3972 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 3973 uint16_t reta_size, 3974 uint16_t max_rxq) 3975 { 3976 uint16_t i, idx, shift; 3977 3978 if (max_rxq == 0) { 3979 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 3980 return -EINVAL; 3981 } 3982 3983 for (i = 0; i < reta_size; i++) { 3984 idx = i / RTE_RETA_GROUP_SIZE; 3985 shift = i % RTE_RETA_GROUP_SIZE; 3986 if ((reta_conf[idx].mask & (1ULL << shift)) && 3987 (reta_conf[idx].reta[shift] >= max_rxq)) { 3988 RTE_ETHDEV_LOG(ERR, 3989 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 3990 idx, shift, 3991 reta_conf[idx].reta[shift], max_rxq); 3992 return -EINVAL; 3993 } 3994 } 3995 3996 return 0; 3997 } 3998 3999 int 4000 rte_eth_dev_rss_reta_update(uint16_t port_id, 4001 struct rte_eth_rss_reta_entry64 *reta_conf, 4002 uint16_t reta_size) 4003 { 4004 struct rte_eth_dev *dev; 4005 int ret; 4006 4007 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4008 dev = &rte_eth_devices[port_id]; 4009 4010 if (reta_conf == NULL) { 4011 RTE_ETHDEV_LOG(ERR, 4012 "Cannot update ethdev port %u RSS RETA to NULL\n", 4013 port_id); 4014 return -EINVAL; 4015 } 4016 4017 if (reta_size == 0) { 4018 RTE_ETHDEV_LOG(ERR, 4019 "Cannot update ethdev port %u RSS RETA with zero size\n", 4020 port_id); 4021 return -EINVAL; 4022 } 4023 4024 /* Check mask bits */ 4025 ret = eth_check_reta_mask(reta_conf, reta_size); 4026 if (ret < 0) 4027 return ret; 4028 4029 /* 
Check entry value */ 4030 ret = eth_check_reta_entry(reta_conf, reta_size, 4031 dev->data->nb_rx_queues); 4032 if (ret < 0) 4033 return ret; 4034 4035 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 4036 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4037 reta_size)); 4038 } 4039 4040 int 4041 rte_eth_dev_rss_reta_query(uint16_t port_id, 4042 struct rte_eth_rss_reta_entry64 *reta_conf, 4043 uint16_t reta_size) 4044 { 4045 struct rte_eth_dev *dev; 4046 int ret; 4047 4048 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4049 dev = &rte_eth_devices[port_id]; 4050 4051 if (reta_conf == NULL) { 4052 RTE_ETHDEV_LOG(ERR, 4053 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4054 port_id); 4055 return -EINVAL; 4056 } 4057 4058 /* Check mask bits */ 4059 ret = eth_check_reta_mask(reta_conf, reta_size); 4060 if (ret < 0) 4061 return ret; 4062 4063 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 4064 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4065 reta_size)); 4066 } 4067 4068 int 4069 rte_eth_dev_rss_hash_update(uint16_t port_id, 4070 struct rte_eth_rss_conf *rss_conf) 4071 { 4072 struct rte_eth_dev *dev; 4073 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4074 int ret; 4075 4076 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4077 dev = &rte_eth_devices[port_id]; 4078 4079 if (rss_conf == NULL) { 4080 RTE_ETHDEV_LOG(ERR, 4081 "Cannot update ethdev port %u RSS hash from NULL config\n", 4082 port_id); 4083 return -EINVAL; 4084 } 4085 4086 ret = rte_eth_dev_info_get(port_id, &dev_info); 4087 if (ret != 0) 4088 return ret; 4089 4090 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4091 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4092 dev_info.flow_type_rss_offloads) { 4093 RTE_ETHDEV_LOG(ERR, 4094 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4095 port_id, rss_conf->rss_hf, 4096 dev_info.flow_type_rss_offloads); 4097 return -EINVAL; 4098 } 4099 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 4100 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4101 rss_conf)); 4102 } 4103 4104 int 4105 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4106 struct rte_eth_rss_conf *rss_conf) 4107 { 4108 struct rte_eth_dev *dev; 4109 4110 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4111 dev = &rte_eth_devices[port_id]; 4112 4113 if (rss_conf == NULL) { 4114 RTE_ETHDEV_LOG(ERR, 4115 "Cannot get ethdev port %u RSS hash config to NULL\n", 4116 port_id); 4117 return -EINVAL; 4118 } 4119 4120 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 4121 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4122 rss_conf)); 4123 } 4124 4125 int 4126 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4127 struct rte_eth_udp_tunnel *udp_tunnel) 4128 { 4129 struct rte_eth_dev *dev; 4130 4131 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4132 dev = &rte_eth_devices[port_id]; 4133 4134 if (udp_tunnel == NULL) { 4135 RTE_ETHDEV_LOG(ERR, 4136 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4137 port_id); 4138 return -EINVAL; 4139 } 4140 4141 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4142 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4143 return -EINVAL; 4144 } 4145 4146 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 4147 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4148 udp_tunnel)); 4149 } 4150 4151 int 4152 
rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4153 struct rte_eth_udp_tunnel *udp_tunnel) 4154 { 4155 struct rte_eth_dev *dev; 4156 4157 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4158 dev = &rte_eth_devices[port_id]; 4159 4160 if (udp_tunnel == NULL) { 4161 RTE_ETHDEV_LOG(ERR, 4162 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4163 port_id); 4164 return -EINVAL; 4165 } 4166 4167 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4168 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4169 return -EINVAL; 4170 } 4171 4172 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 4173 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4174 udp_tunnel)); 4175 } 4176 4177 int 4178 rte_eth_led_on(uint16_t port_id) 4179 { 4180 struct rte_eth_dev *dev; 4181 4182 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4183 dev = &rte_eth_devices[port_id]; 4184 4185 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 4186 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4187 } 4188 4189 int 4190 rte_eth_led_off(uint16_t port_id) 4191 { 4192 struct rte_eth_dev *dev; 4193 4194 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4195 dev = &rte_eth_devices[port_id]; 4196 4197 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 4198 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4199 } 4200 4201 int 4202 rte_eth_fec_get_capability(uint16_t port_id, 4203 struct rte_eth_fec_capa *speed_fec_capa, 4204 unsigned int num) 4205 { 4206 struct rte_eth_dev *dev; 4207 int ret; 4208 4209 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4210 dev = &rte_eth_devices[port_id]; 4211 4212 if (speed_fec_capa == NULL && num > 0) { 4213 RTE_ETHDEV_LOG(ERR, 4214 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4215 port_id); 4216 return -EINVAL; 4217 } 4218 4219 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); 4220 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4221 4222 return ret; 4223 } 4224 4225 int 4226 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4227 { 4228 struct rte_eth_dev *dev; 4229 4230 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4231 dev = &rte_eth_devices[port_id]; 4232 4233 if (fec_capa == NULL) { 4234 RTE_ETHDEV_LOG(ERR, 4235 "Cannot get ethdev port %u current FEC mode to NULL\n", 4236 port_id); 4237 return -EINVAL; 4238 } 4239 4240 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); 4241 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4242 } 4243 4244 int 4245 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4246 { 4247 struct rte_eth_dev *dev; 4248 4249 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4250 dev = &rte_eth_devices[port_id]; 4251 4252 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); 4253 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4254 } 4255 4256 /* 4257 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4258 * an empty spot. 
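 * Returns the index of the matching entry, or -1 when the address is not
 * present in the array or when device info cannot be retrieved.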
4259 */ 4260 static int 4261 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4262 { 4263 struct rte_eth_dev_info dev_info; 4264 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4265 unsigned i; 4266 int ret; 4267 4268 ret = rte_eth_dev_info_get(port_id, &dev_info); 4269 if (ret != 0) 4270 return -1; 4271 4272 for (i = 0; i < dev_info.max_mac_addrs; i++) 4273 if (memcmp(addr, &dev->data->mac_addrs[i], 4274 RTE_ETHER_ADDR_LEN) == 0) 4275 return i; 4276 4277 return -1; 4278 } 4279 4280 static const struct rte_ether_addr null_mac_addr; 4281 4282 int 4283 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4284 uint32_t pool) 4285 { 4286 struct rte_eth_dev *dev; 4287 int index; 4288 uint64_t pool_mask; 4289 int ret; 4290 4291 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4292 dev = &rte_eth_devices[port_id]; 4293 4294 if (addr == NULL) { 4295 RTE_ETHDEV_LOG(ERR, 4296 "Cannot add ethdev port %u MAC address from NULL address\n", 4297 port_id); 4298 return -EINVAL; 4299 } 4300 4301 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 4302 4303 if (rte_is_zero_ether_addr(addr)) { 4304 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4305 port_id); 4306 return -EINVAL; 4307 } 4308 if (pool >= ETH_64_POOLS) { 4309 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1); 4310 return -EINVAL; 4311 } 4312 4313 index = eth_dev_get_mac_addr_index(port_id, addr); 4314 if (index < 0) { 4315 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4316 if (index < 0) { 4317 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4318 port_id); 4319 return -ENOSPC; 4320 } 4321 } else { 4322 pool_mask = dev->data->mac_pool_sel[index]; 4323 4324 /* Check if both MAC address and pool is already there, and do nothing */ 4325 if (pool_mask & (1ULL << pool)) 4326 return 0; 4327 } 4328 4329 /* Update NIC */ 4330 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4331 4332 if (ret == 0) { 4333 /* Update address in NIC data structure */ 4334 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4335 4336 /* Update pool bitmap in NIC data structure */ 4337 dev->data->mac_pool_sel[index] |= (1ULL << pool); 4338 } 4339 4340 return eth_err(port_id, ret); 4341 } 4342 4343 int 4344 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4345 { 4346 struct rte_eth_dev *dev; 4347 int index; 4348 4349 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4350 dev = &rte_eth_devices[port_id]; 4351 4352 if (addr == NULL) { 4353 RTE_ETHDEV_LOG(ERR, 4354 "Cannot remove ethdev port %u MAC address from NULL address\n", 4355 port_id); 4356 return -EINVAL; 4357 } 4358 4359 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); 4360 4361 index = eth_dev_get_mac_addr_index(port_id, addr); 4362 if (index == 0) { 4363 RTE_ETHDEV_LOG(ERR, 4364 "Port %u: Cannot remove default MAC address\n", 4365 port_id); 4366 return -EADDRINUSE; 4367 } else if (index < 0) 4368 return 0; /* Do nothing if address wasn't found */ 4369 4370 /* Update NIC */ 4371 (*dev->dev_ops->mac_addr_remove)(dev, index); 4372 4373 /* Update address in NIC data structure */ 4374 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4375 4376 /* reset pool bitmap */ 4377 dev->data->mac_pool_sel[index] = 0; 4378 4379 return 0; 4380 } 4381 4382 int 4383 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4384 { 4385 struct rte_eth_dev *dev; 4386 int ret; 4387 4388 
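	/* Validate the port, the new address and driver support before
	 * touching the hardware; on success the copy kept at index 0 of
	 * dev->data->mac_addrs is refreshed to match.
	 */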
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4389 dev = &rte_eth_devices[port_id]; 4390 4391 if (addr == NULL) { 4392 RTE_ETHDEV_LOG(ERR, 4393 "Cannot set ethdev port %u default MAC address from NULL address\n", 4394 port_id); 4395 return -EINVAL; 4396 } 4397 4398 if (!rte_is_valid_assigned_ether_addr(addr)) 4399 return -EINVAL; 4400 4401 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 4402 4403 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4404 if (ret < 0) 4405 return ret; 4406 4407 /* Update default address in NIC data structure */ 4408 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4409 4410 return 0; 4411 } 4412 4413 4414 /* 4415 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4416 * an empty spot. 4417 */ 4418 static int 4419 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4420 const struct rte_ether_addr *addr) 4421 { 4422 struct rte_eth_dev_info dev_info; 4423 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4424 unsigned i; 4425 int ret; 4426 4427 ret = rte_eth_dev_info_get(port_id, &dev_info); 4428 if (ret != 0) 4429 return -1; 4430 4431 if (!dev->data->hash_mac_addrs) 4432 return -1; 4433 4434 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4435 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4436 RTE_ETHER_ADDR_LEN) == 0) 4437 return i; 4438 4439 return -1; 4440 } 4441 4442 int 4443 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4444 uint8_t on) 4445 { 4446 int index; 4447 int ret; 4448 struct rte_eth_dev *dev; 4449 4450 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4451 dev = &rte_eth_devices[port_id]; 4452 4453 if (addr == NULL) { 4454 RTE_ETHDEV_LOG(ERR, 4455 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4456 port_id); 4457 return -EINVAL; 4458 } 4459 4460 if (rte_is_zero_ether_addr(addr)) { 4461 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4462 port_id); 4463 return -EINVAL; 4464 } 4465 4466 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4467 /* Check if it's already there, and do nothing */ 4468 if ((index >= 0) && on) 4469 return 0; 4470 4471 if (index < 0) { 4472 if (!on) { 4473 RTE_ETHDEV_LOG(ERR, 4474 "Port %u: the MAC address was not set in UTA\n", 4475 port_id); 4476 return -EINVAL; 4477 } 4478 4479 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4480 if (index < 0) { 4481 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4482 port_id); 4483 return -ENOSPC; 4484 } 4485 } 4486 4487 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 4488 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4489 if (ret == 0) { 4490 /* Update address in NIC data structure */ 4491 if (on) 4492 rte_ether_addr_copy(addr, 4493 &dev->data->hash_mac_addrs[index]); 4494 else 4495 rte_ether_addr_copy(&null_mac_addr, 4496 &dev->data->hash_mac_addrs[index]); 4497 } 4498 4499 return eth_err(port_id, ret); 4500 } 4501 4502 int 4503 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4504 { 4505 struct rte_eth_dev *dev; 4506 4507 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4508 dev = &rte_eth_devices[port_id]; 4509 4510 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 4511 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4512 on)); 4513 } 4514 4515 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4516 uint16_t tx_rate) 4517 { 4518 struct rte_eth_dev *dev; 4519 struct rte_eth_dev_info dev_info; 4520 struct rte_eth_link 
link;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	link = dev->data->dev_link;

	if (queue_idx > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR,
			"Set queue rate limit: port %u: invalid queue ID=%u\n",
			port_id, queue_idx);
		return -EINVAL;
	}

	if (tx_rate > link.link_speed) {
		RTE_ETHDEV_LOG(ERR,
			"Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
			tx_rate, link.link_speed);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
							queue_idx, tx_rate));
}

RTE_INIT(eth_dev_init_fp_ops)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
		eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
}

RTE_INIT(eth_dev_init_cb_lists)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
}

int
rte_eth_dev_callback_register(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;
	uint16_t next_port;
	uint16_t last_port;

	if (cb_fn == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot register ethdev port %u callback from NULL\n",
			port_id);
		return -EINVAL;
	}

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];

		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
			if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event) {
				break;
			}
		}

		/* create a new callback. */
		if (user_cb == NULL) {
			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_eth_dev_callback), 0);
			if (user_cb != NULL) {
				user_cb->cb_fn = cb_fn;
				user_cb->cb_arg = cb_arg;
				user_cb->event = event;
				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
						  user_cb, next);
			} else {
				rte_spinlock_unlock(&eth_dev_cb_lock);
				rte_eth_dev_callback_unregister(port_id, event,
								cb_fn, cb_arg);
				return -ENOMEM;
			}
		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&eth_dev_cb_lock);
	return 0;
}

int
rte_eth_dev_callback_unregister(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;
	uint16_t next_port;
	uint16_t last_port;

	if (cb_fn == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot unregister ethdev port %u callback from NULL\n",
			port_id);
		return -EINVAL;
	}

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];
		ret = 0;
		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
		     cb = next) {

			next = TAILQ_NEXT(cb, next);

			if (cb->cb_fn != cb_fn || cb->event != event ||
			    (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
				continue;

			/*
			 * if this callback is not executing right now,
			 * then remove it.
			 */
			if (cb->active == 0) {
				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
				rte_free(cb);
			} else {
				ret = -EAGAIN;
			}
		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&eth_dev_cb_lock);
	return ret;
}

int
rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&eth_dev_cb_lock);
	return rc;
}

void
rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	/*
	 * for secondary process, at that point we expect device
	 * to be already 'usable', so shared data and all function pointers
	 * for fast-path devops have to be setup properly inside rte_eth_dev.
4731 */ 4732 if (rte_eal_process_type() == RTE_PROC_SECONDARY) 4733 eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev); 4734 4735 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); 4736 4737 dev->state = RTE_ETH_DEV_ATTACHED; 4738 } 4739 4740 int 4741 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4742 { 4743 uint32_t vec; 4744 struct rte_eth_dev *dev; 4745 struct rte_intr_handle *intr_handle; 4746 uint16_t qid; 4747 int rc; 4748 4749 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4750 dev = &rte_eth_devices[port_id]; 4751 4752 if (!dev->intr_handle) { 4753 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4754 return -ENOTSUP; 4755 } 4756 4757 intr_handle = dev->intr_handle; 4758 if (!intr_handle->intr_vec) { 4759 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4760 return -EPERM; 4761 } 4762 4763 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4764 vec = intr_handle->intr_vec[qid]; 4765 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4766 if (rc && rc != -EEXIST) { 4767 RTE_ETHDEV_LOG(ERR, 4768 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 4769 port_id, qid, op, epfd, vec); 4770 } 4771 } 4772 4773 return 0; 4774 } 4775 4776 int 4777 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4778 { 4779 struct rte_intr_handle *intr_handle; 4780 struct rte_eth_dev *dev; 4781 unsigned int efd_idx; 4782 uint32_t vec; 4783 int fd; 4784 4785 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4786 dev = &rte_eth_devices[port_id]; 4787 4788 if (queue_id >= dev->data->nb_rx_queues) { 4789 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 4790 return -1; 4791 } 4792 4793 if (!dev->intr_handle) { 4794 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4795 return -1; 4796 } 4797 4798 intr_handle = dev->intr_handle; 4799 if (!intr_handle->intr_vec) { 4800 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4801 return -1; 4802 } 4803 4804 vec = intr_handle->intr_vec[queue_id]; 4805 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
4806 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 4807 fd = intr_handle->efds[efd_idx]; 4808 4809 return fd; 4810 } 4811 4812 static inline int 4813 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id, 4814 const char *ring_name) 4815 { 4816 return snprintf(name, len, "eth_p%d_q%d_%s", 4817 port_id, queue_id, ring_name); 4818 } 4819 4820 const struct rte_memzone * 4821 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, 4822 uint16_t queue_id, size_t size, unsigned align, 4823 int socket_id) 4824 { 4825 char z_name[RTE_MEMZONE_NAMESIZE]; 4826 const struct rte_memzone *mz; 4827 int rc; 4828 4829 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4830 queue_id, ring_name); 4831 if (rc >= RTE_MEMZONE_NAMESIZE) { 4832 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4833 rte_errno = ENAMETOOLONG; 4834 return NULL; 4835 } 4836 4837 mz = rte_memzone_lookup(z_name); 4838 if (mz) { 4839 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) || 4840 size > mz->len || 4841 ((uintptr_t)mz->addr & (align - 1)) != 0) { 4842 RTE_ETHDEV_LOG(ERR, 4843 "memzone %s does not justify the requested attributes\n", 4844 mz->name); 4845 return NULL; 4846 } 4847 4848 return mz; 4849 } 4850 4851 return rte_memzone_reserve_aligned(z_name, size, socket_id, 4852 RTE_MEMZONE_IOVA_CONTIG, align); 4853 } 4854 4855 int 4856 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name, 4857 uint16_t queue_id) 4858 { 4859 char z_name[RTE_MEMZONE_NAMESIZE]; 4860 const struct rte_memzone *mz; 4861 int rc = 0; 4862 4863 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4864 queue_id, ring_name); 4865 if (rc >= RTE_MEMZONE_NAMESIZE) { 4866 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4867 return -ENAMETOOLONG; 4868 } 4869 4870 mz = rte_memzone_lookup(z_name); 4871 if (mz) 4872 rc = rte_memzone_free(mz); 4873 else 4874 rc = -ENOENT; 4875 4876 return rc; 4877 } 4878 4879 int 4880 rte_eth_dev_create(struct rte_device *device, const char *name, 4881 size_t priv_data_size, 4882 ethdev_bus_specific_init ethdev_bus_specific_init, 4883 void *bus_init_params, 4884 ethdev_init_t ethdev_init, void *init_params) 4885 { 4886 struct rte_eth_dev *ethdev; 4887 int retval; 4888 4889 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); 4890 4891 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4892 ethdev = rte_eth_dev_allocate(name); 4893 if (!ethdev) 4894 return -ENODEV; 4895 4896 if (priv_data_size) { 4897 ethdev->data->dev_private = rte_zmalloc_socket( 4898 name, priv_data_size, RTE_CACHE_LINE_SIZE, 4899 device->numa_node); 4900 4901 if (!ethdev->data->dev_private) { 4902 RTE_ETHDEV_LOG(ERR, 4903 "failed to allocate private data\n"); 4904 retval = -ENOMEM; 4905 goto probe_failed; 4906 } 4907 } 4908 } else { 4909 ethdev = rte_eth_dev_attach_secondary(name); 4910 if (!ethdev) { 4911 RTE_ETHDEV_LOG(ERR, 4912 "secondary process attach failed, ethdev doesn't exist\n"); 4913 return -ENODEV; 4914 } 4915 } 4916 4917 ethdev->device = device; 4918 4919 if (ethdev_bus_specific_init) { 4920 retval = ethdev_bus_specific_init(ethdev, bus_init_params); 4921 if (retval) { 4922 RTE_ETHDEV_LOG(ERR, 4923 "ethdev bus specific initialisation failed\n"); 4924 goto probe_failed; 4925 } 4926 } 4927 4928 retval = ethdev_init(ethdev, init_params); 4929 if (retval) { 4930 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n"); 4931 goto probe_failed; 4932 } 4933 4934 rte_eth_dev_probing_finish(ethdev); 4935 4936 return retval; 4937 4938 probe_failed: 4939 
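	/* Common failure path: release the half-initialised ethdev so its
	 * port ID can be reused.
	 */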
	rte_eth_dev_release_port(ethdev);
	return retval;
}

int
rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
	ethdev_uninit_t ethdev_uninit)
{
	int ret;

	ethdev = rte_eth_dev_allocated(ethdev->data->name);
	if (!ethdev)
		return -ENODEV;

	RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);

	ret = ethdev_uninit(ethdev);
	if (ret)
		return ret;

	return rte_eth_dev_release_port(ethdev);
}

int
rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
			  int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (!dev->intr_handle) {
		RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
		return -EPERM;
	}

	vec = intr_handle->intr_vec[queue_id];
	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (rc && rc != -EEXIST) {
		RTE_ETHDEV_LOG(ERR,
			"p %u q %u rx ctl error op %d epfd %d vec %u\n",
			port_id, queue_id, op, epfd, vec);
		return rc;
	}

	return 0;
}

int
rte_eth_dev_rx_intr_enable(uint16_t port_id,
			   uint16_t queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
}

int
rte_eth_dev_rx_intr_disable(uint16_t port_id,
			    uint16_t queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
}


const struct rte_eth_rxtx_callback *
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	struct rte_eth_dev *dev;

	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}
	dev = &rte_eth_devices[port_id];
	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
		rte_errno = EINVAL;
		return NULL;
	}
	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&eth_dev_rx_cb_lock);
	/* Add the callbacks in FIFO order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

	if (!tail) {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(
			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
			cb, __ATOMIC_RELEASE);

	} else {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	}
	rte_spinlock_unlock(&eth_dev_rx_cb_lock);

	return cb;
}

const struct rte_eth_rxtx_callback *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&eth_dev_rx_cb_lock);
	/* Add the callbacks at first position */
	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
	/* Stores to cb->fn, cb->param and cb->next should complete before
	 * cb is visible to data plane threads.
	 */
	__atomic_store_n(
		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
		cb, __ATOMIC_RELEASE);
	rte_spinlock_unlock(&eth_dev_rx_cb_lock);

	return cb;
}

const struct rte_eth_rxtx_callback *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	struct rte_eth_dev *dev;

	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	dev = &rte_eth_devices[port_id];
	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.tx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&eth_dev_tx_cb_lock);
	/* Add the callbacks in FIFO order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

	if (!tail) {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(
			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
			cb, __ATOMIC_RELEASE);

	} else {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	}
	rte_spinlock_unlock(&eth_dev_tx_cb_lock);

	return cb;
}

int
rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;
	int ret = -EINVAL;

	rte_spinlock_lock(&eth_dev_rx_cb_lock);
	prev_cb = &dev->post_rx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&eth_dev_rx_cb_lock);

	return ret;
}

int
rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret = -EINVAL;
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;

	rte_spinlock_lock(&eth_dev_tx_cb_lock);
	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&eth_dev_tx_cb_lock);

	return ret;
}

int
rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (qinfo == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
			port_id, queue_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues == NULL ||
			dev->data->rx_queues[queue_id] == NULL) {
		RTE_ETHDEV_LOG(ERR,
			       "Rx queue %"PRIu16" of device with port_id=%"
			       PRIu16" has not been setup\n",
			       queue_id, port_id);
		return -EINVAL;
	}

	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
			queue_id, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
	qinfo->queue_state = dev->data->rx_queue_state[queue_id];

	return 0;
}

int
rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (qinfo == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
			port_id, queue_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues == NULL ||
			dev->data->tx_queues[queue_id] == NULL) {
		RTE_ETHDEV_LOG(ERR,
			       "Tx queue %"PRIu16" of device with port_id=%"
			       PRIu16" has not been setup\n",
			       queue_id, port_id);
		return -EINVAL;
	}

	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
			queue_id, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
	qinfo->queue_state = dev->data->tx_queue_state[queue_id];

	return 0;
}

int
rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
			  struct rte_eth_burst_mode *mode)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (mode == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
			port_id, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
	memset(mode, 0, sizeof(*mode));
	return eth_err(port_id,
dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5377 } 5378 5379 int 5380 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5381 struct rte_eth_burst_mode *mode) 5382 { 5383 struct rte_eth_dev *dev; 5384 5385 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5386 dev = &rte_eth_devices[port_id]; 5387 5388 if (queue_id >= dev->data->nb_tx_queues) { 5389 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); 5390 return -EINVAL; 5391 } 5392 5393 if (mode == NULL) { 5394 RTE_ETHDEV_LOG(ERR, 5395 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5396 port_id, queue_id); 5397 return -EINVAL; 5398 } 5399 5400 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 5401 memset(mode, 0, sizeof(*mode)); 5402 return eth_err(port_id, 5403 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5404 } 5405 5406 int 5407 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5408 struct rte_power_monitor_cond *pmc) 5409 { 5410 struct rte_eth_dev *dev; 5411 5412 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5413 dev = &rte_eth_devices[port_id]; 5414 5415 if (queue_id >= dev->data->nb_rx_queues) { 5416 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5417 return -EINVAL; 5418 } 5419 5420 if (pmc == NULL) { 5421 RTE_ETHDEV_LOG(ERR, 5422 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5423 port_id, queue_id); 5424 return -EINVAL; 5425 } 5426 5427 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); 5428 return eth_err(port_id, 5429 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5430 } 5431 5432 int 5433 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5434 struct rte_ether_addr *mc_addr_set, 5435 uint32_t nb_mc_addr) 5436 { 5437 struct rte_eth_dev *dev; 5438 5439 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5440 dev = &rte_eth_devices[port_id]; 5441 5442 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 5443 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5444 mc_addr_set, nb_mc_addr)); 5445 } 5446 5447 int 5448 rte_eth_timesync_enable(uint16_t port_id) 5449 { 5450 struct rte_eth_dev *dev; 5451 5452 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5453 dev = &rte_eth_devices[port_id]; 5454 5455 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 5456 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5457 } 5458 5459 int 5460 rte_eth_timesync_disable(uint16_t port_id) 5461 { 5462 struct rte_eth_dev *dev; 5463 5464 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5465 dev = &rte_eth_devices[port_id]; 5466 5467 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 5468 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5469 } 5470 5471 int 5472 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5473 uint32_t flags) 5474 { 5475 struct rte_eth_dev *dev; 5476 5477 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5478 dev = &rte_eth_devices[port_id]; 5479 5480 if (timestamp == NULL) { 5481 RTE_ETHDEV_LOG(ERR, 5482 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5483 port_id); 5484 return -EINVAL; 5485 } 5486 5487 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 5488 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5489 (dev, timestamp, flags)); 5490 } 5491 5492 int 5493 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5494 struct timespec *timestamp) 5495 { 5496 struct rte_eth_dev *dev; 5497 5498 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5499 dev = &rte_eth_devices[port_id]; 5500 5501 if (timestamp == NULL) { 5502 RTE_ETHDEV_LOG(ERR, 5503 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5504 port_id); 5505 return -EINVAL; 5506 } 5507 5508 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 5509 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5510 (dev, timestamp)); 5511 } 5512 5513 int 5514 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5515 { 5516 struct rte_eth_dev *dev; 5517 5518 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5519 dev = &rte_eth_devices[port_id]; 5520 5521 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 5522 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5523 } 5524 5525 int 5526 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5527 { 5528 struct rte_eth_dev *dev; 5529 5530 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5531 dev = &rte_eth_devices[port_id]; 5532 5533 if (timestamp == NULL) { 5534 RTE_ETHDEV_LOG(ERR, 5535 "Cannot read ethdev port %u timesync time to NULL\n", 5536 port_id); 5537 return -EINVAL; 5538 } 5539 5540 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 5541 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5542 timestamp)); 5543 } 5544 5545 int 5546 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5547 { 5548 struct rte_eth_dev *dev; 5549 5550 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5551 dev = &rte_eth_devices[port_id]; 5552 5553 if (timestamp == NULL) { 5554 RTE_ETHDEV_LOG(ERR, 5555 "Cannot write ethdev port %u timesync from NULL time\n", 5556 port_id); 5557 return -EINVAL; 5558 } 5559 5560 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 5561 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5562 timestamp)); 5563 } 5564 5565 int 5566 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5567 { 5568 struct rte_eth_dev *dev; 5569 5570 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5571 dev = &rte_eth_devices[port_id]; 5572 5573 if (clock == NULL) { 5574 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5575 port_id); 5576 return -EINVAL; 5577 } 5578 5579 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 5580 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5581 } 5582 5583 int 5584 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5585 { 5586 struct rte_eth_dev *dev; 5587 5588 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5589 dev = &rte_eth_devices[port_id]; 5590 5591 if (info == NULL) { 5592 RTE_ETHDEV_LOG(ERR, 5593 "Cannot get ethdev port %u register info to NULL\n", 5594 port_id); 5595 return -EINVAL; 5596 } 5597 5598 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 5599 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5600 } 5601 5602 int 5603 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5604 { 5605 struct rte_eth_dev *dev; 5606 5607 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5608 dev = &rte_eth_devices[port_id]; 5609 5610 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 5611 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5612 } 5613 5614 int 5615 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5616 { 5617 struct rte_eth_dev *dev; 5618 5619 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
5620 dev = &rte_eth_devices[port_id]; 5621 5622 if (info == NULL) { 5623 RTE_ETHDEV_LOG(ERR, 5624 "Cannot get ethdev port %u EEPROM info to NULL\n", 5625 port_id); 5626 return -EINVAL; 5627 } 5628 5629 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 5630 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5631 } 5632 5633 int 5634 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5635 { 5636 struct rte_eth_dev *dev; 5637 5638 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5639 dev = &rte_eth_devices[port_id]; 5640 5641 if (info == NULL) { 5642 RTE_ETHDEV_LOG(ERR, 5643 "Cannot set ethdev port %u EEPROM from NULL info\n", 5644 port_id); 5645 return -EINVAL; 5646 } 5647 5648 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 5649 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5650 } 5651 5652 int 5653 rte_eth_dev_get_module_info(uint16_t port_id, 5654 struct rte_eth_dev_module_info *modinfo) 5655 { 5656 struct rte_eth_dev *dev; 5657 5658 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5659 dev = &rte_eth_devices[port_id]; 5660 5661 if (modinfo == NULL) { 5662 RTE_ETHDEV_LOG(ERR, 5663 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5664 port_id); 5665 return -EINVAL; 5666 } 5667 5668 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 5669 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5670 } 5671 5672 int 5673 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5674 struct rte_dev_eeprom_info *info) 5675 { 5676 struct rte_eth_dev *dev; 5677 5678 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5679 dev = &rte_eth_devices[port_id]; 5680 5681 if (info == NULL) { 5682 RTE_ETHDEV_LOG(ERR, 5683 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5684 port_id); 5685 return -EINVAL; 5686 } 5687 5688 if (info->data == NULL) { 5689 RTE_ETHDEV_LOG(ERR, 5690 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5691 port_id); 5692 return -EINVAL; 5693 } 5694 5695 if (info->length == 0) { 5696 RTE_ETHDEV_LOG(ERR, 5697 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5698 port_id); 5699 return -EINVAL; 5700 } 5701 5702 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 5703 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5704 } 5705 5706 int 5707 rte_eth_dev_get_dcb_info(uint16_t port_id, 5708 struct rte_eth_dcb_info *dcb_info) 5709 { 5710 struct rte_eth_dev *dev; 5711 5712 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5713 dev = &rte_eth_devices[port_id]; 5714 5715 if (dcb_info == NULL) { 5716 RTE_ETHDEV_LOG(ERR, 5717 "Cannot get ethdev port %u DCB info to NULL\n", 5718 port_id); 5719 return -EINVAL; 5720 } 5721 5722 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5723 5724 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 5725 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5726 } 5727 5728 static void 5729 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5730 const struct rte_eth_desc_lim *desc_lim) 5731 { 5732 if (desc_lim->nb_align != 0) 5733 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5734 5735 if (desc_lim->nb_max != 0) 5736 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5737 5738 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5739 } 5740 5741 int 5742 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5743 uint16_t *nb_rx_desc, 5744 uint16_t *nb_tx_desc) 5745 { 5746 struct rte_eth_dev_info dev_info; 5747 int ret; 5748 5749 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
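	/* Each requested count is rounded up to the PMD's nb_align, clamped
	 * to nb_max and raised to at least nb_min (see eth_dev_adjust_nb_desc
	 * above).
	 */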
5750 5751 ret = rte_eth_dev_info_get(port_id, &dev_info); 5752 if (ret != 0) 5753 return ret; 5754 5755 if (nb_rx_desc != NULL) 5756 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5757 5758 if (nb_tx_desc != NULL) 5759 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5760 5761 return 0; 5762 } 5763 5764 int 5765 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5766 struct rte_eth_hairpin_cap *cap) 5767 { 5768 struct rte_eth_dev *dev; 5769 5770 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5771 dev = &rte_eth_devices[port_id]; 5772 5773 if (cap == NULL) { 5774 RTE_ETHDEV_LOG(ERR, 5775 "Cannot get ethdev port %u hairpin capability to NULL\n", 5776 port_id); 5777 return -EINVAL; 5778 } 5779 5780 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 5781 memset(cap, 0, sizeof(*cap)); 5782 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5783 } 5784 5785 int 5786 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5787 { 5788 if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5789 return 1; 5790 return 0; 5791 } 5792 5793 int 5794 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5795 { 5796 if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5797 return 1; 5798 return 0; 5799 } 5800 5801 int 5802 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5803 { 5804 struct rte_eth_dev *dev; 5805 5806 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5807 dev = &rte_eth_devices[port_id]; 5808 5809 if (pool == NULL) { 5810 RTE_ETHDEV_LOG(ERR, 5811 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5812 port_id); 5813 return -EINVAL; 5814 } 5815 5816 if (*dev->dev_ops->pool_ops_supported == NULL) 5817 return 1; /* all pools are supported */ 5818 5819 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5820 } 5821 5822 /** 5823 * A set of values to describe the possible states of a switch domain. 5824 */ 5825 enum rte_eth_switch_domain_state { 5826 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, 5827 RTE_ETH_SWITCH_DOMAIN_ALLOCATED 5828 }; 5829 5830 /** 5831 * Array of switch domains available for allocation. Array is sized to 5832 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than 5833 * ethdev ports in a single process. 
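 * Allocation is first-fit: rte_eth_switch_domain_alloc() hands out the
 * lowest-numbered unused domain.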
5834 */ 5835 static struct rte_eth_dev_switch { 5836 enum rte_eth_switch_domain_state state; 5837 } eth_dev_switch_domains[RTE_MAX_ETHPORTS]; 5838 5839 int 5840 rte_eth_switch_domain_alloc(uint16_t *domain_id) 5841 { 5842 uint16_t i; 5843 5844 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 5845 5846 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 5847 if (eth_dev_switch_domains[i].state == 5848 RTE_ETH_SWITCH_DOMAIN_UNUSED) { 5849 eth_dev_switch_domains[i].state = 5850 RTE_ETH_SWITCH_DOMAIN_ALLOCATED; 5851 *domain_id = i; 5852 return 0; 5853 } 5854 } 5855 5856 return -ENOSPC; 5857 } 5858 5859 int 5860 rte_eth_switch_domain_free(uint16_t domain_id) 5861 { 5862 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || 5863 domain_id >= RTE_MAX_ETHPORTS) 5864 return -EINVAL; 5865 5866 if (eth_dev_switch_domains[domain_id].state != 5867 RTE_ETH_SWITCH_DOMAIN_ALLOCATED) 5868 return -EINVAL; 5869 5870 eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED; 5871 5872 return 0; 5873 } 5874 5875 static int 5876 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) 5877 { 5878 int state; 5879 struct rte_kvargs_pair *pair; 5880 char *letter; 5881 5882 arglist->str = strdup(str_in); 5883 if (arglist->str == NULL) 5884 return -ENOMEM; 5885 5886 letter = arglist->str; 5887 state = 0; 5888 arglist->count = 0; 5889 pair = &arglist->pairs[0]; 5890 while (1) { 5891 switch (state) { 5892 case 0: /* Initial */ 5893 if (*letter == '=') 5894 return -EINVAL; 5895 else if (*letter == '\0') 5896 return 0; 5897 5898 state = 1; 5899 pair->key = letter; 5900 /* fall-thru */ 5901 5902 case 1: /* Parsing key */ 5903 if (*letter == '=') { 5904 *letter = '\0'; 5905 pair->value = letter + 1; 5906 state = 2; 5907 } else if (*letter == ',' || *letter == '\0') 5908 return -EINVAL; 5909 break; 5910 5911 5912 case 2: /* Parsing value */ 5913 if (*letter == '[') 5914 state = 3; 5915 else if (*letter == ',') { 5916 *letter = '\0'; 5917 arglist->count++; 5918 pair = &arglist->pairs[arglist->count]; 5919 state = 0; 5920 } else if (*letter == '\0') { 5921 letter--; 5922 arglist->count++; 5923 pair = &arglist->pairs[arglist->count]; 5924 state = 0; 5925 } 5926 break; 5927 5928 case 3: /* Parsing list */ 5929 if (*letter == ']') 5930 state = 2; 5931 else if (*letter == '\0') 5932 return -EINVAL; 5933 break; 5934 } 5935 letter++; 5936 } 5937 } 5938 5939 int 5940 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) 5941 { 5942 struct rte_kvargs args; 5943 struct rte_kvargs_pair *pair; 5944 unsigned int i; 5945 int result = 0; 5946 5947 memset(eth_da, 0, sizeof(*eth_da)); 5948 5949 result = eth_dev_devargs_tokenise(&args, dargs); 5950 if (result < 0) 5951 goto parse_cleanup; 5952 5953 for (i = 0; i < args.count; i++) { 5954 pair = &args.pairs[i]; 5955 if (strcmp("representor", pair->key) == 0) { 5956 if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) { 5957 RTE_LOG(ERR, EAL, "duplicated representor key: %s\n", 5958 dargs); 5959 result = -1; 5960 goto parse_cleanup; 5961 } 5962 result = rte_eth_devargs_parse_representor_ports( 5963 pair->value, eth_da); 5964 if (result < 0) 5965 goto parse_cleanup; 5966 } 5967 } 5968 5969 parse_cleanup: 5970 if (args.str) 5971 free(args.str); 5972 5973 return result; 5974 } 5975 5976 int 5977 rte_eth_representor_id_get(uint16_t port_id, 5978 enum rte_eth_representor_type type, 5979 int controller, int pf, int representor_port, 5980 uint16_t *repr_id) 5981 { 5982 int ret, n, count; 5983 uint32_t i; 5984 struct rte_eth_representor_info *info = NULL; 5985 size_t 
size; 5986 5987 if (type == RTE_ETH_REPRESENTOR_NONE) 5988 return 0; 5989 if (repr_id == NULL) 5990 return -EINVAL; 5991 5992 /* Get PMD representor range info. */ 5993 ret = rte_eth_representor_info_get(port_id, NULL); 5994 if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF && 5995 controller == -1 && pf == -1) { 5996 /* Direct mapping for legacy VF representor. */ 5997 *repr_id = representor_port; 5998 return 0; 5999 } else if (ret < 0) { 6000 return ret; 6001 } 6002 n = ret; 6003 size = sizeof(*info) + n * sizeof(info->ranges[0]); 6004 info = calloc(1, size); 6005 if (info == NULL) 6006 return -ENOMEM; 6007 info->nb_ranges_alloc = n; 6008 ret = rte_eth_representor_info_get(port_id, info); 6009 if (ret < 0) 6010 goto out; 6011 6012 /* Default controller and pf to caller. */ 6013 if (controller == -1) 6014 controller = info->controller; 6015 if (pf == -1) 6016 pf = info->pf; 6017 6018 /* Locate representor ID. */ 6019 ret = -ENOENT; 6020 for (i = 0; i < info->nb_ranges; ++i) { 6021 if (info->ranges[i].type != type) 6022 continue; 6023 if (info->ranges[i].controller != controller) 6024 continue; 6025 if (info->ranges[i].id_end < info->ranges[i].id_base) { 6026 RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n", 6027 port_id, info->ranges[i].id_base, 6028 info->ranges[i].id_end, i); 6029 continue; 6030 6031 } 6032 count = info->ranges[i].id_end - info->ranges[i].id_base + 1; 6033 switch (info->ranges[i].type) { 6034 case RTE_ETH_REPRESENTOR_PF: 6035 if (pf < info->ranges[i].pf || 6036 pf >= info->ranges[i].pf + count) 6037 continue; 6038 *repr_id = info->ranges[i].id_base + 6039 (pf - info->ranges[i].pf); 6040 ret = 0; 6041 goto out; 6042 case RTE_ETH_REPRESENTOR_VF: 6043 if (info->ranges[i].pf != pf) 6044 continue; 6045 if (representor_port < info->ranges[i].vf || 6046 representor_port >= info->ranges[i].vf + count) 6047 continue; 6048 *repr_id = info->ranges[i].id_base + 6049 (representor_port - info->ranges[i].vf); 6050 ret = 0; 6051 goto out; 6052 case RTE_ETH_REPRESENTOR_SF: 6053 if (info->ranges[i].pf != pf) 6054 continue; 6055 if (representor_port < info->ranges[i].sf || 6056 representor_port >= info->ranges[i].sf + count) 6057 continue; 6058 *repr_id = info->ranges[i].id_base + 6059 (representor_port - info->ranges[i].sf); 6060 ret = 0; 6061 goto out; 6062 default: 6063 break; 6064 } 6065 } 6066 out: 6067 free(info); 6068 return ret; 6069 } 6070 6071 static int 6072 eth_dev_handle_port_list(const char *cmd __rte_unused, 6073 const char *params __rte_unused, 6074 struct rte_tel_data *d) 6075 { 6076 int port_id; 6077 6078 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 6079 RTE_ETH_FOREACH_DEV(port_id) 6080 rte_tel_data_add_array_int(d, port_id); 6081 return 0; 6082 } 6083 6084 static void 6085 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 6086 const char *stat_name) 6087 { 6088 int q; 6089 struct rte_tel_data *q_data = rte_tel_data_alloc(); 6090 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL); 6091 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 6092 rte_tel_data_add_array_u64(q_data, q_stats[q]); 6093 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 6094 } 6095 6096 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s) 6097 6098 static int 6099 eth_dev_handle_port_stats(const char *cmd __rte_unused, 6100 const char *params, 6101 struct rte_tel_data *d) 6102 { 6103 struct rte_eth_stats stats; 6104 int port_id, ret; 6105 6106 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6107 
static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}

static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
				eth_xstats[i].value);
	/* names and values share the single allocation above */
	free(eth_xstats);
	return 0;
}
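/*
 * Illustrative example (assumed typical output, not taken from this file):
 * the handlers above and below back the "/ethdev/*" telemetry commands and
 * can be exercised with the usertools/dpdk-telemetry.py client, e.g.
 *
 *	--> /ethdev/xstats,0
 *	{"/ethdev/xstats": {"rx_good_packets": 0, "tx_good_packets": 0, ...}}
 *
 * Key names come from rte_eth_xstats_get_names(); the exact set is
 * driver-dependent.
 */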
static int
eth_dev_handle_port_link_status(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	static const char *status_str = "status";
	int ret, port_id;
	struct rte_eth_link link;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_link_get_nowait(port_id, &link);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	if (!link.link_status) {
		rte_tel_data_add_dict_string(d, status_str, "DOWN");
		return 0;
	}
	rte_tel_data_add_dict_string(d, status_str, "UP");
	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
	rte_tel_data_add_dict_string(d, "duplex",
			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
				"full-duplex" : "half-duplex");
	return 0;
}

static int
eth_dev_handle_port_info(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_tel_data *rxq_state, *txq_state;
	/* buffer must hold the formatted "xx:xx:xx:xx:xx:xx" string,
	 * not just the 6 raw address bytes
	 */
	char mac_addr[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev *eth_dev;
	char *end_param;
	int port_id, i;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring");

	if (!rte_eth_dev_is_valid_port(port_id))
		return -EINVAL;

	eth_dev = &rte_eth_devices[port_id];
	if (!eth_dev)
		return -EINVAL;

	rxq_state = rte_tel_data_alloc();
	if (!rxq_state)
		return -ENOMEM;

	txq_state = rte_tel_data_alloc();
	if (!txq_state) {
		rte_tel_data_free(rxq_state);
		return -ENOMEM;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "name", eth_dev->data->name);
	rte_tel_data_add_dict_int(d, "state", eth_dev->state);
	rte_tel_data_add_dict_int(d, "nb_rx_queues",
			eth_dev->data->nb_rx_queues);
	rte_tel_data_add_dict_int(d, "nb_tx_queues",
			eth_dev->data->nb_tx_queues);
	rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id);
	rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu);
	rte_tel_data_add_dict_int(d, "rx_mbuf_size_min",
			eth_dev->data->min_rx_buf_size);
	rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail",
			eth_dev->data->rx_mbuf_alloc_failed);
	snprintf(mac_addr, sizeof(mac_addr), "%02x:%02x:%02x:%02x:%02x:%02x",
			eth_dev->data->mac_addrs->addr_bytes[0],
			eth_dev->data->mac_addrs->addr_bytes[1],
			eth_dev->data->mac_addrs->addr_bytes[2],
			eth_dev->data->mac_addrs->addr_bytes[3],
			eth_dev->data->mac_addrs->addr_bytes[4],
			eth_dev->data->mac_addrs->addr_bytes[5]);
	rte_tel_data_add_dict_string(d, "mac_addr", mac_addr);
	rte_tel_data_add_dict_int(d, "promiscuous",
			eth_dev->data->promiscuous);
	rte_tel_data_add_dict_int(d, "scattered_rx",
			eth_dev->data->scattered_rx);
	rte_tel_data_add_dict_int(d, "all_multicast",
			eth_dev->data->all_multicast);
	rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started);
	rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro);
	rte_tel_data_add_dict_int(d, "dev_configured",
			eth_dev->data->dev_configured);

	rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL);
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
		rte_tel_data_add_array_int(rxq_state,
				eth_dev->data->rx_queue_state[i]);

	rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL);
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		rte_tel_data_add_array_int(txq_state,
				eth_dev->data->tx_queue_state[i]);

	rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0);
	rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0);
	rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node);
	rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags);
	rte_tel_data_add_dict_int(d, "rx_offloads",
			eth_dev->data->dev_conf.rxmode.offloads);
	rte_tel_data_add_dict_int(d, "tx_offloads",
			eth_dev->data->dev_conf.txmode.offloads);
	rte_tel_data_add_dict_int(d, "ethdev_rss_hf",
			eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);

	return 0;
}
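/*
 * Editor's note (hedged sketch, not from the original sources): the three
 * rte_eth_hairpin_queue_peer_*() helpers below are driver-facing glue used
 * while binding hairpin queues between two ports.  A PMD driving the bind
 * from one side might, per queue, do roughly:
 *
 *	struct rte_hairpin_peer_info cur, peer;
 *	// ask the peer port to report/refresh its side of the pair
 *	rte_eth_hairpin_queue_peer_update(peer_port, peer_queue,
 *					  &cur, &peer, direction);
 *	// bind the local queue using the returned peer information
 *	rte_eth_hairpin_queue_peer_bind(cur_port, cur_queue, &peer, direction);
 *	// on teardown:
 *	// rte_eth_hairpin_queue_peer_unbind(cur_port, cur_queue, direction);
 *
 * The meaning of 'direction' and the exact call sequence are defined by the
 * PMDs implementing the hairpin dev_ops; this is only an orientation aid.
 */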
int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
		struct rte_hairpin_peer_info *cur_info,
		struct rte_hairpin_peer_info *peer_info,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
		struct rte_hairpin_peer_info *peer_info,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (id=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
	return eth_err(port_id,
		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}
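/*
 * Illustrative usage sketch (not part of the original file): applications
 * negotiate Rx metadata delivery before rte_eth_dev_configure(), e.g.
 *
 *	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *			    RTE_ETH_RX_METADATA_USER_MARK;
 *	if (rte_eth_rx_metadata_negotiate(port_id, &features) == 0) {
 *		// 'features' now holds the subset the driver agreed to
 *	}
 *
 * -ENOTSUP from the driver simply means no negotiation is needed; the flag
 * names above are the RTE_ETH_RX_METADATA_* bits declared in rte_ethdev.h.
 */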
RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
			"Returns the device info for a port. Parameters: int port_id");
}
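/*
 * Editor's note (illustrative): once registered, the commands above can be
 * queried over the telemetry socket, e.g. with usertools/dpdk-telemetry.py:
 *
 *	--> /ethdev/list
 *	{"/ethdev/list": [0, 1]}
 *	--> /ethdev/link_status,0
 *	{"/ethdev/link_status": {"status": "UP", "speed": 10000,
 *				 "duplex": "full-duplex"}}
 *
 * Port IDs and values shown are placeholders; actual output depends on the
 * running application and its drivers.
 */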