/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and its offset in stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() is not yet supporting the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
	    (strcmp(iter->bus->name, "fslmc") == 0) ||
	    (strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
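/*
 * Typical application-side use of the iterator API above, as a minimal
 * sketch (the devargs string is only an example):
 *
 *	struct rte_dev_iterator iterator;
 *	uint16_t id;
 *
 *	if (rte_eth_iterator_init(&iterator, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (id = rte_eth_iterator_next(&iterator);
 *		     id != RTE_MAX_ETHPORTS;
 *		     id = rte_eth_iterator_next(&iterator))
 *			printf("matched port %u\n", id);
 *	}
 *
 * When the walk runs to completion the iterator is cleaned up internally;
 * rte_eth_iterator_cleanup() is only needed if the walk is abandoned early.
 */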
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because filtering owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static void
eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&eth_dev_shared_data_lock);

	if (eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
			memset(eth_dev_shared_data->data, 0,
			       sizeof(eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&eth_dev_shared_data_lock);
}
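/*
 * eth_dev_shared_data_prepare() above follows the usual reserve-or-lookup
 * idiom for process-shared state; in outline (names shortened, error
 * handling omitted):
 *
 *	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *		mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA, size, socket, 0);
 *	else
 *		mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
 *	eth_dev_shared_data = mz->addr;
 *
 * so a secondary process maps exactly the same rte_eth_dev_data array that
 * the primary allocated.
 */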
static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ethdev = eth_dev_allocated(name);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return eth_dev;
}
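/*
 * In a PMD probe path the two allocation entry points are normally selected
 * on the process type; a rough sketch (error handling omitted, "name" is the
 * device name the driver derived from its bus/devargs):
 *
 *	struct rte_eth_dev *eth_dev;
 *
 *	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *		eth_dev = rte_eth_dev_allocate(name);
 *	else
 *		eth_dev = rte_eth_dev_attach_secondary(name);
 *	if (eth_dev == NULL)
 *		return -ENODEV;
 *
 * rte_eth_dev_attach_secondary() is defined just below.
 */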
/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port id both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		  const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
			if (rte_eth_devices[port_id].data->owner.id == owner_id)
				memset(&rte_eth_devices[port_id].data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			       "Invalid owner id=%016"PRIx64"\n",
			       owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}
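/*
 * How an application typically claims ports with the ownership API above,
 * a minimal sketch ("my_app" is just an example owner name):
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *	uint16_t port_id;
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0) {
 *		RTE_ETH_FOREACH_DEV(port_id)
 *			rte_eth_dev_owner_set(port_id, &owner);
 *	}
 *
 * Ports claimed this way are skipped by RTE_ETH_FOREACH_DEV in other
 * components; rte_eth_dev_owner_delete(owner.id) releases them all at once.
 */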
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}
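/*
 * Name/port translation works in both directions; a short lookup sketch
 * (the device name string is only an example):
 *
 *	uint16_t port_id;
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *
 *	if (rte_eth_dev_get_port_by_name("0000:3b:00.0", &port_id) == 0 &&
 *	    rte_eth_dev_get_name_by_port(port_id, name) == 0)
 *		printf("port %u <-> %s\n", port_id, name);
 */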
static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **rxq = dev->data->rx_queues;

	if (rxq[qid] == NULL)
		return;

	if (dev->dev_ops->rx_queue_release != NULL)
		(*dev->dev_ops->rx_queue_release)(dev, qid);
	rxq[qid] = NULL;
}

static void
eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **txq = dev->data->tx_queues;

	if (txq[qid] == NULL)
		return;

	if (dev->dev_ops->tx_queue_release != NULL)
		(*dev->dev_ops->tx_queue_release)(dev, qid);
	txq[qid] = NULL;
}

static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before start any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}
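/*
 * The per-queue start/stop calls above are mostly useful together with
 * deferred start; a minimal sketch (queue 0 is just an example):
 *
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.rx_deferred_start = 1;
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, &rxconf, mp);
 *	rte_eth_dev_start(port_id);
 *	rte_eth_dev_rx_queue_start(port_id, 0);
 *
 * dev_info, nb_rxd, socket_id and mp are assumed to have been obtained
 * earlier (rte_eth_dev_info_get(), mempool creation, etc.).
 */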
int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before start any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}
uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	case ETH_SPEED_NUM_200G:
		return ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				       " %u != %u is not allowed\n",
				       port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "> max allowed value %u\n", port_id, config_size,
			       dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "< min allowed value %u\n", port_id, config_size,
			       (unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}
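/*
 * The LRO size check above guards configurations such as the following
 * sketch, where the application opts in to TCP LRO and optionally caps the
 * aggregated packet size (the values are illustrative):
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 *	conf.rxmode.max_lro_pkt_size = 9000;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * Leaving max_lro_pkt_size at 0 lets rte_eth_dev_configure() default it to
 * the maximum Rx packet length, as implemented below.
 */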
/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = 1ULL << __builtin_ctzll(offloads_diff);
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	uint32_t max_rx_pktlen;
	uint32_t overhead_len;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time prepare to do
	 * dev_configure() to avoid any non-anticipated behaviour.
	 * And set to 1 when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* Get the real Ethernet overhead length */
	overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
			dev_info.max_mtu);

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the maximum RX packet length is supported by the
	 * configured device.
	 */
	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
	max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
	if (max_rx_pktlen > dev_info.max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u max_rx_pktlen %u > max valid value %u\n",
			port_id, max_rx_pktlen, dev_info.max_rx_pktlen);
		ret = -EINVAL;
		goto rollback;
	} else if (max_rx_pktlen < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u max_rx_pktlen %u < min valid value %u\n",
			port_id, max_rx_pktlen, RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
		goto rollback;
	}

	if ((dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
		if (dev->data->dev_conf.rxmode.mtu < RTE_ETHER_MIN_MTU ||
		    dev->data->dev_conf.rxmode.mtu > RTE_ETHER_MTU)
			/* Use default value */
			dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
	}

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		    struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}
static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Lets restore MAC now if device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}
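/*
 * The usual port bring-up sequence built on the calls above, as a rough
 * sketch (queue counts, descriptor counts and the mempool are examples;
 * error handling omitted):
 *
 *	struct rte_eth_conf conf = { 0 };
 *	uint16_t q;
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	for (q = 0; q < 1; q++) {
 *		rte_eth_rx_queue_setup(port_id, q, 1024,
 *				rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *		rte_eth_tx_queue_setup(port_id, q, 1024,
 *				rte_eth_dev_socket_id(port_id), NULL);
 *	}
 *	rte_eth_dev_start(port_id);
 *
 * mb_pool stands for a previously created rte_mempool of mbufs.
 */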
int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	dev->data->dev_started = 0;
	ret = (*dev->dev_ops->dev_stop)(dev);
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}
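/*
 * rte_eth_dev_reset() above is normally driven from the
 * RTE_ETH_EVENT_INTR_RESET callback; a minimal sketch of the application
 * side (queue setup omitted for brevity):
 *
 *	if (rte_eth_dev_reset(port_id) == 0) {
 *		rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *		rte_eth_dev_start(port_id);
 *	}
 *
 * The stop is performed internally by rte_eth_dev_reset(); after a
 * successful reset the port must be reconfigured, its queues set up again,
 * and then restarted.
 */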
static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			       "Requested Rx segments %u exceed supported %u\n",
			       n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = (1u << seg_capa->offset_align_log2) - 1;
	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			return -EINVAL;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			return -ENOTSUP;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				return -ENOTSUP;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					       offset,
					       seg_capa->offset_align_log2);
				return -EINVAL;
			}
		}
		if (mpl->private_data_size <
			sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR,
				       "%s private_data_size %u < %u\n",
				       mpl->name, mpl->private_data_size,
				       (unsigned int)sizeof
					(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		length = length != 0 ? length : *mbp_buf_size;
		if (*mbp_buf_size < length + offset) {
			RTE_ETHDEV_LOG(ERR,
				       "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
				       mpl->name, *mbp_buf_size,
				       length + offset, length, offset);
			return -EINVAL;
		}
	}
	return 0;
}
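/*
 * The checks above apply to buffer-split Rx queues, configured roughly as
 * in this sketch (two segments; hdr_pool and pay_pool are example names and
 * the driver must advertise RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT):
 *
 *	struct rte_eth_rxseg_split segs[2] = {
 *		{ .mp = hdr_pool, .length = 128, .offset = 0 },
 *		{ .mp = pay_pool, .length = 0, .offset = 0 },
 *	};
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	rxconf.rx_seg = (union rte_eth_rxseg *)segs;
 *	rxconf.rx_nseg = 2;
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, &rxconf, NULL);
 *
 * Passing a NULL mempool selects the extended (multi-segment) path in
 * rte_eth_rx_queue_setup() below.
 */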
2039 */ 2040 if (mp->private_data_size < 2041 sizeof(struct rte_pktmbuf_pool_private)) { 2042 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 2043 mp->name, mp->private_data_size, 2044 (unsigned int) 2045 sizeof(struct rte_pktmbuf_pool_private)); 2046 return -ENOSPC; 2047 } 2048 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2049 if (mbp_buf_size < dev_info.min_rx_bufsize + 2050 RTE_PKTMBUF_HEADROOM) { 2051 RTE_ETHDEV_LOG(ERR, 2052 "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", 2053 mp->name, mbp_buf_size, 2054 RTE_PKTMBUF_HEADROOM + 2055 dev_info.min_rx_bufsize, 2056 RTE_PKTMBUF_HEADROOM, 2057 dev_info.min_rx_bufsize); 2058 return -EINVAL; 2059 } 2060 } else { 2061 const struct rte_eth_rxseg_split *rx_seg; 2062 uint16_t n_seg; 2063 2064 /* Extended multi-segment configuration check. */ 2065 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) { 2066 RTE_ETHDEV_LOG(ERR, 2067 "Memory pool is null and no extended configuration provided\n"); 2068 return -EINVAL; 2069 } 2070 2071 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2072 n_seg = rx_conf->rx_nseg; 2073 2074 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2075 ret = rte_eth_rx_queue_check_split(rx_seg, n_seg, 2076 &mbp_buf_size, 2077 &dev_info); 2078 if (ret != 0) 2079 return ret; 2080 } else { 2081 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2082 return -EINVAL; 2083 } 2084 } 2085 2086 /* Use default specified by driver, if nb_rx_desc is zero */ 2087 if (nb_rx_desc == 0) { 2088 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2089 /* If driver default is also zero, fall back on EAL default */ 2090 if (nb_rx_desc == 0) 2091 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2092 } 2093 2094 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2095 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2096 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2097 2098 RTE_ETHDEV_LOG(ERR, 2099 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2100 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2101 dev_info.rx_desc_lim.nb_min, 2102 dev_info.rx_desc_lim.nb_align); 2103 return -EINVAL; 2104 } 2105 2106 if (dev->data->dev_started && 2107 !(dev_info.dev_capa & 2108 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2109 return -EBUSY; 2110 2111 if (dev->data->dev_started && 2112 (dev->data->rx_queue_state[rx_queue_id] != 2113 RTE_ETH_QUEUE_STATE_STOPPED)) 2114 return -EBUSY; 2115 2116 eth_dev_rxq_release(dev, rx_queue_id); 2117 2118 if (rx_conf == NULL) 2119 rx_conf = &dev_info.default_rxconf; 2120 2121 local_conf = *rx_conf; 2122 2123 /* 2124 * If an offloading has already been enabled in 2125 * rte_eth_dev_configure(), it has been enabled on all queues, 2126 * so there is no need to enable it in this queue again. 2127 * The local_conf.offloads input to underlying PMD only carries 2128 * those offloadings which are only enabled on this queue and 2129 * not enabled on all queues. 2130 */ 2131 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2132 2133 /* 2134 * New added offloadings for this queue are those not enabled in 2135 * rte_eth_dev_configure() and they must be per-queue type. 2136 * A pure per-port offloading can't be enabled on a queue while 2137 * disabled on another queue. A pure per-port offloading can't 2138 * be enabled for any queue as new added one if it hasn't been 2139 * enabled in rte_eth_dev_configure(). 
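 * (Which offloads may differ between queues is device specific and is
 * reported in dev_info.rx_queue_offload_capa: a PMD might, for
 * instance, allow DEV_RX_OFFLOAD_SCATTER per queue while treating
 * DEV_RX_OFFLOAD_VLAN_FILTER as per-port only, in which case the
 * latter must have been requested in rte_eth_dev_configure().)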
2140 */ 2141 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2142 local_conf.offloads) { 2143 RTE_ETHDEV_LOG(ERR, 2144 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2145 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2146 port_id, rx_queue_id, local_conf.offloads, 2147 dev_info.rx_queue_offload_capa, 2148 __func__); 2149 return -EINVAL; 2150 } 2151 2152 /* 2153 * If LRO is enabled, check that the maximum aggregated packet 2154 * size is supported by the configured device. 2155 */ 2156 /* Get the real Ethernet overhead length */ 2157 if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) { 2158 uint32_t overhead_len; 2159 uint32_t max_rx_pktlen; 2160 int ret; 2161 2162 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2163 dev_info.max_mtu); 2164 max_rx_pktlen = dev->data->mtu + overhead_len; 2165 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2166 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2167 ret = eth_dev_check_lro_pkt_size(port_id, 2168 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2169 max_rx_pktlen, 2170 dev_info.max_lro_pkt_size); 2171 if (ret != 0) 2172 return ret; 2173 } 2174 2175 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2176 socket_id, &local_conf, mp); 2177 if (!ret) { 2178 if (!dev->data->min_rx_buf_size || 2179 dev->data->min_rx_buf_size > mbp_buf_size) 2180 dev->data->min_rx_buf_size = mbp_buf_size; 2181 } 2182 2183 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2184 rx_conf, ret); 2185 return eth_err(port_id, ret); 2186 } 2187 2188 int 2189 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2190 uint16_t nb_rx_desc, 2191 const struct rte_eth_hairpin_conf *conf) 2192 { 2193 int ret; 2194 struct rte_eth_dev *dev; 2195 struct rte_eth_hairpin_cap cap; 2196 int i; 2197 int count; 2198 2199 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2200 dev = &rte_eth_devices[port_id]; 2201 2202 if (rx_queue_id >= dev->data->nb_rx_queues) { 2203 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id); 2204 return -EINVAL; 2205 } 2206 2207 if (conf == NULL) { 2208 RTE_ETHDEV_LOG(ERR, 2209 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2210 port_id); 2211 return -EINVAL; 2212 } 2213 2214 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2215 if (ret != 0) 2216 return ret; 2217 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, 2218 -ENOTSUP); 2219 /* if nb_rx_desc is zero use max number of desc from the driver. 
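 * An application typically pairs this queue with a Tx hairpin queue;
 * an illustrative sketch (queue ids and variable names are the
 * application's choice, not part of this file):
 *
 *	struct rte_eth_hairpin_conf hpconf = {
 *		.peer_count = 1,
 *		.peers[0] = { .port = port_id, .queue = peer_txq },
 *	};
 *	ret = rte_eth_rx_hairpin_queue_setup(port_id, rxq, 0, &hpconf);
 *
 * where passing 0 descriptors selects the driver maximum as described
 * above.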
*/ 2220 if (nb_rx_desc == 0) 2221 nb_rx_desc = cap.max_nb_desc; 2222 if (nb_rx_desc > cap.max_nb_desc) { 2223 RTE_ETHDEV_LOG(ERR, 2224 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu", 2225 nb_rx_desc, cap.max_nb_desc); 2226 return -EINVAL; 2227 } 2228 if (conf->peer_count > cap.max_rx_2_tx) { 2229 RTE_ETHDEV_LOG(ERR, 2230 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu", 2231 conf->peer_count, cap.max_rx_2_tx); 2232 return -EINVAL; 2233 } 2234 if (conf->peer_count == 0) { 2235 RTE_ETHDEV_LOG(ERR, 2236 "Invalid value for number of peers for Rx queue(=%u), should be: > 0", 2237 conf->peer_count); 2238 return -EINVAL; 2239 } 2240 for (i = 0, count = 0; i < dev->data->nb_rx_queues && 2241 cap.max_nb_queues != UINT16_MAX; i++) { 2242 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) 2243 count++; 2244 } 2245 if (count > cap.max_nb_queues) { 2246 RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d", 2247 cap.max_nb_queues); 2248 return -EINVAL; 2249 } 2250 if (dev->data->dev_started) 2251 return -EBUSY; 2252 eth_dev_rxq_release(dev, rx_queue_id); 2253 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, 2254 nb_rx_desc, conf); 2255 if (ret == 0) 2256 dev->data->rx_queue_state[rx_queue_id] = 2257 RTE_ETH_QUEUE_STATE_HAIRPIN; 2258 return eth_err(port_id, ret); 2259 } 2260 2261 int 2262 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2263 uint16_t nb_tx_desc, unsigned int socket_id, 2264 const struct rte_eth_txconf *tx_conf) 2265 { 2266 struct rte_eth_dev *dev; 2267 struct rte_eth_dev_info dev_info; 2268 struct rte_eth_txconf local_conf; 2269 int ret; 2270 2271 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2272 dev = &rte_eth_devices[port_id]; 2273 2274 if (tx_queue_id >= dev->data->nb_tx_queues) { 2275 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id); 2276 return -EINVAL; 2277 } 2278 2279 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP); 2280 2281 ret = rte_eth_dev_info_get(port_id, &dev_info); 2282 if (ret != 0) 2283 return ret; 2284 2285 /* Use default specified by driver, if nb_tx_desc is zero */ 2286 if (nb_tx_desc == 0) { 2287 nb_tx_desc = dev_info.default_txportconf.ring_size; 2288 /* If driver default is zero, fall back on EAL default */ 2289 if (nb_tx_desc == 0) 2290 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2291 } 2292 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2293 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2294 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2295 RTE_ETHDEV_LOG(ERR, 2296 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2297 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2298 dev_info.tx_desc_lim.nb_min, 2299 dev_info.tx_desc_lim.nb_align); 2300 return -EINVAL; 2301 } 2302 2303 if (dev->data->dev_started && 2304 !(dev_info.dev_capa & 2305 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2306 return -EBUSY; 2307 2308 if (dev->data->dev_started && 2309 (dev->data->tx_queue_state[tx_queue_id] != 2310 RTE_ETH_QUEUE_STATE_STOPPED)) 2311 return -EBUSY; 2312 2313 eth_dev_txq_release(dev, tx_queue_id); 2314 2315 if (tx_conf == NULL) 2316 tx_conf = &dev_info.default_txconf; 2317 2318 local_conf = *tx_conf; 2319 2320 /* 2321 * If an offloading has already been enabled in 2322 * rte_eth_dev_configure(), it has been enabled on all queues, 2323 * so there is no need to enable it in this queue again. 
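 * (For example, if DEV_TX_OFFLOAD_MULTI_SEGS was set in
 * txmode.offloads at configure time, it is masked out of the
 * per-queue request below rather than being passed to the PMD twice.)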
* The local_conf.offloads input to underlying PMD only carries 2325 * those offloadings which are only enabled on this queue and 2326 * not enabled on all queues. 2327 */ 2328 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 2329 2330 /* 2331 * New added offloadings for this queue are those not enabled in 2332 * rte_eth_dev_configure() and they must be per-queue type. 2333 * A pure per-port offloading can't be enabled on a queue while 2334 * disabled on another queue. A pure per-port offloading can't 2335 * be enabled for any queue as new added one if it hasn't been 2336 * enabled in rte_eth_dev_configure(). 2337 */ 2338 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 2339 local_conf.offloads) { 2340 RTE_ETHDEV_LOG(ERR, 2341 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2342 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2343 port_id, tx_queue_id, local_conf.offloads, 2344 dev_info.tx_queue_offload_capa, 2345 __func__); 2346 return -EINVAL; 2347 } 2348 2349 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf); 2350 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 2351 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 2352 } 2353 2354 int 2355 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2356 uint16_t nb_tx_desc, 2357 const struct rte_eth_hairpin_conf *conf) 2358 { 2359 struct rte_eth_dev *dev; 2360 struct rte_eth_hairpin_cap cap; 2361 int i; 2362 int count; 2363 int ret; 2364 2365 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2366 dev = &rte_eth_devices[port_id]; 2367 2368 if (tx_queue_id >= dev->data->nb_tx_queues) { 2369 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id); 2370 return -EINVAL; 2371 } 2372 2373 if (conf == NULL) { 2374 RTE_ETHDEV_LOG(ERR, 2375 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n", 2376 port_id); 2377 return -EINVAL; 2378 } 2379 2380 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2381 if (ret != 0) 2382 return ret; 2383 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup, 2384 -ENOTSUP); 2385 /* if nb_tx_desc is zero use max number of desc from the driver.
*/ 2386 if (nb_tx_desc == 0) 2387 nb_tx_desc = cap.max_nb_desc; 2388 if (nb_tx_desc > cap.max_nb_desc) { 2389 RTE_ETHDEV_LOG(ERR, 2390 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu", 2391 nb_tx_desc, cap.max_nb_desc); 2392 return -EINVAL; 2393 } 2394 if (conf->peer_count > cap.max_tx_2_rx) { 2395 RTE_ETHDEV_LOG(ERR, 2396 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu", 2397 conf->peer_count, cap.max_tx_2_rx); 2398 return -EINVAL; 2399 } 2400 if (conf->peer_count == 0) { 2401 RTE_ETHDEV_LOG(ERR, 2402 "Invalid value for number of peers for Tx queue(=%u), should be: > 0", 2403 conf->peer_count); 2404 return -EINVAL; 2405 } 2406 for (i = 0, count = 0; i < dev->data->nb_tx_queues && 2407 cap.max_nb_queues != UINT16_MAX; i++) { 2408 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) 2409 count++; 2410 } 2411 if (count > cap.max_nb_queues) { 2412 RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d", 2413 cap.max_nb_queues); 2414 return -EINVAL; 2415 } 2416 if (dev->data->dev_started) 2417 return -EBUSY; 2418 eth_dev_txq_release(dev, tx_queue_id); 2419 ret = (*dev->dev_ops->tx_hairpin_queue_setup) 2420 (dev, tx_queue_id, nb_tx_desc, conf); 2421 if (ret == 0) 2422 dev->data->tx_queue_state[tx_queue_id] = 2423 RTE_ETH_QUEUE_STATE_HAIRPIN; 2424 return eth_err(port_id, ret); 2425 } 2426 2427 int 2428 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port) 2429 { 2430 struct rte_eth_dev *dev; 2431 int ret; 2432 2433 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2434 dev = &rte_eth_devices[tx_port]; 2435 2436 if (dev->data->dev_started == 0) { 2437 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port); 2438 return -EBUSY; 2439 } 2440 2441 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP); 2442 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port); 2443 if (ret != 0) 2444 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d" 2445 " to Rx %d (%d - all ports)\n", 2446 tx_port, rx_port, RTE_MAX_ETHPORTS); 2447 2448 return ret; 2449 } 2450 2451 int 2452 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port) 2453 { 2454 struct rte_eth_dev *dev; 2455 int ret; 2456 2457 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2458 dev = &rte_eth_devices[tx_port]; 2459 2460 if (dev->data->dev_started == 0) { 2461 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port); 2462 return -EBUSY; 2463 } 2464 2465 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP); 2466 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port); 2467 if (ret != 0) 2468 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2469 " from Rx %d (%d - all ports)\n", 2470 tx_port, rx_port, RTE_MAX_ETHPORTS); 2471 2472 return ret; 2473 } 2474 2475 int 2476 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2477 size_t len, uint32_t direction) 2478 { 2479 struct rte_eth_dev *dev; 2480 int ret; 2481 2482 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2483 dev = &rte_eth_devices[port_id]; 2484 2485 if (peer_ports == NULL) { 2486 RTE_ETHDEV_LOG(ERR, 2487 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2488 port_id); 2489 return -EINVAL; 2490 } 2491 2492 if (len == 0) { 2493 RTE_ETHDEV_LOG(ERR, 2494 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2495 port_id); 2496 return -EINVAL; 2497 } 2498 2499 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports, 2500 -ENOTSUP); 2501 2502 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2503 len, direction); 2504 if (ret < 0) 2505 
RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2506 port_id, direction ? "Rx" : "Tx"); 2507 2508 return ret; 2509 } 2510 2511 void 2512 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2513 void *userdata __rte_unused) 2514 { 2515 rte_pktmbuf_free_bulk(pkts, unsent); 2516 } 2517 2518 void 2519 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2520 void *userdata) 2521 { 2522 uint64_t *count = userdata; 2523 2524 rte_pktmbuf_free_bulk(pkts, unsent); 2525 *count += unsent; 2526 } 2527 2528 int 2529 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2530 buffer_tx_error_fn cbfn, void *userdata) 2531 { 2532 if (buffer == NULL) { 2533 RTE_ETHDEV_LOG(ERR, 2534 "Cannot set Tx buffer error callback to NULL buffer\n"); 2535 return -EINVAL; 2536 } 2537 2538 buffer->error_callback = cbfn; 2539 buffer->error_userdata = userdata; 2540 return 0; 2541 } 2542 2543 int 2544 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2545 { 2546 int ret = 0; 2547 2548 if (buffer == NULL) { 2549 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2550 return -EINVAL; 2551 } 2552 2553 buffer->size = size; 2554 if (buffer->error_callback == NULL) { 2555 ret = rte_eth_tx_buffer_set_err_callback( 2556 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2557 } 2558 2559 return ret; 2560 } 2561 2562 int 2563 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2564 { 2565 struct rte_eth_dev *dev; 2566 int ret; 2567 2568 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2569 dev = &rte_eth_devices[port_id]; 2570 2571 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2572 2573 /* Call driver to free pending mbufs. */ 2574 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2575 free_cnt); 2576 return eth_err(port_id, ret); 2577 } 2578 2579 int 2580 rte_eth_promiscuous_enable(uint16_t port_id) 2581 { 2582 struct rte_eth_dev *dev; 2583 int diag = 0; 2584 2585 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2586 dev = &rte_eth_devices[port_id]; 2587 2588 if (dev->data->promiscuous == 1) 2589 return 0; 2590 2591 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2592 2593 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2594 dev->data->promiscuous = (diag == 0) ? 
1 : 0; 2595 2596 return eth_err(port_id, diag); 2597 } 2598 2599 int 2600 rte_eth_promiscuous_disable(uint16_t port_id) 2601 { 2602 struct rte_eth_dev *dev; 2603 int diag = 0; 2604 2605 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2606 dev = &rte_eth_devices[port_id]; 2607 2608 if (dev->data->promiscuous == 0) 2609 return 0; 2610 2611 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2612 2613 dev->data->promiscuous = 0; 2614 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2615 if (diag != 0) 2616 dev->data->promiscuous = 1; 2617 2618 return eth_err(port_id, diag); 2619 } 2620 2621 int 2622 rte_eth_promiscuous_get(uint16_t port_id) 2623 { 2624 struct rte_eth_dev *dev; 2625 2626 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2627 dev = &rte_eth_devices[port_id]; 2628 2629 return dev->data->promiscuous; 2630 } 2631 2632 int 2633 rte_eth_allmulticast_enable(uint16_t port_id) 2634 { 2635 struct rte_eth_dev *dev; 2636 int diag; 2637 2638 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2639 dev = &rte_eth_devices[port_id]; 2640 2641 if (dev->data->all_multicast == 1) 2642 return 0; 2643 2644 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2645 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2646 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2647 2648 return eth_err(port_id, diag); 2649 } 2650 2651 int 2652 rte_eth_allmulticast_disable(uint16_t port_id) 2653 { 2654 struct rte_eth_dev *dev; 2655 int diag; 2656 2657 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2658 dev = &rte_eth_devices[port_id]; 2659 2660 if (dev->data->all_multicast == 0) 2661 return 0; 2662 2663 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2664 dev->data->all_multicast = 0; 2665 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2666 if (diag != 0) 2667 dev->data->all_multicast = 1; 2668 2669 return eth_err(port_id, diag); 2670 } 2671 2672 int 2673 rte_eth_allmulticast_get(uint16_t port_id) 2674 { 2675 struct rte_eth_dev *dev; 2676 2677 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2678 dev = &rte_eth_devices[port_id]; 2679 2680 return dev->data->all_multicast; 2681 } 2682 2683 int 2684 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2685 { 2686 struct rte_eth_dev *dev; 2687 2688 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2689 dev = &rte_eth_devices[port_id]; 2690 2691 if (eth_link == NULL) { 2692 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2693 port_id); 2694 return -EINVAL; 2695 } 2696 2697 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2698 rte_eth_linkstatus_get(dev, eth_link); 2699 else { 2700 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2701 (*dev->dev_ops->link_update)(dev, 1); 2702 *eth_link = dev->data->dev_link; 2703 } 2704 2705 return 0; 2706 } 2707 2708 int 2709 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2710 { 2711 struct rte_eth_dev *dev; 2712 2713 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2714 dev = &rte_eth_devices[port_id]; 2715 2716 if (eth_link == NULL) { 2717 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2718 port_id); 2719 return -EINVAL; 2720 } 2721 2722 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2723 rte_eth_linkstatus_get(dev, eth_link); 2724 else { 2725 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2726 (*dev->dev_ops->link_update)(dev, 0); 2727 *eth_link = dev->data->dev_link; 2728 } 2729 2730 return 0; 2731 } 2732 2733 const 
char * 2734 rte_eth_link_speed_to_str(uint32_t link_speed) 2735 { 2736 switch (link_speed) { 2737 case ETH_SPEED_NUM_NONE: return "None"; 2738 case ETH_SPEED_NUM_10M: return "10 Mbps"; 2739 case ETH_SPEED_NUM_100M: return "100 Mbps"; 2740 case ETH_SPEED_NUM_1G: return "1 Gbps"; 2741 case ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2742 case ETH_SPEED_NUM_5G: return "5 Gbps"; 2743 case ETH_SPEED_NUM_10G: return "10 Gbps"; 2744 case ETH_SPEED_NUM_20G: return "20 Gbps"; 2745 case ETH_SPEED_NUM_25G: return "25 Gbps"; 2746 case ETH_SPEED_NUM_40G: return "40 Gbps"; 2747 case ETH_SPEED_NUM_50G: return "50 Gbps"; 2748 case ETH_SPEED_NUM_56G: return "56 Gbps"; 2749 case ETH_SPEED_NUM_100G: return "100 Gbps"; 2750 case ETH_SPEED_NUM_200G: return "200 Gbps"; 2751 case ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2752 default: return "Invalid"; 2753 } 2754 } 2755 2756 int 2757 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2758 { 2759 if (str == NULL) { 2760 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2761 return -EINVAL; 2762 } 2763 2764 if (len == 0) { 2765 RTE_ETHDEV_LOG(ERR, 2766 "Cannot convert link to string with zero size\n"); 2767 return -EINVAL; 2768 } 2769 2770 if (eth_link == NULL) { 2771 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2772 return -EINVAL; 2773 } 2774 2775 if (eth_link->link_status == ETH_LINK_DOWN) 2776 return snprintf(str, len, "Link down"); 2777 else 2778 return snprintf(str, len, "Link up at %s %s %s", 2779 rte_eth_link_speed_to_str(eth_link->link_speed), 2780 (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ? 2781 "FDX" : "HDX", 2782 (eth_link->link_autoneg == ETH_LINK_AUTONEG) ? 2783 "Autoneg" : "Fixed"); 2784 } 2785 2786 int 2787 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2788 { 2789 struct rte_eth_dev *dev; 2790 2791 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2792 dev = &rte_eth_devices[port_id]; 2793 2794 if (stats == NULL) { 2795 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2796 port_id); 2797 return -EINVAL; 2798 } 2799 2800 memset(stats, 0, sizeof(*stats)); 2801 2802 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2803 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2804 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2805 } 2806 2807 int 2808 rte_eth_stats_reset(uint16_t port_id) 2809 { 2810 struct rte_eth_dev *dev; 2811 int ret; 2812 2813 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2814 dev = &rte_eth_devices[port_id]; 2815 2816 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2817 ret = (*dev->dev_ops->stats_reset)(dev); 2818 if (ret != 0) 2819 return eth_err(port_id, ret); 2820 2821 dev->data->rx_mbuf_alloc_failed = 0; 2822 2823 return 0; 2824 } 2825 2826 static inline int 2827 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2828 { 2829 uint16_t nb_rxqs, nb_txqs; 2830 int count; 2831 2832 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2833 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2834 2835 count = RTE_NB_STATS; 2836 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2837 count += nb_rxqs * RTE_NB_RXQ_STATS; 2838 count += nb_txqs * RTE_NB_TXQ_STATS; 2839 } 2840 2841 return count; 2842 } 2843 2844 static int 2845 eth_dev_get_xstats_count(uint16_t port_id) 2846 { 2847 struct rte_eth_dev *dev; 2848 int count; 2849 2850 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2851 dev = &rte_eth_devices[port_id]; 2852 if 
(dev->dev_ops->xstats_get_names != NULL) { 2853 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2854 if (count < 0) 2855 return eth_err(port_id, count); 2856 } else 2857 count = 0; 2858 2859 2860 count += eth_dev_get_xstats_basic_count(dev); 2861 2862 return count; 2863 } 2864 2865 int 2866 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2867 uint64_t *id) 2868 { 2869 int cnt_xstats, idx_xstat; 2870 2871 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2872 2873 if (xstat_name == NULL) { 2874 RTE_ETHDEV_LOG(ERR, 2875 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2876 port_id); 2877 return -ENOMEM; 2878 } 2879 2880 if (id == NULL) { 2881 RTE_ETHDEV_LOG(ERR, 2882 "Cannot get ethdev port %u xstats ID to NULL\n", 2883 port_id); 2884 return -ENOMEM; 2885 } 2886 2887 /* Get count */ 2888 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2889 if (cnt_xstats < 0) { 2890 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2891 return -ENODEV; 2892 } 2893 2894 /* Get id-name lookup table */ 2895 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2896 2897 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2898 port_id, xstats_names, cnt_xstats, NULL)) { 2899 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2900 return -1; 2901 } 2902 2903 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2904 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2905 *id = idx_xstat; 2906 return 0; 2907 }; 2908 } 2909 2910 return -EINVAL; 2911 } 2912 2913 /* retrieve basic stats names */ 2914 static int 2915 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2916 struct rte_eth_xstat_name *xstats_names) 2917 { 2918 int cnt_used_entries = 0; 2919 uint32_t idx, id_queue; 2920 uint16_t num_q; 2921 2922 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2923 strlcpy(xstats_names[cnt_used_entries].name, 2924 eth_dev_stats_strings[idx].name, 2925 sizeof(xstats_names[0].name)); 2926 cnt_used_entries++; 2927 } 2928 2929 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2930 return cnt_used_entries; 2931 2932 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2933 for (id_queue = 0; id_queue < num_q; id_queue++) { 2934 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2935 snprintf(xstats_names[cnt_used_entries].name, 2936 sizeof(xstats_names[0].name), 2937 "rx_q%u_%s", 2938 id_queue, eth_dev_rxq_stats_strings[idx].name); 2939 cnt_used_entries++; 2940 } 2941 2942 } 2943 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2944 for (id_queue = 0; id_queue < num_q; id_queue++) { 2945 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 2946 snprintf(xstats_names[cnt_used_entries].name, 2947 sizeof(xstats_names[0].name), 2948 "tx_q%u_%s", 2949 id_queue, eth_dev_txq_stats_strings[idx].name); 2950 cnt_used_entries++; 2951 } 2952 } 2953 return cnt_used_entries; 2954 } 2955 2956 /* retrieve ethdev extended statistics names */ 2957 int 2958 rte_eth_xstats_get_names_by_id(uint16_t port_id, 2959 struct rte_eth_xstat_name *xstats_names, unsigned int size, 2960 uint64_t *ids) 2961 { 2962 struct rte_eth_xstat_name *xstats_names_copy; 2963 unsigned int no_basic_stat_requested = 1; 2964 unsigned int no_ext_stat_requested = 1; 2965 unsigned int expected_entries; 2966 unsigned int basic_count; 2967 struct rte_eth_dev *dev; 2968 unsigned int i; 2969 int ret; 2970 2971 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2972 dev = &rte_eth_devices[port_id]; 2973 2974 basic_count = eth_dev_get_xstats_basic_count(dev); 2975 
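	/*
	 * The id space seen by the application is the basic stats first
	 * (basic_count entries: generic stats plus the optional per-queue
	 * stats) followed by the driver-specific xstats. For example
	 * (illustrative numbers), with basic_count == 8 a requested id of 10
	 * refers to the PMD's xstat index 2; the conversion below relies on
	 * this layout.
	 */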
ret = eth_dev_get_xstats_count(port_id); 2976 if (ret < 0) 2977 return ret; 2978 expected_entries = (unsigned int)ret; 2979 2980 /* Return max number of stats if no ids given */ 2981 if (!ids) { 2982 if (!xstats_names) 2983 return expected_entries; 2984 else if (xstats_names && size < expected_entries) 2985 return expected_entries; 2986 } 2987 2988 if (ids && !xstats_names) 2989 return -EINVAL; 2990 2991 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 2992 uint64_t ids_copy[size]; 2993 2994 for (i = 0; i < size; i++) { 2995 if (ids[i] < basic_count) { 2996 no_basic_stat_requested = 0; 2997 break; 2998 } 2999 3000 /* 3001 * Convert ids to xstats ids that PMD knows. 3002 * ids known by user are basic + extended stats. 3003 */ 3004 ids_copy[i] = ids[i] - basic_count; 3005 } 3006 3007 if (no_basic_stat_requested) 3008 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3009 ids_copy, xstats_names, size); 3010 } 3011 3012 /* Retrieve all stats */ 3013 if (!ids) { 3014 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3015 expected_entries); 3016 if (num_stats < 0 || num_stats > (int)expected_entries) 3017 return num_stats; 3018 else 3019 return expected_entries; 3020 } 3021 3022 xstats_names_copy = calloc(expected_entries, 3023 sizeof(struct rte_eth_xstat_name)); 3024 3025 if (!xstats_names_copy) { 3026 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3027 return -ENOMEM; 3028 } 3029 3030 if (ids) { 3031 for (i = 0; i < size; i++) { 3032 if (ids[i] >= basic_count) { 3033 no_ext_stat_requested = 0; 3034 break; 3035 } 3036 } 3037 } 3038 3039 /* Fill xstats_names_copy structure */ 3040 if (ids && no_ext_stat_requested) { 3041 eth_basic_stats_get_names(dev, xstats_names_copy); 3042 } else { 3043 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3044 expected_entries); 3045 if (ret < 0) { 3046 free(xstats_names_copy); 3047 return ret; 3048 } 3049 } 3050 3051 /* Filter stats */ 3052 for (i = 0; i < size; i++) { 3053 if (ids[i] >= expected_entries) { 3054 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3055 free(xstats_names_copy); 3056 return -1; 3057 } 3058 xstats_names[i] = xstats_names_copy[ids[i]]; 3059 } 3060 3061 free(xstats_names_copy); 3062 return size; 3063 } 3064 3065 int 3066 rte_eth_xstats_get_names(uint16_t port_id, 3067 struct rte_eth_xstat_name *xstats_names, 3068 unsigned int size) 3069 { 3070 struct rte_eth_dev *dev; 3071 int cnt_used_entries; 3072 int cnt_expected_entries; 3073 int cnt_driver_entries; 3074 3075 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3076 if (xstats_names == NULL || cnt_expected_entries < 0 || 3077 (int)size < cnt_expected_entries) 3078 return cnt_expected_entries; 3079 3080 /* port_id checked in eth_dev_get_xstats_count() */ 3081 dev = &rte_eth_devices[port_id]; 3082 3083 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3084 3085 if (dev->dev_ops->xstats_get_names != NULL) { 3086 /* If there are any driver-specific xstats, append them 3087 * to end of list. 
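 * The resulting array mirrors the id layout used by
 * rte_eth_xstats_get(): basic stats names first, driver-specific
 * names appended after them.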
*/ 3089 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 3090 dev, 3091 xstats_names + cnt_used_entries, 3092 size - cnt_used_entries); 3093 if (cnt_driver_entries < 0) 3094 return eth_err(port_id, cnt_driver_entries); 3095 cnt_used_entries += cnt_driver_entries; 3096 } 3097 3098 return cnt_used_entries; 3099 } 3100 3101 3102 static int 3103 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 3104 { 3105 struct rte_eth_dev *dev; 3106 struct rte_eth_stats eth_stats; 3107 unsigned int count = 0, i, q; 3108 uint64_t val, *stats_ptr; 3109 uint16_t nb_rxqs, nb_txqs; 3110 int ret; 3111 3112 ret = rte_eth_stats_get(port_id, &eth_stats); 3113 if (ret < 0) 3114 return ret; 3115 3116 dev = &rte_eth_devices[port_id]; 3117 3118 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3119 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3120 3121 /* global stats */ 3122 for (i = 0; i < RTE_NB_STATS; i++) { 3123 stats_ptr = RTE_PTR_ADD(&eth_stats, 3124 eth_dev_stats_strings[i].offset); 3125 val = *stats_ptr; 3126 xstats[count++].value = val; 3127 } 3128 3129 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3130 return count; 3131 3132 /* per-rxq stats */ 3133 for (q = 0; q < nb_rxqs; q++) { 3134 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 3135 stats_ptr = RTE_PTR_ADD(&eth_stats, 3136 eth_dev_rxq_stats_strings[i].offset + 3137 q * sizeof(uint64_t)); 3138 val = *stats_ptr; 3139 xstats[count++].value = val; 3140 } 3141 } 3142 3143 /* per-txq stats */ 3144 for (q = 0; q < nb_txqs; q++) { 3145 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 3146 stats_ptr = RTE_PTR_ADD(&eth_stats, 3147 eth_dev_txq_stats_strings[i].offset + 3148 q * sizeof(uint64_t)); 3149 val = *stats_ptr; 3150 xstats[count++].value = val; 3151 } 3152 } 3153 return count; 3154 } 3155 3156 /* retrieve ethdev extended statistics */ 3157 int 3158 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 3159 uint64_t *values, unsigned int size) 3160 { 3161 unsigned int no_basic_stat_requested = 1; 3162 unsigned int no_ext_stat_requested = 1; 3163 unsigned int num_xstats_filled; 3164 unsigned int basic_count; 3165 uint16_t expected_entries; 3166 struct rte_eth_dev *dev; 3167 unsigned int i; 3168 int ret; 3169 3170 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3171 dev = &rte_eth_devices[port_id]; 3172 3173 ret = eth_dev_get_xstats_count(port_id); 3174 if (ret < 0) 3175 return ret; 3176 expected_entries = (uint16_t)ret; 3177 struct rte_eth_xstat xstats[expected_entries]; 3178 basic_count = eth_dev_get_xstats_basic_count(dev); 3179 3180 /* Return max number of stats if no ids given */ 3181 if (!ids) { 3182 if (!values) 3183 return expected_entries; 3184 else if (values && size < expected_entries) 3185 return expected_entries; 3186 } 3187 3188 if (ids && !values) 3189 return -EINVAL; 3190 3191 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 3192 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); 3193 uint64_t ids_copy[size]; 3194 3195 for (i = 0; i < size; i++) { 3196 if (ids[i] < basic_count) { 3197 no_basic_stat_requested = 0; 3198 break; 3199 } 3200 3201 /* 3202 * Convert ids to xstats ids that PMD knows. 3203 * ids known by user are basic + extended stats.
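 * For example (illustrative numbers), with basic_count == 12 a
 * requested id of 15 is passed to the PMD as id 3.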
3204 */ 3205 ids_copy[i] = ids[i] - basic_count; 3206 } 3207 3208 if (no_basic_stat_requested) 3209 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3210 values, size); 3211 } 3212 3213 if (ids) { 3214 for (i = 0; i < size; i++) { 3215 if (ids[i] >= basic_count) { 3216 no_ext_stat_requested = 0; 3217 break; 3218 } 3219 } 3220 } 3221 3222 /* Fill the xstats structure */ 3223 if (ids && no_ext_stat_requested) 3224 ret = eth_basic_stats_get(port_id, xstats); 3225 else 3226 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3227 3228 if (ret < 0) 3229 return ret; 3230 num_xstats_filled = (unsigned int)ret; 3231 3232 /* Return all stats */ 3233 if (!ids) { 3234 for (i = 0; i < num_xstats_filled; i++) 3235 values[i] = xstats[i].value; 3236 return expected_entries; 3237 } 3238 3239 /* Filter stats */ 3240 for (i = 0; i < size; i++) { 3241 if (ids[i] >= expected_entries) { 3242 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3243 return -1; 3244 } 3245 values[i] = xstats[ids[i]].value; 3246 } 3247 return size; 3248 } 3249 3250 int 3251 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3252 unsigned int n) 3253 { 3254 struct rte_eth_dev *dev; 3255 unsigned int count = 0, i; 3256 signed int xcount = 0; 3257 uint16_t nb_rxqs, nb_txqs; 3258 int ret; 3259 3260 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3261 dev = &rte_eth_devices[port_id]; 3262 3263 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3264 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3265 3266 /* Return generic statistics */ 3267 count = RTE_NB_STATS; 3268 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) 3269 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS); 3270 3271 /* implemented by the driver */ 3272 if (dev->dev_ops->xstats_get != NULL) { 3273 /* Retrieve the xstats from the driver at the end of the 3274 * xstats struct. 3275 */ 3276 xcount = (*dev->dev_ops->xstats_get)(dev, 3277 xstats ? xstats + count : NULL, 3278 (n > count) ? 
n - count : 0); 3279 3280 if (xcount < 0) 3281 return eth_err(port_id, xcount); 3282 } 3283 3284 if (n < count + xcount || xstats == NULL) 3285 return count + xcount; 3286 3287 /* now fill the xstats structure */ 3288 ret = eth_basic_stats_get(port_id, xstats); 3289 if (ret < 0) 3290 return ret; 3291 count = ret; 3292 3293 for (i = 0; i < count; i++) 3294 xstats[i].id = i; 3295 /* add an offset to driver-specific stats */ 3296 for ( ; i < count + xcount; i++) 3297 xstats[i].id += count; 3298 3299 return count + xcount; 3300 } 3301 3302 /* reset ethdev extended statistics */ 3303 int 3304 rte_eth_xstats_reset(uint16_t port_id) 3305 { 3306 struct rte_eth_dev *dev; 3307 3308 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3309 dev = &rte_eth_devices[port_id]; 3310 3311 /* implemented by the driver */ 3312 if (dev->dev_ops->xstats_reset != NULL) 3313 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3314 3315 /* fallback to default */ 3316 return rte_eth_stats_reset(port_id); 3317 } 3318 3319 static int 3320 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3321 uint8_t stat_idx, uint8_t is_rx) 3322 { 3323 struct rte_eth_dev *dev; 3324 3325 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3326 dev = &rte_eth_devices[port_id]; 3327 3328 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3329 return -EINVAL; 3330 3331 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3332 return -EINVAL; 3333 3334 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3335 return -EINVAL; 3336 3337 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 3338 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3339 } 3340 3341 int 3342 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3343 uint8_t stat_idx) 3344 { 3345 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3346 tx_queue_id, 3347 stat_idx, STAT_QMAP_TX)); 3348 } 3349 3350 int 3351 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3352 uint8_t stat_idx) 3353 { 3354 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3355 rx_queue_id, 3356 stat_idx, STAT_QMAP_RX)); 3357 } 3358 3359 int 3360 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3361 { 3362 struct rte_eth_dev *dev; 3363 3364 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3365 dev = &rte_eth_devices[port_id]; 3366 3367 if (fw_version == NULL && fw_size > 0) { 3368 RTE_ETHDEV_LOG(ERR, 3369 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3370 port_id); 3371 return -EINVAL; 3372 } 3373 3374 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 3375 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3376 fw_version, fw_size)); 3377 } 3378 3379 int 3380 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3381 { 3382 struct rte_eth_dev *dev; 3383 const struct rte_eth_desc_lim lim = { 3384 .nb_max = UINT16_MAX, 3385 .nb_min = 0, 3386 .nb_align = 1, 3387 .nb_seg_max = UINT16_MAX, 3388 .nb_mtu_seg_max = UINT16_MAX, 3389 }; 3390 int diag; 3391 3392 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3393 dev = &rte_eth_devices[port_id]; 3394 3395 if (dev_info == NULL) { 3396 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3397 port_id); 3398 return -EINVAL; 3399 } 3400 3401 /* 3402 * Init dev_info before port_id check since caller does not have 3403 * return status and does not know if get is successful or not. 
3404 */ 3405 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3406 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3407 3408 dev_info->rx_desc_lim = lim; 3409 dev_info->tx_desc_lim = lim; 3410 dev_info->device = dev->device; 3411 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3412 dev_info->max_mtu = UINT16_MAX; 3413 3414 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 3415 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3416 if (diag != 0) { 3417 /* Cleanup already filled in device information */ 3418 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3419 return eth_err(port_id, diag); 3420 } 3421 3422 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3423 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3424 RTE_MAX_QUEUES_PER_PORT); 3425 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3426 RTE_MAX_QUEUES_PER_PORT); 3427 3428 dev_info->driver_name = dev->device->driver->name; 3429 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3430 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3431 3432 dev_info->dev_flags = &dev->data->dev_flags; 3433 3434 return 0; 3435 } 3436 3437 int 3438 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3439 { 3440 struct rte_eth_dev *dev; 3441 3442 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3443 dev = &rte_eth_devices[port_id]; 3444 3445 if (dev_conf == NULL) { 3446 RTE_ETHDEV_LOG(ERR, 3447 "Cannot get ethdev port %u configuration to NULL\n", 3448 port_id); 3449 return -EINVAL; 3450 } 3451 3452 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3453 3454 return 0; 3455 } 3456 3457 int 3458 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3459 uint32_t *ptypes, int num) 3460 { 3461 int i, j; 3462 struct rte_eth_dev *dev; 3463 const uint32_t *all_ptypes; 3464 3465 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3466 dev = &rte_eth_devices[port_id]; 3467 3468 if (ptypes == NULL && num > 0) { 3469 RTE_ETHDEV_LOG(ERR, 3470 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3471 port_id); 3472 return -EINVAL; 3473 } 3474 3475 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3476 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3477 3478 if (!all_ptypes) 3479 return 0; 3480 3481 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3482 if (all_ptypes[i] & ptype_mask) { 3483 if (j < num) 3484 ptypes[j] = all_ptypes[i]; 3485 j++; 3486 } 3487 3488 return j; 3489 } 3490 3491 int 3492 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3493 uint32_t *set_ptypes, unsigned int num) 3494 { 3495 const uint32_t valid_ptype_masks[] = { 3496 RTE_PTYPE_L2_MASK, 3497 RTE_PTYPE_L3_MASK, 3498 RTE_PTYPE_L4_MASK, 3499 RTE_PTYPE_TUNNEL_MASK, 3500 RTE_PTYPE_INNER_L2_MASK, 3501 RTE_PTYPE_INNER_L3_MASK, 3502 RTE_PTYPE_INNER_L4_MASK, 3503 }; 3504 const uint32_t *all_ptypes; 3505 struct rte_eth_dev *dev; 3506 uint32_t unused_mask; 3507 unsigned int i, j; 3508 int ret; 3509 3510 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3511 dev = &rte_eth_devices[port_id]; 3512 3513 if (num > 0 && set_ptypes == NULL) { 3514 RTE_ETHDEV_LOG(ERR, 3515 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3516 port_id); 3517 return -EINVAL; 3518 } 3519 3520 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3521 *dev->dev_ops->dev_ptypes_set == NULL) { 3522 ret = 0; 3523 goto ptype_unknown; 3524 } 3525 3526 if (ptype_mask == 
0) { 3527 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3528 ptype_mask); 3529 goto ptype_unknown; 3530 } 3531 3532 unused_mask = ptype_mask; 3533 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3534 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3535 if (mask && mask != valid_ptype_masks[i]) { 3536 ret = -EINVAL; 3537 goto ptype_unknown; 3538 } 3539 unused_mask &= ~valid_ptype_masks[i]; 3540 } 3541 3542 if (unused_mask) { 3543 ret = -EINVAL; 3544 goto ptype_unknown; 3545 } 3546 3547 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3548 if (all_ptypes == NULL) { 3549 ret = 0; 3550 goto ptype_unknown; 3551 } 3552 3553 /* 3554 * Accommodate as many set_ptypes as possible. If the supplied 3555 * set_ptypes array is insufficient fill it partially. 3556 */ 3557 for (i = 0, j = 0; set_ptypes != NULL && 3558 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3559 if (ptype_mask & all_ptypes[i]) { 3560 if (j < num - 1) { 3561 set_ptypes[j] = all_ptypes[i]; 3562 j++; 3563 continue; 3564 } 3565 break; 3566 } 3567 } 3568 3569 if (set_ptypes != NULL && j < num) 3570 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3571 3572 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3573 3574 ptype_unknown: 3575 if (num > 0) 3576 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3577 3578 return ret; 3579 } 3580 3581 int 3582 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3583 unsigned int num) 3584 { 3585 int32_t ret; 3586 struct rte_eth_dev *dev; 3587 struct rte_eth_dev_info dev_info; 3588 3589 if (ma == NULL) { 3590 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3591 return -EINVAL; 3592 } 3593 3594 /* will check for us that port_id is a valid one */ 3595 ret = rte_eth_dev_info_get(port_id, &dev_info); 3596 if (ret != 0) 3597 return ret; 3598 3599 dev = &rte_eth_devices[port_id]; 3600 num = RTE_MIN(dev_info.max_mac_addrs, num); 3601 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3602 3603 return num; 3604 } 3605 3606 int 3607 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3608 { 3609 struct rte_eth_dev *dev; 3610 3611 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3612 dev = &rte_eth_devices[port_id]; 3613 3614 if (mac_addr == NULL) { 3615 RTE_ETHDEV_LOG(ERR, 3616 "Cannot get ethdev port %u MAC address to NULL\n", 3617 port_id); 3618 return -EINVAL; 3619 } 3620 3621 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3622 3623 return 0; 3624 } 3625 3626 int 3627 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3628 { 3629 struct rte_eth_dev *dev; 3630 3631 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3632 dev = &rte_eth_devices[port_id]; 3633 3634 if (mtu == NULL) { 3635 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3636 port_id); 3637 return -EINVAL; 3638 } 3639 3640 *mtu = dev->data->mtu; 3641 return 0; 3642 } 3643 3644 int 3645 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3646 { 3647 int ret; 3648 struct rte_eth_dev_info dev_info; 3649 struct rte_eth_dev *dev; 3650 int is_jumbo_frame_capable = 0; 3651 3652 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3653 dev = &rte_eth_devices[port_id]; 3654 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3655 3656 /* 3657 * Check if the device supports dev_infos_get, if it does not 3658 * skip min_mtu/max_mtu validation here as this requires values 3659 * that are populated within the call to rte_eth_dev_info_get() 3660 * which relies on dev->dev_ops->dev_infos_get. 
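 * When the driver does provide them, the check below amounts to
 * requiring mtu + overhead_len <= dev_info.max_rx_pktlen, where
 * overhead_len is derived from the driver data (roughly
 * max_rx_pktlen - max_mtu when both are reported, otherwise the
 * Ethernet header plus CRC length). E.g., with max_rx_pktlen = 9600
 * and max_mtu = 9582 the overhead is 18 bytes, so an MTU of 9582 is
 * still accepted.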
3661 */ 3662 if (*dev->dev_ops->dev_infos_get != NULL) { 3663 uint16_t overhead_len; 3664 uint32_t frame_size; 3665 3666 ret = rte_eth_dev_info_get(port_id, &dev_info); 3667 if (ret != 0) 3668 return ret; 3669 3670 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu) 3671 return -EINVAL; 3672 3673 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 3674 dev_info.max_mtu); 3675 frame_size = mtu + overhead_len; 3676 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) 3677 return -EINVAL; 3678 3679 if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) != 0) 3680 is_jumbo_frame_capable = 1; 3681 } 3682 3683 if (mtu > RTE_ETHER_MTU && is_jumbo_frame_capable == 0) 3684 return -EINVAL; 3685 3686 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3687 if (ret == 0) { 3688 dev->data->mtu = mtu; 3689 3690 /* switch to jumbo mode if needed */ 3691 if (mtu > RTE_ETHER_MTU) 3692 dev->data->dev_conf.rxmode.offloads |= 3693 DEV_RX_OFFLOAD_JUMBO_FRAME; 3694 else 3695 dev->data->dev_conf.rxmode.offloads &= 3696 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 3697 } 3698 3699 return eth_err(port_id, ret); 3700 } 3701 3702 int 3703 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3704 { 3705 struct rte_eth_dev *dev; 3706 int ret; 3707 3708 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3709 dev = &rte_eth_devices[port_id]; 3710 3711 if (!(dev->data->dev_conf.rxmode.offloads & 3712 DEV_RX_OFFLOAD_VLAN_FILTER)) { 3713 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n", 3714 port_id); 3715 return -ENOSYS; 3716 } 3717 3718 if (vlan_id > 4095) { 3719 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3720 port_id, vlan_id); 3721 return -EINVAL; 3722 } 3723 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3724 3725 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3726 if (ret == 0) { 3727 struct rte_vlan_filter_conf *vfc; 3728 int vidx; 3729 int vbit; 3730 3731 vfc = &dev->data->vlan_filter_conf; 3732 vidx = vlan_id / 64; 3733 vbit = vlan_id % 64; 3734 3735 if (on) 3736 vfc->ids[vidx] |= UINT64_C(1) << vbit; 3737 else 3738 vfc->ids[vidx] &= ~(UINT64_C(1) << vbit); 3739 } 3740 3741 return eth_err(port_id, ret); 3742 } 3743 3744 int 3745 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3746 int on) 3747 { 3748 struct rte_eth_dev *dev; 3749 3750 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3751 dev = &rte_eth_devices[port_id]; 3752 3753 if (rx_queue_id >= dev->data->nb_rx_queues) { 3754 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3755 return -EINVAL; 3756 } 3757 3758 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3759 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3760 3761 return 0; 3762 } 3763 3764 int 3765 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3766 enum rte_vlan_type vlan_type, 3767 uint16_t tpid) 3768 { 3769 struct rte_eth_dev *dev; 3770 3771 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3772 dev = &rte_eth_devices[port_id]; 3773 3774 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3775 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3776 tpid)); 3777 } 3778 3779 int 3780 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3781 { 3782 struct rte_eth_dev_info dev_info; 3783 struct rte_eth_dev *dev; 3784 int ret = 0; 3785 int mask = 0; 3786 int cur, org = 0; 3787 uint64_t orig_offloads; 3788 uint64_t dev_offloads; 3789 uint64_t new_offloads; 3790 3791 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3792 dev = &rte_eth_devices[port_id]; 3793 3794 /* save original values in case of failure */ 3795 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3796 dev_offloads = orig_offloads; 3797 3798 /* check which option changed by application */ 3799 cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD); 3800 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP); 3801 if (cur != org) { 3802 if (cur) 3803 dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 3804 else 3805 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3806 mask |= ETH_VLAN_STRIP_MASK; 3807 } 3808 3809 cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD); 3810 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER); 3811 if (cur != org) { 3812 if (cur) 3813 dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 3814 else 3815 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3816 mask |= ETH_VLAN_FILTER_MASK; 3817 } 3818 3819 cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD); 3820 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND); 3821 if (cur != org) { 3822 if (cur) 3823 dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 3824 else 3825 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3826 mask |= ETH_VLAN_EXTEND_MASK; 3827 } 3828 3829 cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD); 3830 org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP); 3831 if (cur != org) { 3832 if (cur) 3833 dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3834 else 3835 dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3836 mask |= ETH_QINQ_STRIP_MASK; 3837 } 3838 3839 /*no change*/ 3840 if (mask == 0) 3841 return ret; 3842 3843 ret = rte_eth_dev_info_get(port_id, &dev_info); 3844 if (ret != 0) 3845 return ret; 3846 3847 /* Rx VLAN offloading must be within its device capabilities */ 3848 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3849 new_offloads = dev_offloads & ~orig_offloads; 3850 RTE_ETHDEV_LOG(ERR, 3851 "Ethdev port_id=%u requested new added VLAN offloads " 3852 "0x%" PRIx64 " must be within Rx offloads capabilities " 3853 "0x%" PRIx64 " in %s()\n", 3854 port_id, new_offloads, dev_info.rx_offload_capa, 3855 __func__); 3856 return -EINVAL; 3857 } 3858 3859 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3860 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3861 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3862 if (ret) { 3863 /* hit an error restore original values */ 3864 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3865 } 3866 3867 return eth_err(port_id, ret); 3868 } 3869 3870 int 3871 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3872 { 3873 struct rte_eth_dev *dev; 3874 uint64_t *dev_offloads; 3875 int ret = 0; 3876 3877 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3878 dev = &rte_eth_devices[port_id]; 3879 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3880 3881 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 3882 ret |= ETH_VLAN_STRIP_OFFLOAD; 3883 3884 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 3885 ret |= ETH_VLAN_FILTER_OFFLOAD; 3886 3887 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 3888 ret |= ETH_VLAN_EXTEND_OFFLOAD; 3889 3890 if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP) 3891 ret |= ETH_QINQ_STRIP_OFFLOAD; 3892 3893 return ret; 3894 } 3895 3896 int 3897 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3898 { 3899 struct rte_eth_dev *dev; 3900 3901 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3902 dev = &rte_eth_devices[port_id]; 3903 3904 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3905 return eth_err(port_id, 
(*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3906 } 3907 3908 int 3909 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3910 { 3911 struct rte_eth_dev *dev; 3912 3913 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3914 dev = &rte_eth_devices[port_id]; 3915 3916 if (fc_conf == NULL) { 3917 RTE_ETHDEV_LOG(ERR, 3918 "Cannot get ethdev port %u flow control config to NULL\n", 3919 port_id); 3920 return -EINVAL; 3921 } 3922 3923 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3924 memset(fc_conf, 0, sizeof(*fc_conf)); 3925 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3926 } 3927 3928 int 3929 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3930 { 3931 struct rte_eth_dev *dev; 3932 3933 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3934 dev = &rte_eth_devices[port_id]; 3935 3936 if (fc_conf == NULL) { 3937 RTE_ETHDEV_LOG(ERR, 3938 "Cannot set ethdev port %u flow control from NULL config\n", 3939 port_id); 3940 return -EINVAL; 3941 } 3942 3943 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 3944 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 3945 return -EINVAL; 3946 } 3947 3948 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 3949 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 3950 } 3951 3952 int 3953 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 3954 struct rte_eth_pfc_conf *pfc_conf) 3955 { 3956 struct rte_eth_dev *dev; 3957 3958 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3959 dev = &rte_eth_devices[port_id]; 3960 3961 if (pfc_conf == NULL) { 3962 RTE_ETHDEV_LOG(ERR, 3963 "Cannot set ethdev port %u priority flow control from NULL config\n", 3964 port_id); 3965 return -EINVAL; 3966 } 3967 3968 if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) { 3969 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 3970 return -EINVAL; 3971 } 3972 3973 /* High water, low water validation are device specific */ 3974 if (*dev->dev_ops->priority_flow_ctrl_set) 3975 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 3976 (dev, pfc_conf)); 3977 return -ENOTSUP; 3978 } 3979 3980 static int 3981 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 3982 uint16_t reta_size) 3983 { 3984 uint16_t i, num; 3985 3986 num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE; 3987 for (i = 0; i < num; i++) { 3988 if (reta_conf[i].mask) 3989 return 0; 3990 } 3991 3992 return -EINVAL; 3993 } 3994 3995 static int 3996 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 3997 uint16_t reta_size, 3998 uint16_t max_rxq) 3999 { 4000 uint16_t i, idx, shift; 4001 4002 if (max_rxq == 0) { 4003 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 4004 return -EINVAL; 4005 } 4006 4007 for (i = 0; i < reta_size; i++) { 4008 idx = i / RTE_RETA_GROUP_SIZE; 4009 shift = i % RTE_RETA_GROUP_SIZE; 4010 if ((reta_conf[idx].mask & (1ULL << shift)) && 4011 (reta_conf[idx].reta[shift] >= max_rxq)) { 4012 RTE_ETHDEV_LOG(ERR, 4013 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 4014 idx, shift, 4015 reta_conf[idx].reta[shift], max_rxq); 4016 return -EINVAL; 4017 } 4018 } 4019 4020 return 0; 4021 } 4022 4023 int 4024 rte_eth_dev_rss_reta_update(uint16_t port_id, 4025 struct rte_eth_rss_reta_entry64 *reta_conf, 4026 uint16_t reta_size) 4027 { 4028 struct rte_eth_dev *dev; 4029 int ret; 4030 4031 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4032 dev = 
&rte_eth_devices[port_id]; 4033 4034 if (reta_conf == NULL) { 4035 RTE_ETHDEV_LOG(ERR, 4036 "Cannot update ethdev port %u RSS RETA to NULL\n", 4037 port_id); 4038 return -EINVAL; 4039 } 4040 4041 if (reta_size == 0) { 4042 RTE_ETHDEV_LOG(ERR, 4043 "Cannot update ethdev port %u RSS RETA with zero size\n", 4044 port_id); 4045 return -EINVAL; 4046 } 4047 4048 /* Check mask bits */ 4049 ret = eth_check_reta_mask(reta_conf, reta_size); 4050 if (ret < 0) 4051 return ret; 4052 4053 /* Check entry value */ 4054 ret = eth_check_reta_entry(reta_conf, reta_size, 4055 dev->data->nb_rx_queues); 4056 if (ret < 0) 4057 return ret; 4058 4059 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 4060 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4061 reta_size)); 4062 } 4063 4064 int 4065 rte_eth_dev_rss_reta_query(uint16_t port_id, 4066 struct rte_eth_rss_reta_entry64 *reta_conf, 4067 uint16_t reta_size) 4068 { 4069 struct rte_eth_dev *dev; 4070 int ret; 4071 4072 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4073 dev = &rte_eth_devices[port_id]; 4074 4075 if (reta_conf == NULL) { 4076 RTE_ETHDEV_LOG(ERR, 4077 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4078 port_id); 4079 return -EINVAL; 4080 } 4081 4082 /* Check mask bits */ 4083 ret = eth_check_reta_mask(reta_conf, reta_size); 4084 if (ret < 0) 4085 return ret; 4086 4087 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 4088 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4089 reta_size)); 4090 } 4091 4092 int 4093 rte_eth_dev_rss_hash_update(uint16_t port_id, 4094 struct rte_eth_rss_conf *rss_conf) 4095 { 4096 struct rte_eth_dev *dev; 4097 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4098 int ret; 4099 4100 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4101 dev = &rte_eth_devices[port_id]; 4102 4103 if (rss_conf == NULL) { 4104 RTE_ETHDEV_LOG(ERR, 4105 "Cannot update ethdev port %u RSS hash from NULL config\n", 4106 port_id); 4107 return -EINVAL; 4108 } 4109 4110 ret = rte_eth_dev_info_get(port_id, &dev_info); 4111 if (ret != 0) 4112 return ret; 4113 4114 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4115 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4116 dev_info.flow_type_rss_offloads) { 4117 RTE_ETHDEV_LOG(ERR, 4118 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4119 port_id, rss_conf->rss_hf, 4120 dev_info.flow_type_rss_offloads); 4121 return -EINVAL; 4122 } 4123 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 4124 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4125 rss_conf)); 4126 } 4127 4128 int 4129 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4130 struct rte_eth_rss_conf *rss_conf) 4131 { 4132 struct rte_eth_dev *dev; 4133 4134 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4135 dev = &rte_eth_devices[port_id]; 4136 4137 if (rss_conf == NULL) { 4138 RTE_ETHDEV_LOG(ERR, 4139 "Cannot get ethdev port %u RSS hash config to NULL\n", 4140 port_id); 4141 return -EINVAL; 4142 } 4143 4144 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 4145 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4146 rss_conf)); 4147 } 4148 4149 int 4150 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4151 struct rte_eth_udp_tunnel *udp_tunnel) 4152 { 4153 struct rte_eth_dev *dev; 4154 4155 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4156 dev = &rte_eth_devices[port_id]; 4157 4158 if (udp_tunnel == NULL) { 4159 
RTE_ETHDEV_LOG(ERR, 4160 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4161 port_id); 4162 return -EINVAL; 4163 } 4164 4165 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4166 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4167 return -EINVAL; 4168 } 4169 4170 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 4171 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4172 udp_tunnel)); 4173 } 4174 4175 int 4176 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4177 struct rte_eth_udp_tunnel *udp_tunnel) 4178 { 4179 struct rte_eth_dev *dev; 4180 4181 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4182 dev = &rte_eth_devices[port_id]; 4183 4184 if (udp_tunnel == NULL) { 4185 RTE_ETHDEV_LOG(ERR, 4186 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4187 port_id); 4188 return -EINVAL; 4189 } 4190 4191 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4192 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4193 return -EINVAL; 4194 } 4195 4196 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 4197 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4198 udp_tunnel)); 4199 } 4200 4201 int 4202 rte_eth_led_on(uint16_t port_id) 4203 { 4204 struct rte_eth_dev *dev; 4205 4206 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4207 dev = &rte_eth_devices[port_id]; 4208 4209 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 4210 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4211 } 4212 4213 int 4214 rte_eth_led_off(uint16_t port_id) 4215 { 4216 struct rte_eth_dev *dev; 4217 4218 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4219 dev = &rte_eth_devices[port_id]; 4220 4221 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 4222 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4223 } 4224 4225 int 4226 rte_eth_fec_get_capability(uint16_t port_id, 4227 struct rte_eth_fec_capa *speed_fec_capa, 4228 unsigned int num) 4229 { 4230 struct rte_eth_dev *dev; 4231 int ret; 4232 4233 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4234 dev = &rte_eth_devices[port_id]; 4235 4236 if (speed_fec_capa == NULL && num > 0) { 4237 RTE_ETHDEV_LOG(ERR, 4238 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4239 port_id); 4240 return -EINVAL; 4241 } 4242 4243 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); 4244 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4245 4246 return ret; 4247 } 4248 4249 int 4250 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4251 { 4252 struct rte_eth_dev *dev; 4253 4254 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4255 dev = &rte_eth_devices[port_id]; 4256 4257 if (fec_capa == NULL) { 4258 RTE_ETHDEV_LOG(ERR, 4259 "Cannot get ethdev port %u current FEC mode to NULL\n", 4260 port_id); 4261 return -EINVAL; 4262 } 4263 4264 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); 4265 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4266 } 4267 4268 int 4269 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4270 { 4271 struct rte_eth_dev *dev; 4272 4273 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4274 dev = &rte_eth_devices[port_id]; 4275 4276 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); 4277 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4278 } 4279 4280 /* 4281 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4282 * an empty spot. 
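 * Used by rte_eth_dev_mac_addr_add() and rte_eth_dev_mac_addr_remove() below.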
4283 */ 4284 static int 4285 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4286 { 4287 struct rte_eth_dev_info dev_info; 4288 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4289 unsigned i; 4290 int ret; 4291 4292 ret = rte_eth_dev_info_get(port_id, &dev_info); 4293 if (ret != 0) 4294 return -1; 4295 4296 for (i = 0; i < dev_info.max_mac_addrs; i++) 4297 if (memcmp(addr, &dev->data->mac_addrs[i], 4298 RTE_ETHER_ADDR_LEN) == 0) 4299 return i; 4300 4301 return -1; 4302 } 4303 4304 static const struct rte_ether_addr null_mac_addr; 4305 4306 int 4307 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4308 uint32_t pool) 4309 { 4310 struct rte_eth_dev *dev; 4311 int index; 4312 uint64_t pool_mask; 4313 int ret; 4314 4315 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4316 dev = &rte_eth_devices[port_id]; 4317 4318 if (addr == NULL) { 4319 RTE_ETHDEV_LOG(ERR, 4320 "Cannot add ethdev port %u MAC address from NULL address\n", 4321 port_id); 4322 return -EINVAL; 4323 } 4324 4325 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 4326 4327 if (rte_is_zero_ether_addr(addr)) { 4328 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4329 port_id); 4330 return -EINVAL; 4331 } 4332 if (pool >= ETH_64_POOLS) { 4333 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1); 4334 return -EINVAL; 4335 } 4336 4337 index = eth_dev_get_mac_addr_index(port_id, addr); 4338 if (index < 0) { 4339 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4340 if (index < 0) { 4341 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4342 port_id); 4343 return -ENOSPC; 4344 } 4345 } else { 4346 pool_mask = dev->data->mac_pool_sel[index]; 4347 4348 /* Check if both MAC address and pool is already there, and do nothing */ 4349 if (pool_mask & (1ULL << pool)) 4350 return 0; 4351 } 4352 4353 /* Update NIC */ 4354 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4355 4356 if (ret == 0) { 4357 /* Update address in NIC data structure */ 4358 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4359 4360 /* Update pool bitmap in NIC data structure */ 4361 dev->data->mac_pool_sel[index] |= (1ULL << pool); 4362 } 4363 4364 return eth_err(port_id, ret); 4365 } 4366 4367 int 4368 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4369 { 4370 struct rte_eth_dev *dev; 4371 int index; 4372 4373 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4374 dev = &rte_eth_devices[port_id]; 4375 4376 if (addr == NULL) { 4377 RTE_ETHDEV_LOG(ERR, 4378 "Cannot remove ethdev port %u MAC address from NULL address\n", 4379 port_id); 4380 return -EINVAL; 4381 } 4382 4383 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); 4384 4385 index = eth_dev_get_mac_addr_index(port_id, addr); 4386 if (index == 0) { 4387 RTE_ETHDEV_LOG(ERR, 4388 "Port %u: Cannot remove default MAC address\n", 4389 port_id); 4390 return -EADDRINUSE; 4391 } else if (index < 0) 4392 return 0; /* Do nothing if address wasn't found */ 4393 4394 /* Update NIC */ 4395 (*dev->dev_ops->mac_addr_remove)(dev, index); 4396 4397 /* Update address in NIC data structure */ 4398 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4399 4400 /* reset pool bitmap */ 4401 dev->data->mac_pool_sel[index] = 0; 4402 4403 return 0; 4404 } 4405 4406 int 4407 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4408 { 4409 struct rte_eth_dev *dev; 4410 int ret; 4411 4412 
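	/*
	 * Validate the port and the new address first; mac_addrs[0] is only
	 * updated once the driver has accepted the address via mac_addr_set.
	 */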
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4413 dev = &rte_eth_devices[port_id]; 4414 4415 if (addr == NULL) { 4416 RTE_ETHDEV_LOG(ERR, 4417 "Cannot set ethdev port %u default MAC address from NULL address\n", 4418 port_id); 4419 return -EINVAL; 4420 } 4421 4422 if (!rte_is_valid_assigned_ether_addr(addr)) 4423 return -EINVAL; 4424 4425 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 4426 4427 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4428 if (ret < 0) 4429 return ret; 4430 4431 /* Update default address in NIC data structure */ 4432 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4433 4434 return 0; 4435 } 4436 4437 4438 /* 4439 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4440 * an empty spot. 4441 */ 4442 static int 4443 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4444 const struct rte_ether_addr *addr) 4445 { 4446 struct rte_eth_dev_info dev_info; 4447 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4448 unsigned i; 4449 int ret; 4450 4451 ret = rte_eth_dev_info_get(port_id, &dev_info); 4452 if (ret != 0) 4453 return -1; 4454 4455 if (!dev->data->hash_mac_addrs) 4456 return -1; 4457 4458 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4459 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4460 RTE_ETHER_ADDR_LEN) == 0) 4461 return i; 4462 4463 return -1; 4464 } 4465 4466 int 4467 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4468 uint8_t on) 4469 { 4470 int index; 4471 int ret; 4472 struct rte_eth_dev *dev; 4473 4474 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4475 dev = &rte_eth_devices[port_id]; 4476 4477 if (addr == NULL) { 4478 RTE_ETHDEV_LOG(ERR, 4479 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4480 port_id); 4481 return -EINVAL; 4482 } 4483 4484 if (rte_is_zero_ether_addr(addr)) { 4485 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4486 port_id); 4487 return -EINVAL; 4488 } 4489 4490 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4491 /* Check if it's already there, and do nothing */ 4492 if ((index >= 0) && on) 4493 return 0; 4494 4495 if (index < 0) { 4496 if (!on) { 4497 RTE_ETHDEV_LOG(ERR, 4498 "Port %u: the MAC address was not set in UTA\n", 4499 port_id); 4500 return -EINVAL; 4501 } 4502 4503 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4504 if (index < 0) { 4505 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4506 port_id); 4507 return -ENOSPC; 4508 } 4509 } 4510 4511 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 4512 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4513 if (ret == 0) { 4514 /* Update address in NIC data structure */ 4515 if (on) 4516 rte_ether_addr_copy(addr, 4517 &dev->data->hash_mac_addrs[index]); 4518 else 4519 rte_ether_addr_copy(&null_mac_addr, 4520 &dev->data->hash_mac_addrs[index]); 4521 } 4522 4523 return eth_err(port_id, ret); 4524 } 4525 4526 int 4527 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4528 { 4529 struct rte_eth_dev *dev; 4530 4531 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4532 dev = &rte_eth_devices[port_id]; 4533 4534 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 4535 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4536 on)); 4537 } 4538 4539 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4540 uint16_t tx_rate) 4541 { 4542 struct rte_eth_dev *dev; 4543 struct rte_eth_dev_info dev_info; 4544 struct rte_eth_link 
link; 4545 int ret; 4546 4547 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4548 dev = &rte_eth_devices[port_id]; 4549 4550 ret = rte_eth_dev_info_get(port_id, &dev_info); 4551 if (ret != 0) 4552 return ret; 4553 4554 link = dev->data->dev_link; 4555 4556 if (queue_idx > dev_info.max_tx_queues) { 4557 RTE_ETHDEV_LOG(ERR, 4558 "Set queue rate limit:port %u: invalid queue id=%u\n", 4559 port_id, queue_idx); 4560 return -EINVAL; 4561 } 4562 4563 if (tx_rate > link.link_speed) { 4564 RTE_ETHDEV_LOG(ERR, 4565 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 4566 tx_rate, link.link_speed); 4567 return -EINVAL; 4568 } 4569 4570 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); 4571 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 4572 queue_idx, tx_rate)); 4573 } 4574 4575 RTE_INIT(eth_dev_init_fp_ops) 4576 { 4577 uint32_t i; 4578 4579 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 4580 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 4581 } 4582 4583 RTE_INIT(eth_dev_init_cb_lists) 4584 { 4585 uint16_t i; 4586 4587 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 4588 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 4589 } 4590 4591 int 4592 rte_eth_dev_callback_register(uint16_t port_id, 4593 enum rte_eth_event_type event, 4594 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4595 { 4596 struct rte_eth_dev *dev; 4597 struct rte_eth_dev_callback *user_cb; 4598 uint16_t next_port; 4599 uint16_t last_port; 4600 4601 if (cb_fn == NULL) { 4602 RTE_ETHDEV_LOG(ERR, 4603 "Cannot register ethdev port %u callback from NULL\n", 4604 port_id); 4605 return -EINVAL; 4606 } 4607 4608 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4609 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4610 return -EINVAL; 4611 } 4612 4613 if (port_id == RTE_ETH_ALL) { 4614 next_port = 0; 4615 last_port = RTE_MAX_ETHPORTS - 1; 4616 } else { 4617 next_port = last_port = port_id; 4618 } 4619 4620 rte_spinlock_lock(&eth_dev_cb_lock); 4621 4622 do { 4623 dev = &rte_eth_devices[next_port]; 4624 4625 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 4626 if (user_cb->cb_fn == cb_fn && 4627 user_cb->cb_arg == cb_arg && 4628 user_cb->event == event) { 4629 break; 4630 } 4631 } 4632 4633 /* create a new callback.
*/ 4634 if (user_cb == NULL) { 4635 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 4636 sizeof(struct rte_eth_dev_callback), 0); 4637 if (user_cb != NULL) { 4638 user_cb->cb_fn = cb_fn; 4639 user_cb->cb_arg = cb_arg; 4640 user_cb->event = event; 4641 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 4642 user_cb, next); 4643 } else { 4644 rte_spinlock_unlock(&eth_dev_cb_lock); 4645 rte_eth_dev_callback_unregister(port_id, event, 4646 cb_fn, cb_arg); 4647 return -ENOMEM; 4648 } 4649 4650 } 4651 } while (++next_port <= last_port); 4652 4653 rte_spinlock_unlock(&eth_dev_cb_lock); 4654 return 0; 4655 } 4656 4657 int 4658 rte_eth_dev_callback_unregister(uint16_t port_id, 4659 enum rte_eth_event_type event, 4660 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4661 { 4662 int ret; 4663 struct rte_eth_dev *dev; 4664 struct rte_eth_dev_callback *cb, *next; 4665 uint16_t next_port; 4666 uint16_t last_port; 4667 4668 if (cb_fn == NULL) { 4669 RTE_ETHDEV_LOG(ERR, 4670 "Cannot unregister ethdev port %u callback from NULL\n", 4671 port_id); 4672 return -EINVAL; 4673 } 4674 4675 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4676 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4677 return -EINVAL; 4678 } 4679 4680 if (port_id == RTE_ETH_ALL) { 4681 next_port = 0; 4682 last_port = RTE_MAX_ETHPORTS - 1; 4683 } else { 4684 next_port = last_port = port_id; 4685 } 4686 4687 rte_spinlock_lock(&eth_dev_cb_lock); 4688 4689 do { 4690 dev = &rte_eth_devices[next_port]; 4691 ret = 0; 4692 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 4693 cb = next) { 4694 4695 next = TAILQ_NEXT(cb, next); 4696 4697 if (cb->cb_fn != cb_fn || cb->event != event || 4698 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 4699 continue; 4700 4701 /* 4702 * if this callback is not executing right now, 4703 * then remove it. 4704 */ 4705 if (cb->active == 0) { 4706 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 4707 rte_free(cb); 4708 } else { 4709 ret = -EAGAIN; 4710 } 4711 } 4712 } while (++next_port <= last_port); 4713 4714 rte_spinlock_unlock(&eth_dev_cb_lock); 4715 return ret; 4716 } 4717 4718 int 4719 rte_eth_dev_callback_process(struct rte_eth_dev *dev, 4720 enum rte_eth_event_type event, void *ret_param) 4721 { 4722 struct rte_eth_dev_callback *cb_lst; 4723 struct rte_eth_dev_callback dev_cb; 4724 int rc = 0; 4725 4726 rte_spinlock_lock(&eth_dev_cb_lock); 4727 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { 4728 if (cb_lst->cb_fn == NULL || cb_lst->event != event) 4729 continue; 4730 dev_cb = *cb_lst; 4731 cb_lst->active = 1; 4732 if (ret_param != NULL) 4733 dev_cb.ret_param = ret_param; 4734 4735 rte_spinlock_unlock(&eth_dev_cb_lock); 4736 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, 4737 dev_cb.cb_arg, dev_cb.ret_param); 4738 rte_spinlock_lock(&eth_dev_cb_lock); 4739 cb_lst->active = 0; 4740 } 4741 rte_spinlock_unlock(&eth_dev_cb_lock); 4742 return rc; 4743 } 4744 4745 void 4746 rte_eth_dev_probing_finish(struct rte_eth_dev *dev) 4747 { 4748 if (dev == NULL) 4749 return; 4750 4751 /* 4752 * for secondary process, at that point we expect device 4753 * to be already 'usable', so shared data and all function pointers 4754 * for fast-path devops have to be setup properly inside rte_eth_dev.
4755 */ 4756 if (rte_eal_process_type() == RTE_PROC_SECONDARY) 4757 eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev); 4758 4759 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); 4760 4761 dev->state = RTE_ETH_DEV_ATTACHED; 4762 } 4763 4764 int 4765 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4766 { 4767 uint32_t vec; 4768 struct rte_eth_dev *dev; 4769 struct rte_intr_handle *intr_handle; 4770 uint16_t qid; 4771 int rc; 4772 4773 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4774 dev = &rte_eth_devices[port_id]; 4775 4776 if (!dev->intr_handle) { 4777 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4778 return -ENOTSUP; 4779 } 4780 4781 intr_handle = dev->intr_handle; 4782 if (!intr_handle->intr_vec) { 4783 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4784 return -EPERM; 4785 } 4786 4787 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4788 vec = intr_handle->intr_vec[qid]; 4789 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4790 if (rc && rc != -EEXIST) { 4791 RTE_ETHDEV_LOG(ERR, 4792 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 4793 port_id, qid, op, epfd, vec); 4794 } 4795 } 4796 4797 return 0; 4798 } 4799 4800 int 4801 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4802 { 4803 struct rte_intr_handle *intr_handle; 4804 struct rte_eth_dev *dev; 4805 unsigned int efd_idx; 4806 uint32_t vec; 4807 int fd; 4808 4809 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4810 dev = &rte_eth_devices[port_id]; 4811 4812 if (queue_id >= dev->data->nb_rx_queues) { 4813 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 4814 return -1; 4815 } 4816 4817 if (!dev->intr_handle) { 4818 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4819 return -1; 4820 } 4821 4822 intr_handle = dev->intr_handle; 4823 if (!intr_handle->intr_vec) { 4824 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4825 return -1; 4826 } 4827 4828 vec = intr_handle->intr_vec[queue_id]; 4829 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
4830 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 4831 fd = intr_handle->efds[efd_idx]; 4832 4833 return fd; 4834 } 4835 4836 static inline int 4837 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id, 4838 const char *ring_name) 4839 { 4840 return snprintf(name, len, "eth_p%d_q%d_%s", 4841 port_id, queue_id, ring_name); 4842 } 4843 4844 const struct rte_memzone * 4845 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, 4846 uint16_t queue_id, size_t size, unsigned align, 4847 int socket_id) 4848 { 4849 char z_name[RTE_MEMZONE_NAMESIZE]; 4850 const struct rte_memzone *mz; 4851 int rc; 4852 4853 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4854 queue_id, ring_name); 4855 if (rc >= RTE_MEMZONE_NAMESIZE) { 4856 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4857 rte_errno = ENAMETOOLONG; 4858 return NULL; 4859 } 4860 4861 mz = rte_memzone_lookup(z_name); 4862 if (mz) { 4863 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) || 4864 size > mz->len || 4865 ((uintptr_t)mz->addr & (align - 1)) != 0) { 4866 RTE_ETHDEV_LOG(ERR, 4867 "memzone %s does not justify the requested attributes\n", 4868 mz->name); 4869 return NULL; 4870 } 4871 4872 return mz; 4873 } 4874 4875 return rte_memzone_reserve_aligned(z_name, size, socket_id, 4876 RTE_MEMZONE_IOVA_CONTIG, align); 4877 } 4878 4879 int 4880 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name, 4881 uint16_t queue_id) 4882 { 4883 char z_name[RTE_MEMZONE_NAMESIZE]; 4884 const struct rte_memzone *mz; 4885 int rc = 0; 4886 4887 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4888 queue_id, ring_name); 4889 if (rc >= RTE_MEMZONE_NAMESIZE) { 4890 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4891 return -ENAMETOOLONG; 4892 } 4893 4894 mz = rte_memzone_lookup(z_name); 4895 if (mz) 4896 rc = rte_memzone_free(mz); 4897 else 4898 rc = -ENOENT; 4899 4900 return rc; 4901 } 4902 4903 int 4904 rte_eth_dev_create(struct rte_device *device, const char *name, 4905 size_t priv_data_size, 4906 ethdev_bus_specific_init ethdev_bus_specific_init, 4907 void *bus_init_params, 4908 ethdev_init_t ethdev_init, void *init_params) 4909 { 4910 struct rte_eth_dev *ethdev; 4911 int retval; 4912 4913 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); 4914 4915 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4916 ethdev = rte_eth_dev_allocate(name); 4917 if (!ethdev) 4918 return -ENODEV; 4919 4920 if (priv_data_size) { 4921 ethdev->data->dev_private = rte_zmalloc_socket( 4922 name, priv_data_size, RTE_CACHE_LINE_SIZE, 4923 device->numa_node); 4924 4925 if (!ethdev->data->dev_private) { 4926 RTE_ETHDEV_LOG(ERR, 4927 "failed to allocate private data\n"); 4928 retval = -ENOMEM; 4929 goto probe_failed; 4930 } 4931 } 4932 } else { 4933 ethdev = rte_eth_dev_attach_secondary(name); 4934 if (!ethdev) { 4935 RTE_ETHDEV_LOG(ERR, 4936 "secondary process attach failed, ethdev doesn't exist\n"); 4937 return -ENODEV; 4938 } 4939 } 4940 4941 ethdev->device = device; 4942 4943 if (ethdev_bus_specific_init) { 4944 retval = ethdev_bus_specific_init(ethdev, bus_init_params); 4945 if (retval) { 4946 RTE_ETHDEV_LOG(ERR, 4947 "ethdev bus specific initialisation failed\n"); 4948 goto probe_failed; 4949 } 4950 } 4951 4952 retval = ethdev_init(ethdev, init_params); 4953 if (retval) { 4954 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n"); 4955 goto probe_failed; 4956 } 4957 4958 rte_eth_dev_probing_finish(ethdev); 4959 4960 return retval; 4961 4962 probe_failed: 4963 
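	/* Any failure after allocation lands here: release the port so its
	 * name can be reused by a later probe attempt.
	 */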
rte_eth_dev_release_port(ethdev); 4964 return retval; 4965 } 4966 4967 int 4968 rte_eth_dev_destroy(struct rte_eth_dev *ethdev, 4969 ethdev_uninit_t ethdev_uninit) 4970 { 4971 int ret; 4972 4973 ethdev = rte_eth_dev_allocated(ethdev->data->name); 4974 if (!ethdev) 4975 return -ENODEV; 4976 4977 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL); 4978 4979 ret = ethdev_uninit(ethdev); 4980 if (ret) 4981 return ret; 4982 4983 return rte_eth_dev_release_port(ethdev); 4984 } 4985 4986 int 4987 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 4988 int epfd, int op, void *data) 4989 { 4990 uint32_t vec; 4991 struct rte_eth_dev *dev; 4992 struct rte_intr_handle *intr_handle; 4993 int rc; 4994 4995 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4996 dev = &rte_eth_devices[port_id]; 4997 4998 if (queue_id >= dev->data->nb_rx_queues) { 4999 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 5000 return -EINVAL; 5001 } 5002 5003 if (!dev->intr_handle) { 5004 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 5005 return -ENOTSUP; 5006 } 5007 5008 intr_handle = dev->intr_handle; 5009 if (!intr_handle->intr_vec) { 5010 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 5011 return -EPERM; 5012 } 5013 5014 vec = intr_handle->intr_vec[queue_id]; 5015 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5016 if (rc && rc != -EEXIST) { 5017 RTE_ETHDEV_LOG(ERR, 5018 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 5019 port_id, queue_id, op, epfd, vec); 5020 return rc; 5021 } 5022 5023 return 0; 5024 } 5025 5026 int 5027 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5028 uint16_t queue_id) 5029 { 5030 struct rte_eth_dev *dev; 5031 int ret; 5032 5033 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5034 dev = &rte_eth_devices[port_id]; 5035 5036 ret = eth_dev_validate_rx_queue(dev, queue_id); 5037 if (ret != 0) 5038 return ret; 5039 5040 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 5041 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5042 } 5043 5044 int 5045 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5046 uint16_t queue_id) 5047 { 5048 struct rte_eth_dev *dev; 5049 int ret; 5050 5051 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5052 dev = &rte_eth_devices[port_id]; 5053 5054 ret = eth_dev_validate_rx_queue(dev, queue_id); 5055 if (ret != 0) 5056 return ret; 5057 5058 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 5059 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5060 } 5061 5062 5063 const struct rte_eth_rxtx_callback * 5064 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5065 rte_rx_callback_fn fn, void *user_param) 5066 { 5067 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5068 rte_errno = ENOTSUP; 5069 return NULL; 5070 #endif 5071 struct rte_eth_dev *dev; 5072 5073 /* check input parameters */ 5074 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5075 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5076 rte_errno = EINVAL; 5077 return NULL; 5078 } 5079 dev = &rte_eth_devices[port_id]; 5080 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5081 rte_errno = EINVAL; 5082 return NULL; 5083 } 5084 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5085 5086 if (cb == NULL) { 5087 rte_errno = ENOMEM; 5088 return NULL; 5089 } 5090 5091 cb->fn.rx = fn; 5092 cb->param = user_param; 5093 5094 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5095 /* Add the callbacks in fifo order.
*/ 5096 struct rte_eth_rxtx_callback *tail = 5097 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5098 5099 if (!tail) { 5100 /* Stores to cb->fn and cb->param should complete before 5101 * cb is visible to data plane. 5102 */ 5103 __atomic_store_n( 5104 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5105 cb, __ATOMIC_RELEASE); 5106 5107 } else { 5108 while (tail->next) 5109 tail = tail->next; 5110 /* Stores to cb->fn and cb->param should complete before 5111 * cb is visible to data plane. 5112 */ 5113 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5114 } 5115 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5116 5117 return cb; 5118 } 5119 5120 const struct rte_eth_rxtx_callback * 5121 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5122 rte_rx_callback_fn fn, void *user_param) 5123 { 5124 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5125 rte_errno = ENOTSUP; 5126 return NULL; 5127 #endif 5128 /* check input parameters */ 5129 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5130 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5131 rte_errno = EINVAL; 5132 return NULL; 5133 } 5134 5135 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5136 5137 if (cb == NULL) { 5138 rte_errno = ENOMEM; 5139 return NULL; 5140 } 5141 5142 cb->fn.rx = fn; 5143 cb->param = user_param; 5144 5145 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5146 /* Add the callbacks at first position */ 5147 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5148 /* Stores to cb->fn, cb->param and cb->next should complete before 5149 * cb is visible to data plane threads. 5150 */ 5151 __atomic_store_n( 5152 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5153 cb, __ATOMIC_RELEASE); 5154 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5155 5156 return cb; 5157 } 5158 5159 const struct rte_eth_rxtx_callback * 5160 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5161 rte_tx_callback_fn fn, void *user_param) 5162 { 5163 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5164 rte_errno = ENOTSUP; 5165 return NULL; 5166 #endif 5167 struct rte_eth_dev *dev; 5168 5169 /* check input parameters */ 5170 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5171 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5172 rte_errno = EINVAL; 5173 return NULL; 5174 } 5175 5176 dev = &rte_eth_devices[port_id]; 5177 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5178 rte_errno = EINVAL; 5179 return NULL; 5180 } 5181 5182 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5183 5184 if (cb == NULL) { 5185 rte_errno = ENOMEM; 5186 return NULL; 5187 } 5188 5189 cb->fn.tx = fn; 5190 cb->param = user_param; 5191 5192 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5193 /* Add the callbacks in fifo order. */ 5194 struct rte_eth_rxtx_callback *tail = 5195 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5196 5197 if (!tail) { 5198 /* Stores to cb->fn and cb->param should complete before 5199 * cb is visible to data plane. 5200 */ 5201 __atomic_store_n( 5202 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5203 cb, __ATOMIC_RELEASE); 5204 5205 } else { 5206 while (tail->next) 5207 tail = tail->next; 5208 /* Stores to cb->fn and cb->param should complete before 5209 * cb is visible to data plane.
5210 */ 5211 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5212 } 5213 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5214 5215 return cb; 5216 } 5217 5218 int 5219 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5220 const struct rte_eth_rxtx_callback *user_cb) 5221 { 5222 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5223 return -ENOTSUP; 5224 #endif 5225 /* Check input parameters. */ 5226 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5227 if (user_cb == NULL || 5228 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5229 return -EINVAL; 5230 5231 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5232 struct rte_eth_rxtx_callback *cb; 5233 struct rte_eth_rxtx_callback **prev_cb; 5234 int ret = -EINVAL; 5235 5236 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5237 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5238 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5239 cb = *prev_cb; 5240 if (cb == user_cb) { 5241 /* Remove the user cb from the callback list. */ 5242 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5243 ret = 0; 5244 break; 5245 } 5246 } 5247 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5248 5249 return ret; 5250 } 5251 5252 int 5253 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5254 const struct rte_eth_rxtx_callback *user_cb) 5255 { 5256 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5257 return -ENOTSUP; 5258 #endif 5259 /* Check input parameters. */ 5260 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5261 if (user_cb == NULL || 5262 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5263 return -EINVAL; 5264 5265 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5266 int ret = -EINVAL; 5267 struct rte_eth_rxtx_callback *cb; 5268 struct rte_eth_rxtx_callback **prev_cb; 5269 5270 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5271 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5272 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5273 cb = *prev_cb; 5274 if (cb == user_cb) { 5275 /* Remove the user cb from the callback list.
*/ 5276 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5277 ret = 0; 5278 break; 5279 } 5280 } 5281 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5282 5283 return ret; 5284 } 5285 5286 int 5287 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5288 struct rte_eth_rxq_info *qinfo) 5289 { 5290 struct rte_eth_dev *dev; 5291 5292 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5293 dev = &rte_eth_devices[port_id]; 5294 5295 if (queue_id >= dev->data->nb_rx_queues) { 5296 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 5297 return -EINVAL; 5298 } 5299 5300 if (qinfo == NULL) { 5301 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5302 port_id, queue_id); 5303 return -EINVAL; 5304 } 5305 5306 if (dev->data->rx_queues == NULL || 5307 dev->data->rx_queues[queue_id] == NULL) { 5308 RTE_ETHDEV_LOG(ERR, 5309 "Rx queue %"PRIu16" of device with port_id=%" 5310 PRIu16" has not been setup\n", 5311 queue_id, port_id); 5312 return -EINVAL; 5313 } 5314 5315 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5316 RTE_ETHDEV_LOG(INFO, 5317 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5318 queue_id, port_id); 5319 return -EINVAL; 5320 } 5321 5322 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 5323 5324 memset(qinfo, 0, sizeof(*qinfo)); 5325 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5326 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5327 5328 return 0; 5329 } 5330 5331 int 5332 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5333 struct rte_eth_txq_info *qinfo) 5334 { 5335 struct rte_eth_dev *dev; 5336 5337 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5338 dev = &rte_eth_devices[port_id]; 5339 5340 if (queue_id >= dev->data->nb_tx_queues) { 5341 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); 5342 return -EINVAL; 5343 } 5344 5345 if (qinfo == NULL) { 5346 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5347 port_id, queue_id); 5348 return -EINVAL; 5349 } 5350 5351 if (dev->data->tx_queues == NULL || 5352 dev->data->tx_queues[queue_id] == NULL) { 5353 RTE_ETHDEV_LOG(ERR, 5354 "Tx queue %"PRIu16" of device with port_id=%" 5355 PRIu16" has not been setup\n", 5356 queue_id, port_id); 5357 return -EINVAL; 5358 } 5359 5360 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5361 RTE_ETHDEV_LOG(INFO, 5362 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5363 queue_id, port_id); 5364 return -EINVAL; 5365 } 5366 5367 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 5368 5369 memset(qinfo, 0, sizeof(*qinfo)); 5370 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5371 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5372 5373 return 0; 5374 } 5375 5376 int 5377 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5378 struct rte_eth_burst_mode *mode) 5379 { 5380 struct rte_eth_dev *dev; 5381 5382 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5383 dev = &rte_eth_devices[port_id]; 5384 5385 if (queue_id >= dev->data->nb_rx_queues) { 5386 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 5387 return -EINVAL; 5388 } 5389 5390 if (mode == NULL) { 5391 RTE_ETHDEV_LOG(ERR, 5392 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5393 port_id, queue_id); 5394 return -EINVAL; 5395 } 5396 5397 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); 5398 memset(mode, 0, sizeof(*mode)); 5399 return eth_err(port_id, 5400
dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5401 } 5402 5403 int 5404 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5405 struct rte_eth_burst_mode *mode) 5406 { 5407 struct rte_eth_dev *dev; 5408 5409 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5410 dev = &rte_eth_devices[port_id]; 5411 5412 if (queue_id >= dev->data->nb_tx_queues) { 5413 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); 5414 return -EINVAL; 5415 } 5416 5417 if (mode == NULL) { 5418 RTE_ETHDEV_LOG(ERR, 5419 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5420 port_id, queue_id); 5421 return -EINVAL; 5422 } 5423 5424 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 5425 memset(mode, 0, sizeof(*mode)); 5426 return eth_err(port_id, 5427 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5428 } 5429 5430 int 5431 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5432 struct rte_power_monitor_cond *pmc) 5433 { 5434 struct rte_eth_dev *dev; 5435 5436 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5437 dev = &rte_eth_devices[port_id]; 5438 5439 if (queue_id >= dev->data->nb_rx_queues) { 5440 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5441 return -EINVAL; 5442 } 5443 5444 if (pmc == NULL) { 5445 RTE_ETHDEV_LOG(ERR, 5446 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5447 port_id, queue_id); 5448 return -EINVAL; 5449 } 5450 5451 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); 5452 return eth_err(port_id, 5453 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5454 } 5455 5456 int 5457 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5458 struct rte_ether_addr *mc_addr_set, 5459 uint32_t nb_mc_addr) 5460 { 5461 struct rte_eth_dev *dev; 5462 5463 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5464 dev = &rte_eth_devices[port_id]; 5465 5466 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 5467 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5468 mc_addr_set, nb_mc_addr)); 5469 } 5470 5471 int 5472 rte_eth_timesync_enable(uint16_t port_id) 5473 { 5474 struct rte_eth_dev *dev; 5475 5476 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5477 dev = &rte_eth_devices[port_id]; 5478 5479 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 5480 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5481 } 5482 5483 int 5484 rte_eth_timesync_disable(uint16_t port_id) 5485 { 5486 struct rte_eth_dev *dev; 5487 5488 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5489 dev = &rte_eth_devices[port_id]; 5490 5491 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 5492 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5493 } 5494 5495 int 5496 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5497 uint32_t flags) 5498 { 5499 struct rte_eth_dev *dev; 5500 5501 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5502 dev = &rte_eth_devices[port_id]; 5503 5504 if (timestamp == NULL) { 5505 RTE_ETHDEV_LOG(ERR, 5506 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5507 port_id); 5508 return -EINVAL; 5509 } 5510 5511 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 5512 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5513 (dev, timestamp, flags)); 5514 } 5515 5516 int 5517 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5518 struct timespec *timestamp) 5519 { 5520 struct rte_eth_dev *dev; 5521 5522 
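	/* Same pattern as the Rx timestamp query above: validate the port and
	 * the output pointer, then let the driver report the timestamp of the
	 * last transmitted PTP packet.
	 */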
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5523 dev = &rte_eth_devices[port_id]; 5524 5525 if (timestamp == NULL) { 5526 RTE_ETHDEV_LOG(ERR, 5527 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5528 port_id); 5529 return -EINVAL; 5530 } 5531 5532 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 5533 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5534 (dev, timestamp)); 5535 } 5536 5537 int 5538 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5539 { 5540 struct rte_eth_dev *dev; 5541 5542 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5543 dev = &rte_eth_devices[port_id]; 5544 5545 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 5546 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5547 } 5548 5549 int 5550 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5551 { 5552 struct rte_eth_dev *dev; 5553 5554 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5555 dev = &rte_eth_devices[port_id]; 5556 5557 if (timestamp == NULL) { 5558 RTE_ETHDEV_LOG(ERR, 5559 "Cannot read ethdev port %u timesync time to NULL\n", 5560 port_id); 5561 return -EINVAL; 5562 } 5563 5564 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 5565 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5566 timestamp)); 5567 } 5568 5569 int 5570 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5571 { 5572 struct rte_eth_dev *dev; 5573 5574 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5575 dev = &rte_eth_devices[port_id]; 5576 5577 if (timestamp == NULL) { 5578 RTE_ETHDEV_LOG(ERR, 5579 "Cannot write ethdev port %u timesync from NULL time\n", 5580 port_id); 5581 return -EINVAL; 5582 } 5583 5584 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 5585 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5586 timestamp)); 5587 } 5588 5589 int 5590 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5591 { 5592 struct rte_eth_dev *dev; 5593 5594 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5595 dev = &rte_eth_devices[port_id]; 5596 5597 if (clock == NULL) { 5598 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5599 port_id); 5600 return -EINVAL; 5601 } 5602 5603 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 5604 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5605 } 5606 5607 int 5608 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5609 { 5610 struct rte_eth_dev *dev; 5611 5612 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5613 dev = &rte_eth_devices[port_id]; 5614 5615 if (info == NULL) { 5616 RTE_ETHDEV_LOG(ERR, 5617 "Cannot get ethdev port %u register info to NULL\n", 5618 port_id); 5619 return -EINVAL; 5620 } 5621 5622 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 5623 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5624 } 5625 5626 int 5627 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5628 { 5629 struct rte_eth_dev *dev; 5630 5631 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5632 dev = &rte_eth_devices[port_id]; 5633 5634 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 5635 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5636 } 5637 5638 int 5639 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5640 { 5641 struct rte_eth_dev *dev; 5642 5643 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
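	/* The driver's get_eeprom callback performs the actual read;
	 * info->offset and info->length select the region, as for the module
	 * EEPROM helpers below.
	 */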
5644 dev = &rte_eth_devices[port_id]; 5645 5646 if (info == NULL) { 5647 RTE_ETHDEV_LOG(ERR, 5648 "Cannot get ethdev port %u EEPROM info to NULL\n", 5649 port_id); 5650 return -EINVAL; 5651 } 5652 5653 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 5654 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5655 } 5656 5657 int 5658 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5659 { 5660 struct rte_eth_dev *dev; 5661 5662 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5663 dev = &rte_eth_devices[port_id]; 5664 5665 if (info == NULL) { 5666 RTE_ETHDEV_LOG(ERR, 5667 "Cannot set ethdev port %u EEPROM from NULL info\n", 5668 port_id); 5669 return -EINVAL; 5670 } 5671 5672 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 5673 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5674 } 5675 5676 int 5677 rte_eth_dev_get_module_info(uint16_t port_id, 5678 struct rte_eth_dev_module_info *modinfo) 5679 { 5680 struct rte_eth_dev *dev; 5681 5682 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5683 dev = &rte_eth_devices[port_id]; 5684 5685 if (modinfo == NULL) { 5686 RTE_ETHDEV_LOG(ERR, 5687 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5688 port_id); 5689 return -EINVAL; 5690 } 5691 5692 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 5693 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5694 } 5695 5696 int 5697 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5698 struct rte_dev_eeprom_info *info) 5699 { 5700 struct rte_eth_dev *dev; 5701 5702 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5703 dev = &rte_eth_devices[port_id]; 5704 5705 if (info == NULL) { 5706 RTE_ETHDEV_LOG(ERR, 5707 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5708 port_id); 5709 return -EINVAL; 5710 } 5711 5712 if (info->data == NULL) { 5713 RTE_ETHDEV_LOG(ERR, 5714 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5715 port_id); 5716 return -EINVAL; 5717 } 5718 5719 if (info->length == 0) { 5720 RTE_ETHDEV_LOG(ERR, 5721 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5722 port_id); 5723 return -EINVAL; 5724 } 5725 5726 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 5727 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5728 } 5729 5730 int 5731 rte_eth_dev_get_dcb_info(uint16_t port_id, 5732 struct rte_eth_dcb_info *dcb_info) 5733 { 5734 struct rte_eth_dev *dev; 5735 5736 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5737 dev = &rte_eth_devices[port_id]; 5738 5739 if (dcb_info == NULL) { 5740 RTE_ETHDEV_LOG(ERR, 5741 "Cannot get ethdev port %u DCB info to NULL\n", 5742 port_id); 5743 return -EINVAL; 5744 } 5745 5746 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5747 5748 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 5749 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5750 } 5751 5752 static void 5753 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5754 const struct rte_eth_desc_lim *desc_lim) 5755 { 5756 if (desc_lim->nb_align != 0) 5757 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5758 5759 if (desc_lim->nb_max != 0) 5760 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5761 5762 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5763 } 5764 5765 int 5766 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5767 uint16_t *nb_rx_desc, 5768 uint16_t *nb_tx_desc) 5769 { 5770 struct rte_eth_dev_info dev_info; 5771 int ret; 5772 5773 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
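	/*
	 * Clamp the caller's ring sizes against the limits the driver reports
	 * in dev_info: eth_dev_adjust_nb_desc() aligns up to nb_align and then
	 * bounds the result by nb_max/nb_min.
	 */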
5774 5775 ret = rte_eth_dev_info_get(port_id, &dev_info); 5776 if (ret != 0) 5777 return ret; 5778 5779 if (nb_rx_desc != NULL) 5780 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5781 5782 if (nb_tx_desc != NULL) 5783 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5784 5785 return 0; 5786 } 5787 5788 int 5789 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5790 struct rte_eth_hairpin_cap *cap) 5791 { 5792 struct rte_eth_dev *dev; 5793 5794 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5795 dev = &rte_eth_devices[port_id]; 5796 5797 if (cap == NULL) { 5798 RTE_ETHDEV_LOG(ERR, 5799 "Cannot get ethdev port %u hairpin capability to NULL\n", 5800 port_id); 5801 return -EINVAL; 5802 } 5803 5804 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 5805 memset(cap, 0, sizeof(*cap)); 5806 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5807 } 5808 5809 int 5810 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5811 { 5812 if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5813 return 1; 5814 return 0; 5815 } 5816 5817 int 5818 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5819 { 5820 if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5821 return 1; 5822 return 0; 5823 } 5824 5825 int 5826 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5827 { 5828 struct rte_eth_dev *dev; 5829 5830 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5831 dev = &rte_eth_devices[port_id]; 5832 5833 if (pool == NULL) { 5834 RTE_ETHDEV_LOG(ERR, 5835 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5836 port_id); 5837 return -EINVAL; 5838 } 5839 5840 if (*dev->dev_ops->pool_ops_supported == NULL) 5841 return 1; /* all pools are supported */ 5842 5843 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5844 } 5845 5846 /** 5847 * A set of values to describe the possible states of a switch domain. 5848 */ 5849 enum rte_eth_switch_domain_state { 5850 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, 5851 RTE_ETH_SWITCH_DOMAIN_ALLOCATED 5852 }; 5853 5854 /** 5855 * Array of switch domains available for allocation. Array is sized to 5856 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than 5857 * ethdev ports in a single process. 
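 * Domains are handed out by rte_eth_switch_domain_alloc() and returned with
 * rte_eth_switch_domain_free(); a PMD typically allocates one domain per
 * physical switch when probing representors and frees it again on close.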
5858 */ 5859 static struct rte_eth_dev_switch { 5860 enum rte_eth_switch_domain_state state; 5861 } eth_dev_switch_domains[RTE_MAX_ETHPORTS]; 5862 5863 int 5864 rte_eth_switch_domain_alloc(uint16_t *domain_id) 5865 { 5866 uint16_t i; 5867 5868 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 5869 5870 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 5871 if (eth_dev_switch_domains[i].state == 5872 RTE_ETH_SWITCH_DOMAIN_UNUSED) { 5873 eth_dev_switch_domains[i].state = 5874 RTE_ETH_SWITCH_DOMAIN_ALLOCATED; 5875 *domain_id = i; 5876 return 0; 5877 } 5878 } 5879 5880 return -ENOSPC; 5881 } 5882 5883 int 5884 rte_eth_switch_domain_free(uint16_t domain_id) 5885 { 5886 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || 5887 domain_id >= RTE_MAX_ETHPORTS) 5888 return -EINVAL; 5889 5890 if (eth_dev_switch_domains[domain_id].state != 5891 RTE_ETH_SWITCH_DOMAIN_ALLOCATED) 5892 return -EINVAL; 5893 5894 eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED; 5895 5896 return 0; 5897 } 5898 5899 static int 5900 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) 5901 { 5902 int state; 5903 struct rte_kvargs_pair *pair; 5904 char *letter; 5905 5906 arglist->str = strdup(str_in); 5907 if (arglist->str == NULL) 5908 return -ENOMEM; 5909 5910 letter = arglist->str; 5911 state = 0; 5912 arglist->count = 0; 5913 pair = &arglist->pairs[0]; 5914 while (1) { 5915 switch (state) { 5916 case 0: /* Initial */ 5917 if (*letter == '=') 5918 return -EINVAL; 5919 else if (*letter == '\0') 5920 return 0; 5921 5922 state = 1; 5923 pair->key = letter; 5924 /* fall-thru */ 5925 5926 case 1: /* Parsing key */ 5927 if (*letter == '=') { 5928 *letter = '\0'; 5929 pair->value = letter + 1; 5930 state = 2; 5931 } else if (*letter == ',' || *letter == '\0') 5932 return -EINVAL; 5933 break; 5934 5935 5936 case 2: /* Parsing value */ 5937 if (*letter == '[') 5938 state = 3; 5939 else if (*letter == ',') { 5940 *letter = '\0'; 5941 arglist->count++; 5942 pair = &arglist->pairs[arglist->count]; 5943 state = 0; 5944 } else if (*letter == '\0') { 5945 letter--; 5946 arglist->count++; 5947 pair = &arglist->pairs[arglist->count]; 5948 state = 0; 5949 } 5950 break; 5951 5952 case 3: /* Parsing list */ 5953 if (*letter == ']') 5954 state = 2; 5955 else if (*letter == '\0') 5956 return -EINVAL; 5957 break; 5958 } 5959 letter++; 5960 } 5961 } 5962 5963 int 5964 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) 5965 { 5966 struct rte_kvargs args; 5967 struct rte_kvargs_pair *pair; 5968 unsigned int i; 5969 int result = 0; 5970 5971 memset(eth_da, 0, sizeof(*eth_da)); 5972 5973 result = eth_dev_devargs_tokenise(&args, dargs); 5974 if (result < 0) 5975 goto parse_cleanup; 5976 5977 for (i = 0; i < args.count; i++) { 5978 pair = &args.pairs[i]; 5979 if (strcmp("representor", pair->key) == 0) { 5980 if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) { 5981 RTE_LOG(ERR, EAL, "duplicated representor key: %s\n", 5982 dargs); 5983 result = -1; 5984 goto parse_cleanup; 5985 } 5986 result = rte_eth_devargs_parse_representor_ports( 5987 pair->value, eth_da); 5988 if (result < 0) 5989 goto parse_cleanup; 5990 } 5991 } 5992 5993 parse_cleanup: 5994 if (args.str) 5995 free(args.str); 5996 5997 return result; 5998 } 5999 6000 int 6001 rte_eth_representor_id_get(uint16_t port_id, 6002 enum rte_eth_representor_type type, 6003 int controller, int pf, int representor_port, 6004 uint16_t *repr_id) 6005 { 6006 int ret, n, count; 6007 uint32_t i; 6008 struct rte_eth_representor_info *info = NULL; 6009 size_t 
size; 6010 6011 if (type == RTE_ETH_REPRESENTOR_NONE) 6012 return 0; 6013 if (repr_id == NULL) 6014 return -EINVAL; 6015 6016 /* Get PMD representor range info. */ 6017 ret = rte_eth_representor_info_get(port_id, NULL); 6018 if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF && 6019 controller == -1 && pf == -1) { 6020 /* Direct mapping for legacy VF representor. */ 6021 *repr_id = representor_port; 6022 return 0; 6023 } else if (ret < 0) { 6024 return ret; 6025 } 6026 n = ret; 6027 size = sizeof(*info) + n * sizeof(info->ranges[0]); 6028 info = calloc(1, size); 6029 if (info == NULL) 6030 return -ENOMEM; 6031 info->nb_ranges_alloc = n; 6032 ret = rte_eth_representor_info_get(port_id, info); 6033 if (ret < 0) 6034 goto out; 6035 6036 /* Default controller and pf to caller. */ 6037 if (controller == -1) 6038 controller = info->controller; 6039 if (pf == -1) 6040 pf = info->pf; 6041 6042 /* Locate representor ID. */ 6043 ret = -ENOENT; 6044 for (i = 0; i < info->nb_ranges; ++i) { 6045 if (info->ranges[i].type != type) 6046 continue; 6047 if (info->ranges[i].controller != controller) 6048 continue; 6049 if (info->ranges[i].id_end < info->ranges[i].id_base) { 6050 RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n", 6051 port_id, info->ranges[i].id_base, 6052 info->ranges[i].id_end, i); 6053 continue; 6054 6055 } 6056 count = info->ranges[i].id_end - info->ranges[i].id_base + 1; 6057 switch (info->ranges[i].type) { 6058 case RTE_ETH_REPRESENTOR_PF: 6059 if (pf < info->ranges[i].pf || 6060 pf >= info->ranges[i].pf + count) 6061 continue; 6062 *repr_id = info->ranges[i].id_base + 6063 (pf - info->ranges[i].pf); 6064 ret = 0; 6065 goto out; 6066 case RTE_ETH_REPRESENTOR_VF: 6067 if (info->ranges[i].pf != pf) 6068 continue; 6069 if (representor_port < info->ranges[i].vf || 6070 representor_port >= info->ranges[i].vf + count) 6071 continue; 6072 *repr_id = info->ranges[i].id_base + 6073 (representor_port - info->ranges[i].vf); 6074 ret = 0; 6075 goto out; 6076 case RTE_ETH_REPRESENTOR_SF: 6077 if (info->ranges[i].pf != pf) 6078 continue; 6079 if (representor_port < info->ranges[i].sf || 6080 representor_port >= info->ranges[i].sf + count) 6081 continue; 6082 *repr_id = info->ranges[i].id_base + 6083 (representor_port - info->ranges[i].sf); 6084 ret = 0; 6085 goto out; 6086 default: 6087 break; 6088 } 6089 } 6090 out: 6091 free(info); 6092 return ret; 6093 } 6094 6095 static int 6096 eth_dev_handle_port_list(const char *cmd __rte_unused, 6097 const char *params __rte_unused, 6098 struct rte_tel_data *d) 6099 { 6100 int port_id; 6101 6102 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 6103 RTE_ETH_FOREACH_DEV(port_id) 6104 rte_tel_data_add_array_int(d, port_id); 6105 return 0; 6106 } 6107 6108 static void 6109 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 6110 const char *stat_name) 6111 { 6112 int q; 6113 struct rte_tel_data *q_data = rte_tel_data_alloc(); 6114 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL); 6115 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 6116 rte_tel_data_add_array_u64(q_data, q_stats[q]); 6117 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 6118 } 6119 6120 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s) 6121 6122 static int 6123 eth_dev_handle_port_stats(const char *cmd __rte_unused, 6124 const char *params, 6125 struct rte_tel_data *d) 6126 { 6127 struct rte_eth_stats stats; 6128 int port_id, ret; 6129 6130 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6131 

#define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)

static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
			  const char *params,
			  struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}

static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
			   const char *params,
			   struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			     sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
					  eth_xstats[i].value);
	free(eth_xstats);
	return 0;
}
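
/*
 * Illustrative sketch only: an application that wants a single extended
 * statistic, rather than the full set dumped by the handler above, can
 * resolve its ID once and then query it by ID. The function name and the
 * chosen counter name are examples, not part of this file.
 */
static int __rte_unused
eth_dev_example_read_one_xstat(uint16_t port_id, uint64_t *value)
{
	uint64_t id;
	int ret;

	ret = rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id);
	if (ret < 0)
		return ret;

	ret = rte_eth_xstats_get_by_id(port_id, &id, value, 1);
	if (ret < 0)
		return ret;

	return 0;
}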
6243 "full-duplex" : "half-duplex"); 6244 return 0; 6245 } 6246 6247 static int 6248 eth_dev_handle_port_info(const char *cmd __rte_unused, 6249 const char *params, 6250 struct rte_tel_data *d) 6251 { 6252 struct rte_tel_data *rxq_state, *txq_state; 6253 char mac_addr[RTE_ETHER_ADDR_LEN]; 6254 struct rte_eth_dev *eth_dev; 6255 char *end_param; 6256 int port_id, i; 6257 6258 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6259 return -1; 6260 6261 port_id = strtoul(params, &end_param, 0); 6262 if (*end_param != '\0') 6263 RTE_ETHDEV_LOG(NOTICE, 6264 "Extra parameters passed to ethdev telemetry command, ignoring"); 6265 6266 if (!rte_eth_dev_is_valid_port(port_id)) 6267 return -EINVAL; 6268 6269 eth_dev = &rte_eth_devices[port_id]; 6270 if (!eth_dev) 6271 return -EINVAL; 6272 6273 rxq_state = rte_tel_data_alloc(); 6274 if (!rxq_state) 6275 return -ENOMEM; 6276 6277 txq_state = rte_tel_data_alloc(); 6278 if (!txq_state) 6279 return -ENOMEM; 6280 6281 rte_tel_data_start_dict(d); 6282 rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); 6283 rte_tel_data_add_dict_int(d, "state", eth_dev->state); 6284 rte_tel_data_add_dict_int(d, "nb_rx_queues", 6285 eth_dev->data->nb_rx_queues); 6286 rte_tel_data_add_dict_int(d, "nb_tx_queues", 6287 eth_dev->data->nb_tx_queues); 6288 rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); 6289 rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); 6290 rte_tel_data_add_dict_int(d, "rx_mbuf_size_min", 6291 eth_dev->data->min_rx_buf_size); 6292 rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail", 6293 eth_dev->data->rx_mbuf_alloc_failed); 6294 snprintf(mac_addr, RTE_ETHER_ADDR_LEN, "%02x:%02x:%02x:%02x:%02x:%02x", 6295 eth_dev->data->mac_addrs->addr_bytes[0], 6296 eth_dev->data->mac_addrs->addr_bytes[1], 6297 eth_dev->data->mac_addrs->addr_bytes[2], 6298 eth_dev->data->mac_addrs->addr_bytes[3], 6299 eth_dev->data->mac_addrs->addr_bytes[4], 6300 eth_dev->data->mac_addrs->addr_bytes[5]); 6301 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); 6302 rte_tel_data_add_dict_int(d, "promiscuous", 6303 eth_dev->data->promiscuous); 6304 rte_tel_data_add_dict_int(d, "scattered_rx", 6305 eth_dev->data->scattered_rx); 6306 rte_tel_data_add_dict_int(d, "all_multicast", 6307 eth_dev->data->all_multicast); 6308 rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started); 6309 rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro); 6310 rte_tel_data_add_dict_int(d, "dev_configured", 6311 eth_dev->data->dev_configured); 6312 6313 rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL); 6314 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 6315 rte_tel_data_add_array_int(rxq_state, 6316 eth_dev->data->rx_queue_state[i]); 6317 6318 rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL); 6319 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 6320 rte_tel_data_add_array_int(txq_state, 6321 eth_dev->data->tx_queue_state[i]); 6322 6323 rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); 6324 rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); 6325 rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); 6326 rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags); 6327 rte_tel_data_add_dict_int(d, "rx_offloads", 6328 eth_dev->data->dev_conf.rxmode.offloads); 6329 rte_tel_data_add_dict_int(d, "tx_offloads", 6330 eth_dev->data->dev_conf.txmode.offloads); 6331 rte_tel_data_add_dict_int(d, "ethdev_rss_hf", 6332 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); 6333 6334 return 0; 6335 } 6336 

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory, but peer information is. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
					peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
					direction);
}

int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (id=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
	return eth_err(port_id,
		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}
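
/*
 * Illustrative sketch only: rte_eth_rx_metadata_negotiate() must be called
 * before the port is configured (see the -EBUSY check above). An
 * application that wants MARK delivery on Rx might negotiate as follows;
 * the function name is hypothetical.
 */
static int __rte_unused
eth_dev_example_negotiate_rx_mark(uint16_t port_id)
{
	uint64_t features = RTE_ETH_RX_METADATA_USER_MARK;
	int ret;

	ret = rte_eth_rx_metadata_negotiate(port_id, &features);
	if (ret == -ENOTSUP)
		return 0; /* PMD does not support negotiation at all */
	if (ret < 0)
		return ret;

	/* On success, 'features' holds the subset the PMD agreed to deliver. */
	return (features & RTE_ETH_RX_METADATA_USER_MARK) ? 0 : -ENOTSUP;
}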

RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
			"Returns the device info for a port. Parameters: int port_id");
}
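
/*
 * Illustrative sketch only: an application (or another library) can expose
 * its own command on the same telemetry socket by following the pattern
 * above. The command path and handler below are hypothetical, not part of
 * the ethdev library.
 */
static int
eth_dev_example_handle_port_count(const char *cmd __rte_unused,
				  const char *params __rte_unused,
				  struct rte_tel_data *d)
{
	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_int(d, "count", rte_eth_dev_count_avail());
	return 0;
}

RTE_INIT(eth_dev_example_telemetry)
{
	rte_telemetry_register_cmd("/example/ethdev_count",
			eth_dev_example_handle_port_count,
			"Returns the number of available ethdev ports. Takes no parameters");
}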