/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name) \
	{ DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name) \
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name) \
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() is not yet supporting the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
	    (strcmp(iter->bus->name, "fslmc") == 0) ||
	    (strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
		       bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
			iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
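/*
 * Illustrative sketch (not part of the library): how an application is
 * expected to drive the iterator API above. The function name and the
 * devargs string are hypothetical; error handling is reduced to the
 * minimum. rte_eth_iterator_next() cleans up the iterator internally
 * once it returns RTE_MAX_ETHPORTS, so no explicit cleanup is needed
 * when the loop runs to completion.
 */
static __rte_unused unsigned int
example_count_matching_ports(const char *devargs_str)
{
	struct rte_dev_iterator iter;
	uint16_t port_id;
	unsigned int count = 0;

	if (rte_eth_iterator_init(&iter, devargs_str) != 0)
		return 0;

	for (port_id = rte_eth_iterator_next(&iter);
	     port_id != RTE_MAX_ETHPORTS;
	     port_id = rte_eth_iterator_next(&iter))
		count++;

	return count;
}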
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static void
eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&eth_dev_shared_data_lock);

	if (eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
			memset(eth_dev_shared_data->data, 0,
			       sizeof(eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ethdev = eth_dev_allocated(name);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port ID both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		  const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
			if (rte_eth_devices[port_id].data->owner.id == owner_id)
				memset(&rte_eth_devices[port_id].data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner ID=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}
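/*
 * Illustrative sketch (not part of the library): typical use of the port
 * ownership API above. A component first asks for an owner ID, then claims
 * the ports it manages so that other components skip them. The function
 * name and the "example" owner name are hypothetical.
 */
static __rte_unused int
example_take_port_ownership(uint16_t port_id, uint64_t *owner_id)
{
	struct rte_eth_dev_owner owner;
	int ret;

	ret = rte_eth_dev_owner_new(owner_id);
	if (ret != 0)
		return ret;

	owner.id = *owner_id;
	strlcpy(owner.name, "example", RTE_ETH_MAX_OWNER_NAME_LEN);

	/* Fails with -EPERM if somebody else already owns the port. */
	return rte_eth_dev_owner_set(port_id, &owner);
}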
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **rxq = dev->data->rx_queues;

	if (rxq[qid] == NULL)
		return;

	if (dev->dev_ops->rx_queue_release != NULL)
		(*dev->dev_ops->rx_queue_release)(dev, qid);
	rxq[qid] = NULL;
}

static void
eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **txq = dev->data->tx_queues;

	if (txq[qid] == NULL)
		return;

	if (dev->dev_ops->tx_queue_release != NULL)
		(*dev->dev_ops->tx_queue_release)(dev, qid);
	txq[qid] = NULL;
}

static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) {
		/* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) {
		/* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Rx queue_id=%u of device with port_id=%u\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been setup\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Tx queue_id=%u of device with port_id=%u\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been setup\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before start any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before start any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) {
		/* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) {
		/* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	case ETH_SPEED_NUM_200G:
		return ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				" %u != %u is not allowed\n",
				port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"> max allowed value %u\n", port_id, config_size,
			dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"< min allowed value %u\n", port_id, config_size,
			(unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		     uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time prepare to do
	 * dev_configure() to avoid any non-anticipated behaviour.
	 * And set to 1 when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get.
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		    struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Lets restore MAC now if device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}
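/*
 * Illustrative sketch (not part of the library): the canonical bring-up
 * sequence built from the APIs in this file, as an application would use
 * it. The function name, the single-queue layout and the mempool argument
 * are hypothetical; descriptor counts of zero fall back to the driver or
 * EAL defaults as described above, and all other configuration is left at
 * its defaults.
 */
static __rte_unused int
example_bring_up_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));

	/* One Rx and one Tx queue, default offloads. */
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 0,
			rte_eth_dev_socket_id(port_id), NULL, mb_pool);
	if (ret != 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 0,
			rte_eth_dev_socket_id(port_id), NULL);
	if (ret != 0)
		return ret;

	return rte_eth_dev_start(port_id);
}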
int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	dev->data->dev_started = 0;
	ret = (*dev->dev_ops->dev_stop)(dev);
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			"Requested Rx segments %u exceed supported %u\n",
			n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
1976 */ 1977 mp_first = rx_seg[0].mp; 1978 offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1; 1979 for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { 1980 struct rte_mempool *mpl = rx_seg[seg_idx].mp; 1981 uint32_t length = rx_seg[seg_idx].length; 1982 uint32_t offset = rx_seg[seg_idx].offset; 1983 1984 if (mpl == NULL) { 1985 RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); 1986 return -EINVAL; 1987 } 1988 if (seg_idx != 0 && mp_first != mpl && 1989 seg_capa->multi_pools == 0) { 1990 RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n"); 1991 return -ENOTSUP; 1992 } 1993 if (offset != 0) { 1994 if (seg_capa->offset_allowed == 0) { 1995 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n"); 1996 return -ENOTSUP; 1997 } 1998 if (offset & offset_mask) { 1999 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n", 2000 offset, 2001 seg_capa->offset_align_log2); 2002 return -EINVAL; 2003 } 2004 } 2005 if (mpl->private_data_size < 2006 sizeof(struct rte_pktmbuf_pool_private)) { 2007 RTE_ETHDEV_LOG(ERR, 2008 "%s private_data_size %u < %u\n", 2009 mpl->name, mpl->private_data_size, 2010 (unsigned int)sizeof 2011 (struct rte_pktmbuf_pool_private)); 2012 return -ENOSPC; 2013 } 2014 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; 2015 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); 2016 length = length != 0 ? length : *mbp_buf_size; 2017 if (*mbp_buf_size < length + offset) { 2018 RTE_ETHDEV_LOG(ERR, 2019 "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n", 2020 mpl->name, *mbp_buf_size, 2021 length + offset, length, offset); 2022 return -EINVAL; 2023 } 2024 } 2025 return 0; 2026 } 2027 2028 int 2029 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2030 uint16_t nb_rx_desc, unsigned int socket_id, 2031 const struct rte_eth_rxconf *rx_conf, 2032 struct rte_mempool *mp) 2033 { 2034 int ret; 2035 uint32_t mbp_buf_size; 2036 struct rte_eth_dev *dev; 2037 struct rte_eth_dev_info dev_info; 2038 struct rte_eth_rxconf local_conf; 2039 2040 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2041 dev = &rte_eth_devices[port_id]; 2042 2043 if (rx_queue_id >= dev->data->nb_rx_queues) { 2044 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2045 return -EINVAL; 2046 } 2047 2048 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP); 2049 2050 ret = rte_eth_dev_info_get(port_id, &dev_info); 2051 if (ret != 0) 2052 return ret; 2053 2054 if (mp != NULL) { 2055 /* Single pool configuration check. */ 2056 if (rx_conf != NULL && rx_conf->rx_nseg != 0) { 2057 RTE_ETHDEV_LOG(ERR, 2058 "Ambiguous segment configuration\n"); 2059 return -EINVAL; 2060 } 2061 /* 2062 * Check the size of the mbuf data buffer, this value 2063 * must be provided in the private data of the memory pool. 2064 * First check that the memory pool(s) has a valid private data. 
2065 */ 2066 if (mp->private_data_size < 2067 sizeof(struct rte_pktmbuf_pool_private)) { 2068 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 2069 mp->name, mp->private_data_size, 2070 (unsigned int) 2071 sizeof(struct rte_pktmbuf_pool_private)); 2072 return -ENOSPC; 2073 } 2074 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2075 if (mbp_buf_size < dev_info.min_rx_bufsize + 2076 RTE_PKTMBUF_HEADROOM) { 2077 RTE_ETHDEV_LOG(ERR, 2078 "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", 2079 mp->name, mbp_buf_size, 2080 RTE_PKTMBUF_HEADROOM + 2081 dev_info.min_rx_bufsize, 2082 RTE_PKTMBUF_HEADROOM, 2083 dev_info.min_rx_bufsize); 2084 return -EINVAL; 2085 } 2086 } else { 2087 const struct rte_eth_rxseg_split *rx_seg; 2088 uint16_t n_seg; 2089 2090 /* Extended multi-segment configuration check. */ 2091 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) { 2092 RTE_ETHDEV_LOG(ERR, 2093 "Memory pool is null and no extended configuration provided\n"); 2094 return -EINVAL; 2095 } 2096 2097 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2098 n_seg = rx_conf->rx_nseg; 2099 2100 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2101 ret = rte_eth_rx_queue_check_split(rx_seg, n_seg, 2102 &mbp_buf_size, 2103 &dev_info); 2104 if (ret != 0) 2105 return ret; 2106 } else { 2107 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2108 return -EINVAL; 2109 } 2110 } 2111 2112 /* Use default specified by driver, if nb_rx_desc is zero */ 2113 if (nb_rx_desc == 0) { 2114 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2115 /* If driver default is also zero, fall back on EAL default */ 2116 if (nb_rx_desc == 0) 2117 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2118 } 2119 2120 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2121 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2122 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2123 2124 RTE_ETHDEV_LOG(ERR, 2125 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2126 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2127 dev_info.rx_desc_lim.nb_min, 2128 dev_info.rx_desc_lim.nb_align); 2129 return -EINVAL; 2130 } 2131 2132 if (dev->data->dev_started && 2133 !(dev_info.dev_capa & 2134 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2135 return -EBUSY; 2136 2137 if (dev->data->dev_started && 2138 (dev->data->rx_queue_state[rx_queue_id] != 2139 RTE_ETH_QUEUE_STATE_STOPPED)) 2140 return -EBUSY; 2141 2142 eth_dev_rxq_release(dev, rx_queue_id); 2143 2144 if (rx_conf == NULL) 2145 rx_conf = &dev_info.default_rxconf; 2146 2147 local_conf = *rx_conf; 2148 2149 /* 2150 * If an offloading has already been enabled in 2151 * rte_eth_dev_configure(), it has been enabled on all queues, 2152 * so there is no need to enable it in this queue again. 2153 * The local_conf.offloads input to underlying PMD only carries 2154 * those offloadings which are only enabled on this queue and 2155 * not enabled on all queues. 2156 */ 2157 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2158 2159 /* 2160 * New added offloadings for this queue are those not enabled in 2161 * rte_eth_dev_configure() and they must be per-queue type. 2162 * A pure per-port offloading can't be enabled on a queue while 2163 * disabled on another queue. A pure per-port offloading can't 2164 * be enabled for any queue as new added one if it hasn't been 2165 * enabled in rte_eth_dev_configure(). 
2166 */ 2167 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2168 local_conf.offloads) { 2169 RTE_ETHDEV_LOG(ERR, 2170 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2171 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2172 port_id, rx_queue_id, local_conf.offloads, 2173 dev_info.rx_queue_offload_capa, 2174 __func__); 2175 return -EINVAL; 2176 } 2177 2178 if (local_conf.share_group > 0 && 2179 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) { 2180 RTE_ETHDEV_LOG(ERR, 2181 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n", 2182 port_id, rx_queue_id, local_conf.share_group); 2183 return -EINVAL; 2184 } 2185 2186 /* 2187 * If LRO is enabled, check that the maximum aggregated packet 2188 * size is supported by the configured device. 2189 */ 2190 /* Get the real Ethernet overhead length */ 2191 if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) { 2192 uint32_t overhead_len; 2193 uint32_t max_rx_pktlen; 2194 int ret; 2195 2196 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2197 dev_info.max_mtu); 2198 max_rx_pktlen = dev->data->mtu + overhead_len; 2199 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2200 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2201 ret = eth_dev_check_lro_pkt_size(port_id, 2202 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2203 max_rx_pktlen, 2204 dev_info.max_lro_pkt_size); 2205 if (ret != 0) 2206 return ret; 2207 } 2208 2209 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2210 socket_id, &local_conf, mp); 2211 if (!ret) { 2212 if (!dev->data->min_rx_buf_size || 2213 dev->data->min_rx_buf_size > mbp_buf_size) 2214 dev->data->min_rx_buf_size = mbp_buf_size; 2215 } 2216 2217 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2218 rx_conf, ret); 2219 return eth_err(port_id, ret); 2220 } 2221 2222 int 2223 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2224 uint16_t nb_rx_desc, 2225 const struct rte_eth_hairpin_conf *conf) 2226 { 2227 int ret; 2228 struct rte_eth_dev *dev; 2229 struct rte_eth_hairpin_cap cap; 2230 int i; 2231 int count; 2232 2233 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2234 dev = &rte_eth_devices[port_id]; 2235 2236 if (rx_queue_id >= dev->data->nb_rx_queues) { 2237 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2238 return -EINVAL; 2239 } 2240 2241 if (conf == NULL) { 2242 RTE_ETHDEV_LOG(ERR, 2243 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2244 port_id); 2245 return -EINVAL; 2246 } 2247 2248 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2249 if (ret != 0) 2250 return ret; 2251 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, 2252 -ENOTSUP); 2253 /* if nb_rx_desc is zero use max number of desc from the driver. 
	 */
	if (nb_rx_desc == 0)
		nb_rx_desc = cap.max_nb_desc;
	if (nb_rx_desc > cap.max_nb_desc) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
			nb_rx_desc, cap.max_nb_desc);
		return -EINVAL;
	}
	if (conf->peer_count > cap.max_rx_2_tx) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
			conf->peer_count, cap.max_rx_2_tx);
		return -EINVAL;
	}
	if (conf->peer_count == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: > 0",
			conf->peer_count);
		return -EINVAL;
	}
	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
	     cap.max_nb_queues != UINT16_MAX; i++) {
		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
			count++;
	}
	if (count > cap.max_nb_queues) {
		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues max is %d",
			       cap.max_nb_queues);
		return -EINVAL;
	}
	if (dev->data->dev_started)
		return -EBUSY;
	eth_dev_rxq_release(dev, rx_queue_id);
	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
						      nb_rx_desc, conf);
	if (ret == 0)
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_HAIRPIN;
	return eth_err(port_id, ret);
}

int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Use default specified by driver, if nb_tx_desc is zero */
	if (nb_tx_desc == 0) {
		nb_tx_desc = dev_info.default_txportconf.ring_size;
		/* If driver default is zero, fall back on EAL default */
		if (nb_tx_desc == 0)
			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	}
	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
			dev_info.tx_desc_lim.nb_min,
			dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
		(dev->data->tx_queue_state[tx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_txq_release(dev, tx_queue_id);

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	local_conf = *tx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, tx_queue_id, local_conf.offloads,
			dev_info.tx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}

int
rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
			       uint16_t nb_tx_desc,
			       const struct rte_eth_hairpin_conf *conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_hairpin_cap cap;
	int i;
	int count;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
			port_id);
		return -EINVAL;
	}

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
				-ENOTSUP);
	/* if nb_tx_desc is zero use max number of desc from the driver. */
	if (nb_tx_desc == 0)
		nb_tx_desc = cap.max_nb_desc;
	if (nb_tx_desc > cap.max_nb_desc) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
			nb_tx_desc, cap.max_nb_desc);
		return -EINVAL;
	}
	if (conf->peer_count > cap.max_tx_2_rx) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
			conf->peer_count, cap.max_tx_2_rx);
		return -EINVAL;
	}
	if (conf->peer_count == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Tx queue(=%u), should be: > 0",
			conf->peer_count);
		return -EINVAL;
	}
	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
	     cap.max_nb_queues != UINT16_MAX; i++) {
		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
			count++;
	}
	if (count > cap.max_nb_queues) {
		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues max is %d",
			       cap.max_nb_queues);
		return -EINVAL;
	}
	if (dev->data->dev_started)
		return -EBUSY;
	eth_dev_txq_release(dev, tx_queue_id);
	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
		(dev, tx_queue_id, nb_tx_desc, conf);
	if (ret == 0)
		dev->data->tx_queue_state[tx_queue_id] =
			RTE_ETH_QUEUE_STATE_HAIRPIN;
	return eth_err(port_id, ret);
}

int
rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
	dev = &rte_eth_devices[tx_port];

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
	if (ret != 0)
		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
			       " to Rx %d (%d - all ports)\n",
			       tx_port, rx_port, RTE_MAX_ETHPORTS);

	return ret;
}

int
rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
	dev = &rte_eth_devices[tx_port];

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
	if (ret != 0)
		RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
			       " from Rx %d (%d - all ports)\n",
			       tx_port, rx_port, RTE_MAX_ETHPORTS);

	return ret;
}

int
rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
			       size_t len, uint32_t direction)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (peer_ports == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u hairpin peer ports to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (len == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
				-ENOTSUP);

	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
						      len, direction);
	if (ret < 0)
RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2540 port_id, direction ? "Rx" : "Tx"); 2541 2542 return ret; 2543 } 2544 2545 void 2546 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2547 void *userdata __rte_unused) 2548 { 2549 rte_pktmbuf_free_bulk(pkts, unsent); 2550 } 2551 2552 void 2553 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2554 void *userdata) 2555 { 2556 uint64_t *count = userdata; 2557 2558 rte_pktmbuf_free_bulk(pkts, unsent); 2559 *count += unsent; 2560 } 2561 2562 int 2563 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2564 buffer_tx_error_fn cbfn, void *userdata) 2565 { 2566 if (buffer == NULL) { 2567 RTE_ETHDEV_LOG(ERR, 2568 "Cannot set Tx buffer error callback to NULL buffer\n"); 2569 return -EINVAL; 2570 } 2571 2572 buffer->error_callback = cbfn; 2573 buffer->error_userdata = userdata; 2574 return 0; 2575 } 2576 2577 int 2578 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2579 { 2580 int ret = 0; 2581 2582 if (buffer == NULL) { 2583 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2584 return -EINVAL; 2585 } 2586 2587 buffer->size = size; 2588 if (buffer->error_callback == NULL) { 2589 ret = rte_eth_tx_buffer_set_err_callback( 2590 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2591 } 2592 2593 return ret; 2594 } 2595 2596 int 2597 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2598 { 2599 struct rte_eth_dev *dev; 2600 int ret; 2601 2602 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2603 dev = &rte_eth_devices[port_id]; 2604 2605 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2606 2607 /* Call driver to free pending mbufs. */ 2608 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2609 free_cnt); 2610 return eth_err(port_id, ret); 2611 } 2612 2613 int 2614 rte_eth_promiscuous_enable(uint16_t port_id) 2615 { 2616 struct rte_eth_dev *dev; 2617 int diag = 0; 2618 2619 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2620 dev = &rte_eth_devices[port_id]; 2621 2622 if (dev->data->promiscuous == 1) 2623 return 0; 2624 2625 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2626 2627 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2628 dev->data->promiscuous = (diag == 0) ? 
1 : 0; 2629 2630 return eth_err(port_id, diag); 2631 } 2632 2633 int 2634 rte_eth_promiscuous_disable(uint16_t port_id) 2635 { 2636 struct rte_eth_dev *dev; 2637 int diag = 0; 2638 2639 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2640 dev = &rte_eth_devices[port_id]; 2641 2642 if (dev->data->promiscuous == 0) 2643 return 0; 2644 2645 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2646 2647 dev->data->promiscuous = 0; 2648 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2649 if (diag != 0) 2650 dev->data->promiscuous = 1; 2651 2652 return eth_err(port_id, diag); 2653 } 2654 2655 int 2656 rte_eth_promiscuous_get(uint16_t port_id) 2657 { 2658 struct rte_eth_dev *dev; 2659 2660 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2661 dev = &rte_eth_devices[port_id]; 2662 2663 return dev->data->promiscuous; 2664 } 2665 2666 int 2667 rte_eth_allmulticast_enable(uint16_t port_id) 2668 { 2669 struct rte_eth_dev *dev; 2670 int diag; 2671 2672 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2673 dev = &rte_eth_devices[port_id]; 2674 2675 if (dev->data->all_multicast == 1) 2676 return 0; 2677 2678 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2679 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2680 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2681 2682 return eth_err(port_id, diag); 2683 } 2684 2685 int 2686 rte_eth_allmulticast_disable(uint16_t port_id) 2687 { 2688 struct rte_eth_dev *dev; 2689 int diag; 2690 2691 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2692 dev = &rte_eth_devices[port_id]; 2693 2694 if (dev->data->all_multicast == 0) 2695 return 0; 2696 2697 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2698 dev->data->all_multicast = 0; 2699 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2700 if (diag != 0) 2701 dev->data->all_multicast = 1; 2702 2703 return eth_err(port_id, diag); 2704 } 2705 2706 int 2707 rte_eth_allmulticast_get(uint16_t port_id) 2708 { 2709 struct rte_eth_dev *dev; 2710 2711 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2712 dev = &rte_eth_devices[port_id]; 2713 2714 return dev->data->all_multicast; 2715 } 2716 2717 int 2718 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2719 { 2720 struct rte_eth_dev *dev; 2721 2722 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2723 dev = &rte_eth_devices[port_id]; 2724 2725 if (eth_link == NULL) { 2726 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2727 port_id); 2728 return -EINVAL; 2729 } 2730 2731 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2732 rte_eth_linkstatus_get(dev, eth_link); 2733 else { 2734 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2735 (*dev->dev_ops->link_update)(dev, 1); 2736 *eth_link = dev->data->dev_link; 2737 } 2738 2739 return 0; 2740 } 2741 2742 int 2743 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2744 { 2745 struct rte_eth_dev *dev; 2746 2747 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2748 dev = &rte_eth_devices[port_id]; 2749 2750 if (eth_link == NULL) { 2751 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2752 port_id); 2753 return -EINVAL; 2754 } 2755 2756 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2757 rte_eth_linkstatus_get(dev, eth_link); 2758 else { 2759 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2760 (*dev->dev_ops->link_update)(dev, 0); 2761 *eth_link = dev->data->dev_link; 2762 } 2763 2764 return 0; 2765 } 2766 2767 const 
char * 2768 rte_eth_link_speed_to_str(uint32_t link_speed) 2769 { 2770 switch (link_speed) { 2771 case ETH_SPEED_NUM_NONE: return "None"; 2772 case ETH_SPEED_NUM_10M: return "10 Mbps"; 2773 case ETH_SPEED_NUM_100M: return "100 Mbps"; 2774 case ETH_SPEED_NUM_1G: return "1 Gbps"; 2775 case ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2776 case ETH_SPEED_NUM_5G: return "5 Gbps"; 2777 case ETH_SPEED_NUM_10G: return "10 Gbps"; 2778 case ETH_SPEED_NUM_20G: return "20 Gbps"; 2779 case ETH_SPEED_NUM_25G: return "25 Gbps"; 2780 case ETH_SPEED_NUM_40G: return "40 Gbps"; 2781 case ETH_SPEED_NUM_50G: return "50 Gbps"; 2782 case ETH_SPEED_NUM_56G: return "56 Gbps"; 2783 case ETH_SPEED_NUM_100G: return "100 Gbps"; 2784 case ETH_SPEED_NUM_200G: return "200 Gbps"; 2785 case ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2786 default: return "Invalid"; 2787 } 2788 } 2789 2790 int 2791 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2792 { 2793 if (str == NULL) { 2794 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2795 return -EINVAL; 2796 } 2797 2798 if (len == 0) { 2799 RTE_ETHDEV_LOG(ERR, 2800 "Cannot convert link to string with zero size\n"); 2801 return -EINVAL; 2802 } 2803 2804 if (eth_link == NULL) { 2805 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2806 return -EINVAL; 2807 } 2808 2809 if (eth_link->link_status == ETH_LINK_DOWN) 2810 return snprintf(str, len, "Link down"); 2811 else 2812 return snprintf(str, len, "Link up at %s %s %s", 2813 rte_eth_link_speed_to_str(eth_link->link_speed), 2814 (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ? 2815 "FDX" : "HDX", 2816 (eth_link->link_autoneg == ETH_LINK_AUTONEG) ? 2817 "Autoneg" : "Fixed"); 2818 } 2819 2820 int 2821 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2822 { 2823 struct rte_eth_dev *dev; 2824 2825 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2826 dev = &rte_eth_devices[port_id]; 2827 2828 if (stats == NULL) { 2829 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2830 port_id); 2831 return -EINVAL; 2832 } 2833 2834 memset(stats, 0, sizeof(*stats)); 2835 2836 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2837 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2838 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2839 } 2840 2841 int 2842 rte_eth_stats_reset(uint16_t port_id) 2843 { 2844 struct rte_eth_dev *dev; 2845 int ret; 2846 2847 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2848 dev = &rte_eth_devices[port_id]; 2849 2850 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2851 ret = (*dev->dev_ops->stats_reset)(dev); 2852 if (ret != 0) 2853 return eth_err(port_id, ret); 2854 2855 dev->data->rx_mbuf_alloc_failed = 0; 2856 2857 return 0; 2858 } 2859 2860 static inline int 2861 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2862 { 2863 uint16_t nb_rxqs, nb_txqs; 2864 int count; 2865 2866 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2867 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2868 2869 count = RTE_NB_STATS; 2870 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2871 count += nb_rxqs * RTE_NB_RXQ_STATS; 2872 count += nb_txqs * RTE_NB_TXQ_STATS; 2873 } 2874 2875 return count; 2876 } 2877 2878 static int 2879 eth_dev_get_xstats_count(uint16_t port_id) 2880 { 2881 struct rte_eth_dev *dev; 2882 int count; 2883 2884 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2885 dev = &rte_eth_devices[port_id]; 2886 if 
(dev->dev_ops->xstats_get_names != NULL) { 2887 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2888 if (count < 0) 2889 return eth_err(port_id, count); 2890 } else 2891 count = 0; 2892 2893 2894 count += eth_dev_get_xstats_basic_count(dev); 2895 2896 return count; 2897 } 2898 2899 int 2900 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2901 uint64_t *id) 2902 { 2903 int cnt_xstats, idx_xstat; 2904 2905 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2906 2907 if (xstat_name == NULL) { 2908 RTE_ETHDEV_LOG(ERR, 2909 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2910 port_id); 2911 return -ENOMEM; 2912 } 2913 2914 if (id == NULL) { 2915 RTE_ETHDEV_LOG(ERR, 2916 "Cannot get ethdev port %u xstats ID to NULL\n", 2917 port_id); 2918 return -ENOMEM; 2919 } 2920 2921 /* Get count */ 2922 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2923 if (cnt_xstats < 0) { 2924 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2925 return -ENODEV; 2926 } 2927 2928 /* Get id-name lookup table */ 2929 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2930 2931 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2932 port_id, xstats_names, cnt_xstats, NULL)) { 2933 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2934 return -1; 2935 } 2936 2937 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2938 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2939 *id = idx_xstat; 2940 return 0; 2941 }; 2942 } 2943 2944 return -EINVAL; 2945 } 2946 2947 /* retrieve basic stats names */ 2948 static int 2949 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2950 struct rte_eth_xstat_name *xstats_names) 2951 { 2952 int cnt_used_entries = 0; 2953 uint32_t idx, id_queue; 2954 uint16_t num_q; 2955 2956 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2957 strlcpy(xstats_names[cnt_used_entries].name, 2958 eth_dev_stats_strings[idx].name, 2959 sizeof(xstats_names[0].name)); 2960 cnt_used_entries++; 2961 } 2962 2963 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2964 return cnt_used_entries; 2965 2966 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2967 for (id_queue = 0; id_queue < num_q; id_queue++) { 2968 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2969 snprintf(xstats_names[cnt_used_entries].name, 2970 sizeof(xstats_names[0].name), 2971 "rx_q%u_%s", 2972 id_queue, eth_dev_rxq_stats_strings[idx].name); 2973 cnt_used_entries++; 2974 } 2975 2976 } 2977 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2978 for (id_queue = 0; id_queue < num_q; id_queue++) { 2979 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 2980 snprintf(xstats_names[cnt_used_entries].name, 2981 sizeof(xstats_names[0].name), 2982 "tx_q%u_%s", 2983 id_queue, eth_dev_txq_stats_strings[idx].name); 2984 cnt_used_entries++; 2985 } 2986 } 2987 return cnt_used_entries; 2988 } 2989 2990 /* retrieve ethdev extended statistics names */ 2991 int 2992 rte_eth_xstats_get_names_by_id(uint16_t port_id, 2993 struct rte_eth_xstat_name *xstats_names, unsigned int size, 2994 uint64_t *ids) 2995 { 2996 struct rte_eth_xstat_name *xstats_names_copy; 2997 unsigned int no_basic_stat_requested = 1; 2998 unsigned int no_ext_stat_requested = 1; 2999 unsigned int expected_entries; 3000 unsigned int basic_count; 3001 struct rte_eth_dev *dev; 3002 unsigned int i; 3003 int ret; 3004 3005 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3006 dev = &rte_eth_devices[port_id]; 3007 3008 basic_count = eth_dev_get_xstats_basic_count(dev); 3009 
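	/*
	 * The expected total below is the basic stats (generic stats plus the
	 * optional per-queue entries) followed by any driver-specific xstats;
	 * user-visible ids lower than basic_count therefore refer to basic
	 * stats, while higher ids are forwarded to the PMD after subtracting
	 * basic_count.
	 */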
ret = eth_dev_get_xstats_count(port_id); 3010 if (ret < 0) 3011 return ret; 3012 expected_entries = (unsigned int)ret; 3013 3014 /* Return max number of stats if no ids given */ 3015 if (!ids) { 3016 if (!xstats_names) 3017 return expected_entries; 3018 else if (xstats_names && size < expected_entries) 3019 return expected_entries; 3020 } 3021 3022 if (ids && !xstats_names) 3023 return -EINVAL; 3024 3025 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3026 uint64_t ids_copy[size]; 3027 3028 for (i = 0; i < size; i++) { 3029 if (ids[i] < basic_count) { 3030 no_basic_stat_requested = 0; 3031 break; 3032 } 3033 3034 /* 3035 * Convert ids to xstats ids that PMD knows. 3036 * ids known by user are basic + extended stats. 3037 */ 3038 ids_copy[i] = ids[i] - basic_count; 3039 } 3040 3041 if (no_basic_stat_requested) 3042 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3043 ids_copy, xstats_names, size); 3044 } 3045 3046 /* Retrieve all stats */ 3047 if (!ids) { 3048 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3049 expected_entries); 3050 if (num_stats < 0 || num_stats > (int)expected_entries) 3051 return num_stats; 3052 else 3053 return expected_entries; 3054 } 3055 3056 xstats_names_copy = calloc(expected_entries, 3057 sizeof(struct rte_eth_xstat_name)); 3058 3059 if (!xstats_names_copy) { 3060 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3061 return -ENOMEM; 3062 } 3063 3064 if (ids) { 3065 for (i = 0; i < size; i++) { 3066 if (ids[i] >= basic_count) { 3067 no_ext_stat_requested = 0; 3068 break; 3069 } 3070 } 3071 } 3072 3073 /* Fill xstats_names_copy structure */ 3074 if (ids && no_ext_stat_requested) { 3075 eth_basic_stats_get_names(dev, xstats_names_copy); 3076 } else { 3077 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3078 expected_entries); 3079 if (ret < 0) { 3080 free(xstats_names_copy); 3081 return ret; 3082 } 3083 } 3084 3085 /* Filter stats */ 3086 for (i = 0; i < size; i++) { 3087 if (ids[i] >= expected_entries) { 3088 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3089 free(xstats_names_copy); 3090 return -1; 3091 } 3092 xstats_names[i] = xstats_names_copy[ids[i]]; 3093 } 3094 3095 free(xstats_names_copy); 3096 return size; 3097 } 3098 3099 int 3100 rte_eth_xstats_get_names(uint16_t port_id, 3101 struct rte_eth_xstat_name *xstats_names, 3102 unsigned int size) 3103 { 3104 struct rte_eth_dev *dev; 3105 int cnt_used_entries; 3106 int cnt_expected_entries; 3107 int cnt_driver_entries; 3108 3109 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3110 if (xstats_names == NULL || cnt_expected_entries < 0 || 3111 (int)size < cnt_expected_entries) 3112 return cnt_expected_entries; 3113 3114 /* port_id checked in eth_dev_get_xstats_count() */ 3115 dev = &rte_eth_devices[port_id]; 3116 3117 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3118 3119 if (dev->dev_ops->xstats_get_names != NULL) { 3120 /* If there are any driver-specific xstats, append them 3121 * to end of list. 
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return eth_err(port_id, cnt_driver_entries);
		cnt_used_entries += cnt_driver_entries;
	}

	return cnt_used_entries;
}


static int
eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
{
	struct rte_eth_dev *dev;
	struct rte_eth_stats eth_stats;
	unsigned int count = 0, i, q;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	ret = rte_eth_stats_get(port_id, &eth_stats);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
		return count;

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}
	return count;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
			 uint64_t *values, unsigned int size)
{
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int num_xstats_filled;
	unsigned int basic_count;
	uint16_t expected_entries;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (uint16_t)ret;
	struct rte_eth_xstat xstats[expected_entries];
	basic_count = eth_dev_get_xstats_basic_count(dev);

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!values)
			return expected_entries;
		else if (values && size < expected_entries)
			return expected_entries;
	}

	if (ids && !values)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
		unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
3238 */ 3239 ids_copy[i] = ids[i] - basic_count; 3240 } 3241 3242 if (no_basic_stat_requested) 3243 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3244 values, size); 3245 } 3246 3247 if (ids) { 3248 for (i = 0; i < size; i++) { 3249 if (ids[i] >= basic_count) { 3250 no_ext_stat_requested = 0; 3251 break; 3252 } 3253 } 3254 } 3255 3256 /* Fill the xstats structure */ 3257 if (ids && no_ext_stat_requested) 3258 ret = eth_basic_stats_get(port_id, xstats); 3259 else 3260 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3261 3262 if (ret < 0) 3263 return ret; 3264 num_xstats_filled = (unsigned int)ret; 3265 3266 /* Return all stats */ 3267 if (!ids) { 3268 for (i = 0; i < num_xstats_filled; i++) 3269 values[i] = xstats[i].value; 3270 return expected_entries; 3271 } 3272 3273 /* Filter stats */ 3274 for (i = 0; i < size; i++) { 3275 if (ids[i] >= expected_entries) { 3276 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3277 return -1; 3278 } 3279 values[i] = xstats[ids[i]].value; 3280 } 3281 return size; 3282 } 3283 3284 int 3285 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3286 unsigned int n) 3287 { 3288 struct rte_eth_dev *dev; 3289 unsigned int count = 0, i; 3290 signed int xcount = 0; 3291 uint16_t nb_rxqs, nb_txqs; 3292 int ret; 3293 3294 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3295 dev = &rte_eth_devices[port_id]; 3296 3297 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3298 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3299 3300 /* Return generic statistics */ 3301 count = RTE_NB_STATS; 3302 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) 3303 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS); 3304 3305 /* implemented by the driver */ 3306 if (dev->dev_ops->xstats_get != NULL) { 3307 /* Retrieve the xstats from the driver at the end of the 3308 * xstats struct. 3309 */ 3310 xcount = (*dev->dev_ops->xstats_get)(dev, 3311 xstats ? xstats + count : NULL, 3312 (n > count) ? 
n - count : 0); 3313 3314 if (xcount < 0) 3315 return eth_err(port_id, xcount); 3316 } 3317 3318 if (n < count + xcount || xstats == NULL) 3319 return count + xcount; 3320 3321 /* now fill the xstats structure */ 3322 ret = eth_basic_stats_get(port_id, xstats); 3323 if (ret < 0) 3324 return ret; 3325 count = ret; 3326 3327 for (i = 0; i < count; i++) 3328 xstats[i].id = i; 3329 /* add an offset to driver-specific stats */ 3330 for ( ; i < count + xcount; i++) 3331 xstats[i].id += count; 3332 3333 return count + xcount; 3334 } 3335 3336 /* reset ethdev extended statistics */ 3337 int 3338 rte_eth_xstats_reset(uint16_t port_id) 3339 { 3340 struct rte_eth_dev *dev; 3341 3342 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3343 dev = &rte_eth_devices[port_id]; 3344 3345 /* implemented by the driver */ 3346 if (dev->dev_ops->xstats_reset != NULL) 3347 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3348 3349 /* fallback to default */ 3350 return rte_eth_stats_reset(port_id); 3351 } 3352 3353 static int 3354 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3355 uint8_t stat_idx, uint8_t is_rx) 3356 { 3357 struct rte_eth_dev *dev; 3358 3359 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3360 dev = &rte_eth_devices[port_id]; 3361 3362 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3363 return -EINVAL; 3364 3365 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3366 return -EINVAL; 3367 3368 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3369 return -EINVAL; 3370 3371 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 3372 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3373 } 3374 3375 int 3376 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3377 uint8_t stat_idx) 3378 { 3379 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3380 tx_queue_id, 3381 stat_idx, STAT_QMAP_TX)); 3382 } 3383 3384 int 3385 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3386 uint8_t stat_idx) 3387 { 3388 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3389 rx_queue_id, 3390 stat_idx, STAT_QMAP_RX)); 3391 } 3392 3393 int 3394 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3395 { 3396 struct rte_eth_dev *dev; 3397 3398 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3399 dev = &rte_eth_devices[port_id]; 3400 3401 if (fw_version == NULL && fw_size > 0) { 3402 RTE_ETHDEV_LOG(ERR, 3403 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3404 port_id); 3405 return -EINVAL; 3406 } 3407 3408 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 3409 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3410 fw_version, fw_size)); 3411 } 3412 3413 int 3414 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3415 { 3416 struct rte_eth_dev *dev; 3417 const struct rte_eth_desc_lim lim = { 3418 .nb_max = UINT16_MAX, 3419 .nb_min = 0, 3420 .nb_align = 1, 3421 .nb_seg_max = UINT16_MAX, 3422 .nb_mtu_seg_max = UINT16_MAX, 3423 }; 3424 int diag; 3425 3426 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3427 dev = &rte_eth_devices[port_id]; 3428 3429 if (dev_info == NULL) { 3430 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3431 port_id); 3432 return -EINVAL; 3433 } 3434 3435 /* 3436 * Init dev_info before port_id check since caller does not have 3437 * return status and does not know if get is successful or not. 
3438 */ 3439 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3440 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3441 3442 dev_info->rx_desc_lim = lim; 3443 dev_info->tx_desc_lim = lim; 3444 dev_info->device = dev->device; 3445 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3446 RTE_ETHER_CRC_LEN; 3447 dev_info->max_mtu = UINT16_MAX; 3448 3449 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 3450 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3451 if (diag != 0) { 3452 /* Cleanup already filled in device information */ 3453 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3454 return eth_err(port_id, diag); 3455 } 3456 3457 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3458 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3459 RTE_MAX_QUEUES_PER_PORT); 3460 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3461 RTE_MAX_QUEUES_PER_PORT); 3462 3463 dev_info->driver_name = dev->device->driver->name; 3464 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3465 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3466 3467 dev_info->dev_flags = &dev->data->dev_flags; 3468 3469 return 0; 3470 } 3471 3472 int 3473 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3474 { 3475 struct rte_eth_dev *dev; 3476 3477 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3478 dev = &rte_eth_devices[port_id]; 3479 3480 if (dev_conf == NULL) { 3481 RTE_ETHDEV_LOG(ERR, 3482 "Cannot get ethdev port %u configuration to NULL\n", 3483 port_id); 3484 return -EINVAL; 3485 } 3486 3487 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3488 3489 return 0; 3490 } 3491 3492 int 3493 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3494 uint32_t *ptypes, int num) 3495 { 3496 int i, j; 3497 struct rte_eth_dev *dev; 3498 const uint32_t *all_ptypes; 3499 3500 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3501 dev = &rte_eth_devices[port_id]; 3502 3503 if (ptypes == NULL && num > 0) { 3504 RTE_ETHDEV_LOG(ERR, 3505 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3506 port_id); 3507 return -EINVAL; 3508 } 3509 3510 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3511 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3512 3513 if (!all_ptypes) 3514 return 0; 3515 3516 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3517 if (all_ptypes[i] & ptype_mask) { 3518 if (j < num) 3519 ptypes[j] = all_ptypes[i]; 3520 j++; 3521 } 3522 3523 return j; 3524 } 3525 3526 int 3527 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3528 uint32_t *set_ptypes, unsigned int num) 3529 { 3530 const uint32_t valid_ptype_masks[] = { 3531 RTE_PTYPE_L2_MASK, 3532 RTE_PTYPE_L3_MASK, 3533 RTE_PTYPE_L4_MASK, 3534 RTE_PTYPE_TUNNEL_MASK, 3535 RTE_PTYPE_INNER_L2_MASK, 3536 RTE_PTYPE_INNER_L3_MASK, 3537 RTE_PTYPE_INNER_L4_MASK, 3538 }; 3539 const uint32_t *all_ptypes; 3540 struct rte_eth_dev *dev; 3541 uint32_t unused_mask; 3542 unsigned int i, j; 3543 int ret; 3544 3545 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3546 dev = &rte_eth_devices[port_id]; 3547 3548 if (num > 0 && set_ptypes == NULL) { 3549 RTE_ETHDEV_LOG(ERR, 3550 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3551 port_id); 3552 return -EINVAL; 3553 } 3554 3555 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3556 *dev->dev_ops->dev_ptypes_set == NULL) { 3557 ret = 0; 3558 goto 
ptype_unknown; 3559 } 3560 3561 if (ptype_mask == 0) { 3562 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3563 ptype_mask); 3564 goto ptype_unknown; 3565 } 3566 3567 unused_mask = ptype_mask; 3568 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3569 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3570 if (mask && mask != valid_ptype_masks[i]) { 3571 ret = -EINVAL; 3572 goto ptype_unknown; 3573 } 3574 unused_mask &= ~valid_ptype_masks[i]; 3575 } 3576 3577 if (unused_mask) { 3578 ret = -EINVAL; 3579 goto ptype_unknown; 3580 } 3581 3582 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3583 if (all_ptypes == NULL) { 3584 ret = 0; 3585 goto ptype_unknown; 3586 } 3587 3588 /* 3589 * Accommodate as many set_ptypes as possible. If the supplied 3590 * set_ptypes array is insufficient fill it partially. 3591 */ 3592 for (i = 0, j = 0; set_ptypes != NULL && 3593 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3594 if (ptype_mask & all_ptypes[i]) { 3595 if (j < num - 1) { 3596 set_ptypes[j] = all_ptypes[i]; 3597 j++; 3598 continue; 3599 } 3600 break; 3601 } 3602 } 3603 3604 if (set_ptypes != NULL && j < num) 3605 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3606 3607 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3608 3609 ptype_unknown: 3610 if (num > 0) 3611 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3612 3613 return ret; 3614 } 3615 3616 int 3617 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3618 unsigned int num) 3619 { 3620 int32_t ret; 3621 struct rte_eth_dev *dev; 3622 struct rte_eth_dev_info dev_info; 3623 3624 if (ma == NULL) { 3625 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3626 return -EINVAL; 3627 } 3628 3629 /* will check for us that port_id is a valid one */ 3630 ret = rte_eth_dev_info_get(port_id, &dev_info); 3631 if (ret != 0) 3632 return ret; 3633 3634 dev = &rte_eth_devices[port_id]; 3635 num = RTE_MIN(dev_info.max_mac_addrs, num); 3636 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3637 3638 return num; 3639 } 3640 3641 int 3642 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3643 { 3644 struct rte_eth_dev *dev; 3645 3646 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3647 dev = &rte_eth_devices[port_id]; 3648 3649 if (mac_addr == NULL) { 3650 RTE_ETHDEV_LOG(ERR, 3651 "Cannot get ethdev port %u MAC address to NULL\n", 3652 port_id); 3653 return -EINVAL; 3654 } 3655 3656 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3657 3658 return 0; 3659 } 3660 3661 int 3662 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3663 { 3664 struct rte_eth_dev *dev; 3665 3666 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3667 dev = &rte_eth_devices[port_id]; 3668 3669 if (mtu == NULL) { 3670 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3671 port_id); 3672 return -EINVAL; 3673 } 3674 3675 *mtu = dev->data->mtu; 3676 return 0; 3677 } 3678 3679 int 3680 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3681 { 3682 int ret; 3683 struct rte_eth_dev_info dev_info; 3684 struct rte_eth_dev *dev; 3685 3686 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3687 dev = &rte_eth_devices[port_id]; 3688 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3689 3690 /* 3691 * Check if the device supports dev_infos_get, if it does not 3692 * skip min_mtu/max_mtu validation here as this requires values 3693 * that are populated within the call to rte_eth_dev_info_get() 3694 * which relies on dev->dev_ops->dev_infos_get. 
3695 */ 3696 if (*dev->dev_ops->dev_infos_get != NULL) { 3697 ret = rte_eth_dev_info_get(port_id, &dev_info); 3698 if (ret != 0) 3699 return ret; 3700 3701 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3702 if (ret != 0) 3703 return ret; 3704 } 3705 3706 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3707 if (ret == 0) 3708 dev->data->mtu = mtu; 3709 3710 return eth_err(port_id, ret); 3711 } 3712 3713 int 3714 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3715 { 3716 struct rte_eth_dev *dev; 3717 int ret; 3718 3719 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3720 dev = &rte_eth_devices[port_id]; 3721 3722 if (!(dev->data->dev_conf.rxmode.offloads & 3723 DEV_RX_OFFLOAD_VLAN_FILTER)) { 3724 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 3725 port_id); 3726 return -ENOSYS; 3727 } 3728 3729 if (vlan_id > 4095) { 3730 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3731 port_id, vlan_id); 3732 return -EINVAL; 3733 } 3734 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3735 3736 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3737 if (ret == 0) { 3738 struct rte_vlan_filter_conf *vfc; 3739 int vidx; 3740 int vbit; 3741 3742 vfc = &dev->data->vlan_filter_conf; 3743 vidx = vlan_id / 64; 3744 vbit = vlan_id % 64; 3745 3746 if (on) 3747 vfc->ids[vidx] |= RTE_BIT64(vbit); 3748 else 3749 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 3750 } 3751 3752 return eth_err(port_id, ret); 3753 } 3754 3755 int 3756 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3757 int on) 3758 { 3759 struct rte_eth_dev *dev; 3760 3761 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3762 dev = &rte_eth_devices[port_id]; 3763 3764 if (rx_queue_id >= dev->data->nb_rx_queues) { 3765 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3766 return -EINVAL; 3767 } 3768 3769 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3770 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3771 3772 return 0; 3773 } 3774 3775 int 3776 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3777 enum rte_vlan_type vlan_type, 3778 uint16_t tpid) 3779 { 3780 struct rte_eth_dev *dev; 3781 3782 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3783 dev = &rte_eth_devices[port_id]; 3784 3785 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3786 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3787 tpid)); 3788 } 3789 3790 int 3791 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3792 { 3793 struct rte_eth_dev_info dev_info; 3794 struct rte_eth_dev *dev; 3795 int ret = 0; 3796 int mask = 0; 3797 int cur, org = 0; 3798 uint64_t orig_offloads; 3799 uint64_t dev_offloads; 3800 uint64_t new_offloads; 3801 3802 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3803 dev = &rte_eth_devices[port_id]; 3804 3805 /* save original values in case of failure */ 3806 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3807 dev_offloads = orig_offloads; 3808 3809 /* check which option changed by application */ 3810 cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD); 3811 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP); 3812 if (cur != org) { 3813 if (cur) 3814 dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 3815 else 3816 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3817 mask |= ETH_VLAN_STRIP_MASK; 3818 } 3819 3820 cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD); 3821 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER); 3822 if (cur != org) { 3823 if (cur) 3824 dev_offloads 
|= DEV_RX_OFFLOAD_VLAN_FILTER; 3825 else 3826 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3827 mask |= ETH_VLAN_FILTER_MASK; 3828 } 3829 3830 cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD); 3831 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND); 3832 if (cur != org) { 3833 if (cur) 3834 dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 3835 else 3836 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3837 mask |= ETH_VLAN_EXTEND_MASK; 3838 } 3839 3840 cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD); 3841 org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP); 3842 if (cur != org) { 3843 if (cur) 3844 dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3845 else 3846 dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3847 mask |= ETH_QINQ_STRIP_MASK; 3848 } 3849 3850 /*no change*/ 3851 if (mask == 0) 3852 return ret; 3853 3854 ret = rte_eth_dev_info_get(port_id, &dev_info); 3855 if (ret != 0) 3856 return ret; 3857 3858 /* Rx VLAN offloading must be within its device capabilities */ 3859 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3860 new_offloads = dev_offloads & ~orig_offloads; 3861 RTE_ETHDEV_LOG(ERR, 3862 "Ethdev port_id=%u requested new added VLAN offloads " 3863 "0x%" PRIx64 " must be within Rx offloads capabilities " 3864 "0x%" PRIx64 " in %s()\n", 3865 port_id, new_offloads, dev_info.rx_offload_capa, 3866 __func__); 3867 return -EINVAL; 3868 } 3869 3870 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3871 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3872 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3873 if (ret) { 3874 /* hit an error restore original values */ 3875 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3876 } 3877 3878 return eth_err(port_id, ret); 3879 } 3880 3881 int 3882 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3883 { 3884 struct rte_eth_dev *dev; 3885 uint64_t *dev_offloads; 3886 int ret = 0; 3887 3888 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3889 dev = &rte_eth_devices[port_id]; 3890 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3891 3892 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 3893 ret |= ETH_VLAN_STRIP_OFFLOAD; 3894 3895 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 3896 ret |= ETH_VLAN_FILTER_OFFLOAD; 3897 3898 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 3899 ret |= ETH_VLAN_EXTEND_OFFLOAD; 3900 3901 if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP) 3902 ret |= ETH_QINQ_STRIP_OFFLOAD; 3903 3904 return ret; 3905 } 3906 3907 int 3908 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3909 { 3910 struct rte_eth_dev *dev; 3911 3912 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3913 dev = &rte_eth_devices[port_id]; 3914 3915 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3916 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3917 } 3918 3919 int 3920 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3921 { 3922 struct rte_eth_dev *dev; 3923 3924 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3925 dev = &rte_eth_devices[port_id]; 3926 3927 if (fc_conf == NULL) { 3928 RTE_ETHDEV_LOG(ERR, 3929 "Cannot get ethdev port %u flow control config to NULL\n", 3930 port_id); 3931 return -EINVAL; 3932 } 3933 3934 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3935 memset(fc_conf, 0, sizeof(*fc_conf)); 3936 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3937 } 3938 3939 int 3940 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3941 { 3942 struct rte_eth_dev 
*dev; 3943 3944 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3945 dev = &rte_eth_devices[port_id]; 3946 3947 if (fc_conf == NULL) { 3948 RTE_ETHDEV_LOG(ERR, 3949 "Cannot set ethdev port %u flow control from NULL config\n", 3950 port_id); 3951 return -EINVAL; 3952 } 3953 3954 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 3955 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 3956 return -EINVAL; 3957 } 3958 3959 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 3960 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 3961 } 3962 3963 int 3964 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 3965 struct rte_eth_pfc_conf *pfc_conf) 3966 { 3967 struct rte_eth_dev *dev; 3968 3969 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3970 dev = &rte_eth_devices[port_id]; 3971 3972 if (pfc_conf == NULL) { 3973 RTE_ETHDEV_LOG(ERR, 3974 "Cannot set ethdev port %u priority flow control from NULL config\n", 3975 port_id); 3976 return -EINVAL; 3977 } 3978 3979 if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) { 3980 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 3981 return -EINVAL; 3982 } 3983 3984 /* High water, low water validation are device specific */ 3985 if (*dev->dev_ops->priority_flow_ctrl_set) 3986 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 3987 (dev, pfc_conf)); 3988 return -ENOTSUP; 3989 } 3990 3991 static int 3992 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 3993 uint16_t reta_size) 3994 { 3995 uint16_t i, num; 3996 3997 num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE; 3998 for (i = 0; i < num; i++) { 3999 if (reta_conf[i].mask) 4000 return 0; 4001 } 4002 4003 return -EINVAL; 4004 } 4005 4006 static int 4007 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 4008 uint16_t reta_size, 4009 uint16_t max_rxq) 4010 { 4011 uint16_t i, idx, shift; 4012 4013 if (max_rxq == 0) { 4014 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 4015 return -EINVAL; 4016 } 4017 4018 for (i = 0; i < reta_size; i++) { 4019 idx = i / RTE_RETA_GROUP_SIZE; 4020 shift = i % RTE_RETA_GROUP_SIZE; 4021 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 4022 (reta_conf[idx].reta[shift] >= max_rxq)) { 4023 RTE_ETHDEV_LOG(ERR, 4024 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 4025 idx, shift, 4026 reta_conf[idx].reta[shift], max_rxq); 4027 return -EINVAL; 4028 } 4029 } 4030 4031 return 0; 4032 } 4033 4034 int 4035 rte_eth_dev_rss_reta_update(uint16_t port_id, 4036 struct rte_eth_rss_reta_entry64 *reta_conf, 4037 uint16_t reta_size) 4038 { 4039 struct rte_eth_dev *dev; 4040 int ret; 4041 4042 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4043 dev = &rte_eth_devices[port_id]; 4044 4045 if (reta_conf == NULL) { 4046 RTE_ETHDEV_LOG(ERR, 4047 "Cannot update ethdev port %u RSS RETA to NULL\n", 4048 port_id); 4049 return -EINVAL; 4050 } 4051 4052 if (reta_size == 0) { 4053 RTE_ETHDEV_LOG(ERR, 4054 "Cannot update ethdev port %u RSS RETA with zero size\n", 4055 port_id); 4056 return -EINVAL; 4057 } 4058 4059 /* Check mask bits */ 4060 ret = eth_check_reta_mask(reta_conf, reta_size); 4061 if (ret < 0) 4062 return ret; 4063 4064 /* Check entry value */ 4065 ret = eth_check_reta_entry(reta_conf, reta_size, 4066 dev->data->nb_rx_queues); 4067 if (ret < 0) 4068 return ret; 4069 4070 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 4071 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4072 reta_size)); 4073 } 4074 
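/*
 * Editor's note: illustrative sketch only, not part of rte_ethdev.c. It shows
 * how an application is expected to lay out the rte_eth_rss_reta_entry64
 * array that rte_eth_dev_rss_reta_update() validates above: entry i lives in
 * group i / RTE_RETA_GROUP_SIZE at slot i % RTE_RETA_GROUP_SIZE, and only
 * slots whose mask bit is set are checked and applied. The function name
 * example_spread_reta() and the 512-entry upper bound are assumptions made
 * for this sketch; reta_size and nb_queues should come from
 * rte_eth_dev_info_get() and the application's own configuration.
 */
static __rte_unused int
example_spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
	uint16_t i, idx, shift;

	if (nb_queues == 0 || reta_size == 0 || reta_size > 512)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		/* Mark the slot as valid and spread entries round-robin. */
		reta_conf[idx].mask |= RTE_BIT64(shift);
		reta_conf[idx].reta[shift] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}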
4075 int 4076 rte_eth_dev_rss_reta_query(uint16_t port_id, 4077 struct rte_eth_rss_reta_entry64 *reta_conf, 4078 uint16_t reta_size) 4079 { 4080 struct rte_eth_dev *dev; 4081 int ret; 4082 4083 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4084 dev = &rte_eth_devices[port_id]; 4085 4086 if (reta_conf == NULL) { 4087 RTE_ETHDEV_LOG(ERR, 4088 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4089 port_id); 4090 return -EINVAL; 4091 } 4092 4093 /* Check mask bits */ 4094 ret = eth_check_reta_mask(reta_conf, reta_size); 4095 if (ret < 0) 4096 return ret; 4097 4098 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 4099 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4100 reta_size)); 4101 } 4102 4103 int 4104 rte_eth_dev_rss_hash_update(uint16_t port_id, 4105 struct rte_eth_rss_conf *rss_conf) 4106 { 4107 struct rte_eth_dev *dev; 4108 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4109 int ret; 4110 4111 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4112 dev = &rte_eth_devices[port_id]; 4113 4114 if (rss_conf == NULL) { 4115 RTE_ETHDEV_LOG(ERR, 4116 "Cannot update ethdev port %u RSS hash from NULL config\n", 4117 port_id); 4118 return -EINVAL; 4119 } 4120 4121 ret = rte_eth_dev_info_get(port_id, &dev_info); 4122 if (ret != 0) 4123 return ret; 4124 4125 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4126 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4127 dev_info.flow_type_rss_offloads) { 4128 RTE_ETHDEV_LOG(ERR, 4129 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4130 port_id, rss_conf->rss_hf, 4131 dev_info.flow_type_rss_offloads); 4132 return -EINVAL; 4133 } 4134 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 4135 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4136 rss_conf)); 4137 } 4138 4139 int 4140 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4141 struct rte_eth_rss_conf *rss_conf) 4142 { 4143 struct rte_eth_dev *dev; 4144 4145 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4146 dev = &rte_eth_devices[port_id]; 4147 4148 if (rss_conf == NULL) { 4149 RTE_ETHDEV_LOG(ERR, 4150 "Cannot get ethdev port %u RSS hash config to NULL\n", 4151 port_id); 4152 return -EINVAL; 4153 } 4154 4155 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 4156 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4157 rss_conf)); 4158 } 4159 4160 int 4161 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4162 struct rte_eth_udp_tunnel *udp_tunnel) 4163 { 4164 struct rte_eth_dev *dev; 4165 4166 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4167 dev = &rte_eth_devices[port_id]; 4168 4169 if (udp_tunnel == NULL) { 4170 RTE_ETHDEV_LOG(ERR, 4171 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4172 port_id); 4173 return -EINVAL; 4174 } 4175 4176 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4177 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4178 return -EINVAL; 4179 } 4180 4181 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 4182 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4183 udp_tunnel)); 4184 } 4185 4186 int 4187 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4188 struct rte_eth_udp_tunnel *udp_tunnel) 4189 { 4190 struct rte_eth_dev *dev; 4191 4192 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4193 dev = &rte_eth_devices[port_id]; 4194 4195 if (udp_tunnel == NULL) { 4196 RTE_ETHDEV_LOG(ERR, 4197 "Cannot delete ethdev port %u UDP 
tunnel port from NULL UDP tunnel\n", 4198 port_id); 4199 return -EINVAL; 4200 } 4201 4202 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4203 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4204 return -EINVAL; 4205 } 4206 4207 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 4208 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4209 udp_tunnel)); 4210 } 4211 4212 int 4213 rte_eth_led_on(uint16_t port_id) 4214 { 4215 struct rte_eth_dev *dev; 4216 4217 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4218 dev = &rte_eth_devices[port_id]; 4219 4220 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 4221 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4222 } 4223 4224 int 4225 rte_eth_led_off(uint16_t port_id) 4226 { 4227 struct rte_eth_dev *dev; 4228 4229 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4230 dev = &rte_eth_devices[port_id]; 4231 4232 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 4233 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4234 } 4235 4236 int 4237 rte_eth_fec_get_capability(uint16_t port_id, 4238 struct rte_eth_fec_capa *speed_fec_capa, 4239 unsigned int num) 4240 { 4241 struct rte_eth_dev *dev; 4242 int ret; 4243 4244 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4245 dev = &rte_eth_devices[port_id]; 4246 4247 if (speed_fec_capa == NULL && num > 0) { 4248 RTE_ETHDEV_LOG(ERR, 4249 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4250 port_id); 4251 return -EINVAL; 4252 } 4253 4254 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); 4255 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4256 4257 return ret; 4258 } 4259 4260 int 4261 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4262 { 4263 struct rte_eth_dev *dev; 4264 4265 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4266 dev = &rte_eth_devices[port_id]; 4267 4268 if (fec_capa == NULL) { 4269 RTE_ETHDEV_LOG(ERR, 4270 "Cannot get ethdev port %u current FEC mode to NULL\n", 4271 port_id); 4272 return -EINVAL; 4273 } 4274 4275 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); 4276 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4277 } 4278 4279 int 4280 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4281 { 4282 struct rte_eth_dev *dev; 4283 4284 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4285 dev = &rte_eth_devices[port_id]; 4286 4287 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); 4288 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4289 } 4290 4291 /* 4292 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4293 * an empty spot. 
4294 */ 4295 static int 4296 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4297 { 4298 struct rte_eth_dev_info dev_info; 4299 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4300 unsigned i; 4301 int ret; 4302 4303 ret = rte_eth_dev_info_get(port_id, &dev_info); 4304 if (ret != 0) 4305 return -1; 4306 4307 for (i = 0; i < dev_info.max_mac_addrs; i++) 4308 if (memcmp(addr, &dev->data->mac_addrs[i], 4309 RTE_ETHER_ADDR_LEN) == 0) 4310 return i; 4311 4312 return -1; 4313 } 4314 4315 static const struct rte_ether_addr null_mac_addr; 4316 4317 int 4318 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4319 uint32_t pool) 4320 { 4321 struct rte_eth_dev *dev; 4322 int index; 4323 uint64_t pool_mask; 4324 int ret; 4325 4326 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4327 dev = &rte_eth_devices[port_id]; 4328 4329 if (addr == NULL) { 4330 RTE_ETHDEV_LOG(ERR, 4331 "Cannot add ethdev port %u MAC address from NULL address\n", 4332 port_id); 4333 return -EINVAL; 4334 } 4335 4336 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 4337 4338 if (rte_is_zero_ether_addr(addr)) { 4339 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4340 port_id); 4341 return -EINVAL; 4342 } 4343 if (pool >= ETH_64_POOLS) { 4344 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", ETH_64_POOLS - 1); 4345 return -EINVAL; 4346 } 4347 4348 index = eth_dev_get_mac_addr_index(port_id, addr); 4349 if (index < 0) { 4350 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4351 if (index < 0) { 4352 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4353 port_id); 4354 return -ENOSPC; 4355 } 4356 } else { 4357 pool_mask = dev->data->mac_pool_sel[index]; 4358 4359 /* Check if both MAC address and pool is already there, and do nothing */ 4360 if (pool_mask & RTE_BIT64(pool)) 4361 return 0; 4362 } 4363 4364 /* Update NIC */ 4365 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4366 4367 if (ret == 0) { 4368 /* Update address in NIC data structure */ 4369 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4370 4371 /* Update pool bitmap in NIC data structure */ 4372 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4373 } 4374 4375 return eth_err(port_id, ret); 4376 } 4377 4378 int 4379 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4380 { 4381 struct rte_eth_dev *dev; 4382 int index; 4383 4384 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4385 dev = &rte_eth_devices[port_id]; 4386 4387 if (addr == NULL) { 4388 RTE_ETHDEV_LOG(ERR, 4389 "Cannot remove ethdev port %u MAC address from NULL address\n", 4390 port_id); 4391 return -EINVAL; 4392 } 4393 4394 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); 4395 4396 index = eth_dev_get_mac_addr_index(port_id, addr); 4397 if (index == 0) { 4398 RTE_ETHDEV_LOG(ERR, 4399 "Port %u: Cannot remove default MAC address\n", 4400 port_id); 4401 return -EADDRINUSE; 4402 } else if (index < 0) 4403 return 0; /* Do nothing if address wasn't found */ 4404 4405 /* Update NIC */ 4406 (*dev->dev_ops->mac_addr_remove)(dev, index); 4407 4408 /* Update address in NIC data structure */ 4409 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4410 4411 /* reset pool bitmap */ 4412 dev->data->mac_pool_sel[index] = 0; 4413 4414 return 0; 4415 } 4416 4417 int 4418 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4419 { 4420 struct rte_eth_dev *dev; 4421 int ret; 4422 4423 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4424 dev = &rte_eth_devices[port_id]; 4425 4426 if (addr == NULL) { 4427 RTE_ETHDEV_LOG(ERR, 4428 "Cannot set ethdev port %u default MAC address from NULL address\n", 4429 port_id); 4430 return -EINVAL; 4431 } 4432 4433 if (!rte_is_valid_assigned_ether_addr(addr)) 4434 return -EINVAL; 4435 4436 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 4437 4438 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4439 if (ret < 0) 4440 return ret; 4441 4442 /* Update default address in NIC data structure */ 4443 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4444 4445 return 0; 4446 } 4447 4448 4449 /* 4450 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4451 * an empty spot. 4452 */ 4453 static int 4454 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4455 const struct rte_ether_addr *addr) 4456 { 4457 struct rte_eth_dev_info dev_info; 4458 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4459 unsigned i; 4460 int ret; 4461 4462 ret = rte_eth_dev_info_get(port_id, &dev_info); 4463 if (ret != 0) 4464 return -1; 4465 4466 if (!dev->data->hash_mac_addrs) 4467 return -1; 4468 4469 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4470 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4471 RTE_ETHER_ADDR_LEN) == 0) 4472 return i; 4473 4474 return -1; 4475 } 4476 4477 int 4478 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4479 uint8_t on) 4480 { 4481 int index; 4482 int ret; 4483 struct rte_eth_dev *dev; 4484 4485 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4486 dev = &rte_eth_devices[port_id]; 4487 4488 if (addr == NULL) { 4489 RTE_ETHDEV_LOG(ERR, 4490 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4491 port_id); 4492 return -EINVAL; 4493 } 4494 4495 if (rte_is_zero_ether_addr(addr)) { 4496 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4497 port_id); 4498 return -EINVAL; 4499 } 4500 4501 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4502 /* Check if it's already there, and do nothing */ 4503 if ((index >= 0) && on) 4504 return 0; 4505 4506 if (index < 0) { 4507 if (!on) { 4508 RTE_ETHDEV_LOG(ERR, 4509 "Port %u: the MAC address was not set in UTA\n", 4510 port_id); 4511 return -EINVAL; 4512 } 4513 4514 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4515 if (index < 0) { 4516 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4517 port_id); 4518 return -ENOSPC; 4519 } 4520 } 4521 4522 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 4523 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4524 if (ret == 0) { 4525 /* Update address in NIC data structure */ 4526 if (on) 4527 rte_ether_addr_copy(addr, 4528 &dev->data->hash_mac_addrs[index]); 4529 else 4530 rte_ether_addr_copy(&null_mac_addr, 4531 &dev->data->hash_mac_addrs[index]); 4532 } 4533 4534 return eth_err(port_id, ret); 4535 } 4536 4537 int 4538 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4539 { 4540 struct rte_eth_dev *dev; 4541 4542 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4543 dev = &rte_eth_devices[port_id]; 4544 4545 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 4546 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4547 on)); 4548 } 4549 4550 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4551 uint16_t tx_rate) 4552 { 4553 struct rte_eth_dev *dev; 4554 struct rte_eth_dev_info dev_info; 4555 struct rte_eth_link 
link; 4556 int ret; 4557 4558 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4559 dev = &rte_eth_devices[port_id]; 4560 4561 ret = rte_eth_dev_info_get(port_id, &dev_info); 4562 if (ret != 0) 4563 return ret; 4564 4565 link = dev->data->dev_link; 4566 4567 if (queue_idx > dev_info.max_tx_queues) { 4568 RTE_ETHDEV_LOG(ERR, 4569 "Set queue rate limit:port %u: invalid queue ID=%u\n", 4570 port_id, queue_idx); 4571 return -EINVAL; 4572 } 4573 4574 if (tx_rate > link.link_speed) { 4575 RTE_ETHDEV_LOG(ERR, 4576 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 4577 tx_rate, link.link_speed); 4578 return -EINVAL; 4579 } 4580 4581 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); 4582 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 4583 queue_idx, tx_rate)); 4584 } 4585 4586 RTE_INIT(eth_dev_init_fp_ops) 4587 { 4588 uint32_t i; 4589 4590 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 4591 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 4592 } 4593 4594 RTE_INIT(eth_dev_init_cb_lists) 4595 { 4596 uint16_t i; 4597 4598 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 4599 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 4600 } 4601 4602 int 4603 rte_eth_dev_callback_register(uint16_t port_id, 4604 enum rte_eth_event_type event, 4605 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4606 { 4607 struct rte_eth_dev *dev; 4608 struct rte_eth_dev_callback *user_cb; 4609 uint16_t next_port; 4610 uint16_t last_port; 4611 4612 if (cb_fn == NULL) { 4613 RTE_ETHDEV_LOG(ERR, 4614 "Cannot register ethdev port %u callback from NULL\n", 4615 port_id); 4616 return -EINVAL; 4617 } 4618 4619 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4620 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4621 return -EINVAL; 4622 } 4623 4624 if (port_id == RTE_ETH_ALL) { 4625 next_port = 0; 4626 last_port = RTE_MAX_ETHPORTS - 1; 4627 } else { 4628 next_port = last_port = port_id; 4629 } 4630 4631 rte_spinlock_lock(&eth_dev_cb_lock); 4632 4633 do { 4634 dev = &rte_eth_devices[next_port]; 4635 4636 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 4637 if (user_cb->cb_fn == cb_fn && 4638 user_cb->cb_arg == cb_arg && 4639 user_cb->event == event) { 4640 break; 4641 } 4642 } 4643 4644 /* create a new callback. 
*/ 4645 if (user_cb == NULL) { 4646 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 4647 sizeof(struct rte_eth_dev_callback), 0); 4648 if (user_cb != NULL) { 4649 user_cb->cb_fn = cb_fn; 4650 user_cb->cb_arg = cb_arg; 4651 user_cb->event = event; 4652 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 4653 user_cb, next); 4654 } else { 4655 rte_spinlock_unlock(&eth_dev_cb_lock); 4656 rte_eth_dev_callback_unregister(port_id, event, 4657 cb_fn, cb_arg); 4658 return -ENOMEM; 4659 } 4660 4661 } 4662 } while (++next_port <= last_port); 4663 4664 rte_spinlock_unlock(&eth_dev_cb_lock); 4665 return 0; 4666 } 4667 4668 int 4669 rte_eth_dev_callback_unregister(uint16_t port_id, 4670 enum rte_eth_event_type event, 4671 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4672 { 4673 int ret; 4674 struct rte_eth_dev *dev; 4675 struct rte_eth_dev_callback *cb, *next; 4676 uint16_t next_port; 4677 uint16_t last_port; 4678 4679 if (cb_fn == NULL) { 4680 RTE_ETHDEV_LOG(ERR, 4681 "Cannot unregister ethdev port %u callback from NULL\n", 4682 port_id); 4683 return -EINVAL; 4684 } 4685 4686 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4687 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4688 return -EINVAL; 4689 } 4690 4691 if (port_id == RTE_ETH_ALL) { 4692 next_port = 0; 4693 last_port = RTE_MAX_ETHPORTS - 1; 4694 } else { 4695 next_port = last_port = port_id; 4696 } 4697 4698 rte_spinlock_lock(&eth_dev_cb_lock); 4699 4700 do { 4701 dev = &rte_eth_devices[next_port]; 4702 ret = 0; 4703 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 4704 cb = next) { 4705 4706 next = TAILQ_NEXT(cb, next); 4707 4708 if (cb->cb_fn != cb_fn || cb->event != event || 4709 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 4710 continue; 4711 4712 /* 4713 * if this callback is not executing right now, 4714 * then remove it. 4715 */ 4716 if (cb->active == 0) { 4717 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 4718 rte_free(cb); 4719 } else { 4720 ret = -EAGAIN; 4721 } 4722 } 4723 } while (++next_port <= last_port); 4724 4725 rte_spinlock_unlock(&eth_dev_cb_lock); 4726 return ret; 4727 } 4728 4729 int 4730 rte_eth_dev_callback_process(struct rte_eth_dev *dev, 4731 enum rte_eth_event_type event, void *ret_param) 4732 { 4733 struct rte_eth_dev_callback *cb_lst; 4734 struct rte_eth_dev_callback dev_cb; 4735 int rc = 0; 4736 4737 rte_spinlock_lock(&eth_dev_cb_lock); 4738 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { 4739 if (cb_lst->cb_fn == NULL || cb_lst->event != event) 4740 continue; 4741 dev_cb = *cb_lst; 4742 cb_lst->active = 1; 4743 if (ret_param != NULL) 4744 dev_cb.ret_param = ret_param; 4745 4746 rte_spinlock_unlock(&eth_dev_cb_lock); 4747 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, 4748 dev_cb.cb_arg, dev_cb.ret_param); 4749 rte_spinlock_lock(&eth_dev_cb_lock); 4750 cb_lst->active = 0; 4751 } 4752 rte_spinlock_unlock(&eth_dev_cb_lock); 4753 return rc; 4754 } 4755 4756 void 4757 rte_eth_dev_probing_finish(struct rte_eth_dev *dev) 4758 { 4759 if (dev == NULL) 4760 return; 4761 4762 /* 4763 * for secondary process, at that point we expect device 4764 * to be already 'usable', so shared data and all function pointers 4765 * for fast-path devops have to be setup properly inside rte_eth_dev. 
4766 */ 4767 if (rte_eal_process_type() == RTE_PROC_SECONDARY) 4768 eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev); 4769 4770 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); 4771 4772 dev->state = RTE_ETH_DEV_ATTACHED; 4773 } 4774 4775 int 4776 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4777 { 4778 uint32_t vec; 4779 struct rte_eth_dev *dev; 4780 struct rte_intr_handle *intr_handle; 4781 uint16_t qid; 4782 int rc; 4783 4784 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4785 dev = &rte_eth_devices[port_id]; 4786 4787 if (!dev->intr_handle) { 4788 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4789 return -ENOTSUP; 4790 } 4791 4792 intr_handle = dev->intr_handle; 4793 if (!intr_handle->intr_vec) { 4794 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4795 return -EPERM; 4796 } 4797 4798 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4799 vec = intr_handle->intr_vec[qid]; 4800 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4801 if (rc && rc != -EEXIST) { 4802 RTE_ETHDEV_LOG(ERR, 4803 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 4804 port_id, qid, op, epfd, vec); 4805 } 4806 } 4807 4808 return 0; 4809 } 4810 4811 int 4812 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4813 { 4814 struct rte_intr_handle *intr_handle; 4815 struct rte_eth_dev *dev; 4816 unsigned int efd_idx; 4817 uint32_t vec; 4818 int fd; 4819 4820 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4821 dev = &rte_eth_devices[port_id]; 4822 4823 if (queue_id >= dev->data->nb_rx_queues) { 4824 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 4825 return -1; 4826 } 4827 4828 if (!dev->intr_handle) { 4829 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4830 return -1; 4831 } 4832 4833 intr_handle = dev->intr_handle; 4834 if (!intr_handle->intr_vec) { 4835 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4836 return -1; 4837 } 4838 4839 vec = intr_handle->intr_vec[queue_id]; 4840 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
4841 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 4842 fd = intr_handle->efds[efd_idx]; 4843 4844 return fd; 4845 } 4846 4847 static inline int 4848 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id, 4849 const char *ring_name) 4850 { 4851 return snprintf(name, len, "eth_p%d_q%d_%s", 4852 port_id, queue_id, ring_name); 4853 } 4854 4855 const struct rte_memzone * 4856 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, 4857 uint16_t queue_id, size_t size, unsigned align, 4858 int socket_id) 4859 { 4860 char z_name[RTE_MEMZONE_NAMESIZE]; 4861 const struct rte_memzone *mz; 4862 int rc; 4863 4864 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4865 queue_id, ring_name); 4866 if (rc >= RTE_MEMZONE_NAMESIZE) { 4867 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4868 rte_errno = ENAMETOOLONG; 4869 return NULL; 4870 } 4871 4872 mz = rte_memzone_lookup(z_name); 4873 if (mz) { 4874 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) || 4875 size > mz->len || 4876 ((uintptr_t)mz->addr & (align - 1)) != 0) { 4877 RTE_ETHDEV_LOG(ERR, 4878 "memzone %s does not justify the requested attributes\n", 4879 mz->name); 4880 return NULL; 4881 } 4882 4883 return mz; 4884 } 4885 4886 return rte_memzone_reserve_aligned(z_name, size, socket_id, 4887 RTE_MEMZONE_IOVA_CONTIG, align); 4888 } 4889 4890 int 4891 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name, 4892 uint16_t queue_id) 4893 { 4894 char z_name[RTE_MEMZONE_NAMESIZE]; 4895 const struct rte_memzone *mz; 4896 int rc = 0; 4897 4898 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4899 queue_id, ring_name); 4900 if (rc >= RTE_MEMZONE_NAMESIZE) { 4901 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4902 return -ENAMETOOLONG; 4903 } 4904 4905 mz = rte_memzone_lookup(z_name); 4906 if (mz) 4907 rc = rte_memzone_free(mz); 4908 else 4909 rc = -ENOENT; 4910 4911 return rc; 4912 } 4913 4914 int 4915 rte_eth_dev_create(struct rte_device *device, const char *name, 4916 size_t priv_data_size, 4917 ethdev_bus_specific_init ethdev_bus_specific_init, 4918 void *bus_init_params, 4919 ethdev_init_t ethdev_init, void *init_params) 4920 { 4921 struct rte_eth_dev *ethdev; 4922 int retval; 4923 4924 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); 4925 4926 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4927 ethdev = rte_eth_dev_allocate(name); 4928 if (!ethdev) 4929 return -ENODEV; 4930 4931 if (priv_data_size) { 4932 ethdev->data->dev_private = rte_zmalloc_socket( 4933 name, priv_data_size, RTE_CACHE_LINE_SIZE, 4934 device->numa_node); 4935 4936 if (!ethdev->data->dev_private) { 4937 RTE_ETHDEV_LOG(ERR, 4938 "failed to allocate private data\n"); 4939 retval = -ENOMEM; 4940 goto probe_failed; 4941 } 4942 } 4943 } else { 4944 ethdev = rte_eth_dev_attach_secondary(name); 4945 if (!ethdev) { 4946 RTE_ETHDEV_LOG(ERR, 4947 "secondary process attach failed, ethdev doesn't exist\n"); 4948 return -ENODEV; 4949 } 4950 } 4951 4952 ethdev->device = device; 4953 4954 if (ethdev_bus_specific_init) { 4955 retval = ethdev_bus_specific_init(ethdev, bus_init_params); 4956 if (retval) { 4957 RTE_ETHDEV_LOG(ERR, 4958 "ethdev bus specific initialisation failed\n"); 4959 goto probe_failed; 4960 } 4961 } 4962 4963 retval = ethdev_init(ethdev, init_params); 4964 if (retval) { 4965 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n"); 4966 goto probe_failed; 4967 } 4968 4969 rte_eth_dev_probing_finish(ethdev); 4970 4971 return retval; 4972 4973 probe_failed: 4974 
rte_eth_dev_release_port(ethdev); 4975 return retval; 4976 } 4977 4978 int 4979 rte_eth_dev_destroy(struct rte_eth_dev *ethdev, 4980 ethdev_uninit_t ethdev_uninit) 4981 { 4982 int ret; 4983 4984 ethdev = rte_eth_dev_allocated(ethdev->data->name); 4985 if (!ethdev) 4986 return -ENODEV; 4987 4988 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL); 4989 4990 ret = ethdev_uninit(ethdev); 4991 if (ret) 4992 return ret; 4993 4994 return rte_eth_dev_release_port(ethdev); 4995 } 4996 4997 int 4998 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 4999 int epfd, int op, void *data) 5000 { 5001 uint32_t vec; 5002 struct rte_eth_dev *dev; 5003 struct rte_intr_handle *intr_handle; 5004 int rc; 5005 5006 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5007 dev = &rte_eth_devices[port_id]; 5008 5009 if (queue_id >= dev->data->nb_rx_queues) { 5010 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5011 return -EINVAL; 5012 } 5013 5014 if (!dev->intr_handle) { 5015 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5016 return -ENOTSUP; 5017 } 5018 5019 intr_handle = dev->intr_handle; 5020 if (!intr_handle->intr_vec) { 5021 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5022 return -EPERM; 5023 } 5024 5025 vec = intr_handle->intr_vec[queue_id]; 5026 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5027 if (rc && rc != -EEXIST) { 5028 RTE_ETHDEV_LOG(ERR, 5029 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5030 port_id, queue_id, op, epfd, vec); 5031 return rc; 5032 } 5033 5034 return 0; 5035 } 5036 5037 int 5038 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5039 uint16_t queue_id) 5040 { 5041 struct rte_eth_dev *dev; 5042 int ret; 5043 5044 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5045 dev = &rte_eth_devices[port_id]; 5046 5047 ret = eth_dev_validate_rx_queue(dev, queue_id); 5048 if (ret != 0) 5049 return ret; 5050 5051 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 5052 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5053 } 5054 5055 int 5056 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5057 uint16_t queue_id) 5058 { 5059 struct rte_eth_dev *dev; 5060 int ret; 5061 5062 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5063 dev = &rte_eth_devices[port_id]; 5064 5065 ret = eth_dev_validate_rx_queue(dev, queue_id); 5066 if (ret != 0) 5067 return ret; 5068 5069 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 5070 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5071 } 5072 5073 5074 const struct rte_eth_rxtx_callback * 5075 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5076 rte_rx_callback_fn fn, void *user_param) 5077 { 5078 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5079 rte_errno = ENOTSUP; 5080 return NULL; 5081 #endif 5082 struct rte_eth_dev *dev; 5083 5084 /* check input parameters */ 5085 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5086 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5087 rte_errno = EINVAL; 5088 return NULL; 5089 } 5090 dev = &rte_eth_devices[port_id]; 5091 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5092 rte_errno = EINVAL; 5093 return NULL; 5094 } 5095 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5096 5097 if (cb == NULL) { 5098 rte_errno = ENOMEM; 5099 return NULL; 5100 } 5101 5102 cb->fn.rx = fn; 5103 cb->param = user_param; 5104 5105 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5106 /* Add the callbacks in fifo order. 
*/ 5107 struct rte_eth_rxtx_callback *tail = 5108 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5109 5110 if (!tail) { 5111 /* Stores to cb->fn and cb->param should complete before 5112 * cb is visible to data plane. 5113 */ 5114 __atomic_store_n( 5115 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5116 cb, __ATOMIC_RELEASE); 5117 5118 } else { 5119 while (tail->next) 5120 tail = tail->next; 5121 /* Stores to cb->fn and cb->param should complete before 5122 * cb is visible to data plane. 5123 */ 5124 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5125 } 5126 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5127 5128 return cb; 5129 } 5130 5131 const struct rte_eth_rxtx_callback * 5132 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5133 rte_rx_callback_fn fn, void *user_param) 5134 { 5135 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5136 rte_errno = ENOTSUP; 5137 return NULL; 5138 #endif 5139 /* check input parameters */ 5140 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5141 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5142 rte_errno = EINVAL; 5143 return NULL; 5144 } 5145 5146 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5147 5148 if (cb == NULL) { 5149 rte_errno = ENOMEM; 5150 return NULL; 5151 } 5152 5153 cb->fn.rx = fn; 5154 cb->param = user_param; 5155 5156 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5157 /* Add the callbacks at first position */ 5158 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5159 /* Stores to cb->fn, cb->param and cb->next should complete before 5160 * cb is visible to data plane threads. 5161 */ 5162 __atomic_store_n( 5163 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5164 cb, __ATOMIC_RELEASE); 5165 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5166 5167 return cb; 5168 } 5169 5170 const struct rte_eth_rxtx_callback * 5171 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5172 rte_tx_callback_fn fn, void *user_param) 5173 { 5174 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5175 rte_errno = ENOTSUP; 5176 return NULL; 5177 #endif 5178 struct rte_eth_dev *dev; 5179 5180 /* check input parameters */ 5181 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5182 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5183 rte_errno = EINVAL; 5184 return NULL; 5185 } 5186 5187 dev = &rte_eth_devices[port_id]; 5188 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5189 rte_errno = EINVAL; 5190 return NULL; 5191 } 5192 5193 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5194 5195 if (cb == NULL) { 5196 rte_errno = ENOMEM; 5197 return NULL; 5198 } 5199 5200 cb->fn.tx = fn; 5201 cb->param = user_param; 5202 5203 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5204 /* Add the callbacks in fifo order. */ 5205 struct rte_eth_rxtx_callback *tail = 5206 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5207 5208 if (!tail) { 5209 /* Stores to cb->fn and cb->param should complete before 5210 * cb is visible to data plane. 5211 */ 5212 __atomic_store_n( 5213 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5214 cb, __ATOMIC_RELEASE); 5215 5216 } else { 5217 while (tail->next) 5218 tail = tail->next; 5219 /* Stores to cb->fn and cb->param should complete before 5220 * cb is visible to data plane. 
5221 */ 5222 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5223 } 5224 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5225 5226 return cb; 5227 } 5228 5229 int 5230 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5231 const struct rte_eth_rxtx_callback *user_cb) 5232 { 5233 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5234 return -ENOTSUP; 5235 #endif 5236 /* Check input parameters. */ 5237 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5238 if (user_cb == NULL || 5239 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5240 return -EINVAL; 5241 5242 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5243 struct rte_eth_rxtx_callback *cb; 5244 struct rte_eth_rxtx_callback **prev_cb; 5245 int ret = -EINVAL; 5246 5247 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5248 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5249 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5250 cb = *prev_cb; 5251 if (cb == user_cb) { 5252 /* Remove the user cb from the callback list. */ 5253 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5254 ret = 0; 5255 break; 5256 } 5257 } 5258 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5259 5260 return ret; 5261 } 5262 5263 int 5264 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5265 const struct rte_eth_rxtx_callback *user_cb) 5266 { 5267 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5268 return -ENOTSUP; 5269 #endif 5270 /* Check input parameters. */ 5271 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5272 if (user_cb == NULL || 5273 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5274 return -EINVAL; 5275 5276 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5277 int ret = -EINVAL; 5278 struct rte_eth_rxtx_callback *cb; 5279 struct rte_eth_rxtx_callback **prev_cb; 5280 5281 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5282 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5283 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5284 cb = *prev_cb; 5285 if (cb == user_cb) { 5286 /* Remove the user cb from the callback list. 
*/ 5287 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5288 ret = 0; 5289 break; 5290 } 5291 } 5292 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5293 5294 return ret; 5295 } 5296 5297 int 5298 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5299 struct rte_eth_rxq_info *qinfo) 5300 { 5301 struct rte_eth_dev *dev; 5302 5303 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5304 dev = &rte_eth_devices[port_id]; 5305 5306 if (queue_id >= dev->data->nb_rx_queues) { 5307 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5308 return -EINVAL; 5309 } 5310 5311 if (qinfo == NULL) { 5312 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5313 port_id, queue_id); 5314 return -EINVAL; 5315 } 5316 5317 if (dev->data->rx_queues == NULL || 5318 dev->data->rx_queues[queue_id] == NULL) { 5319 RTE_ETHDEV_LOG(ERR, 5320 "Rx queue %"PRIu16" of device with port_id=%" 5321 PRIu16" has not been setup\n", 5322 queue_id, port_id); 5323 return -EINVAL; 5324 } 5325 5326 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5327 RTE_ETHDEV_LOG(INFO, 5328 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5329 queue_id, port_id); 5330 return -EINVAL; 5331 } 5332 5333 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 5334 5335 memset(qinfo, 0, sizeof(*qinfo)); 5336 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5337 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5338 5339 return 0; 5340 } 5341 5342 int 5343 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5344 struct rte_eth_txq_info *qinfo) 5345 { 5346 struct rte_eth_dev *dev; 5347 5348 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5349 dev = &rte_eth_devices[port_id]; 5350 5351 if (queue_id >= dev->data->nb_tx_queues) { 5352 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5353 return -EINVAL; 5354 } 5355 5356 if (qinfo == NULL) { 5357 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5358 port_id, queue_id); 5359 return -EINVAL; 5360 } 5361 5362 if (dev->data->tx_queues == NULL || 5363 dev->data->tx_queues[queue_id] == NULL) { 5364 RTE_ETHDEV_LOG(ERR, 5365 "Tx queue %"PRIu16" of device with port_id=%" 5366 PRIu16" has not been setup\n", 5367 queue_id, port_id); 5368 return -EINVAL; 5369 } 5370 5371 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5372 RTE_ETHDEV_LOG(INFO, 5373 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5374 queue_id, port_id); 5375 return -EINVAL; 5376 } 5377 5378 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 5379 5380 memset(qinfo, 0, sizeof(*qinfo)); 5381 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5382 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5383 5384 return 0; 5385 } 5386 5387 int 5388 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5389 struct rte_eth_burst_mode *mode) 5390 { 5391 struct rte_eth_dev *dev; 5392 5393 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5394 dev = &rte_eth_devices[port_id]; 5395 5396 if (queue_id >= dev->data->nb_rx_queues) { 5397 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5398 return -EINVAL; 5399 } 5400 5401 if (mode == NULL) { 5402 RTE_ETHDEV_LOG(ERR, 5403 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5404 port_id, queue_id); 5405 return -EINVAL; 5406 } 5407 5408 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); 5409 memset(mode, 0, sizeof(*mode)); 5410 return eth_err(port_id, 5411 
dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5412 } 5413 5414 int 5415 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5416 struct rte_eth_burst_mode *mode) 5417 { 5418 struct rte_eth_dev *dev; 5419 5420 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5421 dev = &rte_eth_devices[port_id]; 5422 5423 if (queue_id >= dev->data->nb_tx_queues) { 5424 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5425 return -EINVAL; 5426 } 5427 5428 if (mode == NULL) { 5429 RTE_ETHDEV_LOG(ERR, 5430 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5431 port_id, queue_id); 5432 return -EINVAL; 5433 } 5434 5435 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 5436 memset(mode, 0, sizeof(*mode)); 5437 return eth_err(port_id, 5438 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5439 } 5440 5441 int 5442 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5443 struct rte_power_monitor_cond *pmc) 5444 { 5445 struct rte_eth_dev *dev; 5446 5447 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5448 dev = &rte_eth_devices[port_id]; 5449 5450 if (queue_id >= dev->data->nb_rx_queues) { 5451 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5452 return -EINVAL; 5453 } 5454 5455 if (pmc == NULL) { 5456 RTE_ETHDEV_LOG(ERR, 5457 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5458 port_id, queue_id); 5459 return -EINVAL; 5460 } 5461 5462 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); 5463 return eth_err(port_id, 5464 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5465 } 5466 5467 int 5468 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5469 struct rte_ether_addr *mc_addr_set, 5470 uint32_t nb_mc_addr) 5471 { 5472 struct rte_eth_dev *dev; 5473 5474 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5475 dev = &rte_eth_devices[port_id]; 5476 5477 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 5478 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5479 mc_addr_set, nb_mc_addr)); 5480 } 5481 5482 int 5483 rte_eth_timesync_enable(uint16_t port_id) 5484 { 5485 struct rte_eth_dev *dev; 5486 5487 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5488 dev = &rte_eth_devices[port_id]; 5489 5490 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 5491 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5492 } 5493 5494 int 5495 rte_eth_timesync_disable(uint16_t port_id) 5496 { 5497 struct rte_eth_dev *dev; 5498 5499 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5500 dev = &rte_eth_devices[port_id]; 5501 5502 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 5503 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5504 } 5505 5506 int 5507 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5508 uint32_t flags) 5509 { 5510 struct rte_eth_dev *dev; 5511 5512 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5513 dev = &rte_eth_devices[port_id]; 5514 5515 if (timestamp == NULL) { 5516 RTE_ETHDEV_LOG(ERR, 5517 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5518 port_id); 5519 return -EINVAL; 5520 } 5521 5522 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 5523 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5524 (dev, timestamp, flags)); 5525 } 5526 5527 int 5528 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5529 struct timespec *timestamp) 5530 { 5531 struct rte_eth_dev *dev; 5532 5533 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5534 dev = &rte_eth_devices[port_id]; 5535 5536 if (timestamp == NULL) { 5537 RTE_ETHDEV_LOG(ERR, 5538 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5539 port_id); 5540 return -EINVAL; 5541 } 5542 5543 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 5544 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5545 (dev, timestamp)); 5546 } 5547 5548 int 5549 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5550 { 5551 struct rte_eth_dev *dev; 5552 5553 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5554 dev = &rte_eth_devices[port_id]; 5555 5556 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 5557 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5558 } 5559 5560 int 5561 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5562 { 5563 struct rte_eth_dev *dev; 5564 5565 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5566 dev = &rte_eth_devices[port_id]; 5567 5568 if (timestamp == NULL) { 5569 RTE_ETHDEV_LOG(ERR, 5570 "Cannot read ethdev port %u timesync time to NULL\n", 5571 port_id); 5572 return -EINVAL; 5573 } 5574 5575 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 5576 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5577 timestamp)); 5578 } 5579 5580 int 5581 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5582 { 5583 struct rte_eth_dev *dev; 5584 5585 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5586 dev = &rte_eth_devices[port_id]; 5587 5588 if (timestamp == NULL) { 5589 RTE_ETHDEV_LOG(ERR, 5590 "Cannot write ethdev port %u timesync from NULL time\n", 5591 port_id); 5592 return -EINVAL; 5593 } 5594 5595 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 5596 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5597 timestamp)); 5598 } 5599 5600 int 5601 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5602 { 5603 struct rte_eth_dev *dev; 5604 5605 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5606 dev = &rte_eth_devices[port_id]; 5607 5608 if (clock == NULL) { 5609 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5610 port_id); 5611 return -EINVAL; 5612 } 5613 5614 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 5615 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5616 } 5617 5618 int 5619 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5620 { 5621 struct rte_eth_dev *dev; 5622 5623 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5624 dev = &rte_eth_devices[port_id]; 5625 5626 if (info == NULL) { 5627 RTE_ETHDEV_LOG(ERR, 5628 "Cannot get ethdev port %u register info to NULL\n", 5629 port_id); 5630 return -EINVAL; 5631 } 5632 5633 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 5634 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5635 } 5636 5637 int 5638 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5639 { 5640 struct rte_eth_dev *dev; 5641 5642 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5643 dev = &rte_eth_devices[port_id]; 5644 5645 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 5646 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5647 } 5648 5649 int 5650 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5651 { 5652 struct rte_eth_dev *dev; 5653 5654 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
5655 dev = &rte_eth_devices[port_id]; 5656 5657 if (info == NULL) { 5658 RTE_ETHDEV_LOG(ERR, 5659 "Cannot get ethdev port %u EEPROM info to NULL\n", 5660 port_id); 5661 return -EINVAL; 5662 } 5663 5664 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 5665 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5666 } 5667 5668 int 5669 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5670 { 5671 struct rte_eth_dev *dev; 5672 5673 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5674 dev = &rte_eth_devices[port_id]; 5675 5676 if (info == NULL) { 5677 RTE_ETHDEV_LOG(ERR, 5678 "Cannot set ethdev port %u EEPROM from NULL info\n", 5679 port_id); 5680 return -EINVAL; 5681 } 5682 5683 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 5684 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5685 } 5686 5687 int 5688 rte_eth_dev_get_module_info(uint16_t port_id, 5689 struct rte_eth_dev_module_info *modinfo) 5690 { 5691 struct rte_eth_dev *dev; 5692 5693 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5694 dev = &rte_eth_devices[port_id]; 5695 5696 if (modinfo == NULL) { 5697 RTE_ETHDEV_LOG(ERR, 5698 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5699 port_id); 5700 return -EINVAL; 5701 } 5702 5703 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 5704 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5705 } 5706 5707 int 5708 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5709 struct rte_dev_eeprom_info *info) 5710 { 5711 struct rte_eth_dev *dev; 5712 5713 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5714 dev = &rte_eth_devices[port_id]; 5715 5716 if (info == NULL) { 5717 RTE_ETHDEV_LOG(ERR, 5718 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5719 port_id); 5720 return -EINVAL; 5721 } 5722 5723 if (info->data == NULL) { 5724 RTE_ETHDEV_LOG(ERR, 5725 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5726 port_id); 5727 return -EINVAL; 5728 } 5729 5730 if (info->length == 0) { 5731 RTE_ETHDEV_LOG(ERR, 5732 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5733 port_id); 5734 return -EINVAL; 5735 } 5736 5737 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 5738 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5739 } 5740 5741 int 5742 rte_eth_dev_get_dcb_info(uint16_t port_id, 5743 struct rte_eth_dcb_info *dcb_info) 5744 { 5745 struct rte_eth_dev *dev; 5746 5747 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5748 dev = &rte_eth_devices[port_id]; 5749 5750 if (dcb_info == NULL) { 5751 RTE_ETHDEV_LOG(ERR, 5752 "Cannot get ethdev port %u DCB info to NULL\n", 5753 port_id); 5754 return -EINVAL; 5755 } 5756 5757 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5758 5759 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 5760 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5761 } 5762 5763 static void 5764 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5765 const struct rte_eth_desc_lim *desc_lim) 5766 { 5767 if (desc_lim->nb_align != 0) 5768 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5769 5770 if (desc_lim->nb_max != 0) 5771 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5772 5773 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5774 } 5775 5776 int 5777 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5778 uint16_t *nb_rx_desc, 5779 uint16_t *nb_tx_desc) 5780 { 5781 struct rte_eth_dev_info dev_info; 5782 int ret; 5783 5784 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
5785 5786 ret = rte_eth_dev_info_get(port_id, &dev_info); 5787 if (ret != 0) 5788 return ret; 5789 5790 if (nb_rx_desc != NULL) 5791 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5792 5793 if (nb_tx_desc != NULL) 5794 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5795 5796 return 0; 5797 } 5798 5799 int 5800 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5801 struct rte_eth_hairpin_cap *cap) 5802 { 5803 struct rte_eth_dev *dev; 5804 5805 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5806 dev = &rte_eth_devices[port_id]; 5807 5808 if (cap == NULL) { 5809 RTE_ETHDEV_LOG(ERR, 5810 "Cannot get ethdev port %u hairpin capability to NULL\n", 5811 port_id); 5812 return -EINVAL; 5813 } 5814 5815 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 5816 memset(cap, 0, sizeof(*cap)); 5817 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5818 } 5819 5820 int 5821 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5822 { 5823 if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5824 return 1; 5825 return 0; 5826 } 5827 5828 int 5829 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5830 { 5831 if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5832 return 1; 5833 return 0; 5834 } 5835 5836 int 5837 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5838 { 5839 struct rte_eth_dev *dev; 5840 5841 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5842 dev = &rte_eth_devices[port_id]; 5843 5844 if (pool == NULL) { 5845 RTE_ETHDEV_LOG(ERR, 5846 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5847 port_id); 5848 return -EINVAL; 5849 } 5850 5851 if (*dev->dev_ops->pool_ops_supported == NULL) 5852 return 1; /* all pools are supported */ 5853 5854 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5855 } 5856 5857 /** 5858 * A set of values to describe the possible states of a switch domain. 5859 */ 5860 enum rte_eth_switch_domain_state { 5861 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, 5862 RTE_ETH_SWITCH_DOMAIN_ALLOCATED 5863 }; 5864 5865 /** 5866 * Array of switch domains available for allocation. Array is sized to 5867 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than 5868 * ethdev ports in a single process. 
5869 */ 5870 static struct rte_eth_dev_switch { 5871 enum rte_eth_switch_domain_state state; 5872 } eth_dev_switch_domains[RTE_MAX_ETHPORTS]; 5873 5874 int 5875 rte_eth_switch_domain_alloc(uint16_t *domain_id) 5876 { 5877 uint16_t i; 5878 5879 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 5880 5881 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 5882 if (eth_dev_switch_domains[i].state == 5883 RTE_ETH_SWITCH_DOMAIN_UNUSED) { 5884 eth_dev_switch_domains[i].state = 5885 RTE_ETH_SWITCH_DOMAIN_ALLOCATED; 5886 *domain_id = i; 5887 return 0; 5888 } 5889 } 5890 5891 return -ENOSPC; 5892 } 5893 5894 int 5895 rte_eth_switch_domain_free(uint16_t domain_id) 5896 { 5897 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || 5898 domain_id >= RTE_MAX_ETHPORTS) 5899 return -EINVAL; 5900 5901 if (eth_dev_switch_domains[domain_id].state != 5902 RTE_ETH_SWITCH_DOMAIN_ALLOCATED) 5903 return -EINVAL; 5904 5905 eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED; 5906 5907 return 0; 5908 } 5909 5910 static int 5911 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) 5912 { 5913 int state; 5914 struct rte_kvargs_pair *pair; 5915 char *letter; 5916 5917 arglist->str = strdup(str_in); 5918 if (arglist->str == NULL) 5919 return -ENOMEM; 5920 5921 letter = arglist->str; 5922 state = 0; 5923 arglist->count = 0; 5924 pair = &arglist->pairs[0]; 5925 while (1) { 5926 switch (state) { 5927 case 0: /* Initial */ 5928 if (*letter == '=') 5929 return -EINVAL; 5930 else if (*letter == '\0') 5931 return 0; 5932 5933 state = 1; 5934 pair->key = letter; 5935 /* fall-thru */ 5936 5937 case 1: /* Parsing key */ 5938 if (*letter == '=') { 5939 *letter = '\0'; 5940 pair->value = letter + 1; 5941 state = 2; 5942 } else if (*letter == ',' || *letter == '\0') 5943 return -EINVAL; 5944 break; 5945 5946 5947 case 2: /* Parsing value */ 5948 if (*letter == '[') 5949 state = 3; 5950 else if (*letter == ',') { 5951 *letter = '\0'; 5952 arglist->count++; 5953 pair = &arglist->pairs[arglist->count]; 5954 state = 0; 5955 } else if (*letter == '\0') { 5956 letter--; 5957 arglist->count++; 5958 pair = &arglist->pairs[arglist->count]; 5959 state = 0; 5960 } 5961 break; 5962 5963 case 3: /* Parsing list */ 5964 if (*letter == ']') 5965 state = 2; 5966 else if (*letter == '\0') 5967 return -EINVAL; 5968 break; 5969 } 5970 letter++; 5971 } 5972 } 5973 5974 int 5975 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) 5976 { 5977 struct rte_kvargs args; 5978 struct rte_kvargs_pair *pair; 5979 unsigned int i; 5980 int result = 0; 5981 5982 memset(eth_da, 0, sizeof(*eth_da)); 5983 5984 result = eth_dev_devargs_tokenise(&args, dargs); 5985 if (result < 0) 5986 goto parse_cleanup; 5987 5988 for (i = 0; i < args.count; i++) { 5989 pair = &args.pairs[i]; 5990 if (strcmp("representor", pair->key) == 0) { 5991 if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) { 5992 RTE_LOG(ERR, EAL, "duplicated representor key: %s\n", 5993 dargs); 5994 result = -1; 5995 goto parse_cleanup; 5996 } 5997 result = rte_eth_devargs_parse_representor_ports( 5998 pair->value, eth_da); 5999 if (result < 0) 6000 goto parse_cleanup; 6001 } 6002 } 6003 6004 parse_cleanup: 6005 if (args.str) 6006 free(args.str); 6007 6008 return result; 6009 } 6010 6011 int 6012 rte_eth_representor_id_get(uint16_t port_id, 6013 enum rte_eth_representor_type type, 6014 int controller, int pf, int representor_port, 6015 uint16_t *repr_id) 6016 { 6017 int ret, n, count; 6018 uint32_t i; 6019 struct rte_eth_representor_info *info = NULL; 6020 size_t 
size; 6021 6022 if (type == RTE_ETH_REPRESENTOR_NONE) 6023 return 0; 6024 if (repr_id == NULL) 6025 return -EINVAL; 6026 6027 /* Get PMD representor range info. */ 6028 ret = rte_eth_representor_info_get(port_id, NULL); 6029 if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF && 6030 controller == -1 && pf == -1) { 6031 /* Direct mapping for legacy VF representor. */ 6032 *repr_id = representor_port; 6033 return 0; 6034 } else if (ret < 0) { 6035 return ret; 6036 } 6037 n = ret; 6038 size = sizeof(*info) + n * sizeof(info->ranges[0]); 6039 info = calloc(1, size); 6040 if (info == NULL) 6041 return -ENOMEM; 6042 info->nb_ranges_alloc = n; 6043 ret = rte_eth_representor_info_get(port_id, info); 6044 if (ret < 0) 6045 goto out; 6046 6047 /* Default controller and pf to caller. */ 6048 if (controller == -1) 6049 controller = info->controller; 6050 if (pf == -1) 6051 pf = info->pf; 6052 6053 /* Locate representor ID. */ 6054 ret = -ENOENT; 6055 for (i = 0; i < info->nb_ranges; ++i) { 6056 if (info->ranges[i].type != type) 6057 continue; 6058 if (info->ranges[i].controller != controller) 6059 continue; 6060 if (info->ranges[i].id_end < info->ranges[i].id_base) { 6061 RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n", 6062 port_id, info->ranges[i].id_base, 6063 info->ranges[i].id_end, i); 6064 continue; 6065 6066 } 6067 count = info->ranges[i].id_end - info->ranges[i].id_base + 1; 6068 switch (info->ranges[i].type) { 6069 case RTE_ETH_REPRESENTOR_PF: 6070 if (pf < info->ranges[i].pf || 6071 pf >= info->ranges[i].pf + count) 6072 continue; 6073 *repr_id = info->ranges[i].id_base + 6074 (pf - info->ranges[i].pf); 6075 ret = 0; 6076 goto out; 6077 case RTE_ETH_REPRESENTOR_VF: 6078 if (info->ranges[i].pf != pf) 6079 continue; 6080 if (representor_port < info->ranges[i].vf || 6081 representor_port >= info->ranges[i].vf + count) 6082 continue; 6083 *repr_id = info->ranges[i].id_base + 6084 (representor_port - info->ranges[i].vf); 6085 ret = 0; 6086 goto out; 6087 case RTE_ETH_REPRESENTOR_SF: 6088 if (info->ranges[i].pf != pf) 6089 continue; 6090 if (representor_port < info->ranges[i].sf || 6091 representor_port >= info->ranges[i].sf + count) 6092 continue; 6093 *repr_id = info->ranges[i].id_base + 6094 (representor_port - info->ranges[i].sf); 6095 ret = 0; 6096 goto out; 6097 default: 6098 break; 6099 } 6100 } 6101 out: 6102 free(info); 6103 return ret; 6104 } 6105 6106 static int 6107 eth_dev_handle_port_list(const char *cmd __rte_unused, 6108 const char *params __rte_unused, 6109 struct rte_tel_data *d) 6110 { 6111 int port_id; 6112 6113 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 6114 RTE_ETH_FOREACH_DEV(port_id) 6115 rte_tel_data_add_array_int(d, port_id); 6116 return 0; 6117 } 6118 6119 static void 6120 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 6121 const char *stat_name) 6122 { 6123 int q; 6124 struct rte_tel_data *q_data = rte_tel_data_alloc(); 6125 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL); 6126 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 6127 rte_tel_data_add_array_u64(q_data, q_stats[q]); 6128 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 6129 } 6130 6131 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s) 6132 6133 static int 6134 eth_dev_handle_port_stats(const char *cmd __rte_unused, 6135 const char *params, 6136 struct rte_tel_data *d) 6137 { 6138 struct rte_eth_stats stats; 6139 int port_id, ret; 6140 6141 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6142 
static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}

static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
				eth_xstats[i].value);
	free(eth_xstats);
	return 0;
}
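/*
 * Layout note for the combined allocation above (descriptive only): the
 * single malloc() holds num_xstats value entries followed by num_xstats
 * name entries, so both arrays stay valid until the final free():
 *
 *   [ rte_eth_xstat * num_xstats | rte_eth_xstat_name * num_xstats ]
 *   ^ eth_xstats                   ^ xstat_names
 */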
6254 "full-duplex" : "half-duplex"); 6255 return 0; 6256 } 6257 6258 static int 6259 eth_dev_handle_port_info(const char *cmd __rte_unused, 6260 const char *params, 6261 struct rte_tel_data *d) 6262 { 6263 struct rte_tel_data *rxq_state, *txq_state; 6264 char mac_addr[RTE_ETHER_ADDR_LEN]; 6265 struct rte_eth_dev *eth_dev; 6266 char *end_param; 6267 int port_id, i; 6268 6269 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6270 return -1; 6271 6272 port_id = strtoul(params, &end_param, 0); 6273 if (*end_param != '\0') 6274 RTE_ETHDEV_LOG(NOTICE, 6275 "Extra parameters passed to ethdev telemetry command, ignoring"); 6276 6277 if (!rte_eth_dev_is_valid_port(port_id)) 6278 return -EINVAL; 6279 6280 eth_dev = &rte_eth_devices[port_id]; 6281 if (!eth_dev) 6282 return -EINVAL; 6283 6284 rxq_state = rte_tel_data_alloc(); 6285 if (!rxq_state) 6286 return -ENOMEM; 6287 6288 txq_state = rte_tel_data_alloc(); 6289 if (!txq_state) 6290 return -ENOMEM; 6291 6292 rte_tel_data_start_dict(d); 6293 rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); 6294 rte_tel_data_add_dict_int(d, "state", eth_dev->state); 6295 rte_tel_data_add_dict_int(d, "nb_rx_queues", 6296 eth_dev->data->nb_rx_queues); 6297 rte_tel_data_add_dict_int(d, "nb_tx_queues", 6298 eth_dev->data->nb_tx_queues); 6299 rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); 6300 rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); 6301 rte_tel_data_add_dict_int(d, "rx_mbuf_size_min", 6302 eth_dev->data->min_rx_buf_size); 6303 rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail", 6304 eth_dev->data->rx_mbuf_alloc_failed); 6305 snprintf(mac_addr, RTE_ETHER_ADDR_LEN, "%02x:%02x:%02x:%02x:%02x:%02x", 6306 eth_dev->data->mac_addrs->addr_bytes[0], 6307 eth_dev->data->mac_addrs->addr_bytes[1], 6308 eth_dev->data->mac_addrs->addr_bytes[2], 6309 eth_dev->data->mac_addrs->addr_bytes[3], 6310 eth_dev->data->mac_addrs->addr_bytes[4], 6311 eth_dev->data->mac_addrs->addr_bytes[5]); 6312 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); 6313 rte_tel_data_add_dict_int(d, "promiscuous", 6314 eth_dev->data->promiscuous); 6315 rte_tel_data_add_dict_int(d, "scattered_rx", 6316 eth_dev->data->scattered_rx); 6317 rte_tel_data_add_dict_int(d, "all_multicast", 6318 eth_dev->data->all_multicast); 6319 rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started); 6320 rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro); 6321 rte_tel_data_add_dict_int(d, "dev_configured", 6322 eth_dev->data->dev_configured); 6323 6324 rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL); 6325 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 6326 rte_tel_data_add_array_int(rxq_state, 6327 eth_dev->data->rx_queue_state[i]); 6328 6329 rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL); 6330 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 6331 rte_tel_data_add_array_int(txq_state, 6332 eth_dev->data->tx_queue_state[i]); 6333 6334 rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); 6335 rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); 6336 rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); 6337 rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags); 6338 rte_tel_data_add_dict_int(d, "rx_offloads", 6339 eth_dev->data->dev_conf.rxmode.offloads); 6340 rte_tel_data_add_dict_int(d, "tx_offloads", 6341 eth_dev->data->dev_conf.txmode.offloads); 6342 rte_tel_data_add_dict_int(d, "ethdev_rss_hf", 6343 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); 6344 6345 return 0; 6346 } 6347 
int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
		struct rte_hairpin_peer_info *cur_info,
		struct rte_hairpin_peer_info *peer_info,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
		struct rte_hairpin_peer_info *peer_info,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (ID=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
	return eth_err(port_id,
		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}
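/*
 * Application-side sketch (illustrative): Rx metadata delivery must be
 * negotiated before rte_eth_dev_configure(), which is what the
 * dev_configured check above enforces.  The flag names are assumed to be
 * the public RTE_ETH_RX_METADATA_* bits declared in rte_ethdev.h.
 *
 *	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *			    RTE_ETH_RX_METADATA_USER_MARK;
 *
 *	if (rte_eth_rx_metadata_negotiate(port_id, &features) == 0 &&
 *	    (features & RTE_ETH_RX_METADATA_USER_MARK) != 0) {
 *		;	// the PMD agreed to deliver MARK with received packets
 *	}
 */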
RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
			"Returns the device info for a port. Parameters: int port_id");
}
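/*
 * Usage note (illustrative): the commands registered above can be exercised
 * with the interactive client shipped in usertools/dpdk-telemetry.py, e.g.
 *
 *   $ ./usertools/dpdk-telemetry.py
 *   --> /ethdev/info,0
 *
 * which returns the dictionary assembled by eth_dev_handle_port_info()
 * for port 0.
 */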