1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2017 Intel Corporation 3 */ 4 5 #include <sys/types.h> 6 #include <sys/queue.h> 7 #include <ctype.h> 8 #include <stdio.h> 9 #include <stdlib.h> 10 #include <string.h> 11 #include <stdarg.h> 12 #include <errno.h> 13 #include <stdbool.h> 14 #include <stdint.h> 15 #include <inttypes.h> 16 #include <netinet/in.h> 17 18 #include <rte_byteorder.h> 19 #include <rte_log.h> 20 #include <rte_debug.h> 21 #include <rte_interrupts.h> 22 #include <rte_memory.h> 23 #include <rte_memcpy.h> 24 #include <rte_memzone.h> 25 #include <rte_launch.h> 26 #include <rte_eal.h> 27 #include <rte_per_lcore.h> 28 #include <rte_lcore.h> 29 #include <rte_atomic.h> 30 #include <rte_branch_prediction.h> 31 #include <rte_common.h> 32 #include <rte_mempool.h> 33 #include <rte_malloc.h> 34 #include <rte_mbuf.h> 35 #include <rte_errno.h> 36 #include <rte_spinlock.h> 37 #include <rte_string_fns.h> 38 #include <rte_kvargs.h> 39 #include <rte_class.h> 40 #include <rte_ether.h> 41 42 #include "rte_ethdev.h" 43 #include "rte_ethdev_driver.h" 44 #include "ethdev_profile.h" 45 #include "ethdev_private.h" 46 47 int rte_eth_dev_logtype; 48 49 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data"; 50 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS]; 51 52 /* spinlock for eth device callbacks */ 53 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER; 54 55 /* spinlock for add/remove rx callbacks */ 56 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER; 57 58 /* spinlock for add/remove tx callbacks */ 59 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER; 60 61 /* spinlock for shared data allocation */ 62 static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER; 63 64 /* store statistics names and its offset in stats structure */ 65 struct rte_eth_xstats_name_off { 66 char name[RTE_ETH_XSTATS_NAME_SIZE]; 67 unsigned offset; 68 }; 69 70 /* Shared memory between primary and secondary processes. 
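 *
 * The primary process reserves this zone under the name given by
 * MZ_RTE_ETH_DEV_DATA above and secondary processes look it up, so every
 * process sees the same per-port data and the same owner-id counter
 * (see rte_eth_dev_shared_data_prepare() below). Minimal sketch of the
 * lookup side, illustrative only:
 *
 *     const struct rte_memzone *mz = rte_memzone_lookup("rte_eth_dev_data");
 *     if (mz != NULL)
 *         printf("ethdev shared data at %p\n", mz->addr);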
*/ 71 static struct { 72 uint64_t next_owner_id; 73 rte_spinlock_t ownership_lock; 74 struct rte_eth_dev_data data[RTE_MAX_ETHPORTS]; 75 } *rte_eth_dev_shared_data; 76 77 static const struct rte_eth_xstats_name_off rte_stats_strings[] = { 78 {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)}, 79 {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)}, 80 {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)}, 81 {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)}, 82 {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)}, 83 {"rx_errors", offsetof(struct rte_eth_stats, ierrors)}, 84 {"tx_errors", offsetof(struct rte_eth_stats, oerrors)}, 85 {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats, 86 rx_nombuf)}, 87 }; 88 89 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0])) 90 91 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = { 92 {"packets", offsetof(struct rte_eth_stats, q_ipackets)}, 93 {"bytes", offsetof(struct rte_eth_stats, q_ibytes)}, 94 {"errors", offsetof(struct rte_eth_stats, q_errors)}, 95 }; 96 97 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \ 98 sizeof(rte_rxq_stats_strings[0])) 99 100 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = { 101 {"packets", offsetof(struct rte_eth_stats, q_opackets)}, 102 {"bytes", offsetof(struct rte_eth_stats, q_obytes)}, 103 }; 104 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \ 105 sizeof(rte_txq_stats_strings[0])) 106 107 #define RTE_RX_OFFLOAD_BIT2STR(_name) \ 108 { DEV_RX_OFFLOAD_##_name, #_name } 109 110 static const struct { 111 uint64_t offload; 112 const char *name; 113 } rte_rx_offload_names[] = { 114 RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP), 115 RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM), 116 RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM), 117 RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM), 118 RTE_RX_OFFLOAD_BIT2STR(TCP_LRO), 119 RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP), 120 RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM), 121 RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP), 122 RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT), 123 RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER), 124 RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND), 125 RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME), 126 RTE_RX_OFFLOAD_BIT2STR(SCATTER), 127 RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP), 128 RTE_RX_OFFLOAD_BIT2STR(SECURITY), 129 RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC), 130 RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM), 131 RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM), 132 RTE_RX_OFFLOAD_BIT2STR(RSS_HASH), 133 }; 134 135 #undef RTE_RX_OFFLOAD_BIT2STR 136 137 #define RTE_TX_OFFLOAD_BIT2STR(_name) \ 138 { DEV_TX_OFFLOAD_##_name, #_name } 139 140 static const struct { 141 uint64_t offload; 142 const char *name; 143 } rte_tx_offload_names[] = { 144 RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT), 145 RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM), 146 RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM), 147 RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM), 148 RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM), 149 RTE_TX_OFFLOAD_BIT2STR(TCP_TSO), 150 RTE_TX_OFFLOAD_BIT2STR(UDP_TSO), 151 RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM), 152 RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT), 153 RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO), 154 RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO), 155 RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO), 156 RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO), 157 RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT), 158 RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE), 159 RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS), 160 RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE), 161 RTE_TX_OFFLOAD_BIT2STR(SECURITY), 162 RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO), 163 RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO), 164 
RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM), 165 }; 166 167 #undef RTE_TX_OFFLOAD_BIT2STR 168 169 /** 170 * The user application callback description. 171 * 172 * It contains callback address to be registered by user application, 173 * the pointer to the parameters for callback, and the event type. 174 */ 175 struct rte_eth_dev_callback { 176 TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */ 177 rte_eth_dev_cb_fn cb_fn; /**< Callback address */ 178 void *cb_arg; /**< Parameter for callback */ 179 void *ret_param; /**< Return parameter */ 180 enum rte_eth_event_type event; /**< Interrupt event type */ 181 uint32_t active; /**< Callback is executing */ 182 }; 183 184 enum { 185 STAT_QMAP_TX = 0, 186 STAT_QMAP_RX 187 }; 188 189 int 190 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str) 191 { 192 int ret; 193 struct rte_devargs devargs = {.args = NULL}; 194 const char *bus_param_key; 195 char *bus_str = NULL; 196 char *cls_str = NULL; 197 int str_size; 198 199 memset(iter, 0, sizeof(*iter)); 200 201 /* 202 * The devargs string may use various syntaxes: 203 * - 0000:08:00.0,representor=[1-3] 204 * - pci:0000:06:00.0,representor=[0,5] 205 * - class=eth,mac=00:11:22:33:44:55 206 * A new syntax is in development (not yet supported): 207 * - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z 208 */ 209 210 /* 211 * Handle pure class filter (i.e. without any bus-level argument), 212 * from future new syntax. 213 * rte_devargs_parse() is not yet supporting the new syntax, 214 * that's why this simple case is temporarily parsed here. 215 */ 216 #define iter_anybus_str "class=eth," 217 if (strncmp(devargs_str, iter_anybus_str, 218 strlen(iter_anybus_str)) == 0) { 219 iter->cls_str = devargs_str + strlen(iter_anybus_str); 220 goto end; 221 } 222 223 /* Split bus, device and parameters. */ 224 ret = rte_devargs_parse(&devargs, devargs_str); 225 if (ret != 0) 226 goto error; 227 228 /* 229 * Assume parameters of old syntax can match only at ethdev level. 230 * Extra parameters will be ignored, thanks to "+" prefix. 231 */ 232 str_size = strlen(devargs.args) + 2; 233 cls_str = malloc(str_size); 234 if (cls_str == NULL) { 235 ret = -ENOMEM; 236 goto error; 237 } 238 ret = snprintf(cls_str, str_size, "+%s", devargs.args); 239 if (ret != str_size - 1) { 240 ret = -EINVAL; 241 goto error; 242 } 243 iter->cls_str = cls_str; 244 free(devargs.args); /* allocated by rte_devargs_parse() */ 245 devargs.args = NULL; 246 247 iter->bus = devargs.bus; 248 if (iter->bus->dev_iterate == NULL) { 249 ret = -ENOTSUP; 250 goto error; 251 } 252 253 /* Convert bus args to new syntax for use with new API dev_iterate. 
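	 * For example (illustrative): with the devargs string
	 * "0000:06:00.0,representor=[0,5]" on the PCI bus, bus_str below
	 * becomes "addr=0000:06:00.0" while cls_str built above is
	 * "+representor=[0,5]".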
*/ 254 if (strcmp(iter->bus->name, "vdev") == 0) { 255 bus_param_key = "name"; 256 } else if (strcmp(iter->bus->name, "pci") == 0) { 257 bus_param_key = "addr"; 258 } else { 259 ret = -ENOTSUP; 260 goto error; 261 } 262 str_size = strlen(bus_param_key) + strlen(devargs.name) + 2; 263 bus_str = malloc(str_size); 264 if (bus_str == NULL) { 265 ret = -ENOMEM; 266 goto error; 267 } 268 ret = snprintf(bus_str, str_size, "%s=%s", 269 bus_param_key, devargs.name); 270 if (ret != str_size - 1) { 271 ret = -EINVAL; 272 goto error; 273 } 274 iter->bus_str = bus_str; 275 276 end: 277 iter->cls = rte_class_find_by_name("eth"); 278 return 0; 279 280 error: 281 if (ret == -ENOTSUP) 282 RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n", 283 iter->bus->name); 284 free(devargs.args); 285 free(bus_str); 286 free(cls_str); 287 return ret; 288 } 289 290 uint16_t 291 rte_eth_iterator_next(struct rte_dev_iterator *iter) 292 { 293 if (iter->cls == NULL) /* invalid ethdev iterator */ 294 return RTE_MAX_ETHPORTS; 295 296 do { /* loop to try all matching rte_device */ 297 /* If not pure ethdev filter and */ 298 if (iter->bus != NULL && 299 /* not in middle of rte_eth_dev iteration, */ 300 iter->class_device == NULL) { 301 /* get next rte_device to try. */ 302 iter->device = iter->bus->dev_iterate( 303 iter->device, iter->bus_str, iter); 304 if (iter->device == NULL) 305 break; /* no more rte_device candidate */ 306 } 307 /* A device is matching bus part, need to check ethdev part. */ 308 iter->class_device = iter->cls->dev_iterate( 309 iter->class_device, iter->cls_str, iter); 310 if (iter->class_device != NULL) 311 return eth_dev_to_id(iter->class_device); /* match */ 312 } while (iter->bus != NULL); /* need to try next rte_device */ 313 314 /* No more ethdev port to iterate. */ 315 rte_eth_iterator_cleanup(iter); 316 return RTE_MAX_ETHPORTS; 317 } 318 319 void 320 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter) 321 { 322 if (iter->bus_str == NULL) 323 return; /* nothing to free in pure class filter */ 324 free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */ 325 free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */ 326 memset(iter, 0, sizeof(*iter)); 327 } 328 329 uint16_t 330 rte_eth_find_next(uint16_t port_id) 331 { 332 while (port_id < RTE_MAX_ETHPORTS && 333 rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED) 334 port_id++; 335 336 if (port_id >= RTE_MAX_ETHPORTS) 337 return RTE_MAX_ETHPORTS; 338 339 return port_id; 340 } 341 342 /* 343 * Macro to iterate over all valid ports for internal usage. 344 * Note: RTE_ETH_FOREACH_DEV is different because filtering owned ports. 
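 * Application-side sketch (illustrative only): RTE_ETH_FOREACH_DEV skips
 * ports owned by another entity, e.g.
 *
 *     uint16_t pid;
 *     RTE_ETH_FOREACH_DEV(pid)
 *         printf("usable port %u\n", pid);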
345 */ 346 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \ 347 for (port_id = rte_eth_find_next(0); \ 348 port_id < RTE_MAX_ETHPORTS; \ 349 port_id = rte_eth_find_next(port_id + 1)) 350 351 uint16_t 352 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent) 353 { 354 port_id = rte_eth_find_next(port_id); 355 while (port_id < RTE_MAX_ETHPORTS && 356 rte_eth_devices[port_id].device != parent) 357 port_id = rte_eth_find_next(port_id + 1); 358 359 return port_id; 360 } 361 362 uint16_t 363 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id) 364 { 365 RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS); 366 return rte_eth_find_next_of(port_id, 367 rte_eth_devices[ref_port_id].device); 368 } 369 370 static void 371 rte_eth_dev_shared_data_prepare(void) 372 { 373 const unsigned flags = 0; 374 const struct rte_memzone *mz; 375 376 rte_spinlock_lock(&rte_eth_shared_data_lock); 377 378 if (rte_eth_dev_shared_data == NULL) { 379 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 380 /* Allocate port data and ownership shared memory. */ 381 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA, 382 sizeof(*rte_eth_dev_shared_data), 383 rte_socket_id(), flags); 384 } else 385 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA); 386 if (mz == NULL) 387 rte_panic("Cannot allocate ethdev shared data\n"); 388 389 rte_eth_dev_shared_data = mz->addr; 390 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 391 rte_eth_dev_shared_data->next_owner_id = 392 RTE_ETH_DEV_NO_OWNER + 1; 393 rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock); 394 memset(rte_eth_dev_shared_data->data, 0, 395 sizeof(rte_eth_dev_shared_data->data)); 396 } 397 } 398 399 rte_spinlock_unlock(&rte_eth_shared_data_lock); 400 } 401 402 static bool 403 is_allocated(const struct rte_eth_dev *ethdev) 404 { 405 return ethdev->data->name[0] != '\0'; 406 } 407 408 static struct rte_eth_dev * 409 _rte_eth_dev_allocated(const char *name) 410 { 411 uint16_t i; 412 413 RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX); 414 415 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 416 if (rte_eth_devices[i].data != NULL && 417 strcmp(rte_eth_devices[i].data->name, name) == 0) 418 return &rte_eth_devices[i]; 419 } 420 return NULL; 421 } 422 423 struct rte_eth_dev * 424 rte_eth_dev_allocated(const char *name) 425 { 426 struct rte_eth_dev *ethdev; 427 428 rte_eth_dev_shared_data_prepare(); 429 430 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 431 432 ethdev = _rte_eth_dev_allocated(name); 433 434 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 435 436 return ethdev; 437 } 438 439 static uint16_t 440 rte_eth_dev_find_free_port(void) 441 { 442 uint16_t i; 443 444 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 445 /* Using shared name field to find a free port. 
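		 * An empty name marks the slot as free; because this field
		 * lives in the shared memzone, primary and secondary
		 * processes agree on which port ids are already taken.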
*/ 446 if (rte_eth_dev_shared_data->data[i].name[0] == '\0') { 447 RTE_ASSERT(rte_eth_devices[i].state == 448 RTE_ETH_DEV_UNUSED); 449 return i; 450 } 451 } 452 return RTE_MAX_ETHPORTS; 453 } 454 455 static struct rte_eth_dev * 456 eth_dev_get(uint16_t port_id) 457 { 458 struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id]; 459 460 eth_dev->data = &rte_eth_dev_shared_data->data[port_id]; 461 462 return eth_dev; 463 } 464 465 struct rte_eth_dev * 466 rte_eth_dev_allocate(const char *name) 467 { 468 uint16_t port_id; 469 struct rte_eth_dev *eth_dev = NULL; 470 size_t name_len; 471 472 name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN); 473 if (name_len == 0) { 474 RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n"); 475 return NULL; 476 } 477 478 if (name_len >= RTE_ETH_NAME_MAX_LEN) { 479 RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n"); 480 return NULL; 481 } 482 483 rte_eth_dev_shared_data_prepare(); 484 485 /* Synchronize port creation between primary and secondary threads. */ 486 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 487 488 if (_rte_eth_dev_allocated(name) != NULL) { 489 RTE_ETHDEV_LOG(ERR, 490 "Ethernet device with name %s already allocated\n", 491 name); 492 goto unlock; 493 } 494 495 port_id = rte_eth_dev_find_free_port(); 496 if (port_id == RTE_MAX_ETHPORTS) { 497 RTE_ETHDEV_LOG(ERR, 498 "Reached maximum number of Ethernet ports\n"); 499 goto unlock; 500 } 501 502 eth_dev = eth_dev_get(port_id); 503 strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name)); 504 eth_dev->data->port_id = port_id; 505 eth_dev->data->mtu = RTE_ETHER_MTU; 506 507 unlock: 508 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 509 510 return eth_dev; 511 } 512 513 /* 514 * Attach to a port already registered by the primary process, which 515 * makes sure that the same device would have the same port id both 516 * in the primary and secondary process. 517 */ 518 struct rte_eth_dev * 519 rte_eth_dev_attach_secondary(const char *name) 520 { 521 uint16_t i; 522 struct rte_eth_dev *eth_dev = NULL; 523 524 rte_eth_dev_shared_data_prepare(); 525 526 /* Synchronize port attachment to primary port creation and release. 
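	 * Typical caller (illustrative sketch, my_dev_ops is hypothetical):
	 * a PMD probed in a secondary process attaches by the name the
	 * primary used and then only re-plugs its function pointers:
	 *
	 *     struct rte_eth_dev *eth_dev = rte_eth_dev_attach_secondary(name);
	 *     if (eth_dev == NULL)
	 *         return -ENODEV;
	 *     eth_dev->dev_ops = &my_dev_ops;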
*/ 527 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 528 529 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 530 if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0) 531 break; 532 } 533 if (i == RTE_MAX_ETHPORTS) { 534 RTE_ETHDEV_LOG(ERR, 535 "Device %s is not driven by the primary process\n", 536 name); 537 } else { 538 eth_dev = eth_dev_get(i); 539 RTE_ASSERT(eth_dev->data->port_id == i); 540 } 541 542 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 543 return eth_dev; 544 } 545 546 int 547 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) 548 { 549 if (eth_dev == NULL) 550 return -EINVAL; 551 552 rte_eth_dev_shared_data_prepare(); 553 554 if (eth_dev->state != RTE_ETH_DEV_UNUSED) 555 _rte_eth_dev_callback_process(eth_dev, 556 RTE_ETH_EVENT_DESTROY, NULL); 557 558 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 559 560 eth_dev->state = RTE_ETH_DEV_UNUSED; 561 562 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 563 rte_free(eth_dev->data->rx_queues); 564 rte_free(eth_dev->data->tx_queues); 565 rte_free(eth_dev->data->mac_addrs); 566 rte_free(eth_dev->data->hash_mac_addrs); 567 rte_free(eth_dev->data->dev_private); 568 memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data)); 569 } 570 571 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 572 573 return 0; 574 } 575 576 int 577 rte_eth_dev_is_valid_port(uint16_t port_id) 578 { 579 if (port_id >= RTE_MAX_ETHPORTS || 580 (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)) 581 return 0; 582 else 583 return 1; 584 } 585 586 static int 587 rte_eth_is_valid_owner_id(uint64_t owner_id) 588 { 589 if (owner_id == RTE_ETH_DEV_NO_OWNER || 590 rte_eth_dev_shared_data->next_owner_id <= owner_id) 591 return 0; 592 return 1; 593 } 594 595 uint64_t 596 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id) 597 { 598 port_id = rte_eth_find_next(port_id); 599 while (port_id < RTE_MAX_ETHPORTS && 600 rte_eth_devices[port_id].data->owner.id != owner_id) 601 port_id = rte_eth_find_next(port_id + 1); 602 603 return port_id; 604 } 605 606 int 607 rte_eth_dev_owner_new(uint64_t *owner_id) 608 { 609 rte_eth_dev_shared_data_prepare(); 610 611 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 612 613 *owner_id = rte_eth_dev_shared_data->next_owner_id++; 614 615 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 616 return 0; 617 } 618 619 static int 620 _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id, 621 const struct rte_eth_dev_owner *new_owner) 622 { 623 struct rte_eth_dev *ethdev = &rte_eth_devices[port_id]; 624 struct rte_eth_dev_owner *port_owner; 625 626 if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) { 627 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n", 628 port_id); 629 return -ENODEV; 630 } 631 632 if (!rte_eth_is_valid_owner_id(new_owner->id) && 633 !rte_eth_is_valid_owner_id(old_owner_id)) { 634 RTE_ETHDEV_LOG(ERR, 635 "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n", 636 old_owner_id, new_owner->id); 637 return -EINVAL; 638 } 639 640 port_owner = &rte_eth_devices[port_id].data->owner; 641 if (port_owner->id != old_owner_id) { 642 RTE_ETHDEV_LOG(ERR, 643 "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n", 644 port_id, port_owner->name, port_owner->id); 645 return -EPERM; 646 } 647 648 /* can not truncate (same structure) */ 649 strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN); 650 651 port_owner->id = new_owner->id; 652 653 RTE_ETHDEV_LOG(DEBUG, "Port %u 
owner is %s_%016"PRIx64"\n", 654 port_id, new_owner->name, new_owner->id); 655 656 return 0; 657 } 658 659 int 660 rte_eth_dev_owner_set(const uint16_t port_id, 661 const struct rte_eth_dev_owner *owner) 662 { 663 int ret; 664 665 rte_eth_dev_shared_data_prepare(); 666 667 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 668 669 ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner); 670 671 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 672 return ret; 673 } 674 675 int 676 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id) 677 { 678 const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner) 679 {.id = RTE_ETH_DEV_NO_OWNER, .name = ""}; 680 int ret; 681 682 rte_eth_dev_shared_data_prepare(); 683 684 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 685 686 ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner); 687 688 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 689 return ret; 690 } 691 692 int 693 rte_eth_dev_owner_delete(const uint64_t owner_id) 694 { 695 uint16_t port_id; 696 int ret = 0; 697 698 rte_eth_dev_shared_data_prepare(); 699 700 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 701 702 if (rte_eth_is_valid_owner_id(owner_id)) { 703 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) 704 if (rte_eth_devices[port_id].data->owner.id == owner_id) 705 memset(&rte_eth_devices[port_id].data->owner, 0, 706 sizeof(struct rte_eth_dev_owner)); 707 RTE_ETHDEV_LOG(NOTICE, 708 "All port owners owned by %016"PRIx64" identifier have removed\n", 709 owner_id); 710 } else { 711 RTE_ETHDEV_LOG(ERR, 712 "Invalid owner id=%016"PRIx64"\n", 713 owner_id); 714 ret = -EINVAL; 715 } 716 717 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 718 719 return ret; 720 } 721 722 int 723 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner) 724 { 725 int ret = 0; 726 struct rte_eth_dev *ethdev = &rte_eth_devices[port_id]; 727 728 rte_eth_dev_shared_data_prepare(); 729 730 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock); 731 732 if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) { 733 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n", 734 port_id); 735 ret = -ENODEV; 736 } else { 737 rte_memcpy(owner, ðdev->data->owner, sizeof(*owner)); 738 } 739 740 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); 741 return ret; 742 } 743 744 int 745 rte_eth_dev_socket_id(uint16_t port_id) 746 { 747 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 748 return rte_eth_devices[port_id].data->numa_node; 749 } 750 751 void * 752 rte_eth_dev_get_sec_ctx(uint16_t port_id) 753 { 754 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL); 755 return rte_eth_devices[port_id].security_ctx; 756 } 757 758 uint16_t 759 rte_eth_dev_count_avail(void) 760 { 761 uint16_t p; 762 uint16_t count; 763 764 count = 0; 765 766 RTE_ETH_FOREACH_DEV(p) 767 count++; 768 769 return count; 770 } 771 772 uint16_t 773 rte_eth_dev_count_total(void) 774 { 775 uint16_t port, count = 0; 776 777 RTE_ETH_FOREACH_VALID_DEV(port) 778 count++; 779 780 return count; 781 } 782 783 int 784 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name) 785 { 786 char *tmp; 787 788 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 789 790 if (name == NULL) { 791 RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n"); 792 return -EINVAL; 793 } 794 795 /* shouldn't check 'rte_eth_devices[i].data', 796 * because it might be overwritten by VDEV PMD */ 797 tmp = 
rte_eth_dev_shared_data->data[port_id].name; 798 strcpy(name, tmp); 799 return 0; 800 } 801 802 int 803 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) 804 { 805 uint16_t pid; 806 807 if (name == NULL) { 808 RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n"); 809 return -EINVAL; 810 } 811 812 RTE_ETH_FOREACH_VALID_DEV(pid) 813 if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) { 814 *port_id = pid; 815 return 0; 816 } 817 818 return -ENODEV; 819 } 820 821 static int 822 eth_err(uint16_t port_id, int ret) 823 { 824 if (ret == 0) 825 return 0; 826 if (rte_eth_dev_is_removed(port_id)) 827 return -EIO; 828 return ret; 829 } 830 831 static int 832 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) 833 { 834 uint16_t old_nb_queues = dev->data->nb_rx_queues; 835 void **rxq; 836 unsigned i; 837 838 if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */ 839 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues", 840 sizeof(dev->data->rx_queues[0]) * nb_queues, 841 RTE_CACHE_LINE_SIZE); 842 if (dev->data->rx_queues == NULL) { 843 dev->data->nb_rx_queues = 0; 844 return -(ENOMEM); 845 } 846 } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */ 847 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP); 848 849 rxq = dev->data->rx_queues; 850 851 for (i = nb_queues; i < old_nb_queues; i++) 852 (*dev->dev_ops->rx_queue_release)(rxq[i]); 853 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues, 854 RTE_CACHE_LINE_SIZE); 855 if (rxq == NULL) 856 return -(ENOMEM); 857 if (nb_queues > old_nb_queues) { 858 uint16_t new_qs = nb_queues - old_nb_queues; 859 860 memset(rxq + old_nb_queues, 0, 861 sizeof(rxq[0]) * new_qs); 862 } 863 864 dev->data->rx_queues = rxq; 865 866 } else if (dev->data->rx_queues != NULL && nb_queues == 0) { 867 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP); 868 869 rxq = dev->data->rx_queues; 870 871 for (i = nb_queues; i < old_nb_queues; i++) 872 (*dev->dev_ops->rx_queue_release)(rxq[i]); 873 874 rte_free(dev->data->rx_queues); 875 dev->data->rx_queues = NULL; 876 } 877 dev->data->nb_rx_queues = nb_queues; 878 return 0; 879 } 880 881 int 882 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id) 883 { 884 struct rte_eth_dev *dev; 885 886 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 887 888 dev = &rte_eth_devices[port_id]; 889 if (!dev->data->dev_started) { 890 RTE_ETHDEV_LOG(ERR, 891 "Port %u must be started before start any queue\n", 892 port_id); 893 return -EINVAL; 894 } 895 896 if (rx_queue_id >= dev->data->nb_rx_queues) { 897 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id); 898 return -EINVAL; 899 } 900 901 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP); 902 903 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 904 RTE_ETHDEV_LOG(INFO, 905 "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 906 rx_queue_id, port_id); 907 return -EINVAL; 908 } 909 910 if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 911 RTE_ETHDEV_LOG(INFO, 912 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 913 rx_queue_id, port_id); 914 return 0; 915 } 916 917 return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, 918 rx_queue_id)); 919 920 } 921 922 int 923 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id) 924 { 925 struct rte_eth_dev *dev; 926 927 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 928 929 dev = 
&rte_eth_devices[port_id]; 930 if (rx_queue_id >= dev->data->nb_rx_queues) { 931 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id); 932 return -EINVAL; 933 } 934 935 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP); 936 937 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 938 RTE_ETHDEV_LOG(INFO, 939 "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 940 rx_queue_id, port_id); 941 return -EINVAL; 942 } 943 944 if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 945 RTE_ETHDEV_LOG(INFO, 946 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 947 rx_queue_id, port_id); 948 return 0; 949 } 950 951 return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id)); 952 953 } 954 955 int 956 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id) 957 { 958 struct rte_eth_dev *dev; 959 960 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 961 962 dev = &rte_eth_devices[port_id]; 963 if (!dev->data->dev_started) { 964 RTE_ETHDEV_LOG(ERR, 965 "Port %u must be started before start any queue\n", 966 port_id); 967 return -EINVAL; 968 } 969 970 if (tx_queue_id >= dev->data->nb_tx_queues) { 971 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id); 972 return -EINVAL; 973 } 974 975 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP); 976 977 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 978 RTE_ETHDEV_LOG(INFO, 979 "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 980 tx_queue_id, port_id); 981 return -EINVAL; 982 } 983 984 if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 985 RTE_ETHDEV_LOG(INFO, 986 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 987 tx_queue_id, port_id); 988 return 0; 989 } 990 991 return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id)); 992 } 993 994 int 995 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id) 996 { 997 struct rte_eth_dev *dev; 998 999 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1000 1001 dev = &rte_eth_devices[port_id]; 1002 if (tx_queue_id >= dev->data->nb_tx_queues) { 1003 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id); 1004 return -EINVAL; 1005 } 1006 1007 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP); 1008 1009 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 1010 RTE_ETHDEV_LOG(INFO, 1011 "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1012 tx_queue_id, port_id); 1013 return -EINVAL; 1014 } 1015 1016 if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 1017 RTE_ETHDEV_LOG(INFO, 1018 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 1019 tx_queue_id, port_id); 1020 return 0; 1021 } 1022 1023 return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id)); 1024 1025 } 1026 1027 static int 1028 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) 1029 { 1030 uint16_t old_nb_queues = dev->data->nb_tx_queues; 1031 void **txq; 1032 unsigned i; 1033 1034 if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */ 1035 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues", 1036 sizeof(dev->data->tx_queues[0]) * nb_queues, 1037 RTE_CACHE_LINE_SIZE); 1038 if (dev->data->tx_queues == NULL) { 1039 dev->data->nb_tx_queues = 0; 1040 return -(ENOMEM); 1041 } 1042 } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */ 1043 
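		/* Re-configure path: queues beyond the new count are released
		 * first, the pointer array is resized with rte_realloc(), and
		 * newly added slots are zeroed so later tx_queue_setup()
		 * calls see them as unset.
		 */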
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP); 1044 1045 txq = dev->data->tx_queues; 1046 1047 for (i = nb_queues; i < old_nb_queues; i++) 1048 (*dev->dev_ops->tx_queue_release)(txq[i]); 1049 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues, 1050 RTE_CACHE_LINE_SIZE); 1051 if (txq == NULL) 1052 return -ENOMEM; 1053 if (nb_queues > old_nb_queues) { 1054 uint16_t new_qs = nb_queues - old_nb_queues; 1055 1056 memset(txq + old_nb_queues, 0, 1057 sizeof(txq[0]) * new_qs); 1058 } 1059 1060 dev->data->tx_queues = txq; 1061 1062 } else if (dev->data->tx_queues != NULL && nb_queues == 0) { 1063 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP); 1064 1065 txq = dev->data->tx_queues; 1066 1067 for (i = nb_queues; i < old_nb_queues; i++) 1068 (*dev->dev_ops->tx_queue_release)(txq[i]); 1069 1070 rte_free(dev->data->tx_queues); 1071 dev->data->tx_queues = NULL; 1072 } 1073 dev->data->nb_tx_queues = nb_queues; 1074 return 0; 1075 } 1076 1077 uint32_t 1078 rte_eth_speed_bitflag(uint32_t speed, int duplex) 1079 { 1080 switch (speed) { 1081 case ETH_SPEED_NUM_10M: 1082 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD; 1083 case ETH_SPEED_NUM_100M: 1084 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD; 1085 case ETH_SPEED_NUM_1G: 1086 return ETH_LINK_SPEED_1G; 1087 case ETH_SPEED_NUM_2_5G: 1088 return ETH_LINK_SPEED_2_5G; 1089 case ETH_SPEED_NUM_5G: 1090 return ETH_LINK_SPEED_5G; 1091 case ETH_SPEED_NUM_10G: 1092 return ETH_LINK_SPEED_10G; 1093 case ETH_SPEED_NUM_20G: 1094 return ETH_LINK_SPEED_20G; 1095 case ETH_SPEED_NUM_25G: 1096 return ETH_LINK_SPEED_25G; 1097 case ETH_SPEED_NUM_40G: 1098 return ETH_LINK_SPEED_40G; 1099 case ETH_SPEED_NUM_50G: 1100 return ETH_LINK_SPEED_50G; 1101 case ETH_SPEED_NUM_56G: 1102 return ETH_LINK_SPEED_56G; 1103 case ETH_SPEED_NUM_100G: 1104 return ETH_LINK_SPEED_100G; 1105 default: 1106 return 0; 1107 } 1108 } 1109 1110 const char * 1111 rte_eth_dev_rx_offload_name(uint64_t offload) 1112 { 1113 const char *name = "UNKNOWN"; 1114 unsigned int i; 1115 1116 for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) { 1117 if (offload == rte_rx_offload_names[i].offload) { 1118 name = rte_rx_offload_names[i].name; 1119 break; 1120 } 1121 } 1122 1123 return name; 1124 } 1125 1126 const char * 1127 rte_eth_dev_tx_offload_name(uint64_t offload) 1128 { 1129 const char *name = "UNKNOWN"; 1130 unsigned int i; 1131 1132 for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) { 1133 if (offload == rte_tx_offload_names[i].offload) { 1134 name = rte_tx_offload_names[i].name; 1135 break; 1136 } 1137 } 1138 1139 return name; 1140 } 1141 1142 static inline int 1143 check_lro_pkt_size(uint16_t port_id, uint32_t config_size, 1144 uint32_t max_rx_pkt_len, uint32_t dev_info_size) 1145 { 1146 int ret = 0; 1147 1148 if (dev_info_size == 0) { 1149 if (config_size != max_rx_pkt_len) { 1150 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size" 1151 " %u != %u is not allowed\n", 1152 port_id, config_size, max_rx_pkt_len); 1153 ret = -EINVAL; 1154 } 1155 } else if (config_size > dev_info_size) { 1156 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1157 "> max allowed value %u\n", port_id, config_size, 1158 dev_info_size); 1159 ret = -EINVAL; 1160 } else if (config_size < RTE_ETHER_MIN_LEN) { 1161 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1162 "< min allowed value %u\n", port_id, config_size, 1163 (unsigned int)RTE_ETHER_MIN_LEN); 1164 ret = -EINVAL; 1165 } 1166 return ret; 1167 } 1168 1169 /* 1170 * Validate offloads 
that are requested through rte_eth_dev_configure against 1171 * the offloads successfully set by the ethernet device. 1172 * 1173 * @param port_id 1174 * The port identifier of the Ethernet device. 1175 * @param req_offloads 1176 * The offloads that have been requested through `rte_eth_dev_configure`. 1177 * @param set_offloads 1178 * The offloads successfully set by the ethernet device. 1179 * @param offload_type 1180 * The offload type i.e. Rx/Tx string. 1181 * @param offload_name 1182 * The function that prints the offload name. 1183 * @return 1184 * - (0) if validation successful. 1185 * - (-EINVAL) if requested offload has been silently disabled. 1186 * 1187 */ 1188 static int 1189 validate_offloads(uint16_t port_id, uint64_t req_offloads, 1190 uint64_t set_offloads, const char *offload_type, 1191 const char *(*offload_name)(uint64_t)) 1192 { 1193 uint64_t offloads_diff = req_offloads ^ set_offloads; 1194 uint64_t offload; 1195 int ret = 0; 1196 1197 while (offloads_diff != 0) { 1198 /* Check if any offload is requested but not enabled. */ 1199 offload = 1ULL << __builtin_ctzll(offloads_diff); 1200 if (offload & req_offloads) { 1201 RTE_ETHDEV_LOG(ERR, 1202 "Port %u failed to enable %s offload %s\n", 1203 port_id, offload_type, offload_name(offload)); 1204 ret = -EINVAL; 1205 } 1206 1207 /* Check if offload couldn't be disabled. */ 1208 if (offload & set_offloads) { 1209 RTE_ETHDEV_LOG(DEBUG, 1210 "Port %u %s offload %s is not requested but enabled\n", 1211 port_id, offload_type, offload_name(offload)); 1212 } 1213 1214 offloads_diff &= ~offload; 1215 } 1216 1217 return ret; 1218 } 1219 1220 int 1221 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, 1222 const struct rte_eth_conf *dev_conf) 1223 { 1224 struct rte_eth_dev *dev; 1225 struct rte_eth_dev_info dev_info; 1226 struct rte_eth_conf orig_conf; 1227 int diag; 1228 int ret; 1229 1230 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1231 1232 dev = &rte_eth_devices[port_id]; 1233 1234 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP); 1235 1236 if (dev->data->dev_started) { 1237 RTE_ETHDEV_LOG(ERR, 1238 "Port %u must be stopped to allow configuration\n", 1239 port_id); 1240 return -EBUSY; 1241 } 1242 1243 /* Store original config, as rollback required on failure */ 1244 memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf)); 1245 1246 /* 1247 * Copy the dev_conf parameter into the dev structure. 1248 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get 1249 */ 1250 if (dev_conf != &dev->data->dev_conf) 1251 memcpy(&dev->data->dev_conf, dev_conf, 1252 sizeof(dev->data->dev_conf)); 1253 1254 ret = rte_eth_dev_info_get(port_id, &dev_info); 1255 if (ret != 0) 1256 goto rollback; 1257 1258 /* If number of queues specified by application for both Rx and Tx is 1259 * zero, use driver preferred values. This cannot be done individually 1260 * as it is valid for either Tx or Rx (but not both) to be zero. 1261 * If driver does not provide any preferred valued, fall back on 1262 * EAL defaults. 
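	 *
	 * Application-side sketch (illustrative only): passing zero for both
	 * directions picks the driver-preferred counts, e.g.
	 *
	 *     struct rte_eth_conf conf = {0};
	 *     ret = rte_eth_dev_configure(port_id, 0, 0, &conf);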
1263 */ 1264 if (nb_rx_q == 0 && nb_tx_q == 0) { 1265 nb_rx_q = dev_info.default_rxportconf.nb_queues; 1266 if (nb_rx_q == 0) 1267 nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES; 1268 nb_tx_q = dev_info.default_txportconf.nb_queues; 1269 if (nb_tx_q == 0) 1270 nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES; 1271 } 1272 1273 if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) { 1274 RTE_ETHDEV_LOG(ERR, 1275 "Number of RX queues requested (%u) is greater than max supported(%d)\n", 1276 nb_rx_q, RTE_MAX_QUEUES_PER_PORT); 1277 ret = -EINVAL; 1278 goto rollback; 1279 } 1280 1281 if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) { 1282 RTE_ETHDEV_LOG(ERR, 1283 "Number of TX queues requested (%u) is greater than max supported(%d)\n", 1284 nb_tx_q, RTE_MAX_QUEUES_PER_PORT); 1285 ret = -EINVAL; 1286 goto rollback; 1287 } 1288 1289 /* 1290 * Check that the numbers of RX and TX queues are not greater 1291 * than the maximum number of RX and TX queues supported by the 1292 * configured device. 1293 */ 1294 if (nb_rx_q > dev_info.max_rx_queues) { 1295 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n", 1296 port_id, nb_rx_q, dev_info.max_rx_queues); 1297 ret = -EINVAL; 1298 goto rollback; 1299 } 1300 1301 if (nb_tx_q > dev_info.max_tx_queues) { 1302 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n", 1303 port_id, nb_tx_q, dev_info.max_tx_queues); 1304 ret = -EINVAL; 1305 goto rollback; 1306 } 1307 1308 /* Check that the device supports requested interrupts */ 1309 if ((dev_conf->intr_conf.lsc == 1) && 1310 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { 1311 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n", 1312 dev->device->driver->name); 1313 ret = -EINVAL; 1314 goto rollback; 1315 } 1316 if ((dev_conf->intr_conf.rmv == 1) && 1317 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { 1318 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n", 1319 dev->device->driver->name); 1320 ret = -EINVAL; 1321 goto rollback; 1322 } 1323 1324 /* 1325 * If jumbo frames are enabled, check that the maximum RX packet 1326 * length is supported by the configured device. 1327 */ 1328 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 1329 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) { 1330 RTE_ETHDEV_LOG(ERR, 1331 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n", 1332 port_id, dev_conf->rxmode.max_rx_pkt_len, 1333 dev_info.max_rx_pktlen); 1334 ret = -EINVAL; 1335 goto rollback; 1336 } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) { 1337 RTE_ETHDEV_LOG(ERR, 1338 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n", 1339 port_id, dev_conf->rxmode.max_rx_pkt_len, 1340 (unsigned int)RTE_ETHER_MIN_LEN); 1341 ret = -EINVAL; 1342 goto rollback; 1343 } 1344 } else { 1345 if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN || 1346 dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN) 1347 /* Use default value */ 1348 dev->data->dev_conf.rxmode.max_rx_pkt_len = 1349 RTE_ETHER_MAX_LEN; 1350 } 1351 1352 /* 1353 * If LRO is enabled, check that the maximum aggregated packet 1354 * size is supported by the configured device. 
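	 * When rxmode.max_lro_pkt_size is left at zero it defaults to
	 * max_rx_pkt_len below, and check_lro_pkt_size() then enforces the
	 * device limit reported in dev_info.max_lro_pkt_size.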
1355 */ 1356 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) { 1357 if (dev_conf->rxmode.max_lro_pkt_size == 0) 1358 dev->data->dev_conf.rxmode.max_lro_pkt_size = 1359 dev->data->dev_conf.rxmode.max_rx_pkt_len; 1360 ret = check_lro_pkt_size(port_id, 1361 dev->data->dev_conf.rxmode.max_lro_pkt_size, 1362 dev->data->dev_conf.rxmode.max_rx_pkt_len, 1363 dev_info.max_lro_pkt_size); 1364 if (ret != 0) 1365 goto rollback; 1366 } 1367 1368 /* Any requested offloading must be within its device capabilities */ 1369 if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) != 1370 dev_conf->rxmode.offloads) { 1371 RTE_ETHDEV_LOG(ERR, 1372 "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads " 1373 "capabilities 0x%"PRIx64" in %s()\n", 1374 port_id, dev_conf->rxmode.offloads, 1375 dev_info.rx_offload_capa, 1376 __func__); 1377 ret = -EINVAL; 1378 goto rollback; 1379 } 1380 if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) != 1381 dev_conf->txmode.offloads) { 1382 RTE_ETHDEV_LOG(ERR, 1383 "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads " 1384 "capabilities 0x%"PRIx64" in %s()\n", 1385 port_id, dev_conf->txmode.offloads, 1386 dev_info.tx_offload_capa, 1387 __func__); 1388 ret = -EINVAL; 1389 goto rollback; 1390 } 1391 1392 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = 1393 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf); 1394 1395 /* Check that device supports requested rss hash functions. */ 1396 if ((dev_info.flow_type_rss_offloads | 1397 dev_conf->rx_adv_conf.rss_conf.rss_hf) != 1398 dev_info.flow_type_rss_offloads) { 1399 RTE_ETHDEV_LOG(ERR, 1400 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 1401 port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf, 1402 dev_info.flow_type_rss_offloads); 1403 ret = -EINVAL; 1404 goto rollback; 1405 } 1406 1407 /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */ 1408 if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) && 1409 (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) { 1410 RTE_ETHDEV_LOG(ERR, 1411 "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n", 1412 port_id, 1413 rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH)); 1414 ret = -EINVAL; 1415 goto rollback; 1416 } 1417 1418 /* 1419 * Setup new number of RX/TX queues and reconfigure device. 1420 */ 1421 diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q); 1422 if (diag != 0) { 1423 RTE_ETHDEV_LOG(ERR, 1424 "Port%u rte_eth_dev_rx_queue_config = %d\n", 1425 port_id, diag); 1426 ret = diag; 1427 goto rollback; 1428 } 1429 1430 diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q); 1431 if (diag != 0) { 1432 RTE_ETHDEV_LOG(ERR, 1433 "Port%u rte_eth_dev_tx_queue_config = %d\n", 1434 port_id, diag); 1435 rte_eth_dev_rx_queue_config(dev, 0); 1436 ret = diag; 1437 goto rollback; 1438 } 1439 1440 diag = (*dev->dev_ops->dev_configure)(dev); 1441 if (diag != 0) { 1442 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n", 1443 port_id, diag); 1444 ret = eth_err(port_id, diag); 1445 goto reset_queues; 1446 } 1447 1448 /* Initialize Rx profiling if enabled at compilation time. */ 1449 diag = __rte_eth_dev_profile_init(port_id, dev); 1450 if (diag != 0) { 1451 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n", 1452 port_id, diag); 1453 ret = eth_err(port_id, diag); 1454 goto reset_queues; 1455 } 1456 1457 /* Validate Rx offloads. 
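	 * validate_offloads() returns an error if an offload requested in
	 * dev_conf was silently dropped by the PMD, and only logs at DEBUG
	 * level when the PMD enabled an offload that was not requested.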
*/ 1458 diag = validate_offloads(port_id, 1459 dev_conf->rxmode.offloads, 1460 dev->data->dev_conf.rxmode.offloads, "Rx", 1461 rte_eth_dev_rx_offload_name); 1462 if (diag != 0) { 1463 ret = diag; 1464 goto reset_queues; 1465 } 1466 1467 /* Validate Tx offloads. */ 1468 diag = validate_offloads(port_id, 1469 dev_conf->txmode.offloads, 1470 dev->data->dev_conf.txmode.offloads, "Tx", 1471 rte_eth_dev_tx_offload_name); 1472 if (diag != 0) { 1473 ret = diag; 1474 goto reset_queues; 1475 } 1476 1477 return 0; 1478 reset_queues: 1479 rte_eth_dev_rx_queue_config(dev, 0); 1480 rte_eth_dev_tx_queue_config(dev, 0); 1481 rollback: 1482 memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf)); 1483 1484 return ret; 1485 } 1486 1487 void 1488 _rte_eth_dev_reset(struct rte_eth_dev *dev) 1489 { 1490 if (dev->data->dev_started) { 1491 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n", 1492 dev->data->port_id); 1493 return; 1494 } 1495 1496 rte_eth_dev_rx_queue_config(dev, 0); 1497 rte_eth_dev_tx_queue_config(dev, 0); 1498 1499 memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf)); 1500 } 1501 1502 static void 1503 rte_eth_dev_mac_restore(struct rte_eth_dev *dev, 1504 struct rte_eth_dev_info *dev_info) 1505 { 1506 struct rte_ether_addr *addr; 1507 uint16_t i; 1508 uint32_t pool = 0; 1509 uint64_t pool_mask; 1510 1511 /* replay MAC address configuration including default MAC */ 1512 addr = &dev->data->mac_addrs[0]; 1513 if (*dev->dev_ops->mac_addr_set != NULL) 1514 (*dev->dev_ops->mac_addr_set)(dev, addr); 1515 else if (*dev->dev_ops->mac_addr_add != NULL) 1516 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); 1517 1518 if (*dev->dev_ops->mac_addr_add != NULL) { 1519 for (i = 1; i < dev_info->max_mac_addrs; i++) { 1520 addr = &dev->data->mac_addrs[i]; 1521 1522 /* skip zero address */ 1523 if (rte_is_zero_ether_addr(addr)) 1524 continue; 1525 1526 pool = 0; 1527 pool_mask = dev->data->mac_pool_sel[i]; 1528 1529 do { 1530 if (pool_mask & 1ULL) 1531 (*dev->dev_ops->mac_addr_add)(dev, 1532 addr, i, pool); 1533 pool_mask >>= 1; 1534 pool++; 1535 } while (pool_mask); 1536 } 1537 } 1538 } 1539 1540 static int 1541 rte_eth_dev_config_restore(struct rte_eth_dev *dev, 1542 struct rte_eth_dev_info *dev_info, uint16_t port_id) 1543 { 1544 int ret; 1545 1546 if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)) 1547 rte_eth_dev_mac_restore(dev, dev_info); 1548 1549 /* replay promiscuous configuration */ 1550 /* 1551 * use callbacks directly since we don't need port_id check and 1552 * would like to bypass the same value set 1553 */ 1554 if (rte_eth_promiscuous_get(port_id) == 1 && 1555 *dev->dev_ops->promiscuous_enable != NULL) { 1556 ret = eth_err(port_id, 1557 (*dev->dev_ops->promiscuous_enable)(dev)); 1558 if (ret != 0 && ret != -ENOTSUP) { 1559 RTE_ETHDEV_LOG(ERR, 1560 "Failed to enable promiscuous mode for device (port %u): %s\n", 1561 port_id, rte_strerror(-ret)); 1562 return ret; 1563 } 1564 } else if (rte_eth_promiscuous_get(port_id) == 0 && 1565 *dev->dev_ops->promiscuous_disable != NULL) { 1566 ret = eth_err(port_id, 1567 (*dev->dev_ops->promiscuous_disable)(dev)); 1568 if (ret != 0 && ret != -ENOTSUP) { 1569 RTE_ETHDEV_LOG(ERR, 1570 "Failed to disable promiscuous mode for device (port %u): %s\n", 1571 port_id, rte_strerror(-ret)); 1572 return ret; 1573 } 1574 } 1575 1576 /* replay all multicast configuration */ 1577 /* 1578 * use callbacks directly since we don't need port_id check and 1579 * would like to bypass the same value set 1580 */ 1581 if 
(rte_eth_allmulticast_get(port_id) == 1 && 1582 *dev->dev_ops->allmulticast_enable != NULL) { 1583 ret = eth_err(port_id, 1584 (*dev->dev_ops->allmulticast_enable)(dev)); 1585 if (ret != 0 && ret != -ENOTSUP) { 1586 RTE_ETHDEV_LOG(ERR, 1587 "Failed to enable allmulticast mode for device (port %u): %s\n", 1588 port_id, rte_strerror(-ret)); 1589 return ret; 1590 } 1591 } else if (rte_eth_allmulticast_get(port_id) == 0 && 1592 *dev->dev_ops->allmulticast_disable != NULL) { 1593 ret = eth_err(port_id, 1594 (*dev->dev_ops->allmulticast_disable)(dev)); 1595 if (ret != 0 && ret != -ENOTSUP) { 1596 RTE_ETHDEV_LOG(ERR, 1597 "Failed to disable allmulticast mode for device (port %u): %s\n", 1598 port_id, rte_strerror(-ret)); 1599 return ret; 1600 } 1601 } 1602 1603 return 0; 1604 } 1605 1606 int 1607 rte_eth_dev_start(uint16_t port_id) 1608 { 1609 struct rte_eth_dev *dev; 1610 struct rte_eth_dev_info dev_info; 1611 int diag; 1612 int ret; 1613 1614 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1615 1616 dev = &rte_eth_devices[port_id]; 1617 1618 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP); 1619 1620 if (dev->data->dev_started != 0) { 1621 RTE_ETHDEV_LOG(INFO, 1622 "Device with port_id=%"PRIu16" already started\n", 1623 port_id); 1624 return 0; 1625 } 1626 1627 ret = rte_eth_dev_info_get(port_id, &dev_info); 1628 if (ret != 0) 1629 return ret; 1630 1631 /* Lets restore MAC now if device does not support live change */ 1632 if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR) 1633 rte_eth_dev_mac_restore(dev, &dev_info); 1634 1635 diag = (*dev->dev_ops->dev_start)(dev); 1636 if (diag == 0) 1637 dev->data->dev_started = 1; 1638 else 1639 return eth_err(port_id, diag); 1640 1641 ret = rte_eth_dev_config_restore(dev, &dev_info, port_id); 1642 if (ret != 0) { 1643 RTE_ETHDEV_LOG(ERR, 1644 "Error during restoring configuration for device (port %u): %s\n", 1645 port_id, rte_strerror(-ret)); 1646 rte_eth_dev_stop(port_id); 1647 return ret; 1648 } 1649 1650 if (dev->data->dev_conf.intr_conf.lsc == 0) { 1651 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 1652 (*dev->dev_ops->link_update)(dev, 0); 1653 } 1654 return 0; 1655 } 1656 1657 void 1658 rte_eth_dev_stop(uint16_t port_id) 1659 { 1660 struct rte_eth_dev *dev; 1661 1662 RTE_ETH_VALID_PORTID_OR_RET(port_id); 1663 dev = &rte_eth_devices[port_id]; 1664 1665 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop); 1666 1667 if (dev->data->dev_started == 0) { 1668 RTE_ETHDEV_LOG(INFO, 1669 "Device with port_id=%"PRIu16" already stopped\n", 1670 port_id); 1671 return; 1672 } 1673 1674 dev->data->dev_started = 0; 1675 (*dev->dev_ops->dev_stop)(dev); 1676 } 1677 1678 int 1679 rte_eth_dev_set_link_up(uint16_t port_id) 1680 { 1681 struct rte_eth_dev *dev; 1682 1683 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1684 1685 dev = &rte_eth_devices[port_id]; 1686 1687 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP); 1688 return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev)); 1689 } 1690 1691 int 1692 rte_eth_dev_set_link_down(uint16_t port_id) 1693 { 1694 struct rte_eth_dev *dev; 1695 1696 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1697 1698 dev = &rte_eth_devices[port_id]; 1699 1700 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP); 1701 return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev)); 1702 } 1703 1704 void 1705 rte_eth_dev_close(uint16_t port_id) 1706 { 1707 struct rte_eth_dev *dev; 1708 1709 RTE_ETH_VALID_PORTID_OR_RET(port_id); 1710 dev = &rte_eth_devices[port_id]; 
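	/* Illustrative application-side shutdown order (sketch only):
	 *     rte_eth_dev_stop(port_id);
	 *     rte_eth_dev_close(port_id);
	 * When the PMD sets RTE_ETH_DEV_CLOSE_REMOVE, close also releases
	 * the port data via rte_eth_dev_release_port() below.
	 */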
1711 1712 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close); 1713 dev->data->dev_started = 0; 1714 (*dev->dev_ops->dev_close)(dev); 1715 1716 /* check behaviour flag - temporary for PMD migration */ 1717 if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) { 1718 /* new behaviour: send event + reset state + free all data */ 1719 rte_eth_dev_release_port(dev); 1720 return; 1721 } 1722 RTE_ETHDEV_LOG(DEBUG, "Port closing is using an old behaviour.\n" 1723 "The driver %s should migrate to the new behaviour.\n", 1724 dev->device->driver->name); 1725 /* old behaviour: only free queue arrays */ 1726 dev->data->nb_rx_queues = 0; 1727 rte_free(dev->data->rx_queues); 1728 dev->data->rx_queues = NULL; 1729 dev->data->nb_tx_queues = 0; 1730 rte_free(dev->data->tx_queues); 1731 dev->data->tx_queues = NULL; 1732 } 1733 1734 int 1735 rte_eth_dev_reset(uint16_t port_id) 1736 { 1737 struct rte_eth_dev *dev; 1738 int ret; 1739 1740 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1741 dev = &rte_eth_devices[port_id]; 1742 1743 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP); 1744 1745 rte_eth_dev_stop(port_id); 1746 ret = dev->dev_ops->dev_reset(dev); 1747 1748 return eth_err(port_id, ret); 1749 } 1750 1751 int 1752 rte_eth_dev_is_removed(uint16_t port_id) 1753 { 1754 struct rte_eth_dev *dev; 1755 int ret; 1756 1757 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); 1758 1759 dev = &rte_eth_devices[port_id]; 1760 1761 if (dev->state == RTE_ETH_DEV_REMOVED) 1762 return 1; 1763 1764 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0); 1765 1766 ret = dev->dev_ops->is_removed(dev); 1767 if (ret != 0) 1768 /* Device is physically removed. */ 1769 dev->state = RTE_ETH_DEV_REMOVED; 1770 1771 return ret; 1772 } 1773 1774 int 1775 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 1776 uint16_t nb_rx_desc, unsigned int socket_id, 1777 const struct rte_eth_rxconf *rx_conf, 1778 struct rte_mempool *mp) 1779 { 1780 int ret; 1781 uint32_t mbp_buf_size; 1782 struct rte_eth_dev *dev; 1783 struct rte_eth_dev_info dev_info; 1784 struct rte_eth_rxconf local_conf; 1785 void **rxq; 1786 1787 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1788 1789 dev = &rte_eth_devices[port_id]; 1790 if (rx_queue_id >= dev->data->nb_rx_queues) { 1791 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id); 1792 return -EINVAL; 1793 } 1794 1795 if (mp == NULL) { 1796 RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n"); 1797 return -EINVAL; 1798 } 1799 1800 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP); 1801 1802 /* 1803 * Check the size of the mbuf data buffer. 1804 * This value must be provided in the private data of the memory pool. 1805 * First check that the memory pool has a valid private data. 
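	 *
	 * A pool created with rte_pktmbuf_pool_create() already carries the
	 * required rte_pktmbuf_pool_private area. Sizing sketch so the data
	 * room check below passes (names and counts are example values only):
	 *
	 *     struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool",
	 *             8192, 256, 0, RTE_PKTMBUF_HEADROOM + 2048,
	 *             rte_socket_id());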
1806 */ 1807 ret = rte_eth_dev_info_get(port_id, &dev_info); 1808 if (ret != 0) 1809 return ret; 1810 1811 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) { 1812 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n", 1813 mp->name, (int)mp->private_data_size, 1814 (int)sizeof(struct rte_pktmbuf_pool_private)); 1815 return -ENOSPC; 1816 } 1817 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 1818 1819 if (mbp_buf_size < dev_info.min_rx_bufsize + RTE_PKTMBUF_HEADROOM) { 1820 RTE_ETHDEV_LOG(ERR, 1821 "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n", 1822 mp->name, (int)mbp_buf_size, 1823 (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize), 1824 (int)RTE_PKTMBUF_HEADROOM, 1825 (int)dev_info.min_rx_bufsize); 1826 return -EINVAL; 1827 } 1828 1829 /* Use default specified by driver, if nb_rx_desc is zero */ 1830 if (nb_rx_desc == 0) { 1831 nb_rx_desc = dev_info.default_rxportconf.ring_size; 1832 /* If driver default is also zero, fall back on EAL default */ 1833 if (nb_rx_desc == 0) 1834 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 1835 } 1836 1837 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 1838 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 1839 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 1840 1841 RTE_ETHDEV_LOG(ERR, 1842 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 1843 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 1844 dev_info.rx_desc_lim.nb_min, 1845 dev_info.rx_desc_lim.nb_align); 1846 return -EINVAL; 1847 } 1848 1849 if (dev->data->dev_started && 1850 !(dev_info.dev_capa & 1851 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 1852 return -EBUSY; 1853 1854 if (dev->data->dev_started && 1855 (dev->data->rx_queue_state[rx_queue_id] != 1856 RTE_ETH_QUEUE_STATE_STOPPED)) 1857 return -EBUSY; 1858 1859 rxq = dev->data->rx_queues; 1860 if (rxq[rx_queue_id]) { 1861 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, 1862 -ENOTSUP); 1863 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]); 1864 rxq[rx_queue_id] = NULL; 1865 } 1866 1867 if (rx_conf == NULL) 1868 rx_conf = &dev_info.default_rxconf; 1869 1870 local_conf = *rx_conf; 1871 1872 /* 1873 * If an offloading has already been enabled in 1874 * rte_eth_dev_configure(), it has been enabled on all queues, 1875 * so there is no need to enable it in this queue again. 1876 * The local_conf.offloads input to underlying PMD only carries 1877 * those offloadings which are only enabled on this queue and 1878 * not enabled on all queues. 1879 */ 1880 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 1881 1882 /* 1883 * New added offloadings for this queue are those not enabled in 1884 * rte_eth_dev_configure() and they must be per-queue type. 1885 * A pure per-port offloading can't be enabled on a queue while 1886 * disabled on another queue. A pure per-port offloading can't 1887 * be enabled for any queue as new added one if it hasn't been 1888 * enabled in rte_eth_dev_configure(). 1889 */ 1890 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 1891 local_conf.offloads) { 1892 RTE_ETHDEV_LOG(ERR, 1893 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 1894 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 1895 port_id, rx_queue_id, local_conf.offloads, 1896 dev_info.rx_queue_offload_capa, 1897 __func__); 1898 return -EINVAL; 1899 } 1900 1901 /* 1902 * If LRO is enabled, check that the maximum aggregated packet 1903 * size is supported by the configured device. 
1904 */ 1905 if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) { 1906 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 1907 dev->data->dev_conf.rxmode.max_lro_pkt_size = 1908 dev->data->dev_conf.rxmode.max_rx_pkt_len; 1909 int ret = check_lro_pkt_size(port_id, 1910 dev->data->dev_conf.rxmode.max_lro_pkt_size, 1911 dev->data->dev_conf.rxmode.max_rx_pkt_len, 1912 dev_info.max_lro_pkt_size); 1913 if (ret != 0) 1914 return ret; 1915 } 1916 1917 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 1918 socket_id, &local_conf, mp); 1919 if (!ret) { 1920 if (!dev->data->min_rx_buf_size || 1921 dev->data->min_rx_buf_size > mbp_buf_size) 1922 dev->data->min_rx_buf_size = mbp_buf_size; 1923 } 1924 1925 return eth_err(port_id, ret); 1926 } 1927 1928 int 1929 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 1930 uint16_t nb_rx_desc, 1931 const struct rte_eth_hairpin_conf *conf) 1932 { 1933 int ret; 1934 struct rte_eth_dev *dev; 1935 struct rte_eth_hairpin_cap cap; 1936 void **rxq; 1937 int i; 1938 int count; 1939 1940 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 1941 1942 dev = &rte_eth_devices[port_id]; 1943 if (rx_queue_id >= dev->data->nb_rx_queues) { 1944 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id); 1945 return -EINVAL; 1946 } 1947 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 1948 if (ret != 0) 1949 return ret; 1950 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, 1951 -ENOTSUP); 1952 /* if nb_rx_desc is zero use max number of desc from the driver. */ 1953 if (nb_rx_desc == 0) 1954 nb_rx_desc = cap.max_nb_desc; 1955 if (nb_rx_desc > cap.max_nb_desc) { 1956 RTE_ETHDEV_LOG(ERR, 1957 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu", 1958 nb_rx_desc, cap.max_nb_desc); 1959 return -EINVAL; 1960 } 1961 if (conf->peer_count > cap.max_rx_2_tx) { 1962 RTE_ETHDEV_LOG(ERR, 1963 "Invalid value for number of peers for Rx queue(=%hu), should be: <= %hu", 1964 conf->peer_count, cap.max_rx_2_tx); 1965 return -EINVAL; 1966 } 1967 if (conf->peer_count == 0) { 1968 RTE_ETHDEV_LOG(ERR, 1969 "Invalid value for number of peers for Rx queue(=%hu), should be: > 0", 1970 conf->peer_count); 1971 return -EINVAL; 1972 } 1973 for (i = 0, count = 0; i < dev->data->nb_rx_queues && 1974 cap.max_nb_queues != UINT16_MAX; i++) { 1975 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) 1976 count++; 1977 } 1978 if (count > cap.max_nb_queues) { 1979 RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d", 1980 cap.max_nb_queues); 1981 return -EINVAL; 1982 } 1983 if (dev->data->dev_started) 1984 return -EBUSY; 1985 rxq = dev->data->rx_queues; 1986 if (rxq[rx_queue_id] != NULL) { 1987 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, 1988 -ENOTSUP); 1989 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]); 1990 rxq[rx_queue_id] = NULL; 1991 } 1992 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, 1993 nb_rx_desc, conf); 1994 if (ret == 0) 1995 dev->data->rx_queue_state[rx_queue_id] = 1996 RTE_ETH_QUEUE_STATE_HAIRPIN; 1997 return eth_err(port_id, ret); 1998 } 1999 2000 int 2001 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2002 uint16_t nb_tx_desc, unsigned int socket_id, 2003 const struct rte_eth_txconf *tx_conf) 2004 { 2005 struct rte_eth_dev *dev; 2006 struct rte_eth_dev_info dev_info; 2007 struct rte_eth_txconf local_conf; 2008 void **txq; 2009 int ret; 2010 2011 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 2012 2013 dev = &rte_eth_devices[port_id]; 2014 if (tx_queue_id >= 
dev->data->nb_tx_queues) { 2015 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id); 2016 return -EINVAL; 2017 } 2018 2019 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP); 2020 2021 ret = rte_eth_dev_info_get(port_id, &dev_info); 2022 if (ret != 0) 2023 return ret; 2024 2025 /* Use default specified by driver, if nb_tx_desc is zero */ 2026 if (nb_tx_desc == 0) { 2027 nb_tx_desc = dev_info.default_txportconf.ring_size; 2028 /* If driver default is zero, fall back on EAL default */ 2029 if (nb_tx_desc == 0) 2030 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2031 } 2032 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2033 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2034 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2035 RTE_ETHDEV_LOG(ERR, 2036 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2037 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2038 dev_info.tx_desc_lim.nb_min, 2039 dev_info.tx_desc_lim.nb_align); 2040 return -EINVAL; 2041 } 2042 2043 if (dev->data->dev_started && 2044 !(dev_info.dev_capa & 2045 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2046 return -EBUSY; 2047 2048 if (dev->data->dev_started && 2049 (dev->data->tx_queue_state[tx_queue_id] != 2050 RTE_ETH_QUEUE_STATE_STOPPED)) 2051 return -EBUSY; 2052 2053 txq = dev->data->tx_queues; 2054 if (txq[tx_queue_id]) { 2055 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, 2056 -ENOTSUP); 2057 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]); 2058 txq[tx_queue_id] = NULL; 2059 } 2060 2061 if (tx_conf == NULL) 2062 tx_conf = &dev_info.default_txconf; 2063 2064 local_conf = *tx_conf; 2065 2066 /* 2067 * If an offloading has already been enabled in 2068 * rte_eth_dev_configure(), it has been enabled on all queues, 2069 * so there is no need to enable it in this queue again. 2070 * The local_conf.offloads input to underlying PMD only carries 2071 * those offloadings which are only enabled on this queue and 2072 * not enabled on all queues. 2073 */ 2074 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 2075 2076 /* 2077 * New added offloadings for this queue are those not enabled in 2078 * rte_eth_dev_configure() and they must be per-queue type. 2079 * A pure per-port offloading can't be enabled on a queue while 2080 * disabled on another queue. A pure per-port offloading can't 2081 * be enabled for any queue as new added one if it hasn't been 2082 * enabled in rte_eth_dev_configure(). 
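 *
 * A sketch of the resulting split, assuming the application kept its
 * port configuration in a local "port_conf" and the driver lists
 * MBUF_FAST_FREE in tx_queue_offload_capa (both assumptions):
 *
 *	struct rte_eth_txconf txconf = dev_info.default_txconf;
 *	txconf.offloads = port_conf.txmode.offloads |
 *			  DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
 *		rte_eth_dev_socket_id(port_id), &txconf);
 *
 * Only the queue-local MBUF_FAST_FREE bit survives the mask above and
 * is passed to the PMD.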
2083 */ 2084 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 2085 local_conf.offloads) { 2086 RTE_ETHDEV_LOG(ERR, 2087 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2088 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2089 port_id, tx_queue_id, local_conf.offloads, 2090 dev_info.tx_queue_offload_capa, 2091 __func__); 2092 return -EINVAL; 2093 } 2094 2095 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 2096 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 2097 } 2098 2099 int 2100 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2101 uint16_t nb_tx_desc, 2102 const struct rte_eth_hairpin_conf *conf) 2103 { 2104 struct rte_eth_dev *dev; 2105 struct rte_eth_hairpin_cap cap; 2106 void **txq; 2107 int i; 2108 int count; 2109 int ret; 2110 2111 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 2112 dev = &rte_eth_devices[port_id]; 2113 if (tx_queue_id >= dev->data->nb_tx_queues) { 2114 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id); 2115 return -EINVAL; 2116 } 2117 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2118 if (ret != 0) 2119 return ret; 2120 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup, 2121 -ENOTSUP); 2122 /* if nb_rx_desc is zero use max number of desc from the driver. */ 2123 if (nb_tx_desc == 0) 2124 nb_tx_desc = cap.max_nb_desc; 2125 if (nb_tx_desc > cap.max_nb_desc) { 2126 RTE_ETHDEV_LOG(ERR, 2127 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu", 2128 nb_tx_desc, cap.max_nb_desc); 2129 return -EINVAL; 2130 } 2131 if (conf->peer_count > cap.max_tx_2_rx) { 2132 RTE_ETHDEV_LOG(ERR, 2133 "Invalid value for number of peers for Tx queue(=%hu), should be: <= %hu", 2134 conf->peer_count, cap.max_tx_2_rx); 2135 return -EINVAL; 2136 } 2137 if (conf->peer_count == 0) { 2138 RTE_ETHDEV_LOG(ERR, 2139 "Invalid value for number of peers for Tx queue(=%hu), should be: > 0", 2140 conf->peer_count); 2141 return -EINVAL; 2142 } 2143 for (i = 0, count = 0; i < dev->data->nb_tx_queues && 2144 cap.max_nb_queues != UINT16_MAX; i++) { 2145 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) 2146 count++; 2147 } 2148 if (count > cap.max_nb_queues) { 2149 RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d", 2150 cap.max_nb_queues); 2151 return -EINVAL; 2152 } 2153 if (dev->data->dev_started) 2154 return -EBUSY; 2155 txq = dev->data->tx_queues; 2156 if (txq[tx_queue_id] != NULL) { 2157 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, 2158 -ENOTSUP); 2159 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]); 2160 txq[tx_queue_id] = NULL; 2161 } 2162 ret = (*dev->dev_ops->tx_hairpin_queue_setup) 2163 (dev, tx_queue_id, nb_tx_desc, conf); 2164 if (ret == 0) 2165 dev->data->tx_queue_state[tx_queue_id] = 2166 RTE_ETH_QUEUE_STATE_HAIRPIN; 2167 return eth_err(port_id, ret); 2168 } 2169 2170 void 2171 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2172 void *userdata __rte_unused) 2173 { 2174 unsigned i; 2175 2176 for (i = 0; i < unsent; i++) 2177 rte_pktmbuf_free(pkts[i]); 2178 } 2179 2180 void 2181 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2182 void *userdata) 2183 { 2184 uint64_t *count = userdata; 2185 unsigned i; 2186 2187 for (i = 0; i < unsent; i++) 2188 rte_pktmbuf_free(pkts[i]); 2189 2190 *count += unsent; 2191 } 2192 2193 int 2194 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2195 buffer_tx_error_fn cbfn, void *userdata) 2196 { 2197 
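	/*
	 * Illustrative use only (buffer size of 32 is hypothetical):
	 * applications usually install a counting callback right after
	 * rte_eth_tx_buffer_init() so that unsent packets are freed and
	 * accounted for:
	 *
	 *	static uint64_t drops;
	 *	struct rte_eth_dev_tx_buffer *buf =
	 *		rte_zmalloc(NULL, RTE_ETH_TX_BUFFER_SIZE(32), 0);
	 *	rte_eth_tx_buffer_init(buf, 32);
	 *	rte_eth_tx_buffer_set_err_callback(buf,
	 *		rte_eth_tx_buffer_count_callback, &drops);
	 */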
buffer->error_callback = cbfn; 2198 buffer->error_userdata = userdata; 2199 return 0; 2200 } 2201 2202 int 2203 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2204 { 2205 int ret = 0; 2206 2207 if (buffer == NULL) 2208 return -EINVAL; 2209 2210 buffer->size = size; 2211 if (buffer->error_callback == NULL) { 2212 ret = rte_eth_tx_buffer_set_err_callback( 2213 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2214 } 2215 2216 return ret; 2217 } 2218 2219 int 2220 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2221 { 2222 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 2223 int ret; 2224 2225 /* Validate Input Data. Bail if not valid or not supported. */ 2226 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2227 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2228 2229 /* Call driver to free pending mbufs. */ 2230 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2231 free_cnt); 2232 return eth_err(port_id, ret); 2233 } 2234 2235 int 2236 rte_eth_promiscuous_enable(uint16_t port_id) 2237 { 2238 struct rte_eth_dev *dev; 2239 int diag = 0; 2240 2241 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2242 dev = &rte_eth_devices[port_id]; 2243 2244 if (dev->data->promiscuous == 1) 2245 return 0; 2246 2247 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2248 2249 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2250 dev->data->promiscuous = (diag == 0) ? 1 : 0; 2251 2252 return eth_err(port_id, diag); 2253 } 2254 2255 int 2256 rte_eth_promiscuous_disable(uint16_t port_id) 2257 { 2258 struct rte_eth_dev *dev; 2259 int diag = 0; 2260 2261 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2262 dev = &rte_eth_devices[port_id]; 2263 2264 if (dev->data->promiscuous == 0) 2265 return 0; 2266 2267 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2268 2269 dev->data->promiscuous = 0; 2270 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2271 if (diag != 0) 2272 dev->data->promiscuous = 1; 2273 2274 return eth_err(port_id, diag); 2275 } 2276 2277 int 2278 rte_eth_promiscuous_get(uint16_t port_id) 2279 { 2280 struct rte_eth_dev *dev; 2281 2282 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 2283 2284 dev = &rte_eth_devices[port_id]; 2285 return dev->data->promiscuous; 2286 } 2287 2288 int 2289 rte_eth_allmulticast_enable(uint16_t port_id) 2290 { 2291 struct rte_eth_dev *dev; 2292 int diag; 2293 2294 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2295 dev = &rte_eth_devices[port_id]; 2296 2297 if (dev->data->all_multicast == 1) 2298 return 0; 2299 2300 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2301 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2302 dev->data->all_multicast = (diag == 0) ? 
1 : 0; 2303 2304 return eth_err(port_id, diag); 2305 } 2306 2307 int 2308 rte_eth_allmulticast_disable(uint16_t port_id) 2309 { 2310 struct rte_eth_dev *dev; 2311 int diag; 2312 2313 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2314 dev = &rte_eth_devices[port_id]; 2315 2316 if (dev->data->all_multicast == 0) 2317 return 0; 2318 2319 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2320 dev->data->all_multicast = 0; 2321 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2322 if (diag != 0) 2323 dev->data->all_multicast = 1; 2324 2325 return eth_err(port_id, diag); 2326 } 2327 2328 int 2329 rte_eth_allmulticast_get(uint16_t port_id) 2330 { 2331 struct rte_eth_dev *dev; 2332 2333 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 2334 2335 dev = &rte_eth_devices[port_id]; 2336 return dev->data->all_multicast; 2337 } 2338 2339 int 2340 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2341 { 2342 struct rte_eth_dev *dev; 2343 2344 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2345 dev = &rte_eth_devices[port_id]; 2346 2347 if (dev->data->dev_conf.intr_conf.lsc && 2348 dev->data->dev_started) 2349 rte_eth_linkstatus_get(dev, eth_link); 2350 else { 2351 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2352 (*dev->dev_ops->link_update)(dev, 1); 2353 *eth_link = dev->data->dev_link; 2354 } 2355 2356 return 0; 2357 } 2358 2359 int 2360 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2361 { 2362 struct rte_eth_dev *dev; 2363 2364 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2365 dev = &rte_eth_devices[port_id]; 2366 2367 if (dev->data->dev_conf.intr_conf.lsc && 2368 dev->data->dev_started) 2369 rte_eth_linkstatus_get(dev, eth_link); 2370 else { 2371 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2372 (*dev->dev_ops->link_update)(dev, 0); 2373 *eth_link = dev->data->dev_link; 2374 } 2375 2376 return 0; 2377 } 2378 2379 int 2380 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2381 { 2382 struct rte_eth_dev *dev; 2383 2384 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 2385 2386 dev = &rte_eth_devices[port_id]; 2387 memset(stats, 0, sizeof(*stats)); 2388 2389 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2390 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2391 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2392 } 2393 2394 int 2395 rte_eth_stats_reset(uint16_t port_id) 2396 { 2397 struct rte_eth_dev *dev; 2398 int ret; 2399 2400 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2401 dev = &rte_eth_devices[port_id]; 2402 2403 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2404 ret = (*dev->dev_ops->stats_reset)(dev); 2405 if (ret != 0) 2406 return eth_err(port_id, ret); 2407 2408 dev->data->rx_mbuf_alloc_failed = 0; 2409 2410 return 0; 2411 } 2412 2413 static inline int 2414 get_xstats_basic_count(struct rte_eth_dev *dev) 2415 { 2416 uint16_t nb_rxqs, nb_txqs; 2417 int count; 2418 2419 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2420 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2421 2422 count = RTE_NB_STATS; 2423 count += nb_rxqs * RTE_NB_RXQ_STATS; 2424 count += nb_txqs * RTE_NB_TXQ_STATS; 2425 2426 return count; 2427 } 2428 2429 static int 2430 get_xstats_count(uint16_t port_id) 2431 { 2432 struct rte_eth_dev *dev; 2433 int count; 2434 2435 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 2436 dev = &rte_eth_devices[port_id]; 2437 if 
(dev->dev_ops->xstats_get_names_by_id != NULL) { 2438 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL, 2439 NULL, 0); 2440 if (count < 0) 2441 return eth_err(port_id, count); 2442 } 2443 if (dev->dev_ops->xstats_get_names != NULL) { 2444 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2445 if (count < 0) 2446 return eth_err(port_id, count); 2447 } else 2448 count = 0; 2449 2450 2451 count += get_xstats_basic_count(dev); 2452 2453 return count; 2454 } 2455 2456 int 2457 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2458 uint64_t *id) 2459 { 2460 int cnt_xstats, idx_xstat; 2461 2462 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2463 2464 if (!id) { 2465 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n"); 2466 return -ENOMEM; 2467 } 2468 2469 if (!xstat_name) { 2470 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n"); 2471 return -ENOMEM; 2472 } 2473 2474 /* Get count */ 2475 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2476 if (cnt_xstats < 0) { 2477 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2478 return -ENODEV; 2479 } 2480 2481 /* Get id-name lookup table */ 2482 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2483 2484 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2485 port_id, xstats_names, cnt_xstats, NULL)) { 2486 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2487 return -1; 2488 } 2489 2490 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2491 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2492 *id = idx_xstat; 2493 return 0; 2494 }; 2495 } 2496 2497 return -EINVAL; 2498 } 2499 2500 /* retrieve basic stats names */ 2501 static int 2502 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev, 2503 struct rte_eth_xstat_name *xstats_names) 2504 { 2505 int cnt_used_entries = 0; 2506 uint32_t idx, id_queue; 2507 uint16_t num_q; 2508 2509 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2510 strlcpy(xstats_names[cnt_used_entries].name, 2511 rte_stats_strings[idx].name, 2512 sizeof(xstats_names[0].name)); 2513 cnt_used_entries++; 2514 } 2515 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2516 for (id_queue = 0; id_queue < num_q; id_queue++) { 2517 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2518 snprintf(xstats_names[cnt_used_entries].name, 2519 sizeof(xstats_names[0].name), 2520 "rx_q%u%s", 2521 id_queue, rte_rxq_stats_strings[idx].name); 2522 cnt_used_entries++; 2523 } 2524 2525 } 2526 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2527 for (id_queue = 0; id_queue < num_q; id_queue++) { 2528 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 2529 snprintf(xstats_names[cnt_used_entries].name, 2530 sizeof(xstats_names[0].name), 2531 "tx_q%u%s", 2532 id_queue, rte_txq_stats_strings[idx].name); 2533 cnt_used_entries++; 2534 } 2535 } 2536 return cnt_used_entries; 2537 } 2538 2539 /* retrieve ethdev extended statistics names */ 2540 int 2541 rte_eth_xstats_get_names_by_id(uint16_t port_id, 2542 struct rte_eth_xstat_name *xstats_names, unsigned int size, 2543 uint64_t *ids) 2544 { 2545 struct rte_eth_xstat_name *xstats_names_copy; 2546 unsigned int no_basic_stat_requested = 1; 2547 unsigned int no_ext_stat_requested = 1; 2548 unsigned int expected_entries; 2549 unsigned int basic_count; 2550 struct rte_eth_dev *dev; 2551 unsigned int i; 2552 int ret; 2553 2554 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2555 dev = &rte_eth_devices[port_id]; 2556 2557 basic_count = get_xstats_basic_count(dev); 2558 ret = get_xstats_count(port_id); 2559 if (ret 
< 0) 2560 return ret; 2561 expected_entries = (unsigned int)ret; 2562 2563 /* Return max number of stats if no ids given */ 2564 if (!ids) { 2565 if (!xstats_names) 2566 return expected_entries; 2567 else if (xstats_names && size < expected_entries) 2568 return expected_entries; 2569 } 2570 2571 if (ids && !xstats_names) 2572 return -EINVAL; 2573 2574 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 2575 uint64_t ids_copy[size]; 2576 2577 for (i = 0; i < size; i++) { 2578 if (ids[i] < basic_count) { 2579 no_basic_stat_requested = 0; 2580 break; 2581 } 2582 2583 /* 2584 * Convert ids to xstats ids that PMD knows. 2585 * ids known by user are basic + extended stats. 2586 */ 2587 ids_copy[i] = ids[i] - basic_count; 2588 } 2589 2590 if (no_basic_stat_requested) 2591 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 2592 xstats_names, ids_copy, size); 2593 } 2594 2595 /* Retrieve all stats */ 2596 if (!ids) { 2597 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 2598 expected_entries); 2599 if (num_stats < 0 || num_stats > (int)expected_entries) 2600 return num_stats; 2601 else 2602 return expected_entries; 2603 } 2604 2605 xstats_names_copy = calloc(expected_entries, 2606 sizeof(struct rte_eth_xstat_name)); 2607 2608 if (!xstats_names_copy) { 2609 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 2610 return -ENOMEM; 2611 } 2612 2613 if (ids) { 2614 for (i = 0; i < size; i++) { 2615 if (ids[i] >= basic_count) { 2616 no_ext_stat_requested = 0; 2617 break; 2618 } 2619 } 2620 } 2621 2622 /* Fill xstats_names_copy structure */ 2623 if (ids && no_ext_stat_requested) { 2624 rte_eth_basic_stats_get_names(dev, xstats_names_copy); 2625 } else { 2626 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 2627 expected_entries); 2628 if (ret < 0) { 2629 free(xstats_names_copy); 2630 return ret; 2631 } 2632 } 2633 2634 /* Filter stats */ 2635 for (i = 0; i < size; i++) { 2636 if (ids[i] >= expected_entries) { 2637 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 2638 free(xstats_names_copy); 2639 return -1; 2640 } 2641 xstats_names[i] = xstats_names_copy[ids[i]]; 2642 } 2643 2644 free(xstats_names_copy); 2645 return size; 2646 } 2647 2648 int 2649 rte_eth_xstats_get_names(uint16_t port_id, 2650 struct rte_eth_xstat_name *xstats_names, 2651 unsigned int size) 2652 { 2653 struct rte_eth_dev *dev; 2654 int cnt_used_entries; 2655 int cnt_expected_entries; 2656 int cnt_driver_entries; 2657 2658 cnt_expected_entries = get_xstats_count(port_id); 2659 if (xstats_names == NULL || cnt_expected_entries < 0 || 2660 (int)size < cnt_expected_entries) 2661 return cnt_expected_entries; 2662 2663 /* port_id checked in get_xstats_count() */ 2664 dev = &rte_eth_devices[port_id]; 2665 2666 cnt_used_entries = rte_eth_basic_stats_get_names( 2667 dev, xstats_names); 2668 2669 if (dev->dev_ops->xstats_get_names != NULL) { 2670 /* If there are any driver-specific xstats, append them 2671 * to end of list. 
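 * Callers see the basic and driver names as one list; the usual
 * convention (sketch, error handling omitted) is to probe the count
 * with a NULL array first and then fetch the names:
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *	n = rte_eth_xstats_get_names(port_id, names, n);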
2672 */ 2673 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 2674 dev, 2675 xstats_names + cnt_used_entries, 2676 size - cnt_used_entries); 2677 if (cnt_driver_entries < 0) 2678 return eth_err(port_id, cnt_driver_entries); 2679 cnt_used_entries += cnt_driver_entries; 2680 } 2681 2682 return cnt_used_entries; 2683 } 2684 2685 2686 static int 2687 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 2688 { 2689 struct rte_eth_dev *dev; 2690 struct rte_eth_stats eth_stats; 2691 unsigned int count = 0, i, q; 2692 uint64_t val, *stats_ptr; 2693 uint16_t nb_rxqs, nb_txqs; 2694 int ret; 2695 2696 ret = rte_eth_stats_get(port_id, &eth_stats); 2697 if (ret < 0) 2698 return ret; 2699 2700 dev = &rte_eth_devices[port_id]; 2701 2702 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2703 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2704 2705 /* global stats */ 2706 for (i = 0; i < RTE_NB_STATS; i++) { 2707 stats_ptr = RTE_PTR_ADD(&eth_stats, 2708 rte_stats_strings[i].offset); 2709 val = *stats_ptr; 2710 xstats[count++].value = val; 2711 } 2712 2713 /* per-rxq stats */ 2714 for (q = 0; q < nb_rxqs; q++) { 2715 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 2716 stats_ptr = RTE_PTR_ADD(&eth_stats, 2717 rte_rxq_stats_strings[i].offset + 2718 q * sizeof(uint64_t)); 2719 val = *stats_ptr; 2720 xstats[count++].value = val; 2721 } 2722 } 2723 2724 /* per-txq stats */ 2725 for (q = 0; q < nb_txqs; q++) { 2726 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 2727 stats_ptr = RTE_PTR_ADD(&eth_stats, 2728 rte_txq_stats_strings[i].offset + 2729 q * sizeof(uint64_t)); 2730 val = *stats_ptr; 2731 xstats[count++].value = val; 2732 } 2733 } 2734 return count; 2735 } 2736 2737 /* retrieve ethdev extended statistics */ 2738 int 2739 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 2740 uint64_t *values, unsigned int size) 2741 { 2742 unsigned int no_basic_stat_requested = 1; 2743 unsigned int no_ext_stat_requested = 1; 2744 unsigned int num_xstats_filled; 2745 unsigned int basic_count; 2746 uint16_t expected_entries; 2747 struct rte_eth_dev *dev; 2748 unsigned int i; 2749 int ret; 2750 2751 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2752 ret = get_xstats_count(port_id); 2753 if (ret < 0) 2754 return ret; 2755 expected_entries = (uint16_t)ret; 2756 struct rte_eth_xstat xstats[expected_entries]; 2757 dev = &rte_eth_devices[port_id]; 2758 basic_count = get_xstats_basic_count(dev); 2759 2760 /* Return max number of stats if no ids given */ 2761 if (!ids) { 2762 if (!values) 2763 return expected_entries; 2764 else if (values && size < expected_entries) 2765 return expected_entries; 2766 } 2767 2768 if (ids && !values) 2769 return -EINVAL; 2770 2771 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 2772 unsigned int basic_count = get_xstats_basic_count(dev); 2773 uint64_t ids_copy[size]; 2774 2775 for (i = 0; i < size; i++) { 2776 if (ids[i] < basic_count) { 2777 no_basic_stat_requested = 0; 2778 break; 2779 } 2780 2781 /* 2782 * Convert ids to xstats ids that PMD knows. 2783 * ids known by user are basic + extended stats.
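 * For example, with 16 basic stats a caller-visible id of 20 becomes
 * PMD xstat id 4 in ids_copy[]. A minimal by-id lookup from the
 * application side might read (stat name is only an example):
 *
 *	uint64_t id, value;
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *			&id) == 0)
 *		rte_eth_xstats_get_by_id(port_id, &id, &value, 1);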
2784 */ 2785 ids_copy[i] = ids[i] - basic_count; 2786 } 2787 2788 if (no_basic_stat_requested) 2789 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 2790 values, size); 2791 } 2792 2793 if (ids) { 2794 for (i = 0; i < size; i++) { 2795 if (ids[i] >= basic_count) { 2796 no_ext_stat_requested = 0; 2797 break; 2798 } 2799 } 2800 } 2801 2802 /* Fill the xstats structure */ 2803 if (ids && no_ext_stat_requested) 2804 ret = rte_eth_basic_stats_get(port_id, xstats); 2805 else 2806 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 2807 2808 if (ret < 0) 2809 return ret; 2810 num_xstats_filled = (unsigned int)ret; 2811 2812 /* Return all stats */ 2813 if (!ids) { 2814 for (i = 0; i < num_xstats_filled; i++) 2815 values[i] = xstats[i].value; 2816 return expected_entries; 2817 } 2818 2819 /* Filter stats */ 2820 for (i = 0; i < size; i++) { 2821 if (ids[i] >= expected_entries) { 2822 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 2823 return -1; 2824 } 2825 values[i] = xstats[ids[i]].value; 2826 } 2827 return size; 2828 } 2829 2830 int 2831 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 2832 unsigned int n) 2833 { 2834 struct rte_eth_dev *dev; 2835 unsigned int count = 0, i; 2836 signed int xcount = 0; 2837 uint16_t nb_rxqs, nb_txqs; 2838 int ret; 2839 2840 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 2841 2842 dev = &rte_eth_devices[port_id]; 2843 2844 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2845 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2846 2847 /* Return generic statistics */ 2848 count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) + 2849 (nb_txqs * RTE_NB_TXQ_STATS); 2850 2851 /* implemented by the driver */ 2852 if (dev->dev_ops->xstats_get != NULL) { 2853 /* Retrieve the xstats from the driver at the end of the 2854 * xstats struct. 2855 */ 2856 xcount = (*dev->dev_ops->xstats_get)(dev, 2857 xstats ? xstats + count : NULL, 2858 (n > count) ? 
n - count : 0); 2859 2860 if (xcount < 0) 2861 return eth_err(port_id, xcount); 2862 } 2863 2864 if (n < count + xcount || xstats == NULL) 2865 return count + xcount; 2866 2867 /* now fill the xstats structure */ 2868 ret = rte_eth_basic_stats_get(port_id, xstats); 2869 if (ret < 0) 2870 return ret; 2871 count = ret; 2872 2873 for (i = 0; i < count; i++) 2874 xstats[i].id = i; 2875 /* add an offset to driver-specific stats */ 2876 for ( ; i < count + xcount; i++) 2877 xstats[i].id += count; 2878 2879 return count + xcount; 2880 } 2881 2882 /* reset ethdev extended statistics */ 2883 int 2884 rte_eth_xstats_reset(uint16_t port_id) 2885 { 2886 struct rte_eth_dev *dev; 2887 2888 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2889 dev = &rte_eth_devices[port_id]; 2890 2891 /* implemented by the driver */ 2892 if (dev->dev_ops->xstats_reset != NULL) 2893 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 2894 2895 /* fallback to default */ 2896 return rte_eth_stats_reset(port_id); 2897 } 2898 2899 static int 2900 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx, 2901 uint8_t is_rx) 2902 { 2903 struct rte_eth_dev *dev; 2904 2905 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2906 2907 dev = &rte_eth_devices[port_id]; 2908 2909 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 2910 2911 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 2912 return -EINVAL; 2913 2914 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 2915 return -EINVAL; 2916 2917 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 2918 return -EINVAL; 2919 2920 return (*dev->dev_ops->queue_stats_mapping_set) 2921 (dev, queue_id, stat_idx, is_rx); 2922 } 2923 2924 2925 int 2926 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 2927 uint8_t stat_idx) 2928 { 2929 return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id, 2930 stat_idx, STAT_QMAP_TX)); 2931 } 2932 2933 2934 int 2935 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 2936 uint8_t stat_idx) 2937 { 2938 return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id, 2939 stat_idx, STAT_QMAP_RX)); 2940 } 2941 2942 int 2943 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 2944 { 2945 struct rte_eth_dev *dev; 2946 2947 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2948 dev = &rte_eth_devices[port_id]; 2949 2950 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 2951 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 2952 fw_version, fw_size)); 2953 } 2954 2955 int 2956 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 2957 { 2958 struct rte_eth_dev *dev; 2959 const struct rte_eth_desc_lim lim = { 2960 .nb_max = UINT16_MAX, 2961 .nb_min = 0, 2962 .nb_align = 1, 2963 .nb_seg_max = UINT16_MAX, 2964 .nb_mtu_seg_max = UINT16_MAX, 2965 }; 2966 int diag; 2967 2968 /* 2969 * Init dev_info before port_id check since caller does not have 2970 * return status and does not know if get is successful or not. 
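 * A hedged sketch of the expected caller pattern; the memset below
 * also runs on failure, so a caller that ignores the status never
 * reads stale fields:
 *
 *	struct rte_eth_dev_info dev_info;
 *	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
 *		return;
 *	printf("driver %s, max_rx_queues %u\n",
 *		dev_info.driver_name, dev_info.max_rx_queues);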
2971 */ 2972 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 2973 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 2974 2975 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2976 dev = &rte_eth_devices[port_id]; 2977 2978 dev_info->rx_desc_lim = lim; 2979 dev_info->tx_desc_lim = lim; 2980 dev_info->device = dev->device; 2981 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 2982 dev_info->max_mtu = UINT16_MAX; 2983 2984 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 2985 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 2986 if (diag != 0) { 2987 /* Cleanup already filled in device information */ 2988 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 2989 return eth_err(port_id, diag); 2990 } 2991 2992 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 2993 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 2994 RTE_MAX_QUEUES_PER_PORT); 2995 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 2996 RTE_MAX_QUEUES_PER_PORT); 2997 2998 dev_info->driver_name = dev->device->driver->name; 2999 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3000 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3001 3002 dev_info->dev_flags = &dev->data->dev_flags; 3003 3004 return 0; 3005 } 3006 3007 int 3008 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3009 uint32_t *ptypes, int num) 3010 { 3011 int i, j; 3012 struct rte_eth_dev *dev; 3013 const uint32_t *all_ptypes; 3014 3015 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3016 dev = &rte_eth_devices[port_id]; 3017 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3018 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3019 3020 if (!all_ptypes) 3021 return 0; 3022 3023 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3024 if (all_ptypes[i] & ptype_mask) { 3025 if (j < num) 3026 ptypes[j] = all_ptypes[i]; 3027 j++; 3028 } 3029 3030 return j; 3031 } 3032 3033 int 3034 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3035 uint32_t *set_ptypes, unsigned int num) 3036 { 3037 const uint32_t valid_ptype_masks[] = { 3038 RTE_PTYPE_L2_MASK, 3039 RTE_PTYPE_L3_MASK, 3040 RTE_PTYPE_L4_MASK, 3041 RTE_PTYPE_TUNNEL_MASK, 3042 RTE_PTYPE_INNER_L2_MASK, 3043 RTE_PTYPE_INNER_L3_MASK, 3044 RTE_PTYPE_INNER_L4_MASK, 3045 }; 3046 const uint32_t *all_ptypes; 3047 struct rte_eth_dev *dev; 3048 uint32_t unused_mask; 3049 unsigned int i, j; 3050 int ret; 3051 3052 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3053 dev = &rte_eth_devices[port_id]; 3054 3055 if (num > 0 && set_ptypes == NULL) 3056 return -EINVAL; 3057 3058 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3059 *dev->dev_ops->dev_ptypes_set == NULL) { 3060 ret = 0; 3061 goto ptype_unknown; 3062 } 3063 3064 if (ptype_mask == 0) { 3065 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3066 ptype_mask); 3067 goto ptype_unknown; 3068 } 3069 3070 unused_mask = ptype_mask; 3071 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3072 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3073 if (mask && mask != valid_ptype_masks[i]) { 3074 ret = -EINVAL; 3075 goto ptype_unknown; 3076 } 3077 unused_mask &= ~valid_ptype_masks[i]; 3078 } 3079 3080 if (unused_mask) { 3081 ret = -EINVAL; 3082 goto ptype_unknown; 3083 } 3084 3085 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3086 if (all_ptypes == NULL) { 3087 ret = 0; 3088 goto ptype_unknown; 3089 } 3090 3091 /* 3092 * Accommodate as many set_ptypes as possible. 
If the supplied 3093 * set_ptypes array is insufficient fill it partially. 3094 */ 3095 for (i = 0, j = 0; set_ptypes != NULL && 3096 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3097 if (ptype_mask & all_ptypes[i]) { 3098 if (j < num - 1) { 3099 set_ptypes[j] = all_ptypes[i]; 3100 j++; 3101 continue; 3102 } 3103 break; 3104 } 3105 } 3106 3107 if (set_ptypes != NULL && j < num) 3108 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3109 3110 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3111 3112 ptype_unknown: 3113 if (num > 0) 3114 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3115 3116 return ret; 3117 } 3118 3119 int 3120 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3121 { 3122 struct rte_eth_dev *dev; 3123 3124 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3125 dev = &rte_eth_devices[port_id]; 3126 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3127 3128 return 0; 3129 } 3130 3131 int 3132 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3133 { 3134 struct rte_eth_dev *dev; 3135 3136 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3137 3138 dev = &rte_eth_devices[port_id]; 3139 *mtu = dev->data->mtu; 3140 return 0; 3141 } 3142 3143 int 3144 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3145 { 3146 int ret; 3147 struct rte_eth_dev_info dev_info; 3148 struct rte_eth_dev *dev; 3149 3150 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3151 dev = &rte_eth_devices[port_id]; 3152 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3153 3154 /* 3155 * Check if the device supports dev_infos_get, if it does not 3156 * skip min_mtu/max_mtu validation here as this requires values 3157 * that are populated within the call to rte_eth_dev_info_get() 3158 * which relies on dev->dev_ops->dev_infos_get. 3159 */ 3160 if (*dev->dev_ops->dev_infos_get != NULL) { 3161 ret = rte_eth_dev_info_get(port_id, &dev_info); 3162 if (ret != 0) 3163 return ret; 3164 3165 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu) 3166 return -EINVAL; 3167 } 3168 3169 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3170 if (!ret) 3171 dev->data->mtu = mtu; 3172 3173 return eth_err(port_id, ret); 3174 } 3175 3176 int 3177 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3178 { 3179 struct rte_eth_dev *dev; 3180 int ret; 3181 3182 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3183 dev = &rte_eth_devices[port_id]; 3184 if (!(dev->data->dev_conf.rxmode.offloads & 3185 DEV_RX_OFFLOAD_VLAN_FILTER)) { 3186 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n", 3187 port_id); 3188 return -ENOSYS; 3189 } 3190 3191 if (vlan_id > 4095) { 3192 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3193 port_id, vlan_id); 3194 return -EINVAL; 3195 } 3196 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3197 3198 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3199 if (ret == 0) { 3200 struct rte_vlan_filter_conf *vfc; 3201 int vidx; 3202 int vbit; 3203 3204 vfc = &dev->data->vlan_filter_conf; 3205 vidx = vlan_id / 64; 3206 vbit = vlan_id % 64; 3207 3208 if (on) 3209 vfc->ids[vidx] |= UINT64_C(1) << vbit; 3210 else 3211 vfc->ids[vidx] &= ~(UINT64_C(1) << vbit); 3212 } 3213 3214 return eth_err(port_id, ret); 3215 } 3216 3217 int 3218 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3219 int on) 3220 { 3221 struct rte_eth_dev *dev; 3222 3223 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3224 dev = &rte_eth_devices[port_id]; 3225 if (rx_queue_id >= dev->data->nb_rx_queues) { 3226 RTE_ETHDEV_LOG(ERR, 
"Invalid rx_queue_id=%u\n", rx_queue_id); 3227 return -EINVAL; 3228 } 3229 3230 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3231 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3232 3233 return 0; 3234 } 3235 3236 int 3237 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3238 enum rte_vlan_type vlan_type, 3239 uint16_t tpid) 3240 { 3241 struct rte_eth_dev *dev; 3242 3243 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3244 dev = &rte_eth_devices[port_id]; 3245 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3246 3247 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3248 tpid)); 3249 } 3250 3251 int 3252 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3253 { 3254 struct rte_eth_dev_info dev_info; 3255 struct rte_eth_dev *dev; 3256 int ret = 0; 3257 int mask = 0; 3258 int cur, org = 0; 3259 uint64_t orig_offloads; 3260 uint64_t dev_offloads; 3261 uint64_t new_offloads; 3262 3263 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3264 dev = &rte_eth_devices[port_id]; 3265 3266 /* save original values in case of failure */ 3267 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3268 dev_offloads = orig_offloads; 3269 3270 /* check which option changed by application */ 3271 cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD); 3272 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP); 3273 if (cur != org) { 3274 if (cur) 3275 dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 3276 else 3277 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3278 mask |= ETH_VLAN_STRIP_MASK; 3279 } 3280 3281 cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD); 3282 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER); 3283 if (cur != org) { 3284 if (cur) 3285 dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 3286 else 3287 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3288 mask |= ETH_VLAN_FILTER_MASK; 3289 } 3290 3291 cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD); 3292 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND); 3293 if (cur != org) { 3294 if (cur) 3295 dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 3296 else 3297 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3298 mask |= ETH_VLAN_EXTEND_MASK; 3299 } 3300 3301 cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD); 3302 org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP); 3303 if (cur != org) { 3304 if (cur) 3305 dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3306 else 3307 dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3308 mask |= ETH_QINQ_STRIP_MASK; 3309 } 3310 3311 /*no change*/ 3312 if (mask == 0) 3313 return ret; 3314 3315 ret = rte_eth_dev_info_get(port_id, &dev_info); 3316 if (ret != 0) 3317 return ret; 3318 3319 /* Rx VLAN offloading must be within its device capabilities */ 3320 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3321 new_offloads = dev_offloads & ~orig_offloads; 3322 RTE_ETHDEV_LOG(ERR, 3323 "Ethdev port_id=%u requested new added VLAN offloads " 3324 "0x%" PRIx64 " must be within Rx offloads capabilities " 3325 "0x%" PRIx64 " in %s()\n", 3326 port_id, new_offloads, dev_info.rx_offload_capa, 3327 __func__); 3328 return -EINVAL; 3329 } 3330 3331 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3332 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3333 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3334 if (ret) { 3335 /* hit an error restore original values */ 3336 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3337 } 3338 3339 return eth_err(port_id, ret); 3340 } 3341 3342 int 3343 rte_eth_dev_get_vlan_offload(uint16_t 
port_id) 3344 { 3345 struct rte_eth_dev *dev; 3346 uint64_t *dev_offloads; 3347 int ret = 0; 3348 3349 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3350 dev = &rte_eth_devices[port_id]; 3351 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3352 3353 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 3354 ret |= ETH_VLAN_STRIP_OFFLOAD; 3355 3356 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 3357 ret |= ETH_VLAN_FILTER_OFFLOAD; 3358 3359 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 3360 ret |= ETH_VLAN_EXTEND_OFFLOAD; 3361 3362 if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP) 3363 ret |= ETH_QINQ_STRIP_OFFLOAD; 3364 3365 return ret; 3366 } 3367 3368 int 3369 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3370 { 3371 struct rte_eth_dev *dev; 3372 3373 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3374 dev = &rte_eth_devices[port_id]; 3375 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3376 3377 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3378 } 3379 3380 int 3381 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3382 { 3383 struct rte_eth_dev *dev; 3384 3385 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3386 dev = &rte_eth_devices[port_id]; 3387 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3388 memset(fc_conf, 0, sizeof(*fc_conf)); 3389 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3390 } 3391 3392 int 3393 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3394 { 3395 struct rte_eth_dev *dev; 3396 3397 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3398 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 3399 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 3400 return -EINVAL; 3401 } 3402 3403 dev = &rte_eth_devices[port_id]; 3404 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 3405 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 3406 } 3407 3408 int 3409 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 3410 struct rte_eth_pfc_conf *pfc_conf) 3411 { 3412 struct rte_eth_dev *dev; 3413 3414 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3415 if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) { 3416 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 3417 return -EINVAL; 3418 } 3419 3420 dev = &rte_eth_devices[port_id]; 3421 /* High water, low water validation are device specific */ 3422 if (*dev->dev_ops->priority_flow_ctrl_set) 3423 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 3424 (dev, pfc_conf)); 3425 return -ENOTSUP; 3426 } 3427 3428 static int 3429 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 3430 uint16_t reta_size) 3431 { 3432 uint16_t i, num; 3433 3434 if (!reta_conf) 3435 return -EINVAL; 3436 3437 num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE; 3438 for (i = 0; i < num; i++) { 3439 if (reta_conf[i].mask) 3440 return 0; 3441 } 3442 3443 return -EINVAL; 3444 } 3445 3446 static int 3447 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 3448 uint16_t reta_size, 3449 uint16_t max_rxq) 3450 { 3451 uint16_t i, idx, shift; 3452 3453 if (!reta_conf) 3454 return -EINVAL; 3455 3456 if (max_rxq == 0) { 3457 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 3458 return -EINVAL; 3459 } 3460 3461 for (i = 0; i < reta_size; i++) { 3462 idx = i / RTE_RETA_GROUP_SIZE; 3463 shift = i % RTE_RETA_GROUP_SIZE; 3464 if ((reta_conf[idx].mask & (1ULL << 
shift)) && 3465 (reta_conf[idx].reta[shift] >= max_rxq)) { 3466 RTE_ETHDEV_LOG(ERR, 3467 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 3468 idx, shift, 3469 reta_conf[idx].reta[shift], max_rxq); 3470 return -EINVAL; 3471 } 3472 } 3473 3474 return 0; 3475 } 3476 3477 int 3478 rte_eth_dev_rss_reta_update(uint16_t port_id, 3479 struct rte_eth_rss_reta_entry64 *reta_conf, 3480 uint16_t reta_size) 3481 { 3482 struct rte_eth_dev *dev; 3483 int ret; 3484 3485 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3486 /* Check mask bits */ 3487 ret = rte_eth_check_reta_mask(reta_conf, reta_size); 3488 if (ret < 0) 3489 return ret; 3490 3491 dev = &rte_eth_devices[port_id]; 3492 3493 /* Check entry value */ 3494 ret = rte_eth_check_reta_entry(reta_conf, reta_size, 3495 dev->data->nb_rx_queues); 3496 if (ret < 0) 3497 return ret; 3498 3499 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 3500 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 3501 reta_size)); 3502 } 3503 3504 int 3505 rte_eth_dev_rss_reta_query(uint16_t port_id, 3506 struct rte_eth_rss_reta_entry64 *reta_conf, 3507 uint16_t reta_size) 3508 { 3509 struct rte_eth_dev *dev; 3510 int ret; 3511 3512 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3513 3514 /* Check mask bits */ 3515 ret = rte_eth_check_reta_mask(reta_conf, reta_size); 3516 if (ret < 0) 3517 return ret; 3518 3519 dev = &rte_eth_devices[port_id]; 3520 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 3521 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 3522 reta_size)); 3523 } 3524 3525 int 3526 rte_eth_dev_rss_hash_update(uint16_t port_id, 3527 struct rte_eth_rss_conf *rss_conf) 3528 { 3529 struct rte_eth_dev *dev; 3530 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 3531 int ret; 3532 3533 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3534 3535 ret = rte_eth_dev_info_get(port_id, &dev_info); 3536 if (ret != 0) 3537 return ret; 3538 3539 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 3540 3541 dev = &rte_eth_devices[port_id]; 3542 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 3543 dev_info.flow_type_rss_offloads) { 3544 RTE_ETHDEV_LOG(ERR, 3545 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 3546 port_id, rss_conf->rss_hf, 3547 dev_info.flow_type_rss_offloads); 3548 return -EINVAL; 3549 } 3550 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 3551 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 3552 rss_conf)); 3553 } 3554 3555 int 3556 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 3557 struct rte_eth_rss_conf *rss_conf) 3558 { 3559 struct rte_eth_dev *dev; 3560 3561 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3562 dev = &rte_eth_devices[port_id]; 3563 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 3564 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 3565 rss_conf)); 3566 } 3567 3568 int 3569 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 3570 struct rte_eth_udp_tunnel *udp_tunnel) 3571 { 3572 struct rte_eth_dev *dev; 3573 3574 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3575 if (udp_tunnel == NULL) { 3576 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n"); 3577 return -EINVAL; 3578 } 3579 3580 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 3581 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 3582 return -EINVAL; 3583 } 3584 3585 dev = &rte_eth_devices[port_id]; 3586 
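	/*
	 * Typical call from an application (sketch; 4789 is the IANA
	 * VXLAN port and only an example value):
	 *
	 *	struct rte_eth_udp_tunnel tunnel = {
	 *		.udp_port = 4789,
	 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	 *	};
	 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
	 */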
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 3587 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 3588 udp_tunnel)); 3589 } 3590 3591 int 3592 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 3593 struct rte_eth_udp_tunnel *udp_tunnel) 3594 { 3595 struct rte_eth_dev *dev; 3596 3597 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3598 dev = &rte_eth_devices[port_id]; 3599 3600 if (udp_tunnel == NULL) { 3601 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n"); 3602 return -EINVAL; 3603 } 3604 3605 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 3606 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 3607 return -EINVAL; 3608 } 3609 3610 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 3611 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 3612 udp_tunnel)); 3613 } 3614 3615 int 3616 rte_eth_led_on(uint16_t port_id) 3617 { 3618 struct rte_eth_dev *dev; 3619 3620 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3621 dev = &rte_eth_devices[port_id]; 3622 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 3623 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 3624 } 3625 3626 int 3627 rte_eth_led_off(uint16_t port_id) 3628 { 3629 struct rte_eth_dev *dev; 3630 3631 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3632 dev = &rte_eth_devices[port_id]; 3633 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 3634 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 3635 } 3636 3637 /* 3638 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 3639 * an empty spot. 3640 */ 3641 static int 3642 get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 3643 { 3644 struct rte_eth_dev_info dev_info; 3645 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 3646 unsigned i; 3647 int ret; 3648 3649 ret = rte_eth_dev_info_get(port_id, &dev_info); 3650 if (ret != 0) 3651 return -1; 3652 3653 for (i = 0; i < dev_info.max_mac_addrs; i++) 3654 if (memcmp(addr, &dev->data->mac_addrs[i], 3655 RTE_ETHER_ADDR_LEN) == 0) 3656 return i; 3657 3658 return -1; 3659 } 3660 3661 static const struct rte_ether_addr null_mac_addr; 3662 3663 int 3664 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 3665 uint32_t pool) 3666 { 3667 struct rte_eth_dev *dev; 3668 int index; 3669 uint64_t pool_mask; 3670 int ret; 3671 3672 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3673 dev = &rte_eth_devices[port_id]; 3674 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 3675 3676 if (rte_is_zero_ether_addr(addr)) { 3677 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 3678 port_id); 3679 return -EINVAL; 3680 } 3681 if (pool >= ETH_64_POOLS) { 3682 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1); 3683 return -EINVAL; 3684 } 3685 3686 index = get_mac_addr_index(port_id, addr); 3687 if (index < 0) { 3688 index = get_mac_addr_index(port_id, &null_mac_addr); 3689 if (index < 0) { 3690 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 3691 port_id); 3692 return -ENOSPC; 3693 } 3694 } else { 3695 pool_mask = dev->data->mac_pool_sel[index]; 3696 3697 /* Check if both MAC address and pool is already there, and do nothing */ 3698 if (pool_mask & (1ULL << pool)) 3699 return 0; 3700 } 3701 3702 /* Update NIC */ 3703 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 3704 3705 if (ret == 0) { 3706 /* Update address in NIC data structure */ 3707 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 
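		/*
		 * Hypothetical illustration of the pool semantics: adding
		 * the same (locally administered) address again for pool 2
		 * finds the existing index above, re-programs the NIC entry
		 * for that pool and ORs bit 2 into mac_pool_sel:
		 *
		 *	struct rte_ether_addr mac =
		 *		{{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }};
		 *	rte_eth_dev_mac_addr_add(port_id, &mac, 0);
		 *	rte_eth_dev_mac_addr_add(port_id, &mac, 2);
		 */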
3708 3709 /* Update pool bitmap in NIC data structure */ 3710 dev->data->mac_pool_sel[index] |= (1ULL << pool); 3711 } 3712 3713 return eth_err(port_id, ret); 3714 } 3715 3716 int 3717 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 3718 { 3719 struct rte_eth_dev *dev; 3720 int index; 3721 3722 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3723 dev = &rte_eth_devices[port_id]; 3724 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); 3725 3726 index = get_mac_addr_index(port_id, addr); 3727 if (index == 0) { 3728 RTE_ETHDEV_LOG(ERR, 3729 "Port %u: Cannot remove default MAC address\n", 3730 port_id); 3731 return -EADDRINUSE; 3732 } else if (index < 0) 3733 return 0; /* Do nothing if address wasn't found */ 3734 3735 /* Update NIC */ 3736 (*dev->dev_ops->mac_addr_remove)(dev, index); 3737 3738 /* Update address in NIC data structure */ 3739 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 3740 3741 /* reset pool bitmap */ 3742 dev->data->mac_pool_sel[index] = 0; 3743 3744 return 0; 3745 } 3746 3747 int 3748 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 3749 { 3750 struct rte_eth_dev *dev; 3751 int ret; 3752 3753 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3754 3755 if (!rte_is_valid_assigned_ether_addr(addr)) 3756 return -EINVAL; 3757 3758 dev = &rte_eth_devices[port_id]; 3759 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 3760 3761 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 3762 if (ret < 0) 3763 return ret; 3764 3765 /* Update default address in NIC data structure */ 3766 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 3767 3768 return 0; 3769 } 3770 3771 3772 /* 3773 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 3774 * an empty spot. 
3775 */ 3776 static int 3777 get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 3778 { 3779 struct rte_eth_dev_info dev_info; 3780 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 3781 unsigned i; 3782 int ret; 3783 3784 ret = rte_eth_dev_info_get(port_id, &dev_info); 3785 if (ret != 0) 3786 return -1; 3787 3788 if (!dev->data->hash_mac_addrs) 3789 return -1; 3790 3791 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 3792 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 3793 RTE_ETHER_ADDR_LEN) == 0) 3794 return i; 3795 3796 return -1; 3797 } 3798 3799 int 3800 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 3801 uint8_t on) 3802 { 3803 int index; 3804 int ret; 3805 struct rte_eth_dev *dev; 3806 3807 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3808 3809 dev = &rte_eth_devices[port_id]; 3810 if (rte_is_zero_ether_addr(addr)) { 3811 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 3812 port_id); 3813 return -EINVAL; 3814 } 3815 3816 index = get_hash_mac_addr_index(port_id, addr); 3817 /* Check if it's already there, and do nothing */ 3818 if ((index >= 0) && on) 3819 return 0; 3820 3821 if (index < 0) { 3822 if (!on) { 3823 RTE_ETHDEV_LOG(ERR, 3824 "Port %u: the MAC address was not set in UTA\n", 3825 port_id); 3826 return -EINVAL; 3827 } 3828 3829 index = get_hash_mac_addr_index(port_id, &null_mac_addr); 3830 if (index < 0) { 3831 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 3832 port_id); 3833 return -ENOSPC; 3834 } 3835 } 3836 3837 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 3838 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 3839 if (ret == 0) { 3840 /* Update address in NIC data structure */ 3841 if (on) 3842 rte_ether_addr_copy(addr, 3843 &dev->data->hash_mac_addrs[index]); 3844 else 3845 rte_ether_addr_copy(&null_mac_addr, 3846 &dev->data->hash_mac_addrs[index]); 3847 } 3848 3849 return eth_err(port_id, ret); 3850 } 3851 3852 int 3853 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 3854 { 3855 struct rte_eth_dev *dev; 3856 3857 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3858 3859 dev = &rte_eth_devices[port_id]; 3860 3861 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 3862 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 3863 on)); 3864 } 3865 3866 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 3867 uint16_t tx_rate) 3868 { 3869 struct rte_eth_dev *dev; 3870 struct rte_eth_dev_info dev_info; 3871 struct rte_eth_link link; 3872 int ret; 3873 3874 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3875 3876 ret = rte_eth_dev_info_get(port_id, &dev_info); 3877 if (ret != 0) 3878 return ret; 3879 3880 dev = &rte_eth_devices[port_id]; 3881 link = dev->data->dev_link; 3882 3883 if (queue_idx > dev_info.max_tx_queues) { 3884 RTE_ETHDEV_LOG(ERR, 3885 "Set queue rate limit:port %u: invalid queue id=%u\n", 3886 port_id, queue_idx); 3887 return -EINVAL; 3888 } 3889 3890 if (tx_rate > link.link_speed) { 3891 RTE_ETHDEV_LOG(ERR, 3892 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 3893 tx_rate, link.link_speed); 3894 return -EINVAL; 3895 } 3896 3897 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); 3898 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 3899 queue_idx, tx_rate)); 3900 } 3901 3902 int 3903 rte_eth_mirror_rule_set(uint16_t port_id, 3904 struct rte_eth_mirror_conf *mirror_conf, 3905 uint8_t rule_id, uint8_t 
on) 3906 { 3907 struct rte_eth_dev *dev; 3908 3909 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3910 if (mirror_conf->rule_type == 0) { 3911 RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n"); 3912 return -EINVAL; 3913 } 3914 3915 if (mirror_conf->dst_pool >= ETH_64_POOLS) { 3916 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n", 3917 ETH_64_POOLS - 1); 3918 return -EINVAL; 3919 } 3920 3921 if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP | 3922 ETH_MIRROR_VIRTUAL_POOL_DOWN)) && 3923 (mirror_conf->pool_mask == 0)) { 3924 RTE_ETHDEV_LOG(ERR, 3925 "Invalid mirror pool, pool mask can not be 0\n"); 3926 return -EINVAL; 3927 } 3928 3929 if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) && 3930 mirror_conf->vlan.vlan_mask == 0) { 3931 RTE_ETHDEV_LOG(ERR, 3932 "Invalid vlan mask, vlan mask can not be 0\n"); 3933 return -EINVAL; 3934 } 3935 3936 dev = &rte_eth_devices[port_id]; 3937 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP); 3938 3939 return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev, 3940 mirror_conf, rule_id, on)); 3941 } 3942 3943 int 3944 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id) 3945 { 3946 struct rte_eth_dev *dev; 3947 3948 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3949 3950 dev = &rte_eth_devices[port_id]; 3951 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP); 3952 3953 return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev, 3954 rule_id)); 3955 } 3956 3957 RTE_INIT(eth_dev_init_cb_lists) 3958 { 3959 uint16_t i; 3960 3961 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 3962 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 3963 } 3964 3965 int 3966 rte_eth_dev_callback_register(uint16_t port_id, 3967 enum rte_eth_event_type event, 3968 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 3969 { 3970 struct rte_eth_dev *dev; 3971 struct rte_eth_dev_callback *user_cb; 3972 uint16_t next_port; 3973 uint16_t last_port; 3974 3975 if (!cb_fn) 3976 return -EINVAL; 3977 3978 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 3979 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 3980 return -EINVAL; 3981 } 3982 3983 if (port_id == RTE_ETH_ALL) { 3984 next_port = 0; 3985 last_port = RTE_MAX_ETHPORTS - 1; 3986 } else { 3987 next_port = last_port = port_id; 3988 } 3989 3990 rte_spinlock_lock(&rte_eth_dev_cb_lock); 3991 3992 do { 3993 dev = &rte_eth_devices[next_port]; 3994 3995 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 3996 if (user_cb->cb_fn == cb_fn && 3997 user_cb->cb_arg == cb_arg && 3998 user_cb->event == event) { 3999 break; 4000 } 4001 } 4002 4003 /* create a new callback. 
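 * A sketch of how an application reaches this point (callback name
 * and the NULL argument are hypothetical):
 *
 *	static int
 *	lsc_cb(uint16_t pid, enum rte_eth_event_type ev,
 *		void *cb_arg, void *ret_param)
 *	{ return 0; }
 *
 *	rte_eth_dev_callback_register(port_id,
 *		RTE_ETH_EVENT_INTR_LSC, lsc_cb, NULL);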
*/ 4004 if (user_cb == NULL) { 4005 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 4006 sizeof(struct rte_eth_dev_callback), 0); 4007 if (user_cb != NULL) { 4008 user_cb->cb_fn = cb_fn; 4009 user_cb->cb_arg = cb_arg; 4010 user_cb->event = event; 4011 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 4012 user_cb, next); 4013 } else { 4014 rte_spinlock_unlock(&rte_eth_dev_cb_lock); 4015 rte_eth_dev_callback_unregister(port_id, event, 4016 cb_fn, cb_arg); 4017 return -ENOMEM; 4018 } 4019 4020 } 4021 } while (++next_port <= last_port); 4022 4023 rte_spinlock_unlock(&rte_eth_dev_cb_lock); 4024 return 0; 4025 } 4026 4027 int 4028 rte_eth_dev_callback_unregister(uint16_t port_id, 4029 enum rte_eth_event_type event, 4030 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4031 { 4032 int ret; 4033 struct rte_eth_dev *dev; 4034 struct rte_eth_dev_callback *cb, *next; 4035 uint16_t next_port; 4036 uint16_t last_port; 4037 4038 if (!cb_fn) 4039 return -EINVAL; 4040 4041 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4042 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4043 return -EINVAL; 4044 } 4045 4046 if (port_id == RTE_ETH_ALL) { 4047 next_port = 0; 4048 last_port = RTE_MAX_ETHPORTS - 1; 4049 } else { 4050 next_port = last_port = port_id; 4051 } 4052 4053 rte_spinlock_lock(&rte_eth_dev_cb_lock); 4054 4055 do { 4056 dev = &rte_eth_devices[next_port]; 4057 ret = 0; 4058 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 4059 cb = next) { 4060 4061 next = TAILQ_NEXT(cb, next); 4062 4063 if (cb->cb_fn != cb_fn || cb->event != event || 4064 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 4065 continue; 4066 4067 /* 4068 * if this callback is not executing right now, 4069 * then remove it. 4070 */ 4071 if (cb->active == 0) { 4072 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 4073 rte_free(cb); 4074 } else { 4075 ret = -EAGAIN; 4076 } 4077 } 4078 } while (++next_port <= last_port); 4079 4080 rte_spinlock_unlock(&rte_eth_dev_cb_lock); 4081 return ret; 4082 } 4083 4084 int 4085 _rte_eth_dev_callback_process(struct rte_eth_dev *dev, 4086 enum rte_eth_event_type event, void *ret_param) 4087 { 4088 struct rte_eth_dev_callback *cb_lst; 4089 struct rte_eth_dev_callback dev_cb; 4090 int rc = 0; 4091 4092 rte_spinlock_lock(&rte_eth_dev_cb_lock); 4093 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { 4094 if (cb_lst->cb_fn == NULL || cb_lst->event != event) 4095 continue; 4096 dev_cb = *cb_lst; 4097 cb_lst->active = 1; 4098 if (ret_param != NULL) 4099 dev_cb.ret_param = ret_param; 4100 4101 rte_spinlock_unlock(&rte_eth_dev_cb_lock); 4102 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, 4103 dev_cb.cb_arg, dev_cb.ret_param); 4104 rte_spinlock_lock(&rte_eth_dev_cb_lock); 4105 cb_lst->active = 0; 4106 } 4107 rte_spinlock_unlock(&rte_eth_dev_cb_lock); 4108 return rc; 4109 } 4110 4111 void 4112 rte_eth_dev_probing_finish(struct rte_eth_dev *dev) 4113 { 4114 if (dev == NULL) 4115 return; 4116 4117 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); 4118 4119 dev->state = RTE_ETH_DEV_ATTACHED; 4120 } 4121 4122 int 4123 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4124 { 4125 uint32_t vec; 4126 struct rte_eth_dev *dev; 4127 struct rte_intr_handle *intr_handle; 4128 uint16_t qid; 4129 int rc; 4130 4131 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4132 4133 dev = &rte_eth_devices[port_id]; 4134 4135 if (!dev->intr_handle) { 4136 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4137 return -ENOTSUP; 4138 } 4139 4140 intr_handle = dev->intr_handle; 4141 if 
(!intr_handle->intr_vec) { 4142 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4143 return -EPERM; 4144 } 4145 4146 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4147 vec = intr_handle->intr_vec[qid]; 4148 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4149 if (rc && rc != -EEXIST) { 4150 RTE_ETHDEV_LOG(ERR, 4151 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 4152 port_id, qid, op, epfd, vec); 4153 } 4154 } 4155 4156 return 0; 4157 } 4158 4159 int 4160 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4161 { 4162 struct rte_intr_handle *intr_handle; 4163 struct rte_eth_dev *dev; 4164 unsigned int efd_idx; 4165 uint32_t vec; 4166 int fd; 4167 4168 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4169 4170 dev = &rte_eth_devices[port_id]; 4171 4172 if (queue_id >= dev->data->nb_rx_queues) { 4173 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 4174 return -1; 4175 } 4176 4177 if (!dev->intr_handle) { 4178 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4179 return -1; 4180 } 4181 4182 intr_handle = dev->intr_handle; 4183 if (!intr_handle->intr_vec) { 4184 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4185 return -1; 4186 } 4187 4188 vec = intr_handle->intr_vec[queue_id]; 4189 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 4190 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 4191 fd = intr_handle->efds[efd_idx]; 4192 4193 return fd; 4194 } 4195 4196 const struct rte_memzone * 4197 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, 4198 uint16_t queue_id, size_t size, unsigned align, 4199 int socket_id) 4200 { 4201 char z_name[RTE_MEMZONE_NAMESIZE]; 4202 const struct rte_memzone *mz; 4203 int rc; 4204 4205 rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s", 4206 dev->data->port_id, queue_id, ring_name); 4207 if (rc >= RTE_MEMZONE_NAMESIZE) { 4208 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4209 rte_errno = ENAMETOOLONG; 4210 return NULL; 4211 } 4212 4213 mz = rte_memzone_lookup(z_name); 4214 if (mz) 4215 return mz; 4216 4217 return rte_memzone_reserve_aligned(z_name, size, socket_id, 4218 RTE_MEMZONE_IOVA_CONTIG, align); 4219 } 4220 4221 int 4222 rte_eth_dev_create(struct rte_device *device, const char *name, 4223 size_t priv_data_size, 4224 ethdev_bus_specific_init ethdev_bus_specific_init, 4225 void *bus_init_params, 4226 ethdev_init_t ethdev_init, void *init_params) 4227 { 4228 struct rte_eth_dev *ethdev; 4229 int retval; 4230 4231 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); 4232 4233 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4234 ethdev = rte_eth_dev_allocate(name); 4235 if (!ethdev) 4236 return -ENODEV; 4237 4238 if (priv_data_size) { 4239 ethdev->data->dev_private = rte_zmalloc_socket( 4240 name, priv_data_size, RTE_CACHE_LINE_SIZE, 4241 device->numa_node); 4242 4243 if (!ethdev->data->dev_private) { 4244 RTE_ETHDEV_LOG(ERR, 4245 "failed to allocate private data\n"); 4246 retval = -ENOMEM; 4247 goto probe_failed; 4248 } 4249 } 4250 } else { 4251 ethdev = rte_eth_dev_attach_secondary(name); 4252 if (!ethdev) { 4253 RTE_ETHDEV_LOG(ERR, 4254 "secondary process attach failed, ethdev doesn't exist\n"); 4255 return -ENODEV; 4256 } 4257 } 4258 4259 ethdev->device = device; 4260 4261 if (ethdev_bus_specific_init) { 4262 retval = ethdev_bus_specific_init(ethdev, bus_init_params); 4263 if (retval) { 4264 RTE_ETHDEV_LOG(ERR, 4265 "ethdev bus specific initialisation failed\n"); 4266 goto probe_failed; 4267 } 4268 } 4269 4270 retval = ethdev_init(ethdev, init_params); 4271 if (retval) { 4272 RTE_ETHDEV_LOG(ERR, "ethdev 
initialisation failed\n"); 4273 goto probe_failed; 4274 } 4275 4276 rte_eth_dev_probing_finish(ethdev); 4277 4278 return retval; 4279 4280 probe_failed: 4281 rte_eth_dev_release_port(ethdev); 4282 return retval; 4283 } 4284 4285 int 4286 rte_eth_dev_destroy(struct rte_eth_dev *ethdev, 4287 ethdev_uninit_t ethdev_uninit) 4288 { 4289 int ret; 4290 4291 ethdev = rte_eth_dev_allocated(ethdev->data->name); 4292 if (!ethdev) 4293 return -ENODEV; 4294 4295 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL); 4296 4297 ret = ethdev_uninit(ethdev); 4298 if (ret) 4299 return ret; 4300 4301 return rte_eth_dev_release_port(ethdev); 4302 } 4303 4304 int 4305 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 4306 int epfd, int op, void *data) 4307 { 4308 uint32_t vec; 4309 struct rte_eth_dev *dev; 4310 struct rte_intr_handle *intr_handle; 4311 int rc; 4312 4313 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4314 4315 dev = &rte_eth_devices[port_id]; 4316 if (queue_id >= dev->data->nb_rx_queues) { 4317 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 4318 return -EINVAL; 4319 } 4320 4321 if (!dev->intr_handle) { 4322 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4323 return -ENOTSUP; 4324 } 4325 4326 intr_handle = dev->intr_handle; 4327 if (!intr_handle->intr_vec) { 4328 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4329 return -EPERM; 4330 } 4331 4332 vec = intr_handle->intr_vec[queue_id]; 4333 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4334 if (rc && rc != -EEXIST) { 4335 RTE_ETHDEV_LOG(ERR, 4336 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 4337 port_id, queue_id, op, epfd, vec); 4338 return rc; 4339 } 4340 4341 return 0; 4342 } 4343 4344 int 4345 rte_eth_dev_rx_intr_enable(uint16_t port_id, 4346 uint16_t queue_id) 4347 { 4348 struct rte_eth_dev *dev; 4349 4350 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4351 4352 dev = &rte_eth_devices[port_id]; 4353 4354 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 4355 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, 4356 queue_id)); 4357 } 4358 4359 int 4360 rte_eth_dev_rx_intr_disable(uint16_t port_id, 4361 uint16_t queue_id) 4362 { 4363 struct rte_eth_dev *dev; 4364 4365 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4366 4367 dev = &rte_eth_devices[port_id]; 4368 4369 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 4370 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, 4371 queue_id)); 4372 } 4373 4374 4375 int 4376 rte_eth_dev_filter_supported(uint16_t port_id, 4377 enum rte_filter_type filter_type) 4378 { 4379 struct rte_eth_dev *dev; 4380 4381 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4382 4383 dev = &rte_eth_devices[port_id]; 4384 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP); 4385 return (*dev->dev_ops->filter_ctrl)(dev, filter_type, 4386 RTE_ETH_FILTER_NOP, NULL); 4387 } 4388 4389 int 4390 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type, 4391 enum rte_filter_op filter_op, void *arg) 4392 { 4393 struct rte_eth_dev *dev; 4394 4395 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4396 4397 dev = &rte_eth_devices[port_id]; 4398 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP); 4399 return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type, 4400 filter_op, arg)); 4401 } 4402 4403 const struct rte_eth_rxtx_callback * 4404 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 4405 rte_rx_callback_fn fn, void *user_param) 4406 { 4407 
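/* When the library is built without RTE_ETHDEV_RXTX_CALLBACKS, the
 * pre-processor block below turns this function into a stub that sets
 * rte_errno to ENOTSUP and returns NULL; otherwise the callback is
 * validated, allocated and appended in FIFO order under
 * rte_eth_rx_cb_lock further down.
 */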
#ifndef RTE_ETHDEV_RXTX_CALLBACKS 4408 rte_errno = ENOTSUP; 4409 return NULL; 4410 #endif 4411 struct rte_eth_dev *dev; 4412 4413 /* check input parameters */ 4414 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 4415 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 4416 rte_errno = EINVAL; 4417 return NULL; 4418 } 4419 dev = &rte_eth_devices[port_id]; 4420 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 4421 rte_errno = EINVAL; 4422 return NULL; 4423 } 4424 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 4425 4426 if (cb == NULL) { 4427 rte_errno = ENOMEM; 4428 return NULL; 4429 } 4430 4431 cb->fn.rx = fn; 4432 cb->param = user_param; 4433 4434 rte_spinlock_lock(&rte_eth_rx_cb_lock); 4435 /* Add the callbacks in fifo order. */ 4436 struct rte_eth_rxtx_callback *tail = 4437 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 4438 4439 if (!tail) { 4440 /* Stores to cb->fn and cb->param should complete before 4441 * cb is visible to data plane. 4442 */ 4443 __atomic_store_n( 4444 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 4445 cb, __ATOMIC_RELEASE); 4446 4447 } else { 4448 while (tail->next) 4449 tail = tail->next; 4450 /* Stores to cb->fn and cb->param should complete before 4451 * cb is visible to data plane. 4452 */ 4453 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 4454 } 4455 rte_spinlock_unlock(&rte_eth_rx_cb_lock); 4456 4457 return cb; 4458 } 4459 4460 const struct rte_eth_rxtx_callback * 4461 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 4462 rte_rx_callback_fn fn, void *user_param) 4463 { 4464 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4465 rte_errno = ENOTSUP; 4466 return NULL; 4467 #endif 4468 /* check input parameters */ 4469 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 4470 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 4471 rte_errno = EINVAL; 4472 return NULL; 4473 } 4474 4475 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 4476 4477 if (cb == NULL) { 4478 rte_errno = ENOMEM; 4479 return NULL; 4480 } 4481 4482 cb->fn.rx = fn; 4483 cb->param = user_param; 4484 4485 rte_spinlock_lock(&rte_eth_rx_cb_lock); 4486 /* Add the callbacks at first position */ 4487 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 4488 rte_smp_wmb(); 4489 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb; 4490 rte_spinlock_unlock(&rte_eth_rx_cb_lock); 4491 4492 return cb; 4493 } 4494 4495 const struct rte_eth_rxtx_callback * 4496 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 4497 rte_tx_callback_fn fn, void *user_param) 4498 { 4499 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4500 rte_errno = ENOTSUP; 4501 return NULL; 4502 #endif 4503 struct rte_eth_dev *dev; 4504 4505 /* check input parameters */ 4506 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 4507 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 4508 rte_errno = EINVAL; 4509 return NULL; 4510 } 4511 4512 dev = &rte_eth_devices[port_id]; 4513 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 4514 rte_errno = EINVAL; 4515 return NULL; 4516 } 4517 4518 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 4519 4520 if (cb == NULL) { 4521 rte_errno = ENOMEM; 4522 return NULL; 4523 } 4524 4525 cb->fn.tx = fn; 4526 cb->param = user_param; 4527 4528 rte_spinlock_lock(&rte_eth_tx_cb_lock); 4529 /* Add the callbacks in fifo order. 
*/ 4530 struct rte_eth_rxtx_callback *tail = 4531 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 4532 4533 if (!tail) { 4534 /* Stores to cb->fn and cb->param should complete before 4535 * cb is visible to data plane. 4536 */ 4537 __atomic_store_n( 4538 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 4539 cb, __ATOMIC_RELEASE); 4540 4541 } else { 4542 while (tail->next) 4543 tail = tail->next; 4544 /* Stores to cb->fn and cb->param should complete before 4545 * cb is visible to data plane. 4546 */ 4547 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 4548 } 4549 rte_spinlock_unlock(&rte_eth_tx_cb_lock); 4550 4551 return cb; 4552 } 4553 4554 int 4555 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 4556 const struct rte_eth_rxtx_callback *user_cb) 4557 { 4558 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4559 return -ENOTSUP; 4560 #endif 4561 /* Check input parameters. */ 4562 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 4563 if (user_cb == NULL || 4564 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 4565 return -EINVAL; 4566 4567 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4568 struct rte_eth_rxtx_callback *cb; 4569 struct rte_eth_rxtx_callback **prev_cb; 4570 int ret = -EINVAL; 4571 4572 rte_spinlock_lock(&rte_eth_rx_cb_lock); 4573 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 4574 for (; *prev_cb != NULL; prev_cb = &cb->next) { 4575 cb = *prev_cb; 4576 if (cb == user_cb) { 4577 /* Remove the user cb from the callback list. */ 4578 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 4579 ret = 0; 4580 break; 4581 } 4582 } 4583 rte_spinlock_unlock(&rte_eth_rx_cb_lock); 4584 4585 return ret; 4586 } 4587 4588 int 4589 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 4590 const struct rte_eth_rxtx_callback *user_cb) 4591 { 4592 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4593 return -ENOTSUP; 4594 #endif 4595 /* Check input parameters. */ 4596 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 4597 if (user_cb == NULL || 4598 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 4599 return -EINVAL; 4600 4601 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4602 int ret = -EINVAL; 4603 struct rte_eth_rxtx_callback *cb; 4604 struct rte_eth_rxtx_callback **prev_cb; 4605 4606 rte_spinlock_lock(&rte_eth_tx_cb_lock); 4607 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 4608 for (; *prev_cb != NULL; prev_cb = &cb->next) { 4609 cb = *prev_cb; 4610 if (cb == user_cb) { 4611 /* Remove the user cb from the callback list. 
*/ 4612 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 4613 ret = 0; 4614 break; 4615 } 4616 } 4617 rte_spinlock_unlock(&rte_eth_tx_cb_lock); 4618 4619 return ret; 4620 } 4621 4622 int 4623 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 4624 struct rte_eth_rxq_info *qinfo) 4625 { 4626 struct rte_eth_dev *dev; 4627 4628 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4629 4630 if (qinfo == NULL) 4631 return -EINVAL; 4632 4633 dev = &rte_eth_devices[port_id]; 4634 if (queue_id >= dev->data->nb_rx_queues) { 4635 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 4636 return -EINVAL; 4637 } 4638 4639 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 4640 RTE_ETHDEV_LOG(INFO, 4641 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 4642 queue_id, port_id); 4643 return -EINVAL; 4644 } 4645 4646 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 4647 4648 memset(qinfo, 0, sizeof(*qinfo)); 4649 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 4650 return 0; 4651 } 4652 4653 int 4654 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 4655 struct rte_eth_txq_info *qinfo) 4656 { 4657 struct rte_eth_dev *dev; 4658 4659 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4660 4661 if (qinfo == NULL) 4662 return -EINVAL; 4663 4664 dev = &rte_eth_devices[port_id]; 4665 if (queue_id >= dev->data->nb_tx_queues) { 4666 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); 4667 return -EINVAL; 4668 } 4669 4670 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 4671 RTE_ETHDEV_LOG(INFO, 4672 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 4673 queue_id, port_id); 4674 return -EINVAL; 4675 } 4676 4677 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 4678 4679 memset(qinfo, 0, sizeof(*qinfo)); 4680 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 4681 4682 return 0; 4683 } 4684 4685 int 4686 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 4687 struct rte_eth_burst_mode *mode) 4688 { 4689 struct rte_eth_dev *dev; 4690 4691 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4692 4693 if (mode == NULL) 4694 return -EINVAL; 4695 4696 dev = &rte_eth_devices[port_id]; 4697 4698 if (queue_id >= dev->data->nb_rx_queues) { 4699 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 4700 return -EINVAL; 4701 } 4702 4703 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); 4704 memset(mode, 0, sizeof(*mode)); 4705 return eth_err(port_id, 4706 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 4707 } 4708 4709 int 4710 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 4711 struct rte_eth_burst_mode *mode) 4712 { 4713 struct rte_eth_dev *dev; 4714 4715 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4716 4717 if (mode == NULL) 4718 return -EINVAL; 4719 4720 dev = &rte_eth_devices[port_id]; 4721 4722 if (queue_id >= dev->data->nb_tx_queues) { 4723 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); 4724 return -EINVAL; 4725 } 4726 4727 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 4728 memset(mode, 0, sizeof(*mode)); 4729 return eth_err(port_id, 4730 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 4731 } 4732 4733 int 4734 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 4735 struct rte_ether_addr *mc_addr_set, 4736 uint32_t nb_mc_addr) 4737 { 4738 struct rte_eth_dev *dev; 4739 4740 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4741 4742 dev = &rte_eth_devices[port_id]; 4743 
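/* Hand the whole multicast list to the PMD in one call; drivers that do
 * not implement set_mc_addr_list report -ENOTSUP via the check below.
 */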
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 4744 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 4745 mc_addr_set, nb_mc_addr)); 4746 } 4747 4748 int 4749 rte_eth_timesync_enable(uint16_t port_id) 4750 { 4751 struct rte_eth_dev *dev; 4752 4753 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4754 dev = &rte_eth_devices[port_id]; 4755 4756 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 4757 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 4758 } 4759 4760 int 4761 rte_eth_timesync_disable(uint16_t port_id) 4762 { 4763 struct rte_eth_dev *dev; 4764 4765 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4766 dev = &rte_eth_devices[port_id]; 4767 4768 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 4769 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 4770 } 4771 4772 int 4773 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 4774 uint32_t flags) 4775 { 4776 struct rte_eth_dev *dev; 4777 4778 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4779 dev = &rte_eth_devices[port_id]; 4780 4781 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 4782 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 4783 (dev, timestamp, flags)); 4784 } 4785 4786 int 4787 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 4788 struct timespec *timestamp) 4789 { 4790 struct rte_eth_dev *dev; 4791 4792 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4793 dev = &rte_eth_devices[port_id]; 4794 4795 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 4796 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 4797 (dev, timestamp)); 4798 } 4799 4800 int 4801 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 4802 { 4803 struct rte_eth_dev *dev; 4804 4805 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4806 dev = &rte_eth_devices[port_id]; 4807 4808 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 4809 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, 4810 delta)); 4811 } 4812 4813 int 4814 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 4815 { 4816 struct rte_eth_dev *dev; 4817 4818 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4819 dev = &rte_eth_devices[port_id]; 4820 4821 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 4822 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 4823 timestamp)); 4824 } 4825 4826 int 4827 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 4828 { 4829 struct rte_eth_dev *dev; 4830 4831 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4832 dev = &rte_eth_devices[port_id]; 4833 4834 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 4835 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 4836 timestamp)); 4837 } 4838 4839 int 4840 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 4841 { 4842 struct rte_eth_dev *dev; 4843 4844 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4845 dev = &rte_eth_devices[port_id]; 4846 4847 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 4848 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 4849 } 4850 4851 int 4852 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 4853 { 4854 struct rte_eth_dev *dev; 4855 4856 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4857 4858 dev = 
&rte_eth_devices[port_id]; 4859 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 4860 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 4861 } 4862 4863 int 4864 rte_eth_dev_get_eeprom_length(uint16_t port_id) 4865 { 4866 struct rte_eth_dev *dev; 4867 4868 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4869 4870 dev = &rte_eth_devices[port_id]; 4871 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 4872 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 4873 } 4874 4875 int 4876 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 4877 { 4878 struct rte_eth_dev *dev; 4879 4880 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4881 4882 dev = &rte_eth_devices[port_id]; 4883 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 4884 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 4885 } 4886 4887 int 4888 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 4889 { 4890 struct rte_eth_dev *dev; 4891 4892 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4893 4894 dev = &rte_eth_devices[port_id]; 4895 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 4896 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 4897 } 4898 4899 int 4900 rte_eth_dev_get_module_info(uint16_t port_id, 4901 struct rte_eth_dev_module_info *modinfo) 4902 { 4903 struct rte_eth_dev *dev; 4904 4905 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4906 4907 dev = &rte_eth_devices[port_id]; 4908 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 4909 return (*dev->dev_ops->get_module_info)(dev, modinfo); 4910 } 4911 4912 int 4913 rte_eth_dev_get_module_eeprom(uint16_t port_id, 4914 struct rte_dev_eeprom_info *info) 4915 { 4916 struct rte_eth_dev *dev; 4917 4918 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4919 4920 dev = &rte_eth_devices[port_id]; 4921 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 4922 return (*dev->dev_ops->get_module_eeprom)(dev, info); 4923 } 4924 4925 int 4926 rte_eth_dev_get_dcb_info(uint16_t port_id, 4927 struct rte_eth_dcb_info *dcb_info) 4928 { 4929 struct rte_eth_dev *dev; 4930 4931 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4932 4933 dev = &rte_eth_devices[port_id]; 4934 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 4935 4936 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 4937 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 4938 } 4939 4940 int 4941 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id, 4942 struct rte_eth_l2_tunnel_conf *l2_tunnel) 4943 { 4944 struct rte_eth_dev *dev; 4945 4946 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4947 if (l2_tunnel == NULL) { 4948 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n"); 4949 return -EINVAL; 4950 } 4951 4952 if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) { 4953 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4954 return -EINVAL; 4955 } 4956 4957 dev = &rte_eth_devices[port_id]; 4958 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf, 4959 -ENOTSUP); 4960 return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, 4961 l2_tunnel)); 4962 } 4963 4964 int 4965 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id, 4966 struct rte_eth_l2_tunnel_conf *l2_tunnel, 4967 uint32_t mask, 4968 uint8_t en) 4969 { 4970 struct rte_eth_dev *dev; 4971 4972 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4973 4974 if (l2_tunnel == NULL) { 4975 RTE_ETHDEV_LOG(ERR, "Invalid 
l2_tunnel parameter\n"); 4976 return -EINVAL; 4977 } 4978 4979 if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) { 4980 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4981 return -EINVAL; 4982 } 4983 4984 if (mask == 0) { 4985 RTE_ETHDEV_LOG(ERR, "Mask should have a value\n"); 4986 return -EINVAL; 4987 } 4988 4989 dev = &rte_eth_devices[port_id]; 4990 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set, 4991 -ENOTSUP); 4992 return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev, 4993 l2_tunnel, mask, en)); 4994 } 4995 4996 static void 4997 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc, 4998 const struct rte_eth_desc_lim *desc_lim) 4999 { 5000 if (desc_lim->nb_align != 0) 5001 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5002 5003 if (desc_lim->nb_max != 0) 5004 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5005 5006 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5007 } 5008 5009 int 5010 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5011 uint16_t *nb_rx_desc, 5012 uint16_t *nb_tx_desc) 5013 { 5014 struct rte_eth_dev_info dev_info; 5015 int ret; 5016 5017 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5018 5019 ret = rte_eth_dev_info_get(port_id, &dev_info); 5020 if (ret != 0) 5021 return ret; 5022 5023 if (nb_rx_desc != NULL) 5024 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5025 5026 if (nb_tx_desc != NULL) 5027 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5028 5029 return 0; 5030 } 5031 5032 int 5033 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5034 struct rte_eth_hairpin_cap *cap) 5035 { 5036 struct rte_eth_dev *dev; 5037 5038 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); 5039 5040 dev = &rte_eth_devices[port_id]; 5041 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 5042 memset(cap, 0, sizeof(*cap)); 5043 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5044 } 5045 5046 int 5047 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5048 { 5049 if (dev->data->rx_queue_state[queue_id] == 5050 RTE_ETH_QUEUE_STATE_HAIRPIN) 5051 return 1; 5052 return 0; 5053 } 5054 5055 int 5056 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5057 { 5058 if (dev->data->tx_queue_state[queue_id] == 5059 RTE_ETH_QUEUE_STATE_HAIRPIN) 5060 return 1; 5061 return 0; 5062 } 5063 5064 int 5065 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5066 { 5067 struct rte_eth_dev *dev; 5068 5069 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5070 5071 if (pool == NULL) 5072 return -EINVAL; 5073 5074 dev = &rte_eth_devices[port_id]; 5075 5076 if (*dev->dev_ops->pool_ops_supported == NULL) 5077 return 1; /* all pools are supported */ 5078 5079 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5080 } 5081 5082 /** 5083 * A set of values to describe the possible states of a switch domain. 5084 */ 5085 enum rte_eth_switch_domain_state { 5086 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, 5087 RTE_ETH_SWITCH_DOMAIN_ALLOCATED 5088 }; 5089 5090 /** 5091 * Array of switch domains available for allocation. Array is sized to 5092 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than 5093 * ethdev ports in a single process. 
5094 */ 5095 static struct rte_eth_dev_switch { 5096 enum rte_eth_switch_domain_state state; 5097 } rte_eth_switch_domains[RTE_MAX_ETHPORTS]; 5098 5099 int 5100 rte_eth_switch_domain_alloc(uint16_t *domain_id) 5101 { 5102 uint16_t i; 5103 5104 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 5105 5106 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 5107 if (rte_eth_switch_domains[i].state == 5108 RTE_ETH_SWITCH_DOMAIN_UNUSED) { 5109 rte_eth_switch_domains[i].state = 5110 RTE_ETH_SWITCH_DOMAIN_ALLOCATED; 5111 *domain_id = i; 5112 return 0; 5113 } 5114 } 5115 5116 return -ENOSPC; 5117 } 5118 5119 int 5120 rte_eth_switch_domain_free(uint16_t domain_id) 5121 { 5122 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || 5123 domain_id >= RTE_MAX_ETHPORTS) 5124 return -EINVAL; 5125 5126 if (rte_eth_switch_domains[domain_id].state != 5127 RTE_ETH_SWITCH_DOMAIN_ALLOCATED) 5128 return -EINVAL; 5129 5130 rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED; 5131 5132 return 0; 5133 } 5134 5135 static int 5136 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) 5137 { 5138 int state; 5139 struct rte_kvargs_pair *pair; 5140 char *letter; 5141 5142 arglist->str = strdup(str_in); 5143 if (arglist->str == NULL) 5144 return -ENOMEM; 5145 5146 letter = arglist->str; 5147 state = 0; 5148 arglist->count = 0; 5149 pair = &arglist->pairs[0]; 5150 while (1) { 5151 switch (state) { 5152 case 0: /* Initial */ 5153 if (*letter == '=') 5154 return -EINVAL; 5155 else if (*letter == '\0') 5156 return 0; 5157 5158 state = 1; 5159 pair->key = letter; 5160 /* fall-thru */ 5161 5162 case 1: /* Parsing key */ 5163 if (*letter == '=') { 5164 *letter = '\0'; 5165 pair->value = letter + 1; 5166 state = 2; 5167 } else if (*letter == ',' || *letter == '\0') 5168 return -EINVAL; 5169 break; 5170 5171 5172 case 2: /* Parsing value */ 5173 if (*letter == '[') 5174 state = 3; 5175 else if (*letter == ',') { 5176 *letter = '\0'; 5177 arglist->count++; 5178 pair = &arglist->pairs[arglist->count]; 5179 state = 0; 5180 } else if (*letter == '\0') { 5181 letter--; 5182 arglist->count++; 5183 pair = &arglist->pairs[arglist->count]; 5184 state = 0; 5185 } 5186 break; 5187 5188 case 3: /* Parsing list */ 5189 if (*letter == ']') 5190 state = 2; 5191 else if (*letter == '\0') 5192 return -EINVAL; 5193 break; 5194 } 5195 letter++; 5196 } 5197 } 5198 5199 int 5200 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) 5201 { 5202 struct rte_kvargs args; 5203 struct rte_kvargs_pair *pair; 5204 unsigned int i; 5205 int result = 0; 5206 5207 memset(eth_da, 0, sizeof(*eth_da)); 5208 5209 result = rte_eth_devargs_tokenise(&args, dargs); 5210 if (result < 0) 5211 goto parse_cleanup; 5212 5213 for (i = 0; i < args.count; i++) { 5214 pair = &args.pairs[i]; 5215 if (strcmp("representor", pair->key) == 0) { 5216 result = rte_eth_devargs_parse_list(pair->value, 5217 rte_eth_devargs_parse_representor_ports, 5218 eth_da); 5219 if (result < 0) 5220 goto parse_cleanup; 5221 } 5222 } 5223 5224 parse_cleanup: 5225 if (args.str) 5226 free(args.str); 5227 5228 return result; 5229 } 5230 5231 RTE_INIT(ethdev_init_log) 5232 { 5233 rte_eth_dev_logtype = rte_log_register("lib.ethdev"); 5234 if (rte_eth_dev_logtype >= 0) 5235 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO); 5236 } 5237
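/*
 * Illustrative usage sketches (not part of the library; compiled out by the
 * hypothetical RTE_ETHDEV_DOC_EXAMPLE guard, which is never defined). They
 * show how an application might use the event-callback and RX-callback APIs
 * implemented above. All example_* names are invented for illustration.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLE
/* Event callback matching the rte_eth_dev_cb_fn prototype. */
static int
example_lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		     void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	printf("port %u: link status change event %d\n", port_id, (int)event);
	return 0;
}

/* Register or unregister the callback on every port via RTE_ETH_ALL. */
static int
example_track_link_on_all_ports(bool enable)
{
	if (enable)
		return rte_eth_dev_callback_register(RTE_ETH_ALL,
				RTE_ETH_EVENT_INTR_LSC,
				example_lsc_event_cb, NULL);
	return rte_eth_dev_callback_unregister(RTE_ETH_ALL,
			RTE_ETH_EVENT_INTR_LSC,
			example_lsc_event_cb, NULL);
}

/* Data-plane RX callback matching rte_rx_callback_fn: counts packets. */
static uint16_t
example_count_rx_cb(uint16_t port_id, uint16_t queue_id,
		    struct rte_mbuf **pkts, uint16_t nb_pkts,
		    uint16_t max_pkts, void *user_param)
{
	uint64_t *counter = user_param;

	RTE_SET_USED(port_id);
	RTE_SET_USED(queue_id);
	RTE_SET_USED(pkts);
	RTE_SET_USED(max_pkts);
	*counter += nb_pkts;
	return nb_pkts;
}

/* Attach the counter to RX queue 0 of a port; the returned handle is what
 * the application later passes to rte_eth_remove_rx_callback().
 */
static const struct rte_eth_rxtx_callback *
example_add_rx_counter(uint16_t port_id, uint64_t *counter)
{
	return rte_eth_add_rx_callback(port_id, 0,
			example_count_rx_cb, counter);
}
#endif /* RTE_ETHDEV_DOC_EXAMPLE */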