1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <stdarg.h> 7 #include <errno.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <stdint.h> 11 #include <inttypes.h> 12 13 #include <sys/queue.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 #include <fcntl.h> 17 #include <unistd.h> 18 19 #include <rte_common.h> 20 #include <rte_byteorder.h> 21 #include <rte_debug.h> 22 #include <rte_log.h> 23 #include <rte_memory.h> 24 #include <rte_memcpy.h> 25 #include <rte_memzone.h> 26 #include <rte_launch.h> 27 #include <rte_eal.h> 28 #include <rte_per_lcore.h> 29 #include <rte_lcore.h> 30 #include <rte_branch_prediction.h> 31 #include <rte_mempool.h> 32 #include <rte_mbuf.h> 33 #include <rte_interrupts.h> 34 #include <rte_pci.h> 35 #include <rte_ether.h> 36 #include <rte_ethdev.h> 37 #include <rte_string_fns.h> 38 #include <rte_cycles.h> 39 #include <rte_flow.h> 40 #include <rte_mtr.h> 41 #include <rte_errno.h> 42 #ifdef RTE_NET_IXGBE 43 #include <rte_pmd_ixgbe.h> 44 #endif 45 #ifdef RTE_NET_I40E 46 #include <rte_pmd_i40e.h> 47 #endif 48 #ifdef RTE_NET_BNXT 49 #include <rte_pmd_bnxt.h> 50 #endif 51 #ifdef RTE_LIB_GRO 52 #include <rte_gro.h> 53 #endif 54 #include <rte_hexdump.h> 55 56 #include "testpmd.h" 57 #include "cmdline_mtr.h" 58 59 #define ETHDEV_FWVERS_LEN 32 60 61 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 62 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW 63 #else 64 #define CLOCK_TYPE_ID CLOCK_MONOTONIC 65 #endif 66 67 #define NS_PER_SEC 1E9 68 69 static char *flowtype_to_str(uint16_t flow_type); 70 71 static const struct { 72 enum tx_pkt_split split; 73 const char *name; 74 } tx_split_name[] = { 75 { 76 .split = TX_PKT_SPLIT_OFF, 77 .name = "off", 78 }, 79 { 80 .split = TX_PKT_SPLIT_ON, 81 .name = "on", 82 }, 83 { 84 .split = TX_PKT_SPLIT_RND, 85 .name = "rand", 86 }, 87 }; 88 89 const struct rss_type_info rss_type_table[] = { 90 { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | 91 RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD | 92 RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | 93 RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2}, 94 { "none", 0 }, 95 { "eth", RTE_ETH_RSS_ETH }, 96 { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY }, 97 { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY }, 98 { "vlan", RTE_ETH_RSS_VLAN }, 99 { "s-vlan", RTE_ETH_RSS_S_VLAN }, 100 { "c-vlan", RTE_ETH_RSS_C_VLAN }, 101 { "ipv4", RTE_ETH_RSS_IPV4 }, 102 { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 }, 103 { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP }, 104 { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP }, 105 { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP }, 106 { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER }, 107 { "ipv6", RTE_ETH_RSS_IPV6 }, 108 { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 }, 109 { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP }, 110 { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP }, 111 { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP }, 112 { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER }, 113 { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD }, 114 { "ipv6-ex", RTE_ETH_RSS_IPV6_EX }, 115 { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX }, 116 { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX }, 117 { "port", RTE_ETH_RSS_PORT }, 118 { "vxlan", RTE_ETH_RSS_VXLAN }, 119 { "geneve", RTE_ETH_RSS_GENEVE }, 120 { "nvgre", RTE_ETH_RSS_NVGRE }, 121 { "ip", RTE_ETH_RSS_IP }, 122 { "udp", RTE_ETH_RSS_UDP }, 123 { "tcp", RTE_ETH_RSS_TCP }, 124 { "sctp", RTE_ETH_RSS_SCTP }, 125 { "tunnel", RTE_ETH_RSS_TUNNEL }, 126 { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, 127 { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, 128 { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, 129 { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, 130 { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, 131 { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, 132 { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY }, 133 { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY }, 134 { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY }, 135 { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY }, 136 { "esp", RTE_ETH_RSS_ESP }, 137 { "ah", RTE_ETH_RSS_AH }, 138 { "l2tpv3", RTE_ETH_RSS_L2TPV3 }, 139 { "pfcp", RTE_ETH_RSS_PFCP }, 140 { "pppoe", RTE_ETH_RSS_PPPOE }, 141 { "gtpu", RTE_ETH_RSS_GTPU }, 142 { "ecpri", RTE_ETH_RSS_ECPRI }, 143 { "mpls", RTE_ETH_RSS_MPLS }, 144 { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM }, 145 { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM }, 146 { "l2tpv2", RTE_ETH_RSS_L2TPV2 }, 147 { NULL, 0 }, 148 }; 149 150 static const struct { 151 enum rte_eth_fec_mode mode; 152 const char *name; 153 } fec_mode_name[] = { 154 { 155 .mode = RTE_ETH_FEC_NOFEC, 156 .name = "off", 157 }, 158 { 159 .mode = RTE_ETH_FEC_AUTO, 160 .name = "auto", 161 }, 162 { 163 .mode = RTE_ETH_FEC_BASER, 164 .name = "baser", 165 }, 166 { 167 .mode = RTE_ETH_FEC_RS, 168 .name = "rs", 169 }, 170 }; 171 172 static void 173 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) 174 { 175 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 176 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 177 printf("%s%s", name, buf); 178 } 179 180 static void 181 nic_xstats_display_periodic(portid_t port_id) 182 { 183 struct xstat_display_info *xstats_info; 184 uint64_t *prev_values, *curr_values; 185 uint64_t diff_value, value_rate; 186 struct timespec cur_time; 187 uint64_t *ids_supp; 188 size_t ids_supp_sz; 189 uint64_t diff_ns; 190 unsigned int i; 191 int rc; 192 193 xstats_info = &ports[port_id].xstats_info; 194 195 ids_supp_sz = xstats_info->ids_supp_sz; 196 if (ids_supp_sz == 0) 197 return; 198 199 printf("\n"); 200 201 ids_supp = xstats_info->ids_supp; 202 prev_values = xstats_info->prev_values; 203 curr_values = xstats_info->curr_values; 204 205 rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values, 206 ids_supp_sz); 207 if (rc != (int)ids_supp_sz) { 208 fprintf(stderr, 209 "Failed to get values of %zu xstats for port %u - return code %d\n", 210 ids_supp_sz, port_id, rc); 211 return; 212 } 213 214 diff_ns = 0; 215 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 216 uint64_t ns; 217 218 ns = cur_time.tv_sec * NS_PER_SEC; 219 ns += cur_time.tv_nsec; 220 221 if (xstats_info->prev_ns != 0) 222 diff_ns = ns - xstats_info->prev_ns; 223 xstats_info->prev_ns = ns; 224 } 225 226 printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)"); 227 for (i = 0; i < ids_supp_sz; i++) { 228 diff_value = (curr_values[i] > prev_values[i]) ? 229 (curr_values[i] - prev_values[i]) : 0; 230 prev_values[i] = curr_values[i]; 231 value_rate = diff_ns > 0 ? 232 (double)diff_value / diff_ns * NS_PER_SEC : 0; 233 234 printf(" %-25s%12"PRIu64" %15"PRIu64"\n", 235 xstats_display[i].name, curr_values[i], value_rate); 236 } 237 } 238 239 void 240 nic_stats_display(portid_t port_id) 241 { 242 static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; 243 static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; 244 static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; 245 static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; 246 static uint64_t prev_ns[RTE_MAX_ETHPORTS]; 247 struct timespec cur_time; 248 uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, 249 diff_ns; 250 uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; 251 struct rte_eth_stats stats; 252 253 static const char *nic_stats_border = "########################"; 254 255 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 256 print_valid_ports(); 257 return; 258 } 259 rte_eth_stats_get(port_id, &stats); 260 printf("\n %s NIC statistics for port %-2d %s\n", 261 nic_stats_border, port_id, nic_stats_border); 262 263 printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " 264 "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); 265 printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); 266 printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); 267 printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " 268 "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); 269 270 diff_ns = 0; 271 if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { 272 uint64_t ns; 273 274 ns = cur_time.tv_sec * NS_PER_SEC; 275 ns += cur_time.tv_nsec; 276 277 if (prev_ns[port_id] != 0) 278 diff_ns = ns - prev_ns[port_id]; 279 prev_ns[port_id] = ns; 280 } 281 282 diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 283 (stats.ipackets - prev_pkts_rx[port_id]) : 0; 284 diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? 285 (stats.opackets - prev_pkts_tx[port_id]) : 0; 286 prev_pkts_rx[port_id] = stats.ipackets; 287 prev_pkts_tx[port_id] = stats.opackets; 288 mpps_rx = diff_ns > 0 ? 289 (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; 290 mpps_tx = diff_ns > 0 ? 291 (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; 292 293 diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? 294 (stats.ibytes - prev_bytes_rx[port_id]) : 0; 295 diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? 296 (stats.obytes - prev_bytes_tx[port_id]) : 0; 297 prev_bytes_rx[port_id] = stats.ibytes; 298 prev_bytes_tx[port_id] = stats.obytes; 299 mbps_rx = diff_ns > 0 ? 300 (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; 301 mbps_tx = diff_ns > 0 ? 302 (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; 303 304 printf("\n Throughput (since last show)\n"); 305 printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" 306 PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, 307 mpps_tx, mbps_tx * 8); 308 309 if (xstats_display_num > 0) 310 nic_xstats_display_periodic(port_id); 311 312 printf(" %s############################%s\n", 313 nic_stats_border, nic_stats_border); 314 } 315 316 void 317 nic_stats_clear(portid_t port_id) 318 { 319 int ret; 320 321 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 322 print_valid_ports(); 323 return; 324 } 325 326 ret = rte_eth_stats_reset(port_id); 327 if (ret != 0) { 328 fprintf(stderr, 329 "%s: Error: failed to reset stats (port %u): %s", 330 __func__, port_id, strerror(-ret)); 331 return; 332 } 333 334 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 335 if (ret != 0) { 336 if (ret < 0) 337 ret = -ret; 338 fprintf(stderr, 339 "%s: Error: failed to get stats (port %u): %s", 340 __func__, port_id, strerror(ret)); 341 return; 342 } 343 printf("\n NIC statistics for port %d cleared\n", port_id); 344 } 345 346 void 347 nic_xstats_display(portid_t port_id) 348 { 349 struct rte_eth_xstat *xstats; 350 int cnt_xstats, idx_xstat; 351 struct rte_eth_xstat_name *xstats_names; 352 353 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 354 print_valid_ports(); 355 return; 356 } 357 printf("###### NIC extended statistics for port %-2d\n", port_id); 358 if (!rte_eth_dev_is_valid_port(port_id)) { 359 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 360 return; 361 } 362 363 /* Get count */ 364 cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); 365 if (cnt_xstats < 0) { 366 fprintf(stderr, "Error: Cannot get count of xstats\n"); 367 return; 368 } 369 370 /* Get id-name lookup table */ 371 xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); 372 if (xstats_names == NULL) { 373 fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); 374 return; 375 } 376 if (cnt_xstats != rte_eth_xstats_get_names( 377 port_id, xstats_names, cnt_xstats)) { 378 fprintf(stderr, "Error: Cannot get xstats lookup\n"); 379 free(xstats_names); 380 return; 381 } 382 383 /* Get stats themselves */ 384 xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); 385 if (xstats == NULL) { 386 fprintf(stderr, "Cannot allocate memory for xstats\n"); 387 free(xstats_names); 388 return; 389 } 390 if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { 391 fprintf(stderr, "Error: Unable to get xstats\n"); 392 free(xstats_names); 393 free(xstats); 394 return; 395 } 396 397 /* Display xstats */ 398 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 399 if (xstats_hide_zero && !xstats[idx_xstat].value) 400 continue; 401 printf("%s: %"PRIu64"\n", 402 xstats_names[idx_xstat].name, 403 xstats[idx_xstat].value); 404 } 405 free(xstats_names); 406 free(xstats); 407 } 408 409 void 410 nic_xstats_clear(portid_t port_id) 411 { 412 int ret; 413 414 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 415 print_valid_ports(); 416 return; 417 } 418 419 ret = rte_eth_xstats_reset(port_id); 420 if (ret != 0) { 421 fprintf(stderr, 422 "%s: Error: failed to reset xstats (port %u): %s\n", 423 __func__, port_id, strerror(-ret)); 424 return; 425 } 426 427 ret = rte_eth_stats_get(port_id, &ports[port_id].stats); 428 if (ret != 0) { 429 if (ret < 0) 430 ret = -ret; 431 fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", 432 __func__, port_id, strerror(ret)); 433 return; 434 } 435 } 436 437 static const char * 438 get_queue_state_name(uint8_t queue_state) 439 { 440 if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) 441 return "stopped"; 442 else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) 443 return "started"; 444 else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) 445 return "hairpin"; 446 else 447 return "unknown"; 448 } 449 450 void 451 rx_queue_infos_display(portid_t port_id, uint16_t queue_id) 452 { 453 struct rte_eth_burst_mode mode; 454 struct rte_eth_rxq_info qinfo; 455 int32_t rc; 456 static const char *info_border = "*********************"; 457 458 rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); 459 if (rc != 0) { 460 fprintf(stderr, 461 "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", 462 port_id, queue_id, strerror(-rc), rc); 463 return; 464 } 465 466 printf("\n%s Infos for port %-2u, RX queue %-2u %s", 467 info_border, port_id, queue_id, info_border); 468 469 printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); 470 printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); 471 printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); 472 printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); 473 printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); 474 printf("\nRX drop packets: %s", 475 (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); 476 printf("\nRX deferred start: %s", 477 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); 478 printf("\nRX scattered packets: %s", 479 (qinfo.scattered_rx != 0) ? "on" : "off"); 480 printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); 481 if (qinfo.rx_buf_size != 0) 482 printf("\nRX buffer size: %hu", qinfo.rx_buf_size); 483 printf("\nNumber of RXDs: %hu", qinfo.nb_desc); 484 485 if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) 486 printf("\nBurst mode: %s%s", 487 mode.info, 488 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 489 " (per queue)" : ""); 490 491 printf("\n"); 492 } 493 494 void 495 tx_queue_infos_display(portid_t port_id, uint16_t queue_id) 496 { 497 struct rte_eth_burst_mode mode; 498 struct rte_eth_txq_info qinfo; 499 int32_t rc; 500 static const char *info_border = "*********************"; 501 502 rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); 503 if (rc != 0) { 504 fprintf(stderr, 505 "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", 506 port_id, queue_id, strerror(-rc), rc); 507 return; 508 } 509 510 printf("\n%s Infos for port %-2u, TX queue %-2u %s", 511 info_border, port_id, queue_id, info_border); 512 513 printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); 514 printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); 515 printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); 516 printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); 517 printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); 518 printf("\nTX deferred start: %s", 519 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); 520 printf("\nNumber of TXDs: %hu", qinfo.nb_desc); 521 printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); 522 523 if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) 524 printf("\nBurst mode: %s%s", 525 mode.info, 526 mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 527 " (per queue)" : ""); 528 529 printf("\n"); 530 } 531 532 static int bus_match_all(const struct rte_bus *bus, const void *data) 533 { 534 RTE_SET_USED(bus); 535 RTE_SET_USED(data); 536 return 0; 537 } 538 539 static void 540 device_infos_display_speeds(uint32_t speed_capa) 541 { 542 printf("\n\tDevice speed capability:"); 543 if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) 544 printf(" Autonegotiate (all speeds)"); 545 if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) 546 printf(" Disable autonegotiate (fixed speed) "); 547 if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) 548 printf(" 10 Mbps half-duplex "); 549 if (speed_capa & RTE_ETH_LINK_SPEED_10M) 550 printf(" 10 Mbps full-duplex "); 551 if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) 552 printf(" 100 Mbps half-duplex "); 553 if (speed_capa & RTE_ETH_LINK_SPEED_100M) 554 printf(" 100 Mbps full-duplex "); 555 if (speed_capa & RTE_ETH_LINK_SPEED_1G) 556 printf(" 1 Gbps "); 557 if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) 558 printf(" 2.5 Gbps "); 559 if (speed_capa & RTE_ETH_LINK_SPEED_5G) 560 printf(" 5 Gbps "); 561 if (speed_capa & RTE_ETH_LINK_SPEED_10G) 562 printf(" 10 Gbps "); 563 if (speed_capa & RTE_ETH_LINK_SPEED_20G) 564 printf(" 20 Gbps "); 565 if (speed_capa & RTE_ETH_LINK_SPEED_25G) 566 printf(" 25 Gbps "); 567 if (speed_capa & RTE_ETH_LINK_SPEED_40G) 568 printf(" 40 Gbps "); 569 if (speed_capa & RTE_ETH_LINK_SPEED_50G) 570 printf(" 50 Gbps "); 571 if (speed_capa & RTE_ETH_LINK_SPEED_56G) 572 printf(" 56 Gbps "); 573 if (speed_capa & RTE_ETH_LINK_SPEED_100G) 574 printf(" 100 Gbps "); 575 if (speed_capa & RTE_ETH_LINK_SPEED_200G) 576 printf(" 200 Gbps "); 577 } 578 579 void 580 device_infos_display(const char *identifier) 581 { 582 static const char *info_border = "*********************"; 583 struct rte_bus *start = NULL, *next; 584 struct rte_dev_iterator dev_iter; 585 char name[RTE_ETH_NAME_MAX_LEN]; 586 struct rte_ether_addr mac_addr; 587 struct rte_device *dev; 588 struct rte_devargs da; 589 portid_t port_id; 590 struct rte_eth_dev_info dev_info; 591 char devstr[128]; 592 593 memset(&da, 0, sizeof(da)); 594 if (!identifier) 595 goto skip_parse; 596 597 if (rte_devargs_parsef(&da, "%s", identifier)) { 598 fprintf(stderr, "cannot parse identifier\n"); 599 return; 600 } 601 602 skip_parse: 603 while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { 604 605 start = next; 606 if (identifier && da.bus != next) 607 continue; 608 609 /* Skip buses that don't have iterate method */ 610 if (!next->dev_iterate) 611 continue; 612 613 snprintf(devstr, sizeof(devstr), "bus=%s", next->name); 614 RTE_DEV_FOREACH(dev, devstr, &dev_iter) { 615 616 if (!dev->driver) 617 continue; 618 /* Check for matching device if identifier is present */ 619 if (identifier && 620 strncmp(da.name, dev->name, strlen(dev->name))) 621 continue; 622 printf("\n%s Infos for device %s %s\n", 623 info_border, dev->name, info_border); 624 printf("Bus name: %s", dev->bus->name); 625 printf("\nDriver name: %s", dev->driver->name); 626 printf("\nDevargs: %s", 627 dev->devargs ? dev->devargs->args : ""); 628 printf("\nConnect to socket: %d", dev->numa_node); 629 printf("\n"); 630 631 /* List ports with matching device name */ 632 RTE_ETH_FOREACH_DEV_OF(port_id, dev) { 633 printf("\n\tPort id: %-2d", port_id); 634 if (eth_macaddr_get_print_err(port_id, 635 &mac_addr) == 0) 636 print_ethaddr("\n\tMAC address: ", 637 &mac_addr); 638 rte_eth_dev_get_name_by_port(port_id, name); 639 printf("\n\tDevice name: %s", name); 640 if (rte_eth_dev_info_get(port_id, &dev_info) == 0) 641 device_infos_display_speeds(dev_info.speed_capa); 642 printf("\n"); 643 } 644 } 645 }; 646 rte_devargs_reset(&da); 647 } 648 649 static void 650 print_dev_capabilities(uint64_t capabilities) 651 { 652 uint64_t single_capa; 653 int begin; 654 int end; 655 int bit; 656 657 if (capabilities == 0) 658 return; 659 660 begin = __builtin_ctzll(capabilities); 661 end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities); 662 663 single_capa = 1ULL << begin; 664 for (bit = begin; bit < end; bit++) { 665 if (capabilities & single_capa) 666 printf(" %s", 667 rte_eth_dev_capability_name(single_capa)); 668 single_capa <<= 1; 669 } 670 } 671 672 void 673 port_infos_display(portid_t port_id) 674 { 675 struct rte_port *port; 676 struct rte_ether_addr mac_addr; 677 struct rte_eth_link link; 678 struct rte_eth_dev_info dev_info; 679 int vlan_offload; 680 struct rte_mempool * mp; 681 static const char *info_border = "*********************"; 682 uint16_t mtu; 683 char name[RTE_ETH_NAME_MAX_LEN]; 684 int ret; 685 char fw_version[ETHDEV_FWVERS_LEN]; 686 687 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 688 print_valid_ports(); 689 return; 690 } 691 port = &ports[port_id]; 692 ret = eth_link_get_nowait_print_err(port_id, &link); 693 if (ret < 0) 694 return; 695 696 ret = eth_dev_info_get_print_err(port_id, &dev_info); 697 if (ret != 0) 698 return; 699 700 printf("\n%s Infos for port %-2d %s\n", 701 info_border, port_id, info_border); 702 if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) 703 print_ethaddr("MAC address: ", &mac_addr); 704 rte_eth_dev_get_name_by_port(port_id, name); 705 printf("\nDevice name: %s", name); 706 printf("\nDriver name: %s", dev_info.driver_name); 707 708 if (rte_eth_dev_fw_version_get(port_id, fw_version, 709 ETHDEV_FWVERS_LEN) == 0) 710 printf("\nFirmware-version: %s", fw_version); 711 else 712 printf("\nFirmware-version: %s", "not available"); 713 714 if (dev_info.device->devargs && dev_info.device->devargs->args) 715 printf("\nDevargs: %s", dev_info.device->devargs->args); 716 printf("\nConnect to socket: %u", port->socket_id); 717 718 if (port_numa[port_id] != NUMA_NO_CONFIG) { 719 mp = mbuf_pool_find(port_numa[port_id], 0); 720 if (mp) 721 printf("\nmemory allocation on the socket: %d", 722 port_numa[port_id]); 723 } else 724 printf("\nmemory allocation on the socket: %u",port->socket_id); 725 726 printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); 727 printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); 728 printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 729 ("full-duplex") : ("half-duplex")); 730 printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? 731 ("On") : ("Off")); 732 733 if (!rte_eth_dev_get_mtu(port_id, &mtu)) 734 printf("MTU: %u\n", mtu); 735 736 printf("Promiscuous mode: %s\n", 737 rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); 738 printf("Allmulticast mode: %s\n", 739 rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled"); 740 printf("Maximum number of MAC addresses: %u\n", 741 (unsigned int)(port->dev_info.max_mac_addrs)); 742 printf("Maximum number of MAC addresses of hash filtering: %u\n", 743 (unsigned int)(port->dev_info.max_hash_mac_addrs)); 744 745 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 746 if (vlan_offload >= 0){ 747 printf("VLAN offload: \n"); 748 if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) 749 printf(" strip on, "); 750 else 751 printf(" strip off, "); 752 753 if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) 754 printf("filter on, "); 755 else 756 printf("filter off, "); 757 758 if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) 759 printf("extend on, "); 760 else 761 printf("extend off, "); 762 763 if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) 764 printf("qinq strip on\n"); 765 else 766 printf("qinq strip off\n"); 767 } 768 769 if (dev_info.hash_key_size > 0) 770 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); 771 if (dev_info.reta_size > 0) 772 printf("Redirection table size: %u\n", dev_info.reta_size); 773 if (!dev_info.flow_type_rss_offloads) 774 printf("No RSS offload flow type is supported.\n"); 775 else { 776 uint16_t i; 777 char *p; 778 779 printf("Supported RSS offload flow types:\n"); 780 for (i = RTE_ETH_FLOW_UNKNOWN + 1; 781 i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { 782 if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) 783 continue; 784 p = flowtype_to_str(i); 785 if (p) 786 printf(" %s\n", p); 787 else 788 printf(" user defined %d\n", i); 789 } 790 } 791 792 printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); 793 printf("Maximum configurable length of RX packet: %u\n", 794 dev_info.max_rx_pktlen); 795 printf("Maximum configurable size of LRO aggregated packet: %u\n", 796 dev_info.max_lro_pkt_size); 797 if (dev_info.max_vfs) 798 printf("Maximum number of VFs: %u\n", dev_info.max_vfs); 799 if (dev_info.max_vmdq_pools) 800 printf("Maximum number of VMDq pools: %u\n", 801 dev_info.max_vmdq_pools); 802 803 printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); 804 printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); 805 printf("Max possible number of RXDs per queue: %hu\n", 806 dev_info.rx_desc_lim.nb_max); 807 printf("Min possible number of RXDs per queue: %hu\n", 808 dev_info.rx_desc_lim.nb_min); 809 printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); 810 811 printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); 812 printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); 813 printf("Max possible number of TXDs per queue: %hu\n", 814 dev_info.tx_desc_lim.nb_max); 815 printf("Min possible number of TXDs per queue: %hu\n", 816 dev_info.tx_desc_lim.nb_min); 817 printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); 818 printf("Max segment number per packet: %hu\n", 819 dev_info.tx_desc_lim.nb_seg_max); 820 printf("Max segment number per MTU/TSO: %hu\n", 821 dev_info.tx_desc_lim.nb_mtu_seg_max); 822 823 printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); 824 print_dev_capabilities(dev_info.dev_capa); 825 printf(" )\n"); 826 /* Show switch info only if valid switch domain and port id is set */ 827 if (dev_info.switch_info.domain_id != 828 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 829 if (dev_info.switch_info.name) 830 printf("Switch name: %s\n", dev_info.switch_info.name); 831 832 printf("Switch domain Id: %u\n", 833 dev_info.switch_info.domain_id); 834 printf("Switch Port Id: %u\n", 835 dev_info.switch_info.port_id); 836 if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) 837 printf("Switch Rx domain: %u\n", 838 dev_info.switch_info.rx_domain); 839 } 840 } 841 842 void 843 port_summary_header_display(void) 844 { 845 uint16_t port_number; 846 847 port_number = rte_eth_dev_count_avail(); 848 printf("Number of available ports: %i\n", port_number); 849 printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", 850 "Driver", "Status", "Link"); 851 } 852 853 void 854 port_summary_display(portid_t port_id) 855 { 856 struct rte_ether_addr mac_addr; 857 struct rte_eth_link link; 858 struct rte_eth_dev_info dev_info; 859 char name[RTE_ETH_NAME_MAX_LEN]; 860 int ret; 861 862 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 863 print_valid_ports(); 864 return; 865 } 866 867 ret = eth_link_get_nowait_print_err(port_id, &link); 868 if (ret < 0) 869 return; 870 871 ret = eth_dev_info_get_print_err(port_id, &dev_info); 872 if (ret != 0) 873 return; 874 875 rte_eth_dev_get_name_by_port(port_id, name); 876 ret = eth_macaddr_get_print_err(port_id, &mac_addr); 877 if (ret != 0) 878 return; 879 880 printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", 881 port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, 882 dev_info.driver_name, (link.link_status) ? ("up") : ("down"), 883 rte_eth_link_speed_to_str(link.link_speed)); 884 } 885 886 void 887 port_eeprom_display(portid_t port_id) 888 { 889 struct rte_dev_eeprom_info einfo; 890 int ret; 891 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 892 print_valid_ports(); 893 return; 894 } 895 896 int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); 897 if (len_eeprom < 0) { 898 switch (len_eeprom) { 899 case -ENODEV: 900 fprintf(stderr, "port index %d invalid\n", port_id); 901 break; 902 case -ENOTSUP: 903 fprintf(stderr, "operation not supported by device\n"); 904 break; 905 case -EIO: 906 fprintf(stderr, "device is removed\n"); 907 break; 908 default: 909 fprintf(stderr, "Unable to get EEPROM: %d\n", 910 len_eeprom); 911 break; 912 } 913 return; 914 } 915 916 einfo.offset = 0; 917 einfo.length = len_eeprom; 918 einfo.data = calloc(1, len_eeprom); 919 if (!einfo.data) { 920 fprintf(stderr, 921 "Allocation of port %u eeprom data failed\n", 922 port_id); 923 return; 924 } 925 926 ret = rte_eth_dev_get_eeprom(port_id, &einfo); 927 if (ret != 0) { 928 switch (ret) { 929 case -ENODEV: 930 fprintf(stderr, "port index %d invalid\n", port_id); 931 break; 932 case -ENOTSUP: 933 fprintf(stderr, "operation not supported by device\n"); 934 break; 935 case -EIO: 936 fprintf(stderr, "device is removed\n"); 937 break; 938 default: 939 fprintf(stderr, "Unable to get EEPROM: %d\n", ret); 940 break; 941 } 942 free(einfo.data); 943 return; 944 } 945 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 946 printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); 947 free(einfo.data); 948 } 949 950 void 951 port_module_eeprom_display(portid_t port_id) 952 { 953 struct rte_eth_dev_module_info minfo; 954 struct rte_dev_eeprom_info einfo; 955 int ret; 956 957 if (port_id_is_invalid(port_id, ENABLED_WARN)) { 958 print_valid_ports(); 959 return; 960 } 961 962 963 ret = rte_eth_dev_get_module_info(port_id, &minfo); 964 if (ret != 0) { 965 switch (ret) { 966 case -ENODEV: 967 fprintf(stderr, "port index %d invalid\n", port_id); 968 break; 969 case -ENOTSUP: 970 fprintf(stderr, "operation not supported by device\n"); 971 break; 972 case -EIO: 973 fprintf(stderr, "device is removed\n"); 974 break; 975 default: 976 fprintf(stderr, "Unable to get module EEPROM: %d\n", 977 ret); 978 break; 979 } 980 return; 981 } 982 983 einfo.offset = 0; 984 einfo.length = minfo.eeprom_len; 985 einfo.data = calloc(1, minfo.eeprom_len); 986 if (!einfo.data) { 987 fprintf(stderr, 988 "Allocation of port %u eeprom data failed\n", 989 port_id); 990 return; 991 } 992 993 ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); 994 if (ret != 0) { 995 switch (ret) { 996 case -ENODEV: 997 fprintf(stderr, "port index %d invalid\n", port_id); 998 break; 999 case -ENOTSUP: 1000 fprintf(stderr, "operation not supported by device\n"); 1001 break; 1002 case -EIO: 1003 fprintf(stderr, "device is removed\n"); 1004 break; 1005 default: 1006 fprintf(stderr, "Unable to get module EEPROM: %d\n", 1007 ret); 1008 break; 1009 } 1010 free(einfo.data); 1011 return; 1012 } 1013 1014 rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); 1015 printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); 1016 free(einfo.data); 1017 } 1018 1019 int 1020 port_id_is_invalid(portid_t port_id, enum print_warning warning) 1021 { 1022 uint16_t pid; 1023 1024 if (port_id == (portid_t)RTE_PORT_ALL) 1025 return 0; 1026 1027 RTE_ETH_FOREACH_DEV(pid) 1028 if (port_id == pid) 1029 return 0; 1030 1031 if (warning == ENABLED_WARN) 1032 fprintf(stderr, "Invalid port %d\n", port_id); 1033 1034 return 1; 1035 } 1036 1037 void print_valid_ports(void) 1038 { 1039 portid_t pid; 1040 1041 printf("The valid ports array is ["); 1042 RTE_ETH_FOREACH_DEV(pid) { 1043 printf(" %d", pid); 1044 } 1045 printf(" ]\n"); 1046 } 1047 1048 static int 1049 vlan_id_is_invalid(uint16_t vlan_id) 1050 { 1051 if (vlan_id < 4096) 1052 return 0; 1053 fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); 1054 return 1; 1055 } 1056 1057 static int 1058 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) 1059 { 1060 const struct rte_pci_device *pci_dev; 1061 const struct rte_bus *bus; 1062 uint64_t pci_len; 1063 1064 if (reg_off & 0x3) { 1065 fprintf(stderr, 1066 "Port register offset 0x%X not aligned on a 4-byte boundary\n", 1067 (unsigned int)reg_off); 1068 return 1; 1069 } 1070 1071 if (!ports[port_id].dev_info.device) { 1072 fprintf(stderr, "Invalid device\n"); 1073 return 0; 1074 } 1075 1076 bus = rte_bus_find_by_device(ports[port_id].dev_info.device); 1077 if (bus && !strcmp(bus->name, "pci")) { 1078 pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); 1079 } else { 1080 fprintf(stderr, "Not a PCI device\n"); 1081 return 1; 1082 } 1083 1084 pci_len = pci_dev->mem_resource[0].len; 1085 if (reg_off >= pci_len) { 1086 fprintf(stderr, 1087 "Port %d: register offset %u (0x%X) out of port PCI resource (length=%"PRIu64")\n", 1088 port_id, (unsigned int)reg_off, (unsigned int)reg_off, 1089 pci_len); 1090 return 1; 1091 } 1092 return 0; 1093 } 1094 1095 static int 1096 reg_bit_pos_is_invalid(uint8_t bit_pos) 1097 { 1098 if (bit_pos <= 31) 1099 return 0; 1100 fprintf(stderr, "Invalid bit position %d (must be <= 31)\n", bit_pos); 1101 return 1; 1102 } 1103 1104 #define display_port_and_reg_off(port_id, reg_off) \ 1105 printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) 1106 1107 static inline void 1108 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1109 { 1110 display_port_and_reg_off(port_id, (unsigned)reg_off); 1111 printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); 1112 } 1113 1114 void 1115 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) 1116 { 1117 uint32_t reg_v; 1118 1119 1120 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1121 return; 1122 if (port_reg_off_is_invalid(port_id, reg_off)) 1123 return; 1124 if (reg_bit_pos_is_invalid(bit_x)) 1125 return; 1126 reg_v = port_id_pci_reg_read(port_id, reg_off); 1127 display_port_and_reg_off(port_id, (unsigned)reg_off); 1128 printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); 1129 } 1130 1131 void 1132 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 1133 uint8_t bit1_pos, uint8_t bit2_pos) 1134 { 1135 uint32_t reg_v; 1136 uint8_t l_bit; 1137 uint8_t h_bit; 1138 1139 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1140 return; 1141 if (port_reg_off_is_invalid(port_id, reg_off)) 1142 return; 1143 if (reg_bit_pos_is_invalid(bit1_pos)) 1144 return; 1145 if (reg_bit_pos_is_invalid(bit2_pos)) 1146 return; 1147 if (bit1_pos > bit2_pos) 1148 l_bit = bit2_pos, h_bit = bit1_pos; 1149 else 1150 l_bit = bit1_pos, h_bit = bit2_pos; 1151 1152 reg_v = port_id_pci_reg_read(port_id, reg_off); 1153 reg_v >>= l_bit; 1154 if (h_bit < 31) 1155 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); 1156 display_port_and_reg_off(port_id, (unsigned)reg_off); 1157 printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, 1158 ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); 1159 } 1160 1161 void 1162 port_reg_display(portid_t port_id, uint32_t reg_off) 1163 { 1164 uint32_t reg_v; 1165 1166 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1167 return; 1168 if (port_reg_off_is_invalid(port_id, reg_off)) 1169 return; 1170 reg_v = port_id_pci_reg_read(port_id, reg_off); 1171 display_port_reg_value(port_id, reg_off, reg_v); 1172 } 1173 1174 void 1175 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 1176 uint8_t bit_v) 1177 { 1178 uint32_t reg_v; 1179 1180 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1181 return; 1182 if (port_reg_off_is_invalid(port_id, reg_off)) 1183 return; 1184 if (reg_bit_pos_is_invalid(bit_pos)) 1185 return; 1186 if (bit_v > 1) { 1187 fprintf(stderr, "Invalid bit value %d (must be 0 or 1)\n", 1188 (int) bit_v); 1189 return; 1190 } 1191 reg_v = port_id_pci_reg_read(port_id, reg_off); 1192 if (bit_v == 0) 1193 reg_v &= ~(1 << bit_pos); 1194 else 1195 reg_v |= (1 << bit_pos); 1196 port_id_pci_reg_write(port_id, reg_off, reg_v); 1197 display_port_reg_value(port_id, reg_off, reg_v); 1198 } 1199 1200 void 1201 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 1202 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) 1203 { 1204 uint32_t max_v; 1205 uint32_t reg_v; 1206 uint8_t l_bit; 1207 uint8_t h_bit; 1208 1209 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1210 return; 1211 if (port_reg_off_is_invalid(port_id, reg_off)) 1212 return; 1213 if (reg_bit_pos_is_invalid(bit1_pos)) 1214 return; 1215 if (reg_bit_pos_is_invalid(bit2_pos)) 1216 return; 1217 if (bit1_pos > bit2_pos) 1218 l_bit = bit2_pos, h_bit = bit1_pos; 1219 else 1220 l_bit = bit1_pos, h_bit = bit2_pos; 1221 1222 if ((h_bit - l_bit) < 31) 1223 max_v = (1 << (h_bit - l_bit + 1)) - 1; 1224 else 1225 max_v = 0xFFFFFFFF; 1226 1227 if (value > max_v) { 1228 fprintf(stderr, "Invalid value %u (0x%x) must be < %u (0x%x)\n", 1229 (unsigned)value, (unsigned)value, 1230 (unsigned)max_v, (unsigned)max_v); 1231 return; 1232 } 1233 reg_v = port_id_pci_reg_read(port_id, reg_off); 1234 reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ 1235 reg_v |= (value << l_bit); /* Set changed bits */ 1236 port_id_pci_reg_write(port_id, reg_off, reg_v); 1237 display_port_reg_value(port_id, reg_off, reg_v); 1238 } 1239 1240 void 1241 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) 1242 { 1243 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1244 return; 1245 if (port_reg_off_is_invalid(port_id, reg_off)) 1246 return; 1247 port_id_pci_reg_write(port_id, reg_off, reg_v); 1248 display_port_reg_value(port_id, reg_off, reg_v); 1249 } 1250 1251 void 1252 port_mtu_set(portid_t port_id, uint16_t mtu) 1253 { 1254 struct rte_port *port = &ports[port_id]; 1255 int diag; 1256 1257 if (port_id_is_invalid(port_id, ENABLED_WARN)) 1258 return; 1259 1260 if (port->need_reconfig == 0) { 1261 diag = rte_eth_dev_set_mtu(port_id, mtu); 1262 if (diag != 0) { 1263 fprintf(stderr, "Set MTU failed. diag=%d\n", diag); 1264 return; 1265 } 1266 } 1267 1268 port->dev_conf.rxmode.mtu = mtu; 1269 } 1270 1271 /* Generic flow management functions. */ 1272 1273 static struct port_flow_tunnel * 1274 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) 1275 { 1276 struct port_flow_tunnel *flow_tunnel; 1277 1278 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1279 if (flow_tunnel->id == port_tunnel_id) 1280 goto out; 1281 } 1282 flow_tunnel = NULL; 1283 1284 out: 1285 return flow_tunnel; 1286 } 1287 1288 const char * 1289 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) 1290 { 1291 const char *type; 1292 switch (tunnel->type) { 1293 default: 1294 type = "unknown"; 1295 break; 1296 case RTE_FLOW_ITEM_TYPE_VXLAN: 1297 type = "vxlan"; 1298 break; 1299 case RTE_FLOW_ITEM_TYPE_GRE: 1300 type = "gre"; 1301 break; 1302 case RTE_FLOW_ITEM_TYPE_NVGRE: 1303 type = "nvgre"; 1304 break; 1305 case RTE_FLOW_ITEM_TYPE_GENEVE: 1306 type = "geneve"; 1307 break; 1308 } 1309 1310 return type; 1311 } 1312 1313 struct port_flow_tunnel * 1314 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) 1315 { 1316 struct rte_port *port = &ports[port_id]; 1317 struct port_flow_tunnel *flow_tunnel; 1318 1319 LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { 1320 if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) 1321 goto out; 1322 } 1323 flow_tunnel = NULL; 1324 1325 out: 1326 return flow_tunnel; 1327 } 1328 1329 void port_flow_tunnel_list(portid_t port_id) 1330 { 1331 struct rte_port *port = &ports[port_id]; 1332 struct port_flow_tunnel *flt; 1333 1334 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1335 printf("port %u tunnel #%u type=%s", 1336 port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); 1337 if (flt->tunnel.tun_id) 1338 printf(" id=%" PRIu64, flt->tunnel.tun_id); 1339 printf("\n"); 1340 } 1341 } 1342 1343 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) 1344 { 1345 struct rte_port *port = &ports[port_id]; 1346 struct port_flow_tunnel *flt; 1347 1348 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1349 if (flt->id == tunnel_id) 1350 break; 1351 } 1352 if (flt) { 1353 LIST_REMOVE(flt, chain); 1354 free(flt); 1355 printf("port %u: flow tunnel #%u destroyed\n", 1356 port_id, tunnel_id); 1357 } 1358 } 1359 1360 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) 1361 { 1362 struct rte_port *port = &ports[port_id]; 1363 enum rte_flow_item_type type; 1364 struct port_flow_tunnel *flt; 1365 1366 if (!strcmp(ops->type, "vxlan")) 1367 type = RTE_FLOW_ITEM_TYPE_VXLAN; 1368 else if (!strcmp(ops->type, "gre")) 1369 type = RTE_FLOW_ITEM_TYPE_GRE; 1370 else if (!strcmp(ops->type, "nvgre")) 1371 type = RTE_FLOW_ITEM_TYPE_NVGRE; 1372 else if (!strcmp(ops->type, "geneve")) 1373 type = RTE_FLOW_ITEM_TYPE_GENEVE; 1374 else { 1375 fprintf(stderr, "cannot offload \"%s\" tunnel type\n", 1376 ops->type); 1377 return; 1378 } 1379 LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { 1380 if (flt->tunnel.type == type) 1381 break; 1382 } 1383 if (!flt) { 1384 flt = calloc(1, sizeof(*flt)); 1385 if (!flt) { 1386 fprintf(stderr, "failed to allocate port flt object\n"); 1387 return; 1388 } 1389 flt->tunnel.type = type; 1390 flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : 1391 LIST_FIRST(&port->flow_tunnel_list)->id + 1; 1392 LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); 1393 } 1394 printf("port %d: flow tunnel #%u type %s\n", 1395 port_id, flt->id, ops->type); 1396 } 1397 1398 /** Generate a port_flow entry from attributes/pattern/actions. */ 1399 static struct port_flow * 1400 port_flow_new(const struct rte_flow_attr *attr, 1401 const struct rte_flow_item *pattern, 1402 const struct rte_flow_action *actions, 1403 struct rte_flow_error *error) 1404 { 1405 const struct rte_flow_conv_rule rule = { 1406 .attr_ro = attr, 1407 .pattern_ro = pattern, 1408 .actions_ro = actions, 1409 }; 1410 struct port_flow *pf; 1411 int ret; 1412 1413 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); 1414 if (ret < 0) 1415 return NULL; 1416 pf = calloc(1, offsetof(struct port_flow, rule) + ret); 1417 if (!pf) { 1418 rte_flow_error_set 1419 (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1420 "calloc() failed"); 1421 return NULL; 1422 } 1423 if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, 1424 error) >= 0) 1425 return pf; 1426 free(pf); 1427 return NULL; 1428 } 1429 1430 /** Print a message out of a flow error. */ 1431 static int 1432 port_flow_complain(struct rte_flow_error *error) 1433 { 1434 static const char *const errstrlist[] = { 1435 [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 1436 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 1437 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 1438 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 1439 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 1440 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 1441 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 1442 [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", 1443 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 1444 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 1445 [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", 1446 [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", 1447 [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", 1448 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 1449 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 1450 [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", 1451 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 1452 }; 1453 const char *errstr; 1454 char buf[32]; 1455 int err = rte_errno; 1456 1457 if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 1458 !errstrlist[error->type]) 1459 errstr = "unknown type"; 1460 else 1461 errstr = errstrlist[error->type]; 1462 fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", 1463 __func__, error->type, errstr, 1464 error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 1465 error->cause), buf) : "", 1466 error->message ? error->message : "(no stated reason)", 1467 rte_strerror(err)); 1468 1469 switch (error->type) { 1470 case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER: 1471 fprintf(stderr, "The status suggests the use of \"transfer\" " 1472 "as the possible cause of the failure. Make " 1473 "sure that the flow in question and its " 1474 "indirect components (if any) are managed " 1475 "via \"transfer\" proxy port. Use command " 1476 "\"show port (port_id) flow transfer proxy\" " 1477 "to figure out the proxy port ID\n"); 1478 break; 1479 default: 1480 break; 1481 } 1482 1483 return -err; 1484 } 1485 1486 static void 1487 rss_config_display(struct rte_flow_action_rss *rss_conf) 1488 { 1489 uint8_t i; 1490 1491 if (rss_conf == NULL) { 1492 fprintf(stderr, "Invalid rule\n"); 1493 return; 1494 } 1495 1496 printf("RSS:\n" 1497 " queues:"); 1498 if (rss_conf->queue_num == 0) 1499 printf(" none"); 1500 for (i = 0; i < rss_conf->queue_num; i++) 1501 printf(" %d", rss_conf->queue[i]); 1502 printf("\n"); 1503 1504 printf(" function: "); 1505 switch (rss_conf->func) { 1506 case RTE_ETH_HASH_FUNCTION_DEFAULT: 1507 printf("default\n"); 1508 break; 1509 case RTE_ETH_HASH_FUNCTION_TOEPLITZ: 1510 printf("toeplitz\n"); 1511 break; 1512 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: 1513 printf("simple_xor\n"); 1514 break; 1515 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: 1516 printf("symmetric_toeplitz\n"); 1517 break; 1518 default: 1519 printf("Unknown function\n"); 1520 return; 1521 } 1522 1523 printf(" types:\n"); 1524 if (rss_conf->types == 0) { 1525 printf(" none\n"); 1526 return; 1527 } 1528 for (i = 0; rss_type_table[i].str; i++) { 1529 if ((rss_conf->types & 1530 rss_type_table[i].rss_type) == 1531 rss_type_table[i].rss_type && 1532 rss_type_table[i].rss_type != 0) 1533 printf(" %s\n", rss_type_table[i].str); 1534 } 1535 } 1536 1537 static struct port_indirect_action * 1538 action_get_by_id(portid_t port_id, uint32_t id) 1539 { 1540 struct rte_port *port; 1541 struct port_indirect_action **ppia; 1542 struct port_indirect_action *pia = NULL; 1543 1544 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1545 port_id == (portid_t)RTE_PORT_ALL) 1546 return NULL; 1547 port = &ports[port_id]; 1548 ppia = &port->actions_list; 1549 while (*ppia) { 1550 if ((*ppia)->id == id) { 1551 pia = *ppia; 1552 break; 1553 } 1554 ppia = &(*ppia)->next; 1555 } 1556 if (!pia) 1557 fprintf(stderr, 1558 "Failed to find indirect action #%u on port %u\n", 1559 id, port_id); 1560 return pia; 1561 } 1562 1563 static int 1564 action_alloc(portid_t port_id, uint32_t id, 1565 struct port_indirect_action **action) 1566 { 1567 struct rte_port *port; 1568 struct port_indirect_action **ppia; 1569 struct port_indirect_action *pia = NULL; 1570 1571 *action = NULL; 1572 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1573 port_id == (portid_t)RTE_PORT_ALL) 1574 return -EINVAL; 1575 port = &ports[port_id]; 1576 if (id == UINT32_MAX) { 1577 /* taking first available ID */ 1578 if (port->actions_list) { 1579 if (port->actions_list->id == UINT32_MAX - 1) { 1580 fprintf(stderr, 1581 "Highest indirect action ID is already assigned, delete it first\n"); 1582 return -ENOMEM; 1583 } 1584 id = port->actions_list->id + 1; 1585 } else { 1586 id = 0; 1587 } 1588 } 1589 pia = calloc(1, sizeof(*pia)); 1590 if (!pia) { 1591 fprintf(stderr, 1592 "Allocation of port %u indirect action failed\n", 1593 port_id); 1594 return -ENOMEM; 1595 } 1596 ppia = &port->actions_list; 1597 while (*ppia && (*ppia)->id > id) 1598 ppia = &(*ppia)->next; 1599 if (*ppia && (*ppia)->id == id) { 1600 fprintf(stderr, 1601 "Indirect action #%u is already assigned, delete it first\n", 1602 id); 1603 free(pia); 1604 return -EINVAL; 1605 } 1606 pia->next = *ppia; 1607 pia->id = id; 1608 *ppia = pia; 1609 *action = pia; 1610 return 0; 1611 } 1612 1613 static int 1614 template_alloc(uint32_t id, struct port_template **template, 1615 struct port_template **list) 1616 { 1617 struct port_template *lst = *list; 1618 struct port_template **ppt; 1619 struct port_template *pt = NULL; 1620 1621 *template = NULL; 1622 if (id == UINT32_MAX) { 1623 /* taking first available ID */ 1624 if (lst) { 1625 if (lst->id == UINT32_MAX - 1) { 1626 printf("Highest template ID is already" 1627 " assigned, delete it first\n"); 1628 return -ENOMEM; 1629 } 1630 id = lst->id + 1; 1631 } else { 1632 id = 0; 1633 } 1634 } 1635 pt = calloc(1, sizeof(*pt)); 1636 if (!pt) { 1637 printf("Allocation of port template failed\n"); 1638 return -ENOMEM; 1639 } 1640 ppt = list; 1641 while (*ppt && (*ppt)->id > id) 1642 ppt = &(*ppt)->next; 1643 if (*ppt && (*ppt)->id == id) { 1644 printf("Template #%u is already assigned," 1645 " delete it first\n", id); 1646 free(pt); 1647 return -EINVAL; 1648 } 1649 pt->next = *ppt; 1650 pt->id = id; 1651 *ppt = pt; 1652 *template = pt; 1653 return 0; 1654 } 1655 1656 static int 1657 table_alloc(uint32_t id, struct port_table **table, 1658 struct port_table **list) 1659 { 1660 struct port_table *lst = *list; 1661 struct port_table **ppt; 1662 struct port_table *pt = NULL; 1663 1664 *table = NULL; 1665 if (id == UINT32_MAX) { 1666 /* taking first available ID */ 1667 if (lst) { 1668 if (lst->id == UINT32_MAX - 1) { 1669 printf("Highest table ID is already" 1670 " assigned, delete it first\n"); 1671 return -ENOMEM; 1672 } 1673 id = lst->id + 1; 1674 } else { 1675 id = 0; 1676 } 1677 } 1678 pt = calloc(1, sizeof(*pt)); 1679 if (!pt) { 1680 printf("Allocation of table failed\n"); 1681 return -ENOMEM; 1682 } 1683 ppt = list; 1684 while (*ppt && (*ppt)->id > id) 1685 ppt = &(*ppt)->next; 1686 if (*ppt && (*ppt)->id == id) { 1687 printf("Table #%u is already assigned," 1688 " delete it first\n", id); 1689 free(pt); 1690 return -EINVAL; 1691 } 1692 pt->next = *ppt; 1693 pt->id = id; 1694 *ppt = pt; 1695 *table = pt; 1696 return 0; 1697 } 1698 1699 /** Get info about flow management resources. */ 1700 int 1701 port_flow_get_info(portid_t port_id) 1702 { 1703 struct rte_flow_port_info port_info; 1704 struct rte_flow_queue_info queue_info; 1705 struct rte_flow_error error; 1706 1707 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1708 port_id == (portid_t)RTE_PORT_ALL) 1709 return -EINVAL; 1710 /* Poisoning to make sure PMDs update it in case of error. */ 1711 memset(&error, 0x99, sizeof(error)); 1712 memset(&port_info, 0, sizeof(port_info)); 1713 memset(&queue_info, 0, sizeof(queue_info)); 1714 if (rte_flow_info_get(port_id, &port_info, &queue_info, &error)) 1715 return port_flow_complain(&error); 1716 printf("Flow engine resources on port %u:\n" 1717 "Number of queues: %d\n" 1718 "Size of queues: %d\n" 1719 "Number of counters: %d\n" 1720 "Number of aging objects: %d\n" 1721 "Number of meter actions: %d\n", 1722 port_id, port_info.max_nb_queues, 1723 queue_info.max_size, 1724 port_info.max_nb_counters, 1725 port_info.max_nb_aging_objects, 1726 port_info.max_nb_meters); 1727 return 0; 1728 } 1729 1730 /** Configure flow management resources. */ 1731 int 1732 port_flow_configure(portid_t port_id, 1733 const struct rte_flow_port_attr *port_attr, 1734 uint16_t nb_queue, 1735 const struct rte_flow_queue_attr *queue_attr) 1736 { 1737 struct rte_port *port; 1738 struct rte_flow_error error; 1739 const struct rte_flow_queue_attr *attr_list[nb_queue]; 1740 int std_queue; 1741 1742 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1743 port_id == (portid_t)RTE_PORT_ALL) 1744 return -EINVAL; 1745 port = &ports[port_id]; 1746 port->queue_nb = nb_queue; 1747 port->queue_sz = queue_attr->size; 1748 for (std_queue = 0; std_queue < nb_queue; std_queue++) 1749 attr_list[std_queue] = queue_attr; 1750 /* Poisoning to make sure PMDs update it in case of error. */ 1751 memset(&error, 0x66, sizeof(error)); 1752 if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error)) 1753 return port_flow_complain(&error); 1754 printf("Configure flows on port %u: " 1755 "number of queues %d with %d elements\n", 1756 port_id, nb_queue, queue_attr->size); 1757 return 0; 1758 } 1759 1760 /** Create indirect action */ 1761 int 1762 port_action_handle_create(portid_t port_id, uint32_t id, 1763 const struct rte_flow_indir_action_conf *conf, 1764 const struct rte_flow_action *action) 1765 { 1766 struct port_indirect_action *pia; 1767 int ret; 1768 struct rte_flow_error error; 1769 1770 ret = action_alloc(port_id, id, &pia); 1771 if (ret) 1772 return ret; 1773 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 1774 struct rte_flow_action_age *age = 1775 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 1776 1777 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 1778 age->context = &pia->age_type; 1779 } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { 1780 struct rte_flow_action_conntrack *ct = 1781 (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); 1782 1783 memcpy(ct, &conntrack_context, sizeof(*ct)); 1784 } 1785 /* Poisoning to make sure PMDs update it in case of error. */ 1786 memset(&error, 0x22, sizeof(error)); 1787 pia->handle = rte_flow_action_handle_create(port_id, conf, action, 1788 &error); 1789 if (!pia->handle) { 1790 uint32_t destroy_id = pia->id; 1791 port_action_handle_destroy(port_id, 1, &destroy_id); 1792 return port_flow_complain(&error); 1793 } 1794 pia->type = action->type; 1795 printf("Indirect action #%u created\n", pia->id); 1796 return 0; 1797 } 1798 1799 /** Destroy indirect action */ 1800 int 1801 port_action_handle_destroy(portid_t port_id, 1802 uint32_t n, 1803 const uint32_t *actions) 1804 { 1805 struct rte_port *port; 1806 struct port_indirect_action **tmp; 1807 uint32_t c = 0; 1808 int ret = 0; 1809 1810 if (port_id_is_invalid(port_id, ENABLED_WARN) || 1811 port_id == (portid_t)RTE_PORT_ALL) 1812 return -EINVAL; 1813 port = &ports[port_id]; 1814 tmp = &port->actions_list; 1815 while (*tmp) { 1816 uint32_t i; 1817 1818 for (i = 0; i != n; ++i) { 1819 struct rte_flow_error error; 1820 struct port_indirect_action *pia = *tmp; 1821 1822 if (actions[i] != pia->id) 1823 continue; 1824 /* 1825 * Poisoning to make sure PMDs update it in case 1826 * of error. 1827 */ 1828 memset(&error, 0x33, sizeof(error)); 1829 1830 if (pia->handle && rte_flow_action_handle_destroy( 1831 port_id, pia->handle, &error)) { 1832 ret = port_flow_complain(&error); 1833 continue; 1834 } 1835 *tmp = pia->next; 1836 printf("Indirect action #%u destroyed\n", pia->id); 1837 free(pia); 1838 break; 1839 } 1840 if (i == n) 1841 tmp = &(*tmp)->next; 1842 ++c; 1843 } 1844 return ret; 1845 } 1846 1847 1848 /** Get indirect action by port + id */ 1849 struct rte_flow_action_handle * 1850 port_action_handle_get_by_id(portid_t port_id, uint32_t id) 1851 { 1852 1853 struct port_indirect_action *pia = action_get_by_id(port_id, id); 1854 1855 return (pia) ? pia->handle : NULL; 1856 } 1857 1858 /** Update indirect action */ 1859 int 1860 port_action_handle_update(portid_t port_id, uint32_t id, 1861 const struct rte_flow_action *action) 1862 { 1863 struct rte_flow_error error; 1864 struct rte_flow_action_handle *action_handle; 1865 struct port_indirect_action *pia; 1866 const void *update; 1867 1868 action_handle = port_action_handle_get_by_id(port_id, id); 1869 if (!action_handle) 1870 return -EINVAL; 1871 pia = action_get_by_id(port_id, id); 1872 if (!pia) 1873 return -EINVAL; 1874 switch (pia->type) { 1875 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1876 update = action->conf; 1877 break; 1878 default: 1879 update = action; 1880 break; 1881 } 1882 if (rte_flow_action_handle_update(port_id, action_handle, update, 1883 &error)) { 1884 return port_flow_complain(&error); 1885 } 1886 printf("Indirect action #%u updated\n", id); 1887 return 0; 1888 } 1889 1890 int 1891 port_action_handle_query(portid_t port_id, uint32_t id) 1892 { 1893 struct rte_flow_error error; 1894 struct port_indirect_action *pia; 1895 union { 1896 struct rte_flow_query_count count; 1897 struct rte_flow_query_age age; 1898 struct rte_flow_action_conntrack ct; 1899 } query; 1900 1901 pia = action_get_by_id(port_id, id); 1902 if (!pia) 1903 return -EINVAL; 1904 switch (pia->type) { 1905 case RTE_FLOW_ACTION_TYPE_AGE: 1906 case RTE_FLOW_ACTION_TYPE_COUNT: 1907 break; 1908 default: 1909 fprintf(stderr, 1910 "Indirect action %u (type: %d) on port %u doesn't support query\n", 1911 id, pia->type, port_id); 1912 return -ENOTSUP; 1913 } 1914 /* Poisoning to make sure PMDs update it in case of error. */ 1915 memset(&error, 0x55, sizeof(error)); 1916 memset(&query, 0, sizeof(query)); 1917 if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) 1918 return port_flow_complain(&error); 1919 switch (pia->type) { 1920 case RTE_FLOW_ACTION_TYPE_AGE: 1921 printf("Indirect AGE action:\n" 1922 " aged: %u\n" 1923 " sec_since_last_hit_valid: %u\n" 1924 " sec_since_last_hit: %" PRIu32 "\n", 1925 query.age.aged, 1926 query.age.sec_since_last_hit_valid, 1927 query.age.sec_since_last_hit); 1928 break; 1929 case RTE_FLOW_ACTION_TYPE_COUNT: 1930 printf("Indirect COUNT action:\n" 1931 " hits_set: %u\n" 1932 " bytes_set: %u\n" 1933 " hits: %" PRIu64 "\n" 1934 " bytes: %" PRIu64 "\n", 1935 query.count.hits_set, 1936 query.count.bytes_set, 1937 query.count.hits, 1938 query.count.bytes); 1939 break; 1940 case RTE_FLOW_ACTION_TYPE_CONNTRACK: 1941 printf("Conntrack Context:\n" 1942 " Peer: %u, Flow dir: %s, Enable: %u\n" 1943 " Live: %u, SACK: %u, CACK: %u\n" 1944 " Packet dir: %s, Liberal: %u, State: %u\n" 1945 " Factor: %u, Retrans: %u, TCP flags: %u\n" 1946 " Last Seq: %u, Last ACK: %u\n" 1947 " Last Win: %u, Last End: %u\n", 1948 query.ct.peer_port, 1949 query.ct.is_original_dir ? "Original" : "Reply", 1950 query.ct.enable, query.ct.live_connection, 1951 query.ct.selective_ack, query.ct.challenge_ack_passed, 1952 query.ct.last_direction ? "Original" : "Reply", 1953 query.ct.liberal_mode, query.ct.state, 1954 query.ct.max_ack_window, query.ct.retransmission_limit, 1955 query.ct.last_index, query.ct.last_seq, 1956 query.ct.last_ack, query.ct.last_window, 1957 query.ct.last_end); 1958 printf(" Original Dir:\n" 1959 " scale: %u, fin: %u, ack seen: %u\n" 1960 " unacked data: %u\n Sent end: %u," 1961 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1962 query.ct.original_dir.scale, 1963 query.ct.original_dir.close_initiated, 1964 query.ct.original_dir.last_ack_seen, 1965 query.ct.original_dir.data_unacked, 1966 query.ct.original_dir.sent_end, 1967 query.ct.original_dir.reply_end, 1968 query.ct.original_dir.max_win, 1969 query.ct.original_dir.max_ack); 1970 printf(" Reply Dir:\n" 1971 " scale: %u, fin: %u, ack seen: %u\n" 1972 " unacked data: %u\n Sent end: %u," 1973 " Reply end: %u, Max win: %u, Max ACK: %u\n", 1974 query.ct.reply_dir.scale, 1975 query.ct.reply_dir.close_initiated, 1976 query.ct.reply_dir.last_ack_seen, 1977 query.ct.reply_dir.data_unacked, 1978 query.ct.reply_dir.sent_end, 1979 query.ct.reply_dir.reply_end, 1980 query.ct.reply_dir.max_win, 1981 query.ct.reply_dir.max_ack); 1982 break; 1983 default: 1984 fprintf(stderr, 1985 "Indirect action %u (type: %d) on port %u doesn't support query\n", 1986 id, pia->type, port_id); 1987 break; 1988 } 1989 return 0; 1990 } 1991 1992 static struct port_flow_tunnel * 1993 port_flow_tunnel_offload_cmd_prep(portid_t port_id, 1994 const struct rte_flow_item *pattern, 1995 const struct rte_flow_action *actions, 1996 const struct tunnel_ops *tunnel_ops) 1997 { 1998 int ret; 1999 struct rte_port *port; 2000 struct port_flow_tunnel *pft; 2001 struct rte_flow_error error; 2002 2003 port = &ports[port_id]; 2004 pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); 2005 if (!pft) { 2006 fprintf(stderr, "failed to locate port flow tunnel #%u\n", 2007 tunnel_ops->id); 2008 return NULL; 2009 } 2010 if (tunnel_ops->actions) { 2011 uint32_t num_actions; 2012 const struct rte_flow_action *aptr; 2013 2014 ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, 2015 &pft->pmd_actions, 2016 &pft->num_pmd_actions, 2017 &error); 2018 if (ret) { 2019 port_flow_complain(&error); 2020 return NULL; 2021 } 2022 for (aptr = actions, num_actions = 1; 2023 aptr->type != RTE_FLOW_ACTION_TYPE_END; 2024 aptr++, num_actions++); 2025 pft->actions = malloc( 2026 (num_actions + pft->num_pmd_actions) * 2027 sizeof(actions[0])); 2028 if (!pft->actions) { 2029 rte_flow_tunnel_action_decap_release( 2030 port_id, pft->actions, 2031 pft->num_pmd_actions, &error); 2032 return NULL; 2033 } 2034 rte_memcpy(pft->actions, pft->pmd_actions, 2035 pft->num_pmd_actions * sizeof(actions[0])); 2036 rte_memcpy(pft->actions + pft->num_pmd_actions, actions, 2037 num_actions * sizeof(actions[0])); 2038 } 2039 if (tunnel_ops->items) { 2040 uint32_t num_items; 2041 const struct rte_flow_item *iptr; 2042 2043 ret = rte_flow_tunnel_match(port_id, &pft->tunnel, 2044 &pft->pmd_items, 2045 &pft->num_pmd_items, 2046 &error); 2047 if (ret) { 2048 port_flow_complain(&error); 2049 return NULL; 2050 } 2051 for (iptr = pattern, num_items = 1; 2052 iptr->type != RTE_FLOW_ITEM_TYPE_END; 2053 iptr++, num_items++); 2054 pft->items = malloc((num_items + pft->num_pmd_items) * 2055 sizeof(pattern[0])); 2056 if (!pft->items) { 2057 rte_flow_tunnel_item_release( 2058 port_id, pft->pmd_items, 2059 pft->num_pmd_items, &error); 2060 return NULL; 2061 } 2062 rte_memcpy(pft->items, pft->pmd_items, 2063 pft->num_pmd_items * sizeof(pattern[0])); 2064 rte_memcpy(pft->items + pft->num_pmd_items, pattern, 2065 num_items * sizeof(pattern[0])); 2066 } 2067 2068 return pft; 2069 } 2070 2071 static void 2072 port_flow_tunnel_offload_cmd_release(portid_t port_id, 2073 const struct tunnel_ops *tunnel_ops, 2074 struct port_flow_tunnel *pft) 2075 { 2076 struct rte_flow_error error; 2077 2078 if (tunnel_ops->actions) { 2079 free(pft->actions); 2080 rte_flow_tunnel_action_decap_release( 2081 port_id, pft->pmd_actions, 2082 pft->num_pmd_actions, &error); 2083 pft->actions = NULL; 2084 pft->pmd_actions = NULL; 2085 } 2086 if (tunnel_ops->items) { 2087 free(pft->items); 2088 rte_flow_tunnel_item_release(port_id, pft->pmd_items, 2089 pft->num_pmd_items, 2090 &error); 2091 pft->items = NULL; 2092 pft->pmd_items = NULL; 2093 } 2094 } 2095 2096 /** Add port meter policy */ 2097 int 2098 port_meter_policy_add(portid_t port_id, uint32_t policy_id, 2099 const struct rte_flow_action *actions) 2100 { 2101 struct rte_mtr_error error; 2102 const struct rte_flow_action *act = actions; 2103 const struct rte_flow_action *start; 2104 struct rte_mtr_meter_policy_params policy; 2105 uint32_t i = 0, act_n; 2106 int ret; 2107 2108 for (i = 0; i < RTE_COLORS; i++) { 2109 for (act_n = 0, start = act; 2110 act->type != RTE_FLOW_ACTION_TYPE_END; act++) 2111 act_n++; 2112 if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) 2113 policy.actions[i] = start; 2114 else 2115 policy.actions[i] = NULL; 2116 act++; 2117 } 2118 ret = rte_mtr_meter_policy_add(port_id, 2119 policy_id, 2120 &policy, &error); 2121 if (ret) 2122 print_mtr_err_msg(&error); 2123 return ret; 2124 } 2125 2126 /** Validate flow rule. */ 2127 int 2128 port_flow_validate(portid_t port_id, 2129 const struct rte_flow_attr *attr, 2130 const struct rte_flow_item *pattern, 2131 const struct rte_flow_action *actions, 2132 const struct tunnel_ops *tunnel_ops) 2133 { 2134 struct rte_flow_error error; 2135 struct port_flow_tunnel *pft = NULL; 2136 int ret; 2137 2138 /* Poisoning to make sure PMDs update it in case of error. */ 2139 memset(&error, 0x11, sizeof(error)); 2140 if (tunnel_ops->enabled) { 2141 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2142 actions, tunnel_ops); 2143 if (!pft) 2144 return -ENOENT; 2145 if (pft->items) 2146 pattern = pft->items; 2147 if (pft->actions) 2148 actions = pft->actions; 2149 } 2150 ret = rte_flow_validate(port_id, attr, pattern, actions, &error); 2151 if (tunnel_ops->enabled) 2152 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2153 if (ret) 2154 return port_flow_complain(&error); 2155 printf("Flow rule validated\n"); 2156 return 0; 2157 } 2158 2159 /** Return age action structure if exists, otherwise NULL. */ 2160 static struct rte_flow_action_age * 2161 age_action_get(const struct rte_flow_action *actions) 2162 { 2163 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { 2164 switch (actions->type) { 2165 case RTE_FLOW_ACTION_TYPE_AGE: 2166 return (struct rte_flow_action_age *) 2167 (uintptr_t)actions->conf; 2168 default: 2169 break; 2170 } 2171 } 2172 return NULL; 2173 } 2174 2175 /** Create pattern template */ 2176 int 2177 port_flow_pattern_template_create(portid_t port_id, uint32_t id, 2178 const struct rte_flow_pattern_template_attr *attr, 2179 const struct rte_flow_item *pattern) 2180 { 2181 struct rte_port *port; 2182 struct port_template *pit; 2183 int ret; 2184 struct rte_flow_error error; 2185 2186 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2187 port_id == (portid_t)RTE_PORT_ALL) 2188 return -EINVAL; 2189 port = &ports[port_id]; 2190 ret = template_alloc(id, &pit, &port->pattern_templ_list); 2191 if (ret) 2192 return ret; 2193 /* Poisoning to make sure PMDs update it in case of error. */ 2194 memset(&error, 0x22, sizeof(error)); 2195 pit->template.pattern_template = rte_flow_pattern_template_create(port_id, 2196 attr, pattern, &error); 2197 if (!pit->template.pattern_template) { 2198 uint32_t destroy_id = pit->id; 2199 port_flow_pattern_template_destroy(port_id, 1, &destroy_id); 2200 return port_flow_complain(&error); 2201 } 2202 printf("Pattern template #%u created\n", pit->id); 2203 return 0; 2204 } 2205 2206 /** Destroy pattern template */ 2207 int 2208 port_flow_pattern_template_destroy(portid_t port_id, uint32_t n, 2209 const uint32_t *template) 2210 { 2211 struct rte_port *port; 2212 struct port_template **tmp; 2213 uint32_t c = 0; 2214 int ret = 0; 2215 2216 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2217 port_id == (portid_t)RTE_PORT_ALL) 2218 return -EINVAL; 2219 port = &ports[port_id]; 2220 tmp = &port->pattern_templ_list; 2221 while (*tmp) { 2222 uint32_t i; 2223 2224 for (i = 0; i != n; ++i) { 2225 struct rte_flow_error error; 2226 struct port_template *pit = *tmp; 2227 2228 if (template[i] != pit->id) 2229 continue; 2230 /* 2231 * Poisoning to make sure PMDs update it in case 2232 * of error. 2233 */ 2234 memset(&error, 0x33, sizeof(error)); 2235 2236 if (pit->template.pattern_template && 2237 rte_flow_pattern_template_destroy(port_id, 2238 pit->template.pattern_template, 2239 &error)) { 2240 ret = port_flow_complain(&error); 2241 continue; 2242 } 2243 *tmp = pit->next; 2244 printf("Pattern template #%u destroyed\n", pit->id); 2245 free(pit); 2246 break; 2247 } 2248 if (i == n) 2249 tmp = &(*tmp)->next; 2250 ++c; 2251 } 2252 return ret; 2253 } 2254 2255 /** Create actions template */ 2256 int 2257 port_flow_actions_template_create(portid_t port_id, uint32_t id, 2258 const struct rte_flow_actions_template_attr *attr, 2259 const struct rte_flow_action *actions, 2260 const struct rte_flow_action *masks) 2261 { 2262 struct rte_port *port; 2263 struct port_template *pat; 2264 int ret; 2265 struct rte_flow_error error; 2266 2267 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2268 port_id == (portid_t)RTE_PORT_ALL) 2269 return -EINVAL; 2270 port = &ports[port_id]; 2271 ret = template_alloc(id, &pat, &port->actions_templ_list); 2272 if (ret) 2273 return ret; 2274 /* Poisoning to make sure PMDs update it in case of error. */ 2275 memset(&error, 0x22, sizeof(error)); 2276 pat->template.actions_template = rte_flow_actions_template_create(port_id, 2277 attr, actions, masks, &error); 2278 if (!pat->template.actions_template) { 2279 uint32_t destroy_id = pat->id; 2280 port_flow_actions_template_destroy(port_id, 1, &destroy_id); 2281 return port_flow_complain(&error); 2282 } 2283 printf("Actions template #%u created\n", pat->id); 2284 return 0; 2285 } 2286 2287 /** Destroy actions template */ 2288 int 2289 port_flow_actions_template_destroy(portid_t port_id, uint32_t n, 2290 const uint32_t *template) 2291 { 2292 struct rte_port *port; 2293 struct port_template **tmp; 2294 uint32_t c = 0; 2295 int ret = 0; 2296 2297 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2298 port_id == (portid_t)RTE_PORT_ALL) 2299 return -EINVAL; 2300 port = &ports[port_id]; 2301 tmp = &port->actions_templ_list; 2302 while (*tmp) { 2303 uint32_t i; 2304 2305 for (i = 0; i != n; ++i) { 2306 struct rte_flow_error error; 2307 struct port_template *pat = *tmp; 2308 2309 if (template[i] != pat->id) 2310 continue; 2311 /* 2312 * Poisoning to make sure PMDs update it in case 2313 * of error. 2314 */ 2315 memset(&error, 0x33, sizeof(error)); 2316 2317 if (pat->template.actions_template && 2318 rte_flow_actions_template_destroy(port_id, 2319 pat->template.actions_template, &error)) { 2320 ret = port_flow_complain(&error); 2321 continue; 2322 } 2323 *tmp = pat->next; 2324 printf("Actions template #%u destroyed\n", pat->id); 2325 free(pat); 2326 break; 2327 } 2328 if (i == n) 2329 tmp = &(*tmp)->next; 2330 ++c; 2331 } 2332 return ret; 2333 } 2334 2335 /** Create table */ 2336 int 2337 port_flow_template_table_create(portid_t port_id, uint32_t id, 2338 const struct rte_flow_template_table_attr *table_attr, 2339 uint32_t nb_pattern_templates, uint32_t *pattern_templates, 2340 uint32_t nb_actions_templates, uint32_t *actions_templates) 2341 { 2342 struct rte_port *port; 2343 struct port_table *pt; 2344 struct port_template *temp = NULL; 2345 int ret; 2346 uint32_t i; 2347 struct rte_flow_error error; 2348 struct rte_flow_pattern_template 2349 *flow_pattern_templates[nb_pattern_templates]; 2350 struct rte_flow_actions_template 2351 *flow_actions_templates[nb_actions_templates]; 2352 2353 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2354 port_id == (portid_t)RTE_PORT_ALL) 2355 return -EINVAL; 2356 port = &ports[port_id]; 2357 for (i = 0; i < nb_pattern_templates; ++i) { 2358 bool found = false; 2359 temp = port->pattern_templ_list; 2360 while (temp) { 2361 if (pattern_templates[i] == temp->id) { 2362 flow_pattern_templates[i] = 2363 temp->template.pattern_template; 2364 found = true; 2365 break; 2366 } 2367 temp = temp->next; 2368 } 2369 if (!found) { 2370 printf("Pattern template #%u is invalid\n", 2371 pattern_templates[i]); 2372 return -EINVAL; 2373 } 2374 } 2375 for (i = 0; i < nb_actions_templates; ++i) { 2376 bool found = false; 2377 temp = port->actions_templ_list; 2378 while (temp) { 2379 if (actions_templates[i] == temp->id) { 2380 flow_actions_templates[i] = 2381 temp->template.actions_template; 2382 found = true; 2383 break; 2384 } 2385 temp = temp->next; 2386 } 2387 if (!found) { 2388 printf("Actions template #%u is invalid\n", 2389 actions_templates[i]); 2390 return -EINVAL; 2391 } 2392 } 2393 ret = table_alloc(id, &pt, &port->table_list); 2394 if (ret) 2395 return ret; 2396 /* Poisoning to make sure PMDs update it in case of error. */ 2397 memset(&error, 0x22, sizeof(error)); 2398 pt->table = rte_flow_template_table_create(port_id, table_attr, 2399 flow_pattern_templates, nb_pattern_templates, 2400 flow_actions_templates, nb_actions_templates, 2401 &error); 2402 2403 if (!pt->table) { 2404 uint32_t destroy_id = pt->id; 2405 port_flow_template_table_destroy(port_id, 1, &destroy_id); 2406 return port_flow_complain(&error); 2407 } 2408 pt->nb_pattern_templates = nb_pattern_templates; 2409 pt->nb_actions_templates = nb_actions_templates; 2410 printf("Template table #%u created\n", pt->id); 2411 return 0; 2412 } 2413 2414 /** Destroy table */ 2415 int 2416 port_flow_template_table_destroy(portid_t port_id, 2417 uint32_t n, const uint32_t *table) 2418 { 2419 struct rte_port *port; 2420 struct port_table **tmp; 2421 uint32_t c = 0; 2422 int ret = 0; 2423 2424 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2425 port_id == (portid_t)RTE_PORT_ALL) 2426 return -EINVAL; 2427 port = &ports[port_id]; 2428 tmp = &port->table_list; 2429 while (*tmp) { 2430 uint32_t i; 2431 2432 for (i = 0; i != n; ++i) { 2433 struct rte_flow_error error; 2434 struct port_table *pt = *tmp; 2435 2436 if (table[i] != pt->id) 2437 continue; 2438 /* 2439 * Poisoning to make sure PMDs update it in case 2440 * of error. 2441 */ 2442 memset(&error, 0x33, sizeof(error)); 2443 2444 if (pt->table && 2445 rte_flow_template_table_destroy(port_id, 2446 pt->table, 2447 &error)) { 2448 ret = port_flow_complain(&error); 2449 continue; 2450 } 2451 *tmp = pt->next; 2452 printf("Template table #%u destroyed\n", pt->id); 2453 free(pt); 2454 break; 2455 } 2456 if (i == n) 2457 tmp = &(*tmp)->next; 2458 ++c; 2459 } 2460 return ret; 2461 } 2462 2463 /** Enqueue create flow rule operation. */ 2464 int 2465 port_queue_flow_create(portid_t port_id, queueid_t queue_id, 2466 bool postpone, uint32_t table_id, 2467 uint32_t pattern_idx, uint32_t actions_idx, 2468 const struct rte_flow_item *pattern, 2469 const struct rte_flow_action *actions) 2470 { 2471 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2472 struct rte_flow *flow; 2473 struct rte_port *port; 2474 struct port_flow *pf; 2475 struct port_table *pt; 2476 uint32_t id = 0; 2477 bool found; 2478 struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; 2479 struct rte_flow_action_age *age = age_action_get(actions); 2480 2481 port = &ports[port_id]; 2482 if (port->flow_list) { 2483 if (port->flow_list->id == UINT32_MAX) { 2484 printf("Highest rule ID is already assigned," 2485 " delete it first"); 2486 return -ENOMEM; 2487 } 2488 id = port->flow_list->id + 1; 2489 } 2490 2491 if (queue_id >= port->queue_nb) { 2492 printf("Queue #%u is invalid\n", queue_id); 2493 return -EINVAL; 2494 } 2495 2496 found = false; 2497 pt = port->table_list; 2498 while (pt) { 2499 if (table_id == pt->id) { 2500 found = true; 2501 break; 2502 } 2503 pt = pt->next; 2504 } 2505 if (!found) { 2506 printf("Table #%u is invalid\n", table_id); 2507 return -EINVAL; 2508 } 2509 2510 if (pattern_idx >= pt->nb_pattern_templates) { 2511 printf("Pattern template index #%u is invalid," 2512 " %u templates present in the table\n", 2513 pattern_idx, pt->nb_pattern_templates); 2514 return -EINVAL; 2515 } 2516 if (actions_idx >= pt->nb_actions_templates) { 2517 printf("Actions template index #%u is invalid," 2518 " %u templates present in the table\n", 2519 actions_idx, pt->nb_actions_templates); 2520 return -EINVAL; 2521 } 2522 2523 pf = port_flow_new(NULL, pattern, actions, &error); 2524 if (!pf) 2525 return port_flow_complain(&error); 2526 if (age) { 2527 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2528 age->context = &pf->age_type; 2529 } 2530 /* Poisoning to make sure PMDs update it in case of error. */ 2531 memset(&error, 0x11, sizeof(error)); 2532 flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table, 2533 pattern, pattern_idx, actions, actions_idx, NULL, &error); 2534 if (!flow) { 2535 uint32_t flow_id = pf->id; 2536 port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id); 2537 return port_flow_complain(&error); 2538 } 2539 2540 pf->next = port->flow_list; 2541 pf->id = id; 2542 pf->flow = flow; 2543 port->flow_list = pf; 2544 printf("Flow rule #%u creation enqueued\n", pf->id); 2545 return 0; 2546 } 2547 2548 /** Enqueue number of destroy flow rules operations. */ 2549 int 2550 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id, 2551 bool postpone, uint32_t n, const uint32_t *rule) 2552 { 2553 struct rte_flow_op_attr op_attr = { .postpone = postpone }; 2554 struct rte_port *port; 2555 struct port_flow **tmp; 2556 uint32_t c = 0; 2557 int ret = 0; 2558 2559 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2560 port_id == (portid_t)RTE_PORT_ALL) 2561 return -EINVAL; 2562 port = &ports[port_id]; 2563 2564 if (queue_id >= port->queue_nb) { 2565 printf("Queue #%u is invalid\n", queue_id); 2566 return -EINVAL; 2567 } 2568 2569 tmp = &port->flow_list; 2570 while (*tmp) { 2571 uint32_t i; 2572 2573 for (i = 0; i != n; ++i) { 2574 struct rte_flow_error error; 2575 struct port_flow *pf = *tmp; 2576 2577 if (rule[i] != pf->id) 2578 continue; 2579 /* 2580 * Poisoning to make sure PMD 2581 * update it in case of error. 2582 */ 2583 memset(&error, 0x33, sizeof(error)); 2584 if (rte_flow_async_destroy(port_id, queue_id, &op_attr, 2585 pf->flow, NULL, &error)) { 2586 ret = port_flow_complain(&error); 2587 continue; 2588 } 2589 printf("Flow rule #%u destruction enqueued\n", pf->id); 2590 *tmp = pf->next; 2591 free(pf); 2592 break; 2593 } 2594 if (i == n) 2595 tmp = &(*tmp)->next; 2596 ++c; 2597 } 2598 return ret; 2599 } 2600 2601 /** Enqueue indirect action create operation. */ 2602 int 2603 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id, 2604 bool postpone, uint32_t id, 2605 const struct rte_flow_indir_action_conf *conf, 2606 const struct rte_flow_action *action) 2607 { 2608 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2609 struct rte_port *port; 2610 struct port_indirect_action *pia; 2611 int ret; 2612 struct rte_flow_error error; 2613 2614 ret = action_alloc(port_id, id, &pia); 2615 if (ret) 2616 return ret; 2617 2618 port = &ports[port_id]; 2619 if (queue_id >= port->queue_nb) { 2620 printf("Queue #%u is invalid\n", queue_id); 2621 return -EINVAL; 2622 } 2623 2624 if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { 2625 struct rte_flow_action_age *age = 2626 (struct rte_flow_action_age *)(uintptr_t)(action->conf); 2627 2628 pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; 2629 age->context = &pia->age_type; 2630 } 2631 /* Poisoning to make sure PMDs update it in case of error. */ 2632 memset(&error, 0x88, sizeof(error)); 2633 pia->handle = rte_flow_async_action_handle_create(port_id, queue_id, 2634 &attr, conf, action, NULL, &error); 2635 if (!pia->handle) { 2636 uint32_t destroy_id = pia->id; 2637 port_queue_action_handle_destroy(port_id, queue_id, 2638 postpone, 1, &destroy_id); 2639 return port_flow_complain(&error); 2640 } 2641 pia->type = action->type; 2642 printf("Indirect action #%u creation queued\n", pia->id); 2643 return 0; 2644 } 2645 2646 /** Enqueue indirect action destroy operation. */ 2647 int 2648 port_queue_action_handle_destroy(portid_t port_id, 2649 uint32_t queue_id, bool postpone, 2650 uint32_t n, const uint32_t *actions) 2651 { 2652 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2653 struct rte_port *port; 2654 struct port_indirect_action **tmp; 2655 uint32_t c = 0; 2656 int ret = 0; 2657 2658 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2659 port_id == (portid_t)RTE_PORT_ALL) 2660 return -EINVAL; 2661 port = &ports[port_id]; 2662 2663 if (queue_id >= port->queue_nb) { 2664 printf("Queue #%u is invalid\n", queue_id); 2665 return -EINVAL; 2666 } 2667 2668 tmp = &port->actions_list; 2669 while (*tmp) { 2670 uint32_t i; 2671 2672 for (i = 0; i != n; ++i) { 2673 struct rte_flow_error error; 2674 struct port_indirect_action *pia = *tmp; 2675 2676 if (actions[i] != pia->id) 2677 continue; 2678 /* 2679 * Poisoning to make sure PMDs update it in case 2680 * of error. 2681 */ 2682 memset(&error, 0x99, sizeof(error)); 2683 2684 if (pia->handle && 2685 rte_flow_async_action_handle_destroy(port_id, 2686 queue_id, &attr, pia->handle, NULL, &error)) { 2687 ret = port_flow_complain(&error); 2688 continue; 2689 } 2690 *tmp = pia->next; 2691 printf("Indirect action #%u destruction queued\n", 2692 pia->id); 2693 free(pia); 2694 break; 2695 } 2696 if (i == n) 2697 tmp = &(*tmp)->next; 2698 ++c; 2699 } 2700 return ret; 2701 } 2702 2703 /** Enqueue indirect action update operation. */ 2704 int 2705 port_queue_action_handle_update(portid_t port_id, 2706 uint32_t queue_id, bool postpone, uint32_t id, 2707 const struct rte_flow_action *action) 2708 { 2709 const struct rte_flow_op_attr attr = { .postpone = postpone}; 2710 struct rte_port *port; 2711 struct rte_flow_error error; 2712 struct rte_flow_action_handle *action_handle; 2713 2714 action_handle = port_action_handle_get_by_id(port_id, id); 2715 if (!action_handle) 2716 return -EINVAL; 2717 2718 port = &ports[port_id]; 2719 if (queue_id >= port->queue_nb) { 2720 printf("Queue #%u is invalid\n", queue_id); 2721 return -EINVAL; 2722 } 2723 2724 if (rte_flow_async_action_handle_update(port_id, queue_id, &attr, 2725 action_handle, action, NULL, &error)) { 2726 return port_flow_complain(&error); 2727 } 2728 printf("Indirect action #%u update queued\n", id); 2729 return 0; 2730 } 2731 2732 /** Push all the queue operations in the queue to the NIC. */ 2733 int 2734 port_queue_flow_push(portid_t port_id, queueid_t queue_id) 2735 { 2736 struct rte_port *port; 2737 struct rte_flow_error error; 2738 int ret = 0; 2739 2740 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2741 port_id == (portid_t)RTE_PORT_ALL) 2742 return -EINVAL; 2743 port = &ports[port_id]; 2744 2745 if (queue_id >= port->queue_nb) { 2746 printf("Queue #%u is invalid\n", queue_id); 2747 return -EINVAL; 2748 } 2749 2750 memset(&error, 0x55, sizeof(error)); 2751 ret = rte_flow_push(port_id, queue_id, &error); 2752 if (ret < 0) { 2753 printf("Failed to push operations in the queue\n"); 2754 return -EINVAL; 2755 } 2756 printf("Queue #%u operations pushed\n", queue_id); 2757 return ret; 2758 } 2759 2760 /** Pull queue operation results from the queue. */ 2761 int 2762 port_queue_flow_pull(portid_t port_id, queueid_t queue_id) 2763 { 2764 struct rte_port *port; 2765 struct rte_flow_op_result *res; 2766 struct rte_flow_error error; 2767 int ret = 0; 2768 int success = 0; 2769 int i; 2770 2771 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2772 port_id == (portid_t)RTE_PORT_ALL) 2773 return -EINVAL; 2774 port = &ports[port_id]; 2775 2776 if (queue_id >= port->queue_nb) { 2777 printf("Queue #%u is invalid\n", queue_id); 2778 return -EINVAL; 2779 } 2780 2781 res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); 2782 if (!res) { 2783 printf("Failed to allocate memory for pulled results\n"); 2784 return -ENOMEM; 2785 } 2786 2787 memset(&error, 0x66, sizeof(error)); 2788 ret = rte_flow_pull(port_id, queue_id, res, 2789 port->queue_sz, &error); 2790 if (ret < 0) { 2791 printf("Failed to pull a operation results\n"); 2792 free(res); 2793 return -EINVAL; 2794 } 2795 2796 for (i = 0; i < ret; i++) { 2797 if (res[i].status == RTE_FLOW_OP_SUCCESS) 2798 success++; 2799 } 2800 printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n", 2801 queue_id, ret, ret - success, success); 2802 free(res); 2803 return ret; 2804 } 2805 2806 /** Create flow rule. */ 2807 int 2808 port_flow_create(portid_t port_id, 2809 const struct rte_flow_attr *attr, 2810 const struct rte_flow_item *pattern, 2811 const struct rte_flow_action *actions, 2812 const struct tunnel_ops *tunnel_ops) 2813 { 2814 struct rte_flow *flow; 2815 struct rte_port *port; 2816 struct port_flow *pf; 2817 uint32_t id = 0; 2818 struct rte_flow_error error; 2819 struct port_flow_tunnel *pft = NULL; 2820 struct rte_flow_action_age *age = age_action_get(actions); 2821 2822 port = &ports[port_id]; 2823 if (port->flow_list) { 2824 if (port->flow_list->id == UINT32_MAX) { 2825 fprintf(stderr, 2826 "Highest rule ID is already assigned, delete it first"); 2827 return -ENOMEM; 2828 } 2829 id = port->flow_list->id + 1; 2830 } 2831 if (tunnel_ops->enabled) { 2832 pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, 2833 actions, tunnel_ops); 2834 if (!pft) 2835 return -ENOENT; 2836 if (pft->items) 2837 pattern = pft->items; 2838 if (pft->actions) 2839 actions = pft->actions; 2840 } 2841 pf = port_flow_new(attr, pattern, actions, &error); 2842 if (!pf) 2843 return port_flow_complain(&error); 2844 if (age) { 2845 pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; 2846 age->context = &pf->age_type; 2847 } 2848 /* Poisoning to make sure PMDs update it in case of error. */ 2849 memset(&error, 0x22, sizeof(error)); 2850 flow = rte_flow_create(port_id, attr, pattern, actions, &error); 2851 if (!flow) { 2852 if (tunnel_ops->enabled) 2853 port_flow_tunnel_offload_cmd_release(port_id, 2854 tunnel_ops, pft); 2855 free(pf); 2856 return port_flow_complain(&error); 2857 } 2858 pf->next = port->flow_list; 2859 pf->id = id; 2860 pf->flow = flow; 2861 port->flow_list = pf; 2862 if (tunnel_ops->enabled) 2863 port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); 2864 printf("Flow rule #%u created\n", pf->id); 2865 return 0; 2866 } 2867 2868 /** Destroy a number of flow rules. */ 2869 int 2870 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) 2871 { 2872 struct rte_port *port; 2873 struct port_flow **tmp; 2874 uint32_t c = 0; 2875 int ret = 0; 2876 2877 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2878 port_id == (portid_t)RTE_PORT_ALL) 2879 return -EINVAL; 2880 port = &ports[port_id]; 2881 tmp = &port->flow_list; 2882 while (*tmp) { 2883 uint32_t i; 2884 2885 for (i = 0; i != n; ++i) { 2886 struct rte_flow_error error; 2887 struct port_flow *pf = *tmp; 2888 2889 if (rule[i] != pf->id) 2890 continue; 2891 /* 2892 * Poisoning to make sure PMDs update it in case 2893 * of error. 2894 */ 2895 memset(&error, 0x33, sizeof(error)); 2896 if (rte_flow_destroy(port_id, pf->flow, &error)) { 2897 ret = port_flow_complain(&error); 2898 continue; 2899 } 2900 printf("Flow rule #%u destroyed\n", pf->id); 2901 *tmp = pf->next; 2902 free(pf); 2903 break; 2904 } 2905 if (i == n) 2906 tmp = &(*tmp)->next; 2907 ++c; 2908 } 2909 return ret; 2910 } 2911 2912 /** Remove all flow rules. */ 2913 int 2914 port_flow_flush(portid_t port_id) 2915 { 2916 struct rte_flow_error error; 2917 struct rte_port *port; 2918 int ret = 0; 2919 2920 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2921 port_id == (portid_t)RTE_PORT_ALL) 2922 return -EINVAL; 2923 2924 port = &ports[port_id]; 2925 2926 if (port->flow_list == NULL) 2927 return ret; 2928 2929 /* Poisoning to make sure PMDs update it in case of error. */ 2930 memset(&error, 0x44, sizeof(error)); 2931 if (rte_flow_flush(port_id, &error)) { 2932 port_flow_complain(&error); 2933 } 2934 2935 while (port->flow_list) { 2936 struct port_flow *pf = port->flow_list->next; 2937 2938 free(port->flow_list); 2939 port->flow_list = pf; 2940 } 2941 return ret; 2942 } 2943 2944 /** Dump flow rules. */ 2945 int 2946 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, 2947 const char *file_name) 2948 { 2949 int ret = 0; 2950 FILE *file = stdout; 2951 struct rte_flow_error error; 2952 struct rte_port *port; 2953 struct port_flow *pflow; 2954 struct rte_flow *tmpFlow = NULL; 2955 bool found = false; 2956 2957 if (port_id_is_invalid(port_id, ENABLED_WARN) || 2958 port_id == (portid_t)RTE_PORT_ALL) 2959 return -EINVAL; 2960 2961 if (!dump_all) { 2962 port = &ports[port_id]; 2963 pflow = port->flow_list; 2964 while (pflow) { 2965 if (rule_id != pflow->id) { 2966 pflow = pflow->next; 2967 } else { 2968 tmpFlow = pflow->flow; 2969 if (tmpFlow) 2970 found = true; 2971 break; 2972 } 2973 } 2974 if (found == false) { 2975 fprintf(stderr, "Failed to dump to flow %d\n", rule_id); 2976 return -EINVAL; 2977 } 2978 } 2979 2980 if (file_name && strlen(file_name)) { 2981 file = fopen(file_name, "w"); 2982 if (!file) { 2983 fprintf(stderr, "Failed to create file %s: %s\n", 2984 file_name, strerror(errno)); 2985 return -errno; 2986 } 2987 } 2988 2989 if (!dump_all) 2990 ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); 2991 else 2992 ret = rte_flow_dev_dump(port_id, NULL, file, &error); 2993 if (ret) { 2994 port_flow_complain(&error); 2995 fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); 2996 } else 2997 printf("Flow dump finished\n"); 2998 if (file_name && strlen(file_name)) 2999 fclose(file); 3000 return ret; 3001 } 3002 3003 /** Query a flow rule. */ 3004 int 3005 port_flow_query(portid_t port_id, uint32_t rule, 3006 const struct rte_flow_action *action) 3007 { 3008 struct rte_flow_error error; 3009 struct rte_port *port; 3010 struct port_flow *pf; 3011 const char *name; 3012 union { 3013 struct rte_flow_query_count count; 3014 struct rte_flow_action_rss rss_conf; 3015 struct rte_flow_query_age age; 3016 } query; 3017 int ret; 3018 3019 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3020 port_id == (portid_t)RTE_PORT_ALL) 3021 return -EINVAL; 3022 port = &ports[port_id]; 3023 for (pf = port->flow_list; pf; pf = pf->next) 3024 if (pf->id == rule) 3025 break; 3026 if (!pf) { 3027 fprintf(stderr, "Flow rule #%u not found\n", rule); 3028 return -ENOENT; 3029 } 3030 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3031 &name, sizeof(name), 3032 (void *)(uintptr_t)action->type, &error); 3033 if (ret < 0) 3034 return port_flow_complain(&error); 3035 switch (action->type) { 3036 case RTE_FLOW_ACTION_TYPE_COUNT: 3037 case RTE_FLOW_ACTION_TYPE_RSS: 3038 case RTE_FLOW_ACTION_TYPE_AGE: 3039 break; 3040 default: 3041 fprintf(stderr, "Cannot query action type %d (%s)\n", 3042 action->type, name); 3043 return -ENOTSUP; 3044 } 3045 /* Poisoning to make sure PMDs update it in case of error. */ 3046 memset(&error, 0x55, sizeof(error)); 3047 memset(&query, 0, sizeof(query)); 3048 if (rte_flow_query(port_id, pf->flow, action, &query, &error)) 3049 return port_flow_complain(&error); 3050 switch (action->type) { 3051 case RTE_FLOW_ACTION_TYPE_COUNT: 3052 printf("%s:\n" 3053 " hits_set: %u\n" 3054 " bytes_set: %u\n" 3055 " hits: %" PRIu64 "\n" 3056 " bytes: %" PRIu64 "\n", 3057 name, 3058 query.count.hits_set, 3059 query.count.bytes_set, 3060 query.count.hits, 3061 query.count.bytes); 3062 break; 3063 case RTE_FLOW_ACTION_TYPE_RSS: 3064 rss_config_display(&query.rss_conf); 3065 break; 3066 case RTE_FLOW_ACTION_TYPE_AGE: 3067 printf("%s:\n" 3068 " aged: %u\n" 3069 " sec_since_last_hit_valid: %u\n" 3070 " sec_since_last_hit: %" PRIu32 "\n", 3071 name, 3072 query.age.aged, 3073 query.age.sec_since_last_hit_valid, 3074 query.age.sec_since_last_hit); 3075 break; 3076 default: 3077 fprintf(stderr, 3078 "Cannot display result for action type %d (%s)\n", 3079 action->type, name); 3080 break; 3081 } 3082 return 0; 3083 } 3084 3085 /** List simply and destroy all aged flows. */ 3086 void 3087 port_flow_aged(portid_t port_id, uint8_t destroy) 3088 { 3089 void **contexts; 3090 int nb_context, total = 0, idx; 3091 struct rte_flow_error error; 3092 enum age_action_context_type *type; 3093 union { 3094 struct port_flow *pf; 3095 struct port_indirect_action *pia; 3096 } ctx; 3097 3098 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3099 port_id == (portid_t)RTE_PORT_ALL) 3100 return; 3101 total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); 3102 printf("Port %u total aged flows: %d\n", port_id, total); 3103 if (total < 0) { 3104 port_flow_complain(&error); 3105 return; 3106 } 3107 if (total == 0) 3108 return; 3109 contexts = malloc(sizeof(void *) * total); 3110 if (contexts == NULL) { 3111 fprintf(stderr, "Cannot allocate contexts for aged flow\n"); 3112 return; 3113 } 3114 printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); 3115 nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); 3116 if (nb_context != total) { 3117 fprintf(stderr, 3118 "Port:%d get aged flows count(%d) != total(%d)\n", 3119 port_id, nb_context, total); 3120 free(contexts); 3121 return; 3122 } 3123 total = 0; 3124 for (idx = 0; idx < nb_context; idx++) { 3125 if (!contexts[idx]) { 3126 fprintf(stderr, "Error: get Null context in port %u\n", 3127 port_id); 3128 continue; 3129 } 3130 type = (enum age_action_context_type *)contexts[idx]; 3131 switch (*type) { 3132 case ACTION_AGE_CONTEXT_TYPE_FLOW: 3133 ctx.pf = container_of(type, struct port_flow, age_type); 3134 printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 3135 "\t%c%c%c\t\n", 3136 "Flow", 3137 ctx.pf->id, 3138 ctx.pf->rule.attr->group, 3139 ctx.pf->rule.attr->priority, 3140 ctx.pf->rule.attr->ingress ? 'i' : '-', 3141 ctx.pf->rule.attr->egress ? 'e' : '-', 3142 ctx.pf->rule.attr->transfer ? 't' : '-'); 3143 if (destroy && !port_flow_destroy(port_id, 1, 3144 &ctx.pf->id)) 3145 total++; 3146 break; 3147 case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: 3148 ctx.pia = container_of(type, 3149 struct port_indirect_action, age_type); 3150 printf("%-20s\t%" PRIu32 "\n", "Indirect action", 3151 ctx.pia->id); 3152 break; 3153 default: 3154 fprintf(stderr, "Error: invalid context type %u\n", 3155 port_id); 3156 break; 3157 } 3158 } 3159 printf("\n%d flows destroyed\n", total); 3160 free(contexts); 3161 } 3162 3163 /** List flow rules. */ 3164 void 3165 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) 3166 { 3167 struct rte_port *port; 3168 struct port_flow *pf; 3169 struct port_flow *list = NULL; 3170 uint32_t i; 3171 3172 if (port_id_is_invalid(port_id, ENABLED_WARN) || 3173 port_id == (portid_t)RTE_PORT_ALL) 3174 return; 3175 port = &ports[port_id]; 3176 if (!port->flow_list) 3177 return; 3178 /* Sort flows by group, priority and ID. */ 3179 for (pf = port->flow_list; pf != NULL; pf = pf->next) { 3180 struct port_flow **tmp; 3181 const struct rte_flow_attr *curr = pf->rule.attr; 3182 3183 if (n) { 3184 /* Filter out unwanted groups. */ 3185 for (i = 0; i != n; ++i) 3186 if (curr->group == group[i]) 3187 break; 3188 if (i == n) 3189 continue; 3190 } 3191 for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { 3192 const struct rte_flow_attr *comp = (*tmp)->rule.attr; 3193 3194 if (curr->group > comp->group || 3195 (curr->group == comp->group && 3196 curr->priority > comp->priority) || 3197 (curr->group == comp->group && 3198 curr->priority == comp->priority && 3199 pf->id > (*tmp)->id)) 3200 continue; 3201 break; 3202 } 3203 pf->tmp = *tmp; 3204 *tmp = pf; 3205 } 3206 printf("ID\tGroup\tPrio\tAttr\tRule\n"); 3207 for (pf = list; pf != NULL; pf = pf->tmp) { 3208 const struct rte_flow_item *item = pf->rule.pattern; 3209 const struct rte_flow_action *action = pf->rule.actions; 3210 const char *name; 3211 3212 printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", 3213 pf->id, 3214 pf->rule.attr->group, 3215 pf->rule.attr->priority, 3216 pf->rule.attr->ingress ? 'i' : '-', 3217 pf->rule.attr->egress ? 'e' : '-', 3218 pf->rule.attr->transfer ? 't' : '-'); 3219 while (item->type != RTE_FLOW_ITEM_TYPE_END) { 3220 if ((uint32_t)item->type > INT_MAX) 3221 name = "PMD_INTERNAL"; 3222 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, 3223 &name, sizeof(name), 3224 (void *)(uintptr_t)item->type, 3225 NULL) <= 0) 3226 name = "[UNKNOWN]"; 3227 if (item->type != RTE_FLOW_ITEM_TYPE_VOID) 3228 printf("%s ", name); 3229 ++item; 3230 } 3231 printf("=>"); 3232 while (action->type != RTE_FLOW_ACTION_TYPE_END) { 3233 if ((uint32_t)action->type > INT_MAX) 3234 name = "PMD_INTERNAL"; 3235 else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, 3236 &name, sizeof(name), 3237 (void *)(uintptr_t)action->type, 3238 NULL) <= 0) 3239 name = "[UNKNOWN]"; 3240 if (action->type != RTE_FLOW_ACTION_TYPE_VOID) 3241 printf(" %s", name); 3242 ++action; 3243 } 3244 printf("\n"); 3245 } 3246 } 3247 3248 /** Restrict ingress traffic to the defined flow rules. */ 3249 int 3250 port_flow_isolate(portid_t port_id, int set) 3251 { 3252 struct rte_flow_error error; 3253 3254 /* Poisoning to make sure PMDs update it in case of error. */ 3255 memset(&error, 0x66, sizeof(error)); 3256 if (rte_flow_isolate(port_id, set, &error)) 3257 return port_flow_complain(&error); 3258 printf("Ingress traffic on port %u is %s to the defined flow rules\n", 3259 port_id, 3260 set ? "now restricted" : "not restricted anymore"); 3261 return 0; 3262 } 3263 3264 /* 3265 * RX/TX ring descriptors display functions. 3266 */ 3267 int 3268 rx_queue_id_is_invalid(queueid_t rxq_id) 3269 { 3270 if (rxq_id < nb_rxq) 3271 return 0; 3272 fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", 3273 rxq_id, nb_rxq); 3274 return 1; 3275 } 3276 3277 int 3278 tx_queue_id_is_invalid(queueid_t txq_id) 3279 { 3280 if (txq_id < nb_txq) 3281 return 0; 3282 fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", 3283 txq_id, nb_txq); 3284 return 1; 3285 } 3286 3287 static int 3288 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) 3289 { 3290 struct rte_port *port = &ports[port_id]; 3291 struct rte_eth_rxq_info rx_qinfo; 3292 int ret; 3293 3294 ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); 3295 if (ret == 0) { 3296 *ring_size = rx_qinfo.nb_desc; 3297 return ret; 3298 } 3299 3300 if (ret != -ENOTSUP) 3301 return ret; 3302 /* 3303 * If the rte_eth_rx_queue_info_get is not support for this PMD, 3304 * ring_size stored in testpmd will be used for validity verification. 3305 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc 3306 * being 0, it will use a default value provided by PMDs to setup this 3307 * rxq. If the default value is 0, it will use the 3308 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. 3309 */ 3310 if (port->nb_rx_desc[rxq_id]) 3311 *ring_size = port->nb_rx_desc[rxq_id]; 3312 else if (port->dev_info.default_rxportconf.ring_size) 3313 *ring_size = port->dev_info.default_rxportconf.ring_size; 3314 else 3315 *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 3316 return 0; 3317 } 3318 3319 static int 3320 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) 3321 { 3322 struct rte_port *port = &ports[port_id]; 3323 struct rte_eth_txq_info tx_qinfo; 3324 int ret; 3325 3326 ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); 3327 if (ret == 0) { 3328 *ring_size = tx_qinfo.nb_desc; 3329 return ret; 3330 } 3331 3332 if (ret != -ENOTSUP) 3333 return ret; 3334 /* 3335 * If the rte_eth_tx_queue_info_get is not support for this PMD, 3336 * ring_size stored in testpmd will be used for validity verification. 3337 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc 3338 * being 0, it will use a default value provided by PMDs to setup this 3339 * txq. If the default value is 0, it will use the 3340 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 3341 */ 3342 if (port->nb_tx_desc[txq_id]) 3343 *ring_size = port->nb_tx_desc[txq_id]; 3344 else if (port->dev_info.default_txportconf.ring_size) 3345 *ring_size = port->dev_info.default_txportconf.ring_size; 3346 else 3347 *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 3348 return 0; 3349 } 3350 3351 static int 3352 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) 3353 { 3354 uint16_t ring_size; 3355 int ret; 3356 3357 ret = get_rx_ring_size(port_id, rxq_id, &ring_size); 3358 if (ret) 3359 return 1; 3360 3361 if (rxdesc_id < ring_size) 3362 return 0; 3363 3364 fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", 3365 rxdesc_id, ring_size); 3366 return 1; 3367 } 3368 3369 static int 3370 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) 3371 { 3372 uint16_t ring_size; 3373 int ret; 3374 3375 ret = get_tx_ring_size(port_id, txq_id, &ring_size); 3376 if (ret) 3377 return 1; 3378 3379 if (txdesc_id < ring_size) 3380 return 0; 3381 3382 fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", 3383 txdesc_id, ring_size); 3384 return 1; 3385 } 3386 3387 static const struct rte_memzone * 3388 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) 3389 { 3390 char mz_name[RTE_MEMZONE_NAMESIZE]; 3391 const struct rte_memzone *mz; 3392 3393 snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", 3394 port_id, q_id, ring_name); 3395 mz = rte_memzone_lookup(mz_name); 3396 if (mz == NULL) 3397 fprintf(stderr, 3398 "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", 3399 ring_name, port_id, q_id, mz_name); 3400 return mz; 3401 } 3402 3403 union igb_ring_dword { 3404 uint64_t dword; 3405 struct { 3406 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 3407 uint32_t lo; 3408 uint32_t hi; 3409 #else 3410 uint32_t hi; 3411 uint32_t lo; 3412 #endif 3413 } words; 3414 }; 3415 3416 struct igb_ring_desc_32_bytes { 3417 union igb_ring_dword lo_dword; 3418 union igb_ring_dword hi_dword; 3419 union igb_ring_dword resv1; 3420 union igb_ring_dword resv2; 3421 }; 3422 3423 struct igb_ring_desc_16_bytes { 3424 union igb_ring_dword lo_dword; 3425 union igb_ring_dword hi_dword; 3426 }; 3427 3428 static void 3429 ring_rxd_display_dword(union igb_ring_dword dword) 3430 { 3431 printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, 3432 (unsigned)dword.words.hi); 3433 } 3434 3435 static void 3436 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, 3437 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 3438 portid_t port_id, 3439 #else 3440 __rte_unused portid_t port_id, 3441 #endif 3442 uint16_t desc_id) 3443 { 3444 struct igb_ring_desc_16_bytes *ring = 3445 (struct igb_ring_desc_16_bytes *)ring_mz->addr; 3446 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC 3447 int ret; 3448 struct rte_eth_dev_info dev_info; 3449 3450 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3451 if (ret != 0) 3452 return; 3453 3454 if (strstr(dev_info.driver_name, "i40e") != NULL) { 3455 /* 32 bytes RX descriptor, i40e only */ 3456 struct igb_ring_desc_32_bytes *ring = 3457 (struct igb_ring_desc_32_bytes *)ring_mz->addr; 3458 ring[desc_id].lo_dword.dword = 3459 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3460 ring_rxd_display_dword(ring[desc_id].lo_dword); 3461 ring[desc_id].hi_dword.dword = 3462 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3463 ring_rxd_display_dword(ring[desc_id].hi_dword); 3464 ring[desc_id].resv1.dword = 3465 rte_le_to_cpu_64(ring[desc_id].resv1.dword); 3466 ring_rxd_display_dword(ring[desc_id].resv1); 3467 ring[desc_id].resv2.dword = 3468 rte_le_to_cpu_64(ring[desc_id].resv2.dword); 3469 ring_rxd_display_dword(ring[desc_id].resv2); 3470 3471 return; 3472 } 3473 #endif 3474 /* 16 bytes RX descriptor */ 3475 ring[desc_id].lo_dword.dword = 3476 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3477 ring_rxd_display_dword(ring[desc_id].lo_dword); 3478 ring[desc_id].hi_dword.dword = 3479 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3480 ring_rxd_display_dword(ring[desc_id].hi_dword); 3481 } 3482 3483 static void 3484 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) 3485 { 3486 struct igb_ring_desc_16_bytes *ring; 3487 struct igb_ring_desc_16_bytes txd; 3488 3489 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 3490 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); 3491 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); 3492 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", 3493 (unsigned)txd.lo_dword.words.lo, 3494 (unsigned)txd.lo_dword.words.hi, 3495 (unsigned)txd.hi_dword.words.lo, 3496 (unsigned)txd.hi_dword.words.hi); 3497 } 3498 3499 void 3500 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) 3501 { 3502 const struct rte_memzone *rx_mz; 3503 3504 if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) 3505 return; 3506 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); 3507 if (rx_mz == NULL) 3508 return; 3509 ring_rx_descriptor_display(rx_mz, port_id, rxd_id); 3510 } 3511 3512 void 3513 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) 3514 { 3515 const struct rte_memzone *tx_mz; 3516 3517 if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) 3518 return; 3519 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); 3520 if (tx_mz == NULL) 3521 return; 3522 ring_tx_descriptor_display(tx_mz, txd_id); 3523 } 3524 3525 void 3526 fwd_lcores_config_display(void) 3527 { 3528 lcoreid_t lc_id; 3529 3530 printf("List of forwarding lcores:"); 3531 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) 3532 printf(" %2u", fwd_lcores_cpuids[lc_id]); 3533 printf("\n"); 3534 } 3535 void 3536 rxtx_config_display(void) 3537 { 3538 portid_t pid; 3539 queueid_t qid; 3540 3541 printf(" %s packet forwarding%s packets/burst=%d\n", 3542 cur_fwd_eng->fwd_mode_name, 3543 retry_enabled == 0 ? "" : " with retry", 3544 nb_pkt_per_burst); 3545 3546 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) 3547 printf(" packet len=%u - nb packet segments=%d\n", 3548 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); 3549 3550 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", 3551 nb_fwd_lcores, nb_fwd_ports); 3552 3553 RTE_ETH_FOREACH_DEV(pid) { 3554 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; 3555 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; 3556 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; 3557 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; 3558 struct rte_eth_rxq_info rx_qinfo; 3559 struct rte_eth_txq_info tx_qinfo; 3560 uint16_t rx_free_thresh_tmp; 3561 uint16_t tx_free_thresh_tmp; 3562 uint16_t tx_rs_thresh_tmp; 3563 uint16_t nb_rx_desc_tmp; 3564 uint16_t nb_tx_desc_tmp; 3565 uint64_t offloads_tmp; 3566 uint8_t pthresh_tmp; 3567 uint8_t hthresh_tmp; 3568 uint8_t wthresh_tmp; 3569 int32_t rc; 3570 3571 /* per port config */ 3572 printf(" port %d: RX queue number: %d Tx queue number: %d\n", 3573 (unsigned int)pid, nb_rxq, nb_txq); 3574 3575 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", 3576 ports[pid].dev_conf.rxmode.offloads, 3577 ports[pid].dev_conf.txmode.offloads); 3578 3579 /* per rx queue config only for first queue to be less verbose */ 3580 for (qid = 0; qid < 1; qid++) { 3581 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); 3582 if (rc) { 3583 nb_rx_desc_tmp = nb_rx_desc[qid]; 3584 rx_free_thresh_tmp = 3585 rx_conf[qid].rx_free_thresh; 3586 pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; 3587 hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; 3588 wthresh_tmp = rx_conf[qid].rx_thresh.wthresh; 3589 offloads_tmp = rx_conf[qid].offloads; 3590 } else { 3591 nb_rx_desc_tmp = rx_qinfo.nb_desc; 3592 rx_free_thresh_tmp = 3593 rx_qinfo.conf.rx_free_thresh; 3594 pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; 3595 hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; 3596 wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; 3597 offloads_tmp = rx_qinfo.conf.offloads; 3598 } 3599 3600 printf(" RX queue: %d\n", qid); 3601 printf(" RX desc=%d - RX free threshold=%d\n", 3602 nb_rx_desc_tmp, rx_free_thresh_tmp); 3603 printf(" RX threshold registers: pthresh=%d hthresh=%d " 3604 " wthresh=%d\n", 3605 pthresh_tmp, hthresh_tmp, wthresh_tmp); 3606 printf(" RX Offloads=0x%"PRIx64, offloads_tmp); 3607 if (rx_conf->share_group > 0) 3608 printf(" share_group=%u share_qid=%u", 3609 rx_conf->share_group, 3610 rx_conf->share_qid); 3611 printf("\n"); 3612 } 3613 3614 /* per tx queue config only for first queue to be less verbose */ 3615 for (qid = 0; qid < 1; qid++) { 3616 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); 3617 if (rc) { 3618 nb_tx_desc_tmp = nb_tx_desc[qid]; 3619 tx_free_thresh_tmp = 3620 tx_conf[qid].tx_free_thresh; 3621 pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; 3622 hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; 3623 wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; 3624 offloads_tmp = tx_conf[qid].offloads; 3625 tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; 3626 } else { 3627 nb_tx_desc_tmp = tx_qinfo.nb_desc; 3628 tx_free_thresh_tmp = 3629 tx_qinfo.conf.tx_free_thresh; 3630 pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; 3631 hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; 3632 wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; 3633 offloads_tmp = tx_qinfo.conf.offloads; 3634 tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; 3635 } 3636 3637 printf(" TX queue: %d\n", qid); 3638 printf(" TX desc=%d - TX free threshold=%d\n", 3639 nb_tx_desc_tmp, tx_free_thresh_tmp); 3640 printf(" TX threshold registers: pthresh=%d hthresh=%d " 3641 " wthresh=%d\n", 3642 pthresh_tmp, hthresh_tmp, wthresh_tmp); 3643 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", 3644 offloads_tmp, tx_rs_thresh_tmp); 3645 } 3646 } 3647 } 3648 3649 void 3650 port_rss_reta_info(portid_t port_id, 3651 struct rte_eth_rss_reta_entry64 *reta_conf, 3652 uint16_t nb_entries) 3653 { 3654 uint16_t i, idx, shift; 3655 int ret; 3656 3657 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3658 return; 3659 3660 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); 3661 if (ret != 0) { 3662 fprintf(stderr, 3663 "Failed to get RSS RETA info, return code = %d\n", 3664 ret); 3665 return; 3666 } 3667 3668 for (i = 0; i < nb_entries; i++) { 3669 idx = i / RTE_ETH_RETA_GROUP_SIZE; 3670 shift = i % RTE_ETH_RETA_GROUP_SIZE; 3671 if (!(reta_conf[idx].mask & (1ULL << shift))) 3672 continue; 3673 printf("RSS RETA configuration: hash index=%u, queue=%u\n", 3674 i, reta_conf[idx].reta[shift]); 3675 } 3676 } 3677 3678 /* 3679 * Displays the RSS hash functions of a port, and, optionally, the RSS hash 3680 * key of the port. 3681 */ 3682 void 3683 port_rss_hash_conf_show(portid_t port_id, int show_rss_key) 3684 { 3685 struct rte_eth_rss_conf rss_conf = {0}; 3686 uint8_t rss_key[RSS_HASH_KEY_LENGTH]; 3687 uint64_t rss_hf; 3688 uint8_t i; 3689 int diag; 3690 struct rte_eth_dev_info dev_info; 3691 uint8_t hash_key_size; 3692 int ret; 3693 3694 if (port_id_is_invalid(port_id, ENABLED_WARN)) 3695 return; 3696 3697 ret = eth_dev_info_get_print_err(port_id, &dev_info); 3698 if (ret != 0) 3699 return; 3700 3701 if (dev_info.hash_key_size > 0 && 3702 dev_info.hash_key_size <= sizeof(rss_key)) 3703 hash_key_size = dev_info.hash_key_size; 3704 else { 3705 fprintf(stderr, 3706 "dev_info did not provide a valid hash key size\n"); 3707 return; 3708 } 3709 3710 /* Get RSS hash key if asked to display it */ 3711 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; 3712 rss_conf.rss_key_len = hash_key_size; 3713 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3714 if (diag != 0) { 3715 switch (diag) { 3716 case -ENODEV: 3717 fprintf(stderr, "port index %d invalid\n", port_id); 3718 break; 3719 case -ENOTSUP: 3720 fprintf(stderr, "operation not supported by device\n"); 3721 break; 3722 default: 3723 fprintf(stderr, "operation failed - diag=%d\n", diag); 3724 break; 3725 } 3726 return; 3727 } 3728 rss_hf = rss_conf.rss_hf; 3729 if (rss_hf == 0) { 3730 printf("RSS disabled\n"); 3731 return; 3732 } 3733 printf("RSS functions:\n "); 3734 for (i = 0; rss_type_table[i].str; i++) { 3735 if (rss_type_table[i].rss_type == 0) 3736 continue; 3737 if ((rss_hf & rss_type_table[i].rss_type) == rss_type_table[i].rss_type) 3738 printf("%s ", rss_type_table[i].str); 3739 } 3740 printf("\n"); 3741 if (!show_rss_key) 3742 return; 3743 printf("RSS key:\n"); 3744 for (i = 0; i < hash_key_size; i++) 3745 printf("%02X", rss_key[i]); 3746 printf("\n"); 3747 } 3748 3749 void 3750 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, 3751 uint8_t hash_key_len) 3752 { 3753 struct rte_eth_rss_conf rss_conf; 3754 int diag; 3755 unsigned int i; 3756 3757 rss_conf.rss_key = NULL; 3758 rss_conf.rss_key_len = 0; 3759 rss_conf.rss_hf = 0; 3760 for (i = 0; rss_type_table[i].str; i++) { 3761 if (!strcmp(rss_type_table[i].str, rss_type)) 3762 rss_conf.rss_hf = rss_type_table[i].rss_type; 3763 } 3764 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); 3765 if (diag == 0) { 3766 rss_conf.rss_key = hash_key; 3767 rss_conf.rss_key_len = hash_key_len; 3768 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); 3769 } 3770 if (diag == 0) 3771 return; 3772 3773 switch (diag) { 3774 case -ENODEV: 3775 fprintf(stderr, "port index %d invalid\n", port_id); 3776 break; 3777 case -ENOTSUP: 3778 fprintf(stderr, "operation not supported by device\n"); 3779 break; 3780 default: 3781 fprintf(stderr, "operation failed - diag=%d\n", diag); 3782 break; 3783 } 3784 } 3785 3786 /* 3787 * Check whether a shared rxq scheduled on other lcores. 3788 */ 3789 static bool 3790 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, 3791 portid_t src_port, queueid_t src_rxq, 3792 uint32_t share_group, queueid_t share_rxq) 3793 { 3794 streamid_t sm_id; 3795 streamid_t nb_fs_per_lcore; 3796 lcoreid_t nb_fc; 3797 lcoreid_t lc_id; 3798 struct fwd_stream *fs; 3799 struct rte_port *port; 3800 struct rte_eth_dev_info *dev_info; 3801 struct rte_eth_rxconf *rxq_conf; 3802 3803 nb_fc = cur_fwd_config.nb_fwd_lcores; 3804 /* Check remaining cores. */ 3805 for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { 3806 sm_id = fwd_lcores[lc_id]->stream_idx; 3807 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3808 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3809 sm_id++) { 3810 fs = fwd_streams[sm_id]; 3811 port = &ports[fs->rx_port]; 3812 dev_info = &port->dev_info; 3813 rxq_conf = &port->rx_conf[fs->rx_queue]; 3814 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3815 == 0 || rxq_conf->share_group == 0) 3816 /* Not shared rxq. */ 3817 continue; 3818 if (domain_id != port->dev_info.switch_info.domain_id) 3819 continue; 3820 if (rxq_conf->share_group != share_group) 3821 continue; 3822 if (rxq_conf->share_qid != share_rxq) 3823 continue; 3824 printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", 3825 share_group, share_rxq); 3826 printf(" lcore %hhu Port %hu queue %hu\n", 3827 src_lc, src_port, src_rxq); 3828 printf(" lcore %hhu Port %hu queue %hu\n", 3829 lc_id, fs->rx_port, fs->rx_queue); 3830 printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", 3831 nb_rxq); 3832 return true; 3833 } 3834 } 3835 return false; 3836 } 3837 3838 /* 3839 * Check shared rxq configuration. 3840 * 3841 * Shared group must not being scheduled on different core. 3842 */ 3843 bool 3844 pkt_fwd_shared_rxq_check(void) 3845 { 3846 streamid_t sm_id; 3847 streamid_t nb_fs_per_lcore; 3848 lcoreid_t nb_fc; 3849 lcoreid_t lc_id; 3850 struct fwd_stream *fs; 3851 uint16_t domain_id; 3852 struct rte_port *port; 3853 struct rte_eth_dev_info *dev_info; 3854 struct rte_eth_rxconf *rxq_conf; 3855 3856 if (rxq_share == 0) 3857 return true; 3858 nb_fc = cur_fwd_config.nb_fwd_lcores; 3859 /* 3860 * Check streams on each core, make sure the same switch domain + 3861 * group + queue doesn't get scheduled on other cores. 3862 */ 3863 for (lc_id = 0; lc_id < nb_fc; lc_id++) { 3864 sm_id = fwd_lcores[lc_id]->stream_idx; 3865 nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; 3866 for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; 3867 sm_id++) { 3868 fs = fwd_streams[sm_id]; 3869 /* Update lcore info stream being scheduled. */ 3870 fs->lcore = fwd_lcores[lc_id]; 3871 port = &ports[fs->rx_port]; 3872 dev_info = &port->dev_info; 3873 rxq_conf = &port->rx_conf[fs->rx_queue]; 3874 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) 3875 == 0 || rxq_conf->share_group == 0) 3876 /* Not shared rxq. */ 3877 continue; 3878 /* Check shared rxq not scheduled on remaining cores. */ 3879 domain_id = port->dev_info.switch_info.domain_id; 3880 if (fwd_stream_on_other_lcores(domain_id, lc_id, 3881 fs->rx_port, 3882 fs->rx_queue, 3883 rxq_conf->share_group, 3884 rxq_conf->share_qid)) 3885 return false; 3886 } 3887 } 3888 return true; 3889 } 3890 3891 /* 3892 * Setup forwarding configuration for each logical core. 3893 */ 3894 static void 3895 setup_fwd_config_of_each_lcore(struct fwd_config *cfg) 3896 { 3897 streamid_t nb_fs_per_lcore; 3898 streamid_t nb_fs; 3899 streamid_t sm_id; 3900 lcoreid_t nb_extra; 3901 lcoreid_t nb_fc; 3902 lcoreid_t nb_lc; 3903 lcoreid_t lc_id; 3904 3905 nb_fs = cfg->nb_fwd_streams; 3906 nb_fc = cfg->nb_fwd_lcores; 3907 if (nb_fs <= nb_fc) { 3908 nb_fs_per_lcore = 1; 3909 nb_extra = 0; 3910 } else { 3911 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); 3912 nb_extra = (lcoreid_t) (nb_fs % nb_fc); 3913 } 3914 3915 nb_lc = (lcoreid_t) (nb_fc - nb_extra); 3916 sm_id = 0; 3917 for (lc_id = 0; lc_id < nb_lc; lc_id++) { 3918 fwd_lcores[lc_id]->stream_idx = sm_id; 3919 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; 3920 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3921 } 3922 3923 /* 3924 * Assign extra remaining streams, if any. 3925 */ 3926 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); 3927 for (lc_id = 0; lc_id < nb_extra; lc_id++) { 3928 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; 3929 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; 3930 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); 3931 } 3932 } 3933 3934 static portid_t 3935 fwd_topology_tx_port_get(portid_t rxp) 3936 { 3937 static int warning_once = 1; 3938 3939 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); 3940 3941 switch (port_topology) { 3942 default: 3943 case PORT_TOPOLOGY_PAIRED: 3944 if ((rxp & 0x1) == 0) { 3945 if (rxp + 1 < cur_fwd_config.nb_fwd_ports) 3946 return rxp + 1; 3947 if (warning_once) { 3948 fprintf(stderr, 3949 "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); 3950 warning_once = 0; 3951 } 3952 return rxp; 3953 } 3954 return rxp - 1; 3955 case PORT_TOPOLOGY_CHAINED: 3956 return (rxp + 1) % cur_fwd_config.nb_fwd_ports; 3957 case PORT_TOPOLOGY_LOOP: 3958 return rxp; 3959 } 3960 } 3961 3962 static void 3963 simple_fwd_config_setup(void) 3964 { 3965 portid_t i; 3966 3967 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; 3968 cur_fwd_config.nb_fwd_streams = 3969 (streamid_t) cur_fwd_config.nb_fwd_ports; 3970 3971 /* reinitialize forwarding streams */ 3972 init_fwd_streams(); 3973 3974 /* 3975 * In the simple forwarding test, the number of forwarding cores 3976 * must be lower or equal to the number of forwarding ports. 3977 */ 3978 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 3979 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) 3980 cur_fwd_config.nb_fwd_lcores = 3981 (lcoreid_t) cur_fwd_config.nb_fwd_ports; 3982 setup_fwd_config_of_each_lcore(&cur_fwd_config); 3983 3984 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { 3985 fwd_streams[i]->rx_port = fwd_ports_ids[i]; 3986 fwd_streams[i]->rx_queue = 0; 3987 fwd_streams[i]->tx_port = 3988 fwd_ports_ids[fwd_topology_tx_port_get(i)]; 3989 fwd_streams[i]->tx_queue = 0; 3990 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; 3991 fwd_streams[i]->retry_enabled = retry_enabled; 3992 } 3993 } 3994 3995 /** 3996 * For the RSS forwarding test all streams distributed over lcores. Each stream 3997 * being composed of a RX queue to poll on a RX port for input messages, 3998 * associated with a TX queue of a TX port where to send forwarded packets. 3999 */ 4000 static void 4001 rss_fwd_config_setup(void) 4002 { 4003 portid_t rxp; 4004 portid_t txp; 4005 queueid_t rxq; 4006 queueid_t nb_q; 4007 streamid_t sm_id; 4008 int start; 4009 int end; 4010 4011 nb_q = nb_rxq; 4012 if (nb_q > nb_txq) 4013 nb_q = nb_txq; 4014 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4015 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4016 cur_fwd_config.nb_fwd_streams = 4017 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); 4018 4019 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4020 cur_fwd_config.nb_fwd_lcores = 4021 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4022 4023 /* reinitialize forwarding streams */ 4024 init_fwd_streams(); 4025 4026 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4027 4028 if (proc_id > 0 && nb_q % num_procs != 0) 4029 printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); 4030 4031 /** 4032 * In multi-process, All queues are allocated to different 4033 * processes based on num_procs and proc_id. For example: 4034 * if supports 4 queues(nb_q), 2 processes(num_procs), 4035 * the 0~1 queue for primary process. 4036 * the 2~3 queue for secondary process. 4037 */ 4038 start = proc_id * nb_q / num_procs; 4039 end = start + nb_q / num_procs; 4040 rxp = 0; 4041 rxq = start; 4042 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { 4043 struct fwd_stream *fs; 4044 4045 fs = fwd_streams[sm_id]; 4046 txp = fwd_topology_tx_port_get(rxp); 4047 fs->rx_port = fwd_ports_ids[rxp]; 4048 fs->rx_queue = rxq; 4049 fs->tx_port = fwd_ports_ids[txp]; 4050 fs->tx_queue = rxq; 4051 fs->peer_addr = fs->tx_port; 4052 fs->retry_enabled = retry_enabled; 4053 rxp++; 4054 if (rxp < nb_fwd_ports) 4055 continue; 4056 rxp = 0; 4057 rxq++; 4058 if (rxq >= end) 4059 rxq = start; 4060 } 4061 } 4062 4063 static uint16_t 4064 get_fwd_port_total_tc_num(void) 4065 { 4066 struct rte_eth_dcb_info dcb_info; 4067 uint16_t total_tc_num = 0; 4068 unsigned int i; 4069 4070 for (i = 0; i < nb_fwd_ports; i++) { 4071 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); 4072 total_tc_num += dcb_info.nb_tcs; 4073 } 4074 4075 return total_tc_num; 4076 } 4077 4078 /** 4079 * For the DCB forwarding test, each core is assigned on each traffic class. 4080 * 4081 * Each core is assigned a multi-stream, each stream being composed of 4082 * a RX queue to poll on a RX port for input messages, associated with 4083 * a TX queue of a TX port where to send forwarded packets. All RX and 4084 * TX queues are mapping to the same traffic class. 4085 * If VMDQ and DCB co-exist, each traffic class on different POOLs share 4086 * the same core 4087 */ 4088 static void 4089 dcb_fwd_config_setup(void) 4090 { 4091 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; 4092 portid_t txp, rxp = 0; 4093 queueid_t txq, rxq = 0; 4094 lcoreid_t lc_id; 4095 uint16_t nb_rx_queue, nb_tx_queue; 4096 uint16_t i, j, k, sm_id = 0; 4097 uint16_t total_tc_num; 4098 struct rte_port *port; 4099 uint8_t tc = 0; 4100 portid_t pid; 4101 int ret; 4102 4103 /* 4104 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED 4105 * or RTE_PORT_STOPPED. 4106 * 4107 * Re-configure ports to get updated mapping between tc and queue in 4108 * case the queue number of the port is changed. Skip for started ports 4109 * since modifying queue number and calling dev_configure need to stop 4110 * ports first. 4111 */ 4112 for (pid = 0; pid < nb_fwd_ports; pid++) { 4113 if (port_is_started(pid) == 1) 4114 continue; 4115 4116 port = &ports[pid]; 4117 ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, 4118 &port->dev_conf); 4119 if (ret < 0) { 4120 fprintf(stderr, 4121 "Failed to re-configure port %d, ret = %d.\n", 4122 pid, ret); 4123 return; 4124 } 4125 } 4126 4127 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4128 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4129 cur_fwd_config.nb_fwd_streams = 4130 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4131 total_tc_num = get_fwd_port_total_tc_num(); 4132 if (cur_fwd_config.nb_fwd_lcores > total_tc_num) 4133 cur_fwd_config.nb_fwd_lcores = total_tc_num; 4134 4135 /* reinitialize forwarding streams */ 4136 init_fwd_streams(); 4137 sm_id = 0; 4138 txp = 1; 4139 /* get the dcb info on the first RX and TX ports */ 4140 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4141 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4142 4143 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4144 fwd_lcores[lc_id]->stream_nb = 0; 4145 fwd_lcores[lc_id]->stream_idx = sm_id; 4146 for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { 4147 /* if the nb_queue is zero, means this tc is 4148 * not enabled on the POOL 4149 */ 4150 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) 4151 break; 4152 k = fwd_lcores[lc_id]->stream_nb + 4153 fwd_lcores[lc_id]->stream_idx; 4154 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; 4155 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; 4156 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4157 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; 4158 for (j = 0; j < nb_rx_queue; j++) { 4159 struct fwd_stream *fs; 4160 4161 fs = fwd_streams[k + j]; 4162 fs->rx_port = fwd_ports_ids[rxp]; 4163 fs->rx_queue = rxq + j; 4164 fs->tx_port = fwd_ports_ids[txp]; 4165 fs->tx_queue = txq + j % nb_tx_queue; 4166 fs->peer_addr = fs->tx_port; 4167 fs->retry_enabled = retry_enabled; 4168 } 4169 fwd_lcores[lc_id]->stream_nb += 4170 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; 4171 } 4172 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); 4173 4174 tc++; 4175 if (tc < rxp_dcb_info.nb_tcs) 4176 continue; 4177 /* Restart from TC 0 on next RX port */ 4178 tc = 0; 4179 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) 4180 rxp = (portid_t) 4181 (rxp + ((nb_ports >> 1) / nb_fwd_ports)); 4182 else 4183 rxp++; 4184 if (rxp >= nb_fwd_ports) 4185 return; 4186 /* get the dcb information on next RX and TX ports */ 4187 if ((rxp & 0x1) == 0) 4188 txp = (portid_t) (rxp + 1); 4189 else 4190 txp = (portid_t) (rxp - 1); 4191 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); 4192 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); 4193 } 4194 } 4195 4196 static void 4197 icmp_echo_config_setup(void) 4198 { 4199 portid_t rxp; 4200 queueid_t rxq; 4201 lcoreid_t lc_id; 4202 uint16_t sm_id; 4203 4204 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) 4205 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) 4206 (nb_txq * nb_fwd_ports); 4207 else 4208 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; 4209 cur_fwd_config.nb_fwd_ports = nb_fwd_ports; 4210 cur_fwd_config.nb_fwd_streams = 4211 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); 4212 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) 4213 cur_fwd_config.nb_fwd_lcores = 4214 (lcoreid_t)cur_fwd_config.nb_fwd_streams; 4215 if (verbose_level > 0) { 4216 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", 4217 __FUNCTION__, 4218 cur_fwd_config.nb_fwd_lcores, 4219 cur_fwd_config.nb_fwd_ports, 4220 cur_fwd_config.nb_fwd_streams); 4221 } 4222 4223 /* reinitialize forwarding streams */ 4224 init_fwd_streams(); 4225 setup_fwd_config_of_each_lcore(&cur_fwd_config); 4226 rxp = 0; rxq = 0; 4227 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { 4228 if (verbose_level > 0) 4229 printf(" core=%d: \n", lc_id); 4230 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4231 struct fwd_stream *fs; 4232 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4233 fs->rx_port = fwd_ports_ids[rxp]; 4234 fs->rx_queue = rxq; 4235 fs->tx_port = fs->rx_port; 4236 fs->tx_queue = rxq; 4237 fs->peer_addr = fs->tx_port; 4238 fs->retry_enabled = retry_enabled; 4239 if (verbose_level > 0) 4240 printf(" stream=%d port=%d rxq=%d txq=%d\n", 4241 sm_id, fs->rx_port, fs->rx_queue, 4242 fs->tx_queue); 4243 rxq = (queueid_t) (rxq + 1); 4244 if (rxq == nb_rxq) { 4245 rxq = 0; 4246 rxp = (portid_t) (rxp + 1); 4247 } 4248 } 4249 } 4250 } 4251 4252 void 4253 fwd_config_setup(void) 4254 { 4255 struct rte_port *port; 4256 portid_t pt_id; 4257 unsigned int i; 4258 4259 cur_fwd_config.fwd_eng = cur_fwd_eng; 4260 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { 4261 icmp_echo_config_setup(); 4262 return; 4263 } 4264 4265 if ((nb_rxq > 1) && (nb_txq > 1)){ 4266 if (dcb_config) { 4267 for (i = 0; i < nb_fwd_ports; i++) { 4268 pt_id = fwd_ports_ids[i]; 4269 port = &ports[pt_id]; 4270 if (!port->dcb_flag) { 4271 fprintf(stderr, 4272 "In DCB mode, all forwarding ports must be configured in this mode.\n"); 4273 return; 4274 } 4275 } 4276 if (nb_fwd_lcores == 1) { 4277 fprintf(stderr, 4278 "In DCB mode,the nb forwarding cores should be larger than 1.\n"); 4279 return; 4280 } 4281 4282 dcb_fwd_config_setup(); 4283 } else 4284 rss_fwd_config_setup(); 4285 } 4286 else 4287 simple_fwd_config_setup(); 4288 } 4289 4290 static const char * 4291 mp_alloc_to_str(uint8_t mode) 4292 { 4293 switch (mode) { 4294 case MP_ALLOC_NATIVE: 4295 return "native"; 4296 case MP_ALLOC_ANON: 4297 return "anon"; 4298 case MP_ALLOC_XMEM: 4299 return "xmem"; 4300 case MP_ALLOC_XMEM_HUGE: 4301 return "xmemhuge"; 4302 case MP_ALLOC_XBUF: 4303 return "xbuf"; 4304 default: 4305 return "invalid"; 4306 } 4307 } 4308 4309 void 4310 pkt_fwd_config_display(struct fwd_config *cfg) 4311 { 4312 struct fwd_stream *fs; 4313 lcoreid_t lc_id; 4314 streamid_t sm_id; 4315 4316 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " 4317 "NUMA support %s, MP allocation mode: %s\n", 4318 cfg->fwd_eng->fwd_mode_name, 4319 retry_enabled == 0 ? "" : " with retry", 4320 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, 4321 numa_support == 1 ? "enabled" : "disabled", 4322 mp_alloc_to_str(mp_alloc_type)); 4323 4324 if (retry_enabled) 4325 printf("TX retry num: %u, delay between TX retries: %uus\n", 4326 burst_tx_retry_num, burst_tx_delay_time); 4327 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { 4328 printf("Logical Core %u (socket %u) forwards packets on " 4329 "%d streams:", 4330 fwd_lcores_cpuids[lc_id], 4331 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 4332 fwd_lcores[lc_id]->stream_nb); 4333 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { 4334 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; 4335 printf("\n RX P=%d/Q=%d (socket %u) -> TX " 4336 "P=%d/Q=%d (socket %u) ", 4337 fs->rx_port, fs->rx_queue, 4338 ports[fs->rx_port].socket_id, 4339 fs->tx_port, fs->tx_queue, 4340 ports[fs->tx_port].socket_id); 4341 print_ethaddr("peer=", 4342 &peer_eth_addrs[fs->peer_addr]); 4343 } 4344 printf("\n"); 4345 } 4346 printf("\n"); 4347 } 4348 4349 void 4350 set_fwd_eth_peer(portid_t port_id, char *peer_addr) 4351 { 4352 struct rte_ether_addr new_peer_addr; 4353 if (!rte_eth_dev_is_valid_port(port_id)) { 4354 fprintf(stderr, "Error: Invalid port number %i\n", port_id); 4355 return; 4356 } 4357 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { 4358 fprintf(stderr, "Error: Invalid ethernet address: %s\n", 4359 peer_addr); 4360 return; 4361 } 4362 peer_eth_addrs[port_id] = new_peer_addr; 4363 } 4364 4365 int 4366 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) 4367 { 4368 unsigned int i; 4369 unsigned int lcore_cpuid; 4370 int record_now; 4371 4372 record_now = 0; 4373 again: 4374 for (i = 0; i < nb_lc; i++) { 4375 lcore_cpuid = lcorelist[i]; 4376 if (! rte_lcore_is_enabled(lcore_cpuid)) { 4377 fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); 4378 return -1; 4379 } 4380 if (lcore_cpuid == rte_get_main_lcore()) { 4381 fprintf(stderr, 4382 "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", 4383 lcore_cpuid); 4384 return -1; 4385 } 4386 if (record_now) 4387 fwd_lcores_cpuids[i] = lcore_cpuid; 4388 } 4389 if (record_now == 0) { 4390 record_now = 1; 4391 goto again; 4392 } 4393 nb_cfg_lcores = (lcoreid_t) nb_lc; 4394 if (nb_fwd_lcores != (lcoreid_t) nb_lc) { 4395 printf("previous number of forwarding cores %u - changed to " 4396 "number of configured cores %u\n", 4397 (unsigned int) nb_fwd_lcores, nb_lc); 4398 nb_fwd_lcores = (lcoreid_t) nb_lc; 4399 } 4400 4401 return 0; 4402 } 4403 4404 int 4405 set_fwd_lcores_mask(uint64_t lcoremask) 4406 { 4407 unsigned int lcorelist[64]; 4408 unsigned int nb_lc; 4409 unsigned int i; 4410 4411 if (lcoremask == 0) { 4412 fprintf(stderr, "Invalid NULL mask of cores\n"); 4413 return -1; 4414 } 4415 nb_lc = 0; 4416 for (i = 0; i < 64; i++) { 4417 if (! ((uint64_t)(1ULL << i) & lcoremask)) 4418 continue; 4419 lcorelist[nb_lc++] = i; 4420 } 4421 return set_fwd_lcores_list(lcorelist, nb_lc); 4422 } 4423 4424 void 4425 set_fwd_lcores_number(uint16_t nb_lc) 4426 { 4427 if (test_done == 0) { 4428 fprintf(stderr, "Please stop forwarding first\n"); 4429 return; 4430 } 4431 if (nb_lc > nb_cfg_lcores) { 4432 fprintf(stderr, 4433 "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", 4434 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); 4435 return; 4436 } 4437 nb_fwd_lcores = (lcoreid_t) nb_lc; 4438 printf("Number of forwarding cores set to %u\n", 4439 (unsigned int) nb_fwd_lcores); 4440 } 4441 4442 void 4443 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) 4444 { 4445 unsigned int i; 4446 portid_t port_id; 4447 int record_now; 4448 4449 record_now = 0; 4450 again: 4451 for (i = 0; i < nb_pt; i++) { 4452 port_id = (portid_t) portlist[i]; 4453 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4454 return; 4455 if (record_now) 4456 fwd_ports_ids[i] = port_id; 4457 } 4458 if (record_now == 0) { 4459 record_now = 1; 4460 goto again; 4461 } 4462 nb_cfg_ports = (portid_t) nb_pt; 4463 if (nb_fwd_ports != (portid_t) nb_pt) { 4464 printf("previous number of forwarding ports %u - changed to " 4465 "number of configured ports %u\n", 4466 (unsigned int) nb_fwd_ports, nb_pt); 4467 nb_fwd_ports = (portid_t) nb_pt; 4468 } 4469 } 4470 4471 /** 4472 * Parse the user input and obtain the list of forwarding ports 4473 * 4474 * @param[in] list 4475 * String containing the user input. User can specify 4476 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. 4477 * For example, if the user wants to use all the available 4478 * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. 4479 * If the user wants to use only the ports 1,2 then the input 4480 * is 1,2. 4481 * valid characters are '-' and ',' 4482 * @param[out] values 4483 * This array will be filled with a list of port IDs 4484 * based on the user input 4485 * Note that duplicate entries are discarded and only the first 4486 * count entries in this array are port IDs and all the rest 4487 * will contain default values 4488 * @param[in] maxsize 4489 * This parameter denotes 2 things 4490 * 1) Number of elements in the values array 4491 * 2) Maximum value of each element in the values array 4492 * @return 4493 * On success, returns total count of parsed port IDs 4494 * On failure, returns 0 4495 */ 4496 static unsigned int 4497 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) 4498 { 4499 unsigned int count = 0; 4500 char *end = NULL; 4501 int min, max; 4502 int value, i; 4503 unsigned int marked[maxsize]; 4504 4505 if (list == NULL || values == NULL) 4506 return 0; 4507 4508 for (i = 0; i < (int)maxsize; i++) 4509 marked[i] = 0; 4510 4511 min = INT_MAX; 4512 4513 do { 4514 /*Remove the blank spaces if any*/ 4515 while (isblank(*list)) 4516 list++; 4517 if (*list == '\0') 4518 break; 4519 errno = 0; 4520 value = strtol(list, &end, 10); 4521 if (errno || end == NULL) 4522 return 0; 4523 if (value < 0 || value >= (int)maxsize) 4524 return 0; 4525 while (isblank(*end)) 4526 end++; 4527 if (*end == '-' && min == INT_MAX) { 4528 min = value; 4529 } else if ((*end == ',') || (*end == '\0')) { 4530 max = value; 4531 if (min == INT_MAX) 4532 min = value; 4533 for (i = min; i <= max; i++) { 4534 if (count < maxsize) { 4535 if (marked[i]) 4536 continue; 4537 values[count] = i; 4538 marked[i] = 1; 4539 count++; 4540 } 4541 } 4542 min = INT_MAX; 4543 } else 4544 return 0; 4545 list = end + 1; 4546 } while (*end != '\0'); 4547 4548 return count; 4549 } 4550 4551 void 4552 parse_fwd_portlist(const char *portlist) 4553 { 4554 unsigned int portcount; 4555 unsigned int portindex[RTE_MAX_ETHPORTS]; 4556 unsigned int i, valid_port_count = 0; 4557 4558 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); 4559 if (!portcount) 4560 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); 4561 4562 /* 4563 * Here we verify the validity of the ports 4564 * and thereby calculate the total number of 4565 * valid ports 4566 */ 4567 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { 4568 if (rte_eth_dev_is_valid_port(portindex[i])) { 4569 portindex[valid_port_count] = portindex[i]; 4570 valid_port_count++; 4571 } 4572 } 4573 4574 set_fwd_ports_list(portindex, valid_port_count); 4575 } 4576 4577 void 4578 set_fwd_ports_mask(uint64_t portmask) 4579 { 4580 unsigned int portlist[64]; 4581 unsigned int nb_pt; 4582 unsigned int i; 4583 4584 if (portmask == 0) { 4585 fprintf(stderr, "Invalid NULL mask of ports\n"); 4586 return; 4587 } 4588 nb_pt = 0; 4589 RTE_ETH_FOREACH_DEV(i) { 4590 if (! ((uint64_t)(1ULL << i) & portmask)) 4591 continue; 4592 portlist[nb_pt++] = i; 4593 } 4594 set_fwd_ports_list(portlist, nb_pt); 4595 } 4596 4597 void 4598 set_fwd_ports_number(uint16_t nb_pt) 4599 { 4600 if (nb_pt > nb_cfg_ports) { 4601 fprintf(stderr, 4602 "nb fwd ports %u > %u (number of configured ports) - ignored\n", 4603 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); 4604 return; 4605 } 4606 nb_fwd_ports = (portid_t) nb_pt; 4607 printf("Number of forwarding ports set to %u\n", 4608 (unsigned int) nb_fwd_ports); 4609 } 4610 4611 int 4612 port_is_forwarding(portid_t port_id) 4613 { 4614 unsigned int i; 4615 4616 if (port_id_is_invalid(port_id, ENABLED_WARN)) 4617 return -1; 4618 4619 for (i = 0; i < nb_fwd_ports; i++) { 4620 if (fwd_ports_ids[i] == port_id) 4621 return 1; 4622 } 4623 4624 return 0; 4625 } 4626 4627 void 4628 set_nb_pkt_per_burst(uint16_t nb) 4629 { 4630 if (nb > MAX_PKT_BURST) { 4631 fprintf(stderr, 4632 "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", 4633 (unsigned int) nb, (unsigned int) MAX_PKT_BURST); 4634 return; 4635 } 4636 nb_pkt_per_burst = nb; 4637 printf("Number of packets per burst set to %u\n", 4638 (unsigned int) nb_pkt_per_burst); 4639 } 4640 4641 static const char * 4642 tx_split_get_name(enum tx_pkt_split split) 4643 { 4644 uint32_t i; 4645 4646 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 4647 if (tx_split_name[i].split == split) 4648 return tx_split_name[i].name; 4649 } 4650 return NULL; 4651 } 4652 4653 void 4654 set_tx_pkt_split(const char *name) 4655 { 4656 uint32_t i; 4657 4658 for (i = 0; i != RTE_DIM(tx_split_name); i++) { 4659 if (strcmp(tx_split_name[i].name, name) == 0) { 4660 tx_pkt_split = tx_split_name[i].split; 4661 return; 4662 } 4663 } 4664 fprintf(stderr, "unknown value: \"%s\"\n", name); 4665 } 4666 4667 int 4668 parse_fec_mode(const char *name, uint32_t *fec_capa) 4669 { 4670 uint8_t i; 4671 4672 for (i = 0; i < RTE_DIM(fec_mode_name); i++) { 4673 if (strcmp(fec_mode_name[i].name, name) == 0) { 4674 *fec_capa = 4675 RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); 4676 return 0; 4677 } 4678 } 4679 return -1; 4680 } 4681 4682 void 4683 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 4684 { 4685 unsigned int i, j; 4686 4687 printf("FEC capabilities:\n"); 4688 4689 for (i = 0; i < num; i++) { 4690 printf("%s : ", 4691 rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); 4692 4693 for (j = 0; j < RTE_DIM(fec_mode_name); j++) { 4694 if (RTE_ETH_FEC_MODE_TO_CAPA(j) & 4695 speed_fec_capa[i].capa) 4696 printf("%s ", fec_mode_name[j].name); 4697 } 4698 printf("\n"); 4699 } 4700 } 4701 4702 void 4703 show_rx_pkt_offsets(void) 4704 { 4705 uint32_t i, n; 4706 4707 n = rx_pkt_nb_offs; 4708 printf("Number of offsets: %u\n", n); 4709 if (n) { 4710 printf("Segment offsets: "); 4711 for (i = 0; i != n - 1; i++) 4712 printf("%hu,", rx_pkt_seg_offsets[i]); 4713 printf("%hu\n", rx_pkt_seg_lengths[i]); 4714 } 4715 } 4716 4717 void 4718 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) 4719 { 4720 unsigned int i; 4721 4722 if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { 4723 printf("nb segments per RX packets=%u >= " 4724 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); 4725 return; 4726 } 4727 4728 /* 4729 * No extra check here, the segment length will be checked by PMD 4730 * in the extended queue setup. 4731 */ 4732 for (i = 0; i < nb_offs; i++) { 4733 if (seg_offsets[i] >= UINT16_MAX) { 4734 printf("offset[%u]=%u > UINT16_MAX - give up\n", 4735 i, seg_offsets[i]); 4736 return; 4737 } 4738 } 4739 4740 for (i = 0; i < nb_offs; i++) 4741 rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; 4742 4743 rx_pkt_nb_offs = (uint8_t) nb_offs; 4744 } 4745 4746 void 4747 show_rx_pkt_segments(void) 4748 { 4749 uint32_t i, n; 4750 4751 n = rx_pkt_nb_segs; 4752 printf("Number of segments: %u\n", n); 4753 if (n) { 4754 printf("Segment sizes: "); 4755 for (i = 0; i != n - 1; i++) 4756 printf("%hu,", rx_pkt_seg_lengths[i]); 4757 printf("%hu\n", rx_pkt_seg_lengths[i]); 4758 } 4759 } 4760 4761 void 4762 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 4763 { 4764 unsigned int i; 4765 4766 if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { 4767 printf("nb segments per RX packets=%u >= " 4768 "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); 4769 return; 4770 } 4771 4772 /* 4773 * No extra check here, the segment length will be checked by PMD 4774 * in the extended queue setup. 4775 */ 4776 for (i = 0; i < nb_segs; i++) { 4777 if (seg_lengths[i] >= UINT16_MAX) { 4778 printf("length[%u]=%u > UINT16_MAX - give up\n", 4779 i, seg_lengths[i]); 4780 return; 4781 } 4782 } 4783 4784 for (i = 0; i < nb_segs; i++) 4785 rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 4786 4787 rx_pkt_nb_segs = (uint8_t) nb_segs; 4788 } 4789 4790 void 4791 show_tx_pkt_segments(void) 4792 { 4793 uint32_t i, n; 4794 const char *split; 4795 4796 n = tx_pkt_nb_segs; 4797 split = tx_split_get_name(tx_pkt_split); 4798 4799 printf("Number of segments: %u\n", n); 4800 printf("Segment sizes: "); 4801 for (i = 0; i != n - 1; i++) 4802 printf("%hu,", tx_pkt_seg_lengths[i]); 4803 printf("%hu\n", tx_pkt_seg_lengths[i]); 4804 printf("Split packet: %s\n", split); 4805 } 4806 4807 static bool 4808 nb_segs_is_invalid(unsigned int nb_segs) 4809 { 4810 uint16_t ring_size; 4811 uint16_t queue_id; 4812 uint16_t port_id; 4813 int ret; 4814 4815 RTE_ETH_FOREACH_DEV(port_id) { 4816 for (queue_id = 0; queue_id < nb_txq; queue_id++) { 4817 ret = get_tx_ring_size(port_id, queue_id, &ring_size); 4818 if (ret) { 4819 /* Port may not be initialized yet, can't say 4820 * the port is invalid in this stage. 4821 */ 4822 continue; 4823 } 4824 if (ring_size < nb_segs) { 4825 printf("nb segments per TX packets=%u >= TX " 4826 "queue(%u) ring_size=%u - txpkts ignored\n", 4827 nb_segs, queue_id, ring_size); 4828 return true; 4829 } 4830 } 4831 } 4832 4833 return false; 4834 } 4835 4836 void 4837 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) 4838 { 4839 uint16_t tx_pkt_len; 4840 unsigned int i; 4841 4842 /* 4843 * For single segment settings failed check is ignored. 4844 * It is a very basic capability to send the single segment 4845 * packets, suppose it is always supported. 4846 */ 4847 if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { 4848 fprintf(stderr, 4849 "Tx segment size(%u) is not supported - txpkts ignored\n", 4850 nb_segs); 4851 return; 4852 } 4853 4854 if (nb_segs > RTE_MAX_SEGS_PER_PKT) { 4855 fprintf(stderr, 4856 "Tx segment size(%u) is bigger than max number of segment(%u)\n", 4857 nb_segs, RTE_MAX_SEGS_PER_PKT); 4858 return; 4859 } 4860 4861 /* 4862 * Check that each segment length is greater or equal than 4863 * the mbuf data size. 4864 * Check also that the total packet length is greater or equal than the 4865 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + 4866 * 20 + 8). 4867 */ 4868 tx_pkt_len = 0; 4869 for (i = 0; i < nb_segs; i++) { 4870 if (seg_lengths[i] > mbuf_data_size[0]) { 4871 fprintf(stderr, 4872 "length[%u]=%u > mbuf_data_size=%u - give up\n", 4873 i, seg_lengths[i], mbuf_data_size[0]); 4874 return; 4875 } 4876 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); 4877 } 4878 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { 4879 fprintf(stderr, "total packet length=%u < %d - give up\n", 4880 (unsigned) tx_pkt_len, 4881 (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); 4882 return; 4883 } 4884 4885 for (i = 0; i < nb_segs; i++) 4886 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; 4887 4888 tx_pkt_length = tx_pkt_len; 4889 tx_pkt_nb_segs = (uint8_t) nb_segs; 4890 } 4891 4892 void 4893 show_tx_pkt_times(void) 4894 { 4895 printf("Interburst gap: %u\n", tx_pkt_times_inter); 4896 printf("Intraburst gap: %u\n", tx_pkt_times_intra); 4897 } 4898 4899 void 4900 set_tx_pkt_times(unsigned int *tx_times) 4901 { 4902 tx_pkt_times_inter = tx_times[0]; 4903 tx_pkt_times_intra = tx_times[1]; 4904 } 4905 4906 #ifdef RTE_LIB_GRO 4907 void 4908 setup_gro(const char *onoff, portid_t port_id) 4909 { 4910 if (!rte_eth_dev_is_valid_port(port_id)) { 4911 fprintf(stderr, "invalid port id %u\n", port_id); 4912 return; 4913 } 4914 if (test_done == 0) { 4915 fprintf(stderr, 4916 "Before enable/disable GRO, please stop forwarding first\n"); 4917 return; 4918 } 4919 if (strcmp(onoff, "on") == 0) { 4920 if (gro_ports[port_id].enable != 0) { 4921 fprintf(stderr, 4922 "Port %u has enabled GRO. Please disable GRO first\n", 4923 port_id); 4924 return; 4925 } 4926 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4927 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; 4928 gro_ports[port_id].param.max_flow_num = 4929 GRO_DEFAULT_FLOW_NUM; 4930 gro_ports[port_id].param.max_item_per_flow = 4931 GRO_DEFAULT_ITEM_NUM_PER_FLOW; 4932 } 4933 gro_ports[port_id].enable = 1; 4934 } else { 4935 if (gro_ports[port_id].enable == 0) { 4936 fprintf(stderr, "Port %u has disabled GRO\n", port_id); 4937 return; 4938 } 4939 gro_ports[port_id].enable = 0; 4940 } 4941 } 4942 4943 void 4944 setup_gro_flush_cycles(uint8_t cycles) 4945 { 4946 if (test_done == 0) { 4947 fprintf(stderr, 4948 "Before change flush interval for GRO, please stop forwarding first.\n"); 4949 return; 4950 } 4951 4952 if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < 4953 GRO_DEFAULT_FLUSH_CYCLES) { 4954 fprintf(stderr, 4955 "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", 4956 GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); 4957 cycles = GRO_DEFAULT_FLUSH_CYCLES; 4958 } 4959 4960 gro_flush_cycles = cycles; 4961 } 4962 4963 void 4964 show_gro(portid_t port_id) 4965 { 4966 struct rte_gro_param *param; 4967 uint32_t max_pkts_num; 4968 4969 param = &gro_ports[port_id].param; 4970 4971 if (!rte_eth_dev_is_valid_port(port_id)) { 4972 fprintf(stderr, "Invalid port id %u.\n", port_id); 4973 return; 4974 } 4975 if (gro_ports[port_id].enable) { 4976 printf("GRO type: TCP/IPv4\n"); 4977 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { 4978 max_pkts_num = param->max_flow_num * 4979 param->max_item_per_flow; 4980 } else 4981 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; 4982 printf("Max number of packets to perform GRO: %u\n", 4983 max_pkts_num); 4984 printf("Flushing cycles: %u\n", gro_flush_cycles); 4985 } else 4986 printf("Port %u doesn't enable GRO.\n", port_id); 4987 } 4988 #endif /* RTE_LIB_GRO */ 4989 4990 #ifdef RTE_LIB_GSO 4991 void 4992 setup_gso(const char *mode, portid_t port_id) 4993 { 4994 if (!rte_eth_dev_is_valid_port(port_id)) { 4995 fprintf(stderr, "invalid port id %u\n", port_id); 4996 return; 4997 } 4998 if (strcmp(mode, "on") == 0) { 4999 if (test_done == 0) { 5000 fprintf(stderr, 5001 "before enabling GSO, please stop forwarding first\n"); 5002 return; 5003 } 5004 gso_ports[port_id].enable = 1; 5005 } else if (strcmp(mode, "off") == 0) { 5006 if (test_done == 0) { 5007 fprintf(stderr, 5008 "before disabling GSO, please stop forwarding first\n"); 5009 return; 5010 } 5011 gso_ports[port_id].enable = 0; 5012 } 5013 } 5014 #endif /* RTE_LIB_GSO */ 5015 5016 char* 5017 list_pkt_forwarding_modes(void) 5018 { 5019 static char fwd_modes[128] = ""; 5020 const char *separator = "|"; 5021 struct fwd_engine *fwd_eng; 5022 unsigned i = 0; 5023 5024 if (strlen (fwd_modes) == 0) { 5025 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5026 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5027 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5028 strncat(fwd_modes, separator, 5029 sizeof(fwd_modes) - strlen(fwd_modes) - 1); 5030 } 5031 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5032 } 5033 5034 return fwd_modes; 5035 } 5036 5037 char* 5038 list_pkt_forwarding_retry_modes(void) 5039 { 5040 static char fwd_modes[128] = ""; 5041 const char *separator = "|"; 5042 struct fwd_engine *fwd_eng; 5043 unsigned i = 0; 5044 5045 if (strlen(fwd_modes) == 0) { 5046 while ((fwd_eng = fwd_engines[i++]) != NULL) { 5047 if (fwd_eng == &rx_only_engine) 5048 continue; 5049 strncat(fwd_modes, fwd_eng->fwd_mode_name, 5050 sizeof(fwd_modes) - 5051 strlen(fwd_modes) - 1); 5052 strncat(fwd_modes, separator, 5053 sizeof(fwd_modes) - 5054 strlen(fwd_modes) - 1); 5055 } 5056 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; 5057 } 5058 5059 return fwd_modes; 5060 } 5061 5062 void 5063 set_pkt_forwarding_mode(const char *fwd_mode_name) 5064 { 5065 struct fwd_engine *fwd_eng; 5066 unsigned i; 5067 5068 i = 0; 5069 while ((fwd_eng = fwd_engines[i]) != NULL) { 5070 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { 5071 printf("Set %s packet forwarding mode%s\n", 5072 fwd_mode_name, 5073 retry_enabled == 0 ? "" : " with retry"); 5074 cur_fwd_eng = fwd_eng; 5075 return; 5076 } 5077 i++; 5078 } 5079 fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); 5080 } 5081 5082 void 5083 add_rx_dump_callbacks(portid_t portid) 5084 { 5085 struct rte_eth_dev_info dev_info; 5086 uint16_t queue; 5087 int ret; 5088 5089 if (port_id_is_invalid(portid, ENABLED_WARN)) 5090 return; 5091 5092 ret = eth_dev_info_get_print_err(portid, &dev_info); 5093 if (ret != 0) 5094 return; 5095 5096 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5097 if (!ports[portid].rx_dump_cb[queue]) 5098 ports[portid].rx_dump_cb[queue] = 5099 rte_eth_add_rx_callback(portid, queue, 5100 dump_rx_pkts, NULL); 5101 } 5102 5103 void 5104 add_tx_dump_callbacks(portid_t portid) 5105 { 5106 struct rte_eth_dev_info dev_info; 5107 uint16_t queue; 5108 int ret; 5109 5110 if (port_id_is_invalid(portid, ENABLED_WARN)) 5111 return; 5112 5113 ret = eth_dev_info_get_print_err(portid, &dev_info); 5114 if (ret != 0) 5115 return; 5116 5117 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5118 if (!ports[portid].tx_dump_cb[queue]) 5119 ports[portid].tx_dump_cb[queue] = 5120 rte_eth_add_tx_callback(portid, queue, 5121 dump_tx_pkts, NULL); 5122 } 5123 5124 void 5125 remove_rx_dump_callbacks(portid_t portid) 5126 { 5127 struct rte_eth_dev_info dev_info; 5128 uint16_t queue; 5129 int ret; 5130 5131 if (port_id_is_invalid(portid, ENABLED_WARN)) 5132 return; 5133 5134 ret = eth_dev_info_get_print_err(portid, &dev_info); 5135 if (ret != 0) 5136 return; 5137 5138 for (queue = 0; queue < dev_info.nb_rx_queues; queue++) 5139 if (ports[portid].rx_dump_cb[queue]) { 5140 rte_eth_remove_rx_callback(portid, queue, 5141 ports[portid].rx_dump_cb[queue]); 5142 ports[portid].rx_dump_cb[queue] = NULL; 5143 } 5144 } 5145 5146 void 5147 remove_tx_dump_callbacks(portid_t portid) 5148 { 5149 struct rte_eth_dev_info dev_info; 5150 uint16_t queue; 5151 int ret; 5152 5153 if (port_id_is_invalid(portid, ENABLED_WARN)) 5154 return; 5155 5156 ret = eth_dev_info_get_print_err(portid, &dev_info); 5157 if (ret != 0) 5158 return; 5159 5160 for (queue = 0; queue < dev_info.nb_tx_queues; queue++) 5161 if (ports[portid].tx_dump_cb[queue]) { 5162 rte_eth_remove_tx_callback(portid, queue, 5163 ports[portid].tx_dump_cb[queue]); 5164 ports[portid].tx_dump_cb[queue] = NULL; 5165 } 5166 } 5167 5168 void 5169 configure_rxtx_dump_callbacks(uint16_t verbose) 5170 { 5171 portid_t portid; 5172 5173 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5174 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); 5175 return; 5176 #endif 5177 5178 RTE_ETH_FOREACH_DEV(portid) 5179 { 5180 if (verbose == 1 || verbose > 2) 5181 add_rx_dump_callbacks(portid); 5182 else 5183 remove_rx_dump_callbacks(portid); 5184 if (verbose >= 2) 5185 add_tx_dump_callbacks(portid); 5186 else 5187 remove_tx_dump_callbacks(portid); 5188 } 5189 } 5190 5191 void 5192 set_verbose_level(uint16_t vb_level) 5193 { 5194 printf("Change verbose level from %u to %u\n", 5195 (unsigned int) verbose_level, (unsigned int) vb_level); 5196 verbose_level = vb_level; 5197 configure_rxtx_dump_callbacks(verbose_level); 5198 } 5199 5200 void 5201 vlan_extend_set(portid_t port_id, int on) 5202 { 5203 int diag; 5204 int vlan_offload; 5205 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5206 5207 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5208 return; 5209 5210 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5211 5212 if (on) { 5213 vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 5214 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 5215 } else { 5216 vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; 5217 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 5218 } 5219 5220 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5221 if (diag < 0) { 5222 fprintf(stderr, 5223 "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", 5224 port_id, on, diag); 5225 return; 5226 } 5227 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5228 } 5229 5230 void 5231 rx_vlan_strip_set(portid_t port_id, int on) 5232 { 5233 int diag; 5234 int vlan_offload; 5235 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5236 5237 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5238 return; 5239 5240 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5241 5242 if (on) { 5243 vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; 5244 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 5245 } else { 5246 vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; 5247 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 5248 } 5249 5250 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5251 if (diag < 0) { 5252 fprintf(stderr, 5253 "%s(port_pi=%d, on=%d) failed diag=%d\n", 5254 __func__, port_id, on, diag); 5255 return; 5256 } 5257 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5258 } 5259 5260 void 5261 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) 5262 { 5263 int diag; 5264 5265 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5266 return; 5267 5268 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); 5269 if (diag < 0) 5270 fprintf(stderr, 5271 "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", 5272 __func__, port_id, queue_id, on, diag); 5273 } 5274 5275 void 5276 rx_vlan_filter_set(portid_t port_id, int on) 5277 { 5278 int diag; 5279 int vlan_offload; 5280 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5281 5282 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5283 return; 5284 5285 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5286 5287 if (on) { 5288 vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; 5289 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 5290 } else { 5291 vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; 5292 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 5293 } 5294 5295 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5296 if (diag < 0) { 5297 fprintf(stderr, 5298 "%s(port_pi=%d, on=%d) failed diag=%d\n", 5299 __func__, port_id, on, diag); 5300 return; 5301 } 5302 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5303 } 5304 5305 void 5306 rx_vlan_qinq_strip_set(portid_t port_id, int on) 5307 { 5308 int diag; 5309 int vlan_offload; 5310 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; 5311 5312 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5313 return; 5314 5315 vlan_offload = rte_eth_dev_get_vlan_offload(port_id); 5316 5317 if (on) { 5318 vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; 5319 port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 5320 } else { 5321 vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; 5322 port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 5323 } 5324 5325 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); 5326 if (diag < 0) { 5327 fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", 5328 __func__, port_id, on, diag); 5329 return; 5330 } 5331 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; 5332 } 5333 5334 int 5335 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) 5336 { 5337 int diag; 5338 5339 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5340 return 1; 5341 if (vlan_id_is_invalid(vlan_id)) 5342 return 1; 5343 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); 5344 if (diag == 0) 5345 return 0; 5346 fprintf(stderr, 5347 "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", 5348 port_id, vlan_id, on, diag); 5349 return -1; 5350 } 5351 5352 void 5353 rx_vlan_all_filter_set(portid_t port_id, int on) 5354 { 5355 uint16_t vlan_id; 5356 5357 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5358 return; 5359 for (vlan_id = 0; vlan_id < 4096; vlan_id++) { 5360 if (rx_vft_set(port_id, vlan_id, on)) 5361 break; 5362 } 5363 } 5364 5365 void 5366 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) 5367 { 5368 int diag; 5369 5370 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5371 return; 5372 5373 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); 5374 if (diag == 0) 5375 return; 5376 5377 fprintf(stderr, 5378 "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", 5379 port_id, vlan_type, tp_id, diag); 5380 } 5381 5382 void 5383 tx_vlan_set(portid_t port_id, uint16_t vlan_id) 5384 { 5385 struct rte_eth_dev_info dev_info; 5386 int ret; 5387 5388 if (vlan_id_is_invalid(vlan_id)) 5389 return; 5390 5391 if (ports[port_id].dev_conf.txmode.offloads & 5392 RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { 5393 fprintf(stderr, "Error, as QinQ has been enabled.\n"); 5394 return; 5395 } 5396 5397 ret = eth_dev_info_get_print_err(port_id, &dev_info); 5398 if (ret != 0) 5399 return; 5400 5401 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { 5402 fprintf(stderr, 5403 "Error: vlan insert is not supported by port %d\n", 5404 port_id); 5405 return; 5406 } 5407 5408 tx_vlan_reset(port_id); 5409 ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 5410 ports[port_id].tx_vlan_id = vlan_id; 5411 } 5412 5413 void 5414 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) 5415 { 5416 struct rte_eth_dev_info dev_info; 5417 int ret; 5418 5419 if (vlan_id_is_invalid(vlan_id)) 5420 return; 5421 if (vlan_id_is_invalid(vlan_id_outer)) 5422 return; 5423 5424 ret = eth_dev_info_get_print_err(port_id, &dev_info); 5425 if (ret != 0) 5426 return; 5427 5428 if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { 5429 fprintf(stderr, 5430 "Error: qinq insert not supported by port %d\n", 5431 port_id); 5432 return; 5433 } 5434 5435 tx_vlan_reset(port_id); 5436 ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 5437 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 5438 ports[port_id].tx_vlan_id = vlan_id; 5439 ports[port_id].tx_vlan_id_outer = vlan_id_outer; 5440 } 5441 5442 void 5443 tx_vlan_reset(portid_t port_id) 5444 { 5445 ports[port_id].dev_conf.txmode.offloads &= 5446 ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 5447 RTE_ETH_TX_OFFLOAD_QINQ_INSERT); 5448 ports[port_id].tx_vlan_id = 0; 5449 ports[port_id].tx_vlan_id_outer = 0; 5450 } 5451 5452 void 5453 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) 5454 { 5455 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5456 return; 5457 5458 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); 5459 } 5460 5461 void 5462 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) 5463 { 5464 int ret; 5465 5466 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5467 return; 5468 5469 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) 5470 return; 5471 5472 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { 5473 fprintf(stderr, "map_value not in required range 0..%d\n", 5474 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 5475 return; 5476 } 5477 5478 if (!is_rx) { /* tx */ 5479 ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, 5480 map_value); 5481 if (ret) { 5482 fprintf(stderr, 5483 "failed to set tx queue stats mapping.\n"); 5484 return; 5485 } 5486 } else { /* rx */ 5487 ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, 5488 map_value); 5489 if (ret) { 5490 fprintf(stderr, 5491 "failed to set rx queue stats mapping.\n"); 5492 return; 5493 } 5494 } 5495 } 5496 5497 void 5498 set_xstats_hide_zero(uint8_t on_off) 5499 { 5500 xstats_hide_zero = on_off; 5501 } 5502 5503 void 5504 set_record_core_cycles(uint8_t on_off) 5505 { 5506 record_core_cycles = on_off; 5507 } 5508 5509 void 5510 set_record_burst_stats(uint8_t on_off) 5511 { 5512 record_burst_stats = on_off; 5513 } 5514 5515 static char* 5516 flowtype_to_str(uint16_t flow_type) 5517 { 5518 struct flow_type_info { 5519 char str[32]; 5520 uint16_t ftype; 5521 }; 5522 5523 uint8_t i; 5524 static struct flow_type_info flowtype_str_table[] = { 5525 {"raw", RTE_ETH_FLOW_RAW}, 5526 {"ipv4", RTE_ETH_FLOW_IPV4}, 5527 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, 5528 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, 5529 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, 5530 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, 5531 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, 5532 {"ipv6", RTE_ETH_FLOW_IPV6}, 5533 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, 5534 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, 5535 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, 5536 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, 5537 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, 5538 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, 5539 {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, 5540 {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, 5541 {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, 5542 {"port", RTE_ETH_FLOW_PORT}, 5543 {"vxlan", RTE_ETH_FLOW_VXLAN}, 5544 {"geneve", RTE_ETH_FLOW_GENEVE}, 5545 {"nvgre", RTE_ETH_FLOW_NVGRE}, 5546 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, 5547 {"gtpu", RTE_ETH_FLOW_GTPU}, 5548 }; 5549 5550 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { 5551 if (flowtype_str_table[i].ftype == flow_type) 5552 return flowtype_str_table[i].str; 5553 } 5554 5555 return NULL; 5556 } 5557 5558 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) 5559 5560 static inline void 5561 print_fdir_mask(struct rte_eth_fdir_masks *mask) 5562 { 5563 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); 5564 5565 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 5566 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," 5567 " tunnel_id: 0x%08x", 5568 mask->mac_addr_byte_mask, mask->tunnel_type_mask, 5569 rte_be_to_cpu_32(mask->tunnel_id_mask)); 5570 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 5571 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", 5572 rte_be_to_cpu_32(mask->ipv4_mask.src_ip), 5573 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); 5574 5575 printf("\n src_port: 0x%04x, dst_port: 0x%04x", 5576 rte_be_to_cpu_16(mask->src_port_mask), 5577 rte_be_to_cpu_16(mask->dst_port_mask)); 5578 5579 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 5580 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), 5581 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), 5582 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), 5583 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); 5584 5585 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", 5586 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), 5587 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), 5588 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), 5589 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); 5590 } 5591 5592 printf("\n"); 5593 } 5594 5595 static inline void 5596 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 5597 { 5598 struct rte_eth_flex_payload_cfg *cfg; 5599 uint32_t i, j; 5600 5601 for (i = 0; i < flex_conf->nb_payloads; i++) { 5602 cfg = &flex_conf->flex_set[i]; 5603 if (cfg->type == RTE_ETH_RAW_PAYLOAD) 5604 printf("\n RAW: "); 5605 else if (cfg->type == RTE_ETH_L2_PAYLOAD) 5606 printf("\n L2_PAYLOAD: "); 5607 else if (cfg->type == RTE_ETH_L3_PAYLOAD) 5608 printf("\n L3_PAYLOAD: "); 5609 else if (cfg->type == RTE_ETH_L4_PAYLOAD) 5610 printf("\n L4_PAYLOAD: "); 5611 else 5612 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); 5613 for (j = 0; j < num; j++) 5614 printf(" %-5u", cfg->src_offset[j]); 5615 } 5616 printf("\n"); 5617 } 5618 5619 static inline void 5620 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) 5621 { 5622 struct rte_eth_fdir_flex_mask *mask; 5623 uint32_t i, j; 5624 char *p; 5625 5626 for (i = 0; i < flex_conf->nb_flexmasks; i++) { 5627 mask = &flex_conf->flex_mask[i]; 5628 p = flowtype_to_str(mask->flow_type); 5629 printf("\n %s:\t", p ? p : "unknown"); 5630 for (j = 0; j < num; j++) 5631 printf(" %02x", mask->mask[j]); 5632 } 5633 printf("\n"); 5634 } 5635 5636 static inline void 5637 print_fdir_flow_type(uint32_t flow_types_mask) 5638 { 5639 int i; 5640 char *p; 5641 5642 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { 5643 if (!(flow_types_mask & (1 << i))) 5644 continue; 5645 p = flowtype_to_str(i); 5646 if (p) 5647 printf(" %s", p); 5648 else 5649 printf(" unknown"); 5650 } 5651 printf("\n"); 5652 } 5653 5654 static int 5655 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, 5656 struct rte_eth_fdir_stats *fdir_stat) 5657 { 5658 int ret = -ENOTSUP; 5659 5660 #ifdef RTE_NET_I40E 5661 if (ret == -ENOTSUP) { 5662 ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); 5663 if (!ret) 5664 ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); 5665 } 5666 #endif 5667 #ifdef RTE_NET_IXGBE 5668 if (ret == -ENOTSUP) { 5669 ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); 5670 if (!ret) 5671 ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); 5672 } 5673 #endif 5674 switch (ret) { 5675 case 0: 5676 break; 5677 case -ENOTSUP: 5678 fprintf(stderr, "\n FDIR is not supported on port %-2d\n", 5679 port_id); 5680 break; 5681 default: 5682 fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); 5683 break; 5684 } 5685 return ret; 5686 } 5687 5688 void 5689 fdir_get_infos(portid_t port_id) 5690 { 5691 struct rte_eth_fdir_stats fdir_stat; 5692 struct rte_eth_fdir_info fdir_info; 5693 5694 static const char *fdir_stats_border = "########################"; 5695 5696 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5697 return; 5698 5699 memset(&fdir_info, 0, sizeof(fdir_info)); 5700 memset(&fdir_stat, 0, sizeof(fdir_stat)); 5701 if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) 5702 return; 5703 5704 printf("\n %s FDIR infos for port %-2d %s\n", 5705 fdir_stats_border, port_id, fdir_stats_border); 5706 printf(" MODE: "); 5707 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) 5708 printf(" PERFECT\n"); 5709 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) 5710 printf(" PERFECT-MAC-VLAN\n"); 5711 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 5712 printf(" PERFECT-TUNNEL\n"); 5713 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) 5714 printf(" SIGNATURE\n"); 5715 else 5716 printf(" DISABLE\n"); 5717 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN 5718 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { 5719 printf(" SUPPORTED FLOW TYPE: "); 5720 print_fdir_flow_type(fdir_info.flow_types_mask[0]); 5721 } 5722 printf(" FLEX PAYLOAD INFO:\n"); 5723 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" 5724 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" 5725 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", 5726 fdir_info.max_flexpayload, fdir_info.flex_payload_limit, 5727 fdir_info.flex_payload_unit, 5728 fdir_info.max_flex_payload_segment_num, 5729 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); 5730 printf(" MASK: "); 5731 print_fdir_mask(&fdir_info.mask); 5732 if (fdir_info.flex_conf.nb_payloads > 0) { 5733 printf(" FLEX PAYLOAD SRC OFFSET:"); 5734 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5735 } 5736 if (fdir_info.flex_conf.nb_flexmasks > 0) { 5737 printf(" FLEX MASK CFG:"); 5738 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); 5739 } 5740 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", 5741 fdir_stat.guarant_cnt, fdir_stat.best_cnt); 5742 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", 5743 fdir_info.guarant_spc, fdir_info.best_spc); 5744 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" 5745 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" 5746 " add: %-10"PRIu64" remove: %"PRIu64"\n" 5747 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", 5748 fdir_stat.collision, fdir_stat.free, 5749 fdir_stat.maxhash, fdir_stat.maxlen, 5750 fdir_stat.add, fdir_stat.remove, 5751 fdir_stat.f_add, fdir_stat.f_remove); 5752 printf(" %s############################%s\n", 5753 fdir_stats_border, fdir_stats_border); 5754 } 5755 5756 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */ 5757 5758 void 5759 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) 5760 { 5761 struct rte_port *port; 5762 struct rte_eth_fdir_flex_conf *flex_conf; 5763 int i, idx = 0; 5764 5765 port = &ports[port_id]; 5766 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 5767 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { 5768 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { 5769 idx = i; 5770 break; 5771 } 5772 } 5773 if (i >= RTE_ETH_FLOW_MAX) { 5774 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { 5775 idx = flex_conf->nb_flexmasks; 5776 flex_conf->nb_flexmasks++; 5777 } else { 5778 fprintf(stderr, 5779 "The flex mask table is full. Can not set flex mask for flow_type(%u).", 5780 cfg->flow_type); 5781 return; 5782 } 5783 } 5784 rte_memcpy(&flex_conf->flex_mask[idx], 5785 cfg, 5786 sizeof(struct rte_eth_fdir_flex_mask)); 5787 } 5788 5789 void 5790 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) 5791 { 5792 struct rte_port *port; 5793 struct rte_eth_fdir_flex_conf *flex_conf; 5794 int i, idx = 0; 5795 5796 port = &ports[port_id]; 5797 flex_conf = &port->dev_conf.fdir_conf.flex_conf; 5798 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { 5799 if (cfg->type == flex_conf->flex_set[i].type) { 5800 idx = i; 5801 break; 5802 } 5803 } 5804 if (i >= RTE_ETH_PAYLOAD_MAX) { 5805 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { 5806 idx = flex_conf->nb_payloads; 5807 flex_conf->nb_payloads++; 5808 } else { 5809 fprintf(stderr, 5810 "The flex payload table is full. Can not set flex payload for type(%u).", 5811 cfg->type); 5812 return; 5813 } 5814 } 5815 rte_memcpy(&flex_conf->flex_set[idx], 5816 cfg, 5817 sizeof(struct rte_eth_flex_payload_cfg)); 5818 5819 } 5820 5821 void 5822 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) 5823 { 5824 #ifdef RTE_NET_IXGBE 5825 int diag; 5826 5827 if (is_rx) 5828 diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); 5829 else 5830 diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); 5831 5832 if (diag == 0) 5833 return; 5834 fprintf(stderr, 5835 "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", 5836 is_rx ? "rx" : "tx", port_id, diag); 5837 return; 5838 #endif 5839 fprintf(stderr, "VF %s setting not supported for port %d\n", 5840 is_rx ? "Rx" : "Tx", port_id); 5841 RTE_SET_USED(vf); 5842 RTE_SET_USED(on); 5843 } 5844 5845 int 5846 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) 5847 { 5848 int diag; 5849 struct rte_eth_link link; 5850 int ret; 5851 5852 if (port_id_is_invalid(port_id, ENABLED_WARN)) 5853 return 1; 5854 ret = eth_link_get_nowait_print_err(port_id, &link); 5855 if (ret < 0) 5856 return 1; 5857 if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && 5858 rate > link.link_speed) { 5859 fprintf(stderr, 5860 "Invalid rate value:%u bigger than link speed: %u\n", 5861 rate, link.link_speed); 5862 return 1; 5863 } 5864 diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); 5865 if (diag == 0) 5866 return diag; 5867 fprintf(stderr, 5868 "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", 5869 port_id, diag); 5870 return diag; 5871 } 5872 5873 int 5874 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) 5875 { 5876 int diag = -ENOTSUP; 5877 5878 RTE_SET_USED(vf); 5879 RTE_SET_USED(rate); 5880 RTE_SET_USED(q_msk); 5881 5882 #ifdef RTE_NET_IXGBE 5883 if (diag == -ENOTSUP) 5884 diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, 5885 q_msk); 5886 #endif 5887 #ifdef RTE_NET_BNXT 5888 if (diag == -ENOTSUP) 5889 diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); 5890 #endif 5891 if (diag == 0) 5892 return diag; 5893 5894 fprintf(stderr, 5895 "%s for port_id=%d failed diag=%d\n", 5896 __func__, port_id, diag); 5897 return diag; 5898 } 5899 5900 /* 5901 * Functions to manage the set of filtered Multicast MAC addresses. 5902 * 5903 * A pool of filtered multicast MAC addresses is associated with each port. 5904 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 5905 * The address of the pool and the number of valid multicast MAC addresses 5906 * recorded in the pool are stored in the fields "mc_addr_pool" and 5907 * "mc_addr_nb" of the "rte_port" data structure. 5908 * 5909 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes 5910 * to be supplied a contiguous array of multicast MAC addresses. 5911 * To comply with this constraint, the set of multicast addresses recorded 5912 * into the pool are systematically compacted at the beginning of the pool. 5913 * Hence, when a multicast address is removed from the pool, all following 5914 * addresses, if any, are copied back to keep the set contiguous. 5915 */ 5916 #define MCAST_POOL_INC 32 5917 5918 static int 5919 mcast_addr_pool_extend(struct rte_port *port) 5920 { 5921 struct rte_ether_addr *mc_pool; 5922 size_t mc_pool_size; 5923 5924 /* 5925 * If a free entry is available at the end of the pool, just 5926 * increment the number of recorded multicast addresses. 5927 */ 5928 if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { 5929 port->mc_addr_nb++; 5930 return 0; 5931 } 5932 5933 /* 5934 * [re]allocate a pool with MCAST_POOL_INC more entries. 5935 * The previous test guarantees that port->mc_addr_nb is a multiple 5936 * of MCAST_POOL_INC. 5937 */ 5938 mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + 5939 MCAST_POOL_INC); 5940 mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, 5941 mc_pool_size); 5942 if (mc_pool == NULL) { 5943 fprintf(stderr, 5944 "allocation of pool of %u multicast addresses failed\n", 5945 port->mc_addr_nb + MCAST_POOL_INC); 5946 return -ENOMEM; 5947 } 5948 5949 port->mc_addr_pool = mc_pool; 5950 port->mc_addr_nb++; 5951 return 0; 5952 5953 } 5954 5955 static void 5956 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) 5957 { 5958 if (mcast_addr_pool_extend(port) != 0) 5959 return; 5960 rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); 5961 } 5962 5963 static void 5964 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) 5965 { 5966 port->mc_addr_nb--; 5967 if (addr_idx == port->mc_addr_nb) { 5968 /* No need to recompact the set of multicast addresses. */ 5969 if (port->mc_addr_nb == 0) { 5970 /* free the pool of multicast addresses. */ 5971 free(port->mc_addr_pool); 5972 port->mc_addr_pool = NULL; 5973 } 5974 return; 5975 } 5976 memmove(&port->mc_addr_pool[addr_idx], 5977 &port->mc_addr_pool[addr_idx + 1], 5978 sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); 5979 } 5980 5981 static int 5982 eth_port_multicast_addr_list_set(portid_t port_id) 5983 { 5984 struct rte_port *port; 5985 int diag; 5986 5987 port = &ports[port_id]; 5988 diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, 5989 port->mc_addr_nb); 5990 if (diag < 0) 5991 fprintf(stderr, 5992 "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", 5993 port_id, port->mc_addr_nb, diag); 5994 5995 return diag; 5996 } 5997 5998 void 5999 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) 6000 { 6001 struct rte_port *port; 6002 uint32_t i; 6003 6004 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6005 return; 6006 6007 port = &ports[port_id]; 6008 6009 /* 6010 * Check that the added multicast MAC address is not already recorded 6011 * in the pool of multicast addresses. 6012 */ 6013 for (i = 0; i < port->mc_addr_nb; i++) { 6014 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { 6015 fprintf(stderr, 6016 "multicast address already filtered by port\n"); 6017 return; 6018 } 6019 } 6020 6021 mcast_addr_pool_append(port, mc_addr); 6022 if (eth_port_multicast_addr_list_set(port_id) < 0) 6023 /* Rollback on failure, remove the address from the pool */ 6024 mcast_addr_pool_remove(port, i); 6025 } 6026 6027 void 6028 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) 6029 { 6030 struct rte_port *port; 6031 uint32_t i; 6032 6033 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6034 return; 6035 6036 port = &ports[port_id]; 6037 6038 /* 6039 * Search the pool of multicast MAC addresses for the removed address. 6040 */ 6041 for (i = 0; i < port->mc_addr_nb; i++) { 6042 if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) 6043 break; 6044 } 6045 if (i == port->mc_addr_nb) { 6046 fprintf(stderr, "multicast address not filtered by port %d\n", 6047 port_id); 6048 return; 6049 } 6050 6051 mcast_addr_pool_remove(port, i); 6052 if (eth_port_multicast_addr_list_set(port_id) < 0) 6053 /* Rollback on failure, add the address back into the pool */ 6054 mcast_addr_pool_append(port, mc_addr); 6055 } 6056 6057 void 6058 port_dcb_info_display(portid_t port_id) 6059 { 6060 struct rte_eth_dcb_info dcb_info; 6061 uint16_t i; 6062 int ret; 6063 static const char *border = "================"; 6064 6065 if (port_id_is_invalid(port_id, ENABLED_WARN)) 6066 return; 6067 6068 ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); 6069 if (ret) { 6070 fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", 6071 port_id); 6072 return; 6073 } 6074 printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); 6075 printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); 6076 printf("\n TC : "); 6077 for (i = 0; i < dcb_info.nb_tcs; i++) 6078 printf("\t%4d", i); 6079 printf("\n Priority : "); 6080 for (i = 0; i < dcb_info.nb_tcs; i++) 6081 printf("\t%4d", dcb_info.prio_tc[i]); 6082 printf("\n BW percent :"); 6083 for (i = 0; i < dcb_info.nb_tcs; i++) 6084 printf("\t%4d%%", dcb_info.tc_bws[i]); 6085 printf("\n RXQ base : "); 6086 for (i = 0; i < dcb_info.nb_tcs; i++) 6087 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); 6088 printf("\n RXQ number :"); 6089 for (i = 0; i < dcb_info.nb_tcs; i++) 6090 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); 6091 printf("\n TXQ base : "); 6092 for (i = 0; i < dcb_info.nb_tcs; i++) 6093 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); 6094 printf("\n TXQ number :"); 6095 for (i = 0; i < dcb_info.nb_tcs; i++) 6096 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); 6097 printf("\n"); 6098 } 6099 6100 uint8_t * 6101 open_file(const char *file_path, uint32_t *size) 6102 { 6103 int fd = open(file_path, O_RDONLY); 6104 off_t pkg_size; 6105 uint8_t *buf = NULL; 6106 int ret = 0; 6107 struct stat st_buf; 6108 6109 if (size) 6110 *size = 0; 6111 6112 if (fd == -1) { 6113 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6114 return buf; 6115 } 6116 6117 if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { 6118 close(fd); 6119 fprintf(stderr, "%s: File operations failed\n", __func__); 6120 return buf; 6121 } 6122 6123 pkg_size = st_buf.st_size; 6124 if (pkg_size < 0) { 6125 close(fd); 6126 fprintf(stderr, "%s: File operations failed\n", __func__); 6127 return buf; 6128 } 6129 6130 buf = (uint8_t *)malloc(pkg_size); 6131 if (!buf) { 6132 close(fd); 6133 fprintf(stderr, "%s: Failed to malloc memory\n", __func__); 6134 return buf; 6135 } 6136 6137 ret = read(fd, buf, pkg_size); 6138 if (ret < 0) { 6139 close(fd); 6140 fprintf(stderr, "%s: File read operation failed\n", __func__); 6141 close_file(buf); 6142 return NULL; 6143 } 6144 6145 if (size) 6146 *size = pkg_size; 6147 6148 close(fd); 6149 6150 return buf; 6151 } 6152 6153 int 6154 save_file(const char *file_path, uint8_t *buf, uint32_t size) 6155 { 6156 FILE *fh = fopen(file_path, "wb"); 6157 6158 if (fh == NULL) { 6159 fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); 6160 return -1; 6161 } 6162 6163 if (fwrite(buf, 1, size, fh) != size) { 6164 fclose(fh); 6165 fprintf(stderr, "%s: File write operation failed\n", __func__); 6166 return -1; 6167 } 6168 6169 fclose(fh); 6170 6171 return 0; 6172 } 6173 6174 int 6175 close_file(uint8_t *buf) 6176 { 6177 if (buf) { 6178 free((void *)buf); 6179 return 0; 6180 } 6181 6182 return -1; 6183 } 6184 6185 void 6186 port_queue_region_info_display(portid_t port_id, void *buf) 6187 { 6188 #ifdef RTE_NET_I40E 6189 uint16_t i, j; 6190 struct rte_pmd_i40e_queue_regions *info = 6191 (struct rte_pmd_i40e_queue_regions *)buf; 6192 static const char *queue_region_info_stats_border = "-------"; 6193 6194 if (!info->queue_region_number) 6195 printf("there is no region has been set before"); 6196 6197 printf("\n %s All queue region info for port=%2d %s", 6198 queue_region_info_stats_border, port_id, 6199 queue_region_info_stats_border); 6200 printf("\n queue_region_number: %-14u \n", 6201 info->queue_region_number); 6202 6203 for (i = 0; i < info->queue_region_number; i++) { 6204 printf("\n region_id: %-14u queue_number: %-14u " 6205 "queue_start_index: %-14u \n", 6206 info->region[i].region_id, 6207 info->region[i].queue_num, 6208 info->region[i].queue_start_index); 6209 6210 printf(" user_priority_num is %-14u :", 6211 info->region[i].user_priority_num); 6212 for (j = 0; j < info->region[i].user_priority_num; j++) 6213 printf(" %-14u ", info->region[i].user_priority[j]); 6214 6215 printf("\n flowtype_num is %-14u :", 6216 info->region[i].flowtype_num); 6217 for (j = 0; j < info->region[i].flowtype_num; j++) 6218 printf(" %-14u ", info->region[i].hw_flowtype[j]); 6219 } 6220 #else 6221 RTE_SET_USED(port_id); 6222 RTE_SET_USED(buf); 6223 #endif 6224 6225 printf("\n\n"); 6226 } 6227 6228 void 6229 show_macs(portid_t port_id) 6230 { 6231 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6232 struct rte_eth_dev_info dev_info; 6233 int32_t i, rc, num_macs = 0; 6234 6235 if (eth_dev_info_get_print_err(port_id, &dev_info)) 6236 return; 6237 6238 struct rte_ether_addr addr[dev_info.max_mac_addrs]; 6239 rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); 6240 if (rc < 0) 6241 return; 6242 6243 for (i = 0; i < rc; i++) { 6244 6245 /* skip zero address */ 6246 if (rte_is_zero_ether_addr(&addr[i])) 6247 continue; 6248 6249 num_macs++; 6250 } 6251 6252 printf("Number of MAC address added: %d\n", num_macs); 6253 6254 for (i = 0; i < rc; i++) { 6255 6256 /* skip zero address */ 6257 if (rte_is_zero_ether_addr(&addr[i])) 6258 continue; 6259 6260 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); 6261 printf(" %s\n", buf); 6262 } 6263 } 6264 6265 void 6266 show_mcast_macs(portid_t port_id) 6267 { 6268 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 6269 struct rte_ether_addr *addr; 6270 struct rte_port *port; 6271 uint32_t i; 6272 6273 port = &ports[port_id]; 6274 6275 printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); 6276 6277 for (i = 0; i < port->mc_addr_nb; i++) { 6278 addr = &port->mc_addr_pool[i]; 6279 6280 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); 6281 printf(" %s\n", buf); 6282 } 6283 } 6284