1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 */ 33 34 #ifndef _TESTPMD_H_ 35 #define _TESTPMD_H_ 36 37 #define RTE_PORT_ALL (~(portid_t)0x0) 38 39 #define RTE_TEST_RX_DESC_MAX 2048 40 #define RTE_TEST_TX_DESC_MAX 2048 41 42 #define RTE_PORT_STOPPED (uint16_t)0 43 #define RTE_PORT_STARTED (uint16_t)1 44 #define RTE_PORT_CLOSED (uint16_t)2 45 #define RTE_PORT_HANDLING (uint16_t)3 46 47 /* 48 * Default size of the mbuf data buffer to receive standard 1518-byte 49 * Ethernet frames in a mono-segment memory buffer. 50 */ 51 #define DEFAULT_MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE 52 /**< Default size of mbuf data buffer. */ 53 54 /* 55 * The maximum number of segments per packet is used when creating 56 * scattered transmit packets composed of a list of mbufs. 57 */ 58 #define RTE_MAX_SEGS_PER_PKT 255 /**< nb_segs is a 8-bit unsigned char. */ 59 60 #define MAX_PKT_BURST 512 61 #define DEF_PKT_BURST 32 62 63 #define DEF_MBUF_CACHE 250 64 65 #define RTE_CACHE_LINE_SIZE_ROUNDUP(size) \ 66 (RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE)) 67 68 #define NUMA_NO_CONFIG 0xFF 69 #define UMA_NO_CONFIG 0xFF 70 71 typedef uint8_t lcoreid_t; 72 typedef uint8_t portid_t; 73 typedef uint16_t queueid_t; 74 typedef uint16_t streamid_t; 75 76 #define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1) 77 78 enum { 79 PORT_TOPOLOGY_PAIRED, 80 PORT_TOPOLOGY_CHAINED, 81 PORT_TOPOLOGY_LOOP, 82 }; 83 84 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 85 /** 86 * The data structure associated with RX and TX packet burst statistics 87 * that are recorded for each forwarding stream. 88 */ 89 struct pkt_burst_stats { 90 unsigned int pkt_burst_spread[MAX_PKT_BURST]; 91 }; 92 #endif 93 94 /** 95 * The data structure associated with a forwarding stream between a receive 96 * port/queue and a transmit port/queue. 
97 */ 98 struct fwd_stream { 99 /* "read-only" data */ 100 portid_t rx_port; /**< port to poll for received packets */ 101 queueid_t rx_queue; /**< RX queue to poll on "rx_port" */ 102 portid_t tx_port; /**< forwarding port of received packets */ 103 queueid_t tx_queue; /**< TX queue to send forwarded packets */ 104 streamid_t peer_addr; /**< index of peer ethernet address of packets */ 105 106 unsigned int retry_enabled; 107 108 /* "read-write" results */ 109 unsigned int rx_packets; /**< received packets */ 110 unsigned int tx_packets; /**< received packets transmitted */ 111 unsigned int fwd_dropped; /**< received packets not forwarded */ 112 unsigned int rx_bad_ip_csum ; /**< received packets has bad ip checksum */ 113 unsigned int rx_bad_l4_csum ; /**< received packets has bad l4 checksum */ 114 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES 115 uint64_t core_cycles; /**< used for RX and TX processing */ 116 #endif 117 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS 118 struct pkt_burst_stats rx_burst_stats; 119 struct pkt_burst_stats tx_burst_stats; 120 #endif 121 }; 122 123 /** Offload IP checksum in csum forward engine */ 124 #define TESTPMD_TX_OFFLOAD_IP_CKSUM 0x0001 125 /** Offload UDP checksum in csum forward engine */ 126 #define TESTPMD_TX_OFFLOAD_UDP_CKSUM 0x0002 127 /** Offload TCP checksum in csum forward engine */ 128 #define TESTPMD_TX_OFFLOAD_TCP_CKSUM 0x0004 129 /** Offload SCTP checksum in csum forward engine */ 130 #define TESTPMD_TX_OFFLOAD_SCTP_CKSUM 0x0008 131 /** Offload outer IP checksum in csum forward engine for recognized tunnels */ 132 #define TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM 0x0010 133 /** Parse tunnel in csum forward engine. If set, dissect tunnel headers 134 * of rx packets. If not set, treat inner headers as payload. 
 */
#define TESTPMD_TX_OFFLOAD_PARSE_TUNNEL      0x0020
/** Insert VLAN header in forward engine */
#define TESTPMD_TX_OFFLOAD_INSERT_VLAN       0x0040
/** Insert double VLAN header in forward engine */
#define TESTPMD_TX_OFFLOAD_INSERT_QINQ       0x0080

/**
 * The data structure associated with each port.
 */
struct rte_port {
	uint8_t                 enabled;  /**< Port enabled or not */
	struct rte_eth_dev_info dev_info; /**< PCI info + driver name */
	struct rte_eth_conf     dev_conf; /**< Port configuration. */
	struct ether_addr       eth_addr; /**< Port ethernet address */
	struct rte_eth_stats    stats;    /**< Last port statistics */
	uint64_t                tx_dropped; /**< If no descriptor in TX ring */
	struct fwd_stream       *rx_stream; /**< Port RX stream, if unique */
	struct fwd_stream       *tx_stream; /**< Port TX stream, if unique */
	unsigned int            socket_id;  /**< For NUMA support */
	uint16_t                tx_ol_flags; /**< TX Offload Flags (TESTPMD_TX_OFFLOAD...). */
	uint16_t                tso_segsz;  /**< Segmentation offload MSS for non-tunneled packets. */
	uint16_t                tunnel_tso_segsz; /**< Segmentation offload MSS for tunneled pkts.
 */
	uint16_t                tx_vlan_id; /**< The tag ID */
	uint16_t                tx_vlan_id_outer; /**< The outer tag ID */
	void                    *fwd_ctx;   /**< Forwarding mode context */
	uint64_t                rx_bad_ip_csum; /**< rx pkts with bad ip checksum */
	uint64_t                rx_bad_l4_csum; /**< rx pkts with bad l4 checksum */
	uint8_t                 tx_queue_stats_mapping_enabled;
	uint8_t                 rx_queue_stats_mapping_enabled;
	volatile uint16_t       port_status; /**< port started or not (RTE_PORT_*) */
	uint8_t                 need_reconfig; /**< need reconfiguring port or not */
	uint8_t                 need_reconfig_queues; /**< need reconfiguring queues or not */
	uint8_t                 rss_flag;   /**< enable rss or not */
	uint8_t                 dcb_flag;   /**< enable dcb */
	struct rte_eth_rxconf   rx_conf;    /**< rx configuration */
	struct rte_eth_txconf   tx_conf;    /**< tx configuration */
	struct ether_addr       *mc_addr_pool; /**< pool of multicast addrs */
	uint32_t                mc_addr_nb; /**< nb. of addr. in mc_addr_pool */
	uint8_t                 slave_flag; /**< bonding slave port */
};

/* Return the index of the next valid port at or after "p" (size if none). */
extern portid_t __rte_unused
find_next_port(portid_t p, struct rte_port *ports, int size);

/*
 * Iterate "p" over the port indices accepted by find_next_port()
 * (presumably the enabled entries of "ports" — see its definition).
 */
#define FOREACH_PORT(p, ports) \
	for (p = find_next_port(0, ports, RTE_MAX_ETHPORTS); \
	    p < RTE_MAX_ETHPORTS; \
	    p = find_next_port(p + 1, ports, RTE_MAX_ETHPORTS))

/**
 * The data structure associated with each forwarding logical core.
 * The logical cores are internally numbered by a core index from 0 to
 * the maximum number of logical cores - 1.
 * The system CPU identifier of all logical cores are setup in a global
 * CPU id. configuration table.
190 */ 191 struct fwd_lcore { 192 struct rte_mempool *mbp; /**< The mbuf pool to use by this core */ 193 streamid_t stream_idx; /**< index of 1st stream in "fwd_streams" */ 194 streamid_t stream_nb; /**< number of streams in "fwd_streams" */ 195 lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */ 196 queueid_t tx_queue; /**< TX queue to send forwarded packets */ 197 volatile char stopped; /**< stop forwarding when set */ 198 }; 199 200 /* 201 * Forwarding mode operations: 202 * - IO forwarding mode (default mode) 203 * Forwards packets unchanged. 204 * 205 * - MAC forwarding mode 206 * Set the source and the destination Ethernet addresses of packets 207 * before forwarding them. 208 * 209 * - IEEE1588 forwarding mode 210 * Check that received IEEE1588 Precise Time Protocol (PTP) packets are 211 * filtered and timestamped by the hardware. 212 * Forwards packets unchanged on the same port. 213 * Check that sent IEEE1588 PTP packets are timestamped by the hardware. 214 */ 215 typedef void (*port_fwd_begin_t)(portid_t pi); 216 typedef void (*port_fwd_end_t)(portid_t pi); 217 typedef void (*packet_fwd_t)(struct fwd_stream *fs); 218 219 struct fwd_engine { 220 const char *fwd_mode_name; /**< Forwarding mode name. */ 221 port_fwd_begin_t port_fwd_begin; /**< NULL if nothing special to do. */ 222 port_fwd_end_t port_fwd_end; /**< NULL if nothing special to do. */ 223 packet_fwd_t packet_fwd; /**< Mandatory. 
 */
};

/* Defaults for the TX retry mechanism (see burst_tx_* globals below). */
#define BURST_TX_WAIT_US 1
#define BURST_TX_RETRIES 64

extern uint32_t burst_tx_delay_time;
extern uint32_t burst_tx_retry_num;

/* The built-in forwarding engines (defined in their respective .c files). */
extern struct fwd_engine io_fwd_engine;
extern struct fwd_engine mac_fwd_engine;
extern struct fwd_engine mac_swap_engine;
extern struct fwd_engine flow_gen_engine;
extern struct fwd_engine rx_only_engine;
extern struct fwd_engine tx_only_engine;
extern struct fwd_engine csum_fwd_engine;
extern struct fwd_engine icmp_echo_engine;
#ifdef RTE_LIBRTE_IEEE1588
extern struct fwd_engine ieee1588_fwd_engine;
#endif

extern struct fwd_engine *fwd_engines[]; /**< NULL terminated array. */

/**
 * Forwarding Configuration
 */
struct fwd_config {
	struct fwd_engine *fwd_eng; /**< Packet forwarding mode. */
	streamid_t nb_fwd_streams;  /**< Nb. of forward streams to process. */
	lcoreid_t  nb_fwd_lcores;   /**< Nb. of logical cores to launch. */
	portid_t   nb_fwd_ports;    /**< Nb. of ports involved.
 */
};

/**
 * DCB mode enable
 */
enum dcb_mode_enable
{
	DCB_VT_ENABLED,
	DCB_ENABLED
};

#define MAX_TX_QUEUE_STATS_MAPPINGS 1024 /* MAX_PORT of 32 @ 32 tx_queues/port */
#define MAX_RX_QUEUE_STATS_MAPPINGS 4096 /* MAX_PORT of 32 @ 128 rx_queues/port */

/* One (port, queue) -> stats counter assignment. */
struct queue_stats_mappings {
	uint8_t port_id;
	uint16_t queue_id;
	uint8_t stats_counter_id;
} __rte_cache_aligned;

extern struct queue_stats_mappings tx_queue_stats_mappings_array[];
extern struct queue_stats_mappings rx_queue_stats_mappings_array[];

/* Assign both tx and rx queue stats mappings to the same default values */
extern struct queue_stats_mappings *tx_queue_stats_mappings;
extern struct queue_stats_mappings *rx_queue_stats_mappings;

extern uint16_t nb_tx_queue_stats_mappings;
extern uint16_t nb_rx_queue_stats_mappings;

/* globals used for configuration */
extern uint16_t verbose_level; /**< Drives messages being displayed, if any. */
extern uint8_t  interactive;
extern uint8_t  auto_start;
extern uint8_t  numa_support; /**< set by "--numa" parameter */
extern uint16_t port_topology; /**< set by "--port-topology" parameter */
extern uint8_t no_flush_rx; /**< set by "--no-flush-rx" parameter */
extern uint8_t  mp_anon; /**< set by "--mp-anon" parameter */
extern uint8_t no_link_check; /**< set by "--disable-link-check" parameter */
extern volatile int test_done; /* stop packet forwarding when set to 1. */

#ifdef RTE_NIC_BYPASS
extern uint32_t bypass_timeout; /**< Store the NIC bypass watchdog timeout */
#endif

/*
 * Store specified sockets on which memory pool to be used by ports
 * is allocated.
 *
 * NOTE(review): unlike the other globals above, this array is declared
 * without "extern", so every .c file including this header emits a
 * tentative definition (relies on common-symbol linkage). Consider
 * "extern" here plus a single definition — confirm against the .c files.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store specified sockets on which RX ring to be used by ports
 * is allocated.
309 */ 310 uint8_t rxring_numa[RTE_MAX_ETHPORTS]; 311 312 /* 313 * Store specified sockets on which TX ring to be used by ports 314 * is allocated. 315 */ 316 uint8_t txring_numa[RTE_MAX_ETHPORTS]; 317 318 extern uint8_t socket_num; 319 320 /* 321 * Configuration of logical cores: 322 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores 323 */ 324 extern lcoreid_t nb_lcores; /**< Number of logical cores probed at init time. */ 325 extern lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ 326 extern lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ 327 extern unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; 328 extern unsigned max_socket; 329 330 /* 331 * Configuration of Ethernet ports: 332 * nb_fwd_ports <= nb_cfg_ports <= nb_ports 333 */ 334 extern portid_t nb_ports; /**< Number of ethernet ports probed at init time. */ 335 extern portid_t nb_cfg_ports; /**< Number of configured ports. */ 336 extern portid_t nb_fwd_ports; /**< Number of forwarding ports. */ 337 extern portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; 338 extern struct rte_port *ports; 339 340 extern struct rte_eth_rxmode rx_mode; 341 extern uint64_t rss_hf; 342 343 extern queueid_t nb_rxq; 344 extern queueid_t nb_txq; 345 346 extern uint16_t nb_rxd; 347 extern uint16_t nb_txd; 348 349 extern int16_t rx_free_thresh; 350 extern int8_t rx_drop_en; 351 extern int16_t tx_free_thresh; 352 extern int16_t tx_rs_thresh; 353 extern int32_t txq_flags; 354 355 extern uint8_t dcb_config; 356 extern uint8_t dcb_test; 357 extern enum dcb_queue_mapping_mode dcb_q_mapping; 358 359 extern uint16_t mbuf_data_size; /**< Mbuf data space size. */ 360 extern uint32_t param_total_num_mbufs; 361 362 extern struct rte_fdir_conf fdir_conf; 363 364 /* 365 * Configuration of packet segments used by the "txonly" processing engine. 
366 */ 367 #define TXONLY_DEF_PACKET_LEN 64 368 extern uint16_t tx_pkt_length; /**< Length of TXONLY packet */ 369 extern uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT]; /**< Seg. lengths */ 370 extern uint8_t tx_pkt_nb_segs; /**< Number of segments in TX packets */ 371 372 enum tx_pkt_split { 373 TX_PKT_SPLIT_OFF, 374 TX_PKT_SPLIT_ON, 375 TX_PKT_SPLIT_RND, 376 }; 377 378 extern enum tx_pkt_split tx_pkt_split; 379 380 extern uint16_t nb_pkt_per_burst; 381 extern uint16_t mb_mempool_cache; 382 extern int8_t rx_pthresh; 383 extern int8_t rx_hthresh; 384 extern int8_t rx_wthresh; 385 extern int8_t tx_pthresh; 386 extern int8_t tx_hthresh; 387 extern int8_t tx_wthresh; 388 389 extern struct fwd_config cur_fwd_config; 390 extern struct fwd_engine *cur_fwd_eng; 391 extern uint32_t retry_enabled; 392 extern struct fwd_lcore **fwd_lcores; 393 extern struct fwd_stream **fwd_streams; 394 395 extern portid_t nb_peer_eth_addrs; /**< Number of peer ethernet addresses. */ 396 extern struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; 397 398 extern uint32_t burst_tx_delay_time; /**< Burst tx delay time(us) for mac-retry. */ 399 extern uint32_t burst_tx_retry_num; /**< Burst tx retry number for mac-retry. 
*/ 400 401 static inline unsigned int 402 lcore_num(void) 403 { 404 unsigned int i; 405 406 for (i = 0; i < RTE_MAX_LCORE; ++i) 407 if (fwd_lcores_cpuids[i] == rte_lcore_id()) 408 return i; 409 410 rte_panic("lcore_id of current thread not found in fwd_lcores_cpuids\n"); 411 } 412 413 static inline struct fwd_lcore * 414 current_fwd_lcore(void) 415 { 416 return fwd_lcores[lcore_num()]; 417 } 418 419 /* Mbuf Pools */ 420 static inline void 421 mbuf_poolname_build(unsigned int sock_id, char* mp_name, int name_size) 422 { 423 snprintf(mp_name, name_size, "mbuf_pool_socket_%u", sock_id); 424 } 425 426 static inline struct rte_mempool * 427 mbuf_pool_find(unsigned int sock_id) 428 { 429 char pool_name[RTE_MEMPOOL_NAMESIZE]; 430 431 mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name)); 432 return rte_mempool_lookup((const char *)pool_name); 433 } 434 435 /** 436 * Read/Write operations on a PCI register of a port. 437 */ 438 static inline uint32_t 439 port_pci_reg_read(struct rte_port *port, uint32_t reg_off) 440 { 441 void *reg_addr; 442 uint32_t reg_v; 443 444 reg_addr = (void *) 445 ((char *)port->dev_info.pci_dev->mem_resource[0].addr + 446 reg_off); 447 reg_v = *((volatile uint32_t *)reg_addr); 448 return rte_le_to_cpu_32(reg_v); 449 } 450 451 #define port_id_pci_reg_read(pt_id, reg_off) \ 452 port_pci_reg_read(&ports[(pt_id)], (reg_off)) 453 454 static inline void 455 port_pci_reg_write(struct rte_port *port, uint32_t reg_off, uint32_t reg_v) 456 { 457 void *reg_addr; 458 459 reg_addr = (void *) 460 ((char *)port->dev_info.pci_dev->mem_resource[0].addr + 461 reg_off); 462 *((volatile uint32_t *)reg_addr) = rte_cpu_to_le_32(reg_v); 463 } 464 465 #define port_id_pci_reg_write(pt_id, reg_off, reg_value) \ 466 port_pci_reg_write(&ports[(pt_id)], (reg_off), (reg_value)) 467 468 /* Prototypes */ 469 unsigned int parse_item_list(char* str, const char* item_name, 470 unsigned int max_items, 471 unsigned int *parsed_items, int check_unique_values); 472 void 
launch_args_parse(int argc, char** argv); 473 void prompt(void); 474 void prompt_exit(void); 475 void nic_stats_display(portid_t port_id); 476 void nic_stats_clear(portid_t port_id); 477 void nic_xstats_display(portid_t port_id); 478 void nic_xstats_clear(portid_t port_id); 479 void nic_stats_mapping_display(portid_t port_id); 480 void port_infos_display(portid_t port_id); 481 void rx_queue_infos_display(portid_t port_idi, uint16_t queue_id); 482 void tx_queue_infos_display(portid_t port_idi, uint16_t queue_id); 483 void fwd_lcores_config_display(void); 484 void pkt_fwd_config_display(struct fwd_config *cfg); 485 void rxtx_config_display(void); 486 void fwd_config_setup(void); 487 void set_def_fwd_config(void); 488 void reconfig(portid_t new_port_id, unsigned socket_id); 489 int init_fwd_streams(void); 490 491 void port_mtu_set(portid_t port_id, uint16_t mtu); 492 void port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_pos); 493 void port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, 494 uint8_t bit_v); 495 void port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, 496 uint8_t bit1_pos, uint8_t bit2_pos); 497 void port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, 498 uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value); 499 void port_reg_display(portid_t port_id, uint32_t reg_off); 500 void port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t value); 501 502 void rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id); 503 void tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id); 504 505 int set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc); 506 int set_fwd_lcores_mask(uint64_t lcoremask); 507 void set_fwd_lcores_number(uint16_t nb_lc); 508 509 void set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt); 510 void set_fwd_ports_mask(uint64_t portmask); 511 void set_fwd_ports_number(uint16_t nb_pt); 512 int port_is_forwarding(portid_t port_id); 
/* RX/TX VLAN stripping, filtering, insertion and QinQ configuration. */
void rx_vlan_strip_set(portid_t port_id, int on);
void rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on);

void rx_vlan_filter_set(portid_t port_id, int on);
void rx_vlan_all_filter_set(portid_t port_id, int on);
int rx_vft_set(portid_t port_id, uint16_t vlan_id, int on);
void vlan_extend_set(portid_t port_id, int on);
void vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type,
		   uint16_t tp_id);
void tx_vlan_set(portid_t port_id, uint16_t vlan_id);
void tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer);
void tx_vlan_reset(portid_t port_id);
void tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on);

/* Map an RX or TX queue to a statistics counter. */
void set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id,
	      uint8_t map_value);

/* Runtime tuning of the forwarding configuration. */
void set_verbose_level(uint16_t vb_level);
void set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs);
void show_tx_pkt_segments(void);
void set_tx_pkt_split(const char *name);
void set_nb_pkt_per_burst(uint16_t pkt_burst);
char *list_pkt_forwarding_modes(void);
char *list_pkt_forwarding_retry_modes(void);
void set_pkt_forwarding_mode(const char *fwd_mode);
void start_packet_forwarding(int with_tx_first);
void stop_packet_forwarding(void);
void dev_set_link_up(portid_t pid);
void dev_set_link_down(portid_t pid);
void init_port_config(void);

/* Bonding-slave bookkeeping (see rte_port.slave_flag). */
void set_port_slave_flag(portid_t slave_pid);
void clear_port_slave_flag(portid_t slave_pid);
uint8_t port_is_bonding_slave(portid_t slave_pid);

int init_port_dcb_config(portid_t pid, enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en);

/* Port lifecycle operations. */
int start_port(portid_t pid);
void stop_port(portid_t pid);
void close_port(portid_t pid);
void attach_port(char *identifier);
void detach_port(uint8_t port_id);
int all_ports_stopped(void);
int port_is_started(portid_t port_id);
void pmd_test_exit(void);
/* Flow director (fdir) configuration. */
void fdir_get_infos(portid_t port_id);
void fdir_set_flex_mask(portid_t port_id,
			   struct rte_eth_fdir_flex_mask *cfg);
void fdir_set_flex_payload(portid_t port_id,
			   struct rte_eth_flex_payload_cfg *cfg);
void port_rss_reta_info(portid_t port_id,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t nb_entries);

/* Per-VF traffic and rate control. */
void set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on);
void set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id,
		uint64_t vf_mask, uint8_t on);

int set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate);
int set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate,
				uint64_t q_msk);

/* RSS hash display and update. */
void port_rss_hash_conf_show(portid_t port_id, char rss_info[],
			     int show_rss_key);
/*
 * NOTE(review): "uint" is a BSD/SysV extension from <sys/types.h>, not
 * standard C; a fixed-width type would be preferable, but changing it
 * requires updating the matching definition as well — confirm first.
 */
void port_rss_hash_key_update(portid_t port_id, char rss_type[],
			      uint8_t *hash_key, uint hash_key_len);

/*
 * NOTE(review): the declarations below use raw uint8_t port ids instead of
 * portid_t — identical width today (portid_t is uint8_t) but inconsistent.
 */
void get_syn_filter(uint8_t port_id);
void get_ethertype_filter(uint8_t port_id, uint16_t index);
void get_2tuple_filter(uint8_t port_id, uint16_t index);
void get_5tuple_filter(uint8_t port_id, uint16_t index);
int rx_queue_id_is_invalid(queueid_t rxq_id);
int tx_queue_id_is_invalid(queueid_t txq_id);

/* Functions to manage the set of filtered Multicast MAC addresses */
void mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr);
void mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr);
void port_dcb_info_display(uint8_t port_id);

/* Whether port_id_is_invalid() should print a diagnostic. */
enum print_warning {
	ENABLED_WARN = 0,
	DISABLED_WARN
};
int port_id_is_invalid(portid_t port_id, enum print_warning warning);

/*
 * Work-around of a compilation error with ICC on invocations of the
 * rte_be_to_cpu_16() function.
600 */ 601 #ifdef __GCC__ 602 #define RTE_BE_TO_CPU_16(be_16_v) rte_be_to_cpu_16((be_16_v)) 603 #define RTE_CPU_TO_BE_16(cpu_16_v) rte_cpu_to_be_16((cpu_16_v)) 604 #else 605 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 606 #define RTE_BE_TO_CPU_16(be_16_v) (be_16_v) 607 #define RTE_CPU_TO_BE_16(cpu_16_v) (cpu_16_v) 608 #else 609 #define RTE_BE_TO_CPU_16(be_16_v) \ 610 (uint16_t) ((((be_16_v) & 0xFF) << 8) | ((be_16_v) >> 8)) 611 #define RTE_CPU_TO_BE_16(cpu_16_v) \ 612 (uint16_t) ((((cpu_16_v) & 0xFF) << 8) | ((cpu_16_v) >> 8)) 613 #endif 614 #endif /* __GCC__ */ 615 616 #endif /* _TESTPMD_H_ */ 617