xref: /f-stack/dpdk/app/test-pmd/testpmd.c (revision 2d9fd380)
1d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2d30ea906Sjfb8856606  * Copyright(c) 2010-2017 Intel Corporation
3a9643ea8Slogwang  */
4a9643ea8Slogwang 
5a9643ea8Slogwang #include <stdarg.h>
6a9643ea8Slogwang #include <stdio.h>
7a9643ea8Slogwang #include <stdlib.h>
8a9643ea8Slogwang #include <signal.h>
9a9643ea8Slogwang #include <string.h>
10a9643ea8Slogwang #include <time.h>
11a9643ea8Slogwang #include <fcntl.h>
122bfe3f2eSlogwang #include <sys/mman.h>
13a9643ea8Slogwang #include <sys/types.h>
14a9643ea8Slogwang #include <errno.h>
15d30ea906Sjfb8856606 #include <stdbool.h>
16a9643ea8Slogwang 
17a9643ea8Slogwang #include <sys/queue.h>
18a9643ea8Slogwang #include <sys/stat.h>
19a9643ea8Slogwang 
20a9643ea8Slogwang #include <stdint.h>
21a9643ea8Slogwang #include <unistd.h>
22a9643ea8Slogwang #include <inttypes.h>
23a9643ea8Slogwang 
24a9643ea8Slogwang #include <rte_common.h>
25a9643ea8Slogwang #include <rte_errno.h>
26a9643ea8Slogwang #include <rte_byteorder.h>
27a9643ea8Slogwang #include <rte_log.h>
28a9643ea8Slogwang #include <rte_debug.h>
29a9643ea8Slogwang #include <rte_cycles.h>
30a9643ea8Slogwang #include <rte_memory.h>
31a9643ea8Slogwang #include <rte_memcpy.h>
32a9643ea8Slogwang #include <rte_launch.h>
33a9643ea8Slogwang #include <rte_eal.h>
342bfe3f2eSlogwang #include <rte_alarm.h>
35a9643ea8Slogwang #include <rte_per_lcore.h>
36a9643ea8Slogwang #include <rte_lcore.h>
37a9643ea8Slogwang #include <rte_atomic.h>
38a9643ea8Slogwang #include <rte_branch_prediction.h>
39a9643ea8Slogwang #include <rte_mempool.h>
40a9643ea8Slogwang #include <rte_malloc.h>
41a9643ea8Slogwang #include <rte_mbuf.h>
42d30ea906Sjfb8856606 #include <rte_mbuf_pool_ops.h>
43a9643ea8Slogwang #include <rte_interrupts.h>
44a9643ea8Slogwang #include <rte_pci.h>
45a9643ea8Slogwang #include <rte_ether.h>
46a9643ea8Slogwang #include <rte_ethdev.h>
47a9643ea8Slogwang #include <rte_dev.h>
48a9643ea8Slogwang #include <rte_string_fns.h>
49*2d9fd380Sjfb8856606 #ifdef RTE_NET_IXGBE
502bfe3f2eSlogwang #include <rte_pmd_ixgbe.h>
51a9643ea8Slogwang #endif
52*2d9fd380Sjfb8856606 #ifdef RTE_LIB_PDUMP
53a9643ea8Slogwang #include <rte_pdump.h>
54a9643ea8Slogwang #endif
552bfe3f2eSlogwang #include <rte_flow.h>
562bfe3f2eSlogwang #include <rte_metrics.h>
57*2d9fd380Sjfb8856606 #ifdef RTE_LIB_BITRATESTATS
582bfe3f2eSlogwang #include <rte_bitrate.h>
592bfe3f2eSlogwang #endif
60*2d9fd380Sjfb8856606 #ifdef RTE_LIB_LATENCYSTATS
612bfe3f2eSlogwang #include <rte_latencystats.h>
622bfe3f2eSlogwang #endif
63a9643ea8Slogwang 
64a9643ea8Slogwang #include "testpmd.h"
65a9643ea8Slogwang 
66d30ea906Sjfb8856606 #ifndef MAP_HUGETLB
67d30ea906Sjfb8856606 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
68d30ea906Sjfb8856606 #define HUGE_FLAG (0x40000)
69d30ea906Sjfb8856606 #else
70d30ea906Sjfb8856606 #define HUGE_FLAG MAP_HUGETLB
71d30ea906Sjfb8856606 #endif
72d30ea906Sjfb8856606 
73d30ea906Sjfb8856606 #ifndef MAP_HUGE_SHIFT
74d30ea906Sjfb8856606 /* older kernels (or FreeBSD) will not have this define */
75d30ea906Sjfb8856606 #define HUGE_SHIFT (26)
76d30ea906Sjfb8856606 #else
77d30ea906Sjfb8856606 #define HUGE_SHIFT MAP_HUGE_SHIFT
78d30ea906Sjfb8856606 #endif
79d30ea906Sjfb8856606 
80d30ea906Sjfb8856606 #define EXTMEM_HEAP_NAME "extmem"
81*2d9fd380Sjfb8856606 #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
82d30ea906Sjfb8856606 
83a9643ea8Slogwang uint16_t verbose_level = 0; /**< Silent by default. */
84d30ea906Sjfb8856606 int testpmd_logtype; /**< Log type for testpmd logs */
85a9643ea8Slogwang 
86*2d9fd380Sjfb8856606 /* use main core for command line ? */
87a9643ea8Slogwang uint8_t interactive = 0;
88a9643ea8Slogwang uint8_t auto_start = 0;
892bfe3f2eSlogwang uint8_t tx_first;
902bfe3f2eSlogwang char cmdline_filename[PATH_MAX] = {0};
91a9643ea8Slogwang 
92a9643ea8Slogwang /*
93a9643ea8Slogwang  * NUMA support configuration.
94a9643ea8Slogwang  * When set, the NUMA support attempts to dispatch the allocation of the
95a9643ea8Slogwang  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96a9643ea8Slogwang  * probed ports among the CPU sockets 0 and 1.
97a9643ea8Slogwang  * Otherwise, all memory is allocated from CPU socket 0.
98a9643ea8Slogwang  */
992bfe3f2eSlogwang uint8_t numa_support = 1; /**< numa enabled by default */
100a9643ea8Slogwang 
101a9643ea8Slogwang /*
102a9643ea8Slogwang  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
103a9643ea8Slogwang  * not configured.
104a9643ea8Slogwang  */
105a9643ea8Slogwang uint8_t socket_num = UMA_NO_CONFIG;
106a9643ea8Slogwang 
107a9643ea8Slogwang /*
108d30ea906Sjfb8856606  * Select mempool allocation type:
109d30ea906Sjfb8856606  * - native: use regular DPDK memory
110d30ea906Sjfb8856606  * - anon: use regular DPDK memory to create mempool, but populate using
111d30ea906Sjfb8856606  *         anonymous memory (may not be IOVA-contiguous)
112d30ea906Sjfb8856606  * - xmem: use externally allocated hugepage memory
113a9643ea8Slogwang  */
114d30ea906Sjfb8856606 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
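/*
 * Editor's note (illustrative, not part of the original testpmd sources):
 * mp_alloc_type is normally chosen from the command line via testpmd's
 * --mp-alloc option, e.g. something along the lines of
 *
 *	./dpdk-testpmd -l 0-3 -n 4 -- --mp-alloc=xmemhuge -i
 *
 * which would select MP_ALLOC_XMEM_HUGE before mbuf_pool_create() runs.
 * The exact option spelling is assumed from the parameter parser and may
 * vary between DPDK releases.
 */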
115d30ea906Sjfb8856606 
116d30ea906Sjfb8856606 /*
117d30ea906Sjfb8856606  * Store specified sockets on which memory pool to be used by ports
118d30ea906Sjfb8856606  * is allocated.
119d30ea906Sjfb8856606  */
120d30ea906Sjfb8856606 uint8_t port_numa[RTE_MAX_ETHPORTS];
121d30ea906Sjfb8856606 
122d30ea906Sjfb8856606 /*
123d30ea906Sjfb8856606  * Store specified sockets on which RX ring to be used by ports
124d30ea906Sjfb8856606  * is allocated.
125d30ea906Sjfb8856606  */
126d30ea906Sjfb8856606 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
127d30ea906Sjfb8856606 
128d30ea906Sjfb8856606 /*
129d30ea906Sjfb8856606  * Store specified sockets on which TX ring to be used by ports
130d30ea906Sjfb8856606  * is allocated.
131d30ea906Sjfb8856606  */
132d30ea906Sjfb8856606 uint8_t txring_numa[RTE_MAX_ETHPORTS];
133a9643ea8Slogwang 
134a9643ea8Slogwang /*
135a9643ea8Slogwang  * Record the Ethernet address of peer target ports to which packets are
136a9643ea8Slogwang  * forwarded.
1372bfe3f2eSlogwang  * Must be instantiated with the ethernet addresses of peer traffic generator
138a9643ea8Slogwang  * ports.
139a9643ea8Slogwang  */
1404418919fSjohnjiang struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141a9643ea8Slogwang portid_t nb_peer_eth_addrs = 0;
142a9643ea8Slogwang 
143a9643ea8Slogwang /*
144a9643ea8Slogwang  * Probed Target Environment.
145a9643ea8Slogwang  */
146a9643ea8Slogwang struct rte_port *ports;	       /**< For all probed ethernet ports. */
147a9643ea8Slogwang portid_t nb_ports;             /**< Number of probed ethernet ports. */
148a9643ea8Slogwang struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149a9643ea8Slogwang lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150a9643ea8Slogwang 
151d30ea906Sjfb8856606 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
152d30ea906Sjfb8856606 
153a9643ea8Slogwang /*
154a9643ea8Slogwang  * Test Forwarding Configuration.
155a9643ea8Slogwang  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156a9643ea8Slogwang  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157a9643ea8Slogwang  */
158a9643ea8Slogwang lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159a9643ea8Slogwang lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160a9643ea8Slogwang portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161a9643ea8Slogwang portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162a9643ea8Slogwang 
163a9643ea8Slogwang unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164a9643ea8Slogwang portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165a9643ea8Slogwang 
166a9643ea8Slogwang struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167a9643ea8Slogwang streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168a9643ea8Slogwang 
169a9643ea8Slogwang /*
170a9643ea8Slogwang  * Forwarding engines.
171a9643ea8Slogwang  */
172a9643ea8Slogwang struct fwd_engine * fwd_engines[] = {
173a9643ea8Slogwang 	&io_fwd_engine,
174a9643ea8Slogwang 	&mac_fwd_engine,
175a9643ea8Slogwang 	&mac_swap_engine,
176a9643ea8Slogwang 	&flow_gen_engine,
177a9643ea8Slogwang 	&rx_only_engine,
178a9643ea8Slogwang 	&tx_only_engine,
179a9643ea8Slogwang 	&csum_fwd_engine,
180a9643ea8Slogwang 	&icmp_echo_engine,
181d30ea906Sjfb8856606 	&noisy_vnf_engine,
182*2d9fd380Sjfb8856606 	&five_tuple_swap_fwd_engine,
183a9643ea8Slogwang #ifdef RTE_LIBRTE_IEEE1588
184a9643ea8Slogwang 	&ieee1588_fwd_engine,
185a9643ea8Slogwang #endif
186a9643ea8Slogwang 	NULL,
187a9643ea8Slogwang };
188a9643ea8Slogwang 
189*2d9fd380Sjfb8856606 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
1904418919fSjohnjiang uint16_t mempool_flags;
1911646932aSjfb8856606 
192a9643ea8Slogwang struct fwd_config cur_fwd_config;
193a9643ea8Slogwang struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
194a9643ea8Slogwang uint32_t retry_enabled;
195a9643ea8Slogwang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
196a9643ea8Slogwang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
197a9643ea8Slogwang 
198*2d9fd380Sjfb8856606 uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
199*2d9fd380Sjfb8856606 uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
200*2d9fd380Sjfb8856606 	DEFAULT_MBUF_DATA_SIZE
201*2d9fd380Sjfb8856606 }; /**< Mbuf data space size. */
202a9643ea8Slogwang uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
203a9643ea8Slogwang                                       * specified on command-line. */
2042bfe3f2eSlogwang uint16_t stats_period; /**< Period to show statistics (disabled by default) */
2052bfe3f2eSlogwang 
2062bfe3f2eSlogwang /*
2072bfe3f2eSlogwang  * In a container, the process running with the 'stats-period' option cannot be
2082bfe3f2eSlogwang  * terminated. Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
2092bfe3f2eSlogwang  */
2102bfe3f2eSlogwang uint8_t f_quit;
211a9643ea8Slogwang 
212a9643ea8Slogwang /*
213*2d9fd380Sjfb8856606  * Configuration of packet segments used to scatter received packets
214*2d9fd380Sjfb8856606  * if any of the split features is configured.
215*2d9fd380Sjfb8856606  */
216*2d9fd380Sjfb8856606 uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
217*2d9fd380Sjfb8856606 uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
218*2d9fd380Sjfb8856606 uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
219*2d9fd380Sjfb8856606 uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
220*2d9fd380Sjfb8856606 
221*2d9fd380Sjfb8856606 /*
222a9643ea8Slogwang  * Configuration of packet segments used by the "txonly" processing engine.
223a9643ea8Slogwang  */
224a9643ea8Slogwang uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
225a9643ea8Slogwang uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
226a9643ea8Slogwang 	TXONLY_DEF_PACKET_LEN,
227a9643ea8Slogwang };
228a9643ea8Slogwang uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
229a9643ea8Slogwang 
230a9643ea8Slogwang enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
231a9643ea8Slogwang /**< Split policy for packets to TX. */
232a9643ea8Slogwang 
2334418919fSjohnjiang uint8_t txonly_multi_flow;
2344418919fSjohnjiang /**< Whether multiple flows are generated in TXONLY mode. */
2354418919fSjohnjiang 
236*2d9fd380Sjfb8856606 uint32_t tx_pkt_times_inter;
237*2d9fd380Sjfb8856606 /**< Timings for send scheduling in TXONLY mode, time between bursts. */
238*2d9fd380Sjfb8856606 
239*2d9fd380Sjfb8856606 uint32_t tx_pkt_times_intra;
240*2d9fd380Sjfb8856606 /**< Timings for send scheduling in TXONLY mode, time between packets. */
241*2d9fd380Sjfb8856606 
242a9643ea8Slogwang uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
243a9643ea8Slogwang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
244a9643ea8Slogwang 
245a9643ea8Slogwang /* Whether the current configuration is in DCB mode; 0 means it is not. */
246a9643ea8Slogwang uint8_t dcb_config = 0;
247a9643ea8Slogwang 
248a9643ea8Slogwang /* Whether the dcb is in testing status */
249a9643ea8Slogwang uint8_t dcb_test = 0;
250a9643ea8Slogwang 
251a9643ea8Slogwang /*
252a9643ea8Slogwang  * Configurable number of RX/TX queues.
253a9643ea8Slogwang  */
2544418919fSjohnjiang queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
255a9643ea8Slogwang queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
256a9643ea8Slogwang queueid_t nb_txq = 1; /**< Number of TX queues per port. */
257a9643ea8Slogwang 
258a9643ea8Slogwang /*
259a9643ea8Slogwang  * Configurable number of RX/TX ring descriptors.
260d30ea906Sjfb8856606  * Defaults are supplied by drivers via ethdev.
261a9643ea8Slogwang  */
262d30ea906Sjfb8856606 #define RTE_TEST_RX_DESC_DEFAULT 0
263d30ea906Sjfb8856606 #define RTE_TEST_TX_DESC_DEFAULT 0
264a9643ea8Slogwang uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
265a9643ea8Slogwang uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
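/*
 * Editor's note (sketch of assumed ethdev behaviour, not original source):
 * leaving nb_rxd/nb_txd at 0 means testpmd does not force the ring sizes;
 * when 0 descriptors are passed to rte_eth_rx_queue_setup()/
 * rte_eth_tx_queue_setup(), the ethdev layer substitutes the driver's
 * preferred default ring size instead.
 */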
266a9643ea8Slogwang 
267a9643ea8Slogwang #define RTE_PMD_PARAM_UNSET -1
268a9643ea8Slogwang /*
269a9643ea8Slogwang  * Configurable values of RX and TX ring threshold registers.
270a9643ea8Slogwang  */
271a9643ea8Slogwang 
272a9643ea8Slogwang int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
273a9643ea8Slogwang int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
274a9643ea8Slogwang int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
275a9643ea8Slogwang 
276a9643ea8Slogwang int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
277a9643ea8Slogwang int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
278a9643ea8Slogwang int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
279a9643ea8Slogwang 
280a9643ea8Slogwang /*
281a9643ea8Slogwang  * Configurable value of RX free threshold.
282a9643ea8Slogwang  */
283a9643ea8Slogwang int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
284a9643ea8Slogwang 
285a9643ea8Slogwang /*
286a9643ea8Slogwang  * Configurable value of RX drop enable.
287a9643ea8Slogwang  */
288a9643ea8Slogwang int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
289a9643ea8Slogwang 
290a9643ea8Slogwang /*
291a9643ea8Slogwang  * Configurable value of TX free threshold.
292a9643ea8Slogwang  */
293a9643ea8Slogwang int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
294a9643ea8Slogwang 
295a9643ea8Slogwang /*
296a9643ea8Slogwang  * Configurable value of TX RS bit threshold.
297a9643ea8Slogwang  */
298a9643ea8Slogwang int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
299a9643ea8Slogwang 
300a9643ea8Slogwang /*
301d30ea906Sjfb8856606  * Configurable value of buffered packets before sending.
302a9643ea8Slogwang  */
303d30ea906Sjfb8856606 uint16_t noisy_tx_sw_bufsz;
304d30ea906Sjfb8856606 
305d30ea906Sjfb8856606 /*
306d30ea906Sjfb8856606  * Configurable value of packet buffer timeout.
307d30ea906Sjfb8856606  */
308d30ea906Sjfb8856606 uint16_t noisy_tx_sw_buf_flush_time;
309d30ea906Sjfb8856606 
310d30ea906Sjfb8856606 /*
311d30ea906Sjfb8856606  * Configurable value for size of VNF internal memory area
312d30ea906Sjfb8856606  * used for simulating noisy neighbour behaviour
313d30ea906Sjfb8856606  */
314d30ea906Sjfb8856606 uint64_t noisy_lkup_mem_sz;
315d30ea906Sjfb8856606 
316d30ea906Sjfb8856606 /*
317d30ea906Sjfb8856606  * Configurable value of number of random writes done in
318d30ea906Sjfb8856606  * VNF simulation memory area.
319d30ea906Sjfb8856606  */
320d30ea906Sjfb8856606 uint64_t noisy_lkup_num_writes;
321d30ea906Sjfb8856606 
322d30ea906Sjfb8856606 /*
323d30ea906Sjfb8856606  * Configurable value of number of random reads done in
324d30ea906Sjfb8856606  * VNF simulation memory area.
325d30ea906Sjfb8856606  */
326d30ea906Sjfb8856606 uint64_t noisy_lkup_num_reads;
327d30ea906Sjfb8856606 
328d30ea906Sjfb8856606 /*
329d30ea906Sjfb8856606  * Configurable value of number of random reads/writes done in
330d30ea906Sjfb8856606  * VNF simulation memory area.
331d30ea906Sjfb8856606  */
332d30ea906Sjfb8856606 uint64_t noisy_lkup_num_reads_writes;
333a9643ea8Slogwang 
334a9643ea8Slogwang /*
335a9643ea8Slogwang  * Receive Side Scaling (RSS) configuration.
336a9643ea8Slogwang  */
337a9643ea8Slogwang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
338a9643ea8Slogwang 
339a9643ea8Slogwang /*
340a9643ea8Slogwang  * Port topology configuration
341a9643ea8Slogwang  */
342a9643ea8Slogwang uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
343a9643ea8Slogwang 
344a9643ea8Slogwang /*
345a9643ea8Slogwang  * Avoid flushing all the RX streams before starting forwarding.
346a9643ea8Slogwang  */
347a9643ea8Slogwang uint8_t no_flush_rx = 0; /* flush by default */
348a9643ea8Slogwang 
349a9643ea8Slogwang /*
3502bfe3f2eSlogwang  * Flow API isolated mode.
3512bfe3f2eSlogwang  */
3522bfe3f2eSlogwang uint8_t flow_isolate_all;
3532bfe3f2eSlogwang 
3542bfe3f2eSlogwang /*
355a9643ea8Slogwang  * Avoid checking the link status when starting/stopping a port.
356a9643ea8Slogwang  */
357a9643ea8Slogwang uint8_t no_link_check = 0; /* check by default */
358a9643ea8Slogwang 
359a9643ea8Slogwang /*
3604418919fSjohnjiang  * Don't automatically start all ports in interactive mode.
3614418919fSjohnjiang  */
3624418919fSjohnjiang uint8_t no_device_start = 0;
3634418919fSjohnjiang 
3644418919fSjohnjiang /*
3652bfe3f2eSlogwang  * Enable link status change notification
3662bfe3f2eSlogwang  */
3672bfe3f2eSlogwang uint8_t lsc_interrupt = 1; /* enabled by default */
3682bfe3f2eSlogwang 
3692bfe3f2eSlogwang /*
3702bfe3f2eSlogwang  * Enable device removal notification.
3712bfe3f2eSlogwang  */
3722bfe3f2eSlogwang uint8_t rmv_interrupt = 1; /* enabled by default */
3732bfe3f2eSlogwang 
374d30ea906Sjfb8856606 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
375d30ea906Sjfb8856606 
376d30ea906Sjfb8856606 /* After attach, port setup is called on event or by iterator */
377d30ea906Sjfb8856606 bool setup_on_probe_event = true;
378d30ea906Sjfb8856606 
3794418919fSjohnjiang /* Clear ptypes on port initialization. */
3804418919fSjohnjiang uint8_t clear_ptypes = true;
3814418919fSjohnjiang 
382*2d9fd380Sjfb8856606 /* Hairpin ports configuration mode. */
383*2d9fd380Sjfb8856606 uint16_t hairpin_mode;
384*2d9fd380Sjfb8856606 
385d30ea906Sjfb8856606 /* Pretty printing of ethdev events */
386d30ea906Sjfb8856606 static const char * const eth_event_desc[] = {
387d30ea906Sjfb8856606 	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
388d30ea906Sjfb8856606 	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
389d30ea906Sjfb8856606 	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
390d30ea906Sjfb8856606 	[RTE_ETH_EVENT_INTR_RESET] = "reset",
391d30ea906Sjfb8856606 	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
392d30ea906Sjfb8856606 	[RTE_ETH_EVENT_IPSEC] = "IPsec",
393d30ea906Sjfb8856606 	[RTE_ETH_EVENT_MACSEC] = "MACsec",
394d30ea906Sjfb8856606 	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
395d30ea906Sjfb8856606 	[RTE_ETH_EVENT_NEW] = "device probed",
396d30ea906Sjfb8856606 	[RTE_ETH_EVENT_DESTROY] = "device released",
397*2d9fd380Sjfb8856606 	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
398d30ea906Sjfb8856606 	[RTE_ETH_EVENT_MAX] = NULL,
399d30ea906Sjfb8856606 };
400d30ea906Sjfb8856606 
4012bfe3f2eSlogwang /*
4022bfe3f2eSlogwang  * Display or mask ether events
4032bfe3f2eSlogwang  * Default to all events except VF_MBOX
4042bfe3f2eSlogwang  */
4052bfe3f2eSlogwang uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
4062bfe3f2eSlogwang 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
4072bfe3f2eSlogwang 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
4082bfe3f2eSlogwang 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
409d30ea906Sjfb8856606 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
4102bfe3f2eSlogwang 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
411*2d9fd380Sjfb8856606 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
412*2d9fd380Sjfb8856606 			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
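/*
 * Editor's illustrative sketch (not part of the original testpmd sources):
 * a helper of this shape shows how a single bit of event_print_mask is
 * tested to decide whether an ethdev event should be printed.  It is marked
 * __rte_unused so it would not change the behaviour of the application.
 */
static __rte_unused bool
event_is_printed(enum rte_eth_event_type type)
{
	return (event_print_mask & (UINT32_C(1) << type)) != 0;
}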
413d30ea906Sjfb8856606 /*
414d30ea906Sjfb8856606  * Decide if all memory is locked for performance.
415d30ea906Sjfb8856606  */
416d30ea906Sjfb8856606 int do_mlockall = 0;
4172bfe3f2eSlogwang 
4182bfe3f2eSlogwang /*
419a9643ea8Slogwang  * NIC bypass mode configuration options.
420a9643ea8Slogwang  */
421a9643ea8Slogwang 
422*2d9fd380Sjfb8856606 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
423a9643ea8Slogwang /* The NIC bypass watchdog timeout. */
4242bfe3f2eSlogwang uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
4252bfe3f2eSlogwang #endif
4262bfe3f2eSlogwang 
4272bfe3f2eSlogwang 
428*2d9fd380Sjfb8856606 #ifdef RTE_LIB_LATENCYSTATS
4292bfe3f2eSlogwang 
4302bfe3f2eSlogwang /*
4312bfe3f2eSlogwang  * Set when latency stats are enabled on the command line.
4322bfe3f2eSlogwang  */
4332bfe3f2eSlogwang uint8_t latencystats_enabled;
4342bfe3f2eSlogwang 
4352bfe3f2eSlogwang /*
4362bfe3f2eSlogwang  * Lcore ID to serve latency statistics.
4372bfe3f2eSlogwang  */
4382bfe3f2eSlogwang lcoreid_t latencystats_lcore_id = -1;
439a9643ea8Slogwang 
440a9643ea8Slogwang #endif
441a9643ea8Slogwang 
442a9643ea8Slogwang /*
443a9643ea8Slogwang  * Ethernet device configuration.
444a9643ea8Slogwang  */
445a9643ea8Slogwang struct rte_eth_rxmode rx_mode = {
4464418919fSjohnjiang 	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
4474418919fSjohnjiang 		/**< Default maximum frame length. */
448d30ea906Sjfb8856606 };
449d30ea906Sjfb8856606 
450d30ea906Sjfb8856606 struct rte_eth_txmode tx_mode = {
451d30ea906Sjfb8856606 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
452a9643ea8Slogwang };
453a9643ea8Slogwang 
454a9643ea8Slogwang struct rte_fdir_conf fdir_conf = {
455a9643ea8Slogwang 	.mode = RTE_FDIR_MODE_NONE,
456a9643ea8Slogwang 	.pballoc = RTE_FDIR_PBALLOC_64K,
457a9643ea8Slogwang 	.status = RTE_FDIR_REPORT_STATUS,
458a9643ea8Slogwang 	.mask = {
459579bf1e2Sjfb8856606 		.vlan_tci_mask = 0xFFEF,
460a9643ea8Slogwang 		.ipv4_mask     = {
461a9643ea8Slogwang 			.src_ip = 0xFFFFFFFF,
462a9643ea8Slogwang 			.dst_ip = 0xFFFFFFFF,
463a9643ea8Slogwang 		},
464a9643ea8Slogwang 		.ipv6_mask     = {
465a9643ea8Slogwang 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
466a9643ea8Slogwang 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
467a9643ea8Slogwang 		},
468a9643ea8Slogwang 		.src_port_mask = 0xFFFF,
469a9643ea8Slogwang 		.dst_port_mask = 0xFFFF,
470a9643ea8Slogwang 		.mac_addr_byte_mask = 0xFF,
471a9643ea8Slogwang 		.tunnel_type_mask = 1,
472a9643ea8Slogwang 		.tunnel_id_mask = 0xFFFFFFFF,
473a9643ea8Slogwang 	},
474a9643ea8Slogwang 	.drop_queue = 127,
475a9643ea8Slogwang };
476a9643ea8Slogwang 
477a9643ea8Slogwang volatile int test_done = 1; /* stop packet forwarding when set to 1. */
478a9643ea8Slogwang 
479a9643ea8Slogwang struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
480a9643ea8Slogwang struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
481a9643ea8Slogwang 
482a9643ea8Slogwang struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
483a9643ea8Slogwang struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
484a9643ea8Slogwang 
485a9643ea8Slogwang uint16_t nb_tx_queue_stats_mappings = 0;
486a9643ea8Slogwang uint16_t nb_rx_queue_stats_mappings = 0;
487a9643ea8Slogwang 
4882bfe3f2eSlogwang /*
4892bfe3f2eSlogwang  * Display zero values by default for xstats
4902bfe3f2eSlogwang  */
4912bfe3f2eSlogwang uint8_t xstats_hide_zero;
4922bfe3f2eSlogwang 
493*2d9fd380Sjfb8856606 /*
494*2d9fd380Sjfb8856606  * Measure of CPU cycles disabled by default
495*2d9fd380Sjfb8856606  */
496*2d9fd380Sjfb8856606 uint8_t record_core_cycles;
497*2d9fd380Sjfb8856606 
498*2d9fd380Sjfb8856606 /*
499*2d9fd380Sjfb8856606  * Display of RX and TX bursts disabled by default
500*2d9fd380Sjfb8856606  */
501*2d9fd380Sjfb8856606 uint8_t record_burst_stats;
502*2d9fd380Sjfb8856606 
5032bfe3f2eSlogwang unsigned int num_sockets = 0;
5042bfe3f2eSlogwang unsigned int socket_ids[RTE_MAX_NUMA_NODES];
5052bfe3f2eSlogwang 
506*2d9fd380Sjfb8856606 #ifdef RTE_LIB_BITRATESTATS
5072bfe3f2eSlogwang /* Bitrate statistics */
5082bfe3f2eSlogwang struct rte_stats_bitrates *bitrate_data;
5092bfe3f2eSlogwang lcoreid_t bitrate_lcore_id;
5102bfe3f2eSlogwang uint8_t bitrate_enabled;
5112bfe3f2eSlogwang #endif
5122bfe3f2eSlogwang 
5132bfe3f2eSlogwang struct gro_status gro_ports[RTE_MAX_ETHPORTS];
5142bfe3f2eSlogwang uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
515a9643ea8Slogwang 
516*2d9fd380Sjfb8856606 /*
517*2d9fd380Sjfb8856606  * Hexadecimal bitmask of RX mq modes that can be enabled.
518*2d9fd380Sjfb8856606  */
519*2d9fd380Sjfb8856606 enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
520*2d9fd380Sjfb8856606 
521a9643ea8Slogwang /* Forward function declarations */
522d30ea906Sjfb8856606 static void setup_attached_port(portid_t pi);
5232bfe3f2eSlogwang static void map_port_queue_stats_mapping_registers(portid_t pi,
5242bfe3f2eSlogwang 						   struct rte_port *port);
525a9643ea8Slogwang static void check_all_ports_link_status(uint32_t port_mask);
5262bfe3f2eSlogwang static int eth_event_callback(portid_t port_id,
5272bfe3f2eSlogwang 			      enum rte_eth_event_type type,
5282bfe3f2eSlogwang 			      void *param, void *ret_param);
529d30ea906Sjfb8856606 static void dev_event_callback(const char *device_name,
530d30ea906Sjfb8856606 				enum rte_dev_event_type type,
531d30ea906Sjfb8856606 				void *param);
532a9643ea8Slogwang 
533a9643ea8Slogwang /*
534a9643ea8Slogwang  * Check if all the ports are started.
535a9643ea8Slogwang  * If yes, return positive value. If not, return zero.
536a9643ea8Slogwang  */
537a9643ea8Slogwang static int all_ports_started(void);
538a9643ea8Slogwang 
5392bfe3f2eSlogwang struct gso_status gso_ports[RTE_MAX_ETHPORTS];
5404418919fSjohnjiang uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
541a9643ea8Slogwang 
542*2d9fd380Sjfb8856606 /* Holds the registered mbuf dynamic flags names. */
543*2d9fd380Sjfb8856606 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
544*2d9fd380Sjfb8856606 
5452bfe3f2eSlogwang /*
5462bfe3f2eSlogwang  * Helper function to check whether a socket has already been discovered.
5472bfe3f2eSlogwang  * Return a positive value if the socket is new, zero if it was already seen.
5482bfe3f2eSlogwang  */
5492bfe3f2eSlogwang int
5502bfe3f2eSlogwang new_socket_id(unsigned int socket_id)
5512bfe3f2eSlogwang {
5522bfe3f2eSlogwang 	unsigned int i;
5532bfe3f2eSlogwang 
5542bfe3f2eSlogwang 	for (i = 0; i < num_sockets; i++) {
5552bfe3f2eSlogwang 		if (socket_ids[i] == socket_id)
5562bfe3f2eSlogwang 			return 0;
5572bfe3f2eSlogwang 	}
5582bfe3f2eSlogwang 	return 1;
559a9643ea8Slogwang }
560a9643ea8Slogwang 
561a9643ea8Slogwang /*
562a9643ea8Slogwang  * Setup default configuration.
563a9643ea8Slogwang  */
564a9643ea8Slogwang static void
565a9643ea8Slogwang set_default_fwd_lcores_config(void)
566a9643ea8Slogwang {
567a9643ea8Slogwang 	unsigned int i;
568a9643ea8Slogwang 	unsigned int nb_lc;
569a9643ea8Slogwang 	unsigned int sock_num;
570a9643ea8Slogwang 
571a9643ea8Slogwang 	nb_lc = 0;
572a9643ea8Slogwang 	for (i = 0; i < RTE_MAX_LCORE; i++) {
573d30ea906Sjfb8856606 		if (!rte_lcore_is_enabled(i))
574d30ea906Sjfb8856606 			continue;
5752bfe3f2eSlogwang 		sock_num = rte_lcore_to_socket_id(i);
5762bfe3f2eSlogwang 		if (new_socket_id(sock_num)) {
5772bfe3f2eSlogwang 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
5782bfe3f2eSlogwang 				rte_exit(EXIT_FAILURE,
5792bfe3f2eSlogwang 					 "Total sockets greater than %u\n",
5802bfe3f2eSlogwang 					 RTE_MAX_NUMA_NODES);
5812bfe3f2eSlogwang 			}
5822bfe3f2eSlogwang 			socket_ids[num_sockets++] = sock_num;
583a9643ea8Slogwang 		}
584*2d9fd380Sjfb8856606 		if (i == rte_get_main_lcore())
585a9643ea8Slogwang 			continue;
586a9643ea8Slogwang 		fwd_lcores_cpuids[nb_lc++] = i;
587a9643ea8Slogwang 	}
588a9643ea8Slogwang 	nb_lcores = (lcoreid_t) nb_lc;
589a9643ea8Slogwang 	nb_cfg_lcores = nb_lcores;
590a9643ea8Slogwang 	nb_fwd_lcores = 1;
591a9643ea8Slogwang }
592a9643ea8Slogwang 
593a9643ea8Slogwang static void
594a9643ea8Slogwang set_def_peer_eth_addrs(void)
595a9643ea8Slogwang {
596a9643ea8Slogwang 	portid_t i;
597a9643ea8Slogwang 
598a9643ea8Slogwang 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5994418919fSjohnjiang 		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
600a9643ea8Slogwang 		peer_eth_addrs[i].addr_bytes[5] = i;
601a9643ea8Slogwang 	}
602a9643ea8Slogwang }
603a9643ea8Slogwang 
604a9643ea8Slogwang static void
605a9643ea8Slogwang set_default_fwd_ports_config(void)
606a9643ea8Slogwang {
607a9643ea8Slogwang 	portid_t pt_id;
6082bfe3f2eSlogwang 	int i = 0;
609a9643ea8Slogwang 
610d30ea906Sjfb8856606 	RTE_ETH_FOREACH_DEV(pt_id) {
6112bfe3f2eSlogwang 		fwd_ports_ids[i++] = pt_id;
612a9643ea8Slogwang 
613d30ea906Sjfb8856606 		/* Update sockets info according to the attached device */
614d30ea906Sjfb8856606 		int socket_id = rte_eth_dev_socket_id(pt_id);
615d30ea906Sjfb8856606 		if (socket_id >= 0 && new_socket_id(socket_id)) {
616d30ea906Sjfb8856606 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
617d30ea906Sjfb8856606 				rte_exit(EXIT_FAILURE,
618d30ea906Sjfb8856606 					 "Total sockets greater than %u\n",
619d30ea906Sjfb8856606 					 RTE_MAX_NUMA_NODES);
620d30ea906Sjfb8856606 			}
621d30ea906Sjfb8856606 			socket_ids[num_sockets++] = socket_id;
622d30ea906Sjfb8856606 		}
623d30ea906Sjfb8856606 	}
624d30ea906Sjfb8856606 
625a9643ea8Slogwang 	nb_cfg_ports = nb_ports;
626a9643ea8Slogwang 	nb_fwd_ports = nb_ports;
627a9643ea8Slogwang }
628a9643ea8Slogwang 
629a9643ea8Slogwang void
630a9643ea8Slogwang set_def_fwd_config(void)
631a9643ea8Slogwang {
632a9643ea8Slogwang 	set_default_fwd_lcores_config();
633a9643ea8Slogwang 	set_def_peer_eth_addrs();
634a9643ea8Slogwang 	set_default_fwd_ports_config();
635a9643ea8Slogwang }
636a9643ea8Slogwang 
637d30ea906Sjfb8856606 /* extremely pessimistic estimation of memory required to create a mempool */
638d30ea906Sjfb8856606 static int
639d30ea906Sjfb8856606 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
640d30ea906Sjfb8856606 {
641d30ea906Sjfb8856606 	unsigned int n_pages, mbuf_per_pg, leftover;
642d30ea906Sjfb8856606 	uint64_t total_mem, mbuf_mem, obj_sz;
643d30ea906Sjfb8856606 
644d30ea906Sjfb8856606 	/* there is no good way to predict how much space the mempool will
645d30ea906Sjfb8856606 	 * occupy because it will allocate chunks on the fly, and some of those
646d30ea906Sjfb8856606 	 * will come from default DPDK memory while some will come from our
647d30ea906Sjfb8856606 	 * external memory, so just assume 128MB will be enough for everyone.
648d30ea906Sjfb8856606 	 */
649d30ea906Sjfb8856606 	uint64_t hdr_mem = 128 << 20;
650d30ea906Sjfb8856606 
651d30ea906Sjfb8856606 	/* account for possible non-contiguousness */
652d30ea906Sjfb8856606 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
653d30ea906Sjfb8856606 	if (obj_sz > pgsz) {
654d30ea906Sjfb8856606 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
655d30ea906Sjfb8856606 		return -1;
656d30ea906Sjfb8856606 	}
657d30ea906Sjfb8856606 
658d30ea906Sjfb8856606 	mbuf_per_pg = pgsz / obj_sz;
659d30ea906Sjfb8856606 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
660d30ea906Sjfb8856606 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
661d30ea906Sjfb8856606 
662d30ea906Sjfb8856606 	mbuf_mem = n_pages * pgsz;
663d30ea906Sjfb8856606 
664d30ea906Sjfb8856606 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
665d30ea906Sjfb8856606 
666d30ea906Sjfb8856606 	if (total_mem > SIZE_MAX) {
667d30ea906Sjfb8856606 		TESTPMD_LOG(ERR, "Memory size too big\n");
668d30ea906Sjfb8856606 		return -1;
669d30ea906Sjfb8856606 	}
670d30ea906Sjfb8856606 	*out = (size_t)total_mem;
671d30ea906Sjfb8856606 
672d30ea906Sjfb8856606 	return 0;
673d30ea906Sjfb8856606 }
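/*
 * Editor's worked example (assumed numbers, not in the original source):
 * with pgsz = RTE_PGSIZE_2M and an object size that rounds to 2 KiB,
 * mbuf_per_pg = 1024, so 262144 mbufs need n_pages = 256 (512 MB of mbuf
 * memory); adding the fixed 128 MB header estimate gives *out = 640 MB.
 */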
674d30ea906Sjfb8856606 
675d30ea906Sjfb8856606 static int
676d30ea906Sjfb8856606 pagesz_flags(uint64_t page_sz)
677d30ea906Sjfb8856606 {
678d30ea906Sjfb8856606 	/* as per mmap() manpage, all page sizes are log2 of page size
679d30ea906Sjfb8856606 	 * shifted by MAP_HUGE_SHIFT
680d30ea906Sjfb8856606 	 */
6814418919fSjohnjiang 	int log2 = rte_log2_u64(page_sz);
682d30ea906Sjfb8856606 
683d30ea906Sjfb8856606 	return (log2 << HUGE_SHIFT);
684d30ea906Sjfb8856606 }
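/*
 * Editor's worked example (not in the original source): for a 2 MB huge page
 * rte_log2_u64(RTE_PGSIZE_2M) is 21, so pagesz_flags() returns
 * (21 << HUGE_SHIFT), the same encoding the Linux kernel documents for
 * MAP_HUGE_2MB.
 */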
685d30ea906Sjfb8856606 
686d30ea906Sjfb8856606 static void *
687d30ea906Sjfb8856606 alloc_mem(size_t memsz, size_t pgsz, bool huge)
688d30ea906Sjfb8856606 {
689d30ea906Sjfb8856606 	void *addr;
690d30ea906Sjfb8856606 	int flags;
691d30ea906Sjfb8856606 
692d30ea906Sjfb8856606 	/* allocate anonymous hugepages */
693d30ea906Sjfb8856606 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
694d30ea906Sjfb8856606 	if (huge)
695d30ea906Sjfb8856606 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
696d30ea906Sjfb8856606 
697d30ea906Sjfb8856606 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
698d30ea906Sjfb8856606 	if (addr == MAP_FAILED)
699d30ea906Sjfb8856606 		return NULL;
700d30ea906Sjfb8856606 
701d30ea906Sjfb8856606 	return addr;
702d30ea906Sjfb8856606 }
703d30ea906Sjfb8856606 
704d30ea906Sjfb8856606 struct extmem_param {
705d30ea906Sjfb8856606 	void *addr;
706d30ea906Sjfb8856606 	size_t len;
707d30ea906Sjfb8856606 	size_t pgsz;
708d30ea906Sjfb8856606 	rte_iova_t *iova_table;
709d30ea906Sjfb8856606 	unsigned int iova_table_len;
710d30ea906Sjfb8856606 };
711d30ea906Sjfb8856606 
712d30ea906Sjfb8856606 static int
713d30ea906Sjfb8856606 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
714d30ea906Sjfb8856606 		bool huge)
715d30ea906Sjfb8856606 {
716d30ea906Sjfb8856606 	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
717d30ea906Sjfb8856606 			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
718d30ea906Sjfb8856606 	unsigned int cur_page, n_pages, pgsz_idx;
719d30ea906Sjfb8856606 	size_t mem_sz, cur_pgsz;
720d30ea906Sjfb8856606 	rte_iova_t *iovas = NULL;
721d30ea906Sjfb8856606 	void *addr;
722d30ea906Sjfb8856606 	int ret;
723d30ea906Sjfb8856606 
724d30ea906Sjfb8856606 	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
725d30ea906Sjfb8856606 		/* skip anything that is too big */
726d30ea906Sjfb8856606 		if (pgsizes[pgsz_idx] > SIZE_MAX)
727d30ea906Sjfb8856606 			continue;
728d30ea906Sjfb8856606 
729d30ea906Sjfb8856606 		cur_pgsz = pgsizes[pgsz_idx];
730d30ea906Sjfb8856606 
731d30ea906Sjfb8856606 		/* if we were told not to allocate hugepages, override */
732d30ea906Sjfb8856606 		if (!huge)
733d30ea906Sjfb8856606 			cur_pgsz = sysconf(_SC_PAGESIZE);
734d30ea906Sjfb8856606 
735d30ea906Sjfb8856606 		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
736d30ea906Sjfb8856606 		if (ret < 0) {
737d30ea906Sjfb8856606 			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
738d30ea906Sjfb8856606 			return -1;
739d30ea906Sjfb8856606 		}
740d30ea906Sjfb8856606 
741d30ea906Sjfb8856606 		/* allocate our memory */
742d30ea906Sjfb8856606 		addr = alloc_mem(mem_sz, cur_pgsz, huge);
743d30ea906Sjfb8856606 
744d30ea906Sjfb8856606 		/* if we couldn't allocate memory with a specified page size,
745d30ea906Sjfb8856606 		 * that doesn't mean we can't do it with other page sizes, so
746d30ea906Sjfb8856606 		 * try another one.
747d30ea906Sjfb8856606 		 */
748d30ea906Sjfb8856606 		if (addr == NULL)
749d30ea906Sjfb8856606 			continue;
750d30ea906Sjfb8856606 
751d30ea906Sjfb8856606 		/* store IOVA addresses for every page in this memory area */
752d30ea906Sjfb8856606 		n_pages = mem_sz / cur_pgsz;
753d30ea906Sjfb8856606 
754d30ea906Sjfb8856606 		iovas = malloc(sizeof(*iovas) * n_pages);
755d30ea906Sjfb8856606 
756d30ea906Sjfb8856606 		if (iovas == NULL) {
757d30ea906Sjfb8856606 			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
758d30ea906Sjfb8856606 			goto fail;
759d30ea906Sjfb8856606 		}
760d30ea906Sjfb8856606 		/* lock memory if it's not huge pages */
761d30ea906Sjfb8856606 		if (!huge)
762d30ea906Sjfb8856606 			mlock(addr, mem_sz);
763d30ea906Sjfb8856606 
764d30ea906Sjfb8856606 		/* populate IOVA addresses */
765d30ea906Sjfb8856606 		for (cur_page = 0; cur_page < n_pages; cur_page++) {
766d30ea906Sjfb8856606 			rte_iova_t iova;
767d30ea906Sjfb8856606 			size_t offset;
768d30ea906Sjfb8856606 			void *cur;
769d30ea906Sjfb8856606 
770d30ea906Sjfb8856606 			offset = cur_pgsz * cur_page;
771d30ea906Sjfb8856606 			cur = RTE_PTR_ADD(addr, offset);
772d30ea906Sjfb8856606 
773d30ea906Sjfb8856606 			/* touch the page before getting its IOVA */
774d30ea906Sjfb8856606 			*(volatile char *)cur = 0;
775d30ea906Sjfb8856606 
776d30ea906Sjfb8856606 			iova = rte_mem_virt2iova(cur);
777d30ea906Sjfb8856606 
778d30ea906Sjfb8856606 			iovas[cur_page] = iova;
779d30ea906Sjfb8856606 		}
780d30ea906Sjfb8856606 
781d30ea906Sjfb8856606 		break;
782d30ea906Sjfb8856606 	}
783d30ea906Sjfb8856606 	/* if we couldn't allocate anything */
784d30ea906Sjfb8856606 	if (iovas == NULL)
785d30ea906Sjfb8856606 		return -1;
786d30ea906Sjfb8856606 
787d30ea906Sjfb8856606 	param->addr = addr;
788d30ea906Sjfb8856606 	param->len = mem_sz;
789d30ea906Sjfb8856606 	param->pgsz = cur_pgsz;
790d30ea906Sjfb8856606 	param->iova_table = iovas;
791d30ea906Sjfb8856606 	param->iova_table_len = n_pages;
792d30ea906Sjfb8856606 
793d30ea906Sjfb8856606 	return 0;
794d30ea906Sjfb8856606 fail:
795d30ea906Sjfb8856606 	if (iovas)
796d30ea906Sjfb8856606 		free(iovas);
797d30ea906Sjfb8856606 	if (addr)
798d30ea906Sjfb8856606 		munmap(addr, mem_sz);
799d30ea906Sjfb8856606 
800d30ea906Sjfb8856606 	return -1;
801d30ea906Sjfb8856606 }
802d30ea906Sjfb8856606 
803d30ea906Sjfb8856606 static int
804d30ea906Sjfb8856606 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
805d30ea906Sjfb8856606 {
806d30ea906Sjfb8856606 	struct extmem_param param;
807d30ea906Sjfb8856606 	int socket_id, ret;
808d30ea906Sjfb8856606 
809d30ea906Sjfb8856606 	memset(&param, 0, sizeof(param));
810d30ea906Sjfb8856606 
811d30ea906Sjfb8856606 	/* check if our heap exists */
812d30ea906Sjfb8856606 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
813d30ea906Sjfb8856606 	if (socket_id < 0) {
814d30ea906Sjfb8856606 		/* create our heap */
815d30ea906Sjfb8856606 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
816d30ea906Sjfb8856606 		if (ret < 0) {
817d30ea906Sjfb8856606 			TESTPMD_LOG(ERR, "Cannot create heap\n");
818d30ea906Sjfb8856606 			return -1;
819d30ea906Sjfb8856606 		}
820d30ea906Sjfb8856606 	}
821d30ea906Sjfb8856606 
822d30ea906Sjfb8856606 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
823d30ea906Sjfb8856606 	if (ret < 0) {
824d30ea906Sjfb8856606 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
825d30ea906Sjfb8856606 		return -1;
826d30ea906Sjfb8856606 	}
827d30ea906Sjfb8856606 
828d30ea906Sjfb8856606 	/* we now have a valid memory area, so add it to heap */
829d30ea906Sjfb8856606 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
830d30ea906Sjfb8856606 			param.addr, param.len, param.iova_table,
831d30ea906Sjfb8856606 			param.iova_table_len, param.pgsz);
832d30ea906Sjfb8856606 
833d30ea906Sjfb8856606 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
834d30ea906Sjfb8856606 
835d30ea906Sjfb8856606 	/* not needed any more */
836d30ea906Sjfb8856606 	free(param.iova_table);
837d30ea906Sjfb8856606 
838d30ea906Sjfb8856606 	if (ret < 0) {
839d30ea906Sjfb8856606 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
840d30ea906Sjfb8856606 		munmap(param.addr, param.len);
841d30ea906Sjfb8856606 		return -1;
842d30ea906Sjfb8856606 	}
843d30ea906Sjfb8856606 
844d30ea906Sjfb8856606 	/* success */
845d30ea906Sjfb8856606 
846d30ea906Sjfb8856606 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
847d30ea906Sjfb8856606 			param.len >> 20);
848d30ea906Sjfb8856606 
849d30ea906Sjfb8856606 	return 0;
850d30ea906Sjfb8856606 }
8514418919fSjohnjiang static void
8524418919fSjohnjiang dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8534418919fSjohnjiang 	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8544418919fSjohnjiang {
8554418919fSjohnjiang 	uint16_t pid = 0;
8564418919fSjohnjiang 	int ret;
8574418919fSjohnjiang 
8584418919fSjohnjiang 	RTE_ETH_FOREACH_DEV(pid) {
8594418919fSjohnjiang 		struct rte_eth_dev *dev =
8604418919fSjohnjiang 			&rte_eth_devices[pid];
8614418919fSjohnjiang 
8624418919fSjohnjiang 		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
8634418919fSjohnjiang 					memhdr->len);
8644418919fSjohnjiang 		if (ret) {
8654418919fSjohnjiang 			TESTPMD_LOG(DEBUG,
8664418919fSjohnjiang 				    "unable to DMA unmap addr 0x%p "
8674418919fSjohnjiang 				    "for device %s\n",
8684418919fSjohnjiang 				    memhdr->addr, dev->data->name);
8694418919fSjohnjiang 		}
8704418919fSjohnjiang 	}
8714418919fSjohnjiang 	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
8724418919fSjohnjiang 	if (ret) {
8734418919fSjohnjiang 		TESTPMD_LOG(DEBUG,
8744418919fSjohnjiang 			    "unable to un-register addr 0x%p\n", memhdr->addr);
8754418919fSjohnjiang 	}
8764418919fSjohnjiang }
8774418919fSjohnjiang 
8784418919fSjohnjiang static void
8794418919fSjohnjiang dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8804418919fSjohnjiang 	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8814418919fSjohnjiang {
8824418919fSjohnjiang 	uint16_t pid = 0;
8834418919fSjohnjiang 	size_t page_size = sysconf(_SC_PAGESIZE);
8844418919fSjohnjiang 	int ret;
8854418919fSjohnjiang 
8864418919fSjohnjiang 	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
8874418919fSjohnjiang 				  page_size);
8884418919fSjohnjiang 	if (ret) {
8894418919fSjohnjiang 		TESTPMD_LOG(DEBUG,
8904418919fSjohnjiang 			    "unable to register addr 0x%p\n", memhdr->addr);
8914418919fSjohnjiang 		return;
8924418919fSjohnjiang 	}
8934418919fSjohnjiang 	RTE_ETH_FOREACH_DEV(pid) {
8944418919fSjohnjiang 		struct rte_eth_dev *dev =
8954418919fSjohnjiang 			&rte_eth_devices[pid];
8964418919fSjohnjiang 
8974418919fSjohnjiang 		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
8984418919fSjohnjiang 				      memhdr->len);
8994418919fSjohnjiang 		if (ret) {
9004418919fSjohnjiang 			TESTPMD_LOG(DEBUG,
9014418919fSjohnjiang 				    "unable to DMA map addr 0x%p "
9024418919fSjohnjiang 				    "for device %s\n",
9034418919fSjohnjiang 				    memhdr->addr, dev->data->name);
9044418919fSjohnjiang 		}
9054418919fSjohnjiang 	}
9064418919fSjohnjiang }
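/*
 * Editor's cross-reference note (no new behaviour): these callbacks are
 * invoked once per mempool memory chunk; for MP_ALLOC_ANON,
 * mbuf_pool_create() below runs
 *
 *	rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
 *
 * so every chunk of the anonymous pool is registered with EAL and DMA-mapped
 * for each probed port.
 */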
907d30ea906Sjfb8856606 
908*2d9fd380Sjfb8856606 static unsigned int
909*2d9fd380Sjfb8856606 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
910*2d9fd380Sjfb8856606 	    char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
911*2d9fd380Sjfb8856606 {
912*2d9fd380Sjfb8856606 	struct rte_pktmbuf_extmem *xmem;
913*2d9fd380Sjfb8856606 	unsigned int ext_num, zone_num, elt_num;
914*2d9fd380Sjfb8856606 	uint16_t elt_size;
915*2d9fd380Sjfb8856606 
916*2d9fd380Sjfb8856606 	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
917*2d9fd380Sjfb8856606 	elt_num = EXTBUF_ZONE_SIZE / elt_size;
918*2d9fd380Sjfb8856606 	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
919*2d9fd380Sjfb8856606 
920*2d9fd380Sjfb8856606 	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
921*2d9fd380Sjfb8856606 	if (xmem == NULL) {
922*2d9fd380Sjfb8856606 		TESTPMD_LOG(ERR, "Cannot allocate memory for "
923*2d9fd380Sjfb8856606 				 "external buffer descriptors\n");
924*2d9fd380Sjfb8856606 		*ext_mem = NULL;
925*2d9fd380Sjfb8856606 		return 0;
926*2d9fd380Sjfb8856606 	}
927*2d9fd380Sjfb8856606 	for (ext_num = 0; ext_num < zone_num; ext_num++) {
928*2d9fd380Sjfb8856606 		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
929*2d9fd380Sjfb8856606 		const struct rte_memzone *mz;
930*2d9fd380Sjfb8856606 		char mz_name[RTE_MEMZONE_NAMESIZE];
931*2d9fd380Sjfb8856606 		int ret;
932*2d9fd380Sjfb8856606 
933*2d9fd380Sjfb8856606 		ret = snprintf(mz_name, sizeof(mz_name),
934*2d9fd380Sjfb8856606 			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
935*2d9fd380Sjfb8856606 		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
936*2d9fd380Sjfb8856606 			errno = ENAMETOOLONG;
937*2d9fd380Sjfb8856606 			ext_num = 0;
938*2d9fd380Sjfb8856606 			break;
939*2d9fd380Sjfb8856606 		}
940*2d9fd380Sjfb8856606 		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
941*2d9fd380Sjfb8856606 						 socket_id,
942*2d9fd380Sjfb8856606 						 RTE_MEMZONE_IOVA_CONTIG |
943*2d9fd380Sjfb8856606 						 RTE_MEMZONE_1GB |
944*2d9fd380Sjfb8856606 						 RTE_MEMZONE_SIZE_HINT_ONLY,
945*2d9fd380Sjfb8856606 						 EXTBUF_ZONE_SIZE);
946*2d9fd380Sjfb8856606 		if (mz == NULL) {
947*2d9fd380Sjfb8856606 			/*
948*2d9fd380Sjfb8856606 			 * The caller exits on external buffer creation
949*2d9fd380Sjfb8856606 			 * error, so there is no need to free memzones.
950*2d9fd380Sjfb8856606 			 */
951*2d9fd380Sjfb8856606 			errno = ENOMEM;
952*2d9fd380Sjfb8856606 			ext_num = 0;
953*2d9fd380Sjfb8856606 			break;
954*2d9fd380Sjfb8856606 		}
955*2d9fd380Sjfb8856606 		xseg->buf_ptr = mz->addr;
956*2d9fd380Sjfb8856606 		xseg->buf_iova = mz->iova;
957*2d9fd380Sjfb8856606 		xseg->buf_len = EXTBUF_ZONE_SIZE;
958*2d9fd380Sjfb8856606 		xseg->elt_size = elt_size;
959*2d9fd380Sjfb8856606 	}
960*2d9fd380Sjfb8856606 	if (ext_num == 0 && xmem != NULL) {
961*2d9fd380Sjfb8856606 		free(xmem);
962*2d9fd380Sjfb8856606 		xmem = NULL;
963*2d9fd380Sjfb8856606 	}
964*2d9fd380Sjfb8856606 	*ext_mem = xmem;
965*2d9fd380Sjfb8856606 	return ext_num;
966*2d9fd380Sjfb8856606 }
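/*
 * Editor's worked example (assumed sizes, not in the original source): with
 * mbuf_sz = 2176 bytes (already a multiple of RTE_CACHE_LINE_SIZE), each
 * 2 MB EXTBUF_ZONE_SIZE memzone holds elt_num = 2097152 / 2176 = 963
 * buffers, so nb_mbufs = 16384 needs zone_num = 18 pinned memzones,
 * i.e. 36 MB in total.
 */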
967*2d9fd380Sjfb8856606 
968a9643ea8Slogwang /*
969a9643ea8Slogwang  * Configuration initialisation done once at init time.
970a9643ea8Slogwang  */
9711646932aSjfb8856606 static struct rte_mempool *
972a9643ea8Slogwang mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
973*2d9fd380Sjfb8856606 		 unsigned int socket_id, uint16_t size_idx)
974a9643ea8Slogwang {
975a9643ea8Slogwang 	char pool_name[RTE_MEMPOOL_NAMESIZE];
976a9643ea8Slogwang 	struct rte_mempool *rte_mp = NULL;
977a9643ea8Slogwang 	uint32_t mb_size;
978a9643ea8Slogwang 
979a9643ea8Slogwang 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
980*2d9fd380Sjfb8856606 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
981a9643ea8Slogwang 
982d30ea906Sjfb8856606 	TESTPMD_LOG(INFO,
983a9643ea8Slogwang 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
984a9643ea8Slogwang 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
985a9643ea8Slogwang 
986d30ea906Sjfb8856606 	switch (mp_alloc_type) {
987d30ea906Sjfb8856606 	case MP_ALLOC_NATIVE:
988d30ea906Sjfb8856606 		{
989d30ea906Sjfb8856606 			/* wrapper to rte_mempool_create() */
990d30ea906Sjfb8856606 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
991d30ea906Sjfb8856606 					rte_mbuf_best_mempool_ops());
992d30ea906Sjfb8856606 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
993d30ea906Sjfb8856606 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
994d30ea906Sjfb8856606 			break;
995d30ea906Sjfb8856606 		}
996d30ea906Sjfb8856606 	case MP_ALLOC_ANON:
997d30ea906Sjfb8856606 		{
998a9643ea8Slogwang 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
999d30ea906Sjfb8856606 				mb_size, (unsigned int) mb_mempool_cache,
1000a9643ea8Slogwang 				sizeof(struct rte_pktmbuf_pool_private),
10014418919fSjohnjiang 				socket_id, mempool_flags);
1002a9643ea8Slogwang 			if (rte_mp == NULL)
1003a9643ea8Slogwang 				goto err;
1004a9643ea8Slogwang 
1005a9643ea8Slogwang 			if (rte_mempool_populate_anon(rte_mp) == 0) {
1006a9643ea8Slogwang 				rte_mempool_free(rte_mp);
1007a9643ea8Slogwang 				rte_mp = NULL;
1008a9643ea8Slogwang 				goto err;
1009a9643ea8Slogwang 			}
1010a9643ea8Slogwang 			rte_pktmbuf_pool_init(rte_mp, NULL);
1011a9643ea8Slogwang 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
10124418919fSjohnjiang 			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1013d30ea906Sjfb8856606 			break;
1014d30ea906Sjfb8856606 		}
1015d30ea906Sjfb8856606 	case MP_ALLOC_XMEM:
1016d30ea906Sjfb8856606 	case MP_ALLOC_XMEM_HUGE:
1017d30ea906Sjfb8856606 		{
1018d30ea906Sjfb8856606 			int heap_socket;
1019d30ea906Sjfb8856606 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1020d30ea906Sjfb8856606 
1021d30ea906Sjfb8856606 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1022d30ea906Sjfb8856606 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1023d30ea906Sjfb8856606 
1024d30ea906Sjfb8856606 			heap_socket =
1025d30ea906Sjfb8856606 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1026d30ea906Sjfb8856606 			if (heap_socket < 0)
1027d30ea906Sjfb8856606 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1028d30ea906Sjfb8856606 
1029d30ea906Sjfb8856606 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1030d30ea906Sjfb8856606 					rte_mbuf_best_mempool_ops());
1031a9643ea8Slogwang 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1032d30ea906Sjfb8856606 					mb_mempool_cache, 0, mbuf_seg_size,
1033d30ea906Sjfb8856606 					heap_socket);
1034d30ea906Sjfb8856606 			break;
1035d30ea906Sjfb8856606 		}
1036*2d9fd380Sjfb8856606 	case MP_ALLOC_XBUF:
1037*2d9fd380Sjfb8856606 		{
1038*2d9fd380Sjfb8856606 			struct rte_pktmbuf_extmem *ext_mem;
1039*2d9fd380Sjfb8856606 			unsigned int ext_num;
1040*2d9fd380Sjfb8856606 
1041*2d9fd380Sjfb8856606 			ext_num = setup_extbuf(nb_mbuf,	mbuf_seg_size,
1042*2d9fd380Sjfb8856606 					       socket_id, pool_name, &ext_mem);
1043*2d9fd380Sjfb8856606 			if (ext_num == 0)
1044*2d9fd380Sjfb8856606 				rte_exit(EXIT_FAILURE,
1045*2d9fd380Sjfb8856606 					 "Can't create pinned data buffers\n");
1046*2d9fd380Sjfb8856606 
1047*2d9fd380Sjfb8856606 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1048*2d9fd380Sjfb8856606 					rte_mbuf_best_mempool_ops());
1049*2d9fd380Sjfb8856606 			rte_mp = rte_pktmbuf_pool_create_extbuf
1050*2d9fd380Sjfb8856606 					(pool_name, nb_mbuf, mb_mempool_cache,
1051*2d9fd380Sjfb8856606 					 0, mbuf_seg_size, socket_id,
1052*2d9fd380Sjfb8856606 					 ext_mem, ext_num);
1053*2d9fd380Sjfb8856606 			free(ext_mem);
1054*2d9fd380Sjfb8856606 			break;
1055*2d9fd380Sjfb8856606 		}
1056d30ea906Sjfb8856606 	default:
1057d30ea906Sjfb8856606 		{
1058d30ea906Sjfb8856606 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1059d30ea906Sjfb8856606 		}
1060a9643ea8Slogwang 	}
1061a9643ea8Slogwang 
1062a9643ea8Slogwang err:
1063a9643ea8Slogwang 	if (rte_mp == NULL) {
1064a9643ea8Slogwang 		rte_exit(EXIT_FAILURE,
1065a9643ea8Slogwang 			"Creation of mbuf pool for socket %u failed: %s\n",
1066a9643ea8Slogwang 			socket_id, rte_strerror(rte_errno));
1067a9643ea8Slogwang 	} else if (verbose_level > 0) {
1068a9643ea8Slogwang 		rte_mempool_dump(stdout, rte_mp);
1069a9643ea8Slogwang 	}
10701646932aSjfb8856606 	return rte_mp;
1071a9643ea8Slogwang }
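/*
 * Editor's usage sketch (argument values are illustrative, not the exact
 * call sites in this file): a default pool on socket 0 could be created
 * roughly as
 *
 *	mempools[0] = mbuf_pool_create(DEFAULT_MBUF_DATA_SIZE,
 *				       nb_mbuf_per_pool, 0, 0);
 *
 * Note that on failure the function does not return NULL; it calls
 * rte_exit() instead.
 */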
1072a9643ea8Slogwang 
1073a9643ea8Slogwang /*
1074a9643ea8Slogwang  * Check whether the given socket id is valid in NUMA mode.
1075a9643ea8Slogwang  * If valid, return 0, else return -1.
1076a9643ea8Slogwang  */
1077a9643ea8Slogwang static int
1078a9643ea8Slogwang check_socket_id(const unsigned int socket_id)
1079a9643ea8Slogwang {
1080a9643ea8Slogwang 	static int warning_once = 0;
1081a9643ea8Slogwang 
10822bfe3f2eSlogwang 	if (new_socket_id(socket_id)) {
1083a9643ea8Slogwang 		if (!warning_once && numa_support)
1084a9643ea8Slogwang 			printf("Warning: NUMA should be configured manually by"
1085a9643ea8Slogwang 			       " using --port-numa-config and"
1086a9643ea8Slogwang 			       " --ring-numa-config parameters along with"
1087a9643ea8Slogwang 			       " --numa.\n");
1088a9643ea8Slogwang 		warning_once = 1;
1089a9643ea8Slogwang 		return -1;
1090a9643ea8Slogwang 	}
1091a9643ea8Slogwang 	return 0;
1092a9643ea8Slogwang }
1093a9643ea8Slogwang 
10942bfe3f2eSlogwang /*
10952bfe3f2eSlogwang  * Get the allowed maximum number of RX queues.
10962bfe3f2eSlogwang  * *pid returns the port id which has the minimal value of
10972bfe3f2eSlogwang  * max_rx_queues among all ports.
10982bfe3f2eSlogwang  */
10992bfe3f2eSlogwang queueid_t
11002bfe3f2eSlogwang get_allowed_max_nb_rxq(portid_t *pid)
11012bfe3f2eSlogwang {
11024418919fSjohnjiang 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
11034418919fSjohnjiang 	bool max_rxq_valid = false;
11042bfe3f2eSlogwang 	portid_t pi;
11052bfe3f2eSlogwang 	struct rte_eth_dev_info dev_info;
11062bfe3f2eSlogwang 
11072bfe3f2eSlogwang 	RTE_ETH_FOREACH_DEV(pi) {
11084418919fSjohnjiang 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
11094418919fSjohnjiang 			continue;
11104418919fSjohnjiang 
11114418919fSjohnjiang 		max_rxq_valid = true;
11122bfe3f2eSlogwang 		if (dev_info.max_rx_queues < allowed_max_rxq) {
11132bfe3f2eSlogwang 			allowed_max_rxq = dev_info.max_rx_queues;
11142bfe3f2eSlogwang 			*pid = pi;
11152bfe3f2eSlogwang 		}
11162bfe3f2eSlogwang 	}
11174418919fSjohnjiang 	return max_rxq_valid ? allowed_max_rxq : 0;
11182bfe3f2eSlogwang }
11192bfe3f2eSlogwang 
11202bfe3f2eSlogwang /*
11212bfe3f2eSlogwang  * Check whether the input rxq is valid.
11222bfe3f2eSlogwang  * If the input rxq is not greater than the maximum number
11232bfe3f2eSlogwang  * of RX queues of every port, it is valid.
11242bfe3f2eSlogwang  * If valid, return 0, else return -1.
11252bfe3f2eSlogwang  */
11262bfe3f2eSlogwang int
11272bfe3f2eSlogwang check_nb_rxq(queueid_t rxq)
11282bfe3f2eSlogwang {
11292bfe3f2eSlogwang 	queueid_t allowed_max_rxq;
11302bfe3f2eSlogwang 	portid_t pid = 0;
11312bfe3f2eSlogwang 
11322bfe3f2eSlogwang 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
11332bfe3f2eSlogwang 	if (rxq > allowed_max_rxq) {
11342bfe3f2eSlogwang 		printf("Fail: input rxq (%u) can't be greater "
11352bfe3f2eSlogwang 		       "than max_rx_queues (%u) of port %u\n",
11362bfe3f2eSlogwang 		       rxq,
11372bfe3f2eSlogwang 		       allowed_max_rxq,
11382bfe3f2eSlogwang 		       pid);
11392bfe3f2eSlogwang 		return -1;
11402bfe3f2eSlogwang 	}
11412bfe3f2eSlogwang 	return 0;
11422bfe3f2eSlogwang }
11432bfe3f2eSlogwang 
11442bfe3f2eSlogwang /*
11452bfe3f2eSlogwang  * Get the allowed maximum number of TX queues.
11462bfe3f2eSlogwang  * *pid returns the port id which has the minimal value of
11472bfe3f2eSlogwang  * max_tx_queues among all ports.
11482bfe3f2eSlogwang  */
11492bfe3f2eSlogwang queueid_t
11502bfe3f2eSlogwang get_allowed_max_nb_txq(portid_t *pid)
11512bfe3f2eSlogwang {
11524418919fSjohnjiang 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
11534418919fSjohnjiang 	bool max_txq_valid = false;
11542bfe3f2eSlogwang 	portid_t pi;
11552bfe3f2eSlogwang 	struct rte_eth_dev_info dev_info;
11562bfe3f2eSlogwang 
11572bfe3f2eSlogwang 	RTE_ETH_FOREACH_DEV(pi) {
11584418919fSjohnjiang 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
11594418919fSjohnjiang 			continue;
11604418919fSjohnjiang 
11614418919fSjohnjiang 		max_txq_valid = true;
11622bfe3f2eSlogwang 		if (dev_info.max_tx_queues < allowed_max_txq) {
11632bfe3f2eSlogwang 			allowed_max_txq = dev_info.max_tx_queues;
11642bfe3f2eSlogwang 			*pid = pi;
11652bfe3f2eSlogwang 		}
11662bfe3f2eSlogwang 	}
11674418919fSjohnjiang 	return max_txq_valid ? allowed_max_txq : 0;
11682bfe3f2eSlogwang }
11692bfe3f2eSlogwang 
11702bfe3f2eSlogwang /*
11712bfe3f2eSlogwang  * Check whether the input txq is valid.
11722bfe3f2eSlogwang  * If the input txq is not greater than the maximum number
11732bfe3f2eSlogwang  * of TX queues of every port, it is valid.
11742bfe3f2eSlogwang  * If valid, return 0, else return -1.
11752bfe3f2eSlogwang  */
11762bfe3f2eSlogwang int
11772bfe3f2eSlogwang check_nb_txq(queueid_t txq)
11782bfe3f2eSlogwang {
11792bfe3f2eSlogwang 	queueid_t allowed_max_txq;
11802bfe3f2eSlogwang 	portid_t pid = 0;
11812bfe3f2eSlogwang 
11822bfe3f2eSlogwang 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
11832bfe3f2eSlogwang 	if (txq > allowed_max_txq) {
11842bfe3f2eSlogwang 		printf("Fail: input txq (%u) can't be greater "
11852bfe3f2eSlogwang 		       "than max_tx_queues (%u) of port %u\n",
11862bfe3f2eSlogwang 		       txq,
11872bfe3f2eSlogwang 		       allowed_max_txq,
11882bfe3f2eSlogwang 		       pid);
11892bfe3f2eSlogwang 		return -1;
11902bfe3f2eSlogwang 	}
11912bfe3f2eSlogwang 	return 0;
11922bfe3f2eSlogwang }
11932bfe3f2eSlogwang 
11944418919fSjohnjiang /*
11950c6bd470Sfengbojiang  * Get the allowed maximum number of RXDs of every rx queue.
11960c6bd470Sfengbojiang  * *pid returns the port id that has the minimal value of
11970c6bd470Sfengbojiang  * max_rxd among all ports.
11980c6bd470Sfengbojiang  */
11990c6bd470Sfengbojiang static uint16_t
12000c6bd470Sfengbojiang get_allowed_max_nb_rxd(portid_t *pid)
12010c6bd470Sfengbojiang {
12020c6bd470Sfengbojiang 	uint16_t allowed_max_rxd = UINT16_MAX;
12030c6bd470Sfengbojiang 	portid_t pi;
12040c6bd470Sfengbojiang 	struct rte_eth_dev_info dev_info;
12050c6bd470Sfengbojiang 
12060c6bd470Sfengbojiang 	RTE_ETH_FOREACH_DEV(pi) {
12070c6bd470Sfengbojiang 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
12080c6bd470Sfengbojiang 			continue;
12090c6bd470Sfengbojiang 
12100c6bd470Sfengbojiang 		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
12110c6bd470Sfengbojiang 			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
12120c6bd470Sfengbojiang 			*pid = pi;
12130c6bd470Sfengbojiang 		}
12140c6bd470Sfengbojiang 	}
12150c6bd470Sfengbojiang 	return allowed_max_rxd;
12160c6bd470Sfengbojiang }
12170c6bd470Sfengbojiang 
12180c6bd470Sfengbojiang /*
12190c6bd470Sfengbojiang  * Get the allowed minimal number of RXDs of every rx queue.
12200c6bd470Sfengbojiang  * *pid returns the port id that has the largest value of
12210c6bd470Sfengbojiang  * min_rxd among all ports.
12220c6bd470Sfengbojiang  */
12230c6bd470Sfengbojiang static uint16_t
12240c6bd470Sfengbojiang get_allowed_min_nb_rxd(portid_t *pid)
12250c6bd470Sfengbojiang {
12260c6bd470Sfengbojiang 	uint16_t allowed_min_rxd = 0;
12270c6bd470Sfengbojiang 	portid_t pi;
12280c6bd470Sfengbojiang 	struct rte_eth_dev_info dev_info;
12290c6bd470Sfengbojiang 
12300c6bd470Sfengbojiang 	RTE_ETH_FOREACH_DEV(pi) {
12310c6bd470Sfengbojiang 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
12320c6bd470Sfengbojiang 			continue;
12330c6bd470Sfengbojiang 
12340c6bd470Sfengbojiang 		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
12350c6bd470Sfengbojiang 			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
12360c6bd470Sfengbojiang 			*pid = pi;
12370c6bd470Sfengbojiang 		}
12380c6bd470Sfengbojiang 	}
12390c6bd470Sfengbojiang 
12400c6bd470Sfengbojiang 	return allowed_min_rxd;
12410c6bd470Sfengbojiang }
12420c6bd470Sfengbojiang 
12430c6bd470Sfengbojiang /*
12440c6bd470Sfengbojiang  * Check whether the requested number of RXDs per Rx queue is valid.
12450c6bd470Sfengbojiang  * The value is valid if it does not exceed the maximum number of
12460c6bd470Sfengbojiang  * RXDs supported by any port and is not less than the minimal
12470c6bd470Sfengbojiang  * number of RXDs required by any port.
12480c6bd470Sfengbojiang  * Return 0 if valid, -1 otherwise.
12490c6bd470Sfengbojiang  */
12500c6bd470Sfengbojiang int
12510c6bd470Sfengbojiang check_nb_rxd(queueid_t rxd)
12520c6bd470Sfengbojiang {
12530c6bd470Sfengbojiang 	uint16_t allowed_max_rxd;
12540c6bd470Sfengbojiang 	uint16_t allowed_min_rxd;
12550c6bd470Sfengbojiang 	portid_t pid = 0;
12560c6bd470Sfengbojiang 
12570c6bd470Sfengbojiang 	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
12580c6bd470Sfengbojiang 	if (rxd > allowed_max_rxd) {
12590c6bd470Sfengbojiang 		printf("Fail: input rxd (%u) can't be greater "
12600c6bd470Sfengbojiang 		       "than max_rxds (%u) of port %u\n",
12610c6bd470Sfengbojiang 		       rxd,
12620c6bd470Sfengbojiang 		       allowed_max_rxd,
12630c6bd470Sfengbojiang 		       pid);
12640c6bd470Sfengbojiang 		return -1;
12650c6bd470Sfengbojiang 	}
12660c6bd470Sfengbojiang 
12670c6bd470Sfengbojiang 	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
12680c6bd470Sfengbojiang 	if (rxd < allowed_min_rxd) {
12690c6bd470Sfengbojiang 		printf("Fail: input rxd (%u) can't be less "
12700c6bd470Sfengbojiang 		       "than min_rxds (%u) of port %u\n",
12710c6bd470Sfengbojiang 		       rxd,
12720c6bd470Sfengbojiang 		       allowed_min_rxd,
12730c6bd470Sfengbojiang 		       pid);
12740c6bd470Sfengbojiang 		return -1;
12750c6bd470Sfengbojiang 	}
12760c6bd470Sfengbojiang 
12770c6bd470Sfengbojiang 	return 0;
12780c6bd470Sfengbojiang }
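
/*
 * Illustrative sketch (hypothetical helper, kept under "#if 0"): a single
 * call to check_nb_rxd() enforces both bounds, since it compares the
 * requested value against rx_desc_lim.nb_max and rx_desc_lim.nb_min of
 * every port.
 */
#if 0
static void
cmd_config_rxd_sketch(uint16_t nb)
{
	if (check_nb_rxd(nb) == 0)
		nb_rxd = nb;	/* within [min_rxds, max_rxds] of all ports */
}
#endif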
12790c6bd470Sfengbojiang 
12800c6bd470Sfengbojiang /*
12810c6bd470Sfengbojiang  * Get the allowed maximum number of TXDs of every Tx queue.
12820c6bd470Sfengbojiang  * *pid returns the port id that has the minimal value of
12830c6bd470Sfengbojiang  * max_txd among all ports.
12840c6bd470Sfengbojiang  */
12850c6bd470Sfengbojiang static uint16_t
12860c6bd470Sfengbojiang get_allowed_max_nb_txd(portid_t *pid)
12870c6bd470Sfengbojiang {
12880c6bd470Sfengbojiang 	uint16_t allowed_max_txd = UINT16_MAX;
12890c6bd470Sfengbojiang 	portid_t pi;
12900c6bd470Sfengbojiang 	struct rte_eth_dev_info dev_info;
12910c6bd470Sfengbojiang 
12920c6bd470Sfengbojiang 	RTE_ETH_FOREACH_DEV(pi) {
12930c6bd470Sfengbojiang 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
12940c6bd470Sfengbojiang 			continue;
12950c6bd470Sfengbojiang 
12960c6bd470Sfengbojiang 		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
12970c6bd470Sfengbojiang 			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
12980c6bd470Sfengbojiang 			*pid = pi;
12990c6bd470Sfengbojiang 		}
13000c6bd470Sfengbojiang 	}
13010c6bd470Sfengbojiang 	return allowed_max_txd;
13020c6bd470Sfengbojiang }
13030c6bd470Sfengbojiang 
13040c6bd470Sfengbojiang /*
13050c6bd470Sfengbojiang  * Get the allowed minimal number of TXDs of every Tx queue.
13060c6bd470Sfengbojiang  * *pid returns the port id that has the largest value of
13070c6bd470Sfengbojiang  * min_txd among all ports.
13080c6bd470Sfengbojiang  */
13090c6bd470Sfengbojiang static uint16_t
13100c6bd470Sfengbojiang get_allowed_min_nb_txd(portid_t *pid)
13110c6bd470Sfengbojiang {
13120c6bd470Sfengbojiang 	uint16_t allowed_min_txd = 0;
13130c6bd470Sfengbojiang 	portid_t pi;
13140c6bd470Sfengbojiang 	struct rte_eth_dev_info dev_info;
13150c6bd470Sfengbojiang 
13160c6bd470Sfengbojiang 	RTE_ETH_FOREACH_DEV(pi) {
13170c6bd470Sfengbojiang 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
13180c6bd470Sfengbojiang 			continue;
13190c6bd470Sfengbojiang 
13200c6bd470Sfengbojiang 		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
13210c6bd470Sfengbojiang 			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
13220c6bd470Sfengbojiang 			*pid = pi;
13230c6bd470Sfengbojiang 		}
13240c6bd470Sfengbojiang 	}
13250c6bd470Sfengbojiang 
13260c6bd470Sfengbojiang 	return allowed_min_txd;
13270c6bd470Sfengbojiang }
13280c6bd470Sfengbojiang 
13290c6bd470Sfengbojiang /*
13300c6bd470Sfengbojiang  * Check whether the requested number of TXDs per Tx queue is valid.
13310c6bd470Sfengbojiang  * The value is valid if it does not exceed the maximum number of TXDs
13320c6bd470Sfengbojiang  * supported by any port and is not below the minimum required by any port.
13330c6bd470Sfengbojiang  * Return 0 if valid, -1 otherwise.
13340c6bd470Sfengbojiang  */
13350c6bd470Sfengbojiang int
13360c6bd470Sfengbojiang check_nb_txd(queueid_t txd)
13370c6bd470Sfengbojiang {
13380c6bd470Sfengbojiang 	uint16_t allowed_max_txd;
13390c6bd470Sfengbojiang 	uint16_t allowed_min_txd;
13400c6bd470Sfengbojiang 	portid_t pid = 0;
13410c6bd470Sfengbojiang 
13420c6bd470Sfengbojiang 	allowed_max_txd = get_allowed_max_nb_txd(&pid);
13430c6bd470Sfengbojiang 	if (txd > allowed_max_txd) {
13440c6bd470Sfengbojiang 		printf("Fail: input txd (%u) can't be greater "
13450c6bd470Sfengbojiang 		       "than max_txds (%u) of port %u\n",
13460c6bd470Sfengbojiang 		       txd,
13470c6bd470Sfengbojiang 		       allowed_max_txd,
13480c6bd470Sfengbojiang 		       pid);
13490c6bd470Sfengbojiang 		return -1;
13500c6bd470Sfengbojiang 	}
13510c6bd470Sfengbojiang 
13520c6bd470Sfengbojiang 	allowed_min_txd = get_allowed_min_nb_txd(&pid);
13530c6bd470Sfengbojiang 	if (txd < allowed_min_txd) {
13540c6bd470Sfengbojiang 		printf("Fail: input txd (%u) can't be less "
13550c6bd470Sfengbojiang 		       "than min_txds (%u) of port %u\n",
13560c6bd470Sfengbojiang 		       txd,
13570c6bd470Sfengbojiang 		       allowed_min_txd,
13580c6bd470Sfengbojiang 		       pid);
13590c6bd470Sfengbojiang 		return -1;
13600c6bd470Sfengbojiang 	}
13610c6bd470Sfengbojiang 	return 0;
13620c6bd470Sfengbojiang }
13630c6bd470Sfengbojiang 
13640c6bd470Sfengbojiang 
13650c6bd470Sfengbojiang /*
13664418919fSjohnjiang  * Get the allowed maximum number of hairpin queues.
13674418919fSjohnjiang  * *pid returns the port id that has the minimal value of
13684418919fSjohnjiang  * max_hairpin_queues among all ports.
13694418919fSjohnjiang  */
13704418919fSjohnjiang queueid_t
13714418919fSjohnjiang get_allowed_max_nb_hairpinq(portid_t *pid)
13724418919fSjohnjiang {
13734418919fSjohnjiang 	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
13744418919fSjohnjiang 	portid_t pi;
13754418919fSjohnjiang 	struct rte_eth_hairpin_cap cap;
13764418919fSjohnjiang 
13774418919fSjohnjiang 	RTE_ETH_FOREACH_DEV(pi) {
13784418919fSjohnjiang 		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
13794418919fSjohnjiang 			*pid = pi;
13804418919fSjohnjiang 			return 0;
13814418919fSjohnjiang 		}
13824418919fSjohnjiang 		if (cap.max_nb_queues < allowed_max_hairpinq) {
13834418919fSjohnjiang 			allowed_max_hairpinq = cap.max_nb_queues;
13844418919fSjohnjiang 			*pid = pi;
13854418919fSjohnjiang 		}
13864418919fSjohnjiang 	}
13874418919fSjohnjiang 	return allowed_max_hairpinq;
13884418919fSjohnjiang }
13894418919fSjohnjiang 
13904418919fSjohnjiang /*
13914418919fSjohnjiang  * Check whether the requested number of hairpin queues is valid.
13924418919fSjohnjiang  * The value is valid if it does not exceed the maximum number
13934418919fSjohnjiang  * of hairpin queues supported by any port.
13944418919fSjohnjiang  * Return 0 if valid, -1 otherwise.
13954418919fSjohnjiang  */
13964418919fSjohnjiang int
13974418919fSjohnjiang check_nb_hairpinq(queueid_t hairpinq)
13984418919fSjohnjiang {
13994418919fSjohnjiang 	queueid_t allowed_max_hairpinq;
14004418919fSjohnjiang 	portid_t pid = 0;
14014418919fSjohnjiang 
14024418919fSjohnjiang 	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
14034418919fSjohnjiang 	if (hairpinq > allowed_max_hairpinq) {
14044418919fSjohnjiang 		printf("Fail: input hairpin (%u) can't be greater "
14054418919fSjohnjiang 		       "than max_hairpin_queues (%u) of port %u\n",
14064418919fSjohnjiang 		       hairpinq, allowed_max_hairpinq, pid);
14074418919fSjohnjiang 		return -1;
14084418919fSjohnjiang 	}
14094418919fSjohnjiang 	return 0;
14104418919fSjohnjiang }
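
/*
 * Illustrative sketch (hypothetical helper, kept under "#if 0"): the same
 * capability query used above can be issued per port to report how many
 * hairpin queues each device supports.
 */
#if 0
static void
show_hairpin_caps_sketch(void)
{
	struct rte_eth_hairpin_cap cap;
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi)
		if (rte_eth_dev_hairpin_capability_get(pi, &cap) == 0)
			printf("port %u: up to %u hairpin queues\n",
			       pi, cap.max_nb_queues);
}
#endif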
14114418919fSjohnjiang 
1412a9643ea8Slogwang static void
1413a9643ea8Slogwang init_config(void)
1414a9643ea8Slogwang {
1415a9643ea8Slogwang 	portid_t pid;
1416a9643ea8Slogwang 	struct rte_port *port;
1417a9643ea8Slogwang 	struct rte_mempool *mbp;
1418a9643ea8Slogwang 	unsigned int nb_mbuf_per_pool;
1419a9643ea8Slogwang 	lcoreid_t  lc_id;
1420a9643ea8Slogwang 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
14212bfe3f2eSlogwang 	struct rte_gro_param gro_param;
14222bfe3f2eSlogwang 	uint32_t gso_types;
14234418919fSjohnjiang 	uint16_t data_size;
14244418919fSjohnjiang 	bool warning = 0;
1425d30ea906Sjfb8856606 	int k;
14264418919fSjohnjiang 	int ret;
1427a9643ea8Slogwang 
1428a9643ea8Slogwang 	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
14292bfe3f2eSlogwang 
1430a9643ea8Slogwang 	/* Configuration of logical cores. */
1431a9643ea8Slogwang 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1432a9643ea8Slogwang 				sizeof(struct fwd_lcore *) * nb_lcores,
1433a9643ea8Slogwang 				RTE_CACHE_LINE_SIZE);
1434a9643ea8Slogwang 	if (fwd_lcores == NULL) {
1435a9643ea8Slogwang 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1436a9643ea8Slogwang 							"failed\n", nb_lcores);
1437a9643ea8Slogwang 	}
1438a9643ea8Slogwang 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1439a9643ea8Slogwang 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1440a9643ea8Slogwang 					       sizeof(struct fwd_lcore),
1441a9643ea8Slogwang 					       RTE_CACHE_LINE_SIZE);
1442a9643ea8Slogwang 		if (fwd_lcores[lc_id] == NULL) {
1443a9643ea8Slogwang 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1444a9643ea8Slogwang 								"failed\n");
1445a9643ea8Slogwang 		}
1446a9643ea8Slogwang 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1447a9643ea8Slogwang 	}
1448a9643ea8Slogwang 
14492bfe3f2eSlogwang 	RTE_ETH_FOREACH_DEV(pid) {
1450a9643ea8Slogwang 		port = &ports[pid];
1451d30ea906Sjfb8856606 		/* Apply default TxRx configuration for all ports */
1452d30ea906Sjfb8856606 		port->dev_conf.txmode = tx_mode;
1453d30ea906Sjfb8856606 		port->dev_conf.rxmode = rx_mode;
14544418919fSjohnjiang 
14554418919fSjohnjiang 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
14564418919fSjohnjiang 		if (ret != 0)
14574418919fSjohnjiang 			rte_exit(EXIT_FAILURE,
14584418919fSjohnjiang 				 "rte_eth_dev_info_get() failed\n");
1459a9643ea8Slogwang 
1460d30ea906Sjfb8856606 		if (!(port->dev_info.tx_offload_capa &
1461d30ea906Sjfb8856606 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1462d30ea906Sjfb8856606 			port->dev_conf.txmode.offloads &=
1463d30ea906Sjfb8856606 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1464a9643ea8Slogwang 		if (numa_support) {
1465a9643ea8Slogwang 			if (port_numa[pid] != NUMA_NO_CONFIG)
1466a9643ea8Slogwang 				port_per_socket[port_numa[pid]]++;
1467a9643ea8Slogwang 			else {
1468a9643ea8Slogwang 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
1469a9643ea8Slogwang 
1470d30ea906Sjfb8856606 				/*
1471d30ea906Sjfb8856606 				 * if socket_id is invalid,
1472d30ea906Sjfb8856606 				 * set to the first available socket.
1473d30ea906Sjfb8856606 				 */
1474a9643ea8Slogwang 				if (check_socket_id(socket_id) < 0)
1475d30ea906Sjfb8856606 					socket_id = socket_ids[0];
1476a9643ea8Slogwang 				port_per_socket[socket_id]++;
1477a9643ea8Slogwang 			}
1478a9643ea8Slogwang 		}
1479a9643ea8Slogwang 
1480d30ea906Sjfb8856606 		/* Apply Rx offloads configuration */
1481d30ea906Sjfb8856606 		for (k = 0; k < port->dev_info.max_rx_queues; k++)
1482d30ea906Sjfb8856606 			port->rx_conf[k].offloads =
1483d30ea906Sjfb8856606 				port->dev_conf.rxmode.offloads;
1484d30ea906Sjfb8856606 		/* Apply Tx offloads configuration */
1485d30ea906Sjfb8856606 		for (k = 0; k < port->dev_info.max_tx_queues; k++)
1486d30ea906Sjfb8856606 			port->tx_conf[k].offloads =
1487d30ea906Sjfb8856606 				port->dev_conf.txmode.offloads;
1488d30ea906Sjfb8856606 
1489a9643ea8Slogwang 		/* set flag to initialize port/queue */
1490a9643ea8Slogwang 		port->need_reconfig = 1;
1491a9643ea8Slogwang 		port->need_reconfig_queues = 1;
1492d30ea906Sjfb8856606 		port->tx_metadata = 0;
14934418919fSjohnjiang 
14944418919fSjohnjiang 		/* Check for maximum number of segments per MTU. Accordingly
14954418919fSjohnjiang 		 * update the mbuf data size.
14964418919fSjohnjiang 		 */
14974418919fSjohnjiang 		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
14984418919fSjohnjiang 				port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
14994418919fSjohnjiang 			data_size = rx_mode.max_rx_pkt_len /
15004418919fSjohnjiang 				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
15014418919fSjohnjiang 
15024418919fSjohnjiang 			if ((data_size + RTE_PKTMBUF_HEADROOM) >
1503*2d9fd380Sjfb8856606 							mbuf_data_size[0]) {
1504*2d9fd380Sjfb8856606 				mbuf_data_size[0] = data_size +
15054418919fSjohnjiang 						 RTE_PKTMBUF_HEADROOM;
15064418919fSjohnjiang 				warning = 1;
1507a9643ea8Slogwang 			}
15084418919fSjohnjiang 		}
15094418919fSjohnjiang 	}
15104418919fSjohnjiang 
15114418919fSjohnjiang 	if (warning)
1512*2d9fd380Sjfb8856606 		TESTPMD_LOG(WARNING,
1513*2d9fd380Sjfb8856606 			    "Configured mbuf size of the first segment %hu\n",
1514*2d9fd380Sjfb8856606 			    mbuf_data_size[0]);
15152bfe3f2eSlogwang 	/*
15162bfe3f2eSlogwang 	 * Create the mbuf pools.
15172bfe3f2eSlogwang 	 * If NUMA support is disabled, create a single mbuf pool in
15182bfe3f2eSlogwang 	 * socket 0 memory by default.
15192bfe3f2eSlogwang 	 * Otherwise, create an mbuf pool in the memory of each detected socket.
15202bfe3f2eSlogwang 	 *
15212bfe3f2eSlogwang 	 * Size the pools with the maximum values of nb_rxd and nb_txd, so that
15222bfe3f2eSlogwang 	 * nb_rxd and nb_txd can still be reconfigured at run time.
15232bfe3f2eSlogwang 	 */
15242bfe3f2eSlogwang 	if (param_total_num_mbufs)
15252bfe3f2eSlogwang 		nb_mbuf_per_pool = param_total_num_mbufs;
15262bfe3f2eSlogwang 	else {
15272bfe3f2eSlogwang 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
15282bfe3f2eSlogwang 			(nb_lcores * mb_mempool_cache) +
15292bfe3f2eSlogwang 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
15302bfe3f2eSlogwang 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
15312bfe3f2eSlogwang 	}
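	/*
	 * Worked example with illustrative values (not the build-time
	 * constants): with 2048 Rx descriptors, 2048 Tx descriptors,
	 * 4 lcores with a 512-entry mempool cache each, a 512-packet
	 * burst and 32 possible ports, the default sizing above gives
	 * (2048 + 4 * 512 + 2048 + 512) * 32 = 6656 * 32 = 212992 mbufs.
	 */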
15322bfe3f2eSlogwang 
1533a9643ea8Slogwang 	if (numa_support) {
1534*2d9fd380Sjfb8856606 		uint8_t i, j;
1535a9643ea8Slogwang 
15362bfe3f2eSlogwang 		for (i = 0; i < num_sockets; i++)
1537*2d9fd380Sjfb8856606 			for (j = 0; j < mbuf_data_size_n; j++)
1538*2d9fd380Sjfb8856606 				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
1539*2d9fd380Sjfb8856606 					mbuf_pool_create(mbuf_data_size[j],
15401646932aSjfb8856606 							  nb_mbuf_per_pool,
1541*2d9fd380Sjfb8856606 							  socket_ids[i], j);
15422bfe3f2eSlogwang 	} else {
1543*2d9fd380Sjfb8856606 		uint8_t i;
1544*2d9fd380Sjfb8856606 
1545*2d9fd380Sjfb8856606 		for (i = 0; i < mbuf_data_size_n; i++)
1546*2d9fd380Sjfb8856606 			mempools[i] = mbuf_pool_create
1547*2d9fd380Sjfb8856606 					(mbuf_data_size[i],
15481646932aSjfb8856606 					 nb_mbuf_per_pool,
1549*2d9fd380Sjfb8856606 					 socket_num == UMA_NO_CONFIG ?
1550*2d9fd380Sjfb8856606 					 0 : socket_num, i);
15512bfe3f2eSlogwang 	}
1552a9643ea8Slogwang 
1553a9643ea8Slogwang 	init_port_config();
1554a9643ea8Slogwang 
15552bfe3f2eSlogwang 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1556d30ea906Sjfb8856606 		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1557a9643ea8Slogwang 	/*
1558a9643ea8Slogwang 	 * Records which Mbuf pool to use by each logical core, if needed.
1559a9643ea8Slogwang 	 */
1560a9643ea8Slogwang 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1561a9643ea8Slogwang 		mbp = mbuf_pool_find(
1562*2d9fd380Sjfb8856606 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
1563a9643ea8Slogwang 
1564a9643ea8Slogwang 		if (mbp == NULL)
1565*2d9fd380Sjfb8856606 			mbp = mbuf_pool_find(0, 0);
1566a9643ea8Slogwang 		fwd_lcores[lc_id]->mbp = mbp;
15672bfe3f2eSlogwang 		/* initialize GSO context */
15682bfe3f2eSlogwang 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
15692bfe3f2eSlogwang 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
15702bfe3f2eSlogwang 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
15714418919fSjohnjiang 		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
15724418919fSjohnjiang 			RTE_ETHER_CRC_LEN;
15732bfe3f2eSlogwang 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
1574a9643ea8Slogwang 	}
1575a9643ea8Slogwang 
1576a9643ea8Slogwang 	/* Configuration of packet forwarding streams. */
1577a9643ea8Slogwang 	if (init_fwd_streams() < 0)
1578a9643ea8Slogwang 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1579a9643ea8Slogwang 
1580a9643ea8Slogwang 	fwd_config_setup();
15812bfe3f2eSlogwang 
15822bfe3f2eSlogwang 	/* create a gro context for each lcore */
15832bfe3f2eSlogwang 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
15842bfe3f2eSlogwang 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
15852bfe3f2eSlogwang 	gro_param.max_item_per_flow = MAX_PKT_BURST;
15862bfe3f2eSlogwang 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
15872bfe3f2eSlogwang 		gro_param.socket_id = rte_lcore_to_socket_id(
15882bfe3f2eSlogwang 				fwd_lcores_cpuids[lc_id]);
15892bfe3f2eSlogwang 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
15902bfe3f2eSlogwang 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
15912bfe3f2eSlogwang 			rte_exit(EXIT_FAILURE,
15922bfe3f2eSlogwang 					"rte_gro_ctx_create() failed\n");
15932bfe3f2eSlogwang 		}
15942bfe3f2eSlogwang 	}
1595a9643ea8Slogwang }
1596a9643ea8Slogwang 
1597a9643ea8Slogwang 
1598a9643ea8Slogwang void
1599a9643ea8Slogwang reconfig(portid_t new_port_id, unsigned socket_id)
1600a9643ea8Slogwang {
1601a9643ea8Slogwang 	struct rte_port *port;
16024418919fSjohnjiang 	int ret;
1603a9643ea8Slogwang 
1604a9643ea8Slogwang 	/* Reconfiguration of Ethernet ports. */
1605a9643ea8Slogwang 	port = &ports[new_port_id];
16064418919fSjohnjiang 
16074418919fSjohnjiang 	ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
16084418919fSjohnjiang 	if (ret != 0)
16094418919fSjohnjiang 		return;
1610a9643ea8Slogwang 
1611a9643ea8Slogwang 	/* set flag to initialize port/queue */
1612a9643ea8Slogwang 	port->need_reconfig = 1;
1613a9643ea8Slogwang 	port->need_reconfig_queues = 1;
1614a9643ea8Slogwang 	port->socket_id = socket_id;
1615a9643ea8Slogwang 
1616a9643ea8Slogwang 	init_port_config();
1617a9643ea8Slogwang }
1618a9643ea8Slogwang 
1619a9643ea8Slogwang 
1620a9643ea8Slogwang int
1621a9643ea8Slogwang init_fwd_streams(void)
1622a9643ea8Slogwang {
1623a9643ea8Slogwang 	portid_t pid;
1624a9643ea8Slogwang 	struct rte_port *port;
1625a9643ea8Slogwang 	streamid_t sm_id, nb_fwd_streams_new;
1626a9643ea8Slogwang 	queueid_t q;
1627a9643ea8Slogwang 
1628a9643ea8Slogwang 	/* set socket id according to numa or not */
16292bfe3f2eSlogwang 	RTE_ETH_FOREACH_DEV(pid) {
1630a9643ea8Slogwang 		port = &ports[pid];
1631a9643ea8Slogwang 		if (nb_rxq > port->dev_info.max_rx_queues) {
1632a9643ea8Slogwang 			printf("Fail: nb_rxq(%d) is greater than "
1633a9643ea8Slogwang 				"max_rx_queues(%d)\n", nb_rxq,
1634a9643ea8Slogwang 				port->dev_info.max_rx_queues);
1635a9643ea8Slogwang 			return -1;
1636a9643ea8Slogwang 		}
1637a9643ea8Slogwang 		if (nb_txq > port->dev_info.max_tx_queues) {
1638a9643ea8Slogwang 			printf("Fail: nb_txq(%d) is greater than "
1639a9643ea8Slogwang 				"max_tx_queues(%d)\n", nb_txq,
1640a9643ea8Slogwang 				port->dev_info.max_tx_queues);
1641a9643ea8Slogwang 			return -1;
1642a9643ea8Slogwang 		}
1643a9643ea8Slogwang 		if (numa_support) {
1644a9643ea8Slogwang 			if (port_numa[pid] != NUMA_NO_CONFIG)
1645a9643ea8Slogwang 				port->socket_id = port_numa[pid];
1646a9643ea8Slogwang 			else {
1647a9643ea8Slogwang 				port->socket_id = rte_eth_dev_socket_id(pid);
1648a9643ea8Slogwang 
1649d30ea906Sjfb8856606 				/*
1650d30ea906Sjfb8856606 				 * if socket_id is invalid,
1651d30ea906Sjfb8856606 				 * set to the first available socket.
1652d30ea906Sjfb8856606 				 */
1653a9643ea8Slogwang 				if (check_socket_id(port->socket_id) < 0)
1654d30ea906Sjfb8856606 					port->socket_id = socket_ids[0];
1655a9643ea8Slogwang 			}
1656a9643ea8Slogwang 		}
1657a9643ea8Slogwang 		else {
1658a9643ea8Slogwang 			if (socket_num == UMA_NO_CONFIG)
1659a9643ea8Slogwang 				port->socket_id = 0;
1660a9643ea8Slogwang 			else
1661a9643ea8Slogwang 				port->socket_id = socket_num;
1662a9643ea8Slogwang 		}
1663a9643ea8Slogwang 	}
1664a9643ea8Slogwang 
1665a9643ea8Slogwang 	q = RTE_MAX(nb_rxq, nb_txq);
1666a9643ea8Slogwang 	if (q == 0) {
1667a9643ea8Slogwang 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1668a9643ea8Slogwang 		return -1;
1669a9643ea8Slogwang 	}
1670a9643ea8Slogwang 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1671a9643ea8Slogwang 	if (nb_fwd_streams_new == nb_fwd_streams)
1672a9643ea8Slogwang 		return 0;
1673a9643ea8Slogwang 	/* clear the old */
1674a9643ea8Slogwang 	if (fwd_streams != NULL) {
1675a9643ea8Slogwang 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1676a9643ea8Slogwang 			if (fwd_streams[sm_id] == NULL)
1677a9643ea8Slogwang 				continue;
1678a9643ea8Slogwang 			rte_free(fwd_streams[sm_id]);
1679a9643ea8Slogwang 			fwd_streams[sm_id] = NULL;
1680a9643ea8Slogwang 		}
1681a9643ea8Slogwang 		rte_free(fwd_streams);
1682a9643ea8Slogwang 		fwd_streams = NULL;
1683a9643ea8Slogwang 	}
1684a9643ea8Slogwang 
1685a9643ea8Slogwang 	/* init new */
1686a9643ea8Slogwang 	nb_fwd_streams = nb_fwd_streams_new;
1687579bf1e2Sjfb8856606 	if (nb_fwd_streams) {
1688a9643ea8Slogwang 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1689579bf1e2Sjfb8856606 			sizeof(struct fwd_stream *) * nb_fwd_streams,
1690579bf1e2Sjfb8856606 			RTE_CACHE_LINE_SIZE);
1691a9643ea8Slogwang 		if (fwd_streams == NULL)
1692579bf1e2Sjfb8856606 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1693579bf1e2Sjfb8856606 				 " (struct fwd_stream *)) failed\n",
1694579bf1e2Sjfb8856606 				 nb_fwd_streams);
1695a9643ea8Slogwang 
1696a9643ea8Slogwang 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1697579bf1e2Sjfb8856606 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1698579bf1e2Sjfb8856606 				" struct fwd_stream", sizeof(struct fwd_stream),
1699579bf1e2Sjfb8856606 				RTE_CACHE_LINE_SIZE);
1700a9643ea8Slogwang 			if (fwd_streams[sm_id] == NULL)
1701579bf1e2Sjfb8856606 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
1702579bf1e2Sjfb8856606 					 "(struct fwd_stream) failed\n");
1703579bf1e2Sjfb8856606 		}
1704a9643ea8Slogwang 	}
1705a9643ea8Slogwang 
1706a9643ea8Slogwang 	return 0;
1707a9643ea8Slogwang }
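
/*
 * Worked example of the stream count computed above (illustrative numbers):
 * with 2 forwarding ports, nb_rxq = 4 and nb_txq = 2, q = max(4, 2) = 4 and
 * nb_fwd_streams = 2 * 4 = 8 forwarding streams are allocated.
 */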
1708a9643ea8Slogwang 
1709a9643ea8Slogwang static void
1710a9643ea8Slogwang pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1711a9643ea8Slogwang {
1712*2d9fd380Sjfb8856606 	uint64_t total_burst, sburst;
17130c6bd470Sfengbojiang 	uint64_t nb_burst;
1714*2d9fd380Sjfb8856606 	uint64_t burst_stats[4];
1715*2d9fd380Sjfb8856606 	uint16_t pktnb_stats[4];
1716a9643ea8Slogwang 	uint16_t nb_pkt;
1717*2d9fd380Sjfb8856606 	int burst_percent[4], sburstp;
1718*2d9fd380Sjfb8856606 	int i;
1719a9643ea8Slogwang 
1720a9643ea8Slogwang 	/*
1721a9643ea8Slogwang 	 * First compute the total number of packet bursts and find the
1722a9643ea8Slogwang 	 * two non-zero burst sizes with the highest occurrence counts.
1723a9643ea8Slogwang 	 */
1724*2d9fd380Sjfb8856606 	memset(&burst_stats, 0x0, sizeof(burst_stats));
1725*2d9fd380Sjfb8856606 	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
1726*2d9fd380Sjfb8856606 
1727*2d9fd380Sjfb8856606 	/* Show stats for 0 burst size always */
1728*2d9fd380Sjfb8856606 	total_burst = pbs->pkt_burst_spread[0];
1729*2d9fd380Sjfb8856606 	burst_stats[0] = pbs->pkt_burst_spread[0];
1730*2d9fd380Sjfb8856606 	pktnb_stats[0] = 0;
1731*2d9fd380Sjfb8856606 
1732*2d9fd380Sjfb8856606 	/* Find the next 2 burst sizes with highest occurrences. */
1733*2d9fd380Sjfb8856606 	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1734a9643ea8Slogwang 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
1735*2d9fd380Sjfb8856606 
1736a9643ea8Slogwang 		if (nb_burst == 0)
1737a9643ea8Slogwang 			continue;
1738*2d9fd380Sjfb8856606 
1739a9643ea8Slogwang 		total_burst += nb_burst;
1740*2d9fd380Sjfb8856606 
1741*2d9fd380Sjfb8856606 		if (nb_burst > burst_stats[1]) {
1742*2d9fd380Sjfb8856606 			burst_stats[2] = burst_stats[1];
1743*2d9fd380Sjfb8856606 			pktnb_stats[2] = pktnb_stats[1];
1744579bf1e2Sjfb8856606 			burst_stats[1] = nb_burst;
1745579bf1e2Sjfb8856606 			pktnb_stats[1] = nb_pkt;
1746*2d9fd380Sjfb8856606 		} else if (nb_burst > burst_stats[2]) {
1747*2d9fd380Sjfb8856606 			burst_stats[2] = nb_burst;
1748*2d9fd380Sjfb8856606 			pktnb_stats[2] = nb_pkt;
1749a9643ea8Slogwang 		}
1750a9643ea8Slogwang 	}
1751a9643ea8Slogwang 	if (total_burst == 0)
1752a9643ea8Slogwang 		return;
1753*2d9fd380Sjfb8856606 
1754*2d9fd380Sjfb8856606 	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
1755*2d9fd380Sjfb8856606 	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
1756*2d9fd380Sjfb8856606 		if (i == 3) {
1757*2d9fd380Sjfb8856606 			printf("%d%% of other]\n", 100 - sburstp);
1758a9643ea8Slogwang 			return;
1759a9643ea8Slogwang 		}
1760*2d9fd380Sjfb8856606 
1761*2d9fd380Sjfb8856606 		sburst += burst_stats[i];
1762*2d9fd380Sjfb8856606 		if (sburst == total_burst) {
1763*2d9fd380Sjfb8856606 			printf("%d%% of %d pkts]\n",
1764*2d9fd380Sjfb8856606 				100 - sburstp, (int) pktnb_stats[i]);
1765a9643ea8Slogwang 			return;
1766a9643ea8Slogwang 		}
1767*2d9fd380Sjfb8856606 
1768*2d9fd380Sjfb8856606 		burst_percent[i] =
1769*2d9fd380Sjfb8856606 			(double)burst_stats[i] / total_burst * 100;
1770*2d9fd380Sjfb8856606 		printf("%d%% of %d pkts + ",
1771*2d9fd380Sjfb8856606 			burst_percent[i], (int) pktnb_stats[i]);
1772*2d9fd380Sjfb8856606 		sburstp += burst_percent[i];
1773a9643ea8Slogwang 	}
1774a9643ea8Slogwang }
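
/*
 * Example of the line printed above (illustrative numbers only):
 *
 *   RX-bursts : 1500 [10% of 0 pkts + 60% of 32 pkts + 30% of 16 pkts]
 *
 * The zero-size bucket is always reported first, followed by the two most
 * frequent non-zero burst sizes; any remaining bursts are folded into a
 * trailing "% of other" bucket.
 */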
1775a9643ea8Slogwang 
1776a9643ea8Slogwang static void
1777a9643ea8Slogwang fwd_stream_stats_display(streamid_t stream_id)
1778a9643ea8Slogwang {
1779a9643ea8Slogwang 	struct fwd_stream *fs;
1780a9643ea8Slogwang 	static const char *fwd_top_stats_border = "-------";
1781a9643ea8Slogwang 
1782a9643ea8Slogwang 	fs = fwd_streams[stream_id];
1783a9643ea8Slogwang 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1784a9643ea8Slogwang 	    (fs->fwd_dropped == 0))
1785a9643ea8Slogwang 		return;
1786a9643ea8Slogwang 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1787a9643ea8Slogwang 	       "TX Port=%2d/Queue=%2d %s\n",
1788a9643ea8Slogwang 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1789a9643ea8Slogwang 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
17901646932aSjfb8856606 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
17911646932aSjfb8856606 	       " TX-dropped: %-14"PRIu64,
1792a9643ea8Slogwang 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1793a9643ea8Slogwang 
1794a9643ea8Slogwang 	/* if checksum mode */
1795a9643ea8Slogwang 	if (cur_fwd_eng == &csum_fwd_engine) {
17961646932aSjfb8856606 		printf("  RX- bad IP checksum: %-14"PRIu64
17971646932aSjfb8856606 		       "  Rx- bad L4 checksum: %-14"PRIu64
17981646932aSjfb8856606 		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1799d30ea906Sjfb8856606 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1800d30ea906Sjfb8856606 			fs->rx_bad_outer_l4_csum);
18011646932aSjfb8856606 	} else {
18021646932aSjfb8856606 		printf("\n");
1803a9643ea8Slogwang 	}
1804a9643ea8Slogwang 
1805*2d9fd380Sjfb8856606 	if (record_burst_stats) {
1806a9643ea8Slogwang 		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1807a9643ea8Slogwang 		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1808*2d9fd380Sjfb8856606 	}
1809a9643ea8Slogwang }
1810a9643ea8Slogwang 
18114418919fSjohnjiang void
18124418919fSjohnjiang fwd_stats_display(void)
18134418919fSjohnjiang {
18144418919fSjohnjiang 	static const char *fwd_stats_border = "----------------------";
18154418919fSjohnjiang 	static const char *acc_stats_border = "+++++++++++++++";
18164418919fSjohnjiang 	struct {
18174418919fSjohnjiang 		struct fwd_stream *rx_stream;
18184418919fSjohnjiang 		struct fwd_stream *tx_stream;
18194418919fSjohnjiang 		uint64_t tx_dropped;
18204418919fSjohnjiang 		uint64_t rx_bad_ip_csum;
18214418919fSjohnjiang 		uint64_t rx_bad_l4_csum;
18224418919fSjohnjiang 		uint64_t rx_bad_outer_l4_csum;
18234418919fSjohnjiang 	} ports_stats[RTE_MAX_ETHPORTS];
18244418919fSjohnjiang 	uint64_t total_rx_dropped = 0;
18254418919fSjohnjiang 	uint64_t total_tx_dropped = 0;
18264418919fSjohnjiang 	uint64_t total_rx_nombuf = 0;
18274418919fSjohnjiang 	struct rte_eth_stats stats;
18284418919fSjohnjiang 	uint64_t fwd_cycles = 0;
18294418919fSjohnjiang 	uint64_t total_recv = 0;
18304418919fSjohnjiang 	uint64_t total_xmit = 0;
18314418919fSjohnjiang 	struct rte_port *port;
18324418919fSjohnjiang 	streamid_t sm_id;
18334418919fSjohnjiang 	portid_t pt_id;
18344418919fSjohnjiang 	int i;
18354418919fSjohnjiang 
18364418919fSjohnjiang 	memset(ports_stats, 0, sizeof(ports_stats));
18374418919fSjohnjiang 
18384418919fSjohnjiang 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
18394418919fSjohnjiang 		struct fwd_stream *fs = fwd_streams[sm_id];
18404418919fSjohnjiang 
18414418919fSjohnjiang 		if (cur_fwd_config.nb_fwd_streams >
18424418919fSjohnjiang 		    cur_fwd_config.nb_fwd_ports) {
18434418919fSjohnjiang 			fwd_stream_stats_display(sm_id);
18444418919fSjohnjiang 		} else {
18454418919fSjohnjiang 			ports_stats[fs->tx_port].tx_stream = fs;
18464418919fSjohnjiang 			ports_stats[fs->rx_port].rx_stream = fs;
18474418919fSjohnjiang 		}
18484418919fSjohnjiang 
18494418919fSjohnjiang 		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
18504418919fSjohnjiang 
18514418919fSjohnjiang 		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
18524418919fSjohnjiang 		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
18534418919fSjohnjiang 		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
18544418919fSjohnjiang 				fs->rx_bad_outer_l4_csum;
18554418919fSjohnjiang 
1856*2d9fd380Sjfb8856606 		if (record_core_cycles)
18574418919fSjohnjiang 			fwd_cycles += fs->core_cycles;
18584418919fSjohnjiang 	}
18594418919fSjohnjiang 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
18604418919fSjohnjiang 		uint8_t j;
18614418919fSjohnjiang 
18624418919fSjohnjiang 		pt_id = fwd_ports_ids[i];
18634418919fSjohnjiang 		port = &ports[pt_id];
18644418919fSjohnjiang 
18654418919fSjohnjiang 		rte_eth_stats_get(pt_id, &stats);
18664418919fSjohnjiang 		stats.ipackets -= port->stats.ipackets;
18674418919fSjohnjiang 		stats.opackets -= port->stats.opackets;
18684418919fSjohnjiang 		stats.ibytes -= port->stats.ibytes;
18694418919fSjohnjiang 		stats.obytes -= port->stats.obytes;
18704418919fSjohnjiang 		stats.imissed -= port->stats.imissed;
18714418919fSjohnjiang 		stats.oerrors -= port->stats.oerrors;
18724418919fSjohnjiang 		stats.rx_nombuf -= port->stats.rx_nombuf;
18734418919fSjohnjiang 
18744418919fSjohnjiang 		total_recv += stats.ipackets;
18754418919fSjohnjiang 		total_xmit += stats.opackets;
18764418919fSjohnjiang 		total_rx_dropped += stats.imissed;
18774418919fSjohnjiang 		total_tx_dropped += ports_stats[pt_id].tx_dropped;
18784418919fSjohnjiang 		total_tx_dropped += stats.oerrors;
18794418919fSjohnjiang 		total_rx_nombuf  += stats.rx_nombuf;
18804418919fSjohnjiang 
18814418919fSjohnjiang 		printf("\n  %s Forward statistics for port %-2d %s\n",
18824418919fSjohnjiang 		       fwd_stats_border, pt_id, fwd_stats_border);
18834418919fSjohnjiang 
18844418919fSjohnjiang 		if (!port->rx_queue_stats_mapping_enabled &&
18854418919fSjohnjiang 		    !port->tx_queue_stats_mapping_enabled) {
18864418919fSjohnjiang 			printf("  RX-packets: %-14"PRIu64
18874418919fSjohnjiang 			       " RX-dropped: %-14"PRIu64
18884418919fSjohnjiang 			       "RX-total: %-"PRIu64"\n",
18894418919fSjohnjiang 			       stats.ipackets, stats.imissed,
18904418919fSjohnjiang 			       stats.ipackets + stats.imissed);
18914418919fSjohnjiang 
18924418919fSjohnjiang 			if (cur_fwd_eng == &csum_fwd_engine)
18934418919fSjohnjiang 				printf("  Bad-ipcsum: %-14"PRIu64
18944418919fSjohnjiang 				       " Bad-l4csum: %-14"PRIu64
18954418919fSjohnjiang 				       "Bad-outer-l4csum: %-14"PRIu64"\n",
18964418919fSjohnjiang 				       ports_stats[pt_id].rx_bad_ip_csum,
18974418919fSjohnjiang 				       ports_stats[pt_id].rx_bad_l4_csum,
18984418919fSjohnjiang 				       ports_stats[pt_id].rx_bad_outer_l4_csum);
18994418919fSjohnjiang 			if (stats.ierrors + stats.rx_nombuf > 0) {
19004418919fSjohnjiang 				printf("  RX-error: %-"PRIu64"\n",
19014418919fSjohnjiang 				       stats.ierrors);
19024418919fSjohnjiang 				printf("  RX-nombufs: %-14"PRIu64"\n",
19034418919fSjohnjiang 				       stats.rx_nombuf);
19044418919fSjohnjiang 			}
19054418919fSjohnjiang 
19064418919fSjohnjiang 			printf("  TX-packets: %-14"PRIu64
19074418919fSjohnjiang 			       " TX-dropped: %-14"PRIu64
19084418919fSjohnjiang 			       "TX-total: %-"PRIu64"\n",
19094418919fSjohnjiang 			       stats.opackets, ports_stats[pt_id].tx_dropped,
19104418919fSjohnjiang 			       stats.opackets + ports_stats[pt_id].tx_dropped);
19114418919fSjohnjiang 		} else {
19124418919fSjohnjiang 			printf("  RX-packets:             %14"PRIu64
19134418919fSjohnjiang 			       "    RX-dropped:%14"PRIu64
19144418919fSjohnjiang 			       "    RX-total:%14"PRIu64"\n",
19154418919fSjohnjiang 			       stats.ipackets, stats.imissed,
19164418919fSjohnjiang 			       stats.ipackets + stats.imissed);
19174418919fSjohnjiang 
19184418919fSjohnjiang 			if (cur_fwd_eng == &csum_fwd_engine)
19194418919fSjohnjiang 				printf("  Bad-ipcsum:%14"PRIu64
19204418919fSjohnjiang 				       "    Bad-l4csum:%14"PRIu64
19214418919fSjohnjiang 				       "    Bad-outer-l4csum: %-14"PRIu64"\n",
19224418919fSjohnjiang 				       ports_stats[pt_id].rx_bad_ip_csum,
19234418919fSjohnjiang 				       ports_stats[pt_id].rx_bad_l4_csum,
19244418919fSjohnjiang 				       ports_stats[pt_id].rx_bad_outer_l4_csum);
19254418919fSjohnjiang 			if ((stats.ierrors + stats.rx_nombuf) > 0) {
19264418919fSjohnjiang 				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
19274418919fSjohnjiang 				printf("  RX-nombufs:             %14"PRIu64"\n",
19284418919fSjohnjiang 				       stats.rx_nombuf);
19294418919fSjohnjiang 			}
19304418919fSjohnjiang 
19314418919fSjohnjiang 			printf("  TX-packets:             %14"PRIu64
19324418919fSjohnjiang 			       "    TX-dropped:%14"PRIu64
19334418919fSjohnjiang 			       "    TX-total:%14"PRIu64"\n",
19344418919fSjohnjiang 			       stats.opackets, ports_stats[pt_id].tx_dropped,
19354418919fSjohnjiang 			       stats.opackets + ports_stats[pt_id].tx_dropped);
19364418919fSjohnjiang 		}
19374418919fSjohnjiang 
1938*2d9fd380Sjfb8856606 		if (record_burst_stats) {
19394418919fSjohnjiang 			if (ports_stats[pt_id].rx_stream)
19404418919fSjohnjiang 				pkt_burst_stats_display("RX",
19414418919fSjohnjiang 					&ports_stats[pt_id].rx_stream->rx_burst_stats);
19424418919fSjohnjiang 			if (ports_stats[pt_id].tx_stream)
19434418919fSjohnjiang 				pkt_burst_stats_display("TX",
19444418919fSjohnjiang 					&ports_stats[pt_id].tx_stream->tx_burst_stats);
1945*2d9fd380Sjfb8856606 		}
19464418919fSjohnjiang 
19474418919fSjohnjiang 		if (port->rx_queue_stats_mapping_enabled) {
19484418919fSjohnjiang 			printf("\n");
19494418919fSjohnjiang 			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
19504418919fSjohnjiang 				printf("  Stats reg %2d RX-packets:%14"PRIu64
19514418919fSjohnjiang 				       "     RX-errors:%14"PRIu64
19524418919fSjohnjiang 				       "    RX-bytes:%14"PRIu64"\n",
19534418919fSjohnjiang 				       j, stats.q_ipackets[j],
19544418919fSjohnjiang 				       stats.q_errors[j], stats.q_ibytes[j]);
19554418919fSjohnjiang 			}
19564418919fSjohnjiang 			printf("\n");
19574418919fSjohnjiang 		}
19584418919fSjohnjiang 		if (port->tx_queue_stats_mapping_enabled) {
19594418919fSjohnjiang 			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
19604418919fSjohnjiang 				printf("  Stats reg %2d TX-packets:%14"PRIu64
19614418919fSjohnjiang 				       "                                 TX-bytes:%14"
19624418919fSjohnjiang 				       PRIu64"\n",
19634418919fSjohnjiang 				       j, stats.q_opackets[j],
19644418919fSjohnjiang 				       stats.q_obytes[j]);
19654418919fSjohnjiang 			}
19664418919fSjohnjiang 		}
19674418919fSjohnjiang 
19684418919fSjohnjiang 		printf("  %s--------------------------------%s\n",
19694418919fSjohnjiang 		       fwd_stats_border, fwd_stats_border);
19704418919fSjohnjiang 	}
19714418919fSjohnjiang 
19724418919fSjohnjiang 	printf("\n  %s Accumulated forward statistics for all ports"
19734418919fSjohnjiang 	       "%s\n",
19744418919fSjohnjiang 	       acc_stats_border, acc_stats_border);
19754418919fSjohnjiang 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
19764418919fSjohnjiang 	       "%-"PRIu64"\n"
19774418919fSjohnjiang 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
19784418919fSjohnjiang 	       "%-"PRIu64"\n",
19794418919fSjohnjiang 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
19804418919fSjohnjiang 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
19814418919fSjohnjiang 	if (total_rx_nombuf > 0)
19824418919fSjohnjiang 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
19834418919fSjohnjiang 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
19844418919fSjohnjiang 	       "%s\n",
19854418919fSjohnjiang 	       acc_stats_border, acc_stats_border);
1986*2d9fd380Sjfb8856606 	if (record_core_cycles) {
19870c6bd470Sfengbojiang #define CYC_PER_MHZ 1E6
19880c6bd470Sfengbojiang 		if (total_recv > 0 || total_xmit > 0) {
19890c6bd470Sfengbojiang 			uint64_t total_pkts = 0;
19900c6bd470Sfengbojiang 			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
19910c6bd470Sfengbojiang 			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
19920c6bd470Sfengbojiang 				total_pkts = total_xmit;
19930c6bd470Sfengbojiang 			else
19940c6bd470Sfengbojiang 				total_pkts = total_recv;
19950c6bd470Sfengbojiang 
19960c6bd470Sfengbojiang 			printf("\n  CPU cycles/packet=%.2F (total cycles="
19970c6bd470Sfengbojiang 			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
19980c6bd470Sfengbojiang 			       " MHz Clock\n",
19990c6bd470Sfengbojiang 			       (double) fwd_cycles / total_pkts,
20000c6bd470Sfengbojiang 			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
20010c6bd470Sfengbojiang 			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
20020c6bd470Sfengbojiang 		}
2003*2d9fd380Sjfb8856606 	}
20044418919fSjohnjiang }
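
/*
 * Worked example of the cycles/packet figure printed above (illustrative
 * numbers): fwd_cycles = 2.6e9 over 1.0e7 forwarded packets on a 2600 MHz
 * TSC gives 260.00 CPU cycles/packet, i.e. roughly 0.1 us per packet.
 */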
20054418919fSjohnjiang 
20064418919fSjohnjiang void
20074418919fSjohnjiang fwd_stats_reset(void)
20084418919fSjohnjiang {
20094418919fSjohnjiang 	streamid_t sm_id;
20104418919fSjohnjiang 	portid_t pt_id;
20114418919fSjohnjiang 	int i;
20124418919fSjohnjiang 
20134418919fSjohnjiang 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
20144418919fSjohnjiang 		pt_id = fwd_ports_ids[i];
20154418919fSjohnjiang 		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
20164418919fSjohnjiang 	}
20174418919fSjohnjiang 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
20184418919fSjohnjiang 		struct fwd_stream *fs = fwd_streams[sm_id];
20194418919fSjohnjiang 
20204418919fSjohnjiang 		fs->rx_packets = 0;
20214418919fSjohnjiang 		fs->tx_packets = 0;
20224418919fSjohnjiang 		fs->fwd_dropped = 0;
20234418919fSjohnjiang 		fs->rx_bad_ip_csum = 0;
20244418919fSjohnjiang 		fs->rx_bad_l4_csum = 0;
20254418919fSjohnjiang 		fs->rx_bad_outer_l4_csum = 0;
20264418919fSjohnjiang 
20274418919fSjohnjiang 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
20284418919fSjohnjiang 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
20294418919fSjohnjiang 		fs->core_cycles = 0;
20304418919fSjohnjiang 	}
20314418919fSjohnjiang }
20324418919fSjohnjiang 
2033a9643ea8Slogwang static void
2034a9643ea8Slogwang flush_fwd_rx_queues(void)
2035a9643ea8Slogwang {
2036a9643ea8Slogwang 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2037a9643ea8Slogwang 	portid_t  rxp;
2038a9643ea8Slogwang 	portid_t port_id;
2039a9643ea8Slogwang 	queueid_t rxq;
2040a9643ea8Slogwang 	uint16_t  nb_rx;
2041a9643ea8Slogwang 	uint16_t  i;
2042a9643ea8Slogwang 	uint8_t   j;
2043a9643ea8Slogwang 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2044a9643ea8Slogwang 	uint64_t timer_period;
2045a9643ea8Slogwang 
2046a9643ea8Slogwang 	/* convert to number of cycles */
2047a9643ea8Slogwang 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
2048a9643ea8Slogwang 
2049a9643ea8Slogwang 	for (j = 0; j < 2; j++) {
2050a9643ea8Slogwang 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2051a9643ea8Slogwang 			for (rxq = 0; rxq < nb_rxq; rxq++) {
2052a9643ea8Slogwang 				port_id = fwd_ports_ids[rxp];
2053a9643ea8Slogwang 				/**
2054a9643ea8Slogwang 				* testpmd can get stuck in the do/while loop below
2055a9643ea8Slogwang 				* if rte_eth_rx_burst() keeps returning packets, so
2056a9643ea8Slogwang 				* a timer is used to force an exit from the loop
2057a9643ea8Slogwang 				* after the 1 second timeout expires.
2058a9643ea8Slogwang 				*/
2059a9643ea8Slogwang 				prev_tsc = rte_rdtsc();
2060a9643ea8Slogwang 				do {
2061a9643ea8Slogwang 					nb_rx = rte_eth_rx_burst(port_id, rxq,
2062a9643ea8Slogwang 						pkts_burst, MAX_PKT_BURST);
2063a9643ea8Slogwang 					for (i = 0; i < nb_rx; i++)
2064a9643ea8Slogwang 						rte_pktmbuf_free(pkts_burst[i]);
2065a9643ea8Slogwang 
2066a9643ea8Slogwang 					cur_tsc = rte_rdtsc();
2067a9643ea8Slogwang 					diff_tsc = cur_tsc - prev_tsc;
2068a9643ea8Slogwang 					timer_tsc += diff_tsc;
2069a9643ea8Slogwang 				} while ((nb_rx > 0) &&
2070a9643ea8Slogwang 					(timer_tsc < timer_period));
2071a9643ea8Slogwang 				timer_tsc = 0;
2072a9643ea8Slogwang 			}
2073a9643ea8Slogwang 		}
2074a9643ea8Slogwang 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
2075a9643ea8Slogwang 	}
2076a9643ea8Slogwang }
2077a9643ea8Slogwang 
2078a9643ea8Slogwang static void
2079a9643ea8Slogwang run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2080a9643ea8Slogwang {
2081a9643ea8Slogwang 	struct fwd_stream **fsm;
2082a9643ea8Slogwang 	streamid_t nb_fs;
2083a9643ea8Slogwang 	streamid_t sm_id;
2084*2d9fd380Sjfb8856606 #ifdef RTE_LIB_BITRATESTATS
20852bfe3f2eSlogwang 	uint64_t tics_per_1sec;
20862bfe3f2eSlogwang 	uint64_t tics_datum;
20872bfe3f2eSlogwang 	uint64_t tics_current;
2088d30ea906Sjfb8856606 	uint16_t i, cnt_ports;
2089a9643ea8Slogwang 
2090d30ea906Sjfb8856606 	cnt_ports = nb_ports;
20912bfe3f2eSlogwang 	tics_datum = rte_rdtsc();
20922bfe3f2eSlogwang 	tics_per_1sec = rte_get_timer_hz();
20932bfe3f2eSlogwang #endif
2094a9643ea8Slogwang 	fsm = &fwd_streams[fc->stream_idx];
2095a9643ea8Slogwang 	nb_fs = fc->stream_nb;
2096a9643ea8Slogwang 	do {
2097a9643ea8Slogwang 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
2098a9643ea8Slogwang 			(*pkt_fwd)(fsm[sm_id]);
2099*2d9fd380Sjfb8856606 #ifdef RTE_LIB_BITRATESTATS
21002bfe3f2eSlogwang 		if (bitrate_enabled != 0 &&
21012bfe3f2eSlogwang 				bitrate_lcore_id == rte_lcore_id()) {
21022bfe3f2eSlogwang 			tics_current = rte_rdtsc();
21032bfe3f2eSlogwang 			if (tics_current - tics_datum >= tics_per_1sec) {
21042bfe3f2eSlogwang 				/* Periodic bitrate calculation */
2105d30ea906Sjfb8856606 				for (i = 0; i < cnt_ports; i++)
21062bfe3f2eSlogwang 					rte_stats_bitrate_calc(bitrate_data,
2107d30ea906Sjfb8856606 						ports_ids[i]);
21082bfe3f2eSlogwang 				tics_datum = tics_current;
21092bfe3f2eSlogwang 			}
21102bfe3f2eSlogwang 		}
21112bfe3f2eSlogwang #endif
2112*2d9fd380Sjfb8856606 #ifdef RTE_LIB_LATENCYSTATS
21132bfe3f2eSlogwang 		if (latencystats_enabled != 0 &&
21142bfe3f2eSlogwang 				latencystats_lcore_id == rte_lcore_id())
21152bfe3f2eSlogwang 			rte_latencystats_update();
21162bfe3f2eSlogwang #endif
21172bfe3f2eSlogwang 
2118a9643ea8Slogwang 	} while (! fc->stopped);
2119a9643ea8Slogwang }
2120a9643ea8Slogwang 
2121a9643ea8Slogwang static int
2122a9643ea8Slogwang start_pkt_forward_on_core(void *fwd_arg)
2123a9643ea8Slogwang {
2124a9643ea8Slogwang 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2125a9643ea8Slogwang 			     cur_fwd_config.fwd_eng->packet_fwd);
2126a9643ea8Slogwang 	return 0;
2127a9643ea8Slogwang }
2128a9643ea8Slogwang 
2129a9643ea8Slogwang /*
2130a9643ea8Slogwang  * Run the TXONLY packet forwarding engine to send a single burst of packets.
2131a9643ea8Slogwang  * Used to start communication flows in network loopback test configurations.
2132a9643ea8Slogwang  */
2133a9643ea8Slogwang static int
2134a9643ea8Slogwang run_one_txonly_burst_on_core(void *fwd_arg)
2135a9643ea8Slogwang {
2136a9643ea8Slogwang 	struct fwd_lcore *fwd_lc;
2137a9643ea8Slogwang 	struct fwd_lcore tmp_lcore;
2138a9643ea8Slogwang 
2139a9643ea8Slogwang 	fwd_lc = (struct fwd_lcore *) fwd_arg;
2140a9643ea8Slogwang 	tmp_lcore = *fwd_lc;
2141a9643ea8Slogwang 	tmp_lcore.stopped = 1;
2142a9643ea8Slogwang 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2143a9643ea8Slogwang 	return 0;
2144a9643ea8Slogwang }
2145a9643ea8Slogwang 
2146a9643ea8Slogwang /*
2147a9643ea8Slogwang  * Launch packet forwarding:
2148a9643ea8Slogwang  *     - Setup per-port forwarding context.
2149a9643ea8Slogwang  *     - Set up the per-port forwarding context.
2150a9643ea8Slogwang  *     - Launch the logical cores with their forwarding configuration.
2151a9643ea8Slogwang static void
2152a9643ea8Slogwang launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2153a9643ea8Slogwang {
2154a9643ea8Slogwang 	port_fwd_begin_t port_fwd_begin;
2155a9643ea8Slogwang 	unsigned int i;
2156a9643ea8Slogwang 	unsigned int lc_id;
2157a9643ea8Slogwang 	int diag;
2158a9643ea8Slogwang 
2159a9643ea8Slogwang 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2160a9643ea8Slogwang 	if (port_fwd_begin != NULL) {
2161a9643ea8Slogwang 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2162a9643ea8Slogwang 			(*port_fwd_begin)(fwd_ports_ids[i]);
2163a9643ea8Slogwang 	}
2164a9643ea8Slogwang 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2165a9643ea8Slogwang 		lc_id = fwd_lcores_cpuids[i];
2166a9643ea8Slogwang 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2167a9643ea8Slogwang 			fwd_lcores[i]->stopped = 0;
2168a9643ea8Slogwang 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2169a9643ea8Slogwang 						     fwd_lcores[i], lc_id);
2170a9643ea8Slogwang 			if (diag != 0)
2171a9643ea8Slogwang 				printf("launch lcore %u failed - diag=%d\n",
2172a9643ea8Slogwang 				       lc_id, diag);
2173a9643ea8Slogwang 		}
2174a9643ea8Slogwang 	}
2175a9643ea8Slogwang }
2176a9643ea8Slogwang 
2177a9643ea8Slogwang /*
2178a9643ea8Slogwang  * Launch packet forwarding configuration.
2179a9643ea8Slogwang  */
2180a9643ea8Slogwang void
2181a9643ea8Slogwang start_packet_forwarding(int with_tx_first)
2182a9643ea8Slogwang {
2183a9643ea8Slogwang 	port_fwd_begin_t port_fwd_begin;
2184a9643ea8Slogwang 	port_fwd_end_t  port_fwd_end;
2185a9643ea8Slogwang 	struct rte_port *port;
2186a9643ea8Slogwang 	unsigned int i;
2187a9643ea8Slogwang 	portid_t   pt_id;
2188a9643ea8Slogwang 
2189a9643ea8Slogwang 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2190a9643ea8Slogwang 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
2191a9643ea8Slogwang 
2192a9643ea8Slogwang 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2193a9643ea8Slogwang 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
2194a9643ea8Slogwang 
2195a9643ea8Slogwang 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2196a9643ea8Slogwang 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2197a9643ea8Slogwang 		(!nb_rxq || !nb_txq))
2198a9643ea8Slogwang 		rte_exit(EXIT_FAILURE,
2199a9643ea8Slogwang 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
2200a9643ea8Slogwang 			cur_fwd_eng->fwd_mode_name);
2201a9643ea8Slogwang 
2202a9643ea8Slogwang 	if (all_ports_started() == 0) {
2203a9643ea8Slogwang 		printf("Not all ports were started\n");
2204a9643ea8Slogwang 		return;
2205a9643ea8Slogwang 	}
2206a9643ea8Slogwang 	if (test_done == 0) {
2207a9643ea8Slogwang 		printf("Packet forwarding already started\n");
2208a9643ea8Slogwang 		return;
2209a9643ea8Slogwang 	}
2210a9643ea8Slogwang 
2211a9643ea8Slogwang 
2212a9643ea8Slogwang 	if(dcb_test) {
2213a9643ea8Slogwang 		for (i = 0; i < nb_fwd_ports; i++) {
2214a9643ea8Slogwang 			pt_id = fwd_ports_ids[i];
2215a9643ea8Slogwang 			port = &ports[pt_id];
2216a9643ea8Slogwang 			if (!port->dcb_flag) {
2217a9643ea8Slogwang 				printf("In DCB mode, all forwarding ports must "
2218a9643ea8Slogwang                                        "be configured in this mode.\n");
2219a9643ea8Slogwang 				return;
2220a9643ea8Slogwang 			}
2221a9643ea8Slogwang 		}
2222a9643ea8Slogwang 		if (nb_fwd_lcores == 1) {
2223a9643ea8Slogwang 			printf("In DCB mode, the number of forwarding cores "
2224a9643ea8Slogwang 			       "must be larger than 1.\n");
2225a9643ea8Slogwang 			return;
2226a9643ea8Slogwang 		}
2227a9643ea8Slogwang 	}
2228a9643ea8Slogwang 	test_done = 0;
2229a9643ea8Slogwang 
2230579bf1e2Sjfb8856606 	fwd_config_setup();
2231579bf1e2Sjfb8856606 
2232a9643ea8Slogwang 	if(!no_flush_rx)
2233a9643ea8Slogwang 		flush_fwd_rx_queues();
2234a9643ea8Slogwang 
2235a9643ea8Slogwang 	pkt_fwd_config_display(&cur_fwd_config);
2236a9643ea8Slogwang 	rxtx_config_display();
2237a9643ea8Slogwang 
22384418919fSjohnjiang 	fwd_stats_reset();
2239a9643ea8Slogwang 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2240a9643ea8Slogwang 		pt_id = fwd_ports_ids[i];
2241a9643ea8Slogwang 		port = &ports[pt_id];
2242a9643ea8Slogwang 		map_port_queue_stats_mapping_registers(pt_id, port);
2243a9643ea8Slogwang 	}
2244a9643ea8Slogwang 	if (with_tx_first) {
2245a9643ea8Slogwang 		port_fwd_begin = tx_only_engine.port_fwd_begin;
2246a9643ea8Slogwang 		if (port_fwd_begin != NULL) {
2247a9643ea8Slogwang 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2248a9643ea8Slogwang 				(*port_fwd_begin)(fwd_ports_ids[i]);
2249a9643ea8Slogwang 		}
2250a9643ea8Slogwang 		while (with_tx_first--) {
2251a9643ea8Slogwang 			launch_packet_forwarding(
2252a9643ea8Slogwang 					run_one_txonly_burst_on_core);
2253a9643ea8Slogwang 			rte_eal_mp_wait_lcore();
2254a9643ea8Slogwang 		}
2255a9643ea8Slogwang 		port_fwd_end = tx_only_engine.port_fwd_end;
2256a9643ea8Slogwang 		if (port_fwd_end != NULL) {
2257a9643ea8Slogwang 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2258a9643ea8Slogwang 				(*port_fwd_end)(fwd_ports_ids[i]);
2259a9643ea8Slogwang 		}
2260a9643ea8Slogwang 	}
2261a9643ea8Slogwang 	launch_packet_forwarding(start_pkt_forward_on_core);
2262a9643ea8Slogwang }
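
/*
 * Illustrative call sites (hypothetical sketch, kept under "#if 0"): a
 * with_tx_first value of 0 starts the configured engine directly, while a
 * positive value first runs that many single TXONLY bursts on every
 * forwarding port, which mirrors how the interactive "start tx_first [n]"
 * command is expected to drive this function.
 */
#if 0
static void
start_fwd_examples_sketch(void)
{
	start_packet_forwarding(0);	/* plain start */
	start_packet_forwarding(2);	/* two TXONLY bursts, then start */
}
#endif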
2263a9643ea8Slogwang 
2264a9643ea8Slogwang void
2265a9643ea8Slogwang stop_packet_forwarding(void)
2266a9643ea8Slogwang {
2267a9643ea8Slogwang 	port_fwd_end_t port_fwd_end;
2268a9643ea8Slogwang 	lcoreid_t lc_id;
22694418919fSjohnjiang 	portid_t pt_id;
22704418919fSjohnjiang 	int i;
2271a9643ea8Slogwang 
2272a9643ea8Slogwang 	if (test_done) {
2273a9643ea8Slogwang 		printf("Packet forwarding not started\n");
2274a9643ea8Slogwang 		return;
2275a9643ea8Slogwang 	}
2276a9643ea8Slogwang 	printf("Telling cores to stop...");
2277a9643ea8Slogwang 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2278a9643ea8Slogwang 		fwd_lcores[lc_id]->stopped = 1;
2279a9643ea8Slogwang 	printf("\nWaiting for lcores to finish...\n");
2280a9643ea8Slogwang 	rte_eal_mp_wait_lcore();
2281a9643ea8Slogwang 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2282a9643ea8Slogwang 	if (port_fwd_end != NULL) {
2283a9643ea8Slogwang 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2284a9643ea8Slogwang 			pt_id = fwd_ports_ids[i];
2285a9643ea8Slogwang 			(*port_fwd_end)(pt_id);
2286a9643ea8Slogwang 		}
2287a9643ea8Slogwang 	}
22881646932aSjfb8856606 
22894418919fSjohnjiang 	fwd_stats_display();
2290d30ea906Sjfb8856606 
2291a9643ea8Slogwang 	printf("\nDone.\n");
2292a9643ea8Slogwang 	test_done = 1;
2293a9643ea8Slogwang }
2294a9643ea8Slogwang 
2295a9643ea8Slogwang void
2296a9643ea8Slogwang dev_set_link_up(portid_t pid)
2297a9643ea8Slogwang {
22982bfe3f2eSlogwang 	if (rte_eth_dev_set_link_up(pid) < 0)
2299a9643ea8Slogwang 		printf("\nSet link up fail.\n");
2300a9643ea8Slogwang }
2301a9643ea8Slogwang 
2302a9643ea8Slogwang void
2303a9643ea8Slogwang dev_set_link_down(portid_t pid)
2304a9643ea8Slogwang {
23052bfe3f2eSlogwang 	if (rte_eth_dev_set_link_down(pid) < 0)
2306a9643ea8Slogwang 		printf("\nSet link down fail.\n");
2307a9643ea8Slogwang }
2308a9643ea8Slogwang 
2309a9643ea8Slogwang static int
2310a9643ea8Slogwang all_ports_started(void)
2311a9643ea8Slogwang {
2312a9643ea8Slogwang 	portid_t pi;
2313a9643ea8Slogwang 	struct rte_port *port;
2314a9643ea8Slogwang 
23152bfe3f2eSlogwang 	RTE_ETH_FOREACH_DEV(pi) {
2316a9643ea8Slogwang 		port = &ports[pi];
2317a9643ea8Slogwang 		/* Check if there is a port which is not started */
2318a9643ea8Slogwang 		if ((port->port_status != RTE_PORT_STARTED) &&
2319a9643ea8Slogwang 			(port->slave_flag == 0))
2320a9643ea8Slogwang 			return 0;
2321a9643ea8Slogwang 	}
2322a9643ea8Slogwang 
2323a9643ea8Slogwang 	/* No port is not started */
2324a9643ea8Slogwang 	return 1;
2325a9643ea8Slogwang }
2326a9643ea8Slogwang 
2327a9643ea8Slogwang int
2328d30ea906Sjfb8856606 port_is_stopped(portid_t port_id)
2329d30ea906Sjfb8856606 {
2330d30ea906Sjfb8856606 	struct rte_port *port = &ports[port_id];
2331d30ea906Sjfb8856606 
2332d30ea906Sjfb8856606 	if ((port->port_status != RTE_PORT_STOPPED) &&
2333d30ea906Sjfb8856606 	    (port->slave_flag == 0))
2334d30ea906Sjfb8856606 		return 0;
2335d30ea906Sjfb8856606 	return 1;
2336d30ea906Sjfb8856606 }
2337d30ea906Sjfb8856606 
2338d30ea906Sjfb8856606 int
2339a9643ea8Slogwang all_ports_stopped(void)
2340a9643ea8Slogwang {
2341a9643ea8Slogwang 	portid_t pi;
2342a9643ea8Slogwang 
23432bfe3f2eSlogwang 	RTE_ETH_FOREACH_DEV(pi) {
2344d30ea906Sjfb8856606 		if (!port_is_stopped(pi))
2345a9643ea8Slogwang 			return 0;
2346a9643ea8Slogwang 	}
2347a9643ea8Slogwang 
2348a9643ea8Slogwang 	return 1;
2349a9643ea8Slogwang }
2350a9643ea8Slogwang 
2351a9643ea8Slogwang int
2352a9643ea8Slogwang port_is_started(portid_t port_id)
2353a9643ea8Slogwang {
2354a9643ea8Slogwang 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2355a9643ea8Slogwang 		return 0;
2356a9643ea8Slogwang 
2357a9643ea8Slogwang 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2358a9643ea8Slogwang 		return 0;
2359a9643ea8Slogwang 
2360a9643ea8Slogwang 	return 1;
2361a9643ea8Slogwang }
2362a9643ea8Slogwang 
23634418919fSjohnjiang /* Configure the Rx and Tx hairpin queues for the selected port. */
23644418919fSjohnjiang static int
2365*2d9fd380Sjfb8856606 setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
23664418919fSjohnjiang {
23674418919fSjohnjiang 	queueid_t qi;
23684418919fSjohnjiang 	struct rte_eth_hairpin_conf hairpin_conf = {
23694418919fSjohnjiang 		.peer_count = 1,
23704418919fSjohnjiang 	};
23714418919fSjohnjiang 	int i;
23724418919fSjohnjiang 	int diag;
23734418919fSjohnjiang 	struct rte_port *port = &ports[pi];
2374*2d9fd380Sjfb8856606 	uint16_t peer_rx_port = pi;
2375*2d9fd380Sjfb8856606 	uint16_t peer_tx_port = pi;
2376*2d9fd380Sjfb8856606 	uint32_t manual = 1;
2377*2d9fd380Sjfb8856606 	uint32_t tx_exp = hairpin_mode & 0x10;
2378*2d9fd380Sjfb8856606 
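	/*
	 * Peering topology, as decoded below from the low nibble of
	 * hairpin_mode: 0x0 loops Rx/Tx on the same port, 0x1 chains each
	 * port to the next owned port (wrapping around), 0x2 pairs adjacent
	 * ports.  Bit 0x10 requests explicit Tx flow rules (tx_explicit);
	 * any non-zero topology implies manual queue binding.
	 */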
2379*2d9fd380Sjfb8856606 	if (!(hairpin_mode & 0xf)) {
2380*2d9fd380Sjfb8856606 		peer_rx_port = pi;
2381*2d9fd380Sjfb8856606 		peer_tx_port = pi;
2382*2d9fd380Sjfb8856606 		manual = 0;
2383*2d9fd380Sjfb8856606 	} else if (hairpin_mode & 0x1) {
2384*2d9fd380Sjfb8856606 		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
2385*2d9fd380Sjfb8856606 						       RTE_ETH_DEV_NO_OWNER);
2386*2d9fd380Sjfb8856606 		if (peer_tx_port >= RTE_MAX_ETHPORTS)
2387*2d9fd380Sjfb8856606 			peer_tx_port = rte_eth_find_next_owned_by(0,
2388*2d9fd380Sjfb8856606 						RTE_ETH_DEV_NO_OWNER);
2389*2d9fd380Sjfb8856606 		if (p_pi != RTE_MAX_ETHPORTS) {
2390*2d9fd380Sjfb8856606 			peer_rx_port = p_pi;
2391*2d9fd380Sjfb8856606 		} else {
2392*2d9fd380Sjfb8856606 			uint16_t next_pi;
2393*2d9fd380Sjfb8856606 
2394*2d9fd380Sjfb8856606 			/* Last port will be the peer RX port of the first. */
2395*2d9fd380Sjfb8856606 			RTE_ETH_FOREACH_DEV(next_pi)
2396*2d9fd380Sjfb8856606 				peer_rx_port = next_pi;
2397*2d9fd380Sjfb8856606 		}
2398*2d9fd380Sjfb8856606 		manual = 1;
2399*2d9fd380Sjfb8856606 	} else if (hairpin_mode & 0x2) {
2400*2d9fd380Sjfb8856606 		if (cnt_pi & 0x1) {
2401*2d9fd380Sjfb8856606 			peer_rx_port = p_pi;
2402*2d9fd380Sjfb8856606 		} else {
2403*2d9fd380Sjfb8856606 			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
2404*2d9fd380Sjfb8856606 						RTE_ETH_DEV_NO_OWNER);
2405*2d9fd380Sjfb8856606 			if (peer_rx_port >= RTE_MAX_ETHPORTS)
2406*2d9fd380Sjfb8856606 				peer_rx_port = pi;
2407*2d9fd380Sjfb8856606 		}
2408*2d9fd380Sjfb8856606 		peer_tx_port = peer_rx_port;
2409*2d9fd380Sjfb8856606 		manual = 1;
2410*2d9fd380Sjfb8856606 	}
24114418919fSjohnjiang 
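	/* Set up the Tx hairpin queues, i.e. queue ids [nb_txq, nb_txq + nb_hairpinq). */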
24124418919fSjohnjiang 	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2413*2d9fd380Sjfb8856606 		hairpin_conf.peers[0].port = peer_rx_port;
24144418919fSjohnjiang 		hairpin_conf.peers[0].queue = i + nb_rxq;
2415*2d9fd380Sjfb8856606 		hairpin_conf.manual_bind = !!manual;
2416*2d9fd380Sjfb8856606 		hairpin_conf.tx_explicit = !!tx_exp;
24174418919fSjohnjiang 		diag = rte_eth_tx_hairpin_queue_setup
24184418919fSjohnjiang 			(pi, qi, nb_txd, &hairpin_conf);
24194418919fSjohnjiang 		i++;
24204418919fSjohnjiang 		if (diag == 0)
24214418919fSjohnjiang 			continue;
24224418919fSjohnjiang 
24234418919fSjohnjiang 		/* Fail to setup tx hairpin queue, return */
24244418919fSjohnjiang 		if (rte_atomic16_cmpset(&(port->port_status),
24254418919fSjohnjiang 					RTE_PORT_HANDLING,
24264418919fSjohnjiang 					RTE_PORT_STOPPED) == 0)
24274418919fSjohnjiang 			printf("Port %d can not be set back "
24284418919fSjohnjiang 					"to stopped\n", pi);
24294418919fSjohnjiang 		printf("Fail to configure port %d hairpin "
24304418919fSjohnjiang 				"queues\n", pi);
24314418919fSjohnjiang 		/* try to reconfigure queues next time */
24324418919fSjohnjiang 		port->need_reconfig_queues = 1;
24334418919fSjohnjiang 		return -1;
24344418919fSjohnjiang 	}
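	/* Likewise set up the Rx hairpin queues, i.e. queue ids [nb_rxq, nb_rxq + nb_hairpinq). */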
24354418919fSjohnjiang 	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2436*2d9fd380Sjfb8856606 		hairpin_conf.peers[0].port = peer_tx_port;
24374418919fSjohnjiang 		hairpin_conf.peers[0].queue = i + nb_txq;
2438*2d9fd380Sjfb8856606 		hairpin_conf.manual_bind = !!manual;
2439*2d9fd380Sjfb8856606 		hairpin_conf.tx_explicit = !!tx_exp;
24404418919fSjohnjiang 		diag = rte_eth_rx_hairpin_queue_setup
24414418919fSjohnjiang 			(pi, qi, nb_rxd, &hairpin_conf);
24424418919fSjohnjiang 		i++;
24434418919fSjohnjiang 		if (diag == 0)
24444418919fSjohnjiang 			continue;
24454418919fSjohnjiang 
24464418919fSjohnjiang 		/* Fail to setup rx hairpin queue, return */
24474418919fSjohnjiang 		if (rte_atomic16_cmpset(&(port->port_status),
24484418919fSjohnjiang 					RTE_PORT_HANDLING,
24494418919fSjohnjiang 					RTE_PORT_STOPPED) == 0)
24504418919fSjohnjiang 			printf("Port %d can not be set back "
24514418919fSjohnjiang 					"to stopped\n", pi);
24524418919fSjohnjiang 		printf("Fail to configure port %d hairpin "
24534418919fSjohnjiang 				"queues\n", pi);
24544418919fSjohnjiang 		/* try to reconfigure queues next time */
24554418919fSjohnjiang 		port->need_reconfig_queues = 1;
24564418919fSjohnjiang 		return -1;
24574418919fSjohnjiang 	}
24584418919fSjohnjiang 	return 0;
24594418919fSjohnjiang }
24604418919fSjohnjiang 
2461*2d9fd380Sjfb8856606 /* Configure the Rx with optional split. */
2462*2d9fd380Sjfb8856606 int
2463*2d9fd380Sjfb8856606 rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2464*2d9fd380Sjfb8856606 	       uint16_t nb_rx_desc, unsigned int socket_id,
2465*2d9fd380Sjfb8856606 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
2466*2d9fd380Sjfb8856606 {
2467*2d9fd380Sjfb8856606 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
2468*2d9fd380Sjfb8856606 	unsigned int i, mp_n;
2469*2d9fd380Sjfb8856606 	int ret;
2470*2d9fd380Sjfb8856606 
2471*2d9fd380Sjfb8856606 	if (rx_pkt_nb_segs <= 1 ||
2472*2d9fd380Sjfb8856606 	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
2473*2d9fd380Sjfb8856606 		rx_conf->rx_seg = NULL;
2474*2d9fd380Sjfb8856606 		rx_conf->rx_nseg = 0;
2475*2d9fd380Sjfb8856606 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
2476*2d9fd380Sjfb8856606 					     nb_rx_desc, socket_id,
2477*2d9fd380Sjfb8856606 					     rx_conf, mp);
2478*2d9fd380Sjfb8856606 		return ret;
2479*2d9fd380Sjfb8856606 	}
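	/*
	 * Buffer split is requested: describe each configured segment with
	 * its own length, offset and (possibly per-segment) mempool before
	 * passing the array to rte_eth_rx_queue_setup().
	 */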
2480*2d9fd380Sjfb8856606 	for (i = 0; i < rx_pkt_nb_segs; i++) {
2481*2d9fd380Sjfb8856606 		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
2482*2d9fd380Sjfb8856606 		struct rte_mempool *mpx;
2483*2d9fd380Sjfb8856606 		/*
2484*2d9fd380Sjfb8856606 		 * Use last valid pool for the segments with number
2485*2d9fd380Sjfb8856606 		 * exceeding the pool index.
2486*2d9fd380Sjfb8856606 		 */
2487*2d9fd380Sjfb8856606 		mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
2488*2d9fd380Sjfb8856606 		mpx = mbuf_pool_find(socket_id, mp_n);
2489*2d9fd380Sjfb8856606 		/* Handle zero as mbuf data buffer size. */
2490*2d9fd380Sjfb8856606 		rx_seg->length = rx_pkt_seg_lengths[i] ?
2491*2d9fd380Sjfb8856606 				   rx_pkt_seg_lengths[i] :
2492*2d9fd380Sjfb8856606 				   mbuf_data_size[mp_n];
2493*2d9fd380Sjfb8856606 		rx_seg->offset = i < rx_pkt_nb_offs ?
2494*2d9fd380Sjfb8856606 				   rx_pkt_seg_offsets[i] : 0;
2495*2d9fd380Sjfb8856606 		rx_seg->mp = mpx ? mpx : mp;
2496*2d9fd380Sjfb8856606 	}
2497*2d9fd380Sjfb8856606 	rx_conf->rx_nseg = rx_pkt_nb_segs;
2498*2d9fd380Sjfb8856606 	rx_conf->rx_seg = rx_useg;
2499*2d9fd380Sjfb8856606 	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2500*2d9fd380Sjfb8856606 				    socket_id, rx_conf, NULL);
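	/*
	 * rx_useg lives on this stack frame, so drop the references from the
	 * shared rx_conf once the queue has been set up.
	 */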
2501*2d9fd380Sjfb8856606 	rx_conf->rx_seg = NULL;
2502*2d9fd380Sjfb8856606 	rx_conf->rx_nseg = 0;
2503*2d9fd380Sjfb8856606 	return ret;
2504*2d9fd380Sjfb8856606 }
2505*2d9fd380Sjfb8856606 
2506a9643ea8Slogwang int
2507a9643ea8Slogwang start_port(portid_t pid)
2508a9643ea8Slogwang {
2509a9643ea8Slogwang 	int diag, need_check_link_status = -1;
2510a9643ea8Slogwang 	portid_t pi;
2511*2d9fd380Sjfb8856606 	portid_t p_pi = RTE_MAX_ETHPORTS;
2512*2d9fd380Sjfb8856606 	portid_t pl[RTE_MAX_ETHPORTS];
2513*2d9fd380Sjfb8856606 	portid_t peer_pl[RTE_MAX_ETHPORTS];
2514*2d9fd380Sjfb8856606 	uint16_t cnt_pi = 0;
2515*2d9fd380Sjfb8856606 	uint16_t cfg_pi = 0;
2516*2d9fd380Sjfb8856606 	int peer_pi;
2517a9643ea8Slogwang 	queueid_t qi;
2518a9643ea8Slogwang 	struct rte_port *port;
25194418919fSjohnjiang 	struct rte_ether_addr mac_addr;
25204418919fSjohnjiang 	struct rte_eth_hairpin_cap cap;
2521a9643ea8Slogwang 
2522a9643ea8Slogwang 	if (port_id_is_invalid(pid, ENABLED_WARN))
2523a9643ea8Slogwang 		return 0;
2524a9643ea8Slogwang 
2525a9643ea8Slogwang 	if(dcb_config)
2526a9643ea8Slogwang 		dcb_test = 1;
25272bfe3f2eSlogwang 	RTE_ETH_FOREACH_DEV(pi) {
2528a9643ea8Slogwang 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2529a9643ea8Slogwang 			continue;
2530a9643ea8Slogwang 
2531a9643ea8Slogwang 		need_check_link_status = 0;
2532a9643ea8Slogwang 		port = &ports[pi];
2533a9643ea8Slogwang 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2534a9643ea8Slogwang 						 RTE_PORT_HANDLING) == 0) {
2535a9643ea8Slogwang 			printf("Port %d is now not stopped\n", pi);
2536a9643ea8Slogwang 			continue;
2537a9643ea8Slogwang 		}
2538a9643ea8Slogwang 
2539a9643ea8Slogwang 		if (port->need_reconfig > 0) {
2540a9643ea8Slogwang 			port->need_reconfig = 0;
2541a9643ea8Slogwang 
25422bfe3f2eSlogwang 			if (flow_isolate_all) {
25432bfe3f2eSlogwang 				int ret = port_flow_isolate(pi, 1);
25442bfe3f2eSlogwang 				if (ret) {
25452bfe3f2eSlogwang 					printf("Failed to apply isolated"
25462bfe3f2eSlogwang 					       " mode on port %d\n", pi);
25472bfe3f2eSlogwang 					return -1;
25482bfe3f2eSlogwang 				}
25492bfe3f2eSlogwang 			}
2550d30ea906Sjfb8856606 			configure_rxtx_dump_callbacks(0);
2551a9643ea8Slogwang 			printf("Configuring Port %d (socket %u)\n", pi,
2552a9643ea8Slogwang 					port->socket_id);
25534418919fSjohnjiang 			if (nb_hairpinq > 0 &&
25544418919fSjohnjiang 			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
25554418919fSjohnjiang 				printf("Port %d doesn't support hairpin "
25564418919fSjohnjiang 				       "queues\n", pi);
25574418919fSjohnjiang 				return -1;
25584418919fSjohnjiang 			}
2559a9643ea8Slogwang 			/* configure port */
25604418919fSjohnjiang 			diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
25614418919fSjohnjiang 						     nb_txq + nb_hairpinq,
2562a9643ea8Slogwang 						     &(port->dev_conf));
2563a9643ea8Slogwang 			if (diag != 0) {
2564a9643ea8Slogwang 				if (rte_atomic16_cmpset(&(port->port_status),
2565a9643ea8Slogwang 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2566a9643ea8Slogwang 					printf("Port %d can not be set back "
2567a9643ea8Slogwang 							"to stopped\n", pi);
2568a9643ea8Slogwang 				printf("Fail to configure port %d\n", pi);
2569a9643ea8Slogwang 				/* try to reconfigure port next time */
2570a9643ea8Slogwang 				port->need_reconfig = 1;
2571a9643ea8Slogwang 				return -1;
2572a9643ea8Slogwang 			}
2573a9643ea8Slogwang 		}
2574a9643ea8Slogwang 		if (port->need_reconfig_queues > 0) {
2575a9643ea8Slogwang 			port->need_reconfig_queues = 0;
2576a9643ea8Slogwang 			/* setup tx queues */
2577a9643ea8Slogwang 			for (qi = 0; qi < nb_txq; qi++) {
2578a9643ea8Slogwang 				if ((numa_support) &&
2579a9643ea8Slogwang 					(txring_numa[pi] != NUMA_NO_CONFIG))
2580a9643ea8Slogwang 					diag = rte_eth_tx_queue_setup(pi, qi,
2581d30ea906Sjfb8856606 						port->nb_tx_desc[qi],
2582d30ea906Sjfb8856606 						txring_numa[pi],
2583d30ea906Sjfb8856606 						&(port->tx_conf[qi]));
2584a9643ea8Slogwang 				else
2585a9643ea8Slogwang 					diag = rte_eth_tx_queue_setup(pi, qi,
2586d30ea906Sjfb8856606 						port->nb_tx_desc[qi],
2587d30ea906Sjfb8856606 						port->socket_id,
2588d30ea906Sjfb8856606 						&(port->tx_conf[qi]));
2589a9643ea8Slogwang 
2590a9643ea8Slogwang 				if (diag == 0)
2591a9643ea8Slogwang 					continue;
2592a9643ea8Slogwang 
2593a9643ea8Slogwang 				/* Fail to setup tx queue, return */
2594a9643ea8Slogwang 				if (rte_atomic16_cmpset(&(port->port_status),
2595a9643ea8Slogwang 							RTE_PORT_HANDLING,
2596a9643ea8Slogwang 							RTE_PORT_STOPPED) == 0)
2597a9643ea8Slogwang 					printf("Port %d can not be set back "
2598a9643ea8Slogwang 							"to stopped\n", pi);
2599d30ea906Sjfb8856606 				printf("Fail to configure port %d tx queues\n",
2600d30ea906Sjfb8856606 				       pi);
2601a9643ea8Slogwang 				/* try to reconfigure queues next time */
2602a9643ea8Slogwang 				port->need_reconfig_queues = 1;
2603a9643ea8Slogwang 				return -1;
2604a9643ea8Slogwang 			}
26055af785ecSfengbojiang(姜凤波) 			for (qi = 0; qi < nb_rxq; qi++) {
2606d30ea906Sjfb8856606 				/* setup rx queues */
2607a9643ea8Slogwang 				if ((numa_support) &&
2608a9643ea8Slogwang 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2609a9643ea8Slogwang 					struct rte_mempool * mp =
2610*2d9fd380Sjfb8856606 						mbuf_pool_find
2611*2d9fd380Sjfb8856606 							(rxring_numa[pi], 0);
2612a9643ea8Slogwang 					if (mp == NULL) {
2613a9643ea8Slogwang 						printf("Failed to setup RX queue:"
2614a9643ea8Slogwang 							"No mempool allocation"
2615a9643ea8Slogwang 							" on the socket %d\n",
2616a9643ea8Slogwang 							rxring_numa[pi]);
2617a9643ea8Slogwang 						return -1;
2618a9643ea8Slogwang 					}
2619a9643ea8Slogwang 
2620*2d9fd380Sjfb8856606 					diag = rx_queue_setup(pi, qi,
2621d30ea906Sjfb8856606 					     port->nb_rx_desc[qi],
2622d30ea906Sjfb8856606 					     rxring_numa[pi],
2623d30ea906Sjfb8856606 					     &(port->rx_conf[qi]),
2624d30ea906Sjfb8856606 					     mp);
2625a9643ea8Slogwang 				} else {
2626a9643ea8Slogwang 					struct rte_mempool *mp =
2627*2d9fd380Sjfb8856606 						mbuf_pool_find
2628*2d9fd380Sjfb8856606 							(port->socket_id, 0);
2629a9643ea8Slogwang 					if (mp == NULL) {
2630a9643ea8Slogwang 						printf("Failed to setup RX queue:"
2631a9643ea8Slogwang 							"No mempool allocation"
2632a9643ea8Slogwang 							" on the socket %d\n",
2633a9643ea8Slogwang 							port->socket_id);
2634a9643ea8Slogwang 						return -1;
2635a9643ea8Slogwang 					}
2636*2d9fd380Sjfb8856606 					diag = rx_queue_setup(pi, qi,
2637d30ea906Sjfb8856606 					     port->nb_rx_desc[qi],
2638d30ea906Sjfb8856606 					     port->socket_id,
2639d30ea906Sjfb8856606 					     &(port->rx_conf[qi]),
2640d30ea906Sjfb8856606 					     mp);
2641a9643ea8Slogwang 				}
2642a9643ea8Slogwang 				if (diag == 0)
2643a9643ea8Slogwang 					continue;
2644a9643ea8Slogwang 
2645a9643ea8Slogwang 				/* Fail to setup rx queue, return */
2646a9643ea8Slogwang 				if (rte_atomic16_cmpset(&(port->port_status),
2647a9643ea8Slogwang 							RTE_PORT_HANDLING,
2648a9643ea8Slogwang 							RTE_PORT_STOPPED) == 0)
2649a9643ea8Slogwang 					printf("Port %d can not be set back "
2650a9643ea8Slogwang 							"to stopped\n", pi);
2651d30ea906Sjfb8856606 				printf("Fail to configure port %d rx queues\n",
2652d30ea906Sjfb8856606 				       pi);
2653a9643ea8Slogwang 				/* try to reconfigure queues next time */
2654a9643ea8Slogwang 				port->need_reconfig_queues = 1;
2655a9643ea8Slogwang 				return -1;
2656a9643ea8Slogwang 			}
26574418919fSjohnjiang 			/* setup hairpin queues */
2658*2d9fd380Sjfb8856606 			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
26594418919fSjohnjiang 				return -1;
2660a9643ea8Slogwang 		}
2661d30ea906Sjfb8856606 		configure_rxtx_dump_callbacks(verbose_level);
26624418919fSjohnjiang 		if (clear_ptypes) {
26634418919fSjohnjiang 			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
26644418919fSjohnjiang 					NULL, 0);
26654418919fSjohnjiang 			if (diag < 0)
26664418919fSjohnjiang 				printf(
26674418919fSjohnjiang 				"Port %d: Failed to disable Ptype parsing\n",
26684418919fSjohnjiang 				pi);
26694418919fSjohnjiang 		}
26704418919fSjohnjiang 
2671*2d9fd380Sjfb8856606 		p_pi = pi;
2672*2d9fd380Sjfb8856606 		cnt_pi++;
2673*2d9fd380Sjfb8856606 
2674a9643ea8Slogwang 		/* start port */
2675a9643ea8Slogwang 		if (rte_eth_dev_start(pi) < 0) {
2676a9643ea8Slogwang 			printf("Fail to start port %d\n", pi);
2677a9643ea8Slogwang 
2678a9643ea8Slogwang 			/* Fail to start port; set it back to stopped and skip it */
2679a9643ea8Slogwang 			if (rte_atomic16_cmpset(&(port->port_status),
2680a9643ea8Slogwang 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2681a9643ea8Slogwang 				printf("Port %d can not be set back to "
2682a9643ea8Slogwang 							"stopped\n", pi);
2683a9643ea8Slogwang 			continue;
2684a9643ea8Slogwang 		}
2685a9643ea8Slogwang 
2686a9643ea8Slogwang 		if (rte_atomic16_cmpset(&(port->port_status),
2687a9643ea8Slogwang 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2688a9643ea8Slogwang 			printf("Port %d can not be set into started\n", pi);
2689a9643ea8Slogwang 
26904418919fSjohnjiang 		if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2691a9643ea8Slogwang 			printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2692a9643ea8Slogwang 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2693a9643ea8Slogwang 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2694a9643ea8Slogwang 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2695a9643ea8Slogwang 
2696a9643ea8Slogwang 		/* at least one port started, need checking link status */
2697a9643ea8Slogwang 		need_check_link_status = 1;
2698*2d9fd380Sjfb8856606 
2699*2d9fd380Sjfb8856606 		pl[cfg_pi++] = pi;
2700a9643ea8Slogwang 	}
2701a9643ea8Slogwang 
2702a9643ea8Slogwang 	if (need_check_link_status == 1 && !no_link_check)
2703a9643ea8Slogwang 		check_all_ports_link_status(RTE_PORT_ALL);
2704a9643ea8Slogwang 	else if (need_check_link_status == 0)
2705a9643ea8Slogwang 		printf("Please stop the ports first\n");
2706a9643ea8Slogwang 
2707*2d9fd380Sjfb8856606 	if (hairpin_mode & 0xf) {
2708*2d9fd380Sjfb8856606 		uint16_t i;
2709*2d9fd380Sjfb8856606 		int j;
2710*2d9fd380Sjfb8856606 
2711*2d9fd380Sjfb8856606 		/* bind all started hairpin ports */
2712*2d9fd380Sjfb8856606 		for (i = 0; i < cfg_pi; i++) {
2713*2d9fd380Sjfb8856606 			pi = pl[i];
2714*2d9fd380Sjfb8856606 			/* bind current Tx to all peer Rx */
2715*2d9fd380Sjfb8856606 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2716*2d9fd380Sjfb8856606 							RTE_MAX_ETHPORTS, 1);
2717*2d9fd380Sjfb8856606 			if (peer_pi < 0)
2718*2d9fd380Sjfb8856606 				return peer_pi;
2719*2d9fd380Sjfb8856606 			for (j = 0; j < peer_pi; j++) {
2720*2d9fd380Sjfb8856606 				if (!port_is_started(peer_pl[j]))
2721*2d9fd380Sjfb8856606 					continue;
2722*2d9fd380Sjfb8856606 				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
2723*2d9fd380Sjfb8856606 				if (diag < 0) {
2724*2d9fd380Sjfb8856606 					printf("Error during binding hairpin"
2725*2d9fd380Sjfb8856606 					       " Tx port %u to %u: %s\n",
2726*2d9fd380Sjfb8856606 					       pi, peer_pl[j],
2727*2d9fd380Sjfb8856606 					       rte_strerror(-diag));
2728*2d9fd380Sjfb8856606 					return -1;
2729*2d9fd380Sjfb8856606 				}
2730*2d9fd380Sjfb8856606 			}
2731*2d9fd380Sjfb8856606 			/* bind all peer Tx to current Rx */
2732*2d9fd380Sjfb8856606 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2733*2d9fd380Sjfb8856606 							RTE_MAX_ETHPORTS, 0);
2734*2d9fd380Sjfb8856606 			if (peer_pi < 0)
2735*2d9fd380Sjfb8856606 				return peer_pi;
2736*2d9fd380Sjfb8856606 			for (j = 0; j < peer_pi; j++) {
2737*2d9fd380Sjfb8856606 				if (!port_is_started(peer_pl[j]))
2738*2d9fd380Sjfb8856606 					continue;
2739*2d9fd380Sjfb8856606 				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
2740*2d9fd380Sjfb8856606 				if (diag < 0) {
2741*2d9fd380Sjfb8856606 					printf("Error during binding hairpin"
2742*2d9fd380Sjfb8856606 					       " Tx port %u to %u: %s\n",
2743*2d9fd380Sjfb8856606 					       peer_pl[j], pi,
2744*2d9fd380Sjfb8856606 					       rte_strerror(-diag));
2745*2d9fd380Sjfb8856606 					return -1;
2746*2d9fd380Sjfb8856606 				}
2747*2d9fd380Sjfb8856606 			}
2748*2d9fd380Sjfb8856606 		}
2749*2d9fd380Sjfb8856606 	}
2750*2d9fd380Sjfb8856606 
2751a9643ea8Slogwang 	printf("Done\n");
2752a9643ea8Slogwang 	return 0;
2753a9643ea8Slogwang }
2754a9643ea8Slogwang 
2755a9643ea8Slogwang void
2756a9643ea8Slogwang stop_port(portid_t pid)
2757a9643ea8Slogwang {
2758a9643ea8Slogwang 	portid_t pi;
2759a9643ea8Slogwang 	struct rte_port *port;
2760a9643ea8Slogwang 	int need_check_link_status = 0;
2761*2d9fd380Sjfb8856606 	portid_t peer_pl[RTE_MAX_ETHPORTS];
2762*2d9fd380Sjfb8856606 	int peer_pi;
2763a9643ea8Slogwang 
2764a9643ea8Slogwang 	if (dcb_test) {
2765a9643ea8Slogwang 		dcb_test = 0;
2766a9643ea8Slogwang 		dcb_config = 0;
2767a9643ea8Slogwang 	}
2768a9643ea8Slogwang 
2769a9643ea8Slogwang 	if (port_id_is_invalid(pid, ENABLED_WARN))
2770a9643ea8Slogwang 		return;
2771a9643ea8Slogwang 
2772a9643ea8Slogwang 	printf("Stopping ports...\n");
2773a9643ea8Slogwang 
27742bfe3f2eSlogwang 	RTE_ETH_FOREACH_DEV(pi) {
2775a9643ea8Slogwang 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2776a9643ea8Slogwang 			continue;
2777a9643ea8Slogwang 
2778a9643ea8Slogwang 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2779a9643ea8Slogwang 			printf("Please remove port %d from forwarding configuration.\n", pi);
2780a9643ea8Slogwang 			continue;
2781a9643ea8Slogwang 		}
2782a9643ea8Slogwang 
2783a9643ea8Slogwang 		if (port_is_bonding_slave(pi)) {
2784a9643ea8Slogwang 			printf("Please remove port %d from bonded device.\n", pi);
2785a9643ea8Slogwang 			continue;
2786a9643ea8Slogwang 		}
2787a9643ea8Slogwang 
2788a9643ea8Slogwang 		port = &ports[pi];
2789a9643ea8Slogwang 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2790a9643ea8Slogwang 						RTE_PORT_HANDLING) == 0)
2791a9643ea8Slogwang 			continue;
2792a9643ea8Slogwang 
2793*2d9fd380Sjfb8856606 		if (hairpin_mode & 0xf) {
2794*2d9fd380Sjfb8856606 			int j;
2795*2d9fd380Sjfb8856606 
2796*2d9fd380Sjfb8856606 			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
2797*2d9fd380Sjfb8856606 			/* unbind all peer Tx from current Rx */
2798*2d9fd380Sjfb8856606 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2799*2d9fd380Sjfb8856606 							RTE_MAX_ETHPORTS, 0);
2800*2d9fd380Sjfb8856606 			if (peer_pi < 0)
2801*2d9fd380Sjfb8856606 				continue;
2802*2d9fd380Sjfb8856606 			for (j = 0; j < peer_pi; j++) {
2803*2d9fd380Sjfb8856606 				if (!port_is_started(peer_pl[j]))
2804*2d9fd380Sjfb8856606 					continue;
2805*2d9fd380Sjfb8856606 				rte_eth_hairpin_unbind(peer_pl[j], pi);
2806*2d9fd380Sjfb8856606 			}
2807*2d9fd380Sjfb8856606 		}
2808*2d9fd380Sjfb8856606 
2809*2d9fd380Sjfb8856606 		if (rte_eth_dev_stop(pi) != 0)
2810*2d9fd380Sjfb8856606 			RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
2811*2d9fd380Sjfb8856606 				pi);
2812a9643ea8Slogwang 
2813a9643ea8Slogwang 		if (rte_atomic16_cmpset(&(port->port_status),
2814a9643ea8Slogwang 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2815a9643ea8Slogwang 			printf("Port %d can not be set into stopped\n", pi);
2816a9643ea8Slogwang 		need_check_link_status = 1;
2817a9643ea8Slogwang 	}
2818a9643ea8Slogwang 	if (need_check_link_status && !no_link_check)
2819a9643ea8Slogwang 		check_all_ports_link_status(RTE_PORT_ALL);
2820a9643ea8Slogwang 
2821a9643ea8Slogwang 	printf("Done\n");
2822a9643ea8Slogwang }
2823a9643ea8Slogwang 
2824d30ea906Sjfb8856606 static void
2825d30ea906Sjfb8856606 remove_invalid_ports_in(portid_t *array, portid_t *total)
2826d30ea906Sjfb8856606 {
2827d30ea906Sjfb8856606 	portid_t i;
2828d30ea906Sjfb8856606 	portid_t new_total = 0;
2829d30ea906Sjfb8856606 
2830d30ea906Sjfb8856606 	for (i = 0; i < *total; i++)
2831d30ea906Sjfb8856606 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2832d30ea906Sjfb8856606 			array[new_total] = array[i];
2833d30ea906Sjfb8856606 			new_total++;
2834d30ea906Sjfb8856606 		}
2835d30ea906Sjfb8856606 	*total = new_total;
2836d30ea906Sjfb8856606 }
2837d30ea906Sjfb8856606 
2838d30ea906Sjfb8856606 static void
2839d30ea906Sjfb8856606 remove_invalid_ports(void)
2840d30ea906Sjfb8856606 {
2841d30ea906Sjfb8856606 	remove_invalid_ports_in(ports_ids, &nb_ports);
2842d30ea906Sjfb8856606 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2843d30ea906Sjfb8856606 	nb_cfg_ports = nb_fwd_ports;
2844d30ea906Sjfb8856606 }
2845d30ea906Sjfb8856606 
2846a9643ea8Slogwang void
2847a9643ea8Slogwang close_port(portid_t pid)
2848a9643ea8Slogwang {
2849a9643ea8Slogwang 	portid_t pi;
2850a9643ea8Slogwang 	struct rte_port *port;
2851a9643ea8Slogwang 
2852a9643ea8Slogwang 	if (port_id_is_invalid(pid, ENABLED_WARN))
2853a9643ea8Slogwang 		return;
2854a9643ea8Slogwang 
2855a9643ea8Slogwang 	printf("Closing ports...\n");
2856a9643ea8Slogwang 
28572bfe3f2eSlogwang 	RTE_ETH_FOREACH_DEV(pi) {
2858a9643ea8Slogwang 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2859a9643ea8Slogwang 			continue;
2860a9643ea8Slogwang 
2861a9643ea8Slogwang 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2862a9643ea8Slogwang 			printf("Please remove port %d from forwarding configuration.\n", pi);
2863a9643ea8Slogwang 			continue;
2864a9643ea8Slogwang 		}
2865a9643ea8Slogwang 
2866a9643ea8Slogwang 		if (port_is_bonding_slave(pi)) {
2867a9643ea8Slogwang 			printf("Please remove port %d from bonded device.\n", pi);
2868a9643ea8Slogwang 			continue;
2869a9643ea8Slogwang 		}
2870a9643ea8Slogwang 
2871a9643ea8Slogwang 		port = &ports[pi];
2872a9643ea8Slogwang 		if (rte_atomic16_cmpset(&(port->port_status),
2873a9643ea8Slogwang 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2874a9643ea8Slogwang 			printf("Port %d is already closed\n", pi);
2875a9643ea8Slogwang 			continue;
2876a9643ea8Slogwang 		}
2877a9643ea8Slogwang 
28782bfe3f2eSlogwang 		port_flow_flush(pi);
2879a9643ea8Slogwang 		rte_eth_dev_close(pi);
2880a9643ea8Slogwang 	}
2881a9643ea8Slogwang 
2882*2d9fd380Sjfb8856606 	remove_invalid_ports();
2883a9643ea8Slogwang 	printf("Done\n");
2884a9643ea8Slogwang }
2885a9643ea8Slogwang 
2886a9643ea8Slogwang void
28872bfe3f2eSlogwang reset_port(portid_t pid)
28882bfe3f2eSlogwang {
28892bfe3f2eSlogwang 	int diag;
28902bfe3f2eSlogwang 	portid_t pi;
28912bfe3f2eSlogwang 	struct rte_port *port;
28922bfe3f2eSlogwang 
28932bfe3f2eSlogwang 	if (port_id_is_invalid(pid, ENABLED_WARN))
28942bfe3f2eSlogwang 		return;
28952bfe3f2eSlogwang 
28964418919fSjohnjiang 	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
28974418919fSjohnjiang 		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
28984418919fSjohnjiang 		printf("Can not reset port(s), please stop port(s) first.\n");
28994418919fSjohnjiang 		return;
29004418919fSjohnjiang 	}
29014418919fSjohnjiang 
29022bfe3f2eSlogwang 	printf("Resetting ports...\n");
29032bfe3f2eSlogwang 
29042bfe3f2eSlogwang 	RTE_ETH_FOREACH_DEV(pi) {
29052bfe3f2eSlogwang 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
29062bfe3f2eSlogwang 			continue;
29072bfe3f2eSlogwang 
29082bfe3f2eSlogwang 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
29092bfe3f2eSlogwang 			printf("Please remove port %d from forwarding "
29102bfe3f2eSlogwang 			       "configuration.\n", pi);
29112bfe3f2eSlogwang 			continue;
29122bfe3f2eSlogwang 		}
29132bfe3f2eSlogwang 
29142bfe3f2eSlogwang 		if (port_is_bonding_slave(pi)) {
29152bfe3f2eSlogwang 			printf("Please remove port %d from bonded device.\n",
29162bfe3f2eSlogwang 			       pi);
29172bfe3f2eSlogwang 			continue;
29182bfe3f2eSlogwang 		}
29192bfe3f2eSlogwang 
29202bfe3f2eSlogwang 		diag = rte_eth_dev_reset(pi);
29212bfe3f2eSlogwang 		if (diag == 0) {
29222bfe3f2eSlogwang 			port = &ports[pi];
29232bfe3f2eSlogwang 			port->need_reconfig = 1;
29242bfe3f2eSlogwang 			port->need_reconfig_queues = 1;
29252bfe3f2eSlogwang 		} else {
29262bfe3f2eSlogwang 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
29272bfe3f2eSlogwang 		}
29282bfe3f2eSlogwang 	}
29292bfe3f2eSlogwang 
29302bfe3f2eSlogwang 	printf("Done\n");
29312bfe3f2eSlogwang }
29322bfe3f2eSlogwang 
29332bfe3f2eSlogwang void
2934a9643ea8Slogwang attach_port(char *identifier)
2935a9643ea8Slogwang {
2936d30ea906Sjfb8856606 	portid_t pi;
2937d30ea906Sjfb8856606 	struct rte_dev_iterator iterator;
2938a9643ea8Slogwang 
2939a9643ea8Slogwang 	printf("Attaching a new port...\n");
2940a9643ea8Slogwang 
2941a9643ea8Slogwang 	if (identifier == NULL) {
2942a9643ea8Slogwang 		printf("Invalid parameters are specified\n");
2943a9643ea8Slogwang 		return;
2944a9643ea8Slogwang 	}
2945a9643ea8Slogwang 
29464b05018fSfengbojiang 	if (rte_dev_probe(identifier) < 0) {
2947d30ea906Sjfb8856606 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2948a9643ea8Slogwang 		return;
2949d30ea906Sjfb8856606 	}
2950d30ea906Sjfb8856606 
2951d30ea906Sjfb8856606 	/* first attach mode: event */
2952d30ea906Sjfb8856606 	if (setup_on_probe_event) {
2953d30ea906Sjfb8856606 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
2954d30ea906Sjfb8856606 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2955d30ea906Sjfb8856606 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
2956d30ea906Sjfb8856606 					ports[pi].need_setup != 0)
2957d30ea906Sjfb8856606 				setup_attached_port(pi);
2958d30ea906Sjfb8856606 		return;
2959d30ea906Sjfb8856606 	}
2960d30ea906Sjfb8856606 
2961d30ea906Sjfb8856606 	/* second attach mode: iterator */
2962d30ea906Sjfb8856606 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2963d30ea906Sjfb8856606 		/* setup ports matching the devargs used for probing */
2964d30ea906Sjfb8856606 		if (port_is_forwarding(pi))
2965d30ea906Sjfb8856606 			continue; /* port was already attached before */
2966d30ea906Sjfb8856606 		setup_attached_port(pi);
2967d30ea906Sjfb8856606 	}
2968d30ea906Sjfb8856606 }
2969d30ea906Sjfb8856606 
2970d30ea906Sjfb8856606 static void
2971d30ea906Sjfb8856606 setup_attached_port(portid_t pi)
2972d30ea906Sjfb8856606 {
2973d30ea906Sjfb8856606 	unsigned int socket_id;
29744418919fSjohnjiang 	int ret;
2975a9643ea8Slogwang 
2976a9643ea8Slogwang 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2977d30ea906Sjfb8856606 	/* if socket_id is invalid, set to the first available socket. */
2978a9643ea8Slogwang 	if (check_socket_id(socket_id) < 0)
2979d30ea906Sjfb8856606 		socket_id = socket_ids[0];
2980a9643ea8Slogwang 	reconfig(pi, socket_id);
29814418919fSjohnjiang 	ret = rte_eth_promiscuous_enable(pi);
29824418919fSjohnjiang 	if (ret != 0)
29834418919fSjohnjiang 		printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
29844418919fSjohnjiang 			pi, rte_strerror(-ret));
2985a9643ea8Slogwang 
2986d30ea906Sjfb8856606 	ports_ids[nb_ports++] = pi;
2987d30ea906Sjfb8856606 	fwd_ports_ids[nb_fwd_ports++] = pi;
2988d30ea906Sjfb8856606 	nb_cfg_ports = nb_fwd_ports;
2989d30ea906Sjfb8856606 	ports[pi].need_setup = 0;
2990a9643ea8Slogwang 	ports[pi].port_status = RTE_PORT_STOPPED;
2991a9643ea8Slogwang 
2992a9643ea8Slogwang 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2993a9643ea8Slogwang 	printf("Done\n");
2994a9643ea8Slogwang }
2995a9643ea8Slogwang 
29964418919fSjohnjiang static void
29974418919fSjohnjiang detach_device(struct rte_device *dev)
2998a9643ea8Slogwang {
2999d30ea906Sjfb8856606 	portid_t sibling;
3000a9643ea8Slogwang 
3001d30ea906Sjfb8856606 	if (dev == NULL) {
3002d30ea906Sjfb8856606 		printf("Device already removed\n");
3003a9643ea8Slogwang 		return;
3004a9643ea8Slogwang 	}
3005a9643ea8Slogwang 
30064418919fSjohnjiang 	printf("Removing a device...\n");
3007a9643ea8Slogwang 
3008*2d9fd380Sjfb8856606 	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
3009*2d9fd380Sjfb8856606 		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
3010*2d9fd380Sjfb8856606 			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
3011*2d9fd380Sjfb8856606 				printf("Port %u not stopped\n", sibling);
3012*2d9fd380Sjfb8856606 				return;
3013*2d9fd380Sjfb8856606 			}
3014*2d9fd380Sjfb8856606 			port_flow_flush(sibling);
3015*2d9fd380Sjfb8856606 		}
3016*2d9fd380Sjfb8856606 	}
3017*2d9fd380Sjfb8856606 
30184b05018fSfengbojiang 	if (rte_dev_remove(dev) < 0) {
3019d30ea906Sjfb8856606 		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
30202bfe3f2eSlogwang 		return;
30212bfe3f2eSlogwang 	}
3022d30ea906Sjfb8856606 	remove_invalid_ports();
3023579bf1e2Sjfb8856606 
30244418919fSjohnjiang 	printf("Device is detached\n");
3025d30ea906Sjfb8856606 	printf("Now total ports is %d\n", nb_ports);
3026a9643ea8Slogwang 	printf("Done\n");
3027a9643ea8Slogwang 	return;
3028a9643ea8Slogwang }
3029a9643ea8Slogwang 
3030a9643ea8Slogwang void
30314418919fSjohnjiang detach_port_device(portid_t port_id)
30324418919fSjohnjiang {
30334418919fSjohnjiang 	if (port_id_is_invalid(port_id, ENABLED_WARN))
30344418919fSjohnjiang 		return;
30354418919fSjohnjiang 
30364418919fSjohnjiang 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
30374418919fSjohnjiang 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
30384418919fSjohnjiang 			printf("Port not stopped\n");
30394418919fSjohnjiang 			return;
30404418919fSjohnjiang 		}
30414418919fSjohnjiang 		printf("Port was not closed\n");
30424418919fSjohnjiang 	}
30434418919fSjohnjiang 
30444418919fSjohnjiang 	detach_device(rte_eth_devices[port_id].device);
30454418919fSjohnjiang }
30464418919fSjohnjiang 
30474418919fSjohnjiang void
30484418919fSjohnjiang detach_devargs(char *identifier)
30494418919fSjohnjiang {
30504418919fSjohnjiang 	struct rte_dev_iterator iterator;
30514418919fSjohnjiang 	struct rte_devargs da;
30524418919fSjohnjiang 	portid_t port_id;
30534418919fSjohnjiang 
30544418919fSjohnjiang 	printf("Removing a device...\n");
30554418919fSjohnjiang 
30564418919fSjohnjiang 	memset(&da, 0, sizeof(da));
30574418919fSjohnjiang 	if (rte_devargs_parsef(&da, "%s", identifier)) {
30584418919fSjohnjiang 		printf("cannot parse identifier\n");
30594418919fSjohnjiang 		if (da.args)
30604418919fSjohnjiang 			free(da.args);
30614418919fSjohnjiang 		return;
30624418919fSjohnjiang 	}
30634418919fSjohnjiang 
30644418919fSjohnjiang 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
30654418919fSjohnjiang 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
30664418919fSjohnjiang 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
30674418919fSjohnjiang 				printf("Port %u not stopped\n", port_id);
30684418919fSjohnjiang 				rte_eth_iterator_cleanup(&iterator);
30694418919fSjohnjiang 				return;
30704418919fSjohnjiang 			}
30714418919fSjohnjiang 			port_flow_flush(port_id);
30724418919fSjohnjiang 		}
30734418919fSjohnjiang 	}
30744418919fSjohnjiang 
30754418919fSjohnjiang 	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
30764418919fSjohnjiang 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
30774418919fSjohnjiang 			    da.name, da.bus->name);
30784418919fSjohnjiang 		return;
30794418919fSjohnjiang 	}
30804418919fSjohnjiang 
30814418919fSjohnjiang 	remove_invalid_ports();
30824418919fSjohnjiang 
30834418919fSjohnjiang 	printf("Device %s is detached\n", identifier);
30844418919fSjohnjiang 	printf("Now total ports is %d\n", nb_ports);
30854418919fSjohnjiang 	printf("Done\n");
30864418919fSjohnjiang }
30874418919fSjohnjiang 
30884418919fSjohnjiang void
3089a9643ea8Slogwang pmd_test_exit(void)
3090a9643ea8Slogwang {
3091a9643ea8Slogwang 	portid_t pt_id;
3092*2d9fd380Sjfb8856606 	unsigned int i;
3093d30ea906Sjfb8856606 	int ret;
3094a9643ea8Slogwang 
3095a9643ea8Slogwang 	if (test_done == 0)
3096a9643ea8Slogwang 		stop_packet_forwarding();
3097a9643ea8Slogwang 
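	/*
	 * For anonymous mempools, undo the DMA mappings (presumably created
	 * when the pools were populated) before the ports are stopped and
	 * closed below.
	 */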
3098*2d9fd380Sjfb8856606 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
30994418919fSjohnjiang 		if (mempools[i]) {
31004418919fSjohnjiang 			if (mp_alloc_type == MP_ALLOC_ANON)
31014418919fSjohnjiang 				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
31024418919fSjohnjiang 						     NULL);
31034418919fSjohnjiang 		}
31044418919fSjohnjiang 	}
3105a9643ea8Slogwang 	if (ports != NULL) {
3106a9643ea8Slogwang 		no_link_check = 1;
31072bfe3f2eSlogwang 		RTE_ETH_FOREACH_DEV(pt_id) {
31081646932aSjfb8856606 			printf("\nStopping port %d...\n", pt_id);
3109a9643ea8Slogwang 			fflush(stdout);
3110a9643ea8Slogwang 			stop_port(pt_id);
31111646932aSjfb8856606 		}
31121646932aSjfb8856606 		RTE_ETH_FOREACH_DEV(pt_id) {
31131646932aSjfb8856606 			printf("\nShutting down port %d...\n", pt_id);
31141646932aSjfb8856606 			fflush(stdout);
3115a9643ea8Slogwang 			close_port(pt_id);
3116a9643ea8Slogwang 		}
3117a9643ea8Slogwang 	}
3118d30ea906Sjfb8856606 
3119d30ea906Sjfb8856606 	if (hot_plug) {
3120d30ea906Sjfb8856606 		ret = rte_dev_event_monitor_stop();
3121d30ea906Sjfb8856606 		if (ret) {
3122d30ea906Sjfb8856606 			RTE_LOG(ERR, EAL,
3123d30ea906Sjfb8856606 				"fail to stop device event monitor.");
3124d30ea906Sjfb8856606 			return;
3125d30ea906Sjfb8856606 		}
3126d30ea906Sjfb8856606 
3127d30ea906Sjfb8856606 		ret = rte_dev_event_callback_unregister(NULL,
3128d30ea906Sjfb8856606 			dev_event_callback, NULL);
3129d30ea906Sjfb8856606 		if (ret < 0) {
3130d30ea906Sjfb8856606 			RTE_LOG(ERR, EAL,
3131d30ea906Sjfb8856606 				"fail to unregister device event callback.\n");
3132d30ea906Sjfb8856606 			return;
3133d30ea906Sjfb8856606 		}
3134d30ea906Sjfb8856606 
3135d30ea906Sjfb8856606 		ret = rte_dev_hotplug_handle_disable();
3136d30ea906Sjfb8856606 		if (ret) {
3137d30ea906Sjfb8856606 			RTE_LOG(ERR, EAL,
3138d30ea906Sjfb8856606 				"fail to disable hotplug handling.\n");
3139d30ea906Sjfb8856606 			return;
3140d30ea906Sjfb8856606 		}
3141d30ea906Sjfb8856606 	}
3142*2d9fd380Sjfb8856606 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
31431646932aSjfb8856606 		if (mempools[i])
31441646932aSjfb8856606 			rte_mempool_free(mempools[i]);
31451646932aSjfb8856606 	}
3146d30ea906Sjfb8856606 
3147a9643ea8Slogwang 	printf("\nBye...\n");
3148a9643ea8Slogwang }
3149a9643ea8Slogwang 
3150a9643ea8Slogwang typedef void (*cmd_func_t)(void);
3151a9643ea8Slogwang struct pmd_test_command {
3152a9643ea8Slogwang 	const char *cmd_name;
3153a9643ea8Slogwang 	cmd_func_t cmd_func;
3154a9643ea8Slogwang };
3155a9643ea8Slogwang 
3156a9643ea8Slogwang /* Check the link status of all ports in up to 9s, and print them finally */
3157a9643ea8Slogwang static void
3158a9643ea8Slogwang check_all_ports_link_status(uint32_t port_mask)
3159a9643ea8Slogwang {
3160a9643ea8Slogwang #define CHECK_INTERVAL 100 /* 100ms */
3161a9643ea8Slogwang #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
31622bfe3f2eSlogwang 	portid_t portid;
31632bfe3f2eSlogwang 	uint8_t count, all_ports_up, print_flag = 0;
3164a9643ea8Slogwang 	struct rte_eth_link link;
31654418919fSjohnjiang 	int ret;
3166*2d9fd380Sjfb8856606 	char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3167a9643ea8Slogwang 
3168a9643ea8Slogwang 	printf("Checking link statuses...\n");
3169a9643ea8Slogwang 	fflush(stdout);
3170a9643ea8Slogwang 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
3171a9643ea8Slogwang 		all_ports_up = 1;
31722bfe3f2eSlogwang 		RTE_ETH_FOREACH_DEV(portid) {
3173a9643ea8Slogwang 			if ((port_mask & (1 << portid)) == 0)
3174a9643ea8Slogwang 				continue;
3175a9643ea8Slogwang 			memset(&link, 0, sizeof(link));
31764418919fSjohnjiang 			ret = rte_eth_link_get_nowait(portid, &link);
31774418919fSjohnjiang 			if (ret < 0) {
31784418919fSjohnjiang 				all_ports_up = 0;
31794418919fSjohnjiang 				if (print_flag == 1)
31804418919fSjohnjiang 					printf("Port %u link get failed: %s\n",
31814418919fSjohnjiang 						portid, rte_strerror(-ret));
31824418919fSjohnjiang 				continue;
31834418919fSjohnjiang 			}
3184a9643ea8Slogwang 			/* print link status if flag set */
3185a9643ea8Slogwang 			if (print_flag == 1) {
3186*2d9fd380Sjfb8856606 				rte_eth_link_to_str(link_status,
3187*2d9fd380Sjfb8856606 					sizeof(link_status), &link);
3188*2d9fd380Sjfb8856606 				printf("Port %d %s\n", portid, link_status);
3189a9643ea8Slogwang 				continue;
3190a9643ea8Slogwang 			}
3191a9643ea8Slogwang 			/* clear all_ports_up flag if any link down */
3192a9643ea8Slogwang 			if (link.link_status == ETH_LINK_DOWN) {
3193a9643ea8Slogwang 				all_ports_up = 0;
3194a9643ea8Slogwang 				break;
3195a9643ea8Slogwang 			}
3196a9643ea8Slogwang 		}
3197a9643ea8Slogwang 		/* after finally printing all link status, get out */
3198a9643ea8Slogwang 		if (print_flag == 1)
3199a9643ea8Slogwang 			break;
3200a9643ea8Slogwang 
3201a9643ea8Slogwang 		if (all_ports_up == 0) {
3202a9643ea8Slogwang 			fflush(stdout);
3203a9643ea8Slogwang 			rte_delay_ms(CHECK_INTERVAL);
3204a9643ea8Slogwang 		}
3205a9643ea8Slogwang 
3206a9643ea8Slogwang 		/* set the print_flag if all ports up or timeout */
3207a9643ea8Slogwang 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3208a9643ea8Slogwang 			print_flag = 1;
3209a9643ea8Slogwang 		}
32102bfe3f2eSlogwang 
32112bfe3f2eSlogwang 		if (lsc_interrupt)
32122bfe3f2eSlogwang 			break;
3213a9643ea8Slogwang 	}
3214a9643ea8Slogwang }
3215a9643ea8Slogwang 
32162bfe3f2eSlogwang static void
3217d30ea906Sjfb8856606 rmv_port_callback(void *arg)
32182bfe3f2eSlogwang {
3219d30ea906Sjfb8856606 	int need_to_start = 0;
3220579bf1e2Sjfb8856606 	int org_no_link_check = no_link_check;
32212bfe3f2eSlogwang 	portid_t port_id = (intptr_t)arg;
32224418919fSjohnjiang 	struct rte_device *dev;
32232bfe3f2eSlogwang 
32242bfe3f2eSlogwang 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
32252bfe3f2eSlogwang 
3226d30ea906Sjfb8856606 	if (!test_done && port_is_forwarding(port_id)) {
3227d30ea906Sjfb8856606 		need_to_start = 1;
3228d30ea906Sjfb8856606 		stop_packet_forwarding();
3229d30ea906Sjfb8856606 	}
3230579bf1e2Sjfb8856606 	no_link_check = 1;
32312bfe3f2eSlogwang 	stop_port(port_id);
3232579bf1e2Sjfb8856606 	no_link_check = org_no_link_check;
32334418919fSjohnjiang 
32344418919fSjohnjiang 	/* Save rte_device pointer before closing ethdev port */
32354418919fSjohnjiang 	dev = rte_eth_devices[port_id].device;
32362bfe3f2eSlogwang 	close_port(port_id);
32374418919fSjohnjiang 	detach_device(dev); /* might be already removed or have more ports */
32384418919fSjohnjiang 
3239d30ea906Sjfb8856606 	if (need_to_start)
3240d30ea906Sjfb8856606 		start_packet_forwarding(0);
32412bfe3f2eSlogwang }
32422bfe3f2eSlogwang 
32432bfe3f2eSlogwang /* This function is used by the interrupt thread */
3244a9643ea8Slogwang static int
32452bfe3f2eSlogwang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
32462bfe3f2eSlogwang 		  void *ret_param)
32472bfe3f2eSlogwang {
32482bfe3f2eSlogwang 	RTE_SET_USED(param);
32492bfe3f2eSlogwang 	RTE_SET_USED(ret_param);
32502bfe3f2eSlogwang 
32512bfe3f2eSlogwang 	if (type >= RTE_ETH_EVENT_MAX) {
3252d30ea906Sjfb8856606 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
32532bfe3f2eSlogwang 			port_id, __func__, type);
32542bfe3f2eSlogwang 		fflush(stderr);
32552bfe3f2eSlogwang 	} else if (event_print_mask & (UINT32_C(1) << type)) {
3256d30ea906Sjfb8856606 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
3257d30ea906Sjfb8856606 			eth_event_desc[type]);
32582bfe3f2eSlogwang 		fflush(stdout);
32592bfe3f2eSlogwang 	}
32602bfe3f2eSlogwang 
32612bfe3f2eSlogwang 	switch (type) {
3262d30ea906Sjfb8856606 	case RTE_ETH_EVENT_NEW:
3263d30ea906Sjfb8856606 		ports[port_id].need_setup = 1;
3264d30ea906Sjfb8856606 		ports[port_id].port_status = RTE_PORT_HANDLING;
3265d30ea906Sjfb8856606 		break;
32662bfe3f2eSlogwang 	case RTE_ETH_EVENT_INTR_RMV:
3267d30ea906Sjfb8856606 		if (port_id_is_invalid(port_id, DISABLED_WARN))
3268d30ea906Sjfb8856606 			break;
32692bfe3f2eSlogwang 		if (rte_eal_alarm_set(100000,
3270d30ea906Sjfb8856606 				rmv_port_callback, (void *)(intptr_t)port_id))
32712bfe3f2eSlogwang 			fprintf(stderr, "Could not set up deferred device removal\n");
32722bfe3f2eSlogwang 		break;
3273*2d9fd380Sjfb8856606 	case RTE_ETH_EVENT_DESTROY:
3274*2d9fd380Sjfb8856606 		ports[port_id].port_status = RTE_PORT_CLOSED;
3275*2d9fd380Sjfb8856606 		printf("Port %u is closed\n", port_id);
3276*2d9fd380Sjfb8856606 		break;
32772bfe3f2eSlogwang 	default:
32782bfe3f2eSlogwang 		break;
32792bfe3f2eSlogwang 	}
32802bfe3f2eSlogwang 	return 0;
32812bfe3f2eSlogwang }
32822bfe3f2eSlogwang 
32832bfe3f2eSlogwang static int
3284d30ea906Sjfb8856606 register_eth_event_callback(void)
3285d30ea906Sjfb8856606 {
3286d30ea906Sjfb8856606 	int ret;
3287d30ea906Sjfb8856606 	enum rte_eth_event_type event;
3288d30ea906Sjfb8856606 
3289d30ea906Sjfb8856606 	for (event = RTE_ETH_EVENT_UNKNOWN;
3290d30ea906Sjfb8856606 			event < RTE_ETH_EVENT_MAX; event++) {
3291d30ea906Sjfb8856606 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3292d30ea906Sjfb8856606 				event,
3293d30ea906Sjfb8856606 				eth_event_callback,
3294d30ea906Sjfb8856606 				NULL);
3295d30ea906Sjfb8856606 		if (ret != 0) {
3296d30ea906Sjfb8856606 			TESTPMD_LOG(ERR, "Failed to register callback for "
3297d30ea906Sjfb8856606 					"%s event\n", eth_event_desc[event]);
3298d30ea906Sjfb8856606 			return -1;
3299d30ea906Sjfb8856606 		}
3300d30ea906Sjfb8856606 	}
3301d30ea906Sjfb8856606 
3302d30ea906Sjfb8856606 	return 0;
3303d30ea906Sjfb8856606 }
3304d30ea906Sjfb8856606 
3305d30ea906Sjfb8856606 /* This function is used by the interrupt thread */
3306d30ea906Sjfb8856606 static void
3307d30ea906Sjfb8856606 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3308d30ea906Sjfb8856606 			     __rte_unused void *arg)
3309d30ea906Sjfb8856606 {
3310d30ea906Sjfb8856606 	uint16_t port_id;
3311d30ea906Sjfb8856606 	int ret;
3312d30ea906Sjfb8856606 
3313d30ea906Sjfb8856606 	if (type >= RTE_DEV_EVENT_MAX) {
3314d30ea906Sjfb8856606 		fprintf(stderr, "%s called upon invalid event %d\n",
3315d30ea906Sjfb8856606 			__func__, type);
3316d30ea906Sjfb8856606 		fflush(stderr);
3317d30ea906Sjfb8856606 	}
3318d30ea906Sjfb8856606 
3319d30ea906Sjfb8856606 	switch (type) {
3320d30ea906Sjfb8856606 	case RTE_DEV_EVENT_REMOVE:
3321d30ea906Sjfb8856606 		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3322d30ea906Sjfb8856606 			device_name);
3323d30ea906Sjfb8856606 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3324d30ea906Sjfb8856606 		if (ret) {
3325d30ea906Sjfb8856606 			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
3326d30ea906Sjfb8856606 				device_name);
3327d30ea906Sjfb8856606 			return;
3328d30ea906Sjfb8856606 		}
3329d30ea906Sjfb8856606 		/*
3330d30ea906Sjfb8856606 		 * Because the user's callback is invoked from the EAL
3331d30ea906Sjfb8856606 		 * interrupt callback, that callback must return before it
3332d30ea906Sjfb8856606 		 * can be unregistered when the device is detached.  Finish
3333d30ea906Sjfb8856606 		 * the callback quickly and detach the device through a
3334d30ea906Sjfb8856606 		 * deferred removal instead.  This is a workaround; once
3335d30ea906Sjfb8856606 		 * device detaching is moved into the EAL, the deferred
3336d30ea906Sjfb8856606 		 * removal can be dropped.
3337d30ea906Sjfb8856606 		 */
3338d30ea906Sjfb8856606 		if (rte_eal_alarm_set(100000,
3339d30ea906Sjfb8856606 				rmv_port_callback, (void *)(intptr_t)port_id))
3340d30ea906Sjfb8856606 			RTE_LOG(ERR, EAL,
3341d30ea906Sjfb8856606 				"Could not set up deferred device removal\n");
3342d30ea906Sjfb8856606 		break;
3343d30ea906Sjfb8856606 	case RTE_DEV_EVENT_ADD:
3344d30ea906Sjfb8856606 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3345d30ea906Sjfb8856606 			device_name);
3346d30ea906Sjfb8856606 		/* TODO: After finish kernel driver binding,
3347d30ea906Sjfb8856606 		 * begin to attach port.
3348d30ea906Sjfb8856606 		 */
3349d30ea906Sjfb8856606 		break;
3350d30ea906Sjfb8856606 	default:
3351d30ea906Sjfb8856606 		break;
3352d30ea906Sjfb8856606 	}
3353d30ea906Sjfb8856606 }
3354d30ea906Sjfb8856606 
3355d30ea906Sjfb8856606 static int
33562bfe3f2eSlogwang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3357a9643ea8Slogwang {
3358a9643ea8Slogwang 	uint16_t i;
3359a9643ea8Slogwang 	int diag;
3360a9643ea8Slogwang 	uint8_t mapping_found = 0;
3361a9643ea8Slogwang 
3362a9643ea8Slogwang 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3363a9643ea8Slogwang 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3364a9643ea8Slogwang 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
3365a9643ea8Slogwang 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
3366a9643ea8Slogwang 					tx_queue_stats_mappings[i].queue_id,
3367a9643ea8Slogwang 					tx_queue_stats_mappings[i].stats_counter_id);
3368a9643ea8Slogwang 			if (diag != 0)
3369a9643ea8Slogwang 				return diag;
3370a9643ea8Slogwang 			mapping_found = 1;
3371a9643ea8Slogwang 		}
3372a9643ea8Slogwang 	}
3373a9643ea8Slogwang 	if (mapping_found)
3374a9643ea8Slogwang 		port->tx_queue_stats_mapping_enabled = 1;
3375a9643ea8Slogwang 	return 0;
3376a9643ea8Slogwang }
3377a9643ea8Slogwang 
3378a9643ea8Slogwang static int
33792bfe3f2eSlogwang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3380a9643ea8Slogwang {
3381a9643ea8Slogwang 	uint16_t i;
3382a9643ea8Slogwang 	int diag;
3383a9643ea8Slogwang 	uint8_t mapping_found = 0;
3384a9643ea8Slogwang 
3385a9643ea8Slogwang 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3386a9643ea8Slogwang 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3387a9643ea8Slogwang 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
3388a9643ea8Slogwang 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
3389a9643ea8Slogwang 					rx_queue_stats_mappings[i].queue_id,
3390a9643ea8Slogwang 					rx_queue_stats_mappings[i].stats_counter_id);
3391a9643ea8Slogwang 			if (diag != 0)
3392a9643ea8Slogwang 				return diag;
3393a9643ea8Slogwang 			mapping_found = 1;
3394a9643ea8Slogwang 		}
3395a9643ea8Slogwang 	}
3396a9643ea8Slogwang 	if (mapping_found)
3397a9643ea8Slogwang 		port->rx_queue_stats_mapping_enabled = 1;
3398a9643ea8Slogwang 	return 0;
3399a9643ea8Slogwang }
3400a9643ea8Slogwang 
3401a9643ea8Slogwang static void
34022bfe3f2eSlogwang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
3403a9643ea8Slogwang {
3404a9643ea8Slogwang 	int diag = 0;
3405a9643ea8Slogwang 
3406a9643ea8Slogwang 	diag = set_tx_queue_stats_mapping_registers(pi, port);
3407a9643ea8Slogwang 	if (diag != 0) {
3408a9643ea8Slogwang 		if (diag == -ENOTSUP) {
3409a9643ea8Slogwang 			port->tx_queue_stats_mapping_enabled = 0;
3410a9643ea8Slogwang 			printf("TX queue stats mapping not supported port id=%d\n", pi);
3411a9643ea8Slogwang 		}
3412a9643ea8Slogwang 		else
3413a9643ea8Slogwang 			rte_exit(EXIT_FAILURE,
3414a9643ea8Slogwang 					"set_tx_queue_stats_mapping_registers "
3415a9643ea8Slogwang 					"failed for port id=%d diag=%d\n",
3416a9643ea8Slogwang 					pi, diag);
3417a9643ea8Slogwang 	}
3418a9643ea8Slogwang 
3419a9643ea8Slogwang 	diag = set_rx_queue_stats_mapping_registers(pi, port);
3420a9643ea8Slogwang 	if (diag != 0) {
3421a9643ea8Slogwang 		if (diag == -ENOTSUP) {
3422a9643ea8Slogwang 			port->rx_queue_stats_mapping_enabled = 0;
3423a9643ea8Slogwang 			printf("RX queue stats mapping not supported port id=%d\n", pi);
3424a9643ea8Slogwang 		}
3425a9643ea8Slogwang 		else
3426a9643ea8Slogwang 			rte_exit(EXIT_FAILURE,
3427a9643ea8Slogwang 					"set_rx_queue_stats_mapping_registers "
3428a9643ea8Slogwang 					"failed for port id=%d diag=%d\n",
3429a9643ea8Slogwang 					pi, diag);
3430a9643ea8Slogwang 	}
3431a9643ea8Slogwang }
3432a9643ea8Slogwang 
3433a9643ea8Slogwang static void
3434a9643ea8Slogwang rxtx_port_config(struct rte_port *port)
3435a9643ea8Slogwang {
3436d30ea906Sjfb8856606 	uint16_t qid;
34374b05018fSfengbojiang 	uint64_t offloads;
3438a9643ea8Slogwang 
3439d30ea906Sjfb8856606 	for (qid = 0; qid < nb_rxq; qid++) {
34404b05018fSfengbojiang 		offloads = port->rx_conf[qid].offloads;
3441d30ea906Sjfb8856606 		port->rx_conf[qid] = port->dev_info.default_rxconf;
34424b05018fSfengbojiang 		if (offloads != 0)
34434b05018fSfengbojiang 			port->rx_conf[qid].offloads = offloads;
3444d30ea906Sjfb8856606 
3445d30ea906Sjfb8856606 		/* Check if any Rx parameters have been passed */
3446a9643ea8Slogwang 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3447d30ea906Sjfb8856606 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3448a9643ea8Slogwang 
3449a9643ea8Slogwang 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3450d30ea906Sjfb8856606 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3451a9643ea8Slogwang 
3452a9643ea8Slogwang 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3453d30ea906Sjfb8856606 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3454a9643ea8Slogwang 
3455a9643ea8Slogwang 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3456d30ea906Sjfb8856606 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3457a9643ea8Slogwang 
3458a9643ea8Slogwang 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3459d30ea906Sjfb8856606 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
3460a9643ea8Slogwang 
3461d30ea906Sjfb8856606 		port->nb_rx_desc[qid] = nb_rxd;
3462d30ea906Sjfb8856606 	}
3463d30ea906Sjfb8856606 
3464d30ea906Sjfb8856606 	for (qid = 0; qid < nb_txq; qid++) {
34654b05018fSfengbojiang 		offloads = port->tx_conf[qid].offloads;
3466d30ea906Sjfb8856606 		port->tx_conf[qid] = port->dev_info.default_txconf;
34674b05018fSfengbojiang 		if (offloads != 0)
34684b05018fSfengbojiang 			port->tx_conf[qid].offloads = offloads;
3469d30ea906Sjfb8856606 
3470d30ea906Sjfb8856606 		/* Check if any Tx parameters have been passed */
3471a9643ea8Slogwang 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3472d30ea906Sjfb8856606 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3473a9643ea8Slogwang 
3474a9643ea8Slogwang 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3475d30ea906Sjfb8856606 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3476a9643ea8Slogwang 
3477a9643ea8Slogwang 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3478d30ea906Sjfb8856606 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3479a9643ea8Slogwang 
3480a9643ea8Slogwang 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3481d30ea906Sjfb8856606 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3482a9643ea8Slogwang 
3483a9643ea8Slogwang 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3484d30ea906Sjfb8856606 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3485a9643ea8Slogwang 
3486d30ea906Sjfb8856606 		port->nb_tx_desc[qid] = nb_txd;
3487d30ea906Sjfb8856606 	}
3488a9643ea8Slogwang }
3489a9643ea8Slogwang 
3490a9643ea8Slogwang void
3491a9643ea8Slogwang init_port_config(void)
3492a9643ea8Slogwang {
3493a9643ea8Slogwang 	portid_t pid;
3494a9643ea8Slogwang 	struct rte_port *port;
34954418919fSjohnjiang 	int ret;
3496a9643ea8Slogwang 
34972bfe3f2eSlogwang 	RTE_ETH_FOREACH_DEV(pid) {
3498a9643ea8Slogwang 		port = &ports[pid];
3499a9643ea8Slogwang 		port->dev_conf.fdir_conf = fdir_conf;
35004418919fSjohnjiang 
35014418919fSjohnjiang 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
35024418919fSjohnjiang 		if (ret != 0)
35034418919fSjohnjiang 			return;
35044418919fSjohnjiang 
3505a9643ea8Slogwang 		if (nb_rxq > 1) {
3506a9643ea8Slogwang 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3507d30ea906Sjfb8856606 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3508d30ea906Sjfb8856606 				rss_hf & port->dev_info.flow_type_rss_offloads;
3509a9643ea8Slogwang 		} else {
3510a9643ea8Slogwang 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3511a9643ea8Slogwang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3512a9643ea8Slogwang 		}
3513a9643ea8Slogwang 
35142bfe3f2eSlogwang 		if (port->dcb_flag == 0) {
3515a9643ea8Slogwang 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3516*2d9fd380Sjfb8856606 				port->dev_conf.rxmode.mq_mode =
3517*2d9fd380Sjfb8856606 					(enum rte_eth_rx_mq_mode)
3518*2d9fd380Sjfb8856606 						(rx_mq_mode & ETH_MQ_RX_RSS);
3519a9643ea8Slogwang 			else
3520a9643ea8Slogwang 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3521a9643ea8Slogwang 		}
3522a9643ea8Slogwang 
3523a9643ea8Slogwang 		rxtx_port_config(port);
3524a9643ea8Slogwang 
35254418919fSjohnjiang 		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
35264418919fSjohnjiang 		if (ret != 0)
35274418919fSjohnjiang 			return;
3528a9643ea8Slogwang 
3529a9643ea8Slogwang 		map_port_queue_stats_mapping_registers(pid, port);
3530*2d9fd380Sjfb8856606 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
35312bfe3f2eSlogwang 		rte_pmd_ixgbe_bypass_init(pid);
35322bfe3f2eSlogwang #endif
35332bfe3f2eSlogwang 
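		/*
		 * Enable link-state-change and device-removal interrupts only
		 * when requested and advertised by the driver via dev_flags.
		 */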
35342bfe3f2eSlogwang 		if (lsc_interrupt &&
35352bfe3f2eSlogwang 		    (rte_eth_devices[pid].data->dev_flags &
35362bfe3f2eSlogwang 		     RTE_ETH_DEV_INTR_LSC))
35372bfe3f2eSlogwang 			port->dev_conf.intr_conf.lsc = 1;
35382bfe3f2eSlogwang 		if (rmv_interrupt &&
35392bfe3f2eSlogwang 		    (rte_eth_devices[pid].data->dev_flags &
35402bfe3f2eSlogwang 		     RTE_ETH_DEV_INTR_RMV))
35412bfe3f2eSlogwang 			port->dev_conf.intr_conf.rmv = 1;
3542a9643ea8Slogwang 	}
3543a9643ea8Slogwang }
3544a9643ea8Slogwang 
3545a9643ea8Slogwang void set_port_slave_flag(portid_t slave_pid)
3546a9643ea8Slogwang {
3547a9643ea8Slogwang 	struct rte_port *port;
3548a9643ea8Slogwang 
3549a9643ea8Slogwang 	port = &ports[slave_pid];
3550a9643ea8Slogwang 	port->slave_flag = 1;
3551a9643ea8Slogwang }
3552a9643ea8Slogwang 
3553a9643ea8Slogwang void clear_port_slave_flag(portid_t slave_pid)
3554a9643ea8Slogwang {
3555a9643ea8Slogwang 	struct rte_port *port;
3556a9643ea8Slogwang 
3557a9643ea8Slogwang 	port = &ports[slave_pid];
3558a9643ea8Slogwang 	port->slave_flag = 0;
3559a9643ea8Slogwang }
3560a9643ea8Slogwang 
3561a9643ea8Slogwang uint8_t port_is_bonding_slave(portid_t slave_pid)
3562a9643ea8Slogwang {
3563a9643ea8Slogwang 	struct rte_port *port;
3564a9643ea8Slogwang 
3565a9643ea8Slogwang 	port = &ports[slave_pid];
3566579bf1e2Sjfb8856606 	if ((rte_eth_devices[slave_pid].data->dev_flags &
3567579bf1e2Sjfb8856606 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3568579bf1e2Sjfb8856606 		return 1;
3569579bf1e2Sjfb8856606 	return 0;
3570a9643ea8Slogwang }
3571a9643ea8Slogwang 
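/*
 * VLAN IDs used to populate the VMDq+DCB pool maps in get_eth_dcb_conf():
 * one tag per pool map entry.
 */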
3572a9643ea8Slogwang const uint16_t vlan_tags[] = {
3573a9643ea8Slogwang 		0,  1,  2,  3,  4,  5,  6,  7,
3574a9643ea8Slogwang 		8,  9, 10, 11,  12, 13, 14, 15,
3575a9643ea8Slogwang 		16, 17, 18, 19, 20, 21, 22, 23,
3576a9643ea8Slogwang 		24, 25, 26, 27, 28, 29, 30, 31
3577a9643ea8Slogwang };
3578a9643ea8Slogwang 
3579a9643ea8Slogwang static  int
3580579bf1e2Sjfb8856606 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3581a9643ea8Slogwang 		 enum dcb_mode_enable dcb_mode,
3582a9643ea8Slogwang 		 enum rte_eth_nb_tcs num_tcs,
3583a9643ea8Slogwang 		 uint8_t pfc_en)
3584a9643ea8Slogwang {
3585a9643ea8Slogwang 	uint8_t i;
3586579bf1e2Sjfb8856606 	int32_t rc;
3587579bf1e2Sjfb8856606 	struct rte_eth_rss_conf rss_conf;
3588a9643ea8Slogwang 
3589a9643ea8Slogwang 	/*
3590a9643ea8Slogwang 	 * Build the correct configuration for DCB+VT based on the VLAN tags
3591a9643ea8Slogwang 	 * array given above and the number of traffic classes available for use.
3592a9643ea8Slogwang 	 */
3593a9643ea8Slogwang 	if (dcb_mode == DCB_VT_ENABLED) {
3594a9643ea8Slogwang 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3595a9643ea8Slogwang 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
3596a9643ea8Slogwang 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3597a9643ea8Slogwang 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3598a9643ea8Slogwang 
35992bfe3f2eSlogwang 		/* VMDQ+DCB RX and TX configurations */
3600a9643ea8Slogwang 		vmdq_rx_conf->enable_default_pool = 0;
3601a9643ea8Slogwang 		vmdq_rx_conf->default_pool = 0;
3602a9643ea8Slogwang 		vmdq_rx_conf->nb_queue_pools =
3603a9643ea8Slogwang 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3604a9643ea8Slogwang 		vmdq_tx_conf->nb_queue_pools =
3605a9643ea8Slogwang 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3606a9643ea8Slogwang 
3607a9643ea8Slogwang 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3608a9643ea8Slogwang 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3609a9643ea8Slogwang 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3610a9643ea8Slogwang 			vmdq_rx_conf->pool_map[i].pools =
3611a9643ea8Slogwang 				1 << (i % vmdq_rx_conf->nb_queue_pools);
3612a9643ea8Slogwang 		}
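		/*
		 * Map each of the eight user priorities to a traffic class,
		 * wrapping around when num_tcs is smaller than eight.
		 */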
3613a9643ea8Slogwang 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
36142bfe3f2eSlogwang 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
36152bfe3f2eSlogwang 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3616a9643ea8Slogwang 		}
3617a9643ea8Slogwang 
3618a9643ea8Slogwang 		/* set DCB mode of RX and TX of multiple queues */
3619*2d9fd380Sjfb8856606 		eth_conf->rxmode.mq_mode =
3620*2d9fd380Sjfb8856606 				(enum rte_eth_rx_mq_mode)
3621*2d9fd380Sjfb8856606 					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
3622a9643ea8Slogwang 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3623a9643ea8Slogwang 	} else {
3624a9643ea8Slogwang 		struct rte_eth_dcb_rx_conf *rx_conf =
3625a9643ea8Slogwang 				&eth_conf->rx_adv_conf.dcb_rx_conf;
3626a9643ea8Slogwang 		struct rte_eth_dcb_tx_conf *tx_conf =
3627a9643ea8Slogwang 				&eth_conf->tx_adv_conf.dcb_tx_conf;
3628a9643ea8Slogwang 
36290c6bd470Sfengbojiang 		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
36300c6bd470Sfengbojiang 
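		/*
		 * Read back the port's current RSS configuration so it can be
		 * re-applied together with DCB (ETH_MQ_RX_DCB_RSS below).
		 */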
3631579bf1e2Sjfb8856606 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3632579bf1e2Sjfb8856606 		if (rc != 0)
3633579bf1e2Sjfb8856606 			return rc;
3634579bf1e2Sjfb8856606 
3635a9643ea8Slogwang 		rx_conf->nb_tcs = num_tcs;
3636a9643ea8Slogwang 		tx_conf->nb_tcs = num_tcs;
3637a9643ea8Slogwang 
36382bfe3f2eSlogwang 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
36392bfe3f2eSlogwang 			rx_conf->dcb_tc[i] = i % num_tcs;
36402bfe3f2eSlogwang 			tx_conf->dcb_tc[i] = i % num_tcs;
3641a9643ea8Slogwang 		}
3642579bf1e2Sjfb8856606 
3643*2d9fd380Sjfb8856606 		eth_conf->rxmode.mq_mode =
3644*2d9fd380Sjfb8856606 				(enum rte_eth_rx_mq_mode)
3645*2d9fd380Sjfb8856606 					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3646579bf1e2Sjfb8856606 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
3647a9643ea8Slogwang 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3648a9643ea8Slogwang 	}
3649a9643ea8Slogwang 
3650a9643ea8Slogwang 	if (pfc_en)
3651a9643ea8Slogwang 		eth_conf->dcb_capability_en =
3652a9643ea8Slogwang 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3653a9643ea8Slogwang 	else
3654a9643ea8Slogwang 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
3655a9643ea8Slogwang 
3656a9643ea8Slogwang 	return 0;
3657a9643ea8Slogwang }
3658a9643ea8Slogwang 
3659a9643ea8Slogwang int
3660a9643ea8Slogwang init_port_dcb_config(portid_t pid,
3661a9643ea8Slogwang 		     enum dcb_mode_enable dcb_mode,
3662a9643ea8Slogwang 		     enum rte_eth_nb_tcs num_tcs,
3663a9643ea8Slogwang 		     uint8_t pfc_en)
3664a9643ea8Slogwang {
3665a9643ea8Slogwang 	struct rte_eth_conf port_conf;
3666a9643ea8Slogwang 	struct rte_port *rte_port;
3667a9643ea8Slogwang 	int retval;
3668a9643ea8Slogwang 	uint16_t i;
3669a9643ea8Slogwang 
3670a9643ea8Slogwang 	rte_port = &ports[pid];
3671a9643ea8Slogwang 
3672a9643ea8Slogwang 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3673a9643ea8Slogwang 	/* Enter DCB configuration status */
3674a9643ea8Slogwang 	dcb_config = 1;
3675a9643ea8Slogwang 
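	/*
	 * Start from the port's current Rx/Tx mode settings so existing
	 * offloads are preserved in the DCB configuration.
	 */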
3676d30ea906Sjfb8856606 	port_conf.rxmode = rte_port->dev_conf.rxmode;
3677d30ea906Sjfb8856606 	port_conf.txmode = rte_port->dev_conf.txmode;
3678d30ea906Sjfb8856606 
3679a9643ea8Slogwang 	/* Set the configuration of DCB in VT mode and DCB in non-VT mode */
3680579bf1e2Sjfb8856606 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3681a9643ea8Slogwang 	if (retval < 0)
3682a9643ea8Slogwang 		return retval;
3683d30ea906Sjfb8856606 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3684a9643ea8Slogwang 
3685d30ea906Sjfb8856606 	/* Re-configure the device. */
36861646932aSjfb8856606 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
36871646932aSjfb8856606 	if (retval < 0)
36881646932aSjfb8856606 		return retval;
36894418919fSjohnjiang 
36904418919fSjohnjiang 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
36914418919fSjohnjiang 	if (retval != 0)
36924418919fSjohnjiang 		return retval;
3693a9643ea8Slogwang 
3694a9643ea8Slogwang 	/* If dev_info.vmdq_pool_base is greater than 0,
3695a9643ea8Slogwang 	 * the queue ids of the VMDq pools start after the PF queues.
3696a9643ea8Slogwang 	 */
3697a9643ea8Slogwang 	if (dcb_mode == DCB_VT_ENABLED &&
3698a9643ea8Slogwang 	    rte_port->dev_info.vmdq_pool_base > 0) {
3699a9643ea8Slogwang 		printf("VMDQ_DCB multi-queue mode is nonsensical"
3700a9643ea8Slogwang 			" for port %d.\n", pid);
3701a9643ea8Slogwang 		return -1;
3702a9643ea8Slogwang 	}
3703a9643ea8Slogwang 
3704a9643ea8Slogwang 	/* Assume the ports in testpmd have the same DCB capability
3705a9643ea8Slogwang 	 * and the same number of rxq and txq in DCB mode
3706a9643ea8Slogwang 	 */
3707a9643ea8Slogwang 	if (dcb_mode == DCB_VT_ENABLED) {
3708a9643ea8Slogwang 		if (rte_port->dev_info.max_vfs > 0) {
3709a9643ea8Slogwang 			nb_rxq = rte_port->dev_info.nb_rx_queues;
3710a9643ea8Slogwang 			nb_txq = rte_port->dev_info.nb_tx_queues;
3711a9643ea8Slogwang 		} else {
3712a9643ea8Slogwang 			nb_rxq = rte_port->dev_info.max_rx_queues;
3713a9643ea8Slogwang 			nb_txq = rte_port->dev_info.max_tx_queues;
3714a9643ea8Slogwang 		}
3715a9643ea8Slogwang 	} else {
3716a9643ea8Slogwang 		/* If VT is disabled, use all PF queues */
3717a9643ea8Slogwang 		if (rte_port->dev_info.vmdq_pool_base == 0) {
3718a9643ea8Slogwang 			nb_rxq = rte_port->dev_info.max_rx_queues;
3719a9643ea8Slogwang 			nb_txq = rte_port->dev_info.max_tx_queues;
3720a9643ea8Slogwang 		} else {
3721a9643ea8Slogwang 			nb_rxq = (queueid_t)num_tcs;
3722a9643ea8Slogwang 			nb_txq = (queueid_t)num_tcs;
3723a9643ea8Slogwang 
3724a9643ea8Slogwang 		}
3725a9643ea8Slogwang 	}
3726a9643ea8Slogwang 	rx_free_thresh = 64;
3727a9643ea8Slogwang 
3728a9643ea8Slogwang 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3729a9643ea8Slogwang 
3730a9643ea8Slogwang 	rxtx_port_config(rte_port);
3731a9643ea8Slogwang 	/* VLAN filter */
3732d30ea906Sjfb8856606 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3733a9643ea8Slogwang 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
3734a9643ea8Slogwang 		rx_vft_set(pid, vlan_tags[i], 1);
3735a9643ea8Slogwang 
37364418919fSjohnjiang 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
37374418919fSjohnjiang 	if (retval != 0)
37384418919fSjohnjiang 		return retval;
37394418919fSjohnjiang 
3740a9643ea8Slogwang 	map_port_queue_stats_mapping_registers(pid, rte_port);
3741a9643ea8Slogwang 
3742a9643ea8Slogwang 	rte_port->dcb_flag = 1;
3743a9643ea8Slogwang 
3744a9643ea8Slogwang 	return 0;
3745a9643ea8Slogwang }
3746a9643ea8Slogwang 
3747a9643ea8Slogwang static void
3748a9643ea8Slogwang init_port(void)
3749a9643ea8Slogwang {
3750*2d9fd380Sjfb8856606 	int i;
3751*2d9fd380Sjfb8856606 
3752a9643ea8Slogwang 	/* Configuration of Ethernet ports. */
3753a9643ea8Slogwang 	ports = rte_zmalloc("testpmd: ports",
3754a9643ea8Slogwang 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3755a9643ea8Slogwang 			    RTE_CACHE_LINE_SIZE);
3756a9643ea8Slogwang 	if (ports == NULL) {
3757a9643ea8Slogwang 		rte_exit(EXIT_FAILURE,
3758a9643ea8Slogwang 				"rte_zmalloc(%d struct rte_port) failed\n",
3759a9643ea8Slogwang 				RTE_MAX_ETHPORTS);
3760a9643ea8Slogwang 	}
3761*2d9fd380Sjfb8856606 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3762*2d9fd380Sjfb8856606 		LIST_INIT(&ports[i].flow_tunnel_list);
3763d30ea906Sjfb8856606 	/* Initialize ports NUMA structures */
3764d30ea906Sjfb8856606 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3765d30ea906Sjfb8856606 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3766d30ea906Sjfb8856606 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3767a9643ea8Slogwang }
3768a9643ea8Slogwang 
3769a9643ea8Slogwang static void
3770a9643ea8Slogwang force_quit(void)
3771a9643ea8Slogwang {
3772a9643ea8Slogwang 	pmd_test_exit();
3773a9643ea8Slogwang 	prompt_exit();
3774a9643ea8Slogwang }
3775a9643ea8Slogwang 
3776a9643ea8Slogwang static void
37772bfe3f2eSlogwang print_stats(void)
37782bfe3f2eSlogwang {
37792bfe3f2eSlogwang 	uint8_t i;
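	/* ANSI escape sequences: ESC[2J clears the screen, ESC[1;1H homes the cursor. */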
37802bfe3f2eSlogwang 	const char clr[] = { 27, '[', '2', 'J', '\0' };
37812bfe3f2eSlogwang 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
37822bfe3f2eSlogwang 
37832bfe3f2eSlogwang 	/* Clear screen and move to top left */
37842bfe3f2eSlogwang 	printf("%s%s", clr, top_left);
37852bfe3f2eSlogwang 
37862bfe3f2eSlogwang 	printf("\nPort statistics ====================================");
37872bfe3f2eSlogwang 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
37882bfe3f2eSlogwang 		nic_stats_display(fwd_ports_ids[i]);
37891646932aSjfb8856606 
37901646932aSjfb8856606 	fflush(stdout);
37912bfe3f2eSlogwang }
37922bfe3f2eSlogwang 
37932bfe3f2eSlogwang static void
3794a9643ea8Slogwang signal_handler(int signum)
3795a9643ea8Slogwang {
3796a9643ea8Slogwang 	if (signum == SIGINT || signum == SIGTERM) {
3797a9643ea8Slogwang 		printf("\nSignal %d received, preparing to exit...\n",
3798a9643ea8Slogwang 				signum);
3799*2d9fd380Sjfb8856606 #ifdef RTE_LIB_PDUMP
3800a9643ea8Slogwang 		/* uninitialize packet capture framework */
3801a9643ea8Slogwang 		rte_pdump_uninit();
3802a9643ea8Slogwang #endif
3803*2d9fd380Sjfb8856606 #ifdef RTE_LIB_LATENCYSTATS
38044b05018fSfengbojiang 		if (latencystats_enabled != 0)
38052bfe3f2eSlogwang 			rte_latencystats_uninit();
38062bfe3f2eSlogwang #endif
3807a9643ea8Slogwang 		force_quit();
38082bfe3f2eSlogwang 		/* Set flag to indicate the force termination. */
38092bfe3f2eSlogwang 		f_quit = 1;
3810a9643ea8Slogwang 		/* exit with the expected status */
3811a9643ea8Slogwang 		signal(signum, SIG_DFL);
3812a9643ea8Slogwang 		kill(getpid(), signum);
3813a9643ea8Slogwang 	}
3814a9643ea8Slogwang }
3815a9643ea8Slogwang 
3816a9643ea8Slogwang int
3817a9643ea8Slogwang main(int argc, char** argv)
3818a9643ea8Slogwang {
3819a9643ea8Slogwang 	int diag;
38202bfe3f2eSlogwang 	portid_t port_id;
3821d30ea906Sjfb8856606 	uint16_t count;
3822d30ea906Sjfb8856606 	int ret;
3823a9643ea8Slogwang 
3824a9643ea8Slogwang 	signal(SIGINT, signal_handler);
3825a9643ea8Slogwang 	signal(SIGTERM, signal_handler);
3826a9643ea8Slogwang 
3827d30ea906Sjfb8856606 	testpmd_logtype = rte_log_register("testpmd");
3828d30ea906Sjfb8856606 	if (testpmd_logtype < 0)
38294418919fSjohnjiang 		rte_exit(EXIT_FAILURE, "Cannot register log type");
3830d30ea906Sjfb8856606 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3831d30ea906Sjfb8856606 
38324418919fSjohnjiang 	diag = rte_eal_init(argc, argv);
38334418919fSjohnjiang 	if (diag < 0)
38344418919fSjohnjiang 		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
38354418919fSjohnjiang 			 rte_strerror(rte_errno));
38364418919fSjohnjiang 
38374418919fSjohnjiang 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
38384418919fSjohnjiang 		rte_exit(EXIT_FAILURE,
38394418919fSjohnjiang 			 "Secondary process type not supported.\n");
38404418919fSjohnjiang 
3841d30ea906Sjfb8856606 	ret = register_eth_event_callback();
3842d30ea906Sjfb8856606 	if (ret != 0)
38434418919fSjohnjiang 		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
38442bfe3f2eSlogwang 
3845*2d9fd380Sjfb8856606 #ifdef RTE_LIB_PDUMP
3846a9643ea8Slogwang 	/* initialize packet capture framework */
38474418919fSjohnjiang 	rte_pdump_init();
3848a9643ea8Slogwang #endif
3849a9643ea8Slogwang 
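	/* Record the ids of all ports probed by the EAL. */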
3850d30ea906Sjfb8856606 	count = 0;
3851d30ea906Sjfb8856606 	RTE_ETH_FOREACH_DEV(port_id) {
3852d30ea906Sjfb8856606 		ports_ids[count] = port_id;
3853d30ea906Sjfb8856606 		count++;
3854d30ea906Sjfb8856606 	}
3855d30ea906Sjfb8856606 	nb_ports = (portid_t) count;
3856a9643ea8Slogwang 	if (nb_ports == 0)
3857d30ea906Sjfb8856606 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3858a9643ea8Slogwang 
3859a9643ea8Slogwang 	/* allocate port structures, and init them */
3860a9643ea8Slogwang 	init_port();
3861a9643ea8Slogwang 
3862a9643ea8Slogwang 	set_def_fwd_config();
3863a9643ea8Slogwang 	if (nb_lcores == 0)
38644418919fSjohnjiang 		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
38654418919fSjohnjiang 			 "Check the core mask argument\n");
3866a9643ea8Slogwang 
38672bfe3f2eSlogwang 	/* Bitrate/latency stats disabled by default */
3868*2d9fd380Sjfb8856606 #ifdef RTE_LIB_BITRATESTATS
38692bfe3f2eSlogwang 	bitrate_enabled = 0;
38702bfe3f2eSlogwang #endif
3871*2d9fd380Sjfb8856606 #ifdef RTE_LIB_LATENCYSTATS
38722bfe3f2eSlogwang 	latencystats_enabled = 0;
38732bfe3f2eSlogwang #endif
38742bfe3f2eSlogwang 
3875d30ea906Sjfb8856606 	/* on FreeBSD, mlockall() is disabled by default */
38764418919fSjohnjiang #ifdef RTE_EXEC_ENV_FREEBSD
3877d30ea906Sjfb8856606 	do_mlockall = 0;
3878d30ea906Sjfb8856606 #else
3879d30ea906Sjfb8856606 	do_mlockall = 1;
3880d30ea906Sjfb8856606 #endif
3881d30ea906Sjfb8856606 
3882a9643ea8Slogwang 	argc -= diag;
3883a9643ea8Slogwang 	argv += diag;
3884a9643ea8Slogwang 	if (argc > 1)
3885a9643ea8Slogwang 		launch_args_parse(argc, argv);
3886a9643ea8Slogwang 
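	/*
	 * Lock current and future pages in memory; failure is only reported,
	 * not fatal (typically done to avoid page faults in the datapath).
	 */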
3887d30ea906Sjfb8856606 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3888d30ea906Sjfb8856606 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3889d30ea906Sjfb8856606 			strerror(errno));
3890d30ea906Sjfb8856606 	}
3891d30ea906Sjfb8856606 
38922bfe3f2eSlogwang 	if (tx_first && interactive)
38932bfe3f2eSlogwang 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
38942bfe3f2eSlogwang 				"interactive mode.\n");
38952bfe3f2eSlogwang 
38962bfe3f2eSlogwang 	if (tx_first && lsc_interrupt) {
38972bfe3f2eSlogwang 		printf("Warning: lsc_interrupt needs to be off when "
38982bfe3f2eSlogwang 				"using tx_first. Disabling.\n");
38992bfe3f2eSlogwang 		lsc_interrupt = 0;
39002bfe3f2eSlogwang 	}
39012bfe3f2eSlogwang 
3902a9643ea8Slogwang 	if (!nb_rxq && !nb_txq)
3903a9643ea8Slogwang 		printf("Warning: Either rx or tx queues should be non-zero\n");
3904a9643ea8Slogwang 
3905a9643ea8Slogwang 	if (nb_rxq > 1 && nb_rxq > nb_txq)
3906a9643ea8Slogwang 		printf("Warning: nb_rxq=%d enables RSS configuration, "
3907a9643ea8Slogwang 		       "but nb_txq=%d will prevent it from being fully tested.\n",
3908a9643ea8Slogwang 		       nb_rxq, nb_txq);
3909a9643ea8Slogwang 
3910a9643ea8Slogwang 	init_config();
3911d30ea906Sjfb8856606 
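	/*
	 * With device hotplug enabled, turn on EAL hotplug handling, start the
	 * device event monitor and register a callback for attach/detach events.
	 */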
3912d30ea906Sjfb8856606 	if (hot_plug) {
3913d30ea906Sjfb8856606 		ret = rte_dev_hotplug_handle_enable();
3914d30ea906Sjfb8856606 		if (ret) {
3915d30ea906Sjfb8856606 			RTE_LOG(ERR, EAL,
3916d30ea906Sjfb8856606 				"failed to enable hotplug handling.\n");
3917d30ea906Sjfb8856606 			return -1;
3918d30ea906Sjfb8856606 		}
3919d30ea906Sjfb8856606 
3920d30ea906Sjfb8856606 		ret = rte_dev_event_monitor_start();
3921d30ea906Sjfb8856606 		if (ret) {
3922d30ea906Sjfb8856606 			RTE_LOG(ERR, EAL,
3923d30ea906Sjfb8856606 				"failed to start device event monitoring.\n");
3924d30ea906Sjfb8856606 			return -1;
3925d30ea906Sjfb8856606 		}
3926d30ea906Sjfb8856606 
3927d30ea906Sjfb8856606 		ret = rte_dev_event_callback_register(NULL,
3928d30ea906Sjfb8856606 			dev_event_callback, NULL);
3929d30ea906Sjfb8856606 		if (ret) {
3930d30ea906Sjfb8856606 			RTE_LOG(ERR, EAL,
3931d30ea906Sjfb8856606 				"failed to register device event callback\n");
3932d30ea906Sjfb8856606 			return -1;
3933d30ea906Sjfb8856606 		}
3934d30ea906Sjfb8856606 	}
3935d30ea906Sjfb8856606 
39364418919fSjohnjiang 	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3937a9643ea8Slogwang 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
3938a9643ea8Slogwang 
3939a9643ea8Slogwang 	/* set all ports to promiscuous mode by default */
39404418919fSjohnjiang 	RTE_ETH_FOREACH_DEV(port_id) {
39414418919fSjohnjiang 		ret = rte_eth_promiscuous_enable(port_id);
39424418919fSjohnjiang 		if (ret != 0)
39434418919fSjohnjiang 			printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
39444418919fSjohnjiang 				port_id, rte_strerror(-ret));
39454418919fSjohnjiang 	}
3946a9643ea8Slogwang 
39472bfe3f2eSlogwang 	/* Init metrics library */
39482bfe3f2eSlogwang 	rte_metrics_init(rte_socket_id());
39492bfe3f2eSlogwang 
3950*2d9fd380Sjfb8856606 #ifdef RTE_LIB_LATENCYSTATS
39512bfe3f2eSlogwang 	if (latencystats_enabled != 0) {
39522bfe3f2eSlogwang 		int ret = rte_latencystats_init(1, NULL);
39532bfe3f2eSlogwang 		if (ret)
39542bfe3f2eSlogwang 			printf("Warning: latencystats init()"
39552bfe3f2eSlogwang 				" returned error %d\n",	ret);
39562bfe3f2eSlogwang 		printf("Latencystats running on lcore %d\n",
39572bfe3f2eSlogwang 			latencystats_lcore_id);
39582bfe3f2eSlogwang 	}
39592bfe3f2eSlogwang #endif
39602bfe3f2eSlogwang 
39612bfe3f2eSlogwang 	/* Setup bitrate stats */
3962*2d9fd380Sjfb8856606 #ifdef RTE_LIB_BITRATESTATS
39632bfe3f2eSlogwang 	if (bitrate_enabled != 0) {
39642bfe3f2eSlogwang 		bitrate_data = rte_stats_bitrate_create();
39652bfe3f2eSlogwang 		if (bitrate_data == NULL)
39662bfe3f2eSlogwang 			rte_exit(EXIT_FAILURE,
39672bfe3f2eSlogwang 				"Could not allocate bitrate data.\n");
39682bfe3f2eSlogwang 		rte_stats_bitrate_reg(bitrate_data);
39692bfe3f2eSlogwang 	}
39702bfe3f2eSlogwang #endif
39712bfe3f2eSlogwang 
3972*2d9fd380Sjfb8856606 #ifdef RTE_LIB_CMDLINE
39732bfe3f2eSlogwang 	if (strlen(cmdline_filename) != 0)
39742bfe3f2eSlogwang 		cmdline_read_from_file(cmdline_filename);
39752bfe3f2eSlogwang 
3976a9643ea8Slogwang 	if (interactive == 1) {
3977a9643ea8Slogwang 		if (auto_start) {
3978a9643ea8Slogwang 			printf("Start automatic packet forwarding\n");
3979a9643ea8Slogwang 			start_packet_forwarding(0);
3980a9643ea8Slogwang 		}
3981a9643ea8Slogwang 		prompt();
39822bfe3f2eSlogwang 		pmd_test_exit();
3983a9643ea8Slogwang 	} else
3984a9643ea8Slogwang #endif
3985a9643ea8Slogwang 	{
3986a9643ea8Slogwang 		char c;
3987a9643ea8Slogwang 		int rc;
3988a9643ea8Slogwang 
39892bfe3f2eSlogwang 		f_quit = 0;
39902bfe3f2eSlogwang 
3991a9643ea8Slogwang 		printf("No commandline core given, start packet forwarding\n");
39922bfe3f2eSlogwang 		start_packet_forwarding(tx_first);
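		/*
		 * When a stats period is configured, print the port statistics
		 * every stats_period seconds until a signal sets f_quit.
		 */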
39932bfe3f2eSlogwang 		if (stats_period != 0) {
39942bfe3f2eSlogwang 			uint64_t prev_time = 0, cur_time, diff_time = 0;
39952bfe3f2eSlogwang 			uint64_t timer_period;
39962bfe3f2eSlogwang 
39972bfe3f2eSlogwang 			/* Convert to number of cycles */
39982bfe3f2eSlogwang 			timer_period = stats_period * rte_get_timer_hz();
39992bfe3f2eSlogwang 
40002bfe3f2eSlogwang 			while (f_quit == 0) {
40012bfe3f2eSlogwang 				cur_time = rte_get_timer_cycles();
40022bfe3f2eSlogwang 				diff_time += cur_time - prev_time;
40032bfe3f2eSlogwang 
40042bfe3f2eSlogwang 				if (diff_time >= timer_period) {
40052bfe3f2eSlogwang 					print_stats();
40062bfe3f2eSlogwang 					/* Reset the timer */
40072bfe3f2eSlogwang 					diff_time = 0;
40082bfe3f2eSlogwang 				}
40092bfe3f2eSlogwang 				/* Sleep to avoid unnecessary checks */
40102bfe3f2eSlogwang 				prev_time = cur_time;
40112bfe3f2eSlogwang 				sleep(1);
40122bfe3f2eSlogwang 			}
40132bfe3f2eSlogwang 		}
40142bfe3f2eSlogwang 
4015a9643ea8Slogwang 		printf("Press enter to exit\n");
4016a9643ea8Slogwang 		rc = read(0, &c, 1);
4017a9643ea8Slogwang 		pmd_test_exit();
4018a9643ea8Slogwang 		if (rc < 0)
4019a9643ea8Slogwang 			return 1;
4020a9643ea8Slogwang 	}
4021a9643ea8Slogwang 
40224418919fSjohnjiang 	ret = rte_eal_cleanup();
40234418919fSjohnjiang 	if (ret != 0)
40244418919fSjohnjiang 		rte_exit(EXIT_FAILURE,
40254418919fSjohnjiang 			 "EAL cleanup failed: %s\n", strerror(-ret));
40264418919fSjohnjiang 
40274418919fSjohnjiang 	return EXIT_SUCCESS;
4028a9643ea8Slogwang }
4029