/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>

#include <rte_common.h>
#include <rte_vect.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>

#include <cmdline_parse.h>
#include <cmdline_parse_etheraddr.h>

#include "l3fwd.h"
#include "l3fwd_event.h"

#define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
#define MAX_RX_QUEUE_PER_PORT 128

#define MAX_LCORE_PARAMS 1024

/* Static global variables used within this file. */
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* Promiscuous mode is off on all ports by default. */
static int promiscuous_on;

/* Select Longest-Prefix or Exact match. */
static int l3fwd_lpm_on;
static int l3fwd_em_on;

/* Global variables. */

static int numa_on = 1; /**< NUMA is enabled by default. */
static int parse_ptype; /**< Parse packet type using Rx callback; disabled by default. */
static int per_port_pool; /**< Use separate buffer pools per port; disabled by default. */

volatile bool force_quit;

/* Ethernet addresses of ports */
uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

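/* Per-port 16-byte values holding the precomputed Ethernet header
 * addresses: the destination MAC in bytes 0-5 (mirrored from
 * dest_eth_addr) and the source MAC in bytes 6-11 (filled in during
 * port setup), so the forwarding path can rewrite both with one store.
 */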
xmm_t val_eth[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
uint32_t enabled_port_mask;

/* Used only in exact match mode. */
int ipv6; /**< ipv6 is false by default. */
uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;

struct lcore_conf lcore_conf[RTE_MAX_LCORE];

struct lcore_params {
        uint16_t port_id;
        uint8_t queue_id;
        uint8_t lcore_id;
} __rte_cache_aligned;

static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params lcore_params_array_default[] = {
        {0, 0, 2},
        {0, 1, 2},
        {0, 2, 2},
        {1, 0, 2},
        {1, 1, 2},
        {1, 2, 2},
        {2, 0, 2},
        {3, 0, 3},
        {3, 1, 3},
};

static struct lcore_params *lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
                sizeof(lcore_params_array_default[0]);

static struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode = ETH_MQ_RX_RSS,
                .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
                .offloads = DEV_RX_OFFLOAD_CHECKSUM,
        },
        .rx_adv_conf = {
                .rss_conf = {
                        .rss_key = NULL,
                        .rss_hf = ETH_RSS_IP,
                },
        },
        .txmode = {
                .mq_mode = ETH_MQ_TX_NONE,
        },
};

static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
static uint8_t lkp_per_socket[NB_SOCKETS];

struct l3fwd_lkp_mode {
        void (*setup)(int);
        int (*check_ptype)(int);
        rte_rx_callback_fn cb_parse_ptype;
        int (*main_loop)(void *);
        void *(*get_ipv4_lookup_struct)(int);
        void *(*get_ipv6_lookup_struct)(int);
};

static struct l3fwd_lkp_mode l3fwd_lkp;

static struct l3fwd_lkp_mode l3fwd_em_lkp = {
        .setup = setup_hash,
        .check_ptype = em_check_ptype,
        .cb_parse_ptype = em_cb_parse_ptype,
        .main_loop = em_main_loop,
        .get_ipv4_lookup_struct = em_get_ipv4_l3fwd_lookup_struct,
        .get_ipv6_lookup_struct = em_get_ipv6_l3fwd_lookup_struct,
};

static struct l3fwd_lkp_mode l3fwd_lpm_lkp = {
        .setup = setup_lpm,
        .check_ptype = lpm_check_ptype,
        .cb_parse_ptype = lpm_cb_parse_ptype,
        .main_loop = lpm_main_loop,
        .get_ipv4_lookup_struct = lpm_get_ipv4_l3fwd_lookup_struct,
        .get_ipv6_lookup_struct = lpm_get_ipv6_l3fwd_lookup_struct,
};

/*
 * Set up the lookup methods for forwarding.
 * Exact-match and longest-prefix-match are currently supported.
 */
static void
setup_l3fwd_lookup_tables(void)
{
        /* Setup HASH lookup functions. */
        if (l3fwd_em_on)
                l3fwd_lkp = l3fwd_em_lkp;
        /* Setup LPM lookup functions. */
        else
                l3fwd_lkp = l3fwd_lpm_lkp;
}

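/* Validate the (port, queue, lcore) entries: every queue id must be in
 * range, every lcore must be enabled in the coremask, and a warning is
 * printed for lcores on a non-zero NUMA socket when NUMA is off.
 */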
static int
check_lcore_params(void)
{
        uint8_t queue, lcore;
        uint16_t i;
        int socketid;

        for (i = 0; i < nb_lcore_params; ++i) {
                queue = lcore_params[i].queue_id;
                if (queue >= MAX_RX_QUEUE_PER_PORT) {
                        printf("invalid queue number: %hhu\n", queue);
                        return -1;
                }
                lcore = lcore_params[i].lcore_id;
                if (!rte_lcore_is_enabled(lcore)) {
                        printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
                        return -1;
                }
                socketid = rte_lcore_to_socket_id(lcore);
                if (socketid != 0 && numa_on == 0) {
                        printf("warning: lcore %hhu is on socket %d with numa off\n",
                                lcore, socketid);
                }
        }
        return 0;
}

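/* Verify that every port referenced by the lcore config is both enabled
 * in the port mask and actually present on the system.
 */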
static int
check_port_config(void)
{
        uint16_t portid;
        uint16_t i;

        for (i = 0; i < nb_lcore_params; ++i) {
                portid = lcore_params[i].port_id;
                if ((enabled_port_mask & (1 << portid)) == 0) {
                        printf("port %u is not enabled in port mask\n", portid);
                        return -1;
                }
                if (!rte_eth_dev_is_valid_port(portid)) {
                        printf("port %u is not present on the board\n", portid);
                        return -1;
                }
        }
        return 0;
}

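/* Count the Rx queues configured on a port, enforcing that queue ids
 * appear in sequence starting from 0.
 */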
static uint8_t
get_port_n_rx_queues(const uint16_t port)
{
        int queue = -1;
        uint16_t i;

        for (i = 0; i < nb_lcore_params; ++i) {
                if (lcore_params[i].port_id == port) {
                        if (lcore_params[i].queue_id == queue+1)
                                queue = lcore_params[i].queue_id;
                        else
                                rte_exit(EXIT_FAILURE, "queue ids of port %d must be"
                                        " in sequence and must start with 0\n",
                                        lcore_params[i].port_id);
                }
        }
        return (uint8_t)(++queue);
}

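/* Distribute the configured (port, queue) pairs across the per-lcore
 * rx_queue_list tables consumed by the polling main loop.
 */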
static int
init_lcore_rx_queues(void)
{
        uint16_t i, nb_rx_queue;
        uint8_t lcore;

        for (i = 0; i < nb_lcore_params; ++i) {
                lcore = lcore_params[i].lcore_id;
                nb_rx_queue = lcore_conf[lcore].n_rx_queue;
                if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
                        printf("error: too many queues (%u) for lcore: %u\n",
                                (unsigned)nb_rx_queue + 1, (unsigned)lcore);
                        return -1;
                } else {
                        lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
                                lcore_params[i].port_id;
                        lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
                                lcore_params[i].queue_id;
                        lcore_conf[lcore].n_rx_queue++;
                }
        }
        return 0;
}

/* display usage */
static void
print_usage(const char *prgname)
{
        fprintf(stderr, "%s [EAL options] --"
                " -p PORTMASK"
                " [-P]"
                " [-E]"
                " [-L]"
                " --config (port,queue,lcore)[,(port,queue,lcore)]"
                " [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
                " [--enable-jumbo [--max-pkt-len PKTLEN]]"
                " [--no-numa]"
                " [--hash-entry-num]"
                " [--ipv6]"
                " [--parse-ptype]"
                " [--per-port-pool]"
                " [--mode]"
                " [--eventq-sched]\n\n"

                " -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
                " -P : Enable promiscuous mode\n"
                " -E : Enable exact match\n"
                " -L : Enable longest prefix match (default)\n"
                " --config (port,queue,lcore): Rx queue configuration\n"
                " --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n"
                " --enable-jumbo: Enable jumbo frames\n"
298 " --max-pkt-len: Under the premise of enabling jumbo,\n"
299 " maximum packet length in decimal (64-9600)\n"
300 " --no-numa: Disable numa awareness\n"
301 " --hash-entry-num: Specify the hash entry number in hexadecimal to be setup\n"
302 " --ipv6: Set if running ipv6 packets\n"
303 " --parse-ptype: Set to use software to analyze packet type\n"
304 " --per-port-pool: Use separate buffer pool per port\n"
305 " --mode: Packet transfer mode for I/O, poll or eventdev\n"
306 " Default mode = poll\n"
307 " --eventq-sched: Event queue synchronization method\n"
308 " ordered, atomic or parallel.\n"
309 " Default: atomic\n"
310 " Valid only if --mode=eventdev\n"
311 " --event-eth-rxqs: Number of ethernet RX queues per device.\n"
312 " Default: 1\n"
313 " Valid only if --mode=eventdev\n\n",
314 prgname);
315 }

static int
parse_max_pkt_len(const char *pktlen)
{
        char *end = NULL;
        unsigned long len;

        /* parse decimal string */
        len = strtoul(pktlen, &end, 10);
        if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
                return -1;

        if (len == 0)
                return -1;

        return len;
}

static int
parse_portmask(const char *portmask)
{
        char *end = NULL;
        unsigned long pm;

        /* parse hexadecimal string */
        pm = strtoul(portmask, &end, 16);
        if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
                return 0;

        return pm;
}

static int
parse_hash_entry_number(const char *hash_entry_num)
{
        char *end = NULL;
        unsigned long hash_en;

        /* parse hexadecimal string */
        hash_en = strtoul(hash_entry_num, &end, 16);
        if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
                return -1;

        if (hash_en == 0)
                return -1;

        return hash_en;
}

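/* Parse the --config argument: a list of "(port,queue,lcore)" triples.
 * For example, --config="(0,0,1),(0,1,2)" polls port 0 queue 0 on
 * lcore 1 and port 0 queue 1 on lcore 2. The parsed entries replace
 * the default lcore_params table.
 */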
static int
parse_config(const char *q_arg)
{
        char s[256];
        const char *p, *p0 = q_arg;
        char *end;
        enum fieldnames {
                FLD_PORT = 0,
                FLD_QUEUE,
                FLD_LCORE,
                _NUM_FLD
        };
        unsigned long int_fld[_NUM_FLD];
        char *str_fld[_NUM_FLD];
        int i;
        unsigned size;

        nb_lcore_params = 0;

        while ((p = strchr(p0, '(')) != NULL) {
                ++p;
                p0 = strchr(p, ')');
                if (p0 == NULL)
                        return -1;

                size = p0 - p;
                if (size >= sizeof(s))
                        return -1;

                snprintf(s, sizeof(s), "%.*s", size, p);
                if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
                        return -1;
                for (i = 0; i < _NUM_FLD; i++) {
                        errno = 0;
                        int_fld[i] = strtoul(str_fld[i], &end, 0);
                        if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
                                return -1;
                }
                if (nb_lcore_params >= MAX_LCORE_PARAMS) {
                        printf("exceeded max number of lcore params: %hu\n",
                                nb_lcore_params);
                        return -1;
                }
                lcore_params_array[nb_lcore_params].port_id =
                        (uint8_t)int_fld[FLD_PORT];
                lcore_params_array[nb_lcore_params].queue_id =
                        (uint8_t)int_fld[FLD_QUEUE];
                lcore_params_array[nb_lcore_params].lcore_id =
                        (uint8_t)int_fld[FLD_LCORE];
                ++nb_lcore_params;
        }
        lcore_params = lcore_params_array;
        return 0;
}

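/* Parse "--eth-dest=X,MM:MM:MM:MM:MM:MM": record the destination MAC
 * used when forwarding out of port X and mirror it into val_eth for
 * the fast-path Ethernet header rewrite.
 */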
static void
parse_eth_dest(const char *optarg)
{
        uint16_t portid;
        char *port_end;
        uint8_t c, *dest, peer_addr[6];

        errno = 0;
        portid = strtoul(optarg, &port_end, 10);
        if (errno != 0 || port_end == optarg || *port_end++ != ',')
                rte_exit(EXIT_FAILURE,
                        "Invalid eth-dest: %s", optarg);
        if (portid >= RTE_MAX_ETHPORTS)
                rte_exit(EXIT_FAILURE,
                        "eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
                        portid, RTE_MAX_ETHPORTS);

        if (cmdline_parse_etheraddr(NULL, port_end,
                        &peer_addr, sizeof(peer_addr)) < 0)
                rte_exit(EXIT_FAILURE,
                        "Invalid ethernet address: %s\n",
                        port_end);
        dest = (uint8_t *)&dest_eth_addr[portid];
        for (c = 0; c < 6; c++)
                dest[c] = peer_addr[c];
        *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
}

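/* Select the packet I/O mode: "poll" (the default) or "eventdev". */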
static void
parse_mode(const char *optarg)
{
        struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

        if (!strcmp(optarg, "poll"))
                evt_rsrc->enabled = false;
        else if (!strcmp(optarg, "eventdev"))
                evt_rsrc->enabled = true;
}

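/* Select the eventdev scheduling type: ordered, atomic or parallel. */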
static void
parse_eventq_sched(const char *optarg)
{
        struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

        if (!strcmp(optarg, "ordered"))
                evt_rsrc->sched_type = RTE_SCHED_TYPE_ORDERED;
        if (!strcmp(optarg, "atomic"))
                evt_rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
        if (!strcmp(optarg, "parallel"))
                evt_rsrc->sched_type = RTE_SCHED_TYPE_PARALLEL;
}

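/* Parse the decimal --event-eth-rxqs argument; an invalid or zero
 * value leaves the default (one Rx queue per device) unchanged.
 */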
static void
parse_event_eth_rx_queues(const char *eth_rx_queues)
{
        struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
        char *end = NULL;
        uint8_t num_eth_rx_queues;

        /* parse decimal string */
        num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
        if ((eth_rx_queues[0] == '\0') || (end == NULL) || (*end != '\0'))
                return;

        if (num_eth_rx_queues == 0)
                return;

        evt_rsrc->eth_rx_queues = num_eth_rx_queues;
}

#define MAX_JUMBO_PKT_LEN 9600

static const char short_options[] =
        "p:"  /* portmask */
        "P"   /* promiscuous */
        "L"   /* enable long prefix match */
        "E"   /* enable exact match */
        ;

#define CMD_LINE_OPT_CONFIG "config"
#define CMD_LINE_OPT_ETH_DEST "eth-dest"
#define CMD_LINE_OPT_NO_NUMA "no-numa"
#define CMD_LINE_OPT_IPV6 "ipv6"
#define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
#define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
#define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
#define CMD_LINE_OPT_MODE "mode"
#define CMD_LINE_OPT_EVENTQ_SYNC "eventq-sched"
#define CMD_LINE_OPT_EVENT_ETH_RX_QUEUES "event-eth-rxqs"

enum {
        /* long options mapped to a short option */

        /* first long only option value must be >= 256, so that we won't
         * conflict with short options
         */
        CMD_LINE_OPT_MIN_NUM = 256,
        CMD_LINE_OPT_CONFIG_NUM,
        CMD_LINE_OPT_ETH_DEST_NUM,
        CMD_LINE_OPT_NO_NUMA_NUM,
        CMD_LINE_OPT_IPV6_NUM,
        CMD_LINE_OPT_ENABLE_JUMBO_NUM,
        CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
        CMD_LINE_OPT_PARSE_PTYPE_NUM,
        CMD_LINE_OPT_PARSE_PER_PORT_POOL,
        CMD_LINE_OPT_MODE_NUM,
        CMD_LINE_OPT_EVENTQ_SYNC_NUM,
        CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM,
};

static const struct option lgopts[] = {
        {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
        {CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
        {CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
        {CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
        {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, CMD_LINE_OPT_ENABLE_JUMBO_NUM},
        {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
        {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
        {CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
        {CMD_LINE_OPT_MODE, 1, 0, CMD_LINE_OPT_MODE_NUM},
        {CMD_LINE_OPT_EVENTQ_SYNC, 1, 0, CMD_LINE_OPT_EVENTQ_SYNC_NUM},
        {CMD_LINE_OPT_EVENT_ETH_RX_QUEUES, 1, 0,
                                        CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM},
        {NULL, 0, 0, 0}
};

/*
 * This expression is used to calculate the number of mbufs needed
 * depending on user input, taking into account memory for rx and
 * tx hardware rings, cache per lcore and mtable per port per lcore.
 * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum
 * value of 8192.
 */
#define NB_MBUF(nports) RTE_MAX( \
        (nports*nb_rx_queue*nb_rxd + \
        nports*nb_lcores*MAX_PKT_BURST + \
        nports*n_tx_queue*nb_txd + \
        nb_lcores*MEMPOOL_CACHE_SIZE), \
        (unsigned)8192)
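/*
 * Worked example with illustrative values (MAX_PKT_BURST and
 * MEMPOOL_CACHE_SIZE are defined in l3fwd.h): for nports = 2,
 * nb_rx_queue = 1, nb_rxd = nb_txd = 1024, nb_lcores = n_tx_queue = 4,
 * MAX_PKT_BURST = 32 and MEMPOOL_CACHE_SIZE = 256 the sum is
 * 2*1*1024 + 2*4*32 + 2*4*1024 + 4*256 = 11520 mbufs, above the
 * 8192 floor.
 */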

/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
        int opt, ret;
        char **argvopt;
        int option_index;
        char *prgname = argv[0];
        uint8_t lcore_params = 0;
        uint8_t eventq_sched = 0;
        uint8_t eth_rx_q = 0;
        struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

        argvopt = argv;

        /* Error or normal output strings. */
        while ((opt = getopt_long(argc, argvopt, short_options,
                                lgopts, &option_index)) != EOF) {

                switch (opt) {
                /* portmask */
                case 'p':
                        enabled_port_mask = parse_portmask(optarg);
                        if (enabled_port_mask == 0) {
                                fprintf(stderr, "Invalid portmask\n");
                                print_usage(prgname);
                                return -1;
                        }
                        break;

                case 'P':
                        promiscuous_on = 1;
                        break;

                case 'E':
                        l3fwd_em_on = 1;
                        break;

                case 'L':
                        l3fwd_lpm_on = 1;
                        break;

                /* long options */
                case CMD_LINE_OPT_CONFIG_NUM:
                        ret = parse_config(optarg);
                        if (ret) {
                                fprintf(stderr, "Invalid config\n");
                                print_usage(prgname);
                                return -1;
                        }
                        lcore_params = 1;
                        break;

                case CMD_LINE_OPT_ETH_DEST_NUM:
                        parse_eth_dest(optarg);
                        break;

                case CMD_LINE_OPT_NO_NUMA_NUM:
                        numa_on = 0;
                        break;

                case CMD_LINE_OPT_IPV6_NUM:
                        ipv6 = 1;
                        break;

                case CMD_LINE_OPT_ENABLE_JUMBO_NUM: {
                        const struct option lenopts = {
                                "max-pkt-len", required_argument, 0, 0
                        };

                        port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
                        port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;

                        /*
                         * if no max-pkt-len set, use the default
                         * value RTE_ETHER_MAX_LEN.
                         */
                        if (getopt_long(argc, argvopt, "",
                                        &lenopts, &option_index) == 0) {
                                ret = parse_max_pkt_len(optarg);
                                if (ret < 64 || ret > MAX_JUMBO_PKT_LEN) {
                                        fprintf(stderr,
                                                "invalid maximum packet length\n");
                                        print_usage(prgname);
                                        return -1;
                                }
                                port_conf.rxmode.max_rx_pkt_len = ret;
                        }
                        break;
                }

                case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM:
                        ret = parse_hash_entry_number(optarg);
                        if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
                                hash_entry_number = ret;
                        } else {
                                fprintf(stderr, "invalid hash entry number\n");
                                print_usage(prgname);
                                return -1;
                        }
                        break;

                case CMD_LINE_OPT_PARSE_PTYPE_NUM:
                        printf("soft parse-ptype is enabled\n");
                        parse_ptype = 1;
                        break;

                case CMD_LINE_OPT_PARSE_PER_PORT_POOL:
                        printf("per port buffer pool is enabled\n");
                        per_port_pool = 1;
                        break;

                case CMD_LINE_OPT_MODE_NUM:
                        parse_mode(optarg);
                        break;

                case CMD_LINE_OPT_EVENTQ_SYNC_NUM:
                        parse_eventq_sched(optarg);
                        eventq_sched = 1;
                        break;

                case CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM:
                        parse_event_eth_rx_queues(optarg);
                        eth_rx_q = 1;
                        break;

                default:
                        print_usage(prgname);
                        return -1;
                }
        }

        /* If both LPM and EM are selected, return error. */
        if (l3fwd_lpm_on && l3fwd_em_on) {
                fprintf(stderr, "LPM and EM are mutually exclusive, select only one\n");
                return -1;
        }

        if (evt_rsrc->enabled && lcore_params) {
                fprintf(stderr, "lcore config is not valid when event mode is selected\n");
                return -1;
        }

        if (!evt_rsrc->enabled && eth_rx_q) {
                fprintf(stderr, "eth_rx_queues is valid only when event mode is selected\n");
                return -1;
        }

        if (!evt_rsrc->enabled && eventq_sched) {
                fprintf(stderr, "eventq_sched is valid only when event mode is selected\n");
                return -1;
        }

        /*
         * If neither match mode is selected, default to
         * longest-prefix match.
         */
        if (!l3fwd_lpm_on && !l3fwd_em_on) {
                fprintf(stderr, "Neither LPM nor EM selected, defaulting to LPM\n");
                l3fwd_lpm_on = 1;
        }

        /*
         * The ipv6 and hash flags are valid only for
         * exact match; reset them to defaults for
         * longest-prefix match.
         */
        if (l3fwd_lpm_on) {
                ipv6 = 0;
                hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
        }

        if (optind >= 0)
                argv[optind-1] = prgname;

        ret = optind-1;
        optind = 1; /* reset getopt lib */
        return ret;
}

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
        char buf[RTE_ETHER_ADDR_FMT_SIZE];
        rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
        printf("%s%s", name, buf);
}

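/* Create (or reuse) the per-socket mbuf pools for a port and hook up
 * each enabled lcore's lookup structures. With --per-port-pool every
 * port gets its own pools; otherwise the caller passes portid 0 and
 * the pools are shared by all ports.
 */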
int
init_mem(uint16_t portid, unsigned int nb_mbuf)
{
        struct lcore_conf *qconf;
        int socketid;
        unsigned lcore_id;
        char s[64];

        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                if (rte_lcore_is_enabled(lcore_id) == 0)
                        continue;

                if (numa_on)
                        socketid = rte_lcore_to_socket_id(lcore_id);
                else
                        socketid = 0;

                if (socketid >= NB_SOCKETS) {
                        rte_exit(EXIT_FAILURE,
                                "Socket %d of lcore %u is out of range %d\n",
                                socketid, lcore_id, NB_SOCKETS);
                }

                if (pktmbuf_pool[portid][socketid] == NULL) {
                        snprintf(s, sizeof(s), "mbuf_pool_%d:%d",
                                portid, socketid);
                        pktmbuf_pool[portid][socketid] =
                                rte_pktmbuf_pool_create(s, nb_mbuf,
                                        MEMPOOL_CACHE_SIZE, 0,
                                        RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
                        if (pktmbuf_pool[portid][socketid] == NULL)
                                rte_exit(EXIT_FAILURE,
                                        "Cannot init mbuf pool on socket %d\n",
                                        socketid);
                        else
                                printf("Allocated mbuf pool on socket %d\n",
                                        socketid);

                        /* Set up either LPM or EM (i.e., hash), but only
                         * once per available socket.
                         */
                        if (!lkp_per_socket[socketid]) {
                                l3fwd_lkp.setup(socketid);
                                lkp_per_socket[socketid] = 1;
                        }
                }
                qconf = &lcore_conf[lcore_id];
                qconf->ipv4_lookup_struct =
                        l3fwd_lkp.get_ipv4_lookup_struct(socketid);
                qconf->ipv6_lookup_struct =
                        l3fwd_lkp.get_ipv6_lookup_struct(socketid);
        }
        return 0;
}

/* Check the link status of all enabled ports, retrying for up to 9 s,
 * and finally print the status of each port.
 */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
        uint16_t portid;
        uint8_t count, all_ports_up, print_flag = 0;
        struct rte_eth_link link;
        int ret;
        char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];

        printf("\nChecking link status");
        fflush(stdout);
        for (count = 0; count <= MAX_CHECK_TIME; count++) {
                if (force_quit)
                        return;
                all_ports_up = 1;
                RTE_ETH_FOREACH_DEV(portid) {
                        if (force_quit)
                                return;
                        if ((port_mask & (1 << portid)) == 0)
                                continue;
                        memset(&link, 0, sizeof(link));
                        ret = rte_eth_link_get_nowait(portid, &link);
                        if (ret < 0) {
                                all_ports_up = 0;
                                if (print_flag == 1)
                                        printf("Port %u link get failed: %s\n",
                                                portid, rte_strerror(-ret));
                                continue;
                        }
                        /* print link status if flag set */
                        if (print_flag == 1) {
                                rte_eth_link_to_str(link_status_text,
                                        sizeof(link_status_text), &link);
                                printf("Port %d %s\n", portid,
                                        link_status_text);
                                continue;
                        }
                        /* clear all_ports_up flag if any link down */
                        if (link.link_status == ETH_LINK_DOWN) {
                                all_ports_up = 0;
                                break;
                        }
                }
                /* after finally printing all link status, get out */
                if (print_flag == 1)
                        break;

                if (all_ports_up == 0) {
                        printf(".");
                        fflush(stdout);
                        rte_delay_ms(CHECK_INTERVAL);
                }

                /* set the print_flag if all ports up or timeout */
                if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
                        print_flag = 1;
                        printf("done\n");
                }
        }
}

static void
signal_handler(int signum)
{
        if (signum == SIGINT || signum == SIGTERM) {
                printf("\n\nSignal %d received, preparing to exit...\n",
                        signum);
                force_quit = true;
        }
}

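/* Make sure the packet types needed by the lookup mode are available:
 * either install the software ptype-parsing Rx callback (--parse-ptype)
 * or verify that the PMD reports them. Returns 1 on success, 0 on
 * failure.
 */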
static int
prepare_ptype_parser(uint16_t portid, uint16_t queueid)
{
        if (parse_ptype) {
                printf("Port %d: softly parse packet type info\n", portid);
                if (rte_eth_add_rx_callback(portid, queueid,
                                l3fwd_lkp.cb_parse_ptype,
                                NULL))
                        return 1;

                printf("Failed to add rx callback: port=%d\n", portid);
                return 0;
        }

        if (l3fwd_lkp.check_ptype(portid))
                return 1;

        printf("port %d cannot parse packet type, please add --%s\n",
                portid, CMD_LINE_OPT_PARSE_PTYPE);
        return 0;
}

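/* Poll-mode setup: validate the lcore/port configuration, then for each
 * enabled port configure the device, create the mbuf pools, and set up
 * one Tx queue per (lcore, port) pair plus the per-lcore Rx queues.
 */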
static void
l3fwd_poll_resource_setup(void)
{
        uint8_t nb_rx_queue, queue, socketid;
        struct rte_eth_dev_info dev_info;
        uint32_t n_tx_queue, nb_lcores;
        struct rte_eth_txconf *txconf;
        struct lcore_conf *qconf;
        uint16_t queueid, portid;
        unsigned int nb_ports;
        unsigned int lcore_id;
        int ret;

        if (check_lcore_params() < 0)
                rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");

        ret = init_lcore_rx_queues();
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

        nb_ports = rte_eth_dev_count_avail();

        if (check_port_config() < 0)
                rte_exit(EXIT_FAILURE, "check_port_config failed\n");

        nb_lcores = rte_lcore_count();

        /* initialize all ports */
        RTE_ETH_FOREACH_DEV(portid) {
                struct rte_eth_conf local_port_conf = port_conf;

                /* skip ports that are not enabled */
                if ((enabled_port_mask & (1 << portid)) == 0) {
                        printf("\nSkipping disabled port %d\n", portid);
                        continue;
                }

                /* init port */
                printf("Initializing port %d ... ", portid);
                fflush(stdout);

                nb_rx_queue = get_port_n_rx_queues(portid);
                n_tx_queue = nb_lcores;
                if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
                        n_tx_queue = MAX_TX_QUEUE_PER_PORT;
                printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
                        nb_rx_queue, (unsigned)n_tx_queue);

                ret = rte_eth_dev_info_get(portid, &dev_info);
                if (ret != 0)
                        rte_exit(EXIT_FAILURE,
                                "Error during getting device (port %u) info: %s\n",
                                portid, strerror(-ret));

                if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                        local_port_conf.txmode.offloads |=
                                DEV_TX_OFFLOAD_MBUF_FAST_FREE;

                local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
                        dev_info.flow_type_rss_offloads;
                if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
                                port_conf.rx_adv_conf.rss_conf.rss_hf) {
                        printf("Port %u modified RSS hash function based on hardware support,"
                                " requested:%#"PRIx64" configured:%#"PRIx64"\n",
                                portid,
                                port_conf.rx_adv_conf.rss_conf.rss_hf,
                                local_port_conf.rx_adv_conf.rss_conf.rss_hf);
                }

                ret = rte_eth_dev_configure(portid, nb_rx_queue,
                                        (uint16_t)n_tx_queue, &local_port_conf);
                if (ret < 0)
                        rte_exit(EXIT_FAILURE,
                                "Cannot configure device: err=%d, port=%d\n",
                                ret, portid);

                ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
                                                       &nb_txd);
                if (ret < 0)
                        rte_exit(EXIT_FAILURE,
                                "Cannot adjust number of descriptors: err=%d, "
                                "port=%d\n", ret, portid);

                ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
                if (ret < 0)
                        rte_exit(EXIT_FAILURE,
                                "Cannot get MAC address: err=%d, port=%d\n",
                                ret, portid);

                print_ethaddr(" Address:", &ports_eth_addr[portid]);
                printf(", ");
                print_ethaddr("Destination:",
                        (const struct rte_ether_addr *)&dest_eth_addr[portid]);
                printf(", ");

                /*
                 * prepare src MACs for each port.
                 */
                rte_ether_addr_copy(&ports_eth_addr[portid],
                        (struct rte_ether_addr *)(val_eth + portid) + 1);

                /* init memory */
                if (!per_port_pool) {
                        /* portid = 0; this is *not* signifying the first port,
                         * rather, it signifies that portid is ignored.
                         */
                        ret = init_mem(0, NB_MBUF(nb_ports));
                } else {
                        ret = init_mem(portid, NB_MBUF(1));
                }
                if (ret < 0)
                        rte_exit(EXIT_FAILURE, "init_mem failed\n");

                /* init one TX queue per couple (lcore,port) */
                queueid = 0;
                for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                        if (rte_lcore_is_enabled(lcore_id) == 0)
                                continue;

                        if (numa_on)
                                socketid =
                                (uint8_t)rte_lcore_to_socket_id(lcore_id);
                        else
                                socketid = 0;

                        printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
                        fflush(stdout);

                        txconf = &dev_info.default_txconf;
                        txconf->offloads = local_port_conf.txmode.offloads;
                        ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
                                                     socketid, txconf);
                        if (ret < 0)
                                rte_exit(EXIT_FAILURE,
                                        "rte_eth_tx_queue_setup: err=%d, "
                                        "port=%d\n", ret, portid);

                        qconf = &lcore_conf[lcore_id];
                        qconf->tx_queue_id[portid] = queueid;
                        queueid++;

                        qconf->tx_port_id[qconf->n_tx_port] = portid;
                        qconf->n_tx_port++;
                }
                printf("\n");
        }

        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                if (rte_lcore_is_enabled(lcore_id) == 0)
                        continue;
                qconf = &lcore_conf[lcore_id];
                printf("\nInitializing rx queues on lcore %u ... ", lcore_id);
                fflush(stdout);
                /* init RX queues */
                for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
                        struct rte_eth_rxconf rxq_conf;

                        portid = qconf->rx_queue_list[queue].port_id;
                        queueid = qconf->rx_queue_list[queue].queue_id;

                        if (numa_on)
                                socketid =
                                (uint8_t)rte_lcore_to_socket_id(lcore_id);
                        else
                                socketid = 0;

                        printf("rxq=%d,%d,%d ", portid, queueid, socketid);
                        fflush(stdout);

                        ret = rte_eth_dev_info_get(portid, &dev_info);
                        if (ret != 0)
                                rte_exit(EXIT_FAILURE,
                                        "Error during getting device (port %u) info: %s\n",
                                        portid, strerror(-ret));

                        rxq_conf = dev_info.default_rxconf;
                        rxq_conf.offloads = port_conf.rxmode.offloads;
                        if (!per_port_pool)
                                ret = rte_eth_rx_queue_setup(portid, queueid,
                                                nb_rxd, socketid,
                                                &rxq_conf,
                                                pktmbuf_pool[0][socketid]);
                        else
                                ret = rte_eth_rx_queue_setup(portid, queueid,
                                                nb_rxd, socketid,
                                                &rxq_conf,
                                                pktmbuf_pool[portid][socketid]);
                        if (ret < 0)
                                rte_exit(EXIT_FAILURE,
                                        "rte_eth_rx_queue_setup: err=%d, port=%d\n",
                                        ret, portid);
                }
        }
}

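/* Map a service to the service lcore currently running the fewest
 * services, then start that lcore. Default mappings on the other
 * service lcores are cleared first so the service runs on exactly one
 * lcore.
 */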
static inline int
l3fwd_service_enable(uint32_t service_id)
{
        uint8_t min_service_count = UINT8_MAX;
        uint32_t slcore_array[RTE_MAX_LCORE];
        unsigned int slcore = 0;
        uint8_t service_count;
        int32_t slcore_count;

        if (!rte_service_lcore_count())
                return -ENOENT;

        slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
        if (slcore_count < 0)
                return -ENOENT;
        /* Get the core which has least number of services running. */
        while (slcore_count--) {
                /* Reset default mapping */
                if (rte_service_map_lcore_set(service_id,
                                slcore_array[slcore_count], 0) != 0)
                        return -ENOENT;
                service_count = rte_service_lcore_count_services(
                                slcore_array[slcore_count]);
                if (service_count < min_service_count) {
                        slcore = slcore_array[slcore_count];
                        min_service_count = service_count;
                }
        }
        if (rte_service_map_lcore_set(service_id, slcore, 1))
                return -ENOENT;
        rte_service_lcore_start(slcore);

        return 0;
}

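/* The eventdev and its Rx/Tx adapters may be backed by software
 * services (e.g. when the device lacks
 * RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED); look up each service id and
 * bind it to a service lcore so the service actually gets to run.
 */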
static void
l3fwd_event_service_setup(void)
{
        struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
        struct rte_event_dev_info evdev_info;
        uint32_t service_id, caps;
        int ret, i;

        rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
        if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
                ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
                                &service_id);
                if (ret != -ESRCH && ret != 0)
                        rte_exit(EXIT_FAILURE,
                                "Error in starting eventdev service\n");
                l3fwd_service_enable(service_id);
        }

        for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
                ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
                                evt_rsrc->rx_adptr.rx_adptr[i], &caps);
                if (ret < 0)
                        rte_exit(EXIT_FAILURE,
                                "Failed to get Rx adapter[%d] caps\n",
                                evt_rsrc->rx_adptr.rx_adptr[i]);
                ret = rte_event_eth_rx_adapter_service_id_get(
                                evt_rsrc->event_d_id,
                                &service_id);
                if (ret != -ESRCH && ret != 0)
                        rte_exit(EXIT_FAILURE,
                                "Error in starting Rx adapter[%d] service\n",
                                evt_rsrc->rx_adptr.rx_adptr[i]);
                l3fwd_service_enable(service_id);
        }

        for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
                ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
                                evt_rsrc->tx_adptr.tx_adptr[i], &caps);
                if (ret < 0)
                        rte_exit(EXIT_FAILURE,
                                "Failed to get Tx adapter[%d] caps\n",
                                evt_rsrc->tx_adptr.tx_adptr[i]);
                ret = rte_event_eth_tx_adapter_service_id_get(
                                evt_rsrc->event_d_id,
                                &service_id);
                if (ret != -ESRCH && ret != 0)
                        rte_exit(EXIT_FAILURE,
                                "Error in starting Tx adapter[%d] service\n",
                                evt_rsrc->tx_adptr.tx_adptr[i]);
                l3fwd_service_enable(service_id);
        }
}

int
main(int argc, char **argv)
{
        struct l3fwd_event_resources *evt_rsrc;
        struct lcore_conf *qconf;
        uint16_t queueid, portid;
        unsigned int lcore_id;
        uint8_t queue;
        int i, ret;

        /* init EAL */
        ret = rte_eal_init(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
        argc -= ret;
        argv += ret;

        force_quit = false;
        signal(SIGINT, signal_handler);
        signal(SIGTERM, signal_handler);

        /* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
        for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
                dest_eth_addr[portid] =
                        RTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
                *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
        }

        evt_rsrc = l3fwd_get_eventdev_rsrc();
        /* parse application arguments (after the EAL ones) */
        ret = parse_args(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");

        /* Setup function pointers for lookup method. */
        setup_l3fwd_lookup_tables();

        evt_rsrc->per_port_pool = per_port_pool;
        evt_rsrc->pkt_pool = pktmbuf_pool;
        evt_rsrc->port_mask = enabled_port_mask;
        /* Configure eventdev parameters if user has requested */
        if (evt_rsrc->enabled) {
                l3fwd_event_resource_setup(&port_conf);
                if (l3fwd_em_on)
                        l3fwd_lkp.main_loop = evt_rsrc->ops.em_event_loop;
                else
                        l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;
                l3fwd_event_service_setup();
        } else
                l3fwd_poll_resource_setup();

        /* start ports */
        RTE_ETH_FOREACH_DEV(portid) {
                if ((enabled_port_mask & (1 << portid)) == 0)
                        continue;
                /* Start device */
                ret = rte_eth_dev_start(portid);
                if (ret < 0)
                        rte_exit(EXIT_FAILURE,
                                "rte_eth_dev_start: err=%d, port=%d\n",
                                ret, portid);

                /*
                 * If enabled, put device in promiscuous mode.
                 * This allows IO forwarding mode to forward packets
                 * to itself through 2 cross-connected ports of the
                 * target machine.
                 */
                if (promiscuous_on) {
                        ret = rte_eth_promiscuous_enable(portid);
                        if (ret != 0)
                                rte_exit(EXIT_FAILURE,
                                        "rte_eth_promiscuous_enable: err=%s, port=%u\n",
                                        rte_strerror(-ret), portid);
                }
        }

        printf("\n");

        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                if (rte_lcore_is_enabled(lcore_id) == 0)
                        continue;
                qconf = &lcore_conf[lcore_id];
                for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
                        portid = qconf->rx_queue_list[queue].port_id;
                        queueid = qconf->rx_queue_list[queue].queue_id;
                        if (prepare_ptype_parser(portid, queueid) == 0)
                                rte_exit(EXIT_FAILURE, "ptype check fails\n");
                }
        }

        check_all_ports_link_status(enabled_port_mask);

        ret = 0;
        /* launch per-lcore init on every lcore */
        rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MAIN);
        if (evt_rsrc->enabled) {
                for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++)
                        rte_event_eth_rx_adapter_stop(
                                        evt_rsrc->rx_adptr.rx_adptr[i]);
                for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++)
                        rte_event_eth_tx_adapter_stop(
                                        evt_rsrc->tx_adptr.tx_adptr[i]);

                RTE_ETH_FOREACH_DEV(portid) {
                        if ((enabled_port_mask & (1 << portid)) == 0)
                                continue;
                        ret = rte_eth_dev_stop(portid);
                        if (ret != 0)
                                printf("rte_eth_dev_stop: err=%d, port=%u\n",
                                        ret, portid);
                }

                rte_eal_mp_wait_lcore();
                RTE_ETH_FOREACH_DEV(portid) {
                        if ((enabled_port_mask & (1 << portid)) == 0)
                                continue;
                        rte_eth_dev_close(portid);
                }

                rte_event_dev_stop(evt_rsrc->event_d_id);
                rte_event_dev_close(evt_rsrc->event_d_id);
        } else {
                rte_eal_mp_wait_lcore();

                RTE_ETH_FOREACH_DEV(portid) {
                        if ((enabled_port_mask & (1 << portid)) == 0)
                                continue;
                        printf("Closing port %d...", portid);
                        ret = rte_eth_dev_stop(portid);
                        if (ret != 0)
                                printf("rte_eth_dev_stop: err=%d, port=%u\n",
                                        ret, portid);
                        rte_eth_dev_close(portid);
                        printf(" Done\n");
                }
        }
        printf("Bye...\n");

        return ret;
}