1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2021 Intel Corporation
3 */
4
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <stdint.h>
8 #include <inttypes.h>
9 #include <sys/types.h>
10 #include <string.h>
11 #include <sys/queue.h>
12 #include <stdarg.h>
13 #include <errno.h>
14 #include <getopt.h>
15 #include <signal.h>
16 #include <stdbool.h>
17
18 #include <rte_common.h>
19 #include <rte_vect.h>
20 #include <rte_byteorder.h>
21 #include <rte_log.h>
22 #include <rte_malloc.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_eal.h>
26 #include <rte_launch.h>
27 #include <rte_cycles.h>
28 #include <rte_prefetch.h>
29 #include <rte_lcore.h>
30 #include <rte_per_lcore.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_interrupts.h>
33 #include <rte_random.h>
34 #include <rte_debug.h>
35 #include <rte_ether.h>
36 #include <rte_mempool.h>
37 #include <rte_mbuf.h>
38 #include <rte_ip.h>
39 #include <rte_tcp.h>
40 #include <rte_udp.h>
41 #include <rte_string_fns.h>
42 #include <rte_cpuflags.h>
43
44 #include <cmdline_parse.h>
45 #include <cmdline_parse_etheraddr.h>
46
47 #include "l3fwd.h"
48 #include "l3fwd_event.h"
49 #include "l3fwd_route.h"
50
51 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_LCORE
52 #define MAX_RX_QUEUE_PER_PORT 128
53
54 #define MAX_LCORE_PARAMS 1024
55
56 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
57 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
58
59 /* Promiscuous mode is off on all ports by default. */
60 static int promiscuous_on;
61
62 /* Select Longest Prefix Match, Exact Match or Forwarding Information Base lookup. */
63 enum L3FWD_LOOKUP_MODE {
64 L3FWD_LOOKUP_DEFAULT,
65 L3FWD_LOOKUP_LPM,
66 L3FWD_LOOKUP_EM,
67 L3FWD_LOOKUP_FIB
68 };
69 static enum L3FWD_LOOKUP_MODE lookup_mode;
70
71 /* Global variables. */
72
73 static int numa_on = 1; /**< NUMA is enabled by default. */
74 static int parse_ptype; /**< Parse packet type using Rx callback; */
75 /**< disabled by default. */
76 static int per_port_pool; /**< Use separate buffer pools per port; */
77 /**< disabled by default. */
78
79 volatile bool force_quit;
80
81 /* ethernet addresses of ports */
82 uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
83 struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
84
85 xmm_t val_eth[RTE_MAX_ETHPORTS];
86
87 /* mask of enabled ports */
88 uint32_t enabled_port_mask;
89
90 /* Used only in exact match mode. */
91 int ipv6; /**< ipv6 is false by default. */
92 uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
93
94 struct lcore_conf lcore_conf[RTE_MAX_LCORE];
95
96 struct parm_cfg parm_config;
97
98 struct lcore_params {
99 uint16_t port_id;
100 uint8_t queue_id;
101 uint8_t lcore_id;
102 } __rte_cache_aligned;
103
104 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
105 static struct lcore_params lcore_params_array_default[] = {
106 {0, 0, 2},
107 {0, 1, 2},
108 {0, 2, 2},
109 {1, 0, 2},
110 {1, 1, 2},
111 {1, 2, 2},
112 {2, 0, 2},
113 {3, 0, 3},
114 {3, 1, 3},
115 };
116
117 static struct lcore_params * lcore_params = lcore_params_array_default;
118 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
119 sizeof(lcore_params_array_default[0]);
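/*
 * For illustration, the default table above is equivalent to passing
 * --config="(0,0,2),(0,1,2),(0,2,2),(1,0,2),(1,1,2),(1,2,2),(2,0,2),(3,0,3),(3,1,3)"
 * on the command line: lcore 2 polls queues 0-2 of ports 0 and 1 plus
 * queue 0 of port 2, while lcore 3 polls queues 0-1 of port 3.
 */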
120
121 static struct rte_eth_conf port_conf = {
122 .rxmode = {
123 .mq_mode = RTE_ETH_MQ_RX_RSS,
124 .split_hdr_size = 0,
125 .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
126 },
127 .rx_adv_conf = {
128 .rss_conf = {
129 .rss_key = NULL,
130 .rss_hf = RTE_ETH_RSS_IP,
131 },
132 },
133 .txmode = {
134 .mq_mode = RTE_ETH_MQ_TX_NONE,
135 },
136 };
137
138 static uint32_t max_pkt_len;
139
140 static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
141 static struct rte_mempool *vector_pool[RTE_MAX_ETHPORTS];
142 static uint8_t lkp_per_socket[NB_SOCKETS];
143
144 struct l3fwd_lkp_mode {
145 void (*read_config_files)(void);
146 void (*setup)(int);
147 int (*check_ptype)(int);
148 rte_rx_callback_fn cb_parse_ptype;
149 int (*main_loop)(void *);
150 void* (*get_ipv4_lookup_struct)(int);
151 void* (*get_ipv6_lookup_struct)(int);
152 void (*free_routes)(void);
153 };
154
155 static struct l3fwd_lkp_mode l3fwd_lkp;
156
157 static struct l3fwd_lkp_mode l3fwd_em_lkp = {
158 .read_config_files = read_config_files_em,
159 .setup = setup_hash,
160 .check_ptype = em_check_ptype,
161 .cb_parse_ptype = em_cb_parse_ptype,
162 .main_loop = em_main_loop,
163 .get_ipv4_lookup_struct = em_get_ipv4_l3fwd_lookup_struct,
164 .get_ipv6_lookup_struct = em_get_ipv6_l3fwd_lookup_struct,
165 .free_routes = em_free_routes,
166 };
167
168 static struct l3fwd_lkp_mode l3fwd_lpm_lkp = {
169 .read_config_files = read_config_files_lpm,
170 .setup = setup_lpm,
171 .check_ptype = lpm_check_ptype,
172 .cb_parse_ptype = lpm_cb_parse_ptype,
173 .main_loop = lpm_main_loop,
174 .get_ipv4_lookup_struct = lpm_get_ipv4_l3fwd_lookup_struct,
175 .get_ipv6_lookup_struct = lpm_get_ipv6_l3fwd_lookup_struct,
176 .free_routes = lpm_free_routes,
177 };
178
179 static struct l3fwd_lkp_mode l3fwd_fib_lkp = {
180 .read_config_files = read_config_files_lpm,
181 .setup = setup_fib,
182 .check_ptype = lpm_check_ptype,
183 .cb_parse_ptype = lpm_cb_parse_ptype,
184 .main_loop = fib_main_loop,
185 .get_ipv4_lookup_struct = fib_get_ipv4_l3fwd_lookup_struct,
186 .get_ipv6_lookup_struct = fib_get_ipv6_l3fwd_lookup_struct,
187 .free_routes = lpm_free_routes,
188 };
189
190 /*
191 * 198.18.0.0/16 is set aside for RFC2544 benchmarking (RFC5735).
192 * 198.18.{0-15}.0/24 = Port {0-15}
193 */
194 const struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
195 {RTE_IPV4(198, 18, 0, 0), 24, 0},
196 {RTE_IPV4(198, 18, 1, 0), 24, 1},
197 {RTE_IPV4(198, 18, 2, 0), 24, 2},
198 {RTE_IPV4(198, 18, 3, 0), 24, 3},
199 {RTE_IPV4(198, 18, 4, 0), 24, 4},
200 {RTE_IPV4(198, 18, 5, 0), 24, 5},
201 {RTE_IPV4(198, 18, 6, 0), 24, 6},
202 {RTE_IPV4(198, 18, 7, 0), 24, 7},
203 {RTE_IPV4(198, 18, 8, 0), 24, 8},
204 {RTE_IPV4(198, 18, 9, 0), 24, 9},
205 {RTE_IPV4(198, 18, 10, 0), 24, 10},
206 {RTE_IPV4(198, 18, 11, 0), 24, 11},
207 {RTE_IPV4(198, 18, 12, 0), 24, 12},
208 {RTE_IPV4(198, 18, 13, 0), 24, 13},
209 {RTE_IPV4(198, 18, 14, 0), 24, 14},
210 {RTE_IPV4(198, 18, 15, 0), 24, 15},
211 };
212
213 /*
214 * 2001:200::/48 is the IANA-reserved range for IPv6 benchmarking (RFC5180).
215 * 2001:200:0:{0-f}::/64 = Port {0-15}
216 */
217 const struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
218 {{32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 0},
219 {{32, 1, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 1},
220 {{32, 1, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 2},
221 {{32, 1, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 3},
222 {{32, 1, 2, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 4},
223 {{32, 1, 2, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 5},
224 {{32, 1, 2, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 6},
225 {{32, 1, 2, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 7},
226 {{32, 1, 2, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 8},
227 {{32, 1, 2, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 9},
228 {{32, 1, 2, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 10},
229 {{32, 1, 2, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 11},
230 {{32, 1, 2, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 12},
231 {{32, 1, 2, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 13},
232 {{32, 1, 2, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 14},
233 {{32, 1, 2, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 15},
234 };
235
236 /*
237 * APIs called during initialization to set up ACL/EM/LPM rules.
238 */
239 static void
240 l3fwd_set_rule_ipv4_name(const char *optarg)
241 {
242 parm_config.rule_ipv4_name = optarg;
243 }
244
245 static void
246 l3fwd_set_rule_ipv6_name(const char *optarg)
247 {
248 parm_config.rule_ipv6_name = optarg;
249 }
250
251 /*
252 * Set up the lookup method used for forwarding.
253 * Exact match, longest prefix match and forwarding information
254 * base are currently supported.
255 */
256 static void
257 setup_l3fwd_lookup_tables(void)
258 {
259 /* Setup HASH lookup functions. */
260 if (lookup_mode == L3FWD_LOOKUP_EM)
261 l3fwd_lkp = l3fwd_em_lkp;
262 /* Setup FIB lookup functions. */
263 else if (lookup_mode == L3FWD_LOOKUP_FIB)
264 l3fwd_lkp = l3fwd_fib_lkp;
265 /* Setup LPM lookup functions. */
266 else
267 l3fwd_lkp = l3fwd_lpm_lkp;
268 }
269
270 static int
271 check_lcore_params(void)
272 {
273 uint8_t queue, lcore;
274 uint16_t i;
275 int socketid;
276
277 for (i = 0; i < nb_lcore_params; ++i) {
278 queue = lcore_params[i].queue_id;
279 if (queue >= MAX_RX_QUEUE_PER_PORT) {
280 printf("invalid queue number: %hhu\n", queue);
281 return -1;
282 }
283 lcore = lcore_params[i].lcore_id;
284 if (!rte_lcore_is_enabled(lcore)) {
285 printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
286 return -1;
287 }
288 socketid = rte_lcore_to_socket_id(lcore);
289 if (socketid != 0 && numa_on == 0) {
290 printf("warning: lcore %hhu is on socket %d with numa off\n",
291 lcore, socketid);
292 }
293 }
294 return 0;
295 }
296
297 static int
298 check_port_config(void)
299 {
300 uint16_t portid;
301 uint16_t i;
302
303 for (i = 0; i < nb_lcore_params; ++i) {
304 portid = lcore_params[i].port_id;
305 if ((enabled_port_mask & (1 << portid)) == 0) {
306 printf("port %u is not enabled in port mask\n", portid);
307 return -1;
308 }
309 if (!rte_eth_dev_is_valid_port(portid)) {
310 printf("port %u is not present on the board\n", portid);
311 return -1;
312 }
313 }
314 return 0;
315 }
316
317 static uint8_t
318 get_port_n_rx_queues(const uint16_t port)
319 {
320 int queue = -1;
321 uint16_t i;
322
323 for (i = 0; i < nb_lcore_params; ++i) {
324 if (lcore_params[i].port_id == port) {
325 if (lcore_params[i].queue_id == queue+1)
326 queue = lcore_params[i].queue_id;
327 else
328 rte_exit(EXIT_FAILURE, "queue ids of the port %d must be"
329 " in sequence and must start with 0\n",
330 lcore_params[i].port_id);
331 }
332 }
333 return (uint8_t)(++queue);
334 }
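/*
 * Worked example (illustrative): with lcore_params entries (0,0,2), (0,1,2)
 * and (0,2,2), get_port_n_rx_queues(0) returns 3. A gap such as (0,0,2)
 * followed by (0,2,2) terminates the application, since the queue ids of a
 * port must be contiguous and start at 0.
 */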
335
336 static int
337 init_lcore_rx_queues(void)
338 {
339 uint16_t i, nb_rx_queue;
340 uint8_t lcore;
341
342 for (i = 0; i < nb_lcore_params; ++i) {
343 lcore = lcore_params[i].lcore_id;
344 nb_rx_queue = lcore_conf[lcore].n_rx_queue;
345 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
346 printf("error: too many queues (%u) for lcore: %u\n",
347 (unsigned)nb_rx_queue + 1, (unsigned)lcore);
348 return -1;
349 } else {
350 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
351 lcore_params[i].port_id;
352 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
353 lcore_params[i].queue_id;
354 lcore_conf[lcore].n_rx_queue++;
355 }
356 }
357 return 0;
358 }
359
360 /* display usage */
361 static void
362 print_usage(const char *prgname)
363 {
364 fprintf(stderr, "%s [EAL options] --"
365 " -p PORTMASK"
366 " --rule_ipv4=FILE"
367 " --rule_ipv6=FILE"
368 " [-P]"
369 " [--lookup]"
370 " --config (port,queue,lcore)[,(port,queue,lcore)]"
371 " [--rx-queue-size NPKTS]"
372 " [--tx-queue-size NPKTS]"
373 " [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
374 " [--max-pkt-len PKTLEN]"
375 " [--no-numa]"
376 " [--hash-entry-num]"
377 " [--ipv6]"
378 " [--parse-ptype]"
379 " [--per-port-pool]"
380 " [--mode]"
381 " [--eventq-sched]"
382 " [--event-vector [--event-vector-size SIZE] [--event-vector-tmo NS]]"
383 " [-E]"
384 " [-L]\n\n"
385
386 " -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
387 " -P : Enable promiscuous mode\n"
388 " --lookup: Select the lookup method\n"
389 " Default: lpm\n"
390 " Accepted: em (Exact Match), lpm (Longest Prefix Match), fib (Forwarding Information Base)\n"
391 " --config (port,queue,lcore): Rx queue configuration\n"
392 " --rx-queue-size NPKTS: Rx queue size in decimal\n"
393 " Default: %d\n"
394 " --tx-queue-size NPKTS: Tx queue size in decimal\n"
395 " Default: %d\n"
396 " --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n"
397 " --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
398 " --no-numa: Disable numa awareness\n"
399 " --hash-entry-num: Specify the hash entry number in hexadecimal to be setup\n"
400 " --ipv6: Set if running ipv6 packets\n"
401 " --parse-ptype: Set to use software to analyze packet type\n"
402 " --per-port-pool: Use separate buffer pool per port\n"
403 " --mode: Packet transfer mode for I/O, poll or eventdev\n"
404 " Default mode = poll\n"
405 " --eventq-sched: Event queue synchronization method\n"
406 " ordered, atomic or parallel.\n"
407 " Default: atomic\n"
408 " Valid only if --mode=eventdev\n"
409 " --event-eth-rxqs: Number of ethernet RX queues per device.\n"
410 " Default: 1\n"
411 " Valid only if --mode=eventdev\n"
412 " --event-vector: Enable event vectorization.\n"
413 " --event-vector-size: Max vector size if event vectorization is enabled.\n"
414 " --event-vector-tmo: Max timeout to form vector in nanoseconds if event vectorization is enabled\n"
415 " -E : Enable exact match (legacy flag, please use --lookup=em instead)\n"
416 " -L : Enable longest prefix match (legacy flag, please use --lookup=lpm instead)\n"
417 " --rule_ipv4=FILE: Specify the ipv4 rules entries file.\n"
418 " Each rule occupies one line.\n"
419 " --rule_ipv6=FILE: Specify the ipv6 rules entries file.\n\n",
420 prgname, RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT);
421 }
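/*
 * Illustrative invocation (binary name, core list and file names are
 * examples only):
 *
 *   ./dpdk-l3fwd -l 1-2 -n 4 -- -p 0x3 -P --lookup=lpm \
 *       --config="(0,0,1),(1,0,2)" --eth-dest=0,00:11:22:33:44:55 \
 *       --rule_ipv4=rule_ipv4.cfg --rule_ipv6=rule_ipv6.cfg
 *
 * This enables ports 0 and 1 in promiscuous mode, polls port 0 queue 0 on
 * lcore 1 and port 1 queue 0 on lcore 2, and uses longest-prefix-match
 * lookup with routes read from the two rule files.
 */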
422
423 static int
424 parse_max_pkt_len(const char *pktlen)
425 {
426 char *end = NULL;
427 unsigned long len;
428
429 /* parse decimal string */
430 len = strtoul(pktlen, &end, 10);
431 if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
432 return -1;
433
434 if (len == 0)
435 return -1;
436
437 return len;
438 }
439
440 static int
441 parse_portmask(const char *portmask)
442 {
443 char *end = NULL;
444 unsigned long pm;
445
446 /* parse hexadecimal string */
447 pm = strtoul(portmask, &end, 16);
448 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
449 return 0;
450
451 return pm;
452 }
453
454 static int
455 parse_hash_entry_number(const char *hash_entry_num)
456 {
457 char *end = NULL;
458 unsigned long hash_en;
459 /* parse hexadecimal string */
460 hash_en = strtoul(hash_entry_num, &end, 16);
461 if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
462 return -1;
463
464 if (hash_en == 0)
465 return -1;
466
467 return hash_en;
468 }
469
470 static int
471 parse_config(const char *q_arg)
472 {
473 char s[256];
474 const char *p, *p0 = q_arg;
475 char *end;
476 enum fieldnames {
477 FLD_PORT = 0,
478 FLD_QUEUE,
479 FLD_LCORE,
480 _NUM_FLD
481 };
482 unsigned long int_fld[_NUM_FLD];
483 char *str_fld[_NUM_FLD];
484 int i;
485 unsigned size;
486
487 nb_lcore_params = 0;
488
489 while ((p = strchr(p0,'(')) != NULL) {
490 ++p;
491 if((p0 = strchr(p,')')) == NULL)
492 return -1;
493
494 size = p0 - p;
495 if(size >= sizeof(s))
496 return -1;
497
498 snprintf(s, sizeof(s), "%.*s", size, p);
499 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
500 return -1;
501 for (i = 0; i < _NUM_FLD; i++){
502 errno = 0;
503 int_fld[i] = strtoul(str_fld[i], &end, 0);
504 if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
505 return -1;
506 }
507 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
508 printf("exceeded max number of lcore params: %hu\n",
509 nb_lcore_params);
510 return -1;
511 }
512 lcore_params_array[nb_lcore_params].port_id =
513 (uint8_t)int_fld[FLD_PORT];
514 lcore_params_array[nb_lcore_params].queue_id =
515 (uint8_t)int_fld[FLD_QUEUE];
516 lcore_params_array[nb_lcore_params].lcore_id =
517 (uint8_t)int_fld[FLD_LCORE];
518 ++nb_lcore_params;
519 }
520 lcore_params = lcore_params_array;
521 return 0;
522 }
523
524 static void
525 parse_eth_dest(const char *optarg)
526 {
527 uint16_t portid;
528 char *port_end;
529 uint8_t c, *dest, peer_addr[6];
530
531 errno = 0;
532 portid = strtoul(optarg, &port_end, 10);
533 if (errno != 0 || port_end == optarg || *port_end++ != ',')
534 rte_exit(EXIT_FAILURE,
535 "Invalid eth-dest: %s", optarg);
536 if (portid >= RTE_MAX_ETHPORTS)
537 rte_exit(EXIT_FAILURE,
538 "eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
539 portid, RTE_MAX_ETHPORTS);
540
541 if (cmdline_parse_etheraddr(NULL, port_end,
542 &peer_addr, sizeof(peer_addr)) < 0)
543 rte_exit(EXIT_FAILURE,
544 "Invalid ethernet address: %s\n",
545 port_end);
546 dest = (uint8_t *)&dest_eth_addr[portid];
547 for (c = 0; c < 6; c++)
548 dest[c] = peer_addr[c];
549 *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
550 }
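/*
 * Example (illustrative): --eth-dest=0,de:ad:be:ef:01:02 makes packets
 * forwarded out of port 0 carry destination MAC de:ad:be:ef:01:02 instead
 * of the pre-initialized 02:00:00:00:00:<port id> address.
 */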
551
552 static void
553 parse_mode(const char *optarg)
554 {
555 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
556
557 if (!strcmp(optarg, "poll"))
558 evt_rsrc->enabled = false;
559 else if (!strcmp(optarg, "eventdev"))
560 evt_rsrc->enabled = true;
561 }
562
563 static void
564 parse_queue_size(const char *queue_size_arg, uint16_t *queue_size, int rx)
565 {
566 char *end = NULL;
567 unsigned long value;
568
569 /* parse decimal string */
570 value = strtoul(queue_size_arg, &end, 10);
571 if ((queue_size_arg[0] == '\0') || (end == NULL) ||
572 (*end != '\0') || (value == 0)) {
573 if (rx == 1)
574 rte_exit(EXIT_FAILURE, "Invalid rx-queue-size\n");
575 else
576 rte_exit(EXIT_FAILURE, "Invalid tx-queue-size\n");
577
578 return;
579 }
580
581 if (value > UINT16_MAX) {
582 if (rx == 1)
583 rte_exit(EXIT_FAILURE, "rx-queue-size %lu > %d\n",
584 value, UINT16_MAX);
585 else
586 rte_exit(EXIT_FAILURE, "tx-queue-size %lu > %d\n",
587 value, UINT16_MAX);
588
589 return;
590 }
591
592 *queue_size = value;
593 }
594
595 static void
596 parse_eventq_sched(const char *optarg)
597 {
598 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
599
600 if (!strcmp(optarg, "ordered"))
601 evt_rsrc->sched_type = RTE_SCHED_TYPE_ORDERED;
602 if (!strcmp(optarg, "atomic"))
603 evt_rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
604 if (!strcmp(optarg, "parallel"))
605 evt_rsrc->sched_type = RTE_SCHED_TYPE_PARALLEL;
606 }
607
608 static void
609 parse_event_eth_rx_queues(const char *eth_rx_queues)
610 {
611 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
612 char *end = NULL;
613 uint8_t num_eth_rx_queues;
614
615 /* parse decimal string */
616 num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
617 if ((eth_rx_queues[0] == '\0') || (end == NULL) || (*end != '\0'))
618 return;
619
620 if (num_eth_rx_queues == 0)
621 return;
622
623 evt_rsrc->eth_rx_queues = num_eth_rx_queues;
624 }
625
626 static int
627 parse_lookup(const char *optarg)
628 {
629 if (!strcmp(optarg, "em"))
630 lookup_mode = L3FWD_LOOKUP_EM;
631 else if (!strcmp(optarg, "lpm"))
632 lookup_mode = L3FWD_LOOKUP_LPM;
633 else if (!strcmp(optarg, "fib"))
634 lookup_mode = L3FWD_LOOKUP_FIB;
635 else {
636 fprintf(stderr, "Invalid lookup option! Accepted options: em, lpm, fib\n");
637 return -1;
638 }
639 return 0;
640 }
641
642 #define MAX_JUMBO_PKT_LEN 9600
643
644 static const char short_options[] =
645 "p:" /* portmask */
646 "P" /* promiscuous */
647 "L" /* legacy enable long prefix match */
648 "E" /* legacy enable exact match */
649 ;
650
651 #define CMD_LINE_OPT_CONFIG "config"
652 #define CMD_LINE_OPT_RX_QUEUE_SIZE "rx-queue-size"
653 #define CMD_LINE_OPT_TX_QUEUE_SIZE "tx-queue-size"
654 #define CMD_LINE_OPT_ETH_DEST "eth-dest"
655 #define CMD_LINE_OPT_NO_NUMA "no-numa"
656 #define CMD_LINE_OPT_IPV6 "ipv6"
657 #define CMD_LINE_OPT_MAX_PKT_LEN "max-pkt-len"
658 #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
659 #define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
660 #define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
661 #define CMD_LINE_OPT_MODE "mode"
662 #define CMD_LINE_OPT_EVENTQ_SYNC "eventq-sched"
663 #define CMD_LINE_OPT_EVENT_ETH_RX_QUEUES "event-eth-rxqs"
664 #define CMD_LINE_OPT_LOOKUP "lookup"
665 #define CMD_LINE_OPT_ENABLE_VECTOR "event-vector"
666 #define CMD_LINE_OPT_VECTOR_SIZE "event-vector-size"
667 #define CMD_LINE_OPT_VECTOR_TMO_NS "event-vector-tmo"
668 #define CMD_LINE_OPT_RULE_IPV4 "rule_ipv4"
669 #define CMD_LINE_OPT_RULE_IPV6 "rule_ipv6"
670
671 enum {
672 /* long options mapped to a short option */
673
674 /* first long only option value must be >= 256, so that we won't
675 * conflict with short options */
676 CMD_LINE_OPT_MIN_NUM = 256,
677 CMD_LINE_OPT_CONFIG_NUM,
678 CMD_LINE_OPT_RX_QUEUE_SIZE_NUM,
679 CMD_LINE_OPT_TX_QUEUE_SIZE_NUM,
680 CMD_LINE_OPT_ETH_DEST_NUM,
681 CMD_LINE_OPT_NO_NUMA_NUM,
682 CMD_LINE_OPT_IPV6_NUM,
683 CMD_LINE_OPT_MAX_PKT_LEN_NUM,
684 CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
685 CMD_LINE_OPT_PARSE_PTYPE_NUM,
686 CMD_LINE_OPT_RULE_IPV4_NUM,
687 CMD_LINE_OPT_RULE_IPV6_NUM,
688 CMD_LINE_OPT_PARSE_PER_PORT_POOL,
689 CMD_LINE_OPT_MODE_NUM,
690 CMD_LINE_OPT_EVENTQ_SYNC_NUM,
691 CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM,
692 CMD_LINE_OPT_LOOKUP_NUM,
693 CMD_LINE_OPT_ENABLE_VECTOR_NUM,
694 CMD_LINE_OPT_VECTOR_SIZE_NUM,
695 CMD_LINE_OPT_VECTOR_TMO_NS_NUM
696 };
697
698 static const struct option lgopts[] = {
699 {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
700 {CMD_LINE_OPT_RX_QUEUE_SIZE, 1, 0, CMD_LINE_OPT_RX_QUEUE_SIZE_NUM},
701 {CMD_LINE_OPT_TX_QUEUE_SIZE, 1, 0, CMD_LINE_OPT_TX_QUEUE_SIZE_NUM},
702 {CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
703 {CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
704 {CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
705 {CMD_LINE_OPT_MAX_PKT_LEN, 1, 0, CMD_LINE_OPT_MAX_PKT_LEN_NUM},
706 {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
707 {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
708 {CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
709 {CMD_LINE_OPT_MODE, 1, 0, CMD_LINE_OPT_MODE_NUM},
710 {CMD_LINE_OPT_EVENTQ_SYNC, 1, 0, CMD_LINE_OPT_EVENTQ_SYNC_NUM},
711 {CMD_LINE_OPT_EVENT_ETH_RX_QUEUES, 1, 0,
712 CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM},
713 {CMD_LINE_OPT_LOOKUP, 1, 0, CMD_LINE_OPT_LOOKUP_NUM},
714 {CMD_LINE_OPT_ENABLE_VECTOR, 0, 0, CMD_LINE_OPT_ENABLE_VECTOR_NUM},
715 {CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
716 {CMD_LINE_OPT_VECTOR_TMO_NS, 1, 0, CMD_LINE_OPT_VECTOR_TMO_NS_NUM},
717 {CMD_LINE_OPT_RULE_IPV4, 1, 0, CMD_LINE_OPT_RULE_IPV4_NUM},
718 {CMD_LINE_OPT_RULE_IPV6, 1, 0, CMD_LINE_OPT_RULE_IPV6_NUM},
719 {NULL, 0, 0, 0}
720 };
721
722 /*
723 * This expression is used to calculate the number of mbufs needed
724 * depending on user input, taking into account memory for Rx and
725 * Tx hardware rings, cache per lcore and mtable per port per lcore.
726 * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum
727 * value of 8192.
728 */
729 #define NB_MBUF(nports) RTE_MAX( \
730 (nports*nb_rx_queue*nb_rxd + \
731 nports*nb_lcores*MAX_PKT_BURST + \
732 nports*n_tx_queue*nb_txd + \
733 nb_lcores*MEMPOOL_CACHE_SIZE), \
734 (unsigned)8192)
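/*
 * Worked example with assumed values (MAX_PKT_BURST and MEMPOOL_CACHE_SIZE
 * come from l3fwd.h; the numbers below are for illustration only):
 * 2 ports, 2 Rx queues per port, nb_rxd = nb_txd = 1024, 4 lcores,
 * 4 Tx queues per port, MAX_PKT_BURST = 32, MEMPOOL_CACHE_SIZE = 256:
 * NB_MBUF(2) = RTE_MAX(2*2*1024 + 2*4*32 + 2*4*1024 + 4*256, 8192)
 *            = RTE_MAX(13568, 8192) = 13568 mbufs.
 */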
735
736 /* Parse the argument given in the command line of the application */
737 static int
738 parse_args(int argc, char **argv)
739 {
740 int opt, ret;
741 char **argvopt;
742 int option_index;
743 char *prgname = argv[0];
744 uint8_t lcore_params = 0;
745 uint8_t eventq_sched = 0;
746 uint8_t eth_rx_q = 0;
747 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
748
749 argvopt = argv;
750
751 /* Error or normal output strings. */
752 while ((opt = getopt_long(argc, argvopt, short_options,
753 lgopts, &option_index)) != EOF) {
754
755 switch (opt) {
756 /* portmask */
757 case 'p':
758 enabled_port_mask = parse_portmask(optarg);
759 if (enabled_port_mask == 0) {
760 fprintf(stderr, "Invalid portmask\n");
761 print_usage(prgname);
762 return -1;
763 }
764 break;
765
766 case 'P':
767 promiscuous_on = 1;
768 break;
769
770 case 'E':
771 if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
772 fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
773 return -1;
774 }
775 lookup_mode = L3FWD_LOOKUP_EM;
776 break;
777
778 case 'L':
779 if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
780 fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
781 return -1;
782 }
783 lookup_mode = L3FWD_LOOKUP_LPM;
784 break;
785
786 /* long options */
787 case CMD_LINE_OPT_CONFIG_NUM:
788 ret = parse_config(optarg);
789 if (ret) {
790 fprintf(stderr, "Invalid config\n");
791 print_usage(prgname);
792 return -1;
793 }
794 lcore_params = 1;
795 break;
796
797 case CMD_LINE_OPT_RX_QUEUE_SIZE_NUM:
798 parse_queue_size(optarg, &nb_rxd, 1);
799 break;
800
801 case CMD_LINE_OPT_TX_QUEUE_SIZE_NUM:
802 parse_queue_size(optarg, &nb_txd, 0);
803 break;
804
805 case CMD_LINE_OPT_ETH_DEST_NUM:
806 parse_eth_dest(optarg);
807 break;
808
809 case CMD_LINE_OPT_NO_NUMA_NUM:
810 numa_on = 0;
811 break;
812
813 case CMD_LINE_OPT_IPV6_NUM:
814 ipv6 = 1;
815 break;
816
817 case CMD_LINE_OPT_MAX_PKT_LEN_NUM:
818 max_pkt_len = parse_max_pkt_len(optarg);
819 break;
820
821 case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM:
822 ret = parse_hash_entry_number(optarg);
823 if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
824 hash_entry_number = ret;
825 } else {
826 fprintf(stderr, "invalid hash entry number\n");
827 print_usage(prgname);
828 return -1;
829 }
830 break;
831
832 case CMD_LINE_OPT_PARSE_PTYPE_NUM:
833 printf("soft parse-ptype is enabled\n");
834 parse_ptype = 1;
835 break;
836
837 case CMD_LINE_OPT_PARSE_PER_PORT_POOL:
838 printf("per port buffer pool is enabled\n");
839 per_port_pool = 1;
840 break;
841
842 case CMD_LINE_OPT_MODE_NUM:
843 parse_mode(optarg);
844 break;
845
846 case CMD_LINE_OPT_EVENTQ_SYNC_NUM:
847 parse_eventq_sched(optarg);
848 eventq_sched = 1;
849 break;
850
851 case CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM:
852 parse_event_eth_rx_queues(optarg);
853 eth_rx_q = 1;
854 break;
855
856 case CMD_LINE_OPT_LOOKUP_NUM:
857 if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
858 fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
859 return -1;
860 }
861 ret = parse_lookup(optarg);
862 /*
863 * If parse_lookup was passed an invalid lookup type
864 * then return -1. Error log included within
865 * parse_lookup for simplicity.
866 */
867 if (ret)
868 return -1;
869 break;
870
871 case CMD_LINE_OPT_ENABLE_VECTOR_NUM:
872 printf("event vectorization is enabled\n");
873 evt_rsrc->vector_enabled = 1;
874 break;
875 case CMD_LINE_OPT_VECTOR_SIZE_NUM:
876 evt_rsrc->vector_size = strtol(optarg, NULL, 10);
877 break;
878 case CMD_LINE_OPT_VECTOR_TMO_NS_NUM:
879 evt_rsrc->vector_tmo_ns = strtoull(optarg, NULL, 10);
880 break;
881 case CMD_LINE_OPT_RULE_IPV4_NUM:
882 l3fwd_set_rule_ipv4_name(optarg);
883 break;
884 case CMD_LINE_OPT_RULE_IPV6_NUM:
885 l3fwd_set_rule_ipv6_name(optarg);
886 break;
887 default:
888 print_usage(prgname);
889 return -1;
890 }
891 }
892
893 if (evt_rsrc->enabled && lcore_params) {
894 fprintf(stderr, "lcore config is not valid when event mode is selected\n");
895 return -1;
896 }
897
898 if (!evt_rsrc->enabled && eth_rx_q) {
899 fprintf(stderr, "eth_rx_queues is valid only when event mode is selected\n");
900 return -1;
901 }
902
903 if (!evt_rsrc->enabled && eventq_sched) {
904 fprintf(stderr, "eventq_sched is valid only when event mode is selected\n");
905 return -1;
906 }
907
908 if (evt_rsrc->vector_enabled && !evt_rsrc->vector_size) {
909 evt_rsrc->vector_size = VECTOR_SIZE_DEFAULT;
910 fprintf(stderr, "vector size set to default (%" PRIu16 ")\n",
911 evt_rsrc->vector_size);
912 }
913
914 if (evt_rsrc->vector_enabled && !evt_rsrc->vector_tmo_ns) {
915 evt_rsrc->vector_tmo_ns = VECTOR_TMO_NS_DEFAULT;
916 fprintf(stderr,
917 "vector timeout set to default (%" PRIu64 " ns)\n",
918 evt_rsrc->vector_tmo_ns);
919 }
920
921 /*
922 * If no lookup mode was selected, fall back to
923 * longest-prefix match as the default.
924 */
925 if (lookup_mode == L3FWD_LOOKUP_DEFAULT) {
926 fprintf(stderr, "Neither LPM, EM nor FIB selected, defaulting to LPM\n");
927 lookup_mode = L3FWD_LOOKUP_LPM;
928 }
929
930 /*
931 * ipv6 and hash flags are valid only for
932 * exact match; reset them to default for
933 * longest-prefix match.
934 */
935 if (lookup_mode == L3FWD_LOOKUP_LPM) {
936 ipv6 = 0;
937 hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
938 }
939
940 if (optind >= 0)
941 argv[optind-1] = prgname;
942
943 ret = optind-1;
944 optind = 1; /* reset getopt lib */
945 return ret;
946 }
947
948 static void
949 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
950 {
951 char buf[RTE_ETHER_ADDR_FMT_SIZE];
952 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
953 printf("%s%s", name, buf);
954 }
955
956 int
957 init_mem(uint16_t portid, unsigned int nb_mbuf)
958 {
959 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
960 struct lcore_conf *qconf;
961 int socketid;
962 unsigned lcore_id;
963 char s[64];
964
965 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
966 if (rte_lcore_is_enabled(lcore_id) == 0)
967 continue;
968
969 if (numa_on)
970 socketid = rte_lcore_to_socket_id(lcore_id);
971 else
972 socketid = 0;
973
974 if (socketid >= NB_SOCKETS) {
975 rte_exit(EXIT_FAILURE,
976 "Socket %d of lcore %u is out of range %d\n",
977 socketid, lcore_id, NB_SOCKETS);
978 }
979
980 if (pktmbuf_pool[portid][socketid] == NULL) {
981 snprintf(s, sizeof(s), "mbuf_pool_%d:%d",
982 portid, socketid);
983 pktmbuf_pool[portid][socketid] =
984 rte_pktmbuf_pool_create(s, nb_mbuf,
985 MEMPOOL_CACHE_SIZE, 0,
986 RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
987 if (pktmbuf_pool[portid][socketid] == NULL)
988 rte_exit(EXIT_FAILURE,
989 "Cannot init mbuf pool on socket %d\n",
990 socketid);
991 else
992 printf("Allocated mbuf pool on socket %d\n",
993 socketid);
994
995 /* Set up LPM, EM (i.e. hash) or FIB, but only once per
996 * available socket.
997 */
998 if (!lkp_per_socket[socketid]) {
999 l3fwd_lkp.setup(socketid);
1000 lkp_per_socket[socketid] = 1;
1001 }
1002 }
1003
1004 if (evt_rsrc->vector_enabled && vector_pool[portid] == NULL) {
1005 unsigned int nb_vec;
1006
1007 nb_vec = (nb_mbuf + evt_rsrc->vector_size - 1) /
1008 evt_rsrc->vector_size;
1009 snprintf(s, sizeof(s), "vector_pool_%d", portid);
1010 vector_pool[portid] = rte_event_vector_pool_create(
1011 s, nb_vec, 0, evt_rsrc->vector_size, socketid);
1012 if (vector_pool[portid] == NULL)
1013 rte_exit(EXIT_FAILURE,
1014 "Failed to create vector pool for port %d\n",
1015 portid);
1016 else
1017 printf("Allocated vector pool for port %d\n",
1018 portid);
1019 }
1020
1021 qconf = &lcore_conf[lcore_id];
1022 qconf->ipv4_lookup_struct =
1023 l3fwd_lkp.get_ipv4_lookup_struct(socketid);
1024 qconf->ipv6_lookup_struct =
1025 l3fwd_lkp.get_ipv6_lookup_struct(socketid);
1026 }
1027 return 0;
1028 }
1029
1030 /* Check the link status of all enabled ports, waiting up to 9 s, and print the final status. */
1031 static void
1032 check_all_ports_link_status(uint32_t port_mask)
1033 {
1034 #define CHECK_INTERVAL 100 /* 100ms */
1035 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1036 uint16_t portid;
1037 uint8_t count, all_ports_up, print_flag = 0;
1038 struct rte_eth_link link;
1039 int ret;
1040 char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
1041
1042 printf("\nChecking link status");
1043 fflush(stdout);
1044 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1045 if (force_quit)
1046 return;
1047 all_ports_up = 1;
1048 RTE_ETH_FOREACH_DEV(portid) {
1049 if (force_quit)
1050 return;
1051 if ((port_mask & (1 << portid)) == 0)
1052 continue;
1053 memset(&link, 0, sizeof(link));
1054 ret = rte_eth_link_get_nowait(portid, &link);
1055 if (ret < 0) {
1056 all_ports_up = 0;
1057 if (print_flag == 1)
1058 printf("Port %u link get failed: %s\n",
1059 portid, rte_strerror(-ret));
1060 continue;
1061 }
1062 /* print link status if flag set */
1063 if (print_flag == 1) {
1064 rte_eth_link_to_str(link_status_text,
1065 sizeof(link_status_text), &link);
1066 printf("Port %d %s\n", portid,
1067 link_status_text);
1068 continue;
1069 }
1070 /* clear all_ports_up flag if any link down */
1071 if (link.link_status == RTE_ETH_LINK_DOWN) {
1072 all_ports_up = 0;
1073 break;
1074 }
1075 }
1076 /* after finally printing all link status, get out */
1077 if (print_flag == 1)
1078 break;
1079
1080 if (all_ports_up == 0) {
1081 printf(".");
1082 fflush(stdout);
1083 rte_delay_ms(CHECK_INTERVAL);
1084 }
1085
1086 /* set the print_flag if all ports up or timeout */
1087 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1088 print_flag = 1;
1089 printf("done\n");
1090 }
1091 }
1092 }
1093
1094 static void
1095 signal_handler(int signum)
1096 {
1097 if (signum == SIGINT || signum == SIGTERM) {
1098 printf("\n\nSignal %d received, preparing to exit...\n",
1099 signum);
1100 force_quit = true;
1101 }
1102 }
1103
1104 static int
1105 prepare_ptype_parser(uint16_t portid, uint16_t queueid)
1106 {
1107 if (parse_ptype) {
1108 printf("Port %d: packet type will be parsed in software\n", portid);
1109 if (rte_eth_add_rx_callback(portid, queueid,
1110 l3fwd_lkp.cb_parse_ptype,
1111 NULL))
1112 return 1;
1113
1114 printf("Failed to add rx callback: port=%d\n", portid);
1115 return 0;
1116 }
1117
1118 if (l3fwd_lkp.check_ptype(portid))
1119 return 1;
1120
1121 printf("port %d cannot parse packet type, please add --%s\n",
1122 portid, CMD_LINE_OPT_PARSE_PTYPE);
1123 return 0;
1124 }
1125
1126 static uint32_t
1127 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
1128 {
1129 uint32_t overhead_len;
1130
1131 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
1132 overhead_len = max_rx_pktlen - max_mtu;
1133 else
1134 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1135
1136 return overhead_len;
1137 }
1138
1139 static int
1140 config_port_max_pkt_len(struct rte_eth_conf *conf,
1141 struct rte_eth_dev_info *dev_info)
1142 {
1143 uint32_t overhead_len;
1144
1145 if (max_pkt_len == 0)
1146 return 0;
1147
1148 if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
1149 return -1;
1150
1151 overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
1152 dev_info->max_mtu);
1153 conf->rxmode.mtu = max_pkt_len - overhead_len;
1154
1155 if (conf->rxmode.mtu > RTE_ETHER_MTU)
1156 conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1157
1158 return 0;
1159 }
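/*
 * Example (illustrative): with --max-pkt-len 1518 on a device reporting the
 * standard 18 bytes of overhead (14-byte Ethernet header + 4-byte CRC), the
 * MTU becomes 1500 and multi-segment Tx is not requested; --max-pkt-len 9000
 * would give an MTU of 8982 and set RTE_ETH_TX_OFFLOAD_MULTI_SEGS.
 */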
1160
1161 static void
1162 l3fwd_poll_resource_setup(void)
1163 {
1164 uint8_t nb_rx_queue, queue, socketid;
1165 struct rte_eth_dev_info dev_info;
1166 uint32_t n_tx_queue, nb_lcores;
1167 struct rte_eth_txconf *txconf;
1168 struct lcore_conf *qconf;
1169 uint16_t queueid, portid;
1170 unsigned int nb_ports;
1171 unsigned int lcore_id;
1172 int ret;
1173
1174 if (check_lcore_params() < 0)
1175 rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
1176
1177 ret = init_lcore_rx_queues();
1178 if (ret < 0)
1179 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
1180
1181 nb_ports = rte_eth_dev_count_avail();
1182
1183 if (check_port_config() < 0)
1184 rte_exit(EXIT_FAILURE, "check_port_config failed\n");
1185
1186 nb_lcores = rte_lcore_count();
1187
1188 /* initialize all ports */
1189 RTE_ETH_FOREACH_DEV(portid) {
1190 struct rte_eth_conf local_port_conf = port_conf;
1191
1192 /* skip ports that are not enabled */
1193 if ((enabled_port_mask & (1 << portid)) == 0) {
1194 printf("\nSkipping disabled port %d\n", portid);
1195 continue;
1196 }
1197
1198 /* init port */
1199 printf("Initializing port %d ... ", portid );
1200 fflush(stdout);
1201
1202 nb_rx_queue = get_port_n_rx_queues(portid);
1203 n_tx_queue = nb_lcores;
1204 if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
1205 n_tx_queue = MAX_TX_QUEUE_PER_PORT;
1206 printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
1207 nb_rx_queue, (unsigned)n_tx_queue );
1208
1209 ret = rte_eth_dev_info_get(portid, &dev_info);
1210 if (ret != 0)
1211 rte_exit(EXIT_FAILURE,
1212 "Error during getting device (port %u) info: %s\n",
1213 portid, strerror(-ret));
1214
1215 ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
1216 if (ret != 0)
1217 rte_exit(EXIT_FAILURE,
1218 "Invalid max packet length: %u (port %u)\n",
1219 max_pkt_len, portid);
1220
1221 if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
1222 local_port_conf.txmode.offloads |=
1223 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1224
1225 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
1226 dev_info.flow_type_rss_offloads;
1227
1228 if (dev_info.max_rx_queues == 1)
1229 local_port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
1230
1231 if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
1232 port_conf.rx_adv_conf.rss_conf.rss_hf) {
1233 printf("Port %u modified RSS hash function based on hardware support,"
1234 " requested:%#"PRIx64" configured:%#"PRIx64"\n",
1235 portid,
1236 port_conf.rx_adv_conf.rss_conf.rss_hf,
1237 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
1238 }
1239
1240 ret = rte_eth_dev_configure(portid, nb_rx_queue,
1241 (uint16_t)n_tx_queue, &local_port_conf);
1242 if (ret < 0)
1243 rte_exit(EXIT_FAILURE,
1244 "Cannot configure device: err=%d, port=%d\n",
1245 ret, portid);
1246
1247 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
1248 &nb_txd);
1249 if (ret < 0)
1250 rte_exit(EXIT_FAILURE,
1251 "Cannot adjust number of descriptors: err=%d, "
1252 "port=%d\n", ret, portid);
1253
1254 ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
1255 if (ret < 0)
1256 rte_exit(EXIT_FAILURE,
1257 "Cannot get MAC address: err=%d, port=%d\n",
1258 ret, portid);
1259
1260 print_ethaddr(" Address:", &ports_eth_addr[portid]);
1261 printf(", ");
1262 print_ethaddr("Destination:",
1263 (const struct rte_ether_addr *)&dest_eth_addr[portid]);
1264 printf(", ");
1265
1266 /*
1267 * prepare src MACs for each port.
1268 */
1269 rte_ether_addr_copy(&ports_eth_addr[portid],
1270 (struct rte_ether_addr *)(val_eth + portid) + 1);
1271
1272 /* init memory */
1273 if (!per_port_pool) {
1274 /* portid = 0; this does *not* mean the first port; it means the
1275 * port id is ignored and one pool per socket is shared by all ports.
1276 */
1277 ret = init_mem(0, NB_MBUF(nb_ports));
1278 } else {
1279 ret = init_mem(portid, NB_MBUF(1));
1280 }
1281 if (ret < 0)
1282 rte_exit(EXIT_FAILURE, "init_mem failed\n");
1283
1284 /* init one TX queue per couple (lcore,port) */
1285 queueid = 0;
1286 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1287 if (rte_lcore_is_enabled(lcore_id) == 0)
1288 continue;
1289
1290 if (numa_on)
1291 socketid =
1292 (uint8_t)rte_lcore_to_socket_id(lcore_id);
1293 else
1294 socketid = 0;
1295
1296 printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
1297 fflush(stdout);
1298
1299 txconf = &dev_info.default_txconf;
1300 txconf->offloads = local_port_conf.txmode.offloads;
1301 ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
1302 socketid, txconf);
1303 if (ret < 0)
1304 rte_exit(EXIT_FAILURE,
1305 "rte_eth_tx_queue_setup: err=%d, "
1306 "port=%d\n", ret, portid);
1307
1308 qconf = &lcore_conf[lcore_id];
1309 qconf->tx_queue_id[portid] = queueid;
1310 queueid++;
1311
1312 qconf->tx_port_id[qconf->n_tx_port] = portid;
1313 qconf->n_tx_port++;
1314 }
1315 printf("\n");
1316 }
1317
1318 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1319 if (rte_lcore_is_enabled(lcore_id) == 0)
1320 continue;
1321 qconf = &lcore_conf[lcore_id];
1322 printf("\nInitializing rx queues on lcore %u ... ", lcore_id );
1323 fflush(stdout);
1324 /* init RX queues */
1325 for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
1326 struct rte_eth_rxconf rxq_conf;
1327
1328 portid = qconf->rx_queue_list[queue].port_id;
1329 queueid = qconf->rx_queue_list[queue].queue_id;
1330
1331 if (numa_on)
1332 socketid =
1333 (uint8_t)rte_lcore_to_socket_id(lcore_id);
1334 else
1335 socketid = 0;
1336
1337 printf("rxq=%d,%d,%d ", portid, queueid, socketid);
1338 fflush(stdout);
1339
1340 ret = rte_eth_dev_info_get(portid, &dev_info);
1341 if (ret != 0)
1342 rte_exit(EXIT_FAILURE,
1343 "Error during getting device (port %u) info: %s\n",
1344 portid, strerror(-ret));
1345
1346 rxq_conf = dev_info.default_rxconf;
1347 rxq_conf.offloads = port_conf.rxmode.offloads;
1348 if (!per_port_pool)
1349 ret = rte_eth_rx_queue_setup(portid, queueid,
1350 nb_rxd, socketid,
1351 &rxq_conf,
1352 pktmbuf_pool[0][socketid]);
1353 else
1354 ret = rte_eth_rx_queue_setup(portid, queueid,
1355 nb_rxd, socketid,
1356 &rxq_conf,
1357 pktmbuf_pool[portid][socketid]);
1358 if (ret < 0)
1359 rte_exit(EXIT_FAILURE,
1360 "rte_eth_rx_queue_setup: err=%d, port=%d\n",
1361 ret, portid);
1362 }
1363 }
1364 }
1365
1366 static inline int
1367 l3fwd_service_enable(uint32_t service_id)
1368 {
1369 uint8_t min_service_count = UINT8_MAX;
1370 uint32_t slcore_array[RTE_MAX_LCORE];
1371 unsigned int slcore = 0;
1372 uint8_t service_count;
1373 int32_t slcore_count;
1374
1375 if (!rte_service_lcore_count())
1376 return -ENOENT;
1377
1378 slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
1379 if (slcore_count < 0)
1380 return -ENOENT;
1381 /* Pick the service core that currently has the fewest services mapped to it. */
1382 while (slcore_count--) {
1383 /* Reset default mapping */
1384 if (rte_service_map_lcore_set(service_id,
1385 slcore_array[slcore_count], 0) != 0)
1386 return -ENOENT;
1387 service_count = rte_service_lcore_count_services(
1388 slcore_array[slcore_count]);
1389 if (service_count < min_service_count) {
1390 slcore = slcore_array[slcore_count];
1391 min_service_count = service_count;
1392 }
1393 }
1394 if (rte_service_map_lcore_set(service_id, slcore, 1))
1395 return -ENOENT;
1396 rte_service_lcore_start(slcore);
1397
1398 return 0;
1399 }
1400
1401 static void
1402 l3fwd_event_service_setup(void)
1403 {
1404 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
1405 struct rte_event_dev_info evdev_info;
1406 uint32_t service_id, caps;
1407 int ret, i;
1408
1409 rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
1410 if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
1411 ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
1412 &service_id);
1413 if (ret != -ESRCH && ret != 0)
1414 rte_exit(EXIT_FAILURE,
1415 "Error in starting eventdev service\n");
1416 l3fwd_service_enable(service_id);
1417 }
1418
1419 for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
1420 ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
1421 evt_rsrc->rx_adptr.rx_adptr[i], &caps);
1422 if (ret < 0)
1423 rte_exit(EXIT_FAILURE,
1424 "Failed to get Rx adapter[%d] caps\n",
1425 evt_rsrc->rx_adptr.rx_adptr[i]);
1426 ret = rte_event_eth_rx_adapter_service_id_get(
1427 evt_rsrc->event_d_id,
1428 &service_id);
1429 if (ret != -ESRCH && ret != 0)
1430 rte_exit(EXIT_FAILURE,
1431 "Error in starting Rx adapter[%d] service\n",
1432 evt_rsrc->rx_adptr.rx_adptr[i]);
1433 l3fwd_service_enable(service_id);
1434 }
1435
1436 for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
1437 ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
1438 evt_rsrc->tx_adptr.tx_adptr[i], &caps);
1439 if (ret < 0)
1440 rte_exit(EXIT_FAILURE,
1441 "Failed to get Tx adapter[%d] caps\n",
1442 evt_rsrc->tx_adptr.tx_adptr[i]);
1443 ret = rte_event_eth_tx_adapter_service_id_get(
1444 evt_rsrc->event_d_id,
1445 &service_id);
1446 if (ret != -ESRCH && ret != 0)
1447 rte_exit(EXIT_FAILURE,
1448 "Error in starting Tx adapter[%d] service\n",
1449 evt_rsrc->tx_adptr.tx_adptr[i]);
1450 l3fwd_service_enable(service_id);
1451 }
1452 }
1453
1454 int
1455 main(int argc, char **argv)
1456 {
1457 struct l3fwd_event_resources *evt_rsrc;
1458 struct lcore_conf *qconf;
1459 uint16_t queueid, portid;
1460 unsigned int lcore_id;
1461 uint8_t queue;
1462 int i, ret;
1463
1464 /* init EAL */
1465 ret = rte_eal_init(argc, argv);
1466 if (ret < 0)
1467 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
1468 argc -= ret;
1469 argv += ret;
1470
1471 force_quit = false;
1472 signal(SIGINT, signal_handler);
1473 signal(SIGTERM, signal_handler);
1474
1475 /* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
1476 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1477 dest_eth_addr[portid] =
1478 RTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
1479 *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
1480 }
1481
1482 evt_rsrc = l3fwd_get_eventdev_rsrc();
1483 /* parse application arguments (after the EAL ones) */
1484 ret = parse_args(argc, argv);
1485 if (ret < 0)
1486 rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
1487
1488 /* Setup function pointers for lookup method. */
1489 setup_l3fwd_lookup_tables();
1490
1491 /* Add the config file rules */
1492 l3fwd_lkp.read_config_files();
1493
1494 evt_rsrc->per_port_pool = per_port_pool;
1495 evt_rsrc->pkt_pool = pktmbuf_pool;
1496 evt_rsrc->vec_pool = vector_pool;
1497 evt_rsrc->port_mask = enabled_port_mask;
1498 /* Configure eventdev parameters if user has requested */
1499 if (evt_rsrc->enabled) {
1500 l3fwd_event_resource_setup(&port_conf);
1501 if (lookup_mode == L3FWD_LOOKUP_EM)
1502 l3fwd_lkp.main_loop = evt_rsrc->ops.em_event_loop;
1503 else if (lookup_mode == L3FWD_LOOKUP_FIB)
1504 l3fwd_lkp.main_loop = evt_rsrc->ops.fib_event_loop;
1505 else
1506 l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;
1507 l3fwd_event_service_setup();
1508 } else
1509 l3fwd_poll_resource_setup();
1510
1511 /* start ports */
1512 RTE_ETH_FOREACH_DEV(portid) {
1513 if ((enabled_port_mask & (1 << portid)) == 0) {
1514 continue;
1515 }
1516 /* Start device */
1517 ret = rte_eth_dev_start(portid);
1518 if (ret < 0)
1519 rte_exit(EXIT_FAILURE,
1520 "rte_eth_dev_start: err=%d, port=%d\n",
1521 ret, portid);
1522
1523 /*
1524 * If enabled, put device in promiscuous mode.
1525 * This allows IO forwarding mode to forward packets
1526 * to itself through 2 cross-connected ports of the
1527 * target machine.
1528 */
1529 if (promiscuous_on) {
1530 ret = rte_eth_promiscuous_enable(portid);
1531 if (ret != 0)
1532 rte_exit(EXIT_FAILURE,
1533 "rte_eth_promiscuous_enable: err=%s, port=%u\n",
1534 rte_strerror(-ret), portid);
1535 }
1536 }
1537
1538 printf("\n");
1539
1540 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1541 if (rte_lcore_is_enabled(lcore_id) == 0)
1542 continue;
1543 qconf = &lcore_conf[lcore_id];
1544 for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
1545 portid = qconf->rx_queue_list[queue].port_id;
1546 queueid = qconf->rx_queue_list[queue].queue_id;
1547 if (prepare_ptype_parser(portid, queueid) == 0)
1548 rte_exit(EXIT_FAILURE, "ptype check fails\n");
1549 }
1550 }
1551
1552 check_all_ports_link_status(enabled_port_mask);
1553
1554 ret = 0;
1555 /* launch per-lcore init on every lcore */
1556 rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MAIN);
1557 if (evt_rsrc->enabled) {
1558 for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++)
1559 rte_event_eth_rx_adapter_stop(
1560 evt_rsrc->rx_adptr.rx_adptr[i]);
1561 for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++)
1562 rte_event_eth_tx_adapter_stop(
1563 evt_rsrc->tx_adptr.tx_adptr[i]);
1564
1565 RTE_ETH_FOREACH_DEV(portid) {
1566 if ((enabled_port_mask & (1 << portid)) == 0)
1567 continue;
1568 ret = rte_eth_dev_stop(portid);
1569 if (ret != 0)
1570 printf("rte_eth_dev_stop: err=%d, port=%u\n",
1571 ret, portid);
1572 }
1573
1574 rte_eal_mp_wait_lcore();
1575 RTE_ETH_FOREACH_DEV(portid) {
1576 if ((enabled_port_mask & (1 << portid)) == 0)
1577 continue;
1578 rte_eth_dev_close(portid);
1579 }
1580
1581 rte_event_dev_stop(evt_rsrc->event_d_id);
1582 rte_event_dev_close(evt_rsrc->event_d_id);
1583
1584 } else {
1585 rte_eal_mp_wait_lcore();
1586
1587 RTE_ETH_FOREACH_DEV(portid) {
1588 if ((enabled_port_mask & (1 << portid)) == 0)
1589 continue;
1590 printf("Closing port %d...", portid);
1591 ret = rte_eth_dev_stop(portid);
1592 if (ret != 0)
1593 printf("rte_eth_dev_stop: err=%d, port=%u\n",
1594 ret, portid);
1595 rte_eth_dev_close(portid);
1596 printf(" Done\n");
1597 }
1598 }
1599
1600 /* clean up config file routes */
1601 l3fwd_lkp.free_routes();
1602
1603 /* clean up the EAL */
1604 rte_eal_cleanup();
1605
1606 printf("Bye...\n");
1607
1608 return ret;
1609 }
1610