/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>

#include <netinet/in.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <signal.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_bus_pci.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_kni.h>

/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1

/* Max size of a single packet */
#define MAX_PACKET_SZ 2048

/* Size of the data buffer in each mbuf */
#define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)

/* Number of mbufs in mempool that is created */
#define NB_MBUF (8192 * 16)

/* How many packets to attempt to read from NIC in one go */
#define PKT_BURST_SZ 32

/* How many objects (mbufs) to keep in per-lcore mempool cache */
#define MEMPOOL_CACHE_SZ PKT_BURST_SZ

/* Number of RX ring descriptors */
#define NB_RXD 1024

/* Number of TX ring descriptors */
#define NB_TXD 1024

/* Total octets in ethernet header */
#define KNI_ENET_HEADER_SIZE 14

/* Total octets in the FCS */
#define KNI_ENET_FCS_SIZE 4

#define KNI_US_PER_SECOND 1000000
#define KNI_SECOND_PER_DAY 86400

#define KNI_MAX_KTHREAD 32
/*
 * Structure of port parameters
 */
struct kni_port_params {
	uint16_t port_id;	/* Port ID */
	unsigned lcore_rx;	/* lcore ID for RX */
	unsigned lcore_tx;	/* lcore ID for TX */
	uint32_t nb_lcore_k;	/* Number of lcores for KNI multi kernel threads */
	uint32_t nb_kni;	/* Number of KNI devices to be created */
	unsigned lcore_k[KNI_MAX_KTHREAD];	/* lcore ID list for kthreads */
	struct rte_kni *kni[KNI_MAX_KTHREAD];	/* KNI context pointers */
} __rte_cache_aligned;

static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];


/* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = {
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};

/* Mempool for mbufs */
static struct rte_mempool *pktmbuf_pool = NULL;

/* Mask of enabled ports */
static uint32_t ports_mask = 0;
/* Promiscuous mode is off by default. */
static int promiscuous_on = 0;
/* Monitor link status continually; off by default. */
static int monitor_links;

/* Structure type for recording kni interface specific stats */
struct kni_interface_stats {
	/* number of pkts received from NIC, and sent to KNI */
	uint64_t rx_packets;

	/* number of pkts received from NIC, but failed to send to KNI */
	uint64_t rx_dropped;

	/* number of pkts received from KNI, and sent to NIC */
	uint64_t tx_packets;

	/* number of pkts received from KNI, but failed to send to NIC */
	uint64_t tx_dropped;
};

/* kni device statistics array */
static struct kni_interface_stats kni_stats[RTE_MAX_ETHPORTS];

static int kni_change_mtu(uint16_t port_id, unsigned int new_mtu);
static int kni_config_network_interface(uint16_t port_id, uint8_t if_up);
static int kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[]);

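/*
 * kni_stop is raised from the signal handler to make the lcore loops exit;
 * kni_pause is raised by the KNI callbacks while a port is being stopped and
 * reconfigured, so that the RX/TX lcores temporarily skip packet processing.
 */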
static rte_atomic32_t kni_stop = RTE_ATOMIC32_INIT(0);
static rte_atomic32_t kni_pause = RTE_ATOMIC32_INIT(0);

/* Print out statistics on packets handled */
static void
print_stats(void)
{
	uint16_t i;

	printf("\n**KNI example application statistics**\n"
	       "====== ============== ============ ============ ============ ============\n"
	       " Port Lcore(RX/TX) rx_packets rx_dropped tx_packets tx_dropped\n"
	       "------ -------------- ------------ ------------ ------------ ------------\n");
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (!kni_port_params_array[i])
			continue;

		printf("%7d %10u/%2u %13"PRIu64" %13"PRIu64" %13"PRIu64" "
			"%13"PRIu64"\n", i,
			kni_port_params_array[i]->lcore_rx,
			kni_port_params_array[i]->lcore_tx,
			kni_stats[i].rx_packets,
			kni_stats[i].rx_dropped,
			kni_stats[i].tx_packets,
			kni_stats[i].tx_dropped);
	}
	printf("====== ============== ============ ============ ============ ============\n");

	fflush(stdout);
}

/* Custom handling of signals to handle stats and kni processing */
static void
signal_handler(int signum)
{
	/* When we receive a USR1 signal, print stats */
	if (signum == SIGUSR1) {
		print_stats();
	}

	/* When we receive a USR2 signal, reset stats */
	if (signum == SIGUSR2) {
		memset(&kni_stats, 0, sizeof(kni_stats));
		printf("\n** Statistics have been reset **\n");
		return;
	}

	/*
	 * When we receive a RTMIN or SIGINT or SIGTERM signal,
	 * stop kni processing
	 */
	if (signum == SIGRTMIN || signum == SIGINT || signum == SIGTERM) {
		printf("\nSIGRTMIN/SIGINT/SIGTERM received. "
			"KNI processing stopping.\n");
		rte_atomic32_inc(&kni_stop);
		return;
	}
}

static void
kni_burst_free_mbufs(struct rte_mbuf **pkts, unsigned num)
{
	unsigned i;

	if (pkts == NULL)
		return;

	for (i = 0; i < num; i++) {
		rte_pktmbuf_free(pkts[i]);
		pkts[i] = NULL;
	}
}

/**
 * Interface to burst rx and enqueue mbufs into rx_q
 */
static void
kni_ingress(struct kni_port_params *p)
{
	uint8_t i;
	uint16_t port_id;
	unsigned nb_rx, num;
	uint32_t nb_kni;
	struct rte_mbuf *pkts_burst[PKT_BURST_SZ];

	if (p == NULL)
		return;

	nb_kni = p->nb_kni;
	port_id = p->port_id;
	for (i = 0; i < nb_kni; i++) {
		/* Burst rx from eth */
		nb_rx = rte_eth_rx_burst(port_id, 0, pkts_burst, PKT_BURST_SZ);
		if (unlikely(nb_rx > PKT_BURST_SZ)) {
			RTE_LOG(ERR, APP, "Error receiving from eth\n");
			return;
		}
		/* Burst tx to kni */
		num = rte_kni_tx_burst(p->kni[i], pkts_burst, nb_rx);
		if (num)
			kni_stats[port_id].rx_packets += num;

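		/*
		 * Service any control requests (MTU change, link up/down,
		 * MAC change) issued from the kernel side for this device.
		 */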
		rte_kni_handle_request(p->kni[i]);
		if (unlikely(num < nb_rx)) {
			/* Free mbufs not tx to kni interface */
			kni_burst_free_mbufs(&pkts_burst[num], nb_rx - num);
			kni_stats[port_id].rx_dropped += nb_rx - num;
		}
	}
}

/**
 * Interface to dequeue mbufs from tx_q and burst tx
 */
static void
kni_egress(struct kni_port_params *p)
{
	uint8_t i;
	uint16_t port_id;
	unsigned nb_tx, num;
	uint32_t nb_kni;
	struct rte_mbuf *pkts_burst[PKT_BURST_SZ];

	if (p == NULL)
		return;

	nb_kni = p->nb_kni;
	port_id = p->port_id;
	for (i = 0; i < nb_kni; i++) {
		/* Burst rx from kni */
		num = rte_kni_rx_burst(p->kni[i], pkts_burst, PKT_BURST_SZ);
		if (unlikely(num > PKT_BURST_SZ)) {
			RTE_LOG(ERR, APP, "Error receiving from KNI\n");
			return;
		}
		/* Burst tx to eth */
		nb_tx = rte_eth_tx_burst(port_id, 0, pkts_burst, (uint16_t)num);
		if (nb_tx)
			kni_stats[port_id].tx_packets += nb_tx;
		if (unlikely(nb_tx < num)) {
			/* Free mbufs not tx to NIC */
			kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx);
			kni_stats[port_id].tx_dropped += num - nb_tx;
		}
	}
}

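/*
 * Per-lcore worker: each lcore acts as either the RX lcore or the TX lcore of
 * at most one port (as given in --config) and loops on kni_ingress() or
 * kni_egress() for that port until kni_stop is raised.
 */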
static int
main_loop(__rte_unused void *arg)
{
	uint16_t i;
	int32_t f_stop;
	int32_t f_pause;
	const unsigned lcore_id = rte_lcore_id();
	enum lcore_rxtx {
		LCORE_NONE,
		LCORE_RX,
		LCORE_TX,
		LCORE_MAX
	};
	enum lcore_rxtx flag = LCORE_NONE;

	RTE_ETH_FOREACH_DEV(i) {
		if (!kni_port_params_array[i])
			continue;
		if (kni_port_params_array[i]->lcore_rx == lcore_id) {
			flag = LCORE_RX;
			break;
		} else if (kni_port_params_array[i]->lcore_tx == lcore_id) {
			flag = LCORE_TX;
			break;
		}
	}

	if (flag == LCORE_RX) {
		RTE_LOG(INFO, APP, "Lcore %u is reading from port %d\n",
			kni_port_params_array[i]->lcore_rx,
			kni_port_params_array[i]->port_id);
		while (1) {
			f_stop = rte_atomic32_read(&kni_stop);
			f_pause = rte_atomic32_read(&kni_pause);
			if (f_stop)
				break;
			if (f_pause)
				continue;
			kni_ingress(kni_port_params_array[i]);
		}
	} else if (flag == LCORE_TX) {
		RTE_LOG(INFO, APP, "Lcore %u is writing to port %d\n",
			kni_port_params_array[i]->lcore_tx,
			kni_port_params_array[i]->port_id);
		while (1) {
			f_stop = rte_atomic32_read(&kni_stop);
			f_pause = rte_atomic32_read(&kni_pause);
			if (f_stop)
				break;
			if (f_pause)
				continue;
			kni_egress(kni_port_params_array[i]);
		}
	} else
		RTE_LOG(INFO, APP, "Lcore %u has nothing to do\n", lcore_id);

	return 0;
}

/* Display usage instructions */
static void
print_usage(const char *prgname)
{
	RTE_LOG(INFO, APP, "\nUsage: %s [EAL options] -- -p PORTMASK -P -m "
		"[--config (port,lcore_rx,lcore_tx,lcore_kthread...)"
		"[,(port,lcore_rx,lcore_tx,lcore_kthread...)]]\n"
		"    -p PORTMASK: hex bitmask of ports to use\n"
		"    -P : enable promiscuous mode\n"
		"    -m : enable monitoring of port carrier state\n"
		"    --config (port,lcore_rx,lcore_tx,lcore_kthread...): "
		"port and lcore configurations\n",
		prgname);
}

/* Convert a hexadecimal string to an unsigned number. 0 is returned on error */
static uint32_t
parse_unsigned(const char *portmask)
{
	char *end = NULL;
	unsigned long num;

	num = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return (uint32_t)num;
}

static void
print_config(void)
{
	uint32_t i, j;
	struct kni_port_params **p = kni_port_params_array;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (!p[i])
			continue;
		RTE_LOG(DEBUG, APP, "Port ID: %d\n", p[i]->port_id);
		RTE_LOG(DEBUG, APP, "Rx lcore ID: %u, Tx lcore ID: %u\n",
			p[i]->lcore_rx, p[i]->lcore_tx);
		for (j = 0; j < p[i]->nb_lcore_k; j++)
			RTE_LOG(DEBUG, APP, "Kernel thread lcore ID: %u\n",
				p[i]->lcore_k[j]);
	}
}

static int
parse_config(const char *arg)
{
	const char *p, *p0 = arg;
	char s[256], *end;
	unsigned size;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_LCORE_RX,
		FLD_LCORE_TX,
		_NUM_FLD = KNI_MAX_KTHREAD + 3,
	};
	int i, j, nb_token;
	char *str_fld[_NUM_FLD];
	unsigned long int_fld[_NUM_FLD];
	uint16_t port_id, nb_kni_port_params = 0;

	memset(&kni_port_params_array, 0, sizeof(kni_port_params_array));
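	/*
	 * Each parenthesised group in --config is parsed as
	 * (port, rx lcore, tx lcore[, kthread lcore, ...]), with up to
	 * KNI_MAX_KTHREAD optional kernel-thread lcores.
	 */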
	while (((p = strchr(p0, '(')) != NULL) &&
		nb_kni_port_params < RTE_MAX_ETHPORTS) {
		p++;
		if ((p0 = strchr(p, ')')) == NULL)
			goto fail;
		size = p0 - p;
		if (size >= sizeof(s)) {
			printf("Invalid config parameters\n");
			goto fail;
		}
		snprintf(s, sizeof(s), "%.*s", size, p);
		nb_token = rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',');
		if (nb_token <= FLD_LCORE_TX) {
			printf("Invalid config parameters\n");
			goto fail;
		}
		for (i = 0; i < nb_token; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i]) {
				printf("Invalid config parameters\n");
				goto fail;
			}
		}

		i = 0;
		port_id = int_fld[i++];
		if (port_id >= RTE_MAX_ETHPORTS) {
			printf("Port ID %d must be less than the maximum %d\n",
				port_id, RTE_MAX_ETHPORTS);
			goto fail;
		}
		if (kni_port_params_array[port_id]) {
			printf("Port %d has already been configured\n",
				port_id);
			goto fail;
		}
		kni_port_params_array[port_id] =
			rte_zmalloc("KNI_port_params",
				sizeof(struct kni_port_params),
				RTE_CACHE_LINE_SIZE);
		kni_port_params_array[port_id]->port_id = port_id;
		kni_port_params_array[port_id]->lcore_rx =
			(unsigned)int_fld[i++];
		kni_port_params_array[port_id]->lcore_tx =
			(unsigned)int_fld[i++];
		if (kni_port_params_array[port_id]->lcore_rx >= RTE_MAX_LCORE ||
		    kni_port_params_array[port_id]->lcore_tx >= RTE_MAX_LCORE) {
			printf("lcore_rx %u or lcore_tx %u must be less "
				"than the maximum %u\n",
				kni_port_params_array[port_id]->lcore_rx,
				kni_port_params_array[port_id]->lcore_tx,
				(unsigned)RTE_MAX_LCORE);
			goto fail;
		}
		for (j = 0; i < nb_token && j < KNI_MAX_KTHREAD; i++, j++)
			kni_port_params_array[port_id]->lcore_k[j] =
				(unsigned)int_fld[i];
		kni_port_params_array[port_id]->nb_lcore_k = j;
	}
	print_config();

	return 0;

fail:
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (kni_port_params_array[i]) {
			rte_free(kni_port_params_array[i]);
			kni_port_params_array[i] = NULL;
		}
	}

	return -1;
}

static int
validate_parameters(uint32_t portmask)
{
	uint32_t i;

	if (!portmask) {
		printf("No port configured in port mask\n");
		return -1;
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (((portmask & (1 << i)) && !kni_port_params_array[i]) ||
			(!(portmask & (1 << i)) && kni_port_params_array[i]))
			rte_exit(EXIT_FAILURE, "portmask is not consistent "
				"with the port IDs specified in --config\n");

		if (kni_port_params_array[i] && !rte_lcore_is_enabled(
			(unsigned)(kni_port_params_array[i]->lcore_rx)))
			rte_exit(EXIT_FAILURE, "lcore id %u for "
				"port %d receiving not enabled\n",
				kni_port_params_array[i]->lcore_rx,
				kni_port_params_array[i]->port_id);

		if (kni_port_params_array[i] && !rte_lcore_is_enabled(
			(unsigned)(kni_port_params_array[i]->lcore_tx)))
			rte_exit(EXIT_FAILURE, "lcore id %u for "
				"port %d transmitting not enabled\n",
				kni_port_params_array[i]->lcore_tx,
				kni_port_params_array[i]->port_id);

	}

	return 0;
}

#define CMDLINE_OPT_CONFIG "config"

/* Parse the arguments given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, longindex, ret = 0;
	const char *prgname = argv[0];
	static struct option longopts[] = {
		{CMDLINE_OPT_CONFIG, required_argument, NULL, 0},
		{NULL, 0, NULL, 0}
	};

	/* Disable printing messages within getopt() */
	opterr = 0;

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:Pm", longopts,
		&longindex)) != EOF) {
		switch (opt) {
		case 'p':
			ports_mask = parse_unsigned(optarg);
			break;
		case 'P':
			promiscuous_on = 1;
			break;
		case 'm':
			monitor_links = 1;
			break;
		case 0:
			if (!strncmp(longopts[longindex].name,
				     CMDLINE_OPT_CONFIG,
				     sizeof(CMDLINE_OPT_CONFIG))) {
				ret = parse_config(optarg);
				if (ret) {
					printf("Invalid config\n");
					print_usage(prgname);
					return -1;
				}
			}
			break;
		default:
			print_usage(prgname);
			rte_exit(EXIT_FAILURE, "Invalid option specified\n");
		}
	}

	/* Check that options were parsed ok */
	if (validate_parameters(ports_mask) < 0) {
		print_usage(prgname);
		rte_exit(EXIT_FAILURE, "Invalid parameters\n");
	}

	return ret;
}

/* Initialize KNI subsystem */
static void
init_kni(void)
{
	unsigned int num_of_kni_ports = 0, i;
	struct kni_port_params **params = kni_port_params_array;

	/* Calculate the maximum number of KNI interfaces that will be used */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (kni_port_params_array[i]) {
			num_of_kni_ports += (params[i]->nb_lcore_k ?
				params[i]->nb_lcore_k : 1);
		}
	}

	/* Invoke rte KNI init to preallocate the ports */
	rte_kni_init(num_of_kni_ports);
}

/* Initialise a single port on an Ethernet device */
static void
init_port(uint16_t port)
{
	int ret;
	uint16_t nb_rxd = NB_RXD;
	uint16_t nb_txd = NB_TXD;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	struct rte_eth_conf local_port_conf = port_conf;

	/* Initialise device and RX/TX queues */
	RTE_LOG(INFO, APP, "Initialising port %u ...\n", (unsigned)port);
	fflush(stdout);

	ret = rte_eth_dev_info_get(port, &dev_info);
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			"Error during getting device (port %u) info: %s\n",
			port, strerror(-ret));

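	/* Enable fast mbuf free on Tx when the PMD advertises support for it */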
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	ret = rte_eth_dev_configure(port, 1, 1, &local_port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",
			(unsigned)port, ret);

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not adjust number of descriptors "
			"for port%u (%d)\n", (unsigned)port, ret);

	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = local_port_conf.rxmode.offloads;
	ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
		rte_eth_dev_socket_id(port), &rxq_conf, pktmbuf_pool);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not set up RX queue for "
			"port%u (%d)\n", (unsigned)port, ret);

	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = local_port_conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
		rte_eth_dev_socket_id(port), &txq_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not set up TX queue for "
			"port%u (%d)\n", (unsigned)port, ret);

	ret = rte_eth_dev_start(port);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not start port%u (%d)\n",
			(unsigned)port, ret);

	if (promiscuous_on) {
		ret = rte_eth_promiscuous_enable(port);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Could not enable promiscuous mode for port%u: %s\n",
				port, rte_strerror(-ret));
	}
}

/* Check the link status of all ports for up to 9 seconds, then print the result */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];

	printf("\nChecking link status\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status_text,
					sizeof(link_status_text), &link);
				printf("Port %d %s\n", portid,
					link_status_text);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}

static void
log_link_state(struct rte_kni *kni, int prev, struct rte_eth_link *link)
{
	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
	if (kni == NULL || link == NULL)
		return;

	rte_eth_link_to_str(link_status_text, sizeof(link_status_text), link);
	if (prev != link->link_status)
		RTE_LOG(INFO, APP, "%s NIC %s",
			rte_kni_get_name(kni),
			link_status_text);
}

/*
 * Monitor the link status of all ports and update the
 * corresponding KNI interface(s)
 */
static void *
monitor_all_ports_link_status(void *arg)
{
	uint16_t portid;
	struct rte_eth_link link;
	unsigned int i;
	struct kni_port_params **p = kni_port_params_array;
	int prev;
	(void) arg;
	int ret;

	while (monitor_links) {
		rte_delay_ms(500);
		RTE_ETH_FOREACH_DEV(portid) {
			if ((ports_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				RTE_LOG(ERR, APP,
					"Get link failed (port %u): %s\n",
					portid, rte_strerror(-ret));
				continue;
			}
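			/*
			 * Propagate the carrier state to every KNI device of
			 * this port; rte_kni_update_link() returns the
			 * previous state so changes can be logged.
			 */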
			for (i = 0; i < p[portid]->nb_kni; i++) {
				prev = rte_kni_update_link(p[portid]->kni[i],
						link.link_status);
				log_link_state(p[portid]->kni[i], prev, &link);
			}
		}
	}
	return NULL;
}

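/*
 * Reconfigure a port for a new MTU: stop it, adjust the maximum Rx packet
 * length, set up the single Rx/Tx queue pair again and restart it.
 */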
static int
kni_change_mtu_(uint16_t port_id, unsigned int new_mtu)
{
	int ret;
	uint16_t nb_rxd = NB_RXD;
	uint16_t nb_txd = NB_TXD;
	struct rte_eth_conf conf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, APP, "Change MTU of port %d to %u\n", port_id, new_mtu);

	/* Stop specific port */
	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_LOG(ERR, APP, "Failed to stop port %d: %s\n",
			port_id, rte_strerror(-ret));
		return ret;
	}

	memcpy(&conf, &port_conf, sizeof(conf));
	/* Set new MTU */
	if (new_mtu > RTE_ETHER_MAX_LEN)
		conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	/* mtu + length of header + length of FCS = max pkt length */
	conf.rxmode.max_rx_pkt_len = new_mtu + KNI_ENET_HEADER_SIZE +
		KNI_ENET_FCS_SIZE;
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0) {
		RTE_LOG(ERR, APP, "Fail to reconfigure port %d\n", port_id);
		return ret;
	}

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not adjust number of descriptors "
			"for port%u (%d)\n", (unsigned int)port_id,
			ret);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0) {
		RTE_LOG(ERR, APP,
			"Error during getting device (port %u) info: %s\n",
			port_id, strerror(-ret));

		return ret;
	}

	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = conf.rxmode.offloads;
	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
		rte_eth_dev_socket_id(port_id), &rxq_conf, pktmbuf_pool);
	if (ret < 0) {
		RTE_LOG(ERR, APP, "Fail to setup Rx queue of port %d\n",
			port_id);
		return ret;
	}

	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
		rte_eth_dev_socket_id(port_id), &txq_conf);
	if (ret < 0) {
		RTE_LOG(ERR, APP, "Fail to setup Tx queue of port %d\n",
			port_id);
		return ret;
	}

	/* Restart specific port */
	ret = rte_eth_dev_start(port_id);
	if (ret < 0) {
		RTE_LOG(ERR, APP, "Fail to restart port %d\n", port_id);
		return ret;
	}

	return 0;
}

/* Callback for request of changing MTU */
static int
kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
{
	int ret;

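	/* Pause the data-path lcores while the port is stopped and reconfigured */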
	rte_atomic32_inc(&kni_pause);
	ret = kni_change_mtu_(port_id, new_mtu);
	rte_atomic32_dec(&kni_pause);

	return ret;
}

/* Callback for request of configuring network interface up/down */
static int
kni_config_network_interface(uint16_t port_id, uint8_t if_up)
{
	int ret = 0;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, APP, "Configure network interface of %d %s\n",
		port_id, if_up ? "up" : "down");

	rte_atomic32_inc(&kni_pause);

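	/* Stop the port in both cases; only bring it back up for "up" requests */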
	if (if_up != 0) { /* Configure network interface up */
		ret = rte_eth_dev_stop(port_id);
		if (ret != 0) {
			RTE_LOG(ERR, APP, "Failed to stop port %d: %s\n",
				port_id, rte_strerror(-ret));
			rte_atomic32_dec(&kni_pause);
			return ret;
		}
		ret = rte_eth_dev_start(port_id);
	} else { /* Configure network interface down */
		ret = rte_eth_dev_stop(port_id);
		if (ret != 0) {
			RTE_LOG(ERR, APP, "Failed to stop port %d: %s\n",
				port_id, rte_strerror(-ret));
			rte_atomic32_dec(&kni_pause);
			return ret;
		}
	}

	rte_atomic32_dec(&kni_pause);

	if (ret < 0)
		RTE_LOG(ERR, APP, "Failed to start port %d\n", port_id);

	return ret;
}

static void
print_ethaddr(const char *name, struct rte_ether_addr *mac_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);
	RTE_LOG(INFO, APP, "\t%s%s\n", name, buf);
}

/* Callback for request of configuring mac address */
static int
kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
{
	int ret = 0;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, APP, "Configure mac address of %d\n", port_id);
	print_ethaddr("Address:", (struct rte_ether_addr *)mac_addr);

	ret = rte_eth_dev_default_mac_addr_set(port_id,
					(struct rte_ether_addr *)mac_addr);
	if (ret < 0)
		RTE_LOG(ERR, APP, "Failed to config mac_addr for port %d\n",
			port_id);

	return ret;
}

static int
kni_alloc(uint16_t port_id)
{
	uint8_t i;
	struct rte_kni *kni;
	struct rte_kni_conf conf;
	struct kni_port_params **params = kni_port_params_array;
	int ret;

	if (port_id >= RTE_MAX_ETHPORTS || !params[port_id])
		return -1;

	params[port_id]->nb_kni = params[port_id]->nb_lcore_k ?
				params[port_id]->nb_lcore_k : 1;

	for (i = 0; i < params[port_id]->nb_kni; i++) {
		/* Clear conf at first */
		memset(&conf, 0, sizeof(conf));
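		/*
		 * When kernel-thread lcores were given in --config, one KNI
		 * device is created per kthread and its kernel thread is
		 * force-bound to that lcore; otherwise a single unbound
		 * device is created for the port.
		 */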
		if (params[port_id]->nb_lcore_k) {
			snprintf(conf.name, RTE_KNI_NAMESIZE,
				"vEth%u_%u", port_id, i);
			conf.core_id = params[port_id]->lcore_k[i];
			conf.force_bind = 1;
		} else
			snprintf(conf.name, RTE_KNI_NAMESIZE,
				"vEth%u", port_id);
		conf.group_id = port_id;
		conf.mbuf_size = MAX_PACKET_SZ;
		/*
		 * The first KNI device associated with a port is the primary
		 * one in a multiple-kernel-thread environment; only it
		 * registers the control callbacks.
		 */
		if (i == 0) {
			struct rte_kni_ops ops;
			struct rte_eth_dev_info dev_info;

			ret = rte_eth_dev_info_get(port_id, &dev_info);
			if (ret != 0)
				rte_exit(EXIT_FAILURE,
					"Error during getting device (port %u) info: %s\n",
					port_id, strerror(-ret));

			/* Get the interface default mac address */
			ret = rte_eth_macaddr_get(port_id,
				(struct rte_ether_addr *)&conf.mac_addr);
			if (ret != 0)
				rte_exit(EXIT_FAILURE,
					"Failed to get MAC address (port %u): %s\n",
					port_id, rte_strerror(-ret));

			rte_eth_dev_get_mtu(port_id, &conf.mtu);

			conf.min_mtu = dev_info.min_mtu;
			conf.max_mtu = dev_info.max_mtu;

			memset(&ops, 0, sizeof(ops));
			ops.port_id = port_id;
			ops.change_mtu = kni_change_mtu;
			ops.config_network_if = kni_config_network_interface;
			ops.config_mac_address = kni_config_mac_address;

			kni = rte_kni_alloc(pktmbuf_pool, &conf, &ops);
		} else
			kni = rte_kni_alloc(pktmbuf_pool, &conf, NULL);

		if (!kni)
			rte_exit(EXIT_FAILURE, "Fail to create kni for "
				"port: %d\n", port_id);
		params[port_id]->kni[i] = kni;
	}

	return 0;
}

static int
kni_free_kni(uint16_t port_id)
{
	uint8_t i;
	int ret;
	struct kni_port_params **p = kni_port_params_array;

	if (port_id >= RTE_MAX_ETHPORTS || !p[port_id])
		return -1;

	for (i = 0; i < p[port_id]->nb_kni; i++) {
		if (rte_kni_release(p[port_id]->kni[i]))
			printf("Fail to release kni\n");
		p[port_id]->kni[i] = NULL;
	}
	ret = rte_eth_dev_stop(port_id);
	if (ret != 0)
		RTE_LOG(ERR, APP, "Failed to stop port %d: %s\n",
			port_id, rte_strerror(-ret));

	return 0;
}

/* Initialise ports/queues etc. and start main loop on each core */
int
main(int argc, char **argv)
{
	int ret;
	uint16_t nb_sys_ports, port;
	unsigned i;
	void *retval;
	pthread_t kni_link_tid;
	int pid;

	/* Associate the signal_handler function with the signals we handle */
	signal(SIGUSR1, signal_handler);
	signal(SIGUSR2, signal_handler);
	signal(SIGRTMIN, signal_handler);
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	/* Initialise EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not initialise EAL (%d)\n", ret);
	argc -= ret;
	argv += ret;

	/* Parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not parse input parameters\n");

	/* Create the mbuf pool */
	pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
		MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ, rte_socket_id());
	if (pktmbuf_pool == NULL) {
		rte_exit(EXIT_FAILURE, "Could not initialise mbuf pool\n");
		return -1;
	}

	/* Get number of ports found in scan */
	nb_sys_ports = rte_eth_dev_count_avail();
	if (nb_sys_ports == 0)
		rte_exit(EXIT_FAILURE, "No supported Ethernet device found\n");

	/* Check if the configured port ID is valid */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		if (kni_port_params_array[i] && !rte_eth_dev_is_valid_port(i))
			rte_exit(EXIT_FAILURE, "Configured invalid "
				"port ID %u\n", i);

	/* Initialize KNI subsystem */
	init_kni();

	/* Initialise each port */
	RTE_ETH_FOREACH_DEV(port) {
		/* Skip ports that are not enabled */
		if (!(ports_mask & (1 << port)))
			continue;
		init_port(port);

		if (port >= RTE_MAX_ETHPORTS)
			rte_exit(EXIT_FAILURE, "Cannot use more than "
				"%d ports for kni\n", RTE_MAX_ETHPORTS);

		kni_alloc(port);
	}
	check_all_ports_link_status(ports_mask);

	pid = getpid();
	RTE_LOG(INFO, APP, "========================\n");
	RTE_LOG(INFO, APP, "KNI Running\n");
	RTE_LOG(INFO, APP, "kill -SIGUSR1 %d\n", pid);
	RTE_LOG(INFO, APP, "    Show KNI Statistics.\n");
	RTE_LOG(INFO, APP, "kill -SIGUSR2 %d\n", pid);
	RTE_LOG(INFO, APP, "    Zero KNI Statistics.\n");
	RTE_LOG(INFO, APP, "========================\n");
	fflush(stdout);

	ret = rte_ctrl_thread_create(&kni_link_tid,
				     "KNI link status check", NULL,
				     monitor_all_ports_link_status, NULL);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"Could not create link status thread!\n");

	/* Launch per-lcore function on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MAIN);
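	/* Wait for all worker lcores; they return once kni_stop has been set */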
	RTE_LCORE_FOREACH_WORKER(i) {
		if (rte_eal_wait_lcore(i) < 0)
			return -1;
	}
	monitor_links = 0;
	pthread_join(kni_link_tid, &retval);

	/* Release resources */
	RTE_ETH_FOREACH_DEV(port) {
		if (!(ports_mask & (1 << port)))
			continue;
		kni_free_kni(port);
	}
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		if (kni_port_params_array[i]) {
			rte_free(kni_port_params_array[i]);
			kni_port_params_array[i] = NULL;
		}

	return 0;
}