1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
3 */
4
5 #include <arpa/inet.h>
6 #include <getopt.h>
7 #include <linux/if_ether.h>
8 #include <linux/if_vlan.h>
9 #include <linux/virtio_net.h>
10 #include <linux/virtio_ring.h>
11 #include <signal.h>
12 #include <stdint.h>
13 #include <sys/eventfd.h>
14 #include <sys/param.h>
15 #include <unistd.h>
16
17 #include <rte_cycles.h>
18 #include <rte_ethdev.h>
19 #include <rte_log.h>
20 #include <rte_string_fns.h>
21 #include <rte_malloc.h>
22 #include <rte_net.h>
23 #include <rte_vhost.h>
24 #include <rte_ip.h>
25 #include <rte_tcp.h>
26 #include <rte_pause.h>
27 #include <rte_dmadev.h>
28 #include <rte_vhost_async.h>
29
30 #include "main.h"
31
32 #ifndef MAX_QUEUES
33 #define MAX_QUEUES 128
34 #endif
35
36 #define NUM_MBUFS_DEFAULT 0x24000
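/* 0x24000 mbufs is 147456 in decimal, which matches the default quoted in the
 * --total-num-mbufs help text below.
 */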
37
38 /* the maximum number of external ports supported */
39 #define MAX_SUP_PORTS 1
40
41 #define MBUF_CACHE_SIZE 128
42 #define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
43
44 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
45
46 #define BURST_RX_WAIT_US 15 /* Defines how long we wait between retries on RX */
47 #define BURST_RX_RETRIES 4 /* Number of retries on RX. */
48
49 #define JUMBO_FRAME_MAX_SIZE 0x2600
50 #define MAX_MTU (JUMBO_FRAME_MAX_SIZE - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN))
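/*
 * Worked example: RTE_ETHER_HDR_LEN is 14 bytes and RTE_ETHER_CRC_LEN is 4 bytes,
 * so MAX_MTU = 0x2600 (9728) - 18 = 9710 bytes.
 */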
51
52 /* State of virtio device. */
53 #define DEVICE_MAC_LEARNING 0
54 #define DEVICE_RX 1
55 #define DEVICE_SAFE_REMOVE 2
56
57 /* Configurable number of RX/TX ring descriptors */
58 #define RTE_TEST_RX_DESC_DEFAULT 1024
59 #define RTE_TEST_TX_DESC_DEFAULT 512
60
61 #define INVALID_PORT_ID 0xFF
62 #define INVALID_DMA_ID -1
63
64 #define DMA_RING_SIZE 4096
65
66 /* number of mbufs in all pools - if specified on command-line. */
67 static int total_num_mbufs = NUM_MBUFS_DEFAULT;
68
69 struct dma_for_vhost dma_bind[RTE_MAX_VHOST_DEVICE];
70 int16_t dmas_id[RTE_DMADEV_DEFAULT_MAX];
71 static int dma_count;
72
73 /* mask of enabled ports */
74 static uint32_t enabled_port_mask = 0;
75
76 /* Promiscuous mode */
77 static uint32_t promiscuous;
78
79 /* number of devices/queues to support */
80 static uint32_t num_queues = 0;
81 static uint32_t num_devices;
82
83 static struct rte_mempool *mbuf_pool;
84 static int mergeable;
85
86 /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
87 typedef enum {
88 VM2VM_DISABLED = 0,
89 VM2VM_SOFTWARE = 1,
90 VM2VM_HARDWARE = 2,
91 VM2VM_LAST
92 } vm2vm_type;
93 static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
94
95 /* Enable stats. */
96 static uint32_t enable_stats = 0;
97 /* Enable retries on RX. */
98 static uint32_t enable_retry = 1;
99
100 /* Disable TX checksum offload */
101 static uint32_t enable_tx_csum;
102
103 /* Disable TSO offload */
104 static uint32_t enable_tso;
105
106 static int client_mode;
107
108 static int builtin_net_driver;
109
110 /* Specify the timeout (in microseconds) between retries on RX. */
111 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
112 /* Specify the number of retries on RX. */
113 static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
114
115 /* Socket file paths. Can be set by user */
116 static char *socket_files;
117 static int nb_sockets;
118
119 /* empty VMDq configuration structure. Filled in programmatically */
120 static struct rte_eth_conf vmdq_conf_default = {
121 .rxmode = {
122 .mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY,
123 .split_hdr_size = 0,
124 /*
125 	 * VLAN strip is necessary for 1G NICs such as the I350;
126 	 * it fixes a bug where IPv4 forwarding in the guest cannot
127 	 * forward packets from one virtio device to another.
128 */
129 .offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
130 },
131
132 .txmode = {
133 .mq_mode = RTE_ETH_MQ_TX_NONE,
134 .offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
135 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
136 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
137 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
138 RTE_ETH_TX_OFFLOAD_TCP_TSO),
139 },
140 .rx_adv_conf = {
141 /*
142 * should be overridden separately in code with
143 * appropriate values
144 */
145 .vmdq_rx_conf = {
146 .nb_queue_pools = RTE_ETH_8_POOLS,
147 .enable_default_pool = 0,
148 .default_pool = 0,
149 .nb_pool_maps = 0,
150 .pool_map = {{0, 0},},
151 },
152 },
153 };
154
155
156 static unsigned lcore_ids[RTE_MAX_LCORE];
157 static uint16_t ports[RTE_MAX_ETHPORTS];
158 static unsigned num_ports = 0; /**< The number of ports specified in command line */
159 static uint16_t num_pf_queues, num_vmdq_queues;
160 static uint16_t vmdq_pool_base, vmdq_queue_base;
161 static uint16_t queues_per_pool;
162
163 const uint16_t vlan_tags[] = {
164 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
165 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
166 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
167 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
168 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
169 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
170 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
171 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
172 };
173
174 /* ethernet addresses of ports */
175 static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
176
177 static struct vhost_dev_tailq_list vhost_dev_list =
178 TAILQ_HEAD_INITIALIZER(vhost_dev_list);
179
180 static struct lcore_info lcore_info[RTE_MAX_LCORE];
181
182 /* Used for queueing bursts of TX packets. */
183 struct mbuf_table {
184 unsigned len;
185 unsigned txq_id;
186 struct rte_mbuf *m_table[MAX_PKT_BURST];
187 };
188
189 struct vhost_bufftable {
190 uint32_t len;
191 uint64_t pre_tsc;
192 struct rte_mbuf *m_table[MAX_PKT_BURST];
193 };
194
195 /* TX queue for each data core. */
196 struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
197
198 /*
199 * Vhost TX buffer for each data core.
200 * Every data core maintains a TX buffer for every vhost device,
201 * which is used for batch pkts enqueue for higher performance.
202 */
203 struct vhost_bufftable *vhost_txbuff[RTE_MAX_LCORE * RTE_MAX_VHOST_DEVICE];
204
205 #define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
206 / US_PER_S * BURST_TX_DRAIN_US)
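/*
 * Sanity check of the macro above (assuming, purely for illustration, a 2 GHz TSC):
 * (2000000000 + 999999) / 1000000 = 2000 cycles per us, so the drain period is
 * 2000 * 100 = 200000 TSC cycles, i.e. roughly 100 us.
 */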
207
208 static inline bool
209 is_dma_configured(int16_t dev_id)
210 {
211 int i;
212
213 for (i = 0; i < dma_count; i++)
214 if (dmas_id[i] == dev_id)
215 return true;
216 return false;
217 }
218
219 static inline int
220 open_dma(const char *value)
221 {
222 struct dma_for_vhost *dma_info = dma_bind;
223 char *input = strndup(value, strlen(value) + 1);
224 char *addrs = input;
225 char *ptrs[2];
226 char *start, *end, *substr;
227 int64_t vid;
228
229 struct rte_dma_info info;
230 struct rte_dma_conf dev_config = { .nb_vchans = 1 };
231 struct rte_dma_vchan_conf qconf = {
232 .direction = RTE_DMA_DIR_MEM_TO_MEM,
233 .nb_desc = DMA_RING_SIZE
234 };
235
236 int dev_id;
237 int ret = 0;
238 uint16_t i = 0;
239 char *dma_arg[RTE_MAX_VHOST_DEVICE];
240 int args_nr;
241
242 while (isblank(*addrs))
243 addrs++;
244 if (*addrs == '\0') {
245 ret = -1;
246 goto out;
247 }
248
249 /* process DMA devices within bracket. */
250 addrs++;
251 substr = strtok(addrs, ";]");
252 if (!substr) {
253 ret = -1;
254 goto out;
255 }
256
257 args_nr = rte_strsplit(substr, strlen(substr), dma_arg, RTE_MAX_VHOST_DEVICE, ',');
258 if (args_nr <= 0) {
259 ret = -1;
260 goto out;
261 }
262
263 while (i < args_nr) {
264 char *arg_temp = dma_arg[i];
265 uint8_t sub_nr;
266
267 sub_nr = rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');
268 if (sub_nr != 2) {
269 ret = -1;
270 goto out;
271 }
272
273 start = strstr(ptrs[0], "txd");
274 if (start == NULL) {
275 ret = -1;
276 goto out;
277 }
278
279 start += 3;
280 vid = strtol(start, &end, 0);
281 if (end == start) {
282 ret = -1;
283 goto out;
284 }
285
286 dev_id = rte_dma_get_dev_id_by_name(ptrs[1]);
287 if (dev_id < 0) {
288 RTE_LOG(ERR, VHOST_CONFIG, "Fail to find DMA %s.\n", ptrs[1]);
289 ret = -1;
290 goto out;
291 }
292
293 /* DMA device is already configured, so skip */
294 if (is_dma_configured(dev_id))
295 goto done;
296
297 if (rte_dma_info_get(dev_id, &info) != 0) {
298 RTE_LOG(ERR, VHOST_CONFIG, "Error with rte_dma_info_get()\n");
299 ret = -1;
300 goto out;
301 }
302
303 if (info.max_vchans < 1) {
304 RTE_LOG(ERR, VHOST_CONFIG, "No channels available on device %d\n", dev_id);
305 ret = -1;
306 goto out;
307 }
308
309 if (rte_dma_configure(dev_id, &dev_config) != 0) {
310 RTE_LOG(ERR, VHOST_CONFIG, "Fail to configure DMA %d.\n", dev_id);
311 ret = -1;
312 goto out;
313 }
314
315 /* Check the max desc supported by DMA device */
316 rte_dma_info_get(dev_id, &info);
317 if (info.nb_vchans != 1) {
318 RTE_LOG(ERR, VHOST_CONFIG, "No configured queues reported by DMA %d.\n",
319 dev_id);
320 ret = -1;
321 goto out;
322 }
323
324 qconf.nb_desc = RTE_MIN(DMA_RING_SIZE, info.max_desc);
325
326 if (rte_dma_vchan_setup(dev_id, 0, &qconf) != 0) {
327 RTE_LOG(ERR, VHOST_CONFIG, "Fail to set up DMA %d.\n", dev_id);
328 ret = -1;
329 goto out;
330 }
331
332 if (rte_dma_start(dev_id) != 0) {
333 RTE_LOG(ERR, VHOST_CONFIG, "Fail to start DMA %u.\n", dev_id);
334 ret = -1;
335 goto out;
336 }
337
338 dmas_id[dma_count++] = dev_id;
339
340 done:
341 (dma_info + vid)->dmas[VIRTIO_RXQ].dev_id = dev_id;
342 i++;
343 }
344 out:
345 free(input);
346 return ret;
347 }
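/*
 * Example --dmas value accepted by the parser above (the DMA device names are
 * hypothetical and depend on the platform):
 *
 *   --dmas [txd0@0000:00:04.0;txd1@0000:00:04.1]
 *
 * Each "txd<N>" token binds vhost device N's enqueue path to the named DMA channel.
 */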
348
349 /*
350 * Builds up the correct configuration for VMDQ VLAN pool map
351 * according to the pool & queue limits.
352 */
353 static inline int
354 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
355 {
356 struct rte_eth_vmdq_rx_conf conf;
357 struct rte_eth_vmdq_rx_conf *def_conf =
358 &vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
359 unsigned i;
360
361 memset(&conf, 0, sizeof(conf));
362 conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
363 conf.nb_pool_maps = num_devices;
364 conf.enable_loop_back = def_conf->enable_loop_back;
365 conf.rx_mode = def_conf->rx_mode;
366
367 for (i = 0; i < conf.nb_pool_maps; i++) {
368 conf.pool_map[i].vlan_id = vlan_tags[ i ];
369 conf.pool_map[i].pools = (1UL << i);
370 }
371
372 (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
373 (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_rx_conf, &conf,
374 sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
375 return 0;
376 }
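/*
 * For example, with num_devices == 2 the loop above maps VLAN 1000 to pool 0
 * (pools mask 0x1) and VLAN 1001 to pool 1 (pools mask 0x2), following the
 * vlan_tags[] table.
 */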
377
378 /*
379 * Initialises a given port using global settings and with the rx buffers
380 * coming from the mbuf_pool passed as parameter
381 */
382 static inline int
383 port_init(uint16_t port)
384 {
385 struct rte_eth_dev_info dev_info;
386 struct rte_eth_conf port_conf;
387 struct rte_eth_rxconf *rxconf;
388 struct rte_eth_txconf *txconf;
389 int16_t rx_rings, tx_rings;
390 uint16_t rx_ring_size, tx_ring_size;
391 int retval;
392 uint16_t q;
393
394 /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
395 retval = rte_eth_dev_info_get(port, &dev_info);
396 if (retval != 0) {
397 RTE_LOG(ERR, VHOST_PORT,
398 "Error during getting device (port %u) info: %s\n",
399 port, strerror(-retval));
400
401 return retval;
402 }
403
404 rxconf = &dev_info.default_rxconf;
405 txconf = &dev_info.default_txconf;
406 rxconf->rx_drop_en = 1;
407
408 	/* Configure the number of supported virtio devices based on VMDQ limits. */
409 num_devices = dev_info.max_vmdq_pools;
410
411 rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
412 tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
413
414 tx_rings = (uint16_t)rte_lcore_count();
415
416 if (mergeable) {
417 if (dev_info.max_mtu != UINT16_MAX && dev_info.max_rx_pktlen > dev_info.max_mtu)
418 vmdq_conf_default.rxmode.mtu = dev_info.max_mtu;
419 else
420 vmdq_conf_default.rxmode.mtu = MAX_MTU;
421 }
422
423 /* Get port configuration. */
424 retval = get_eth_conf(&port_conf, num_devices);
425 if (retval < 0)
426 return retval;
427 /* NIC queues are divided into pf queues and vmdq queues. */
428 num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
429 queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
430 num_vmdq_queues = num_devices * queues_per_pool;
431 num_queues = num_pf_queues + num_vmdq_queues;
432 vmdq_queue_base = dev_info.vmdq_queue_base;
433 vmdq_pool_base = dev_info.vmdq_pool_base;
434 printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
435 num_pf_queues, num_devices, queues_per_pool);
436
437 if (!rte_eth_dev_is_valid_port(port))
438 return -1;
439
440 rx_rings = (uint16_t)dev_info.max_rx_queues;
441 if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
442 port_conf.txmode.offloads |=
443 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
444 /* Configure ethernet device. */
445 retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
446 if (retval != 0) {
447 RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
448 port, strerror(-retval));
449 return retval;
450 }
451
452 retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
453 &tx_ring_size);
454 if (retval != 0) {
455 RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
456 "for port %u: %s.\n", port, strerror(-retval));
457 return retval;
458 }
459 if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
460 RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
461 "for Rx queues on port %u.\n", port);
462 return -1;
463 }
464
465 /* Setup the queues. */
466 rxconf->offloads = port_conf.rxmode.offloads;
467 for (q = 0; q < rx_rings; q ++) {
468 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
469 rte_eth_dev_socket_id(port),
470 rxconf,
471 mbuf_pool);
472 if (retval < 0) {
473 RTE_LOG(ERR, VHOST_PORT,
474 "Failed to setup rx queue %u of port %u: %s.\n",
475 q, port, strerror(-retval));
476 return retval;
477 }
478 }
479 txconf->offloads = port_conf.txmode.offloads;
480 for (q = 0; q < tx_rings; q ++) {
481 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
482 rte_eth_dev_socket_id(port),
483 txconf);
484 if (retval < 0) {
485 RTE_LOG(ERR, VHOST_PORT,
486 "Failed to setup tx queue %u of port %u: %s.\n",
487 q, port, strerror(-retval));
488 return retval;
489 }
490 }
491
492 /* Start the device. */
493 retval = rte_eth_dev_start(port);
494 if (retval < 0) {
495 RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
496 port, strerror(-retval));
497 return retval;
498 }
499
500 if (promiscuous) {
501 retval = rte_eth_promiscuous_enable(port);
502 if (retval != 0) {
503 RTE_LOG(ERR, VHOST_PORT,
504 "Failed to enable promiscuous mode on port %u: %s\n",
505 port, rte_strerror(-retval));
506 return retval;
507 }
508 }
509
510 retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
511 if (retval < 0) {
512 RTE_LOG(ERR, VHOST_PORT,
513 "Failed to get MAC address on port %u: %s\n",
514 port, rte_strerror(-retval));
515 return retval;
516 }
517
518 RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
519 RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
520 " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
521 port, RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port]));
522
523 return 0;
524 }
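/*
 * Illustration of the queue split computed in port_init() (numbers are hypothetical):
 * a NIC reporting max_rx_queues = 128 and vmdq_queue_num = 64 with 64 VMDQ pools
 * gives num_pf_queues = 64, queues_per_pool = 1 and num_vmdq_queues = 64.
 */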
525
526 /*
527 * Set socket file path.
528 */
529 static int
530 us_vhost_parse_socket_path(const char *q_arg)
531 {
532 char *old;
533
534 /* parse number string */
535 if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
536 return -1;
537
538 old = socket_files;
539 socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
540 if (socket_files == NULL) {
541 free(old);
542 return -1;
543 }
544
545 strlcpy(socket_files + nb_sockets * PATH_MAX, q_arg, PATH_MAX);
546 nb_sockets++;
547
548 return 0;
549 }
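/*
 * socket_files is a flat array of PATH_MAX-sized slots, so passing
 * "--socket-file /tmp/sock0 --socket-file /tmp/sock1" (the paths are only an
 * example) stores the two paths at offsets 0 and PATH_MAX respectively.
 */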
550
551 /*
552 * Parse the portmask provided at run time.
553 */
554 static int
555 parse_portmask(const char *portmask)
556 {
557 char *end = NULL;
558 unsigned long pm;
559
560 errno = 0;
561
562 /* parse hexadecimal string */
563 pm = strtoul(portmask, &end, 16);
564 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
565 return 0;
566
567 return pm;
568
569 }
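/* For instance, "-p 0x1" selects port 0 only, while "-p 0x3" selects ports 0 and 1. */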
570
571 /*
572 * Parse num options at run time.
573 */
574 static int
575 parse_num_opt(const char *q_arg, uint32_t max_valid_value)
576 {
577 char *end = NULL;
578 unsigned long num;
579
580 errno = 0;
581
582 /* parse unsigned int string */
583 num = strtoul(q_arg, &end, 10);
584 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
585 return -1;
586
587 if (num > max_valid_value)
588 return -1;
589
590 return num;
591
592 }
593
594 /*
595 * Display usage
596 */
597 static void
598 us_vhost_usage(const char *prgname)
599 {
600 RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
601 " --vm2vm [0|1|2]\n"
602 		" --rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
603 " --socket-file <path>\n"
604 " --nb-devices ND\n"
605 " -p PORTMASK: Set mask for ports to be used by application\n"
606 " --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
607 " --rx-retry [0|1]: disable/enable(default) retries on Rx. Enable retry if destination queue is full\n"
608 	" --rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. This only takes effect if retries on RX are enabled\n"
609 	" --rx-retry-num [0-N]: the number of retries on RX. This only takes effect if retries on RX are enabled\n"
610 " --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
611 " --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
612 " --socket-file: The path of the socket file.\n"
613 	" --tx-csum [0|1]: disable/enable TX checksum offload.\n"
614 	" --tso [0|1]: disable/enable TCP segmentation offload.\n"
615 	" --client: register a vhost-user socket in client mode.\n"
616 	" --dmas: register a DMA channel for a specific vhost device.\n"
617 " --total-num-mbufs [0-N] set the number of mbufs to be allocated in mbuf pools, the default value is 147456.\n",
618 prgname);
619 }
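/*
 * Minimal invocation sketch matching the options above (the binary name, core list
 * and socket path are examples only):
 *
 *   ./dpdk-vhost -l 1-2 -n 4 -- -p 0x1 --socket-file /tmp/sock0 --stats 1
 */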
620
621 enum {
622 #define OPT_VM2VM "vm2vm"
623 OPT_VM2VM_NUM = 256,
624 #define OPT_RX_RETRY "rx-retry"
625 OPT_RX_RETRY_NUM,
626 #define OPT_RX_RETRY_DELAY "rx-retry-delay"
627 OPT_RX_RETRY_DELAY_NUM,
628 #define OPT_RX_RETRY_NUMB "rx-retry-num"
629 OPT_RX_RETRY_NUMB_NUM,
630 #define OPT_MERGEABLE "mergeable"
631 OPT_MERGEABLE_NUM,
632 #define OPT_STATS "stats"
633 OPT_STATS_NUM,
634 #define OPT_SOCKET_FILE "socket-file"
635 OPT_SOCKET_FILE_NUM,
636 #define OPT_TX_CSUM "tx-csum"
637 OPT_TX_CSUM_NUM,
638 #define OPT_TSO "tso"
639 OPT_TSO_NUM,
640 #define OPT_CLIENT "client"
641 OPT_CLIENT_NUM,
642 #define OPT_BUILTIN_NET_DRIVER "builtin-net-driver"
643 OPT_BUILTIN_NET_DRIVER_NUM,
644 #define OPT_DMAS "dmas"
645 OPT_DMAS_NUM,
646 #define OPT_NUM_MBUFS "total-num-mbufs"
647 OPT_NUM_MBUFS_NUM,
648 };
649
650 /*
651 * Parse the arguments given in the command line of the application.
652 */
653 static int
654 us_vhost_parse_args(int argc, char **argv)
655 {
656 int opt, ret;
657 int option_index;
658 unsigned i;
659 const char *prgname = argv[0];
660 static struct option long_option[] = {
661 {OPT_VM2VM, required_argument,
662 NULL, OPT_VM2VM_NUM},
663 {OPT_RX_RETRY, required_argument,
664 NULL, OPT_RX_RETRY_NUM},
665 {OPT_RX_RETRY_DELAY, required_argument,
666 NULL, OPT_RX_RETRY_DELAY_NUM},
667 {OPT_RX_RETRY_NUMB, required_argument,
668 NULL, OPT_RX_RETRY_NUMB_NUM},
669 {OPT_MERGEABLE, required_argument,
670 NULL, OPT_MERGEABLE_NUM},
671 {OPT_STATS, required_argument,
672 NULL, OPT_STATS_NUM},
673 {OPT_SOCKET_FILE, required_argument,
674 NULL, OPT_SOCKET_FILE_NUM},
675 {OPT_TX_CSUM, required_argument,
676 NULL, OPT_TX_CSUM_NUM},
677 {OPT_TSO, required_argument,
678 NULL, OPT_TSO_NUM},
679 {OPT_CLIENT, no_argument,
680 NULL, OPT_CLIENT_NUM},
681 {OPT_BUILTIN_NET_DRIVER, no_argument,
682 NULL, OPT_BUILTIN_NET_DRIVER_NUM},
683 {OPT_DMAS, required_argument,
684 NULL, OPT_DMAS_NUM},
685 {OPT_NUM_MBUFS, required_argument,
686 NULL, OPT_NUM_MBUFS_NUM},
687 {NULL, 0, 0, 0},
688 };
689
690 /* Parse command line */
691 while ((opt = getopt_long(argc, argv, "p:P",
692 long_option, &option_index)) != EOF) {
693 switch (opt) {
694 /* Portmask */
695 case 'p':
696 enabled_port_mask = parse_portmask(optarg);
697 if (enabled_port_mask == 0) {
698 RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
699 us_vhost_usage(prgname);
700 return -1;
701 }
702 break;
703
704 case 'P':
705 promiscuous = 1;
706 vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
707 RTE_ETH_VMDQ_ACCEPT_BROADCAST |
708 RTE_ETH_VMDQ_ACCEPT_MULTICAST;
709 break;
710
711 case OPT_VM2VM_NUM:
712 ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
713 if (ret == -1) {
714 RTE_LOG(INFO, VHOST_CONFIG,
715 "Invalid argument for "
716 "vm2vm [0|1|2]\n");
717 us_vhost_usage(prgname);
718 return -1;
719 }
720 vm2vm_mode = (vm2vm_type)ret;
721 break;
722
723 case OPT_RX_RETRY_NUM:
724 ret = parse_num_opt(optarg, 1);
725 if (ret == -1) {
726 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
727 us_vhost_usage(prgname);
728 return -1;
729 }
730 enable_retry = ret;
731 break;
732
733 case OPT_TX_CSUM_NUM:
734 ret = parse_num_opt(optarg, 1);
735 if (ret == -1) {
736 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
737 us_vhost_usage(prgname);
738 return -1;
739 }
740 enable_tx_csum = ret;
741 break;
742
743 case OPT_TSO_NUM:
744 ret = parse_num_opt(optarg, 1);
745 if (ret == -1) {
746 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
747 us_vhost_usage(prgname);
748 return -1;
749 }
750 enable_tso = ret;
751 break;
752
753 case OPT_RX_RETRY_DELAY_NUM:
754 ret = parse_num_opt(optarg, INT32_MAX);
755 if (ret == -1) {
756 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
757 us_vhost_usage(prgname);
758 return -1;
759 }
760 burst_rx_delay_time = ret;
761 break;
762
763 case OPT_RX_RETRY_NUMB_NUM:
764 ret = parse_num_opt(optarg, INT32_MAX);
765 if (ret == -1) {
766 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
767 us_vhost_usage(prgname);
768 return -1;
769 }
770 burst_rx_retry_num = ret;
771 break;
772
773 case OPT_MERGEABLE_NUM:
774 ret = parse_num_opt(optarg, 1);
775 if (ret == -1) {
776 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
777 us_vhost_usage(prgname);
778 return -1;
779 }
780 mergeable = !!ret;
781 break;
782
783 case OPT_STATS_NUM:
784 ret = parse_num_opt(optarg, INT32_MAX);
785 if (ret == -1) {
786 RTE_LOG(INFO, VHOST_CONFIG,
787 "Invalid argument for stats [0..N]\n");
788 us_vhost_usage(prgname);
789 return -1;
790 }
791 enable_stats = ret;
792 break;
793
794 /* Set socket file path. */
795 case OPT_SOCKET_FILE_NUM:
796 if (us_vhost_parse_socket_path(optarg) == -1) {
797 RTE_LOG(INFO, VHOST_CONFIG,
798 "Invalid argument for socket name (Max %d characters)\n",
799 PATH_MAX);
800 us_vhost_usage(prgname);
801 return -1;
802 }
803 break;
804
805 case OPT_DMAS_NUM:
806 if (open_dma(optarg) == -1) {
807 RTE_LOG(INFO, VHOST_CONFIG,
808 "Wrong DMA args\n");
809 us_vhost_usage(prgname);
810 return -1;
811 }
812 break;
813
814 case OPT_NUM_MBUFS_NUM:
815 ret = parse_num_opt(optarg, INT32_MAX);
816 if (ret == -1) {
817 RTE_LOG(INFO, VHOST_CONFIG,
818 "Invalid argument for total-num-mbufs [0..N]\n");
819 us_vhost_usage(prgname);
820 return -1;
821 }
822
823 if (total_num_mbufs < ret)
824 total_num_mbufs = ret;
825 break;
826
827 case OPT_CLIENT_NUM:
828 client_mode = 1;
829 break;
830
831 case OPT_BUILTIN_NET_DRIVER_NUM:
832 builtin_net_driver = 1;
833 break;
834
835 /* Invalid option - print options. */
836 default:
837 us_vhost_usage(prgname);
838 return -1;
839 }
840 }
841
842 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
843 if (enabled_port_mask & (1 << i))
844 ports[num_ports++] = i;
845 }
846
847 if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
848 		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
849 			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
850 return -1;
851 }
852
853 return 0;
854 }
855
856 /*
857  * Update the global var NUM_PORTS and the array PORTS according to the number of
858  * system ports, and return the number of valid ports
859 */
860 static unsigned check_ports_num(unsigned nb_ports)
861 {
862 unsigned valid_num_ports = num_ports;
863 unsigned portid;
864
865 if (num_ports > nb_ports) {
866 RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
867 num_ports, nb_ports);
868 num_ports = nb_ports;
869 }
870
871 for (portid = 0; portid < num_ports; portid ++) {
872 if (!rte_eth_dev_is_valid_port(ports[portid])) {
873 RTE_LOG(INFO, VHOST_PORT,
874 "\nSpecified port ID(%u) is not valid\n",
875 ports[portid]);
876 ports[portid] = INVALID_PORT_ID;
877 valid_num_ports--;
878 }
879 }
880 return valid_num_ports;
881 }
882
883 static __rte_always_inline struct vhost_dev *
884 find_vhost_dev(struct rte_ether_addr *mac)
885 {
886 struct vhost_dev *vdev;
887
888 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
889 if (vdev->ready == DEVICE_RX &&
890 rte_is_same_ether_addr(mac, &vdev->mac_address))
891 return vdev;
892 }
893
894 return NULL;
895 }
896
897 /*
898  * This function learns the MAC address of the device and registers it, along with a
899  * VLAN tag, in a VMDQ pool.
900 */
901 static int
902 link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
903 {
904 struct rte_ether_hdr *pkt_hdr;
905 int i, ret;
906
907 /* Learn MAC address of guest device from packet */
908 pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
909
910 if (find_vhost_dev(&pkt_hdr->src_addr)) {
911 RTE_LOG(ERR, VHOST_DATA,
912 "(%d) device is using a registered MAC!\n",
913 vdev->vid);
914 return -1;
915 }
916
917 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
918 vdev->mac_address.addr_bytes[i] =
919 pkt_hdr->src_addr.addr_bytes[i];
920
921 /* vlan_tag currently uses the device_id. */
922 vdev->vlan_tag = vlan_tags[vdev->vid];
923
924 /* Print out VMDQ registration info. */
925 RTE_LOG(INFO, VHOST_DATA,
926 "(%d) mac " RTE_ETHER_ADDR_PRT_FMT " and vlan %d registered\n",
927 vdev->vid, RTE_ETHER_ADDR_BYTES(&vdev->mac_address),
928 vdev->vlan_tag);
929
930 /* Register the MAC address. */
931 ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
932 (uint32_t)vdev->vid + vmdq_pool_base);
933 if (ret)
934 RTE_LOG(ERR, VHOST_DATA,
935 "(%d) failed to add device MAC address to VMDQ\n",
936 vdev->vid);
937
938 rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);
939
940 /* Set device as ready for RX. */
941 vdev->ready = DEVICE_RX;
942
943 return 0;
944 }
945
946 /*
947 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
948 * queue before disabling RX on the device.
949 */
950 static inline void
951 unlink_vmdq(struct vhost_dev *vdev)
952 {
953 unsigned i = 0;
954 unsigned rx_count;
955 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
956
957 if (vdev->ready == DEVICE_RX) {
958 /*clear MAC and VLAN settings*/
959 rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
960 for (i = 0; i < 6; i++)
961 vdev->mac_address.addr_bytes[i] = 0;
962
963 vdev->vlan_tag = 0;
964
965 /*Clear out the receive buffers*/
966 rx_count = rte_eth_rx_burst(ports[0],
967 (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
968
969 while (rx_count) {
970 for (i = 0; i < rx_count; i++)
971 rte_pktmbuf_free(pkts_burst[i]);
972
973 rx_count = rte_eth_rx_burst(ports[0],
974 (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
975 }
976
977 vdev->ready = DEVICE_MAC_LEARNING;
978 }
979 }
980
981 static inline void
982 free_pkts(struct rte_mbuf **pkts, uint16_t n)
983 {
984 while (n--)
985 rte_pktmbuf_free(pkts[n]);
986 }
987
988 static __rte_always_inline void
989 complete_async_pkts(struct vhost_dev *vdev)
990 {
991 struct rte_mbuf *p_cpl[MAX_PKT_BURST];
992 uint16_t complete_count;
993 int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
994
995 complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
996 VIRTIO_RXQ, p_cpl, MAX_PKT_BURST, dma_id, 0);
997 if (complete_count)
998 free_pkts(p_cpl, complete_count);
999
1000 }
1001
1002 static __rte_always_inline void
1003 sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
1004 struct rte_mbuf *m)
1005 {
1006 uint16_t ret;
1007
1008 if (builtin_net_driver) {
1009 ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
1010 } else {
1011 ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
1012 }
1013
1014 if (enable_stats) {
1015 __atomic_add_fetch(&dst_vdev->stats.rx_total_atomic, 1,
1016 __ATOMIC_SEQ_CST);
1017 __atomic_add_fetch(&dst_vdev->stats.rx_atomic, ret,
1018 __ATOMIC_SEQ_CST);
1019 src_vdev->stats.tx_total++;
1020 src_vdev->stats.tx += ret;
1021 }
1022 }
1023
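/*
 * Note on buffer ownership in drain_vhost() below: when the async path is used,
 * submitted mbufs stay in flight until complete_async_pkts() reaps them, which is
 * why free_pkts() is only called here for the sync paths and for packets the
 * async enqueue rejected.
 */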
1024 static __rte_always_inline void
1025 drain_vhost(struct vhost_dev *vdev)
1026 {
1027 uint16_t ret;
1028 uint32_t buff_idx = rte_lcore_id() * RTE_MAX_VHOST_DEVICE + vdev->vid;
1029 uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
1030 struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
1031
1032 if (builtin_net_driver) {
1033 ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
1034 } else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
1035 uint16_t enqueue_fail = 0;
1036 int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
1037
1038 complete_async_pkts(vdev);
1039 ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit, dma_id, 0);
1040
1041 enqueue_fail = nr_xmit - ret;
1042 if (enqueue_fail)
1043 free_pkts(&m[ret], nr_xmit - ret);
1044 } else {
1045 ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
1046 m, nr_xmit);
1047 }
1048
1049 if (enable_stats) {
1050 __atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
1051 __ATOMIC_SEQ_CST);
1052 __atomic_add_fetch(&vdev->stats.rx_atomic, ret,
1053 __ATOMIC_SEQ_CST);
1054 }
1055
1056 if (!dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled)
1057 free_pkts(m, nr_xmit);
1058 }
1059
1060 static __rte_always_inline void
1061 drain_vhost_table(void)
1062 {
1063 uint16_t lcore_id = rte_lcore_id();
1064 struct vhost_bufftable *vhost_txq;
1065 struct vhost_dev *vdev;
1066 uint64_t cur_tsc;
1067
1068 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1069 if (unlikely(vdev->remove == 1))
1070 continue;
1071
1072 vhost_txq = vhost_txbuff[lcore_id * RTE_MAX_VHOST_DEVICE + vdev->vid];
1073
1074 cur_tsc = rte_rdtsc();
1075 if (unlikely(cur_tsc - vhost_txq->pre_tsc
1076 > MBUF_TABLE_DRAIN_TSC)) {
1077 RTE_LOG_DP(DEBUG, VHOST_DATA,
1078 "Vhost TX queue drained after timeout with burst size %u\n",
1079 vhost_txq->len);
1080 drain_vhost(vdev);
1081 vhost_txq->len = 0;
1082 vhost_txq->pre_tsc = cur_tsc;
1083 }
1084 }
1085 }
1086
1087 /*
1088  * Check if the packet destination MAC address is for a local device. If so, put
1089  * the packet on that device's RX queue. If not, return.
1090 */
1091 static __rte_always_inline int
1092 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
1093 {
1094 struct rte_ether_hdr *pkt_hdr;
1095 struct vhost_dev *dst_vdev;
1096 struct vhost_bufftable *vhost_txq;
1097 uint16_t lcore_id = rte_lcore_id();
1098 pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1099
1100 dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
1101 if (!dst_vdev)
1102 return -1;
1103
1104 if (vdev->vid == dst_vdev->vid) {
1105 RTE_LOG_DP(DEBUG, VHOST_DATA,
1106 			"(%d) TX: src and dst MAC is the same. Dropping packet.\n",
1107 vdev->vid);
1108 return 0;
1109 }
1110
1111 RTE_LOG_DP(DEBUG, VHOST_DATA,
1112 "(%d) TX: MAC address is local\n", dst_vdev->vid);
1113
1114 if (unlikely(dst_vdev->remove)) {
1115 RTE_LOG_DP(DEBUG, VHOST_DATA,
1116 "(%d) device is marked for removal\n", dst_vdev->vid);
1117 return 0;
1118 }
1119
1120 vhost_txq = vhost_txbuff[lcore_id * RTE_MAX_VHOST_DEVICE + dst_vdev->vid];
1121 vhost_txq->m_table[vhost_txq->len++] = m;
1122
1123 if (enable_stats) {
1124 vdev->stats.tx_total++;
1125 vdev->stats.tx++;
1126 }
1127
1128 if (unlikely(vhost_txq->len == MAX_PKT_BURST)) {
1129 drain_vhost(dst_vdev);
1130 vhost_txq->len = 0;
1131 vhost_txq->pre_tsc = rte_rdtsc();
1132 }
1133 return 0;
1134 }
1135
1136 /*
1137  * Check if the destination MAC of a packet belongs to a local VM;
1138  * if so, get its VLAN tag and the length offset.
1139 */
1140 static __rte_always_inline int
1141 find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
1142 uint32_t *offset, uint16_t *vlan_tag)
1143 {
1144 struct vhost_dev *dst_vdev;
1145 struct rte_ether_hdr *pkt_hdr =
1146 rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1147
1148 dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
1149 if (!dst_vdev)
1150 return 0;
1151
1152 if (vdev->vid == dst_vdev->vid) {
1153 RTE_LOG_DP(DEBUG, VHOST_DATA,
1154 			"(%d) TX: src and dst MAC is the same. Dropping packet.\n",
1155 vdev->vid);
1156 return -1;
1157 }
1158
1159 /*
1160 	 * HW VLAN strip reduces the packet length by the length
1161 	 * of the VLAN tag, so the packet length needs to be
1162 	 * restored by adding it back.
1163 */
1164 *offset = RTE_VLAN_HLEN;
1165 *vlan_tag = vlan_tags[vdev->vid];
1166
1167 RTE_LOG_DP(DEBUG, VHOST_DATA,
1168 "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
1169 vdev->vid, dst_vdev->vid, *vlan_tag);
1170
1171 return 0;
1172 }
1173
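/*
 * virtio_tx_offload() below prepares a TSO'd packet for the physical port: for
 * hardware TSO the TCP checksum field must be pre-filled with the pseudo-header
 * checksum, which is what rte_ipv4_phdr_cksum()/rte_ipv6_phdr_cksum() compute.
 */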
1174 static void virtio_tx_offload(struct rte_mbuf *m)
1175 {
1176 struct rte_net_hdr_lens hdr_lens;
1177 struct rte_ipv4_hdr *ipv4_hdr;
1178 struct rte_tcp_hdr *tcp_hdr;
1179 uint32_t ptype;
1180 void *l3_hdr;
1181
1182 ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
1183 m->l2_len = hdr_lens.l2_len;
1184 m->l3_len = hdr_lens.l3_len;
1185 m->l4_len = hdr_lens.l4_len;
1186
1187 l3_hdr = rte_pktmbuf_mtod_offset(m, void *, m->l2_len);
1188 tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
1189 m->l2_len + m->l3_len);
1190
1191 m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
1192 if ((ptype & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) {
1193 m->ol_flags |= RTE_MBUF_F_TX_IPV4;
1194 m->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
1195 ipv4_hdr = l3_hdr;
1196 ipv4_hdr->hdr_checksum = 0;
1197 tcp_hdr->cksum = rte_ipv4_phdr_cksum(l3_hdr, m->ol_flags);
1198 } else { /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
1199 m->ol_flags |= RTE_MBUF_F_TX_IPV6;
1200 tcp_hdr->cksum = rte_ipv6_phdr_cksum(l3_hdr, m->ol_flags);
1201 }
1202 }
1203
1204 static __rte_always_inline void
1205 do_drain_mbuf_table(struct mbuf_table *tx_q)
1206 {
1207 uint16_t count;
1208
1209 count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
1210 tx_q->m_table, tx_q->len);
1211 if (unlikely(count < tx_q->len))
1212 free_pkts(&tx_q->m_table[count], tx_q->len - count);
1213
1214 tx_q->len = 0;
1215 }
1216
1217 /*
1218 * This function routes the TX packet to the correct interface. This
1219 * may be a local device or the physical port.
1220 */
1221 static __rte_always_inline void
1222 virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
1223 {
1224 struct mbuf_table *tx_q;
1225 unsigned offset = 0;
1226 const uint16_t lcore_id = rte_lcore_id();
1227 struct rte_ether_hdr *nh;
1228
1229
1230 nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1231 if (unlikely(rte_is_broadcast_ether_addr(&nh->dst_addr))) {
1232 struct vhost_dev *vdev2;
1233
1234 TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
1235 if (vdev2 != vdev)
1236 sync_virtio_xmit(vdev2, vdev, m);
1237 }
1238 goto queue2nic;
1239 }
1240
1241 /*check if destination is local VM*/
1242 if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0))
1243 return;
1244
1245 if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
1246 if (unlikely(find_local_dest(vdev, m, &offset,
1247 &vlan_tag) != 0)) {
1248 rte_pktmbuf_free(m);
1249 return;
1250 }
1251 }
1252
1253 RTE_LOG_DP(DEBUG, VHOST_DATA,
1254 "(%d) TX: MAC address is external\n", vdev->vid);
1255
1256 queue2nic:
1257
1258 /*Add packet to the port tx queue*/
1259 tx_q = &lcore_tx_queue[lcore_id];
1260
1261 nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1262 if (unlikely(nh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) {
1263 /* Guest has inserted the vlan tag. */
1264 struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1);
1265 uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
1266 if ((vm2vm_mode == VM2VM_HARDWARE) &&
1267 (vh->vlan_tci != vlan_tag_be))
1268 vh->vlan_tci = vlan_tag_be;
1269 } else {
1270 m->ol_flags |= RTE_MBUF_F_TX_VLAN;
1271
1272 /*
1273 * Find the right seg to adjust the data len when offset is
1274 * bigger than tail room size.
1275 */
1276 if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
1277 if (likely(offset <= rte_pktmbuf_tailroom(m)))
1278 m->data_len += offset;
1279 else {
1280 struct rte_mbuf *seg = m;
1281
1282 while ((seg->next != NULL) &&
1283 (offset > rte_pktmbuf_tailroom(seg)))
1284 seg = seg->next;
1285
1286 seg->data_len += offset;
1287 }
1288 m->pkt_len += offset;
1289 }
1290
1291 m->vlan_tci = vlan_tag;
1292 }
1293
1294 if (m->ol_flags & RTE_MBUF_F_RX_LRO)
1295 virtio_tx_offload(m);
1296
1297 tx_q->m_table[tx_q->len++] = m;
1298 if (enable_stats) {
1299 vdev->stats.tx_total++;
1300 vdev->stats.tx++;
1301 }
1302
1303 if (unlikely(tx_q->len == MAX_PKT_BURST))
1304 do_drain_mbuf_table(tx_q);
1305 }
1306
1307
1308 static __rte_always_inline void
1309 drain_mbuf_table(struct mbuf_table *tx_q)
1310 {
1311 static uint64_t prev_tsc;
1312 uint64_t cur_tsc;
1313
1314 if (tx_q->len == 0)
1315 return;
1316
1317 cur_tsc = rte_rdtsc();
1318 if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
1319 prev_tsc = cur_tsc;
1320
1321 RTE_LOG_DP(DEBUG, VHOST_DATA,
1322 "TX queue drained after timeout with burst size %u\n",
1323 tx_q->len);
1324 do_drain_mbuf_table(tx_q);
1325 }
1326 }
1327
1328 static __rte_always_inline void
1329 drain_eth_rx(struct vhost_dev *vdev)
1330 {
1331 uint16_t rx_count, enqueue_count;
1332 struct rte_mbuf *pkts[MAX_PKT_BURST];
1333
1334 rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
1335 pkts, MAX_PKT_BURST);
1336
1337 if (!rx_count)
1338 return;
1339
1340 /*
1341 	 * When "enable_retry" is set, wait and retry when there are
1342 	 * not enough free slots in the queue to hold @rx_count packets,
1343 	 * to reduce packet loss.
1344 */
1345 if (enable_retry &&
1346 unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
1347 VIRTIO_RXQ))) {
1348 uint32_t retry;
1349
1350 for (retry = 0; retry < burst_rx_retry_num; retry++) {
1351 rte_delay_us(burst_rx_delay_time);
1352 if (rx_count <= rte_vhost_avail_entries(vdev->vid,
1353 VIRTIO_RXQ))
1354 break;
1355 }
1356 }
1357
1358 if (builtin_net_driver) {
1359 enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
1360 pkts, rx_count);
1361 } else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
1362 uint16_t enqueue_fail = 0;
1363 int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
1364
1365 complete_async_pkts(vdev);
1366 enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
1367 VIRTIO_RXQ, pkts, rx_count, dma_id, 0);
1368
1369 enqueue_fail = rx_count - enqueue_count;
1370 if (enqueue_fail)
1371 free_pkts(&pkts[enqueue_count], enqueue_fail);
1372
1373 } else {
1374 enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
1375 pkts, rx_count);
1376 }
1377
1378 if (enable_stats) {
1379 __atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
1380 __ATOMIC_SEQ_CST);
1381 __atomic_add_fetch(&vdev->stats.rx_atomic, enqueue_count,
1382 __ATOMIC_SEQ_CST);
1383 }
1384
1385 if (!dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled)
1386 free_pkts(pkts, rx_count);
1387 }
1388
1389 static __rte_always_inline void
1390 drain_virtio_tx(struct vhost_dev *vdev)
1391 {
1392 struct rte_mbuf *pkts[MAX_PKT_BURST];
1393 uint16_t count;
1394 uint16_t i;
1395
1396 if (builtin_net_driver) {
1397 count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
1398 pkts, MAX_PKT_BURST);
1399 } else {
1400 count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
1401 mbuf_pool, pkts, MAX_PKT_BURST);
1402 }
1403
1404 /* setup VMDq for the first packet */
1405 if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
1406 if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
1407 free_pkts(pkts, count);
1408 }
1409
1410 for (i = 0; i < count; ++i)
1411 virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
1412 }
1413
1414 /*
1415 * Main function of vhost-switch. It basically does:
1416 *
1417 * for each vhost device {
1418 * - drain_eth_rx()
1419 *
1420 * Which drains the host eth Rx queue linked to the vhost device,
1421  *    and delivers all of them to the guest virtio Rx ring associated with
1422 * this vhost device.
1423 *
1424 * - drain_virtio_tx()
1425 *
1426  *    Which drains the guest virtio Tx queue and delivers all of them
1427  *    to the target, which could be another vhost device or the
1428  *    physical eth dev. The routing is done in the function "virtio_tx_route".
1429 * }
1430 */
1431 static int
1432 switch_worker(void *arg __rte_unused)
1433 {
1434 unsigned i;
1435 unsigned lcore_id = rte_lcore_id();
1436 struct vhost_dev *vdev;
1437 struct mbuf_table *tx_q;
1438
1439 RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
1440
1441 tx_q = &lcore_tx_queue[lcore_id];
1442 for (i = 0; i < rte_lcore_count(); i++) {
1443 if (lcore_ids[i] == lcore_id) {
1444 tx_q->txq_id = i;
1445 break;
1446 }
1447 }
1448
1449 while(1) {
1450 drain_mbuf_table(tx_q);
1451 drain_vhost_table();
1452 /*
1453 		 * Inform the configuration core, if requested, that we have
1454 		 * exited the linked list and that no devices are in use.
1455 */
1456 if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
1457 lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
1458
1459 /*
1460 * Process vhost devices
1461 */
1462 TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
1463 lcore_vdev_entry) {
1464 if (unlikely(vdev->remove)) {
1465 unlink_vmdq(vdev);
1466 vdev->ready = DEVICE_SAFE_REMOVE;
1467 continue;
1468 }
1469
1470 if (likely(vdev->ready == DEVICE_RX))
1471 drain_eth_rx(vdev);
1472
1473 if (likely(!vdev->remove))
1474 drain_virtio_tx(vdev);
1475 }
1476 }
1477
1478 return 0;
1479 }
1480
1481 /*
1482 * Remove a device from the specific data core linked list and from the
1483 * main linked list. Synchronization occurs through the use of the
1484 * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
1485 * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
1486 */
1487 static void
1488 destroy_device(int vid)
1489 {
1490 struct vhost_dev *vdev = NULL;
1491 int lcore;
1492 uint16_t i;
1493
1494 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1495 if (vdev->vid == vid)
1496 break;
1497 }
1498 if (!vdev)
1499 return;
1500 /*set the remove flag. */
1501 vdev->remove = 1;
1502 while(vdev->ready != DEVICE_SAFE_REMOVE) {
1503 rte_pause();
1504 }
1505
1506 for (i = 0; i < RTE_MAX_LCORE; i++)
1507 rte_free(vhost_txbuff[i * RTE_MAX_VHOST_DEVICE + vid]);
1508
1509 if (builtin_net_driver)
1510 vs_vhost_net_remove(vdev);
1511
1512 TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
1513 lcore_vdev_entry);
1514 TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
1515
1516
1517 /* Set the dev_removal_flag on each lcore. */
1518 RTE_LCORE_FOREACH_WORKER(lcore)
1519 lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
1520
1521 /*
1522 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
1523 * we can be sure that they can no longer access the device removed
1524 * from the linked lists and that the devices are no longer in use.
1525 */
1526 RTE_LCORE_FOREACH_WORKER(lcore) {
1527 while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
1528 rte_pause();
1529 }
1530
1531 lcore_info[vdev->coreid].device_num--;
1532
1533 RTE_LOG(INFO, VHOST_DATA,
1534 "(%d) device has been removed from data core\n",
1535 vdev->vid);
1536
1537 if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
1538 uint16_t n_pkt = 0;
1539 int pkts_inflight;
1540 int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
1541 pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, VIRTIO_RXQ);
1542 struct rte_mbuf *m_cpl[pkts_inflight];
1543
1544 while (pkts_inflight) {
1545 n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
1546 m_cpl, pkts_inflight, dma_id, 0);
1547 free_pkts(m_cpl, n_pkt);
1548 pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid,
1549 VIRTIO_RXQ);
1550 }
1551
1552 rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
1553 dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
1554 }
1555
1556 rte_free(vdev);
1557 }
1558
1559 /*
1560 * A new device is added to a data core. First the device is added to the main linked list
1561 * and then allocated to a specific data core.
1562 */
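/*
 * The VMDQ Rx queue picked in new_device() is vid * queues_per_pool + vmdq_queue_base;
 * e.g. with queues_per_pool == 4 and vmdq_queue_base == 8 (illustrative values),
 * vhost device 2 reads from NIC queue 16.
 */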
1563 static int
1564 new_device(int vid)
1565 {
1566 int lcore, core_add = 0;
1567 uint16_t i;
1568 uint32_t device_num_min = num_devices;
1569 struct vhost_dev *vdev;
1570 vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
1571 if (vdev == NULL) {
1572 RTE_LOG(INFO, VHOST_DATA,
1573 "(%d) couldn't allocate memory for vhost dev\n",
1574 vid);
1575 return -1;
1576 }
1577 vdev->vid = vid;
1578
1579 for (i = 0; i < RTE_MAX_LCORE; i++) {
1580 vhost_txbuff[i * RTE_MAX_VHOST_DEVICE + vid]
1581 = rte_zmalloc("vhost bufftable",
1582 sizeof(struct vhost_bufftable),
1583 RTE_CACHE_LINE_SIZE);
1584
1585 if (vhost_txbuff[i * RTE_MAX_VHOST_DEVICE + vid] == NULL) {
1586 RTE_LOG(INFO, VHOST_DATA,
1587 "(%d) couldn't allocate memory for vhost TX\n", vid);
1588 return -1;
1589 }
1590 }
1591
1592 if (builtin_net_driver)
1593 vs_vhost_net_setup(vdev);
1594
1595 TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
1596 vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
1597
1598 /*reset ready flag*/
1599 vdev->ready = DEVICE_MAC_LEARNING;
1600 vdev->remove = 0;
1601
1602 /* Find a suitable lcore to add the device. */
1603 RTE_LCORE_FOREACH_WORKER(lcore) {
1604 if (lcore_info[lcore].device_num < device_num_min) {
1605 device_num_min = lcore_info[lcore].device_num;
1606 core_add = lcore;
1607 }
1608 }
1609 vdev->coreid = core_add;
1610
1611 TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
1612 lcore_vdev_entry);
1613 lcore_info[vdev->coreid].device_num++;
1614
1615 /* Disable notifications. */
1616 rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
1617 rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
1618
1619 RTE_LOG(INFO, VHOST_DATA,
1620 "(%d) device has been added to data core %d\n",
1621 vid, vdev->coreid);
1622
1623 if (dma_bind[vid].dmas[VIRTIO_RXQ].dev_id != INVALID_DMA_ID) {
1624 int ret;
1625
1626 ret = rte_vhost_async_channel_register(vid, VIRTIO_RXQ);
1627 if (ret == 0)
1628 dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = true;
1629 return ret;
1630 }
1631
1632 return 0;
1633 }
1634
1635 static int
1636 vring_state_changed(int vid, uint16_t queue_id, int enable)
1637 {
1638 struct vhost_dev *vdev = NULL;
1639
1640 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1641 if (vdev->vid == vid)
1642 break;
1643 }
1644 if (!vdev)
1645 return -1;
1646
1647 if (queue_id != VIRTIO_RXQ)
1648 return 0;
1649
1650 if (dma_bind[vid].dmas[queue_id].async_enabled) {
1651 if (!enable) {
1652 uint16_t n_pkt = 0;
1653 int pkts_inflight;
1654 pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, queue_id);
1655 int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
1656 struct rte_mbuf *m_cpl[pkts_inflight];
1657
1658 while (pkts_inflight) {
1659 n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
1660 m_cpl, pkts_inflight, dma_id, 0);
1661 free_pkts(m_cpl, n_pkt);
1662 pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid,
1663 queue_id);
1664 }
1665 }
1666 }
1667
1668 return 0;
1669 }
1670
1671 /*
1672  * These callbacks allow devices to be added to the data core when configuration
1673  * has fully completed.
1674 */
1675 static const struct rte_vhost_device_ops virtio_net_device_ops =
1676 {
1677 .new_device = new_device,
1678 .destroy_device = destroy_device,
1679 .vring_state_changed = vring_state_changed,
1680 };
1681
1682 /*
1683  * This thread wakes up periodically to print stats if the user has
1684  * enabled them.
1685 */
1686 static void *
1687 print_stats(__rte_unused void *arg)
1688 {
1689 struct vhost_dev *vdev;
1690 uint64_t tx_dropped, rx_dropped;
1691 uint64_t tx, tx_total, rx, rx_total;
1692 const char clr[] = { 27, '[', '2', 'J', '\0' };
1693 const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
1694
1695 while(1) {
1696 sleep(enable_stats);
1697
1698 /* Clear screen and move to top left */
1699 printf("%s%s\n", clr, top_left);
1700 printf("Device statistics =================================\n");
1701
1702 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1703 tx_total = vdev->stats.tx_total;
1704 tx = vdev->stats.tx;
1705 tx_dropped = tx_total - tx;
1706
1707 rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
1708 __ATOMIC_SEQ_CST);
1709 rx = __atomic_load_n(&vdev->stats.rx_atomic,
1710 __ATOMIC_SEQ_CST);
1711 rx_dropped = rx_total - rx;
1712
1713 printf("Statistics for device %d\n"
1714 "-----------------------\n"
1715 "TX total: %" PRIu64 "\n"
1716 "TX dropped: %" PRIu64 "\n"
1717 "TX successful: %" PRIu64 "\n"
1718 "RX total: %" PRIu64 "\n"
1719 "RX dropped: %" PRIu64 "\n"
1720 "RX successful: %" PRIu64 "\n",
1721 vdev->vid,
1722 tx_total, tx_dropped, tx,
1723 rx_total, rx_dropped, rx);
1724 }
1725
1726 printf("===================================================\n");
1727
1728 fflush(stdout);
1729 }
1730
1731 return NULL;
1732 }
1733
1734 static void
1735 unregister_drivers(int socket_num)
1736 {
1737 int i, ret;
1738
1739 for (i = 0; i < socket_num; i++) {
1740 ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
1741 if (ret != 0)
1742 RTE_LOG(ERR, VHOST_CONFIG,
1743 "Fail to unregister vhost driver for %s.\n",
1744 socket_files + i * PATH_MAX);
1745 }
1746 }
1747
1748 /* When we receive an INT signal, unregister the vhost driver */
1749 static void
1750 sigint_handler(__rte_unused int signum)
1751 {
1752 /* Unregister vhost driver. */
1753 unregister_drivers(nb_sockets);
1754
1755 exit(0);
1756 }
1757
1758 static void
1759 reset_dma(void)
1760 {
1761 int i;
1762
1763 for (i = 0; i < RTE_MAX_VHOST_DEVICE; i++) {
1764 int j;
1765
1766 for (j = 0; j < RTE_MAX_QUEUES_PER_PORT * 2; j++) {
1767 dma_bind[i].dmas[j].dev_id = INVALID_DMA_ID;
1768 dma_bind[i].dmas[j].async_enabled = false;
1769 }
1770 }
1771
1772 for (i = 0; i < RTE_DMADEV_DEFAULT_MAX; i++)
1773 dmas_id[i] = INVALID_DMA_ID;
1774 }
1775
1776 /*
1777 * Main function, does initialisation and calls the per-lcore functions.
1778 */
1779 int
1780 main(int argc, char *argv[])
1781 {
1782 unsigned lcore_id, core_id = 0;
1783 unsigned nb_ports, valid_num_ports;
1784 int ret, i;
1785 uint16_t portid;
1786 static pthread_t tid;
1787 uint64_t flags = RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS;
1788
1789 signal(SIGINT, sigint_handler);
1790
1791 /* init EAL */
1792 ret = rte_eal_init(argc, argv);
1793 if (ret < 0)
1794 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1795 argc -= ret;
1796 argv += ret;
1797
1798 /* initialize dma structures */
1799 reset_dma();
1800
1801 /* parse app arguments */
1802 ret = us_vhost_parse_args(argc, argv);
1803 if (ret < 0)
1804 rte_exit(EXIT_FAILURE, "Invalid argument\n");
1805
1806 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1807 TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
1808
1809 if (rte_lcore_is_enabled(lcore_id))
1810 lcore_ids[core_id++] = lcore_id;
1811 }
1812
1813 if (rte_lcore_count() > RTE_MAX_LCORE)
1814 rte_exit(EXIT_FAILURE,"Not enough cores\n");
1815
1816 /* Get the number of physical ports. */
1817 nb_ports = rte_eth_dev_count_avail();
1818
1819 /*
1820 * Update the global var NUM_PORTS and global array PORTS
1821 	 * and get the value of VALID_NUM_PORTS according to the number of system ports
1822 */
1823 valid_num_ports = check_ports_num(nb_ports);
1824
1825 if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
1826 		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
1827 			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
1828 return -1;
1829 }
1830
1831 /*
1832 * FIXME: here we are trying to allocate mbufs big enough for
1833 * @MAX_QUEUES, but the truth is we're never going to use that
1834 * many queues here. We probably should only do allocation for
1835 * those queues we are going to use.
1836 */
1837 mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", total_num_mbufs,
1838 MBUF_CACHE_SIZE, 0, MBUF_DATA_SIZE,
1839 rte_socket_id());
1840 if (mbuf_pool == NULL)
1841 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1842
1843 if (vm2vm_mode == VM2VM_HARDWARE) {
1844 /* Enable VT loop back to let L2 switch to do it. */
1845 vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
1846 RTE_LOG(DEBUG, VHOST_CONFIG,
1847 "Enable loop back for L2 switch in vmdq.\n");
1848 }
1849
1850 /* initialize all ports */
1851 RTE_ETH_FOREACH_DEV(portid) {
1852 /* skip ports that are not enabled */
1853 if ((enabled_port_mask & (1 << portid)) == 0) {
1854 RTE_LOG(INFO, VHOST_PORT,
1855 "Skipping disabled port %d\n", portid);
1856 continue;
1857 }
1858 if (port_init(portid) != 0)
1859 rte_exit(EXIT_FAILURE,
1860 "Cannot initialize network ports\n");
1861 }
1862
1863 /* Enable stats if the user option is set. */
1864 if (enable_stats) {
1865 ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
1866 print_stats, NULL);
1867 if (ret < 0)
1868 rte_exit(EXIT_FAILURE,
1869 "Cannot create print-stats thread\n");
1870 }
1871
1872 /* Launch all data cores. */
1873 RTE_LCORE_FOREACH_WORKER(lcore_id)
1874 rte_eal_remote_launch(switch_worker, NULL, lcore_id);
1875
1876 if (client_mode)
1877 flags |= RTE_VHOST_USER_CLIENT;
1878
1879 for (i = 0; i < dma_count; i++) {
1880 if (rte_vhost_async_dma_configure(dmas_id[i], 0) < 0) {
1881 RTE_LOG(ERR, VHOST_PORT, "Failed to configure DMA in vhost.\n");
1882 rte_exit(EXIT_FAILURE, "Cannot use given DMA device\n");
1883 }
1884 }
1885
1886 /* Register vhost user driver to handle vhost messages. */
1887 for (i = 0; i < nb_sockets; i++) {
1888 char *file = socket_files + i * PATH_MAX;
1889
1890 if (dma_count)
1891 flags = flags | RTE_VHOST_USER_ASYNC_COPY;
1892
1893 ret = rte_vhost_driver_register(file, flags);
1894 if (ret != 0) {
1895 unregister_drivers(i);
1896 rte_exit(EXIT_FAILURE,
1897 "vhost driver register failure.\n");
1898 }
1899
1900 if (builtin_net_driver)
1901 rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);
1902
1903 if (mergeable == 0) {
1904 rte_vhost_driver_disable_features(file,
1905 1ULL << VIRTIO_NET_F_MRG_RXBUF);
1906 }
1907
1908 if (enable_tx_csum == 0) {
1909 rte_vhost_driver_disable_features(file,
1910 1ULL << VIRTIO_NET_F_CSUM);
1911 }
1912
1913 if (enable_tso == 0) {
1914 rte_vhost_driver_disable_features(file,
1915 1ULL << VIRTIO_NET_F_HOST_TSO4);
1916 rte_vhost_driver_disable_features(file,
1917 1ULL << VIRTIO_NET_F_HOST_TSO6);
1918 rte_vhost_driver_disable_features(file,
1919 1ULL << VIRTIO_NET_F_GUEST_TSO4);
1920 rte_vhost_driver_disable_features(file,
1921 1ULL << VIRTIO_NET_F_GUEST_TSO6);
1922 }
1923
1924 if (promiscuous) {
1925 rte_vhost_driver_enable_features(file,
1926 1ULL << VIRTIO_NET_F_CTRL_RX);
1927 }
1928
1929 ret = rte_vhost_driver_callback_register(file,
1930 &virtio_net_device_ops);
1931 if (ret != 0) {
1932 rte_exit(EXIT_FAILURE,
1933 "failed to register vhost driver callbacks.\n");
1934 }
1935
1936 if (rte_vhost_driver_start(file) < 0) {
1937 rte_exit(EXIT_FAILURE,
1938 "failed to start vhost driver.\n");
1939 }
1940 }
1941
1942 RTE_LCORE_FOREACH_WORKER(lcore_id)
1943 rte_eal_wait_lcore(lcore_id);
1944
1945 /* clean up the EAL */
1946 rte_eal_cleanup();
1947
1948 return 0;
1949 }
1950