xref: /dpdk/examples/vhost/main.c (revision ebee5594a35e2af743ff03cde7d4bac67ac772f2)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <arpa/inet.h>
35 #include <getopt.h>
36 #include <linux/if_ether.h>
37 #include <linux/if_vlan.h>
38 #include <linux/virtio_net.h>
39 #include <linux/virtio_ring.h>
40 #include <signal.h>
41 #include <stdint.h>
42 #include <sys/eventfd.h>
43 #include <sys/param.h>
44 #include <unistd.h>
45 
46 #include <rte_atomic.h>
47 #include <rte_cycles.h>
48 #include <rte_ethdev.h>
49 #include <rte_log.h>
50 #include <rte_string_fns.h>
51 #include <rte_malloc.h>
52 #include <rte_virtio_net.h>
53 #include <rte_ip.h>
54 #include <rte_tcp.h>
55 
56 #include "main.h"
57 
58 #ifndef MAX_QUEUES
59 #define MAX_QUEUES 128
60 #endif
61 
62 /* the maximum number of external ports supported */
63 #define MAX_SUP_PORTS 1
64 
65 #define MBUF_CACHE_SIZE	128
66 #define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE
67 
68 #define MAX_PKT_BURST 32		/* Max burst size for RX/TX */
69 #define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */
70 
71 #define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
72 #define BURST_RX_RETRIES 4		/* Number of retries on RX. */
73 
74 #define JUMBO_FRAME_MAX_SIZE    0x2600
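/* 0x2600 = 9728 bytes, i.e. room for a ~9000 byte jumbo frame plus headers. */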
75 
76 /* State of virtio device. */
77 #define DEVICE_MAC_LEARNING 0
78 #define DEVICE_RX			1
79 #define DEVICE_SAFE_REMOVE	2
80 
81 /* Configurable number of RX/TX ring descriptors */
82 #define RTE_TEST_RX_DESC_DEFAULT 1024
83 #define RTE_TEST_TX_DESC_DEFAULT 512
84 
85 #define INVALID_PORT_ID 0xFF
86 
87 /* Max number of devices. Limited by vmdq. */
88 #define MAX_DEVICES 64
89 
90 /* Size of buffers used for snprintfs. */
91 #define MAX_PRINT_BUFF 6072
92 
93 /* Maximum character device basename size. */
94 #define MAX_BASENAME_SZ 10
95 
96 /* Maximum long option length for option parsing. */
97 #define MAX_LONG_OPT_SZ 64
98 
99 /* mask of enabled ports */
100 static uint32_t enabled_port_mask = 0;
101 
102 /* Promiscuous mode */
103 static uint32_t promiscuous;
104 
105 /* number of devices/queues to support*/
106 static uint32_t num_queues = 0;
107 static uint32_t num_devices;
108 
109 static struct rte_mempool *mbuf_pool;
110 static int mergeable;
111 
112 /* Do VLAN strip on host, enabled by default */
113 static uint32_t vlan_strip = 1;
114 
115 /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
116 typedef enum {
117 	VM2VM_DISABLED = 0,
118 	VM2VM_SOFTWARE = 1,
119 	VM2VM_HARDWARE = 2,
120 	VM2VM_LAST
121 } vm2vm_type;
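/*
 * VM2VM_SOFTWARE switches packets between local devices within this
 * application; VM2VM_HARDWARE instead relies on the NIC's VMDQ loopback,
 * which is enabled in main() when this mode is selected.
 */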
122 static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
123 
124 /* Enable stats. */
125 static uint32_t enable_stats = 0;
126 /* Enable retries on RX. */
127 static uint32_t enable_retry = 1;
128 
129 /* Disable TX checksum offload */
130 static uint32_t enable_tx_csum;
131 
132 /* Disable TSO offload */
133 static uint32_t enable_tso;
134 
135 static int client_mode;
136 
137 /* Specify timeout (in useconds) between retries on RX. */
138 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
139 /* Specify the number of retries on RX. */
140 static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
141 
142 /* Character device basename. Can be set by user. */
143 static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
144 
145 /* Empty VMDQ configuration structure. Filled in programmatically. */
146 static struct rte_eth_conf vmdq_conf_default = {
147 	.rxmode = {
148 		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
149 		.split_hdr_size = 0,
150 		.header_split   = 0, /**< Header Split disabled */
151 		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
152 		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
153 		/*
154 		 * It is necessary for 1G NICs such as the I350: it fixes a bug
155 		 * where IPv4 forwarding in the guest could not forward packets
156 		 * from one virtio device to another.
157 		 */
158 		.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
159 		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
160 		.hw_strip_crc   = 0, /**< CRC stripped by hardware */
161 	},
162 
163 	.txmode = {
164 		.mq_mode = ETH_MQ_TX_NONE,
165 	},
166 	.rx_adv_conf = {
167 		/*
168 		 * should be overridden separately in code with
169 		 * appropriate values
170 		 */
171 		.vmdq_rx_conf = {
172 			.nb_queue_pools = ETH_8_POOLS,
173 			.enable_default_pool = 0,
174 			.default_pool = 0,
175 			.nb_pool_maps = 0,
176 			.pool_map = {{0, 0},},
177 		},
178 	},
179 };
180 
181 static unsigned lcore_ids[RTE_MAX_LCORE];
182 static uint8_t ports[RTE_MAX_ETHPORTS];
183 static unsigned num_ports = 0; /**< The number of ports specified in command line */
184 static uint16_t num_pf_queues, num_vmdq_queues;
185 static uint16_t vmdq_pool_base, vmdq_queue_base;
186 static uint16_t queues_per_pool;
187 
188 const uint16_t vlan_tags[] = {
189 	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
190 	1008, 1009, 1010, 1011,	1012, 1013, 1014, 1015,
191 	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
192 	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
193 	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
194 	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
195 	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
196 	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
197 };
198 
199 /* ethernet addresses of ports */
200 static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
201 
202 static struct vhost_dev_tailq_list vhost_dev_list =
203 	TAILQ_HEAD_INITIALIZER(vhost_dev_list);
204 
205 static struct lcore_info lcore_info[RTE_MAX_LCORE];
206 
207 /* Used for queueing bursts of TX packets. */
208 struct mbuf_table {
209 	unsigned len;
210 	unsigned txq_id;
211 	struct rte_mbuf *m_table[MAX_PKT_BURST];
212 };
213 
214 /* TX queue for each data core. */
215 struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
216 
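/*
 * TX drain interval (~BURST_TX_DRAIN_US microseconds) expressed in TSC cycles:
 * round the TSC rate up to whole cycles per microsecond, then multiply.
 */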
217 #define MBUF_TABLE_DRAIN_TSC	((rte_get_tsc_hz() + US_PER_S - 1) \
218 				 / US_PER_S * BURST_TX_DRAIN_US)
219 #define VLAN_HLEN       4
220 
221 /*
222  * Builds up the correct configuration for VMDQ VLAN pool map
223  * according to the pool & queue limits.
224  */
225 static inline int
226 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
227 {
228 	struct rte_eth_vmdq_rx_conf conf;
229 	struct rte_eth_vmdq_rx_conf *def_conf =
230 		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
231 	unsigned i;
232 
233 	memset(&conf, 0, sizeof(conf));
234 	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
235 	conf.nb_pool_maps = num_devices;
236 	conf.enable_loop_back = def_conf->enable_loop_back;
237 	conf.rx_mode = def_conf->rx_mode;
238 
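	/*
	 * Map one VLAN tag per pool: traffic tagged vlan_tags[i] is steered
	 * to VMDQ pool i, i.e. to virtio device i.
	 */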
239 	for (i = 0; i < conf.nb_pool_maps; i++) {
240 		conf.pool_map[i].vlan_id = vlan_tags[ i ];
241 		conf.pool_map[i].pools = (1UL << i);
242 	}
243 
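	/*
	 * Start from the default port configuration and overwrite only the
	 * VMDQ RX section with the pool map built above.
	 */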
244 	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
245 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
246 		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
247 	return 0;
248 }
249 
250 /*
251  * Validate the device number against the max pool number obtained from
252  * dev_info. If the device number is invalid, print an error message and
253  * return -1. Each device must have its own pool.
254  */
255 static inline int
256 validate_num_devices(uint32_t max_nb_devices)
257 {
258 	if (num_devices > max_nb_devices) {
259 		RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
260 		return -1;
261 	}
262 	return 0;
263 }
264 
265 /*
266  * Initialises a given port using global settings and with the rx buffers
267  * coming from the mbuf_pool passed as parameter
268  */
269 static inline int
270 port_init(uint8_t port)
271 {
272 	struct rte_eth_dev_info dev_info;
273 	struct rte_eth_conf port_conf;
274 	struct rte_eth_rxconf *rxconf;
275 	struct rte_eth_txconf *txconf;
276 	int16_t rx_rings, tx_rings;
277 	uint16_t rx_ring_size, tx_ring_size;
278 	int retval;
279 	uint16_t q;
280 
281 	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
282 	rte_eth_dev_info_get (port, &dev_info);
283 
284 	if (dev_info.max_rx_queues > MAX_QUEUES) {
285 		rte_exit(EXIT_FAILURE,
286 			"please define MAX_QUEUES no less than %u in %s\n",
287 			dev_info.max_rx_queues, __FILE__);
288 	}
289 
290 	rxconf = &dev_info.default_rxconf;
291 	txconf = &dev_info.default_txconf;
292 	rxconf->rx_drop_en = 1;
293 
294 	/* Enable vlan offload */
295 	txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;
296 
297 	/* Configure the number of supported virtio devices based on VMDQ limits. */
298 	num_devices = dev_info.max_vmdq_pools;
299 
300 	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
301 	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
302 	tx_rings = (uint16_t)rte_lcore_count();
303 
304 	retval = validate_num_devices(MAX_DEVICES);
305 	if (retval < 0)
306 		return retval;
307 
308 	/* Get port configuration. */
309 	retval = get_eth_conf(&port_conf, num_devices);
310 	if (retval < 0)
311 		return retval;
312 	/* NIC queues are divided into pf queues and vmdq queues.  */
313 	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
314 	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
315 	num_vmdq_queues = num_devices * queues_per_pool;
316 	num_queues = num_pf_queues + num_vmdq_queues;
317 	vmdq_queue_base = dev_info.vmdq_queue_base;
318 	vmdq_pool_base  = dev_info.vmdq_pool_base;
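	/*
	 * Queues below vmdq_queue_base belong to the PF; each vhost device
	 * maps to one VMDQ pool whose first queue is
	 * vmdq_queue_base + vid * queues_per_pool (see new_device()).
	 */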
319 	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
320 		num_pf_queues, num_devices, queues_per_pool);
321 
322 	if (port >= rte_eth_dev_count()) return -1;
323 
324 	if (enable_tx_csum == 0)
325 		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);
326 
327 	if (enable_tso == 0) {
328 		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
329 		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
330 	}
331 
332 	rx_rings = (uint16_t)dev_info.max_rx_queues;
333 	/* Configure ethernet device. */
334 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
335 	if (retval != 0)
336 		return retval;
337 
338 	/* Setup the queues. */
339 	for (q = 0; q < rx_rings; q ++) {
340 		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
341 						rte_eth_dev_socket_id(port),
342 						rxconf,
343 						mbuf_pool);
344 		if (retval < 0)
345 			return retval;
346 	}
347 	for (q = 0; q < tx_rings; q ++) {
348 		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
349 						rte_eth_dev_socket_id(port),
350 						txconf);
351 		if (retval < 0)
352 			return retval;
353 	}
354 
355 	/* Start the device. */
356 	retval  = rte_eth_dev_start(port);
357 	if (retval < 0) {
358 		RTE_LOG(ERR, VHOST_DATA, "Failed to start the device.\n");
359 		return retval;
360 	}
361 
362 	if (promiscuous)
363 		rte_eth_promiscuous_enable(port);
364 
365 	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
366 	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
367 	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
368 			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
369 			(unsigned)port,
370 			vmdq_ports_eth_addr[port].addr_bytes[0],
371 			vmdq_ports_eth_addr[port].addr_bytes[1],
372 			vmdq_ports_eth_addr[port].addr_bytes[2],
373 			vmdq_ports_eth_addr[port].addr_bytes[3],
374 			vmdq_ports_eth_addr[port].addr_bytes[4],
375 			vmdq_ports_eth_addr[port].addr_bytes[5]);
376 
377 	return 0;
378 }
379 
380 /*
381  * Set character device basename.
382  */
383 static int
384 us_vhost_parse_basename(const char *q_arg)
385 {
386 	/* Copy the basename string; reject names too long for the buffer. */
387 
388 	if (strnlen(q_arg, MAX_BASENAME_SZ) >= MAX_BASENAME_SZ)
389 		return -1;
390 	else
391 		snprintf((char*)&dev_basename, MAX_BASENAME_SZ, "%s", q_arg);
392 
393 	return 0;
394 }
395 
396 /*
397  * Parse the portmask provided at run time.
398  */
399 static int
400 parse_portmask(const char *portmask)
401 {
402 	char *end = NULL;
403 	unsigned long pm;
404 
405 	errno = 0;
406 
407 	/* parse hexadecimal string */
408 	pm = strtoul(portmask, &end, 16);
409 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
410 		return -1;
411 
412 	if (pm == 0)
413 		return -1;
414 
415 	return pm;
416 
417 }
418 
419 /*
420  * Parse num options at run time.
421  */
422 static int
423 parse_num_opt(const char *q_arg, uint32_t max_valid_value)
424 {
425 	char *end = NULL;
426 	unsigned long num;
427 
428 	errno = 0;
429 
430 	/* parse unsigned int string */
431 	num = strtoul(q_arg, &end, 10);
432 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
433 		return -1;
434 
435 	if (num > max_valid_value)
436 		return -1;
437 
438 	return num;
439 
440 }
441 
442 /*
443  * Display usage
444  */
445 static void
446 us_vhost_usage(const char *prgname)
447 {
448 	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
449 	"		--vm2vm [0|1|2]\n"
450 	"		--rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
451 	"		--dev-basename <name>\n"
452 	"		--nb-devices ND\n"
453 	"		-p PORTMASK: Set mask for ports to be used by application\n"
454 	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
455 	"		--rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destination queue is full\n"
456 	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. This only takes effect if rx retries are enabled\n"
457 	"		--rx-retry-num [0-N]: the number of retries on rx. This only takes effect if rx retries are enabled\n"
458 	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
459 	"		--vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
460 	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
461 	"		--dev-basename: The basename to be used for the character device.\n"
462 	"		--tx-csum [0|1] disable/enable TX checksum offload.\n"
463 	"		--tso [0|1] disable/enable TCP segmentation offload.\n"
464 	"		--client register a vhost-user socket as client mode.\n",
465 	       prgname);
466 }
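
/*
 * Illustrative invocation (EAL options before "--", application options after),
 * assuming the example binary name vhost-switch:
 *
 *   ./vhost-switch -c 0xf -n 4 -- -p 0x1 --dev-basename usvhost --stats 2
 */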
467 
468 /*
469  * Parse the arguments given in the command line of the application.
470  */
471 static int
472 us_vhost_parse_args(int argc, char **argv)
473 {
474 	int opt, ret;
475 	int option_index;
476 	unsigned i;
477 	const char *prgname = argv[0];
478 	static struct option long_option[] = {
479 		{"vm2vm", required_argument, NULL, 0},
480 		{"rx-retry", required_argument, NULL, 0},
481 		{"rx-retry-delay", required_argument, NULL, 0},
482 		{"rx-retry-num", required_argument, NULL, 0},
483 		{"mergeable", required_argument, NULL, 0},
484 		{"vlan-strip", required_argument, NULL, 0},
485 		{"stats", required_argument, NULL, 0},
486 		{"dev-basename", required_argument, NULL, 0},
487 		{"tx-csum", required_argument, NULL, 0},
488 		{"tso", required_argument, NULL, 0},
489 		{"client", no_argument, &client_mode, 1},
490 		{NULL, 0, 0, 0},
491 	};
492 
493 	/* Parse command line */
494 	while ((opt = getopt_long(argc, argv, "p:P",
495 			long_option, &option_index)) != EOF) {
496 		switch (opt) {
497 		/* Portmask */
498 		case 'p':
499 			enabled_port_mask = parse_portmask(optarg);
500 			if (enabled_port_mask == 0) {
501 				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
502 				us_vhost_usage(prgname);
503 				return -1;
504 			}
505 			break;
506 
507 		case 'P':
508 			promiscuous = 1;
509 			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
510 				ETH_VMDQ_ACCEPT_BROADCAST |
511 				ETH_VMDQ_ACCEPT_MULTICAST;
512 			rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
513 
514 			break;
515 
516 		case 0:
517 			/* Enable/disable vm2vm comms. */
518 			if (!strncmp(long_option[option_index].name, "vm2vm",
519 				MAX_LONG_OPT_SZ)) {
520 				ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
521 				if (ret == -1) {
522 					RTE_LOG(INFO, VHOST_CONFIG,
523 						"Invalid argument for "
524 						"vm2vm [0|1|2]\n");
525 					us_vhost_usage(prgname);
526 					return -1;
527 				} else {
528 					vm2vm_mode = (vm2vm_type)ret;
529 				}
530 			}
531 
532 			/* Enable/disable retries on RX. */
533 			if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
534 				ret = parse_num_opt(optarg, 1);
535 				if (ret == -1) {
536 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
537 					us_vhost_usage(prgname);
538 					return -1;
539 				} else {
540 					enable_retry = ret;
541 				}
542 			}
543 
544 			/* Enable/disable TX checksum offload. */
545 			if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
546 				ret = parse_num_opt(optarg, 1);
547 				if (ret == -1) {
548 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
549 					us_vhost_usage(prgname);
550 					return -1;
551 				} else
552 					enable_tx_csum = ret;
553 			}
554 
555 			/* Enable/disable TSO offload. */
556 			if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
557 				ret = parse_num_opt(optarg, 1);
558 				if (ret == -1) {
559 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
560 					us_vhost_usage(prgname);
561 					return -1;
562 				} else
563 					enable_tso = ret;
564 			}
565 
566 			/* Specify the retries delay time (in useconds) on RX. */
567 			if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
568 				ret = parse_num_opt(optarg, INT32_MAX);
569 				if (ret == -1) {
570 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
571 					us_vhost_usage(prgname);
572 					return -1;
573 				} else {
574 					burst_rx_delay_time = ret;
575 				}
576 			}
577 
578 			/* Specify the retries number on RX. */
579 			if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
580 				ret = parse_num_opt(optarg, INT32_MAX);
581 				if (ret == -1) {
582 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
583 					us_vhost_usage(prgname);
584 					return -1;
585 				} else {
586 					burst_rx_retry_num = ret;
587 				}
588 			}
589 
590 			/* Enable/disable RX mergeable buffers. */
591 			if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
592 				ret = parse_num_opt(optarg, 1);
593 				if (ret == -1) {
594 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
595 					us_vhost_usage(prgname);
596 					return -1;
597 				} else {
598 					mergeable = !!ret;
599 					if (ret) {
600 						vmdq_conf_default.rxmode.jumbo_frame = 1;
601 						vmdq_conf_default.rxmode.max_rx_pkt_len
602 							= JUMBO_FRAME_MAX_SIZE;
603 					}
604 				}
605 			}
606 
607 			/* Enable/disable RX VLAN strip on host. */
608 			if (!strncmp(long_option[option_index].name,
609 				"vlan-strip", MAX_LONG_OPT_SZ)) {
610 				ret = parse_num_opt(optarg, 1);
611 				if (ret == -1) {
612 					RTE_LOG(INFO, VHOST_CONFIG,
613 						"Invalid argument for VLAN strip [0|1]\n");
614 					us_vhost_usage(prgname);
615 					return -1;
616 				} else {
617 					vlan_strip = !!ret;
618 					vmdq_conf_default.rxmode.hw_vlan_strip =
619 						vlan_strip;
620 				}
621 			}
622 
623 			/* Enable/disable stats. */
624 			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
625 				ret = parse_num_opt(optarg, INT32_MAX);
626 				if (ret == -1) {
627 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
628 					us_vhost_usage(prgname);
629 					return -1;
630 				} else {
631 					enable_stats = ret;
632 				}
633 			}
634 
635 			/* Set character device basename. */
636 			if (!strncmp(long_option[option_index].name, "dev-basename", MAX_LONG_OPT_SZ)) {
637 				if (us_vhost_parse_basename(optarg) == -1) {
638 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device basename (Max %d characters)\n", MAX_BASENAME_SZ);
639 					us_vhost_usage(prgname);
640 					return -1;
641 				}
642 			}
643 
644 			break;
645 
646 			/* Invalid option - print options. */
647 		default:
648 			us_vhost_usage(prgname);
649 			return -1;
650 		}
651 	}
652 
653 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
654 		if (enabled_port_mask & (1 << i))
655 			ports[num_ports++] = (uint8_t)i;
656 	}
657 
658 	if ((num_ports ==  0) || (num_ports > MAX_SUP_PORTS)) {
659 		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
660 			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
661 		return -1;
662 	}
663 
664 	return 0;
665 }
666 
667 /*
668  * Update the global variable num_ports and the array ports[] according to
669  * the number of ports in the system, and return the number of valid ports.
670  */
671 static unsigned check_ports_num(unsigned nb_ports)
672 {
673 	unsigned valid_num_ports = num_ports;
674 	unsigned portid;
675 
676 	if (num_ports > nb_ports) {
677 		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
678 			num_ports, nb_ports);
679 		num_ports = nb_ports;
680 	}
681 
682 	for (portid = 0; portid < num_ports; portid ++) {
683 		if (ports[portid] >= nb_ports) {
684 			RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
685 				ports[portid], (nb_ports - 1));
686 			ports[portid] = INVALID_PORT_ID;
687 			valid_num_ports--;
688 		}
689 	}
690 	return valid_num_ports;
691 }
692 
693 static inline struct vhost_dev *__attribute__((always_inline))
694 find_vhost_dev(struct ether_addr *mac)
695 {
696 	struct vhost_dev *vdev;
697 
698 	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
699 		if (vdev->ready == DEVICE_RX &&
700 		    is_same_ether_addr(mac, &vdev->mac_address))
701 			return vdev;
702 	}
703 
704 	return NULL;
705 }
706 
707 /*
708  * This function learns the MAC address of the device and registers it, along
709  * with a VLAN tag, with the VMDQ pool assigned to the device.
710  */
711 static int
712 link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
713 {
714 	struct ether_hdr *pkt_hdr;
715 	int i, ret;
716 
717 	/* Learn MAC address of guest device from packet */
718 	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
719 
720 	if (find_vhost_dev(&pkt_hdr->s_addr)) {
721 		RTE_LOG(ERR, VHOST_DATA,
722 			"(%d) device is using a registered MAC!\n",
723 			vdev->vid);
724 		return -1;
725 	}
726 
727 	for (i = 0; i < ETHER_ADDR_LEN; i++)
728 		vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
729 
730 	/* vlan_tag currently uses the device_id. */
731 	vdev->vlan_tag = vlan_tags[vdev->vid];
732 
733 	/* Print out VMDQ registration info. */
734 	RTE_LOG(INFO, VHOST_DATA,
735 		"(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
736 		vdev->vid,
737 		vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
738 		vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
739 		vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
740 		vdev->vlan_tag);
741 
742 	/* Register the MAC address. */
743 	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
744 				(uint32_t)vdev->vid + vmdq_pool_base);
745 	if (ret)
746 		RTE_LOG(ERR, VHOST_DATA,
747 			"(%d) failed to add device MAC address to VMDQ\n",
748 			vdev->vid);
749 
750 	/* Enable stripping of the vlan tag as we handle routing. */
751 	if (vlan_strip)
752 		rte_eth_dev_set_vlan_strip_on_queue(ports[0],
753 			(uint16_t)vdev->vmdq_rx_q, 1);
754 
755 	/* Set device as ready for RX. */
756 	vdev->ready = DEVICE_RX;
757 
758 	return 0;
759 }
760 
761 /*
762  * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
763  * queue before disabling RX on the device.
764  */
765 static inline void
766 unlink_vmdq(struct vhost_dev *vdev)
767 {
768 	unsigned i = 0;
769 	unsigned rx_count;
770 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
771 
772 	if (vdev->ready == DEVICE_RX) {
773 		/*clear MAC and VLAN settings*/
774 		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
775 		for (i = 0; i < 6; i++)
776 			vdev->mac_address.addr_bytes[i] = 0;
777 
778 		vdev->vlan_tag = 0;
779 
780 		/*Clear out the receive buffers*/
781 		rx_count = rte_eth_rx_burst(ports[0],
782 					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
783 
784 		while (rx_count) {
785 			for (i = 0; i < rx_count; i++)
786 				rte_pktmbuf_free(pkts_burst[i]);
787 
788 			rx_count = rte_eth_rx_burst(ports[0],
789 					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
790 		}
791 
792 		vdev->ready = DEVICE_MAC_LEARNING;
793 	}
794 }
795 
796 static inline void __attribute__((always_inline))
797 virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
798 	    struct rte_mbuf *m)
799 {
800 	uint16_t ret;
801 
802 	ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
803 	if (enable_stats) {
804 		rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
805 		rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
806 		src_vdev->stats.tx_total++;
807 		src_vdev->stats.tx += ret;
808 	}
809 }
810 
811 /*
812  * Check if the packet destination MAC address is for a local device. If so then put
813  * the packet on that device's RX queue. If not then return.
814  */
815 static inline int __attribute__((always_inline))
816 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
817 {
818 	struct ether_hdr *pkt_hdr;
819 	struct vhost_dev *dst_vdev;
820 
821 	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
822 
823 	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
824 	if (!dst_vdev)
825 		return -1;
826 
827 	if (vdev->vid == dst_vdev->vid) {
828 		RTE_LOG(DEBUG, VHOST_DATA,
829 			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
830 			vdev->vid);
831 		return 0;
832 	}
833 
834 	RTE_LOG(DEBUG, VHOST_DATA,
835 		"(%d) TX: MAC address is local\n", dst_vdev->vid);
836 
837 	if (unlikely(dst_vdev->remove)) {
838 		RTE_LOG(DEBUG, VHOST_DATA,
839 			"(%d) device is marked for removal\n", dst_vdev->vid);
840 		return 0;
841 	}
842 
843 	virtio_xmit(dst_vdev, vdev, m);
844 	return 0;
845 }
846 
847 /*
848  * Check if the destination MAC of a packet belongs to a local VM,
849  * and if so get its VLAN tag and the length offset to apply.
850  */
851 static inline int __attribute__((always_inline))
852 find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
853 	uint32_t *offset, uint16_t *vlan_tag)
854 {
855 	struct vhost_dev *dst_vdev;
856 	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
857 
858 	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
859 	if (!dst_vdev)
860 		return 0;
861 
862 	if (vdev->vid == dst_vdev->vid) {
863 		RTE_LOG(DEBUG, VHOST_DATA,
864 			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
865 			vdev->vid);
866 		return -1;
867 	}
868 
869 	/*
870 	 * HW VLAN strip reduces the packet length by the length
871 	 * of the VLAN tag, so the packet length needs to be
872 	 * restored by adding it back.
873 	 */
874 	*offset  = VLAN_HLEN;
875 	*vlan_tag = vlan_tags[vdev->vid];
876 
877 	RTE_LOG(DEBUG, VHOST_DATA,
878 		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
879 		vdev->vid, dst_vdev->vid, *vlan_tag);
880 
881 	return 0;
882 }
883 
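/*
 * TX checksum/TSO offload expects the L4 checksum field to be seeded with the
 * pseudo-header checksum; compute it for either IPv4 or IPv6.
 */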
884 static uint16_t
885 get_psd_sum(void *l3_hdr, uint64_t ol_flags)
886 {
887 	if (ol_flags & PKT_TX_IPV4)
888 		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
889 	else /* assume ethertype == ETHER_TYPE_IPv6 */
890 		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
891 }
892 
893 static void virtio_tx_offload(struct rte_mbuf *m)
894 {
895 	void *l3_hdr;
896 	struct ipv4_hdr *ipv4_hdr = NULL;
897 	struct tcp_hdr *tcp_hdr = NULL;
898 	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
899 
900 	l3_hdr = (char *)eth_hdr + m->l2_len;
901 
902 	if (m->ol_flags & PKT_TX_IPV4) {
903 		ipv4_hdr = l3_hdr;
904 		ipv4_hdr->hdr_checksum = 0;
905 		m->ol_flags |= PKT_TX_IP_CKSUM;
906 	}
907 
908 	tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
909 	tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
910 }
911 
912 static inline void
913 free_pkts(struct rte_mbuf **pkts, uint16_t n)
914 {
915 	while (n--)
916 		rte_pktmbuf_free(pkts[n]);
917 }
918 
919 static inline void __attribute__((always_inline))
920 do_drain_mbuf_table(struct mbuf_table *tx_q)
921 {
922 	uint16_t count;
923 
924 	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
925 				 tx_q->m_table, tx_q->len);
926 	if (unlikely(count < tx_q->len))
927 		free_pkts(&tx_q->m_table[count], tx_q->len - count);
928 
929 	tx_q->len = 0;
930 }
931 
932 /*
933  * This function routes the TX packet to the correct interface. This
934  * may be a local device or the physical port.
935  */
936 static inline void __attribute__((always_inline))
937 virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
938 {
939 	struct mbuf_table *tx_q;
940 	unsigned offset = 0;
941 	const uint16_t lcore_id = rte_lcore_id();
942 	struct ether_hdr *nh;
943 
944 
945 	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
946 	if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
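		/*
		 * Broadcast frames are replicated to every vhost device and
		 * then also forwarded to the physical port below.
		 */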
947 		struct vhost_dev *vdev2;
948 
949 		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
950 			virtio_xmit(vdev2, vdev, m);
951 		}
952 		goto queue2nic;
953 	}
954 
955 	/*check if destination is local VM*/
956 	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
957 		rte_pktmbuf_free(m);
958 		return;
959 	}
960 
961 	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
962 		if (unlikely(find_local_dest(vdev, m, &offset,
963 					     &vlan_tag) != 0)) {
964 			rte_pktmbuf_free(m);
965 			return;
966 		}
967 	}
968 
969 	RTE_LOG(DEBUG, VHOST_DATA,
970 		"(%d) TX: MAC address is external\n", vdev->vid);
971 
972 queue2nic:
973 
974 	/*Add packet to the port tx queue*/
975 	tx_q = &lcore_tx_queue[lcore_id];
976 
977 	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
978 	if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
979 		/* Guest has inserted the vlan tag. */
980 		struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
981 		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
982 		if ((vm2vm_mode == VM2VM_HARDWARE) &&
983 			(vh->vlan_tci != vlan_tag_be))
984 			vh->vlan_tci = vlan_tag_be;
985 	} else {
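		/*
		 * No VLAN header present: request HW VLAN insertion of
		 * vlan_tag on transmit (PKT_TX_VLAN_PKT + m->vlan_tci below).
		 */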
986 		m->ol_flags |= PKT_TX_VLAN_PKT;
987 
988 		/*
989 		 * Find the right seg to adjust the data len when offset is
990 		 * bigger than tail room size.
991 		 */
992 		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
993 			if (likely(offset <= rte_pktmbuf_tailroom(m)))
994 				m->data_len += offset;
995 			else {
996 				struct rte_mbuf *seg = m;
997 
998 				while ((seg->next != NULL) &&
999 					(offset > rte_pktmbuf_tailroom(seg)))
1000 					seg = seg->next;
1001 
1002 				seg->data_len += offset;
1003 			}
1004 			m->pkt_len += offset;
1005 		}
1006 
1007 		m->vlan_tci = vlan_tag;
1008 	}
1009 
1010 	if (m->ol_flags & PKT_TX_TCP_SEG)
1011 		virtio_tx_offload(m);
1012 
1013 	tx_q->m_table[tx_q->len++] = m;
1014 	if (enable_stats) {
1015 		vdev->stats.tx_total++;
1016 		vdev->stats.tx++;
1017 	}
1018 
1019 	if (unlikely(tx_q->len == MAX_PKT_BURST))
1020 		do_drain_mbuf_table(tx_q);
1021 }
1022 
1023 
1024 static inline void __attribute__((always_inline))
1025 drain_mbuf_table(struct mbuf_table *tx_q)
1026 {
1027 	static uint64_t prev_tsc;
1028 	uint64_t cur_tsc;
1029 
1030 	if (tx_q->len == 0)
1031 		return;
1032 
1033 	cur_tsc = rte_rdtsc();
1034 	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
1035 		prev_tsc = cur_tsc;
1036 
1037 		RTE_LOG(DEBUG, VHOST_DATA,
1038 			"TX queue drained after timeout with burst size %u\n",
1039 			tx_q->len);
1040 		do_drain_mbuf_table(tx_q);
1041 	}
1042 }
1043 
1044 static inline void __attribute__((always_inline))
1045 drain_eth_rx(struct vhost_dev *vdev)
1046 {
1047 	uint16_t rx_count, enqueue_count;
1048 	struct rte_mbuf *pkts[MAX_PKT_BURST];
1049 
1050 	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
1051 				    pkts, MAX_PKT_BURST);
1052 	if (!rx_count)
1053 		return;
1054 
1055 	/*
1056 	 * When "enable_retry" is set, wait and retry when there are not
1057 	 * enough free slots in the queue to hold @rx_count packets,
1058 	 * to reduce packet loss.
1059 	 */
1060 	if (enable_retry &&
1061 	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
1062 			VIRTIO_RXQ))) {
1063 		uint32_t retry;
1064 
1065 		for (retry = 0; retry < burst_rx_retry_num; retry++) {
1066 			rte_delay_us(burst_rx_delay_time);
1067 			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
1068 					VIRTIO_RXQ))
1069 				break;
1070 		}
1071 	}
1072 
1073 	enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
1074 						pkts, rx_count);
1075 	if (enable_stats) {
1076 		rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
1077 		rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
1078 	}
1079 
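	/*
	 * rte_vhost_enqueue_burst() copies packets into guest buffers, so all
	 * host mbufs can be freed here even if some packets were dropped.
	 */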
1080 	free_pkts(pkts, rx_count);
1081 }
1082 
1083 static inline void __attribute__((always_inline))
1084 drain_virtio_tx(struct vhost_dev *vdev)
1085 {
1086 	struct rte_mbuf *pkts[MAX_PKT_BURST];
1087 	uint16_t count;
1088 	uint16_t i;
1089 
1090 	count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
1091 					pkts, MAX_PKT_BURST);
1092 
1093 	/* setup VMDq for the first packet */
1094 	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
1095 		if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
1096 			free_pkts(pkts, count);
1097 	}
1098 
1099 	for (i = 0; i < count; ++i)
1100 		virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
1101 }
1102 
1103 /*
1104  * Main function of vhost-switch. It basically does:
1105  *
1106  * for each vhost device {
1107  *    - drain_eth_rx()
1108  *
1109  *      Which drains the host eth Rx queue linked to the vhost device,
1110  *      and delivers all of them to the guest virtio Rx ring associated
1111  *      with this vhost device.
1112  *
1113  *    - drain_virtio_tx()
1114  *
1115  *      Which drains the guest virtio Tx queue and delivers all of them
1116  *      to the target, which could be another vhost device, or the
1117  *      physical eth dev. The route is done in function "virtio_tx_route".
1118  * }
1119  */
1120 static int
1121 switch_worker(void *arg __rte_unused)
1122 {
1123 	unsigned i;
1124 	unsigned lcore_id = rte_lcore_id();
1125 	struct vhost_dev *vdev;
1126 	struct mbuf_table *tx_q;
1127 
1128 	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
1129 
1130 	tx_q = &lcore_tx_queue[lcore_id];
1131 	for (i = 0; i < rte_lcore_count(); i++) {
1132 		if (lcore_ids[i] == lcore_id) {
1133 			tx_q->txq_id = i;
1134 			break;
1135 		}
1136 	}
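
	/*
	 * Each lcore owns one NIC TX queue: the queue index is the lcore's
	 * position in lcore_ids[], matching the tx_rings configured in
	 * port_init() (one per lcore).
	 */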
1137 
1138 	while(1) {
1139 		drain_mbuf_table(tx_q);
1140 
1141 		/*
1142 		 * If requested, inform the configuration core that this core
1143 		 * has left the linked list and no longer uses any device.
1144 		 */
1145 		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
1146 			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
1147 
1148 		/*
1149 		 * Process vhost devices
1150 		 */
1151 		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
1152 			      lcore_vdev_entry) {
1153 			if (unlikely(vdev->remove)) {
1154 				unlink_vmdq(vdev);
1155 				vdev->ready = DEVICE_SAFE_REMOVE;
1156 				continue;
1157 			}
1158 
1159 			if (likely(vdev->ready == DEVICE_RX))
1160 				drain_eth_rx(vdev);
1161 
1162 			if (likely(!vdev->remove))
1163 				drain_virtio_tx(vdev);
1164 		}
1165 	}
1166 
1167 	return 0;
1168 }
1169 
1170 /*
1171  * Remove a device from the specific data core linked list and from the
1172  * main linked list. Synchronization occurs through the use of the
1173  * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
1174  * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
1175  */
1176 static void
1177 destroy_device(int vid)
1178 {
1179 	struct vhost_dev *vdev = NULL;
1180 	int lcore;
1181 
1182 	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1183 		if (vdev->vid == vid)
1184 			break;
1185 	}
1186 	if (!vdev)
1187 		return;
1188 	/*set the remove flag. */
1189 	vdev->remove = 1;
1190 	while(vdev->ready != DEVICE_SAFE_REMOVE) {
1191 		rte_pause();
1192 	}
1193 
1194 	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
1195 		     lcore_vdev_entry);
1196 	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
1197 
1198 
1199 	/* Set the dev_removal_flag on each lcore. */
1200 	RTE_LCORE_FOREACH_SLAVE(lcore)
1201 		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
1202 
1203 	/*
1204 	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
1205 	 * we can be sure that they can no longer access the device removed
1206 	 * from the linked lists and that the devices are no longer in use.
1207 	 */
1208 	RTE_LCORE_FOREACH_SLAVE(lcore) {
1209 		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
1210 			rte_pause();
1211 	}
1212 
1213 	lcore_info[vdev->coreid].device_num--;
1214 
1215 	RTE_LOG(INFO, VHOST_DATA,
1216 		"(%d) device has been removed from data core\n",
1217 		vdev->vid);
1218 
1219 	rte_free(vdev);
1220 }
1221 
1222 /*
1223  * A new device is added to a data core. First the device is added to the main linked list
1224  * and then allocated to a specific data core.
1225  */
1226 static int
1227 new_device(int vid)
1228 {
1229 	int lcore, core_add = 0;
1230 	uint32_t device_num_min = num_devices;
1231 	struct vhost_dev *vdev;
1232 
1233 	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
1234 	if (vdev == NULL) {
1235 		RTE_LOG(INFO, VHOST_DATA,
1236 			"(%d) couldn't allocate memory for vhost dev\n",
1237 			vid);
1238 		return -1;
1239 	}
1240 	vdev->vid = vid;
1241 
1242 	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
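	/* RX traffic for this device arrives on the first queue of its VMDQ pool. */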
1243 	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
1244 
1245 	/*reset ready flag*/
1246 	vdev->ready = DEVICE_MAC_LEARNING;
1247 	vdev->remove = 0;
1248 
1249 	/* Find a suitable lcore to add the device. */
1250 	RTE_LCORE_FOREACH_SLAVE(lcore) {
1251 		if (lcore_info[lcore].device_num < device_num_min) {
1252 			device_num_min = lcore_info[lcore].device_num;
1253 			core_add = lcore;
1254 		}
1255 	}
1256 	vdev->coreid = core_add;
1257 
1258 	TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
1259 			  lcore_vdev_entry);
1260 	lcore_info[vdev->coreid].device_num++;
1261 
1262 	/* Disable notifications. */
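	/*
	 * The switch cores poll the virtio rings, so the guest does not need
	 * to kick the host when it adds buffers.
	 */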
1263 	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
1264 	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
1265 
1266 	RTE_LOG(INFO, VHOST_DATA,
1267 		"(%d) device has been added to data core %d\n",
1268 		vid, vdev->coreid);
1269 
1270 	return 0;
1271 }
1272 
1273 /*
1274  * These callbacks allow devices to be added to the data core when configuration
1275  * is fully complete.
1276  */
1277 static const struct virtio_net_device_ops virtio_net_device_ops =
1278 {
1279 	.new_device =  new_device,
1280 	.destroy_device = destroy_device,
1281 };
1282 
1283 /*
1284  * This thread wakes up periodically to print stats if the user has
1285  * enabled them.
1286  */
1287 static void
1288 print_stats(void)
1289 {
1290 	struct vhost_dev *vdev;
1291 	uint64_t tx_dropped, rx_dropped;
1292 	uint64_t tx, tx_total, rx, rx_total;
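	/* ANSI escape sequences: clear the screen and move the cursor to the top-left. */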
1293 	const char clr[] = { 27, '[', '2', 'J', '\0' };
1294 	const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
1295 
1296 	while(1) {
1297 		sleep(enable_stats);
1298 
1299 		/* Clear screen and move to top left */
1300 		printf("%s%s\n", clr, top_left);
1301 		printf("Device statistics =================================\n");
1302 
1303 		TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1304 			tx_total   = vdev->stats.tx_total;
1305 			tx         = vdev->stats.tx;
1306 			tx_dropped = tx_total - tx;
1307 
1308 			rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
1309 			rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
1310 			rx_dropped = rx_total - rx;
1311 
1312 			printf("Statistics for device %d\n"
1313 				"-----------------------\n"
1314 				"TX total:              %" PRIu64 "\n"
1315 				"TX dropped:            %" PRIu64 "\n"
1316 				"TX successful:         %" PRIu64 "\n"
1317 				"RX total:              %" PRIu64 "\n"
1318 				"RX dropped:            %" PRIu64 "\n"
1319 				"RX successful:         %" PRIu64 "\n",
1320 				vdev->vid,
1321 				tx_total, tx_dropped, tx,
1322 				rx_total, rx_dropped, rx);
1323 		}
1324 
1325 		printf("===================================================\n");
1326 	}
1327 }
1328 
1329 /* When we receive an INT signal, unregister the vhost driver */
1330 static void
1331 sigint_handler(__rte_unused int signum)
1332 {
1333 	/* Unregister vhost driver. */
1334 	int ret = rte_vhost_driver_unregister((char *)&dev_basename);
1335 	if (ret != 0)
1336 		rte_exit(EXIT_FAILURE, "vhost driver unregister failure.\n");
1337 	exit(0);
1338 }
1339 
1340 /*
1341  * While creating an mbuf pool, one key thing is to figure out how
1342  * many mbuf entries are enough for our use. FYI, here are some
1343  * guidelines:
1344  *
1345  * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
1346  *
1347  * - For each switch core (a CPU core that does the packet switching), we
1348  *   also need to reserve some mbufs for receiving packets from the virtio
1349  *   Tx queue. How many are enough depends on the usage; it's normally
1350  *   a simple calculation like the following:
1351  *
1352  *       MAX_PKT_BURST * max packet size / mbuf size
1353  *
1354  *   So, we definitely need to allocate more mbufs when TSO is enabled.
1355  *
1356  * - Similarly, for each switching core, we should reserve @nr_rx_desc
1357  *   mbufs for receiving packets from the physical NIC device.
1358  *
1359  * - We also need to make sure that, for each switch core, we have
1360  *   allocated enough mbufs to fill up the mbuf cache.
1361  */
1362 static void
1363 create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
1364 	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
1365 {
1366 	uint32_t nr_mbufs;
1367 	uint32_t nr_mbufs_per_core;
1368 	uint32_t mtu = 1500;
1369 
1370 	if (mergeable)
1371 		mtu = 9000;
1372 	if (enable_tso)
1373 		mtu = 64 * 1024;
1374 
1375 	nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
1376 			(mbuf_size - RTE_PKTMBUF_HEADROOM) * MAX_PKT_BURST;
1377 	nr_mbufs_per_core += nr_rx_desc;
1378 	nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
1379 
1380 	nr_mbufs  = nr_queues * nr_rx_desc;
1381 	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
1382 	nr_mbufs *= nr_port;
1383 
1384 	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
1385 					    nr_mbuf_cache, 0, mbuf_size,
1386 					    rte_socket_id());
1387 	if (mbuf_pool == NULL)
1388 		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1389 }
1390 
1391 /*
1392  * Main function, does initialisation and calls the per-lcore functions. The CUSE
1393  * device is also registered here to handle the IOCTLs.
1394  */
1395 int
1396 main(int argc, char *argv[])
1397 {
1398 	unsigned lcore_id, core_id = 0;
1399 	unsigned nb_ports, valid_num_ports;
1400 	int ret;
1401 	uint8_t portid;
1402 	static pthread_t tid;
1403 	char thread_name[RTE_MAX_THREAD_NAME_LEN];
1404 	uint64_t flags = 0;
1405 
1406 	signal(SIGINT, sigint_handler);
1407 
1408 	/* init EAL */
1409 	ret = rte_eal_init(argc, argv);
1410 	if (ret < 0)
1411 		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1412 	argc -= ret;
1413 	argv += ret;
1414 
1415 	/* parse app arguments */
1416 	ret = us_vhost_parse_args(argc, argv);
1417 	if (ret < 0)
1418 		rte_exit(EXIT_FAILURE, "Invalid argument\n");
1419 
1420 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1421 		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
1422 
1423 		if (rte_lcore_is_enabled(lcore_id))
1424 			lcore_ids[core_id++] = lcore_id;
1425 	}
1426 	if (rte_lcore_count() > RTE_MAX_LCORE)
1427 		rte_exit(EXIT_FAILURE,"Not enough cores\n");
1428 
1429 	/* Get the number of physical ports. */
1430 	nb_ports = rte_eth_dev_count();
1431 
1432 	/*
1433 	 * Update the global var NUM_PORTS and global array PORTS
1434 	 * and get value of var VALID_NUM_PORTS according to system ports number
1435 	 */
1436 	valid_num_ports = check_ports_num(nb_ports);
1437 
1438 	if ((valid_num_ports ==  0) || (valid_num_ports > MAX_SUP_PORTS)) {
1439 		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
1440 			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
1441 		return -1;
1442 	}
1443 
1444 	/*
1445 	 * FIXME: here we are trying to allocate mbufs big enough for
1446 	 * @MAX_QUEUES, but the truth is we're never going to use that
1447 	 * many queues here. We probably should only do allocation for
1448 	 * those queues we are going to use.
1449 	 */
1450 	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
1451 			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
1452 
1453 	if (vm2vm_mode == VM2VM_HARDWARE) {
1454 		/* Enable VT loop back to let L2 switch to do it. */
1455 		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
1456 		RTE_LOG(DEBUG, VHOST_CONFIG,
1457 			"Enable loop back for L2 switch in vmdq.\n");
1458 	}
1459 
1460 	/* initialize all ports */
1461 	for (portid = 0; portid < nb_ports; portid++) {
1462 		/* skip ports that are not enabled */
1463 		if ((enabled_port_mask & (1 << portid)) == 0) {
1464 			RTE_LOG(INFO, VHOST_PORT,
1465 				"Skipping disabled port %d\n", portid);
1466 			continue;
1467 		}
1468 		if (port_init(portid) != 0)
1469 			rte_exit(EXIT_FAILURE,
1470 				"Cannot initialize network ports\n");
1471 	}
1472 
1473 	/* Enable stats if the user option is set. */
1474 	if (enable_stats) {
1475 		ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
1476 		if (ret != 0)
1477 			rte_exit(EXIT_FAILURE,
1478 				"Cannot create print-stats thread\n");
1479 
1480 		/* Set thread_name for aid in debugging.  */
1481 		snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
1482 		ret = rte_thread_setname(tid, thread_name);
1483 		if (ret != 0)
1484 			RTE_LOG(DEBUG, VHOST_CONFIG,
1485 				"Cannot set print-stats name\n");
1486 	}
1487 
1488 	/* Launch all data cores. */
1489 	RTE_LCORE_FOREACH_SLAVE(lcore_id)
1490 		rte_eal_remote_launch(switch_worker, NULL, lcore_id);
1491 
1492 	if (mergeable == 0)
1493 		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
1494 
1495 	if (client_mode)
1496 		flags |= RTE_VHOST_USER_CLIENT;
1497 
1498 	/* Register vhost(cuse or user) driver to handle vhost messages. */
1499 	ret = rte_vhost_driver_register(dev_basename, flags);
1500 	if (ret != 0)
1501 		rte_exit(EXIT_FAILURE, "vhost driver register failure.\n");
1502 
1503 	rte_vhost_driver_callback_register(&virtio_net_device_ops);
1504 
1505 	/* Start the vhost session (CUSE or vhost-user). */
1506 	rte_vhost_driver_session_start();
1507 	return 0;
1508 
1509 }
1510