xref: /dpdk/examples/vhost/main.c (revision bb7085b46ac502e9b494e30d2961400d4baf7a4f)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <arpa/inet.h>
35 #include <getopt.h>
36 #include <linux/if_ether.h>
37 #include <linux/if_vlan.h>
38 #include <linux/virtio_net.h>
39 #include <linux/virtio_ring.h>
40 #include <signal.h>
41 #include <stdint.h>
42 #include <sys/eventfd.h>
43 #include <sys/param.h>
44 #include <unistd.h>
45 
46 #include <rte_atomic.h>
47 #include <rte_cycles.h>
48 #include <rte_ethdev.h>
49 #include <rte_log.h>
50 #include <rte_string_fns.h>
51 #include <rte_malloc.h>
52 #include <rte_virtio_net.h>
53 #include <rte_ip.h>
54 #include <rte_tcp.h>
55 
56 #include "main.h"
57 
58 #ifndef MAX_QUEUES
59 #define MAX_QUEUES 128
60 #endif
61 
62 /* the maximum number of external ports supported */
63 #define MAX_SUP_PORTS 1
64 
65 #define MBUF_CACHE_SIZE	128
66 #define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE
67 
68 #define MAX_PKT_BURST 32		/* Max burst size for RX/TX */
69 #define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */
70 
71 #define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
72 #define BURST_RX_RETRIES 4		/* Number of retries on RX. */
73 
74 #define JUMBO_FRAME_MAX_SIZE    0x2600
75 
76 /* State of virtio device. */
77 #define DEVICE_MAC_LEARNING 0
78 #define DEVICE_RX			1
79 #define DEVICE_SAFE_REMOVE	2
80 
81 /* Configurable number of RX/TX ring descriptors */
82 #define RTE_TEST_RX_DESC_DEFAULT 1024
83 #define RTE_TEST_TX_DESC_DEFAULT 512
84 
85 #define INVALID_PORT_ID 0xFF
86 
87 /* Max number of devices. Limited by vmdq. */
88 #define MAX_DEVICES 64
89 
90 /* Size of buffers used for snprintfs. */
91 #define MAX_PRINT_BUFF 6072
92 
93 /* Maximum character device basename size. */
94 #define MAX_BASENAME_SZ 10
95 
96 /* Maximum long option length for option parsing. */
97 #define MAX_LONG_OPT_SZ 64
98 
99 /* mask of enabled ports */
100 static uint32_t enabled_port_mask = 0;
101 
102 /* Promiscuous mode */
103 static uint32_t promiscuous;
104 
105 /* number of devices/queues to support*/
106 static uint32_t num_queues = 0;
107 static uint32_t num_devices;
108 
109 static struct rte_mempool *mbuf_pool;
110 static int mergeable;
111 
112 /* Do VLAN strip on the host; enabled by default */
113 static uint32_t vlan_strip = 1;
114 
115 /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
116 typedef enum {
117 	VM2VM_DISABLED = 0,
118 	VM2VM_SOFTWARE = 1,
119 	VM2VM_HARDWARE = 2,
120 	VM2VM_LAST
121 } vm2vm_type;
122 static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
123 
124 /* Enable stats. */
125 static uint32_t enable_stats = 0;
126 /* Enable retries on RX. */
127 static uint32_t enable_retry = 1;
128 
129 /* Disable TX checksum offload */
130 static uint32_t enable_tx_csum;
131 
132 /* Disable TSO offload */
133 static uint32_t enable_tso;
134 
135 static int client_mode;
136 
137 /* Specify timeout (in useconds) between retries on RX. */
138 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
139 /* Specify the number of retries on RX. */
140 static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
141 
142 /* Character device basename. Can be set by user. */
143 static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
144 
145 /* Empty VMDQ configuration structure. Filled in programmatically. */
146 static struct rte_eth_conf vmdq_conf_default = {
147 	.rxmode = {
148 		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
149 		.split_hdr_size = 0,
150 		.header_split   = 0, /**< Header Split disabled */
151 		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
152 		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
153 		/*
154 		 * VLAN strip is necessary for 1G NICs such as the I350;
155 		 * it fixes a bug where IPv4 forwarding in the guest cannot
156 		 * forward packets from one virtio dev to another virtio dev.
157 		 */
158 		.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
159 		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
160 		.hw_strip_crc   = 0, /**< CRC stripped by hardware */
161 	},
162 
163 	.txmode = {
164 		.mq_mode = ETH_MQ_TX_NONE,
165 	},
166 	.rx_adv_conf = {
167 		/*
168 		 * should be overridden separately in code with
169 		 * appropriate values
170 		 */
171 		.vmdq_rx_conf = {
172 			.nb_queue_pools = ETH_8_POOLS,
173 			.enable_default_pool = 0,
174 			.default_pool = 0,
175 			.nb_pool_maps = 0,
176 			.pool_map = {{0, 0},},
177 		},
178 	},
179 };
180 
181 static unsigned lcore_ids[RTE_MAX_LCORE];
182 static uint8_t ports[RTE_MAX_ETHPORTS];
183 static unsigned num_ports = 0; /**< The number of ports specified on the command line */
184 static uint16_t num_pf_queues, num_vmdq_queues;
185 static uint16_t vmdq_pool_base, vmdq_queue_base;
186 static uint16_t queues_per_pool;
187 
188 const uint16_t vlan_tags[] = {
189 	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
190 	1008, 1009, 1010, 1011,	1012, 1013, 1014, 1015,
191 	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
192 	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
193 	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
194 	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
195 	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
196 	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
197 };
198 
199 /* ethernet addresses of ports */
200 static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
201 
202 static struct vhost_dev_tailq_list vhost_dev_list =
203 	TAILQ_HEAD_INITIALIZER(vhost_dev_list);
204 
205 static struct lcore_info lcore_info[RTE_MAX_LCORE];
206 
207 /* Used for queueing bursts of TX packets. */
208 struct mbuf_table {
209 	unsigned len;
210 	unsigned txq_id;
211 	struct rte_mbuf *m_table[MAX_PKT_BURST];
212 };
213 
214 /* TX queue for each data core. */
215 struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
216 
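/*
 * Convert the TX drain period (BURST_TX_DRAIN_US) from microseconds into
 * TSC cycles, rounding the cycles-per-microsecond value up.
 */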
217 #define MBUF_TABLE_DRAIN_TSC	((rte_get_tsc_hz() + US_PER_S - 1) \
218 				 / US_PER_S * BURST_TX_DRAIN_US)
219 #define VLAN_HLEN       4
220 
221 /*
222  * Builds up the correct configuration for VMDQ VLAN pool map
223  * according to the pool & queue limits.
224  */
225 static inline int
226 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
227 {
228 	struct rte_eth_vmdq_rx_conf conf;
229 	struct rte_eth_vmdq_rx_conf *def_conf =
230 		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
231 	unsigned i;
232 
233 	memset(&conf, 0, sizeof(conf));
234 	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
235 	conf.nb_pool_maps = num_devices;
236 	conf.enable_loop_back = def_conf->enable_loop_back;
237 	conf.rx_mode = def_conf->rx_mode;
238 
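	/* One pool per device: VLAN tag i maps only to pool i. */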
239 	for (i = 0; i < conf.nb_pool_maps; i++) {
240 		conf.pool_map[i].vlan_id = vlan_tags[ i ];
241 		conf.pool_map[i].pools = (1UL << i);
242 	}
243 
244 	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
245 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
246 		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
247 	return 0;
248 }
249 
250 /*
251  * Validate the device number against the max pool number obtained from
252  * dev_info. If the device number is invalid, print an error message and
253  * return -1. Each device must have its own pool.
254  */
255 static inline int
256 validate_num_devices(uint32_t max_nb_devices)
257 {
258 	if (num_devices > max_nb_devices) {
259 		RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
260 		return -1;
261 	}
262 	return 0;
263 }
264 
265 /*
266  * Initialises a given port using global settings, with the RX buffers
267  * coming from the global mbuf_pool
268  */
269 static inline int
270 port_init(uint8_t port)
271 {
272 	struct rte_eth_dev_info dev_info;
273 	struct rte_eth_conf port_conf;
274 	struct rte_eth_rxconf *rxconf;
275 	struct rte_eth_txconf *txconf;
276 	int16_t rx_rings, tx_rings;
277 	uint16_t rx_ring_size, tx_ring_size;
278 	int retval;
279 	uint16_t q;
280 
281 	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
282 	rte_eth_dev_info_get (port, &dev_info);
283 
284 	if (dev_info.max_rx_queues > MAX_QUEUES) {
285 		rte_exit(EXIT_FAILURE,
286 			"please define MAX_QUEUES no less than %u in %s\n",
287 			dev_info.max_rx_queues, __FILE__);
288 	}
289 
290 	rxconf = &dev_info.default_rxconf;
291 	txconf = &dev_info.default_txconf;
292 	rxconf->rx_drop_en = 1;
293 
294 	/* Enable vlan offload */
295 	txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;
296 
297 	/* Configure the number of supported virtio devices based on VMDQ limits */
298 	num_devices = dev_info.max_vmdq_pools;
299 
300 	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
301 	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
302 	tx_rings = (uint16_t)rte_lcore_count();
303 
304 	retval = validate_num_devices(MAX_DEVICES);
305 	if (retval < 0)
306 		return retval;
307 
308 	/* Get port configuration. */
309 	retval = get_eth_conf(&port_conf, num_devices);
310 	if (retval < 0)
311 		return retval;
312 	/* NIC queues are divided into pf queues and vmdq queues.  */
313 	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
314 	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
315 	num_vmdq_queues = num_devices * queues_per_pool;
316 	num_queues = num_pf_queues + num_vmdq_queues;
317 	vmdq_queue_base = dev_info.vmdq_queue_base;
318 	vmdq_pool_base  = dev_info.vmdq_pool_base;
319 	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
320 		num_pf_queues, num_devices, queues_per_pool);
321 
322 	if (port >= rte_eth_dev_count()) return -1;
323 
324 	if (enable_tx_csum == 0)
325 		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);
326 
327 	if (enable_tso == 0) {
328 		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
329 		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
330 	}
331 
332 	rx_rings = (uint16_t)dev_info.max_rx_queues;
333 	/* Configure ethernet device. */
334 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
335 	if (retval != 0) {
336 		RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
337 			port, strerror(-retval));
338 		return retval;
339 	}
340 
341 	/* Setup the queues. */
342 	for (q = 0; q < rx_rings; q ++) {
343 		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
344 						rte_eth_dev_socket_id(port),
345 						rxconf,
346 						mbuf_pool);
347 		if (retval < 0) {
348 			RTE_LOG(ERR, VHOST_PORT,
349 				"Failed to setup rx queue %u of port %u: %s.\n",
350 				q, port, strerror(-retval));
351 			return retval;
352 		}
353 	}
354 	for (q = 0; q < tx_rings; q ++) {
355 		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
356 						rte_eth_dev_socket_id(port),
357 						txconf);
358 		if (retval < 0) {
359 			RTE_LOG(ERR, VHOST_PORT,
360 				"Failed to setup tx queue %u of port %u: %s.\n",
361 				q, port, strerror(-retval));
362 			return retval;
363 		}
364 	}
365 
366 	/* Start the device. */
367 	retval  = rte_eth_dev_start(port);
368 	if (retval < 0) {
369 		RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
370 			port, strerror(-retval));
371 		return retval;
372 	}
373 
374 	if (promiscuous)
375 		rte_eth_promiscuous_enable(port);
376 
377 	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
378 	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
379 	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
380 			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
381 			(unsigned)port,
382 			vmdq_ports_eth_addr[port].addr_bytes[0],
383 			vmdq_ports_eth_addr[port].addr_bytes[1],
384 			vmdq_ports_eth_addr[port].addr_bytes[2],
385 			vmdq_ports_eth_addr[port].addr_bytes[3],
386 			vmdq_ports_eth_addr[port].addr_bytes[4],
387 			vmdq_ports_eth_addr[port].addr_bytes[5]);
388 
389 	return 0;
390 }
391 
392 /*
393  * Set character device basename.
394  */
395 static int
396 us_vhost_parse_basename(const char *q_arg)
397 {
398 	/* Reject a basename that will not fit in the buffer (with its NUL). */
399 
400 	if (strnlen(q_arg, MAX_BASENAME_SZ) >= MAX_BASENAME_SZ)
401 		return -1;
402 	else
403 		snprintf(dev_basename, MAX_BASENAME_SZ, "%s", q_arg);
404 
405 	return 0;
406 }
407 
408 /*
409  * Parse the portmask provided at run time.
410  */
411 static int
412 parse_portmask(const char *portmask)
413 {
414 	char *end = NULL;
415 	unsigned long pm;
416 
417 	errno = 0;
418 
419 	/* parse hexadecimal string */
420 	pm = strtoul(portmask, &end, 16);
421 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
422 		return -1;
423 
424 	if (pm == 0)
425 		return -1;
426 
427 	return pm;
428 
429 }
430 
431 /*
432  * Parse num options at run time.
433  */
434 static int
435 parse_num_opt(const char *q_arg, uint32_t max_valid_value)
436 {
437 	char *end = NULL;
438 	unsigned long num;
439 
440 	errno = 0;
441 
442 	/* parse unsigned int string */
443 	num = strtoul(q_arg, &end, 10);
444 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
445 		return -1;
446 
447 	if (num > max_valid_value)
448 		return -1;
449 
450 	return num;
451 
452 }
453 
454 /*
455  * Display usage
456  */
457 static void
458 us_vhost_usage(const char *prgname)
459 {
460 	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
461 	"		--vm2vm [0|1|2]\n"
462 	"		--rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
463 	"		--dev-basename <name>\n"
464 	"		--nb-devices ND\n"
465 	"		-p PORTMASK: Set mask for ports to be used by application\n"
466 	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
467 	"		--rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destintation queue is full\n"
468 	"		--rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n"
469 	"		--rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n"
470 	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
471 	"		--vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
472 	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
473 	"		--dev-basename: The basename to be used for the character device.\n"
474 	"		--tx-csum [0|1] disable/enable TX checksum offload.\n"
475 	"		--tso [0|1] disable/enable TCP segment offload.\n"
476 	"		--client register a vhost-user socket as client mode.\n",
477 	       prgname);
478 }
479 
480 /*
481  * Parse the arguments given in the command line of the application.
482  */
483 static int
484 us_vhost_parse_args(int argc, char **argv)
485 {
486 	int opt, ret;
487 	int option_index;
488 	unsigned i;
489 	const char *prgname = argv[0];
490 	static struct option long_option[] = {
491 		{"vm2vm", required_argument, NULL, 0},
492 		{"rx-retry", required_argument, NULL, 0},
493 		{"rx-retry-delay", required_argument, NULL, 0},
494 		{"rx-retry-num", required_argument, NULL, 0},
495 		{"mergeable", required_argument, NULL, 0},
496 		{"vlan-strip", required_argument, NULL, 0},
497 		{"stats", required_argument, NULL, 0},
498 		{"dev-basename", required_argument, NULL, 0},
499 		{"tx-csum", required_argument, NULL, 0},
500 		{"tso", required_argument, NULL, 0},
501 		{"client", no_argument, &client_mode, 1},
502 		{NULL, 0, 0, 0},
503 	};
504 
505 	/* Parse command line */
506 	while ((opt = getopt_long(argc, argv, "p:P",
507 			long_option, &option_index)) != EOF) {
508 		switch (opt) {
509 		/* Portmask */
510 		case 'p':
511 			enabled_port_mask = parse_portmask(optarg);
512 			if (enabled_port_mask == 0) {
513 				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
514 				us_vhost_usage(prgname);
515 				return -1;
516 			}
517 			break;
518 
519 		case 'P':
520 			promiscuous = 1;
521 			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
522 				ETH_VMDQ_ACCEPT_BROADCAST |
523 				ETH_VMDQ_ACCEPT_MULTICAST;
524 			rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
525 
526 			break;
527 
528 		case 0:
529 			/* Enable/disable vm2vm comms. */
530 			if (!strncmp(long_option[option_index].name, "vm2vm",
531 				MAX_LONG_OPT_SZ)) {
532 				ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
533 				if (ret == -1) {
534 					RTE_LOG(INFO, VHOST_CONFIG,
535 						"Invalid argument for "
536 						"vm2vm [0|1|2]\n");
537 					us_vhost_usage(prgname);
538 					return -1;
539 				} else {
540 					vm2vm_mode = (vm2vm_type)ret;
541 				}
542 			}
543 
544 			/* Enable/disable retries on RX. */
545 			if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
546 				ret = parse_num_opt(optarg, 1);
547 				if (ret == -1) {
548 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
549 					us_vhost_usage(prgname);
550 					return -1;
551 				} else {
552 					enable_retry = ret;
553 				}
554 			}
555 
556 			/* Enable/disable TX checksum offload. */
557 			if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
558 				ret = parse_num_opt(optarg, 1);
559 				if (ret == -1) {
560 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
561 					us_vhost_usage(prgname);
562 					return -1;
563 				} else
564 					enable_tx_csum = ret;
565 			}
566 
567 			/* Enable/disable TSO offload. */
568 			if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
569 				ret = parse_num_opt(optarg, 1);
570 				if (ret == -1) {
571 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
572 					us_vhost_usage(prgname);
573 					return -1;
574 				} else
575 					enable_tso = ret;
576 			}
577 
578 			/* Specify the retries delay time (in useconds) on RX. */
579 			if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
580 				ret = parse_num_opt(optarg, INT32_MAX);
581 				if (ret == -1) {
582 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
583 					us_vhost_usage(prgname);
584 					return -1;
585 				} else {
586 					burst_rx_delay_time = ret;
587 				}
588 			}
589 
590 			/* Specify the retries number on RX. */
591 			if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
592 				ret = parse_num_opt(optarg, INT32_MAX);
593 				if (ret == -1) {
594 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
595 					us_vhost_usage(prgname);
596 					return -1;
597 				} else {
598 					burst_rx_retry_num = ret;
599 				}
600 			}
601 
602 			/* Enable/disable RX mergeable buffers. */
603 			if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
604 				ret = parse_num_opt(optarg, 1);
605 				if (ret == -1) {
606 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
607 					us_vhost_usage(prgname);
608 					return -1;
609 				} else {
610 					mergeable = !!ret;
611 					if (ret) {
612 						vmdq_conf_default.rxmode.jumbo_frame = 1;
613 						vmdq_conf_default.rxmode.max_rx_pkt_len
614 							= JUMBO_FRAME_MAX_SIZE;
615 					}
616 				}
617 			}
618 
619 			/* Enable/disable RX VLAN strip on host. */
620 			if (!strncmp(long_option[option_index].name,
621 				"vlan-strip", MAX_LONG_OPT_SZ)) {
622 				ret = parse_num_opt(optarg, 1);
623 				if (ret == -1) {
624 					RTE_LOG(INFO, VHOST_CONFIG,
625 						"Invalid argument for VLAN strip [0|1]\n");
626 					us_vhost_usage(prgname);
627 					return -1;
628 				} else {
629 					vlan_strip = !!ret;
630 					vmdq_conf_default.rxmode.hw_vlan_strip =
631 						vlan_strip;
632 				}
633 			}
634 
635 			/* Enable/disable stats. */
636 			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
637 				ret = parse_num_opt(optarg, INT32_MAX);
638 				if (ret == -1) {
639 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
640 					us_vhost_usage(prgname);
641 					return -1;
642 				} else {
643 					enable_stats = ret;
644 				}
645 			}
646 
647 			/* Set character device basename. */
648 			if (!strncmp(long_option[option_index].name, "dev-basename", MAX_LONG_OPT_SZ)) {
649 				if (us_vhost_parse_basename(optarg) == -1) {
650 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device basename (Max %d characters)\n", MAX_BASENAME_SZ);
651 					us_vhost_usage(prgname);
652 					return -1;
653 				}
654 			}
655 
656 			break;
657 
658 			/* Invalid option - print options. */
659 		default:
660 			us_vhost_usage(prgname);
661 			return -1;
662 		}
663 	}
664 
665 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
666 		if (enabled_port_mask & (1 << i))
667 			ports[num_ports++] = (uint8_t)i;
668 	}
669 
670 	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
671 		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
672 			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
673 		return -1;
674 	}
675 
676 	return 0;
677 }
678 
679 /*
680  * Update the global variable num_ports and the array ports according to the
681  * number of ports in the system, and return the number of valid ports.
682  */
683 static unsigned check_ports_num(unsigned nb_ports)
684 {
685 	unsigned valid_num_ports = num_ports;
686 	unsigned portid;
687 
688 	if (num_ports > nb_ports) {
689 		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
690 			num_ports, nb_ports);
691 		num_ports = nb_ports;
692 	}
693 
694 	for (portid = 0; portid < num_ports; portid ++) {
695 		if (ports[portid] >= nb_ports) {
696 			RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
697 				ports[portid], (nb_ports - 1));
698 			ports[portid] = INVALID_PORT_ID;
699 			valid_num_ports--;
700 		}
701 	}
702 	return valid_num_ports;
703 }
704 
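/*
 * Look up a vhost device by MAC address. Only devices that have completed
 * MAC learning (ready == DEVICE_RX) are considered.
 */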
705 static inline struct vhost_dev *__attribute__((always_inline))
706 find_vhost_dev(struct ether_addr *mac)
707 {
708 	struct vhost_dev *vdev;
709 
710 	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
711 		if (vdev->ready == DEVICE_RX &&
712 		    is_same_ether_addr(mac, &vdev->mac_address))
713 			return vdev;
714 	}
715 
716 	return NULL;
717 }
718 
719 /*
720  * This function learns the MAC address of the device and registers it, along
721  * with a VLAN tag, to the device's VMDQ pool.
722  */
723 static int
724 link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
725 {
726 	struct ether_hdr *pkt_hdr;
727 	int i, ret;
728 
729 	/* Learn MAC address of guest device from packet */
730 	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
731 
732 	if (find_vhost_dev(&pkt_hdr->s_addr)) {
733 		RTE_LOG(ERR, VHOST_DATA,
734 			"(%d) device is using a registered MAC!\n",
735 			vdev->vid);
736 		return -1;
737 	}
738 
739 	for (i = 0; i < ETHER_ADDR_LEN; i++)
740 		vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
741 
742 	/* vlan_tag currently uses the device_id. */
743 	vdev->vlan_tag = vlan_tags[vdev->vid];
744 
745 	/* Print out VMDQ registration info. */
746 	RTE_LOG(INFO, VHOST_DATA,
747 		"(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
748 		vdev->vid,
749 		vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
750 		vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
751 		vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
752 		vdev->vlan_tag);
753 
754 	/* Register the MAC address. */
755 	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
756 				(uint32_t)vdev->vid + vmdq_pool_base);
757 	if (ret)
758 		RTE_LOG(ERR, VHOST_DATA,
759 			"(%d) failed to add device MAC address to VMDQ\n",
760 			vdev->vid);
761 
762 	/* Enable stripping of the vlan tag as we handle routing. */
763 	if (vlan_strip)
764 		rte_eth_dev_set_vlan_strip_on_queue(ports[0],
765 			(uint16_t)vdev->vmdq_rx_q, 1);
766 
767 	/* Set device as ready for RX. */
768 	vdev->ready = DEVICE_RX;
769 
770 	return 0;
771 }
772 
773 /*
774  * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
775  * queue before disabling RX on the device.
776  */
777 static inline void
778 unlink_vmdq(struct vhost_dev *vdev)
779 {
780 	unsigned i = 0;
781 	unsigned rx_count;
782 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
783 
784 	if (vdev->ready == DEVICE_RX) {
785 		/*clear MAC and VLAN settings*/
786 		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
787 		for (i = 0; i < 6; i++)
788 			vdev->mac_address.addr_bytes[i] = 0;
789 
790 		vdev->vlan_tag = 0;
791 
792 		/*Clear out the receive buffers*/
793 		rx_count = rte_eth_rx_burst(ports[0],
794 					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
795 
796 		while (rx_count) {
797 			for (i = 0; i < rx_count; i++)
798 				rte_pktmbuf_free(pkts_burst[i]);
799 
800 			rx_count = rte_eth_rx_burst(ports[0],
801 					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
802 		}
803 
804 		vdev->ready = DEVICE_MAC_LEARNING;
805 	}
806 }
807 
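/*
 * Enqueue a single packet to the RX ring of the destination vhost device
 * (VM2VM path) and update the RX/TX statistics when stats are enabled.
 */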
808 static inline void __attribute__((always_inline))
809 virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
810 	    struct rte_mbuf *m)
811 {
812 	uint16_t ret;
813 
814 	ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
815 	if (enable_stats) {
816 		rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
817 		rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
818 		src_vdev->stats.tx_total++;
819 		src_vdev->stats.tx += ret;
820 	}
821 }
822 
823 /*
824  * Check if the packet destination MAC address is for a local device. If so, put
825  * the packet on that device's RX queue. If not, return.
826  */
827 static inline int __attribute__((always_inline))
828 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
829 {
830 	struct ether_hdr *pkt_hdr;
831 	struct vhost_dev *dst_vdev;
832 
833 	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
834 
835 	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
836 	if (!dst_vdev)
837 		return -1;
838 
839 	if (vdev->vid == dst_vdev->vid) {
840 		RTE_LOG(DEBUG, VHOST_DATA,
841 			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
842 			vdev->vid);
843 		return 0;
844 	}
845 
846 	RTE_LOG(DEBUG, VHOST_DATA,
847 		"(%d) TX: MAC address is local\n", dst_vdev->vid);
848 
849 	if (unlikely(dst_vdev->remove)) {
850 		RTE_LOG(DEBUG, VHOST_DATA,
851 			"(%d) device is marked for removal\n", dst_vdev->vid);
852 		return 0;
853 	}
854 
855 	virtio_xmit(dst_vdev, vdev, m);
856 	return 0;
857 }
858 
859 /*
860  * Check if the destination MAC of a packet belongs to a local VM; if it
861  * does, return its VLAN tag and the length offset.
862  */
863 static inline int __attribute__((always_inline))
864 find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
865 	uint32_t *offset, uint16_t *vlan_tag)
866 {
867 	struct vhost_dev *dst_vdev;
868 	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
869 
870 	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
871 	if (!dst_vdev)
872 		return 0;
873 
874 	if (vdev->vid == dst_vdev->vid) {
875 		RTE_LOG(DEBUG, VHOST_DATA,
876 			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
877 			vdev->vid);
878 		return -1;
879 	}
880 
881 	/*
882 	 * HW VLAN strip reduces the packet length by the
883 	 * length of the VLAN tag, so we need to restore the
884 	 * packet length by adding it back.
885 	 */
886 	*offset  = VLAN_HLEN;
887 	*vlan_tag = vlan_tags[vdev->vid];
888 
889 	RTE_LOG(DEBUG, VHOST_DATA,
890 		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
891 		vdev->vid, dst_vdev->vid, *vlan_tag);
892 
893 	return 0;
894 }
895 
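/*
 * Return the pseudo-header checksum for the given L3 header, as expected
 * by NICs performing full L4 checksum offload.
 */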
896 static uint16_t
897 get_psd_sum(void *l3_hdr, uint64_t ol_flags)
898 {
899 	if (ol_flags & PKT_TX_IPV4)
900 		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
901 	else /* assume ethertype == ETHER_TYPE_IPv6 */
902 		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
903 }
904 
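/*
 * Prepare a packet flagged for TSO: request IP checksum offload for IPv4
 * and seed the TCP checksum with the pseudo-header checksum.
 */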
905 static void virtio_tx_offload(struct rte_mbuf *m)
906 {
907 	void *l3_hdr;
908 	struct ipv4_hdr *ipv4_hdr = NULL;
909 	struct tcp_hdr *tcp_hdr = NULL;
910 	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
911 
912 	l3_hdr = (char *)eth_hdr + m->l2_len;
913 
914 	if (m->ol_flags & PKT_TX_IPV4) {
915 		ipv4_hdr = l3_hdr;
916 		ipv4_hdr->hdr_checksum = 0;
917 		m->ol_flags |= PKT_TX_IP_CKSUM;
918 	}
919 
920 	tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
921 	tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
922 }
923 
924 static inline void
925 free_pkts(struct rte_mbuf **pkts, uint16_t n)
926 {
927 	while (n--)
928 		rte_pktmbuf_free(pkts[n]);
929 }
930 
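/*
 * Flush the per-lcore TX buffer to physical port 0 and free any packets
 * the NIC did not accept.
 */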
931 static inline void __attribute__((always_inline))
932 do_drain_mbuf_table(struct mbuf_table *tx_q)
933 {
934 	uint16_t count;
935 
936 	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
937 				 tx_q->m_table, tx_q->len);
938 	if (unlikely(count < tx_q->len))
939 		free_pkts(&tx_q->m_table[count], tx_q->len - count);
940 
941 	tx_q->len = 0;
942 }
943 
944 /*
945  * This function routes the TX packet to the correct interface. This
946  * may be a local device or the physical port.
947  */
948 static inline void __attribute__((always_inline))
949 virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
950 {
951 	struct mbuf_table *tx_q;
952 	unsigned offset = 0;
953 	const uint16_t lcore_id = rte_lcore_id();
954 	struct ether_hdr *nh;
955 
956 
957 	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
958 	if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
959 		struct vhost_dev *vdev2;
960 
961 		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
962 			virtio_xmit(vdev2, vdev, m);
963 		}
964 		goto queue2nic;
965 	}
966 
967 	/*check if destination is local VM*/
968 	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
969 		rte_pktmbuf_free(m);
970 		return;
971 	}
972 
973 	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
974 		if (unlikely(find_local_dest(vdev, m, &offset,
975 					     &vlan_tag) != 0)) {
976 			rte_pktmbuf_free(m);
977 			return;
978 		}
979 	}
980 
981 	RTE_LOG(DEBUG, VHOST_DATA,
982 		"(%d) TX: MAC address is external\n", vdev->vid);
983 
984 queue2nic:
985 
986 	/*Add packet to the port tx queue*/
987 	tx_q = &lcore_tx_queue[lcore_id];
988 
989 	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
990 	if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
991 		/* Guest has inserted the vlan tag. */
992 		struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
993 		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
994 		if ((vm2vm_mode == VM2VM_HARDWARE) &&
995 			(vh->vlan_tci != vlan_tag_be))
996 			vh->vlan_tci = vlan_tag_be;
997 	} else {
998 		m->ol_flags |= PKT_TX_VLAN_PKT;
999 
1000 		/*
1001 		 * Find the right seg to adjust the data len when offset is
1002 		 * bigger than tail room size.
1003 		 */
1004 		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
1005 			if (likely(offset <= rte_pktmbuf_tailroom(m)))
1006 				m->data_len += offset;
1007 			else {
1008 				struct rte_mbuf *seg = m;
1009 
1010 				while ((seg->next != NULL) &&
1011 					(offset > rte_pktmbuf_tailroom(seg)))
1012 					seg = seg->next;
1013 
1014 				seg->data_len += offset;
1015 			}
1016 			m->pkt_len += offset;
1017 		}
1018 
1019 		m->vlan_tci = vlan_tag;
1020 	}
1021 
1022 	if (m->ol_flags & PKT_TX_TCP_SEG)
1023 		virtio_tx_offload(m);
1024 
1025 	tx_q->m_table[tx_q->len++] = m;
1026 	if (enable_stats) {
1027 		vdev->stats.tx_total++;
1028 		vdev->stats.tx++;
1029 	}
1030 
1031 	if (unlikely(tx_q->len == MAX_PKT_BURST))
1032 		do_drain_mbuf_table(tx_q);
1033 }
1034 
1035 
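/*
 * Flush the TX buffer if it has not been drained within the last
 * ~BURST_TX_DRAIN_US microseconds, so buffered packets are not held
 * indefinitely under light load.
 */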
1036 static inline void __attribute__((always_inline))
1037 drain_mbuf_table(struct mbuf_table *tx_q)
1038 {
1039 	static uint64_t prev_tsc;
1040 	uint64_t cur_tsc;
1041 
1042 	if (tx_q->len == 0)
1043 		return;
1044 
1045 	cur_tsc = rte_rdtsc();
1046 	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
1047 		prev_tsc = cur_tsc;
1048 
1049 		RTE_LOG(DEBUG, VHOST_DATA,
1050 			"TX queue drained after timeout with burst size %u\n",
1051 			tx_q->len);
1052 		do_drain_mbuf_table(tx_q);
1053 	}
1054 }
1055 
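/*
 * Receive a burst from the VMDq RX queue bound to this vhost device and
 * enqueue the packets into the guest's virtio RX ring, optionally retrying
 * while the ring has too few free entries.
 */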
1056 static inline void __attribute__((always_inline))
1057 drain_eth_rx(struct vhost_dev *vdev)
1058 {
1059 	uint16_t rx_count, enqueue_count;
1060 	struct rte_mbuf *pkts[MAX_PKT_BURST];
1061 
1062 	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
1063 				    pkts, MAX_PKT_BURST);
1064 	if (!rx_count)
1065 		return;
1066 
1067 	/*
1068 	 * When "enable_retry" is set, wait and retry when there are
1069 	 * not enough free slots in the queue to hold @rx_count packets,
1070 	 * to diminish packet loss.
1071 	 */
1072 	if (enable_retry &&
1073 	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
1074 			VIRTIO_RXQ))) {
1075 		uint32_t retry;
1076 
1077 		for (retry = 0; retry < burst_rx_retry_num; retry++) {
1078 			rte_delay_us(burst_rx_delay_time);
1079 			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
1080 					VIRTIO_RXQ))
1081 				break;
1082 		}
1083 	}
1084 
1085 	enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
1086 						pkts, rx_count);
1087 	if (enable_stats) {
1088 		rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
1089 		rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
1090 	}
1091 
1092 	free_pkts(pkts, rx_count);
1093 }
1094 
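/*
 * Dequeue a burst from the guest's virtio TX ring and route each packet,
 * either to another local vhost device or out through the physical port.
 */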
1095 static inline void __attribute__((always_inline))
1096 drain_virtio_tx(struct vhost_dev *vdev)
1097 {
1098 	struct rte_mbuf *pkts[MAX_PKT_BURST];
1099 	uint16_t count;
1100 	uint16_t i;
1101 
1102 	count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
1103 					pkts, MAX_PKT_BURST);
1104 
1105 	/* setup VMDq for the first packet */
1106 	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
1107 		if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
1108 			free_pkts(pkts, count);
1109 	}
1110 
1111 	for (i = 0; i < count; ++i)
1112 		virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
1113 }
1114 
1115 /*
1116  * Main function of vhost-switch. It basically does:
1117  *
1118  * for each vhost device {
1119  *    - drain_eth_rx()
1120  *
1121  *      Which drains the host eth Rx queue linked to the vhost device,
1122  *      and delivers all of the packets to the guest virtio Rx ring associated with
1123  *      this vhost device.
1124  *
1125  *    - drain_virtio_tx()
1126  *
1127  *      Which drains the guest virtio Tx queue and delivers all of the packets
1128  *      to the target, which could be another vhost device, or the
1129  *      physical eth dev. The route is done in function "virtio_tx_route".
1130  * }
1131  */
1132 static int
1133 switch_worker(void *arg __rte_unused)
1134 {
1135 	unsigned i;
1136 	unsigned lcore_id = rte_lcore_id();
1137 	struct vhost_dev *vdev;
1138 	struct mbuf_table *tx_q;
1139 
1140 	RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started\n", lcore_id);
1141 
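	/*
	 * Each lcore owns one NIC TX queue; its index is this lcore's
	 * position in the lcore_ids[] array.
	 */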
1142 	tx_q = &lcore_tx_queue[lcore_id];
1143 	for (i = 0; i < rte_lcore_count(); i++) {
1144 		if (lcore_ids[i] == lcore_id) {
1145 			tx_q->txq_id = i;
1146 			break;
1147 		}
1148 	}
1149 
1150 	while(1) {
1151 		drain_mbuf_table(tx_q);
1152 
1153 		/*
1154 		 * Inform the configuration core that we have exited the
1155 		 * linked list and that no devices are in use if requested.
1156 		 */
1157 		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
1158 			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
1159 
1160 		/*
1161 		 * Process vhost devices
1162 		 */
1163 		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
1164 			      lcore_vdev_entry) {
1165 			if (unlikely(vdev->remove)) {
1166 				unlink_vmdq(vdev);
1167 				vdev->ready = DEVICE_SAFE_REMOVE;
1168 				continue;
1169 			}
1170 
1171 			if (likely(vdev->ready == DEVICE_RX))
1172 				drain_eth_rx(vdev);
1173 
1174 			if (likely(!vdev->remove))
1175 				drain_virtio_tx(vdev);
1176 		}
1177 	}
1178 
1179 	return 0;
1180 }
1181 
1182 /*
1183  * Remove a device from the specific data core linked list and from the
1184  * main linked list. Synchronization occurs through the use of the
1185  * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
1186  * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
1187  */
1188 static void
1189 destroy_device(int vid)
1190 {
1191 	struct vhost_dev *vdev = NULL;
1192 	int lcore;
1193 
1194 	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1195 		if (vdev->vid == vid)
1196 			break;
1197 	}
1198 	if (!vdev)
1199 		return;
1200 	/*set the remove flag. */
1201 	vdev->remove = 1;
1202 	while(vdev->ready != DEVICE_SAFE_REMOVE) {
1203 		rte_pause();
1204 	}
1205 
1206 	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
1207 		     lcore_vdev_entry);
1208 	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
1209 
1210 
1211 	/* Set the dev_removal_flag on each lcore. */
1212 	RTE_LCORE_FOREACH_SLAVE(lcore)
1213 		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
1214 
1215 	/*
1216 	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
1217 	 * we can be sure that they can no longer access the device removed
1218 	 * from the linked lists and that the devices are no longer in use.
1219 	 */
1220 	RTE_LCORE_FOREACH_SLAVE(lcore) {
1221 		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
1222 			rte_pause();
1223 	}
1224 
1225 	lcore_info[vdev->coreid].device_num--;
1226 
1227 	RTE_LOG(INFO, VHOST_DATA,
1228 		"(%d) device has been removed from data core\n",
1229 		vdev->vid);
1230 
1231 	rte_free(vdev);
1232 }
1233 
1234 /*
1235  * A new device is added to a data core. First the device is added to the main linked list
1236  * and then allocated to a specific data core.
1237  */
1238 static int
1239 new_device(int vid)
1240 {
1241 	int lcore, core_add = 0;
1242 	uint32_t device_num_min = num_devices;
1243 	struct vhost_dev *vdev;
1244 
1245 	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
1246 	if (vdev == NULL) {
1247 		RTE_LOG(INFO, VHOST_DATA,
1248 			"(%d) couldn't allocate memory for vhost dev\n",
1249 			vid);
1250 		return -1;
1251 	}
1252 	vdev->vid = vid;
1253 
1254 	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
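	/*
	 * Each vhost device gets its own VMDq pool; use the first queue of
	 * that pool as the device's RX queue on the physical port.
	 */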
1255 	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
1256 
1257 	/*reset ready flag*/
1258 	vdev->ready = DEVICE_MAC_LEARNING;
1259 	vdev->remove = 0;
1260 
1261 	/* Find a suitable lcore to add the device. */
1262 	RTE_LCORE_FOREACH_SLAVE(lcore) {
1263 		if (lcore_info[lcore].device_num < device_num_min) {
1264 			device_num_min = lcore_info[lcore].device_num;
1265 			core_add = lcore;
1266 		}
1267 	}
1268 	vdev->coreid = core_add;
1269 
1270 	TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
1271 			  lcore_vdev_entry);
1272 	lcore_info[vdev->coreid].device_num++;
1273 
1274 	/* Disable notifications. */
1275 	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
1276 	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
1277 
1278 	RTE_LOG(INFO, VHOST_DATA,
1279 		"(%d) device has been added to data core %d\n",
1280 		vid, vdev->coreid);
1281 
1282 	return 0;
1283 }
1284 
1285 /*
1286  * These callbacks allow devices to be added to the data core when configuration
1287  * has been fully completed.
1288  */
1289 static const struct virtio_net_device_ops virtio_net_device_ops =
1290 {
1291 	.new_device =  new_device,
1292 	.destroy_device = destroy_device,
1293 };
1294 
1295 /*
1296  * This thread wakes up periodically to print stats if the user has
1297  * enabled them.
1298  */
1299 static void
1300 print_stats(void)
1301 {
1302 	struct vhost_dev *vdev;
1303 	uint64_t tx_dropped, rx_dropped;
1304 	uint64_t tx, tx_total, rx, rx_total;
1305 	const char clr[] = { 27, '[', '2', 'J', '\0' };
1306 	const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
1307 
1308 	while(1) {
1309 		sleep(enable_stats);
1310 
1311 		/* Clear screen and move to top left */
1312 		printf("%s%s\n", clr, top_left);
1313 		printf("Device statistics =================================\n");
1314 
1315 		TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1316 			tx_total   = vdev->stats.tx_total;
1317 			tx         = vdev->stats.tx;
1318 			tx_dropped = tx_total - tx;
1319 
1320 			rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
1321 			rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
1322 			rx_dropped = rx_total - rx;
1323 
1324 			printf("Statistics for device %d\n"
1325 				"-----------------------\n"
1326 				"TX total:              %" PRIu64 "\n"
1327 				"TX dropped:            %" PRIu64 "\n"
1328 				"TX successful:         %" PRIu64 "\n"
1329 				"RX total:              %" PRIu64 "\n"
1330 				"RX dropped:            %" PRIu64 "\n"
1331 				"RX successful:         %" PRIu64 "\n",
1332 				vdev->vid,
1333 				tx_total, tx_dropped, tx,
1334 				rx_total, rx_dropped, rx);
1335 		}
1336 
1337 		printf("===================================================\n");
1338 	}
1339 }
1340 
1341 /* When we receive a SIGINT signal, unregister the vhost driver */
1342 static void
1343 sigint_handler(__rte_unused int signum)
1344 {
1345 	/* Unregister vhost driver. */
1346 	int ret = rte_vhost_driver_unregister((char *)&dev_basename);
1347 	if (ret != 0)
1348 		rte_exit(EXIT_FAILURE, "vhost driver unregister failure.\n");
1349 	exit(0);
1350 }
1351 
1352 /*
1353  * While creating an mbuf pool, one key thing is to figure out how
1354  * many mbuf entries are enough for our use. FYI, here are some
1355  * guidelines:
1356  *
1357  * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
1358  *
1359  * - For each switch core (a CPU core that does the packet switching), we
1360  *   also need to reserve some mbufs for receiving the packets from the
1361  *   virtio Tx queue. How many are enough depends on the usage. It's normally
1362  *   a simple calculation like the following:
1363  *
1364  *       MAX_PKT_BURST * max packet size / mbuf size
1365  *
1366  *   So, we definitely need to allocate more mbufs when TSO is enabled.
1367  *
1368  * - Similarly, for each switch core, we should reserve @nr_rx_desc
1369  *   mbufs for receiving the packets from the physical NIC device.
1370  *
1371  * - We also need to make sure, for each switch core, we have allocated
1372  *   enough mbufs to fill up the mbuf cache.
1373  */
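/*
 * Rough example with the defaults above (an assumption, not a measured
 * value): MBUF_DATA_SIZE is about 2176 bytes (2048B dataroom + 128B
 * headroom), so with mtu = 1500 a switch core needs roughly
 * (1500 + 2176) * 32 / 2048 ~= 57 mbufs for virtio Tx, plus 1024 for the
 * NIC RX descriptors -- about 1100 mbufs per switch core.
 */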
1374 static void
1375 create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
1376 	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
1377 {
1378 	uint32_t nr_mbufs;
1379 	uint32_t nr_mbufs_per_core;
1380 	uint32_t mtu = 1500;
1381 
1382 	if (mergeable)
1383 		mtu = 9000;
1384 	if (enable_tso)
1385 		mtu = 64 * 1024;
1386 
1387 	nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
1388 			(mbuf_size - RTE_PKTMBUF_HEADROOM);
1389 	nr_mbufs_per_core += nr_rx_desc;
1390 	nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
1391 
1392 	nr_mbufs  = nr_queues * nr_rx_desc;
1393 	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
1394 	nr_mbufs *= nr_port;
1395 
1396 	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
1397 					    nr_mbuf_cache, 0, mbuf_size,
1398 					    rte_socket_id());
1399 	if (mbuf_pool == NULL)
1400 		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1401 }
1402 
1403 /*
1404  * Main function, does initialisation and calls the per-lcore functions. The CUSE
1405  * device is also registered here to handle the IOCTLs.
1406  */
1407 int
1408 main(int argc, char *argv[])
1409 {
1410 	unsigned lcore_id, core_id = 0;
1411 	unsigned nb_ports, valid_num_ports;
1412 	int ret;
1413 	uint8_t portid;
1414 	static pthread_t tid;
1415 	char thread_name[RTE_MAX_THREAD_NAME_LEN];
1416 	uint64_t flags = 0;
1417 
1418 	signal(SIGINT, sigint_handler);
1419 
1420 	/* init EAL */
1421 	ret = rte_eal_init(argc, argv);
1422 	if (ret < 0)
1423 		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1424 	argc -= ret;
1425 	argv += ret;
1426 
1427 	/* parse app arguments */
1428 	ret = us_vhost_parse_args(argc, argv);
1429 	if (ret < 0)
1430 		rte_exit(EXIT_FAILURE, "Invalid argument\n");
1431 
1432 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
1433 		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
1434 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
1435 		if (rte_lcore_is_enabled(lcore_id))
1436 			lcore_ids[core_id++] = lcore_id;
1437 
1438 	if (rte_lcore_count() > RTE_MAX_LCORE)
1439 		rte_exit(EXIT_FAILURE,"Not enough cores\n");
1440 
1441 	/* Get the number of physical ports. */
1442 	nb_ports = rte_eth_dev_count();
1443 
1444 	/*
1445 	 * Update the global variable num_ports and the global array ports,
1446 	 * and get the number of valid ports according to the system port count.
1447 	 */
1448 	valid_num_ports = check_ports_num(nb_ports);
1449 
1450 	if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
1451 		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
1452 			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
1453 		return -1;
1454 	}
1455 
1456 	/*
1457 	 * FIXME: here we are trying to allocate mbufs big enough for
1458 	 * @MAX_QUEUES, but the truth is we're never going to use that
1459 	 * many queues here. We probably should only do allocation for
1460 	 * those queues we are going to use.
1461 	 */
1462 	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
1463 			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
1464 
1465 	if (vm2vm_mode == VM2VM_HARDWARE) {
1466 		/* Enable VT loop back to let L2 switch to do it. */
1467 		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
1468 		RTE_LOG(DEBUG, VHOST_CONFIG,
1469 			"Enable loop back for L2 switch in vmdq.\n");
1470 	}
1471 
1472 	/* initialize all ports */
1473 	for (portid = 0; portid < nb_ports; portid++) {
1474 		/* skip ports that are not enabled */
1475 		if ((enabled_port_mask & (1 << portid)) == 0) {
1476 			RTE_LOG(INFO, VHOST_PORT,
1477 				"Skipping disabled port %d\n", portid);
1478 			continue;
1479 		}
1480 		if (port_init(portid) != 0)
1481 			rte_exit(EXIT_FAILURE,
1482 				"Cannot initialize network ports\n");
1483 	}
1484 
1485 	/* Enable stats if the user option is set. */
1486 	if (enable_stats) {
1487 		ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
1488 		if (ret != 0)
1489 			rte_exit(EXIT_FAILURE,
1490 				"Cannot create print-stats thread\n");
1491 
1492 		/* Set thread_name for aid in debugging.  */
1493 		snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
1494 		ret = rte_thread_setname(tid, thread_name);
1495 		if (ret != 0)
1496 			RTE_LOG(DEBUG, VHOST_CONFIG,
1497 				"Cannot set print-stats name\n");
1498 	}
1499 
1500 	/* Launch all data cores. */
1501 	RTE_LCORE_FOREACH_SLAVE(lcore_id)
1502 		rte_eal_remote_launch(switch_worker, NULL, lcore_id);
1503 
1504 	if (mergeable == 0)
1505 		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
1506 
1507 	if (client_mode)
1508 		flags |= RTE_VHOST_USER_CLIENT;
1509 
1510 	/* Register vhost(cuse or user) driver to handle vhost messages. */
1511 	ret = rte_vhost_driver_register(dev_basename, flags);
1512 	if (ret != 0)
1513 		rte_exit(EXIT_FAILURE, "vhost driver register failure.\n");
1514 
1515 	rte_vhost_driver_callback_register(&virtio_net_device_ops);
1516 
1517 	/* Start the vhost driver session (CUSE or vhost-user). */
1518 	rte_vhost_driver_session_start();
1519 	return 0;
1520 
1521 }
1522