xref: /dpdk/examples/vhost/main.c (revision 10b71caecbe1cddcbb65c050ca775fba575e88db)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <arpa/inet.h>
6 #include <getopt.h>
7 #include <linux/if_ether.h>
8 #include <linux/if_vlan.h>
9 #include <linux/virtio_net.h>
10 #include <linux/virtio_ring.h>
11 #include <signal.h>
12 #include <stdint.h>
13 #include <sys/eventfd.h>
14 #include <sys/param.h>
15 #include <unistd.h>
16 
17 #include <rte_atomic.h>
18 #include <rte_cycles.h>
19 #include <rte_ethdev.h>
20 #include <rte_log.h>
21 #include <rte_string_fns.h>
22 #include <rte_malloc.h>
23 #include <rte_vhost.h>
24 #include <rte_ip.h>
25 #include <rte_tcp.h>
26 #include <rte_pause.h>
27 
28 #include "main.h"
29 
30 #ifndef MAX_QUEUES
31 #define MAX_QUEUES 128
32 #endif
33 
34 /* the maximum number of external ports supported */
35 #define MAX_SUP_PORTS 1
36 
37 #define MBUF_CACHE_SIZE	128
38 #define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE
39 
40 #define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */
41 
42 #define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
43 #define BURST_RX_RETRIES 4		/* Number of retries on RX. */
44 
45 #define JUMBO_FRAME_MAX_SIZE    0x2600
46 
47 /* State of virtio device. */
48 #define DEVICE_MAC_LEARNING 0
49 #define DEVICE_RX			1
50 #define DEVICE_SAFE_REMOVE	2
51 
52 /* Configurable number of RX/TX ring descriptors */
53 #define RTE_TEST_RX_DESC_DEFAULT 1024
54 #define RTE_TEST_TX_DESC_DEFAULT 512
55 
56 #define INVALID_PORT_ID 0xFF
57 
58 /* Maximum long option length for option parsing. */
59 #define MAX_LONG_OPT_SZ 64
60 
61 /* mask of enabled ports */
62 static uint32_t enabled_port_mask = 0;
63 
64 /* Promiscuous mode */
65 static uint32_t promiscuous;
66 
67 /* number of devices/queues to support */
68 static uint32_t num_queues = 0;
69 static uint32_t num_devices;
70 
71 static struct rte_mempool *mbuf_pool;
72 static int mergeable;
73 
74 /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
75 typedef enum {
76 	VM2VM_DISABLED = 0,
77 	VM2VM_SOFTWARE = 1,
78 	VM2VM_HARDWARE = 2,
79 	VM2VM_LAST
80 } vm2vm_type;
81 static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
82 
83 /* Enable stats. */
84 static uint32_t enable_stats = 0;
85 /* Enable retries on RX. */
86 static uint32_t enable_retry = 1;
87 
88 /* Disable TX checksum offload */
89 static uint32_t enable_tx_csum;
90 
91 /* Disable TSO offload */
92 static uint32_t enable_tso;
93 
94 static int client_mode;
95 static int dequeue_zero_copy;
96 
97 static int builtin_net_driver;
98 
99 /* Specify the timeout (in microseconds) between retries on RX. */
100 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
101 /* Specify the number of retries on RX. */
102 static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
103 
104 /* Socket file paths. Can be set by user */
105 static char *socket_files;
106 static int nb_sockets;
107 
108 /* Empty VMDQ configuration structure. Filled in programmatically. */
109 static struct rte_eth_conf vmdq_conf_default = {
110 	.rxmode = {
111 		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
112 		.split_hdr_size = 0,
113 		/*
114 		 * VLAN strip is necessary for 1G NICs such as the I350;
115 		 * it fixes a bug where IPv4 forwarding in the guest could not
116 		 * forward packets from one virtio dev to another virtio dev.
117 		 */
118 		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
119 	},
120 
121 	.txmode = {
122 		.mq_mode = ETH_MQ_TX_NONE,
123 		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
124 			     DEV_TX_OFFLOAD_TCP_CKSUM |
125 			     DEV_TX_OFFLOAD_VLAN_INSERT |
126 			     DEV_TX_OFFLOAD_MULTI_SEGS |
127 			     DEV_TX_OFFLOAD_TCP_TSO),
128 	},
129 	.rx_adv_conf = {
130 		/*
131 		 * should be overridden separately in code with
132 		 * appropriate values
133 		 */
134 		.vmdq_rx_conf = {
135 			.nb_queue_pools = ETH_8_POOLS,
136 			.enable_default_pool = 0,
137 			.default_pool = 0,
138 			.nb_pool_maps = 0,
139 			.pool_map = {{0, 0},},
140 		},
141 	},
142 };
143 
144 
145 static unsigned lcore_ids[RTE_MAX_LCORE];
146 static uint16_t ports[RTE_MAX_ETHPORTS];
147 static unsigned num_ports = 0; /**< The number of ports specified in command line */
148 static uint16_t num_pf_queues, num_vmdq_queues;
149 static uint16_t vmdq_pool_base, vmdq_queue_base;
150 static uint16_t queues_per_pool;
151 
152 const uint16_t vlan_tags[] = {
153 	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
154 	1008, 1009, 1010, 1011,	1012, 1013, 1014, 1015,
155 	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
156 	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
157 	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
158 	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
159 	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
160 	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
161 };
162 
163 /* ethernet addresses of ports */
164 static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
165 
166 static struct vhost_dev_tailq_list vhost_dev_list =
167 	TAILQ_HEAD_INITIALIZER(vhost_dev_list);
168 
169 static struct lcore_info lcore_info[RTE_MAX_LCORE];
170 
171 /* Used for queueing bursts of TX packets. */
172 struct mbuf_table {
173 	unsigned len;
174 	unsigned txq_id;
175 	struct rte_mbuf *m_table[MAX_PKT_BURST];
176 };
177 
178 /* TX queue for each data core. */
179 struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
180 
181 #define MBUF_TABLE_DRAIN_TSC	((rte_get_tsc_hz() + US_PER_S - 1) \
182 				 / US_PER_S * BURST_TX_DRAIN_US)
183 #define VLAN_HLEN       4
184 
185 /*
186  * Builds up the correct configuration for VMDQ VLAN pool map
187  * according to the pool & queue limits.
188  */
189 static inline int
190 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
191 {
192 	struct rte_eth_vmdq_rx_conf conf;
193 	struct rte_eth_vmdq_rx_conf *def_conf =
194 		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
195 	unsigned i;
196 
197 	memset(&conf, 0, sizeof(conf));
198 	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
199 	conf.nb_pool_maps = num_devices;
200 	conf.enable_loop_back = def_conf->enable_loop_back;
201 	conf.rx_mode = def_conf->rx_mode;
202 
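	/*
	 * Give each pool its own VLAN: frames tagged vlan_tags[i] are steered
	 * to VMDQ pool i.
	 */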
203 	for (i = 0; i < conf.nb_pool_maps; i++) {
204 		conf.pool_map[i].vlan_id = vlan_tags[ i ];
205 		conf.pool_map[i].pools = (1UL << i);
206 	}
207 
208 	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
209 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
210 		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
211 	return 0;
212 }
213 
214 /*
215  * Initialises a given port using global settings and with the rx buffers
216  * coming from the global mbuf_pool
217  */
218 static inline int
219 port_init(uint16_t port)
220 {
221 	struct rte_eth_dev_info dev_info;
222 	struct rte_eth_conf port_conf;
223 	struct rte_eth_rxconf *rxconf;
224 	struct rte_eth_txconf *txconf;
225 	int16_t rx_rings, tx_rings;
226 	uint16_t rx_ring_size, tx_ring_size;
227 	int retval;
228 	uint16_t q;
229 
230 	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
231 	retval = rte_eth_dev_info_get(port, &dev_info);
232 	if (retval != 0) {
233 		RTE_LOG(ERR, VHOST_PORT,
234 			"Error during getting device (port %u) info: %s\n",
235 			port, strerror(-retval));
236 
237 		return retval;
238 	}
239 
240 	rxconf = &dev_info.default_rxconf;
241 	txconf = &dev_info.default_txconf;
242 	rxconf->rx_drop_en = 1;
243 
244 	/* Configure the number of supported virtio devices based on VMDQ limits */
245 	num_devices = dev_info.max_vmdq_pools;
246 
247 	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
248 	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
249 
250 	/*
251 	 * When dequeue zero copy is enabled, guest Tx used vring will be
252 	 * updated only when corresponding mbuf is freed. Thus, the nb_tx_desc
253 	 * (tx_ring_size here) must be small enough so that the driver will
254 	 * hit the free threshold easily and free mbufs timely. Otherwise,
255 	 * guest Tx vring would be starved.
256 	 */
257 	if (dequeue_zero_copy)
258 		tx_ring_size = 64;
259 
260 	tx_rings = (uint16_t)rte_lcore_count();
261 
262 	/* Get port configuration. */
263 	retval = get_eth_conf(&port_conf, num_devices);
264 	if (retval < 0)
265 		return retval;
266 	/* NIC queues are divided into pf queues and vmdq queues.  */
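	/*
	 * The VMDQ region of the NIC starts at vmdq_queue_base and each pool
	 * owns queues_per_pool Rx queues; pool N used by this application maps
	 * to hardware pool vmdq_pool_base + N.
	 */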
267 	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
268 	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
269 	num_vmdq_queues = num_devices * queues_per_pool;
270 	num_queues = num_pf_queues + num_vmdq_queues;
271 	vmdq_queue_base = dev_info.vmdq_queue_base;
272 	vmdq_pool_base  = dev_info.vmdq_pool_base;
273 	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
274 		num_pf_queues, num_devices, queues_per_pool);
275 
276 	if (!rte_eth_dev_is_valid_port(port))
277 		return -1;
278 
279 	rx_rings = (uint16_t)dev_info.max_rx_queues;
280 	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
281 		port_conf.txmode.offloads |=
282 			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
283 	/* Configure ethernet device. */
284 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
285 	if (retval != 0) {
286 		RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
287 			port, strerror(-retval));
288 		return retval;
289 	}
290 
291 	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
292 		&tx_ring_size);
293 	if (retval != 0) {
294 		RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
295 			"for port %u: %s.\n", port, strerror(-retval));
296 		return retval;
297 	}
298 	if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
299 		RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
300 			"for Rx queues on port %u.\n", port);
301 		return -1;
302 	}
303 
304 	/* Setup the queues. */
305 	rxconf->offloads = port_conf.rxmode.offloads;
306 	for (q = 0; q < rx_rings; q ++) {
307 		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
308 						rte_eth_dev_socket_id(port),
309 						rxconf,
310 						mbuf_pool);
311 		if (retval < 0) {
312 			RTE_LOG(ERR, VHOST_PORT,
313 				"Failed to setup rx queue %u of port %u: %s.\n",
314 				q, port, strerror(-retval));
315 			return retval;
316 		}
317 	}
318 	txconf->offloads = port_conf.txmode.offloads;
319 	for (q = 0; q < tx_rings; q ++) {
320 		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
321 						rte_eth_dev_socket_id(port),
322 						txconf);
323 		if (retval < 0) {
324 			RTE_LOG(ERR, VHOST_PORT,
325 				"Failed to setup tx queue %u of port %u: %s.\n",
326 				q, port, strerror(-retval));
327 			return retval;
328 		}
329 	}
330 
331 	/* Start the device. */
332 	retval  = rte_eth_dev_start(port);
333 	if (retval < 0) {
334 		RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
335 			port, strerror(-retval));
336 		return retval;
337 	}
338 
339 	if (promiscuous) {
340 		retval = rte_eth_promiscuous_enable(port);
341 		if (retval != 0) {
342 			RTE_LOG(ERR, VHOST_PORT,
343 				"Failed to enable promiscuous mode on port %u: %s\n",
344 				port, rte_strerror(-retval));
345 			return retval;
346 		}
347 	}
348 
349 	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
350 	if (retval < 0) {
351 		RTE_LOG(ERR, VHOST_PORT,
352 			"Failed to get MAC address on port %u: %s\n",
353 			port, rte_strerror(-retval));
354 		return retval;
355 	}
356 
357 	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
358 	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
359 			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
360 			port,
361 			vmdq_ports_eth_addr[port].addr_bytes[0],
362 			vmdq_ports_eth_addr[port].addr_bytes[1],
363 			vmdq_ports_eth_addr[port].addr_bytes[2],
364 			vmdq_ports_eth_addr[port].addr_bytes[3],
365 			vmdq_ports_eth_addr[port].addr_bytes[4],
366 			vmdq_ports_eth_addr[port].addr_bytes[5]);
367 
368 	return 0;
369 }
370 
371 /*
372  * Set socket file path.
373  */
374 static int
375 us_vhost_parse_socket_path(const char *q_arg)
376 {
377 	char *old;
378 
379 	/* Reject paths that do not fit in PATH_MAX bytes. */
380 	if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
381 		return -1;
382 
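	/*
	 * socket_files is a flat array of PATH_MAX-byte slots; grow it by one
	 * slot and append the new path.
	 */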
383 	old = socket_files;
384 	socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
385 	if (socket_files == NULL) {
386 		free(old);
387 		return -1;
388 	}
389 
390 	strlcpy(socket_files + nb_sockets * PATH_MAX, q_arg, PATH_MAX);
391 	nb_sockets++;
392 
393 	return 0;
394 }
395 
396 /*
397  * Parse the portmask provided at run time.
398  */
399 static int
400 parse_portmask(const char *portmask)
401 {
402 	char *end = NULL;
403 	unsigned long pm;
404 
405 	errno = 0;
406 
407 	/* parse hexadecimal string */
408 	pm = strtoul(portmask, &end, 16);
409 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
410 		return 0;
411 
412 	return pm;
413 
414 }
415 
416 /*
417  * Parse num options at run time.
418  */
419 static int
420 parse_num_opt(const char *q_arg, uint32_t max_valid_value)
421 {
422 	char *end = NULL;
423 	unsigned long num;
424 
425 	errno = 0;
426 
427 	/* parse unsigned int string */
428 	num = strtoul(q_arg, &end, 10);
429 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
430 		return -1;
431 
432 	if (num > max_valid_value)
433 		return -1;
434 
435 	return num;
436 
437 }
438 
439 /*
440  * Display usage
441  */
442 static void
443 us_vhost_usage(const char *prgname)
444 {
445 	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
446 	"		--vm2vm [0|1|2]\n"
447 	"		--rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
448 	"		--socket-file <path>\n"
449 	"		--nb-devices ND\n"
450 	"		-p PORTMASK: Set mask for ports to be used by application\n"
451 	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
452 	"		--rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destination queue is full\n"
453 	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. This takes effect only if retries on rx are enabled\n"
454 	"		--rx-retry-num [0-N]: the number of retries on rx. This takes effect only if retries on rx are enabled\n"
455 	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
456 	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
457 	"		--socket-file: The path of the socket file.\n"
458 	"		--tx-csum [0|1] disable/enable TX checksum offload.\n"
459 	"		--tso [0|1] disable/enable TCP segment offload.\n"
460 	"		--client register a vhost-user socket as client mode.\n"
461 	"		--dequeue-zero-copy enables dequeue zero copy\n",
462 	       prgname);
463 }
464 
465 /*
466  * Parse the arguments given in the command line of the application.
467  */
468 static int
469 us_vhost_parse_args(int argc, char **argv)
470 {
471 	int opt, ret;
472 	int option_index;
473 	unsigned i;
474 	const char *prgname = argv[0];
475 	static struct option long_option[] = {
476 		{"vm2vm", required_argument, NULL, 0},
477 		{"rx-retry", required_argument, NULL, 0},
478 		{"rx-retry-delay", required_argument, NULL, 0},
479 		{"rx-retry-num", required_argument, NULL, 0},
480 		{"mergeable", required_argument, NULL, 0},
481 		{"stats", required_argument, NULL, 0},
482 		{"socket-file", required_argument, NULL, 0},
483 		{"tx-csum", required_argument, NULL, 0},
484 		{"tso", required_argument, NULL, 0},
485 		{"client", no_argument, &client_mode, 1},
486 		{"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
487 		{"builtin-net-driver", no_argument, &builtin_net_driver, 1},
488 		{NULL, 0, 0, 0},
489 	};
490 
491 	/* Parse command line */
492 	while ((opt = getopt_long(argc, argv, "p:P",
493 			long_option, &option_index)) != EOF) {
494 		switch (opt) {
495 		/* Portmask */
496 		case 'p':
497 			enabled_port_mask = parse_portmask(optarg);
498 			if (enabled_port_mask == 0) {
499 				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
500 				us_vhost_usage(prgname);
501 				return -1;
502 			}
503 			break;
504 
505 		case 'P':
506 			promiscuous = 1;
507 			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
508 				ETH_VMDQ_ACCEPT_BROADCAST |
509 				ETH_VMDQ_ACCEPT_MULTICAST;
510 
511 			break;
512 
513 		case 0:
514 			/* Enable/disable vm2vm comms. */
515 			if (!strncmp(long_option[option_index].name, "vm2vm",
516 				MAX_LONG_OPT_SZ)) {
517 				ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
518 				if (ret == -1) {
519 					RTE_LOG(INFO, VHOST_CONFIG,
520 						"Invalid argument for "
521 						"vm2vm [0|1|2]\n");
522 					us_vhost_usage(prgname);
523 					return -1;
524 				} else {
525 					vm2vm_mode = (vm2vm_type)ret;
526 				}
527 			}
528 
529 			/* Enable/disable retries on RX. */
530 			if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
531 				ret = parse_num_opt(optarg, 1);
532 				if (ret == -1) {
533 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
534 					us_vhost_usage(prgname);
535 					return -1;
536 				} else {
537 					enable_retry = ret;
538 				}
539 			}
540 
541 			/* Enable/disable TX checksum offload. */
542 			if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
543 				ret = parse_num_opt(optarg, 1);
544 				if (ret == -1) {
545 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
546 					us_vhost_usage(prgname);
547 					return -1;
548 				} else
549 					enable_tx_csum = ret;
550 			}
551 
552 			/* Enable/disable TSO offload. */
553 			if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
554 				ret = parse_num_opt(optarg, 1);
555 				if (ret == -1) {
556 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
557 					us_vhost_usage(prgname);
558 					return -1;
559 				} else
560 					enable_tso = ret;
561 			}
562 
563 			/* Specify the retry delay time (in microseconds) on RX. */
564 			if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
565 				ret = parse_num_opt(optarg, INT32_MAX);
566 				if (ret == -1) {
567 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
568 					us_vhost_usage(prgname);
569 					return -1;
570 				} else {
571 					burst_rx_delay_time = ret;
572 				}
573 			}
574 
575 			/* Specify the number of retries on RX. */
576 			if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
577 				ret = parse_num_opt(optarg, INT32_MAX);
578 				if (ret == -1) {
579 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
580 					us_vhost_usage(prgname);
581 					return -1;
582 				} else {
583 					burst_rx_retry_num = ret;
584 				}
585 			}
586 
587 			/* Enable/disable RX mergeable buffers. */
588 			if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
589 				ret = parse_num_opt(optarg, 1);
590 				if (ret == -1) {
591 					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
592 					us_vhost_usage(prgname);
593 					return -1;
594 				} else {
595 					mergeable = !!ret;
596 					if (ret) {
597 						vmdq_conf_default.rxmode.offloads |=
598 							DEV_RX_OFFLOAD_JUMBO_FRAME;
599 						vmdq_conf_default.rxmode.max_rx_pkt_len
600 							= JUMBO_FRAME_MAX_SIZE;
601 					}
602 				}
603 			}
604 
605 			/* Enable/disable stats. */
606 			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
607 				ret = parse_num_opt(optarg, INT32_MAX);
608 				if (ret == -1) {
609 					RTE_LOG(INFO, VHOST_CONFIG,
610 						"Invalid argument for stats [0..N]\n");
611 					us_vhost_usage(prgname);
612 					return -1;
613 				} else {
614 					enable_stats = ret;
615 				}
616 			}
617 
618 			/* Set socket file path. */
619 			if (!strncmp(long_option[option_index].name,
620 						"socket-file", MAX_LONG_OPT_SZ)) {
621 				if (us_vhost_parse_socket_path(optarg) == -1) {
622 					RTE_LOG(INFO, VHOST_CONFIG,
623 					"Invalid argument for socket name (Max %d characters)\n",
624 					PATH_MAX);
625 					us_vhost_usage(prgname);
626 					return -1;
627 				}
628 			}
629 
630 			break;
631 
632 			/* Invalid option - print options. */
633 		default:
634 			us_vhost_usage(prgname);
635 			return -1;
636 		}
637 	}
638 
639 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
640 		if (enabled_port_mask & (1 << i))
641 			ports[num_ports++] = i;
642 	}
643 
644 	if ((num_ports ==  0) || (num_ports > MAX_SUP_PORTS)) {
645 		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
646 			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
647 		return -1;
648 	}
649 
650 	return 0;
651 }
652 
653 /*
654  * Update the global variable num_ports and the ports[] array according to the
655  * number of ports in the system, and return the number of valid ports.
656  */
657 static unsigned check_ports_num(unsigned nb_ports)
658 {
659 	unsigned valid_num_ports = num_ports;
660 	unsigned portid;
661 
662 	if (num_ports > nb_ports) {
663 		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
664 			num_ports, nb_ports);
665 		num_ports = nb_ports;
666 	}
667 
668 	for (portid = 0; portid < num_ports; portid ++) {
669 		if (!rte_eth_dev_is_valid_port(ports[portid])) {
670 			RTE_LOG(INFO, VHOST_PORT,
671 				"\nSpecified port ID(%u) is not valid\n",
672 				ports[portid]);
673 			ports[portid] = INVALID_PORT_ID;
674 			valid_num_ports--;
675 		}
676 	}
677 	return valid_num_ports;
678 }
679 
680 static __rte_always_inline struct vhost_dev *
681 find_vhost_dev(struct rte_ether_addr *mac)
682 {
683 	struct vhost_dev *vdev;
684 
685 	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
686 		if (vdev->ready == DEVICE_RX &&
687 		    rte_is_same_ether_addr(mac, &vdev->mac_address))
688 			return vdev;
689 	}
690 
691 	return NULL;
692 }
693 
694 /*
695  * This function learns the MAC address of the device and registers it, along
696  * with a VLAN tag, with a VMDQ pool.
697  */
698 static int
699 link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
700 {
701 	struct rte_ether_hdr *pkt_hdr;
702 	int i, ret;
703 
704 	/* Learn MAC address of guest device from packet */
705 	pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
706 
707 	if (find_vhost_dev(&pkt_hdr->s_addr)) {
708 		RTE_LOG(ERR, VHOST_DATA,
709 			"(%d) device is using a registered MAC!\n",
710 			vdev->vid);
711 		return -1;
712 	}
713 
714 	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
715 		vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
716 
717 	/* vlan_tag currently uses the device_id. */
718 	vdev->vlan_tag = vlan_tags[vdev->vid];
719 
720 	/* Print out VMDQ registration info. */
721 	RTE_LOG(INFO, VHOST_DATA,
722 		"(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
723 		vdev->vid,
724 		vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
725 		vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
726 		vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
727 		vdev->vlan_tag);
728 
729 	/* Register the MAC address. */
730 	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
731 				(uint32_t)vdev->vid + vmdq_pool_base);
732 	if (ret)
733 		RTE_LOG(ERR, VHOST_DATA,
734 			"(%d) failed to add device MAC address to VMDQ\n",
735 			vdev->vid);
736 
737 	rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);
738 
739 	/* Set device as ready for RX. */
740 	vdev->ready = DEVICE_RX;
741 
742 	return 0;
743 }
744 
745 /*
746  * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
747  * queue before disabling RX on the device.
748  */
749 static inline void
750 unlink_vmdq(struct vhost_dev *vdev)
751 {
752 	unsigned i = 0;
753 	unsigned rx_count;
754 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
755 
756 	if (vdev->ready == DEVICE_RX) {
757 		/* Clear MAC and VLAN settings. */
758 		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
759 		for (i = 0; i < 6; i++)
760 			vdev->mac_address.addr_bytes[i] = 0;
761 
762 		vdev->vlan_tag = 0;
763 
764 		/* Clear out the receive buffers. */
765 		rx_count = rte_eth_rx_burst(ports[0],
766 					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
767 
768 		while (rx_count) {
769 			for (i = 0; i < rx_count; i++)
770 				rte_pktmbuf_free(pkts_burst[i]);
771 
772 			rx_count = rte_eth_rx_burst(ports[0],
773 					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
774 		}
775 
776 		vdev->ready = DEVICE_MAC_LEARNING;
777 	}
778 }
779 
780 static __rte_always_inline void
781 virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
782 	    struct rte_mbuf *m)
783 {
784 	uint16_t ret;
785 
786 	if (builtin_net_driver) {
787 		ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
788 	} else {
789 		ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
790 	}
791 
792 	if (enable_stats) {
793 		rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
794 		rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
795 		src_vdev->stats.tx_total++;
796 		src_vdev->stats.tx += ret;
797 	}
798 }
799 
800 /*
801  * Check if the packet destination MAC address is for a local device. If so then
802  * put the packet on that device's RX queue. If not then return.
803  */
804 static __rte_always_inline int
805 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
806 {
807 	struct rte_ether_hdr *pkt_hdr;
808 	struct vhost_dev *dst_vdev;
809 
810 	pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
811 
812 	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
813 	if (!dst_vdev)
814 		return -1;
815 
816 	if (vdev->vid == dst_vdev->vid) {
817 		RTE_LOG_DP(DEBUG, VHOST_DATA,
818 			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
819 			vdev->vid);
820 		return 0;
821 	}
822 
823 	RTE_LOG_DP(DEBUG, VHOST_DATA,
824 		"(%d) TX: MAC address is local\n", dst_vdev->vid);
825 
826 	if (unlikely(dst_vdev->remove)) {
827 		RTE_LOG_DP(DEBUG, VHOST_DATA,
828 			"(%d) device is marked for removal\n", dst_vdev->vid);
829 		return 0;
830 	}
831 
832 	virtio_xmit(dst_vdev, vdev, m);
833 	return 0;
834 }
835 
836 /*
837  * Check if the destination MAC of a packet belongs to a local VM; if it does,
838  * get its VLAN tag and the length offset to restore.
839  */
840 static __rte_always_inline int
841 find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
842 	uint32_t *offset, uint16_t *vlan_tag)
843 {
844 	struct vhost_dev *dst_vdev;
845 	struct rte_ether_hdr *pkt_hdr =
846 		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
847 
848 	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
849 	if (!dst_vdev)
850 		return 0;
851 
852 	if (vdev->vid == dst_vdev->vid) {
853 		RTE_LOG_DP(DEBUG, VHOST_DATA,
854 			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
855 			vdev->vid);
856 		return -1;
857 	}
858 
859 	/*
860 	 * HW VLAN strip will reduce the packet length by the
861 	 * length of the VLAN tag, so we need to restore the
862 	 * packet length by adding it back.
863 	 */
864 	*offset  = VLAN_HLEN;
865 	*vlan_tag = vlan_tags[vdev->vid];
866 
867 	RTE_LOG_DP(DEBUG, VHOST_DATA,
868 		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
869 		vdev->vid, dst_vdev->vid, *vlan_tag);
870 
871 	return 0;
872 }
873 
874 static uint16_t
875 get_psd_sum(void *l3_hdr, uint64_t ol_flags)
876 {
877 	if (ol_flags & PKT_TX_IPV4)
878 		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
879 	else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
880 		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
881 }
882 
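/*
 * Prepare a TSO packet for the NIC: request IPv4 header checksum offload
 * and seed the TCP checksum with the pseudo-header sum, as hardware TSO
 * expects.
 */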
883 static void virtio_tx_offload(struct rte_mbuf *m)
884 {
885 	void *l3_hdr;
886 	struct rte_ipv4_hdr *ipv4_hdr = NULL;
887 	struct rte_tcp_hdr *tcp_hdr = NULL;
888 	struct rte_ether_hdr *eth_hdr =
889 		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
890 
891 	l3_hdr = (char *)eth_hdr + m->l2_len;
892 
893 	if (m->ol_flags & PKT_TX_IPV4) {
894 		ipv4_hdr = l3_hdr;
895 		ipv4_hdr->hdr_checksum = 0;
896 		m->ol_flags |= PKT_TX_IP_CKSUM;
897 	}
898 
899 	tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + m->l3_len);
900 	tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
901 }
902 
903 static inline void
904 free_pkts(struct rte_mbuf **pkts, uint16_t n)
905 {
906 	while (n--)
907 		rte_pktmbuf_free(pkts[n]);
908 }
909 
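/*
 * Flush the per-lcore TX buffer to the physical port; any packets the NIC
 * did not accept are freed.
 */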
910 static __rte_always_inline void
911 do_drain_mbuf_table(struct mbuf_table *tx_q)
912 {
913 	uint16_t count;
914 
915 	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
916 				 tx_q->m_table, tx_q->len);
917 	if (unlikely(count < tx_q->len))
918 		free_pkts(&tx_q->m_table[count], tx_q->len - count);
919 
920 	tx_q->len = 0;
921 }
922 
923 /*
924  * This function routes the TX packet to the correct interface. This
925  * may be a local device or the physical port.
926  */
927 static __rte_always_inline void
928 virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
929 {
930 	struct mbuf_table *tx_q;
931 	unsigned offset = 0;
932 	const uint16_t lcore_id = rte_lcore_id();
933 	struct rte_ether_hdr *nh;
934 
935 
936 	nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
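	/*
	 * Broadcast frames are replicated to every other local vhost device
	 * and then also sent out the physical port.
	 */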
937 	if (unlikely(rte_is_broadcast_ether_addr(&nh->d_addr))) {
938 		struct vhost_dev *vdev2;
939 
940 		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
941 			if (vdev2 != vdev)
942 				virtio_xmit(vdev2, vdev, m);
943 		}
944 		goto queue2nic;
945 	}
946 
947 	/* Check if the destination is a local VM. */
948 	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
949 		rte_pktmbuf_free(m);
950 		return;
951 	}
952 
953 	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
954 		if (unlikely(find_local_dest(vdev, m, &offset,
955 					     &vlan_tag) != 0)) {
956 			rte_pktmbuf_free(m);
957 			return;
958 		}
959 	}
960 
961 	RTE_LOG_DP(DEBUG, VHOST_DATA,
962 		"(%d) TX: MAC address is external\n", vdev->vid);
963 
964 queue2nic:
965 
966 	/* Add packet to the port TX queue. */
967 	tx_q = &lcore_tx_queue[lcore_id];
968 
969 	nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
970 	if (unlikely(nh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) {
971 		/* Guest has inserted the vlan tag. */
972 		struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1);
973 		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
974 		if ((vm2vm_mode == VM2VM_HARDWARE) &&
975 			(vh->vlan_tci != vlan_tag_be))
976 			vh->vlan_tci = vlan_tag_be;
977 	} else {
978 		m->ol_flags |= PKT_TX_VLAN_PKT;
979 
980 		/*
981 		 * Find the right seg to adjust the data len when offset is
982 		 * bigger than tail room size.
983 		 */
984 		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
985 			if (likely(offset <= rte_pktmbuf_tailroom(m)))
986 				m->data_len += offset;
987 			else {
988 				struct rte_mbuf *seg = m;
989 
990 				while ((seg->next != NULL) &&
991 					(offset > rte_pktmbuf_tailroom(seg)))
992 					seg = seg->next;
993 
994 				seg->data_len += offset;
995 			}
996 			m->pkt_len += offset;
997 		}
998 
999 		m->vlan_tci = vlan_tag;
1000 	}
1001 
1002 	if (m->ol_flags & PKT_TX_TCP_SEG)
1003 		virtio_tx_offload(m);
1004 
1005 	tx_q->m_table[tx_q->len++] = m;
1006 	if (enable_stats) {
1007 		vdev->stats.tx_total++;
1008 		vdev->stats.tx++;
1009 	}
1010 
1011 	if (unlikely(tx_q->len == MAX_PKT_BURST))
1012 		do_drain_mbuf_table(tx_q);
1013 }
1014 
1015 
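/*
 * Flush the TX buffer even when it is not full, so buffered packets are not
 * held back for more than roughly BURST_TX_DRAIN_US microseconds.
 */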
1016 static __rte_always_inline void
1017 drain_mbuf_table(struct mbuf_table *tx_q)
1018 {
1019 	static uint64_t prev_tsc;
1020 	uint64_t cur_tsc;
1021 
1022 	if (tx_q->len == 0)
1023 		return;
1024 
1025 	cur_tsc = rte_rdtsc();
1026 	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
1027 		prev_tsc = cur_tsc;
1028 
1029 		RTE_LOG_DP(DEBUG, VHOST_DATA,
1030 			"TX queue drained after timeout with burst size %u\n",
1031 			tx_q->len);
1032 		do_drain_mbuf_table(tx_q);
1033 	}
1034 }
1035 
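/*
 * Receive a burst from the VMDQ Rx queue bound to this vhost device and
 * enqueue the packets into the guest's Rx virtqueue.
 */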
1036 static __rte_always_inline void
1037 drain_eth_rx(struct vhost_dev *vdev)
1038 {
1039 	uint16_t rx_count, enqueue_count;
1040 	struct rte_mbuf *pkts[MAX_PKT_BURST];
1041 
1042 	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
1043 				    pkts, MAX_PKT_BURST);
1044 	if (!rx_count)
1045 		return;
1046 
1047 	/*
1048 	 * When "enable_retry" is set, wait and retry when there are not
1049 	 * enough free slots in the queue to hold @rx_count packets,
1050 	 * to diminish packet loss.
1051 	 */
1052 	if (enable_retry &&
1053 	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
1054 			VIRTIO_RXQ))) {
1055 		uint32_t retry;
1056 
1057 		for (retry = 0; retry < burst_rx_retry_num; retry++) {
1058 			rte_delay_us(burst_rx_delay_time);
1059 			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
1060 					VIRTIO_RXQ))
1061 				break;
1062 		}
1063 	}
1064 
1065 	if (builtin_net_driver) {
1066 		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
1067 						pkts, rx_count);
1068 	} else {
1069 		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
1070 						pkts, rx_count);
1071 	}
1072 	if (enable_stats) {
1073 		rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
1074 		rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
1075 	}
1076 
1077 	free_pkts(pkts, rx_count);
1078 }
1079 
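/*
 * Dequeue a burst from the guest's Tx virtqueue and route each packet,
 * either to another local vhost device or out the physical port.
 */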
1080 static __rte_always_inline void
1081 drain_virtio_tx(struct vhost_dev *vdev)
1082 {
1083 	struct rte_mbuf *pkts[MAX_PKT_BURST];
1084 	uint16_t count;
1085 	uint16_t i;
1086 
1087 	if (builtin_net_driver) {
1088 		count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
1089 					pkts, MAX_PKT_BURST);
1090 	} else {
1091 		count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
1092 					mbuf_pool, pkts, MAX_PKT_BURST);
1093 	}
1094 
1095 	/* setup VMDq for the first packet */
1096 	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
1097 		if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
1098 			free_pkts(pkts, count);
1099 	}
1100 
1101 	for (i = 0; i < count; ++i)
1102 		virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
1103 }
1104 
1105 /*
1106  * Main function of vhost-switch. It basically does:
1107  *
1108  * for each vhost device {
1109  *    - drain_eth_rx()
1110  *
1111  *      Which drains the host eth Rx queue linked to the vhost device,
1112  *      and delivers all packets to the guest virtio Rx ring associated
1113  *      with this vhost device.
1114  *
1115  *    - drain_virtio_tx()
1116  *
1117  *      Which drains the guest virtio Tx queue and delivers all packets
1118  *      to the target, which could be another vhost device or the
1119  *      physical eth dev. The routing is done in function "virtio_tx_route".
1120  * }
1121  */
1122 static int
1123 switch_worker(void *arg __rte_unused)
1124 {
1125 	unsigned i;
1126 	unsigned lcore_id = rte_lcore_id();
1127 	struct vhost_dev *vdev;
1128 	struct mbuf_table *tx_q;
1129 
1130 	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
1131 
1132 	tx_q = &lcore_tx_queue[lcore_id];
1133 	for (i = 0; i < rte_lcore_count(); i++) {
1134 		if (lcore_ids[i] == lcore_id) {
1135 			tx_q->txq_id = i;
1136 			break;
1137 		}
1138 	}
1139 
1140 	while(1) {
1141 		drain_mbuf_table(tx_q);
1142 
1143 		/*
1144 		 * If requested, inform the configuration core that we have
1145 		 * finished iterating the linked list and that no devices are in use.
1146 		 */
1147 		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
1148 			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
1149 
1150 		/*
1151 		 * Process vhost devices
1152 		 */
1153 		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
1154 			      lcore_vdev_entry) {
1155 			if (unlikely(vdev->remove)) {
1156 				unlink_vmdq(vdev);
1157 				vdev->ready = DEVICE_SAFE_REMOVE;
1158 				continue;
1159 			}
1160 
1161 			if (likely(vdev->ready == DEVICE_RX))
1162 				drain_eth_rx(vdev);
1163 
1164 			if (likely(!vdev->remove))
1165 				drain_virtio_tx(vdev);
1166 		}
1167 	}
1168 
1169 	return 0;
1170 }
1171 
1172 /*
1173  * Remove a device from the specific data core linked list and from the
1174  * main linked list. Synchonization  occurs through the use of the
1175  * main linked list. Synchronization occurs through the use of the
1176  * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
1177  */
1178 static void
1179 destroy_device(int vid)
1180 {
1181 	struct vhost_dev *vdev = NULL;
1182 	int lcore;
1183 
1184 	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1185 		if (vdev->vid == vid)
1186 			break;
1187 	}
1188 	if (!vdev)
1189 		return;
1190 	/* Set the remove flag. */
1191 	vdev->remove = 1;
1192 	while(vdev->ready != DEVICE_SAFE_REMOVE) {
1193 		rte_pause();
1194 	}
1195 
1196 	if (builtin_net_driver)
1197 		vs_vhost_net_remove(vdev);
1198 
1199 	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
1200 		     lcore_vdev_entry);
1201 	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
1202 
1203 
1204 	/* Set the dev_removal_flag on each lcore. */
1205 	RTE_LCORE_FOREACH_SLAVE(lcore)
1206 		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
1207 
1208 	/*
1209 	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
1210 	 * we can be sure that they can no longer access the device removed
1211 	 * from the linked lists and that the devices are no longer in use.
1212 	 */
1213 	RTE_LCORE_FOREACH_SLAVE(lcore) {
1214 		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
1215 			rte_pause();
1216 	}
1217 
1218 	lcore_info[vdev->coreid].device_num--;
1219 
1220 	RTE_LOG(INFO, VHOST_DATA,
1221 		"(%d) device has been removed from data core\n",
1222 		vdev->vid);
1223 
1224 	rte_free(vdev);
1225 }
1226 
1227 /*
1228  * A new device is added to a data core. First the device is added to the main linked list
1229  * and then allocated to a specific data core.
1230  */
1231 static int
1232 new_device(int vid)
1233 {
1234 	int lcore, core_add = 0;
1235 	uint32_t device_num_min = num_devices;
1236 	struct vhost_dev *vdev;
1237 
1238 	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
1239 	if (vdev == NULL) {
1240 		RTE_LOG(INFO, VHOST_DATA,
1241 			"(%d) couldn't allocate memory for vhost dev\n",
1242 			vid);
1243 		return -1;
1244 	}
1245 	vdev->vid = vid;
1246 
1247 	if (builtin_net_driver)
1248 		vs_vhost_net_setup(vdev);
1249 
1250 	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
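	/*
	 * Each vhost device owns one VMDQ pool; its traffic arrives on the
	 * first Rx queue of pool 'vid' within the VMDQ region.
	 */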
1251 	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
1252 
1253 	/* Reset the ready flag. */
1254 	vdev->ready = DEVICE_MAC_LEARNING;
1255 	vdev->remove = 0;
1256 
1257 	/* Find a suitable lcore to add the device. */
1258 	RTE_LCORE_FOREACH_SLAVE(lcore) {
1259 		if (lcore_info[lcore].device_num < device_num_min) {
1260 			device_num_min = lcore_info[lcore].device_num;
1261 			core_add = lcore;
1262 		}
1263 	}
1264 	vdev->coreid = core_add;
1265 
1266 	TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
1267 			  lcore_vdev_entry);
1268 	lcore_info[vdev->coreid].device_num++;
1269 
1270 	/* Disable notifications. */
1271 	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
1272 	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
1273 
1274 	RTE_LOG(INFO, VHOST_DATA,
1275 		"(%d) device has been added to data core %d\n",
1276 		vid, vdev->coreid);
1277 
1278 	return 0;
1279 }
1280 
1281 /*
1282  * These callback allow devices to be added to the data core when configuration
1283  * These callbacks allow devices to be added to the data core when configuration
1284  * is fully complete.
1285 static const struct vhost_device_ops virtio_net_device_ops =
1286 {
1287 	.new_device =  new_device,
1288 	.destroy_device = destroy_device,
1289 };
1290 
1291 /*
1292  * This is a thread that wakes up periodically to print stats if the user has
1293  * enabled them.
1294  */
1295 static void *
1296 print_stats(__rte_unused void *arg)
1297 {
1298 	struct vhost_dev *vdev;
1299 	uint64_t tx_dropped, rx_dropped;
1300 	uint64_t tx, tx_total, rx, rx_total;
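	/* ANSI escape sequences: clear the screen and move the cursor to the
	 * top-left corner. */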
1301 	const char clr[] = { 27, '[', '2', 'J', '\0' };
1302 	const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
1303 
1304 	while(1) {
1305 		sleep(enable_stats);
1306 
1307 		/* Clear screen and move to top left */
1308 		printf("%s%s\n", clr, top_left);
1309 		printf("Device statistics =================================\n");
1310 
1311 		TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1312 			tx_total   = vdev->stats.tx_total;
1313 			tx         = vdev->stats.tx;
1314 			tx_dropped = tx_total - tx;
1315 
1316 			rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
1317 			rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
1318 			rx_dropped = rx_total - rx;
1319 
1320 			printf("Statistics for device %d\n"
1321 				"-----------------------\n"
1322 				"TX total:              %" PRIu64 "\n"
1323 				"TX dropped:            %" PRIu64 "\n"
1324 				"TX successful:         %" PRIu64 "\n"
1325 				"RX total:              %" PRIu64 "\n"
1326 				"RX dropped:            %" PRIu64 "\n"
1327 				"RX successful:         %" PRIu64 "\n",
1328 				vdev->vid,
1329 				tx_total, tx_dropped, tx,
1330 				rx_total, rx_dropped, rx);
1331 		}
1332 
1333 		printf("===================================================\n");
1334 
1335 		fflush(stdout);
1336 	}
1337 
1338 	return NULL;
1339 }
1340 
1341 static void
1342 unregister_drivers(int socket_num)
1343 {
1344 	int i, ret;
1345 
1346 	for (i = 0; i < socket_num; i++) {
1347 		ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
1348 		if (ret != 0)
1349 			RTE_LOG(ERR, VHOST_CONFIG,
1350 				"Failed to unregister vhost driver for %s.\n",
1351 				socket_files + i * PATH_MAX);
1352 	}
1353 }
1354 
1355 /* When we receive a SIGINT signal, unregister the vhost driver. */
1356 static void
1357 sigint_handler(__rte_unused int signum)
1358 {
1359 	/* Unregister vhost driver. */
1360 	unregister_drivers(nb_sockets);
1361 
1362 	exit(0);
1363 }
1364 
1365 /*
1366  * While creating an mbuf pool, one key thing is to figure out how
1367  * many mbuf entries are enough for our use. FYI, here are some
1368  * guidelines:
1369  *
1370  * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
1371  *
1372  * - For each switch core (a CPU core that does the packet switching), we
1373  *   also need to reserve some mbufs for receiving the packets from the
1374  *   virtio Tx queue. How many are enough depends on the usage. It's
1375  *   normally a simple calculation like the following:
1376  *
1377  *       MAX_PKT_BURST * max packet size / mbuf size
1378  *
1379  *   So, we definitely need to allocate more mbufs when TSO is enabled.
1380  *
1381  * - Similarly, for each switching core, we should reserve @nr_rx_desc
1382  *   mbufs for receiving the packets from the physical NIC device.
1383  *
1384  * - We also need to make sure, for each switch core, we have allocated
1385  *   enough mbufs to fill up the mbuf cache.
1386  */
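/*
 * Worked example, assuming the typical values MAX_PKT_BURST = 32 (from
 * main.h), MBUF_DATA_SIZE = 2176 and RTE_PKTMBUF_HEADROOM = 128 (from the
 * DPDK build config); treat the numbers as illustrative. With mergeable
 * buffers enabled (mtu = 9000) and nr_rx_desc = 1024:
 *
 *     nr_mbufs_per_core  = (9000 + 2176) * 32 / (2176 - 128)  = 174
 *     nr_mbufs_per_core += 1024                               = 1198
 *
 * i.e. each switch core adds roughly 1.2K mbufs on top of the
 * nr_queues * nr_rx_desc mbufs reserved for the NIC Rx rings.
 */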
1387 static void
1388 create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
1389 	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
1390 {
1391 	uint32_t nr_mbufs;
1392 	uint32_t nr_mbufs_per_core;
1393 	uint32_t mtu = 1500;
1394 
1395 	if (mergeable)
1396 		mtu = 9000;
1397 	if (enable_tso)
1398 		mtu = 64 * 1024;
1399 
1400 	nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
1401 			(mbuf_size - RTE_PKTMBUF_HEADROOM);
1402 	nr_mbufs_per_core += nr_rx_desc;
1403 	nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
1404 
1405 	nr_mbufs  = nr_queues * nr_rx_desc;
1406 	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
1407 	nr_mbufs *= nr_port;
1408 
1409 	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
1410 					    nr_mbuf_cache, 0, mbuf_size,
1411 					    rte_socket_id());
1412 	if (mbuf_pool == NULL)
1413 		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1414 }
1415 
1416 /*
1417  * Main function, does initialisation and calls the per-lcore functions.
1418  */
1419 int
1420 main(int argc, char *argv[])
1421 {
1422 	unsigned lcore_id, core_id = 0;
1423 	unsigned nb_ports, valid_num_ports;
1424 	int ret, i;
1425 	uint16_t portid;
1426 	static pthread_t tid;
1427 	uint64_t flags = 0;
1428 
1429 	signal(SIGINT, sigint_handler);
1430 
1431 	/* init EAL */
1432 	ret = rte_eal_init(argc, argv);
1433 	if (ret < 0)
1434 		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1435 	argc -= ret;
1436 	argv += ret;
1437 
1438 	/* parse app arguments */
1439 	ret = us_vhost_parse_args(argc, argv);
1440 	if (ret < 0)
1441 		rte_exit(EXIT_FAILURE, "Invalid argument\n");
1442 
1443 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1444 		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
1445 
1446 		if (rte_lcore_is_enabled(lcore_id))
1447 			lcore_ids[core_id++] = lcore_id;
1448 	}
1449 
1450 	if (rte_lcore_count() > RTE_MAX_LCORE)
1451 		rte_exit(EXIT_FAILURE,"Not enough cores\n");
1452 
1453 	/* Get the number of physical ports. */
1454 	nb_ports = rte_eth_dev_count_avail();
1455 
1456 	/*
1457 	 * Update the global variable num_ports and the global ports[] array,
1458 	 * and get the number of valid ports according to the system port count.
1459 	 */
1460 	valid_num_ports = check_ports_num(nb_ports);
1461 
1462 	if ((valid_num_ports ==  0) || (valid_num_ports > MAX_SUP_PORTS)) {
1463 		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
1464 			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
1465 		return -1;
1466 	}
1467 
1468 	/*
1469 	 * FIXME: here we are trying to allocate mbufs big enough for
1470 	 * @MAX_QUEUES, but the truth is we're never going to use that
1471 	 * many queues here. We probably should only do allocation for
1472 	 * those queues we are going to use.
1473 	 */
1474 	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
1475 			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
1476 
1477 	if (vm2vm_mode == VM2VM_HARDWARE) {
1478 		/* Enable VT loop back to let L2 switch to do it. */
1479 		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
1480 		RTE_LOG(DEBUG, VHOST_CONFIG,
1481 			"Enable loop back for L2 switch in vmdq.\n");
1482 	}
1483 
1484 	/* initialize all ports */
1485 	RTE_ETH_FOREACH_DEV(portid) {
1486 		/* skip ports that are not enabled */
1487 		if ((enabled_port_mask & (1 << portid)) == 0) {
1488 			RTE_LOG(INFO, VHOST_PORT,
1489 				"Skipping disabled port %d\n", portid);
1490 			continue;
1491 		}
1492 		if (port_init(portid) != 0)
1493 			rte_exit(EXIT_FAILURE,
1494 				"Cannot initialize network ports\n");
1495 	}
1496 
1497 	/* Enable stats if the user option is set. */
1498 	if (enable_stats) {
1499 		ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
1500 					print_stats, NULL);
1501 		if (ret < 0)
1502 			rte_exit(EXIT_FAILURE,
1503 				"Cannot create print-stats thread\n");
1504 	}
1505 
1506 	/* Launch all data cores. */
1507 	RTE_LCORE_FOREACH_SLAVE(lcore_id)
1508 		rte_eal_remote_launch(switch_worker, NULL, lcore_id);
1509 
1510 	if (client_mode)
1511 		flags |= RTE_VHOST_USER_CLIENT;
1512 
1513 	if (dequeue_zero_copy)
1514 		flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
1515 
1516 	/* Register vhost user driver to handle vhost messages. */
1517 	for (i = 0; i < nb_sockets; i++) {
1518 		char *file = socket_files + i * PATH_MAX;
1519 		ret = rte_vhost_driver_register(file, flags);
1520 		if (ret != 0) {
1521 			unregister_drivers(i);
1522 			rte_exit(EXIT_FAILURE,
1523 				"vhost driver register failure.\n");
1524 		}
1525 
1526 		if (builtin_net_driver)
1527 			rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);
1528 
1529 		if (mergeable == 0) {
1530 			rte_vhost_driver_disable_features(file,
1531 				1ULL << VIRTIO_NET_F_MRG_RXBUF);
1532 		}
1533 
1534 		if (enable_tx_csum == 0) {
1535 			rte_vhost_driver_disable_features(file,
1536 				1ULL << VIRTIO_NET_F_CSUM);
1537 		}
1538 
1539 		if (enable_tso == 0) {
1540 			rte_vhost_driver_disable_features(file,
1541 				1ULL << VIRTIO_NET_F_HOST_TSO4);
1542 			rte_vhost_driver_disable_features(file,
1543 				1ULL << VIRTIO_NET_F_HOST_TSO6);
1544 			rte_vhost_driver_disable_features(file,
1545 				1ULL << VIRTIO_NET_F_GUEST_TSO4);
1546 			rte_vhost_driver_disable_features(file,
1547 				1ULL << VIRTIO_NET_F_GUEST_TSO6);
1548 		}
1549 
1550 		if (promiscuous) {
1551 			rte_vhost_driver_enable_features(file,
1552 				1ULL << VIRTIO_NET_F_CTRL_RX);
1553 		}
1554 
1555 		ret = rte_vhost_driver_callback_register(file,
1556 			&virtio_net_device_ops);
1557 		if (ret != 0) {
1558 			rte_exit(EXIT_FAILURE,
1559 				"failed to register vhost driver callbacks.\n");
1560 		}
1561 
1562 		if (rte_vhost_driver_start(file) < 0) {
1563 			rte_exit(EXIT_FAILURE,
1564 				"failed to start vhost driver.\n");
1565 		}
1566 	}
1567 
1568 	RTE_LCORE_FOREACH_SLAVE(lcore_id)
1569 		rte_eal_wait_lcore(lcore_id);
1570 
1571 	return 0;
1572 
1573 }
1574