xref: /dpdk/examples/vhost/main.c (revision a68ba8e0a6b62ec9d038705fa920cddbdb6fb830)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <arpa/inet.h>
#include <getopt.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <signal.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_pause.h>

#include "ioat.h"
#include "main.h"

#ifndef MAX_QUEUES
#define MAX_QUEUES 128
#endif

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

#define MBUF_CACHE_SIZE	128
#define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE

#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

#define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4	/* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE    0x2600

/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX			1
#define DEVICE_SAFE_REMOVE	2

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;
static int mergeable;

/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
	VM2VM_DISABLED = 0,
	VM2VM_SOFTWARE = 1,
	VM2VM_HARDWARE = 2,
	VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;

/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Disable TX checksum offload */
static uint32_t enable_tx_csum;

/* Disable TSO offload */
static uint32_t enable_tso;

static int client_mode;

static int builtin_net_driver;

static int async_vhost_driver;

static char dma_type[MAX_LONG_OPT_SZ];

/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Socket file paths. Can be set by user */
static char *socket_files;
static int nb_sockets;

/* Empty VMDQ configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
		/*
		 * VLAN strip is necessary for 1G NICs such as the I350;
		 * it fixes a bug where IPv4 forwarding in the guest can't
		 * forward packets from one virtio dev to another virtio dev.
		 */
		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
	},

	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_TCP_CKSUM |
			     DEV_TX_OFFLOAD_VLAN_INSERT |
			     DEV_TX_OFFLOAD_MULTI_SEGS |
			     DEV_TX_OFFLOAD_TCP_TSO),
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};

static unsigned lcore_ids[RTE_MAX_LCORE];
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified on the command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;

const uint16_t vlan_tags[] = {
	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
	1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};

/* ethernet addresses of ports */
static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct vhost_dev_tailq_list vhost_dev_list =
	TAILQ_HEAD_INITIALIZER(vhost_dev_list);

static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

struct vhost_bufftable {
	uint32_t len;
	uint64_t pre_tsc;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

/*
 * Vhost TX buffer for each data core.
 * Every data core maintains a TX buffer for every vhost device,
 * which is used to batch packet enqueues for higher performance.
 */
struct vhost_bufftable *vhost_txbuff[RTE_MAX_LCORE * MAX_VHOST_DEVICE];

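/*
 * TSC ticks per drain interval: ticks per microsecond (rounded up)
 * multiplied by BURST_TX_DRAIN_US. E.g. with a 2.3 GHz TSC:
 * ceil(2.3e9 / 1e6) * 100 = 230000 ticks ~= 100 us.
 */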
#define MBUF_TABLE_DRAIN_TSC	((rte_get_tsc_hz() + US_PER_S - 1) \
				 / US_PER_S * BURST_TX_DRAIN_US)
#define VLAN_HLEN       4

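/*
 * Parse the --dmas argument through the backend selected by --dma-type;
 * "ioat" is the only backend supported here.
 */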
static inline int
open_dma(const char *value)
{
	if (strncmp(dma_type, "ioat", 4) == 0)
		return open_ioat(value);

	return -1;
}

/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
	struct rte_eth_vmdq_rx_conf conf;
	struct rte_eth_vmdq_rx_conf *def_conf =
		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
	unsigned i;

	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;
	conf.enable_loop_back = def_conf->enable_loop_back;
	conf.rx_mode = def_conf->rx_mode;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
	return 0;
}

/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the global mbuf_pool.
 */
static inline int
port_init(uint16_t port)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	int16_t rx_rings, tx_rings;
	uint16_t rx_ring_size, tx_ring_size;
	int retval;
	uint16_t q;

	/* The max pool number from dev_info will be used to validate the pool number specified on the command line */
	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT,
			"Error during getting device (port %u) info: %s\n",
			port, strerror(-retval));

		return retval;
	}

	rxconf = &dev_info.default_rxconf;
	txconf = &dev_info.default_txconf;
	rxconf->rx_drop_en = 1;

	/* configure the number of supported virtio devices based on VMDQ limits */
	num_devices = dev_info.max_vmdq_pools;

	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;

	tx_rings = (uint16_t)rte_lcore_count();

	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);
	if (retval < 0)
		return retval;
	/* NIC queues are divided into PF queues and VMDQ queues. */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_devices * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base  = dev_info.vmdq_pool_base;
	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
		num_pf_queues, num_devices, queues_per_pool);

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	rx_rings = (uint16_t)dev_info.max_rx_queues;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
			port, strerror(-retval));
		return retval;
	}

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
		&tx_ring_size);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
			"for port %u: %s.\n", port, strerror(-retval));
		return retval;
	}
	if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
		RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
			"for Rx queues on port %u.\n", port);
		return -1;
	}

	/* Setup the queues. */
	rxconf->offloads = port_conf.rxmode.offloads;
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
						rte_eth_dev_socket_id(port),
						rxconf,
						mbuf_pool);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup rx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}
	txconf->offloads = port_conf.txmode.offloads;
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
						rte_eth_dev_socket_id(port),
						txconf);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup tx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}

	/* Start the device. */
	retval  = rte_eth_dev_start(port);
	if (retval < 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
			port, strerror(-retval));
		return retval;
	}

	if (promiscuous) {
		retval = rte_eth_promiscuous_enable(port);
		if (retval != 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to enable promiscuous mode on port %u: %s\n",
				port, rte_strerror(-retval));
			return retval;
		}
	}

	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	if (retval < 0) {
		RTE_LOG(ERR, VHOST_PORT,
			"Failed to get MAC address on port %u: %s\n",
			port, rte_strerror(-retval));
		return retval;
	}

	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	return 0;
}

/*
 * Set socket file path.
 */
static int
us_vhost_parse_socket_path(const char *q_arg)
{
	char *old;

	/* reject paths that do not fit within PATH_MAX */
	if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
		return -1;

	old = socket_files;
	socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
	if (socket_files == NULL) {
		free(old);
		return -1;
	}

	strlcpy(socket_files + nb_sockets * PATH_MAX, q_arg, PATH_MAX);
	nb_sockets++;

	return 0;
}

/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return 0;

	return pm;
}

/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;

	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (num > max_valid_value)
		return -1;

	return num;
}

/*
 * Display usage
 */
static void
us_vhost_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"		--vm2vm [0|1|2]\n"
	"		--rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
	"		--socket-file <path>\n"
	"		--nb-devices ND\n"
	"		-p PORTMASK: Set mask for ports to be used by application\n"
	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
	"		--rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destination queue is full\n"
	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. This takes effect only if retries on rx are enabled\n"
	"		--rx-retry-num [0-N]: the number of retries on rx. This takes effect only if retries on rx are enabled\n"
	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"		--socket-file: The path of the socket file.\n"
	"		--tx-csum [0|1] disable/enable TX checksum offload.\n"
	"		--tso [0|1] disable/enable TCP segmentation offload.\n"
	"		--client register a vhost-user socket as client mode.\n"
	"		--dma-type register DMA type for the vhost async driver. For example \"ioat\" for now.\n"
	"		--dmas register a DMA channel for a specific vhost device.\n",
	       prgname);
}
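
/*
 * Illustrative invocation (binary name, core list and socket path are
 * examples only, not prescribed by this file):
 *   ./dpdk-vhost -l 0-3 -n 4 -- -p 0x1 --socket-file /tmp/sock0 \
 *       --mergeable 1 --stats 2
 */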

/*
 * Parse the arguments given in the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"vm2vm", required_argument, NULL, 0},
		{"rx-retry", required_argument, NULL, 0},
		{"rx-retry-delay", required_argument, NULL, 0},
		{"rx-retry-num", required_argument, NULL, 0},
		{"mergeable", required_argument, NULL, 0},
		{"stats", required_argument, NULL, 0},
		{"socket-file", required_argument, NULL, 0},
		{"tx-csum", required_argument, NULL, 0},
		{"tso", required_argument, NULL, 0},
		{"client", no_argument, &client_mode, 1},
		{"builtin-net-driver", no_argument, &builtin_net_driver, 1},
		{"dma-type", required_argument, NULL, 0},
		{"dmas", required_argument, NULL, 0},
		{NULL, 0, 0, 0},
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:P",
			long_option, &option_index)) != EOF) {
		switch (opt) {
		/* Portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
				us_vhost_usage(prgname);
				return -1;
			}
			break;

		case 'P':
			promiscuous = 1;
			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
				ETH_VMDQ_ACCEPT_BROADCAST |
				ETH_VMDQ_ACCEPT_MULTICAST;

			break;

		case 0:
			/* Enable/disable vm2vm comms. */
			if (!strncmp(long_option[option_index].name, "vm2vm",
				MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for "
						"vm2vm [0|1|2]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					vm2vm_mode = (vm2vm_type)ret;
				}
			}

			/* Enable/disable retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					enable_retry = ret;
				}
			}

			/* Enable/disable TX checksum offload. */
			if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else
					enable_tx_csum = ret;
			}

			/* Enable/disable TSO offload. */
			if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else
					enable_tso = ret;
			}

			/* Specify the retry delay time (in microseconds) on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					burst_rx_delay_time = ret;
				}
			}

			/* Specify the number of retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					burst_rx_retry_num = ret;
				}
			}

			/* Enable/disable RX mergeable buffers. */
			if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					mergeable = !!ret;
					if (ret) {
						vmdq_conf_default.rxmode.offloads |=
							DEV_RX_OFFLOAD_JUMBO_FRAME;
						vmdq_conf_default.rxmode.max_rx_pkt_len
							= JUMBO_FRAME_MAX_SIZE;
					}
				}
			}

			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for stats [0..N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					enable_stats = ret;
				}
			}

			/* Set socket file path. */
			if (!strncmp(long_option[option_index].name,
						"socket-file", MAX_LONG_OPT_SZ)) {
				if (us_vhost_parse_socket_path(optarg) == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for socket name (Max %d characters)\n",
					PATH_MAX);
					us_vhost_usage(prgname);
					return -1;
				}
			}

			if (!strncmp(long_option[option_index].name,
						"dma-type", MAX_LONG_OPT_SZ)) {
				if (strlen(optarg) >= MAX_LONG_OPT_SZ) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Wrong DMA type\n");
					us_vhost_usage(prgname);
					return -1;
				}
				strcpy(dma_type, optarg);
			}

			if (!strncmp(long_option[option_index].name,
						"dmas", MAX_LONG_OPT_SZ)) {
				if (open_dma(optarg) == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Wrong DMA args\n");
					us_vhost_usage(prgname);
					return -1;
				}
				async_vhost_driver = 1;
			}

			break;

			/* Invalid option - print options. */
		default:
			us_vhost_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = i;
	}

	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	return 0;
}

/*
 * Update the global variable num_ports and the array ports according to the
 * number of ports in the system, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (!rte_eth_dev_is_valid_port(ports[portid])) {
			RTE_LOG(INFO, VHOST_PORT,
				"\nSpecified port ID(%u) is not valid\n",
				ports[portid]);
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}

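/* Find a vhost device that is ready for RX and matches the given MAC. */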
static __rte_always_inline struct vhost_dev *
find_vhost_dev(struct rte_ether_addr *mac)
{
	struct vhost_dev *vdev;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->ready == DEVICE_RX &&
		    rte_is_same_ether_addr(mac, &vdev->mac_address))
			return vdev;
	}

	return NULL;
}

/*
 * This function learns the MAC address of the device and registers this along
 * with a VLAN tag to a VMDQ pool.
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct rte_ether_hdr *pkt_hdr;
	int i, ret;

	/* Learn MAC address of guest device from packet */
	pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	if (find_vhost_dev(&pkt_hdr->s_addr)) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) device is using a registered MAC!\n",
			vdev->vid);
		return -1;
	}

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

	/* vlan_tag currently uses the device ID. */
	vdev->vlan_tag = vlan_tags[vdev->vid];

	/* Print out VMDQ registration info. */
	RTE_LOG(INFO, VHOST_DATA,
		"(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
		vdev->vid,
		vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
		vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
		vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
		vdev->vlan_tag);

	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
				(uint32_t)vdev->vid + vmdq_pool_base);
	if (ret)
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) failed to add device MAC address to VMDQ\n",
			vdev->vid);

	rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);

	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;

	return 0;
}

/*
 * Removes MAC address and VLAN tag from VMDQ. Ensures that nothing is adding
 * buffers to the RX queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
	unsigned i = 0;
	unsigned rx_count;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	if (vdev->ready == DEVICE_RX) {
		/* clear MAC and VLAN settings */
		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
		for (i = 0; i < 6; i++)
			vdev->mac_address.addr_bytes[i] = 0;

		vdev->vlan_tag = 0;

		/* Clear out the receive buffers */
		rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		}

		vdev->ready = DEVICE_MAC_LEARNING;
	}
}

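/* Free a burst of n mbufs. */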
static inline void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
	while (n--)
		rte_pktmbuf_free(pkts[n]);
}

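/*
 * Poll for enqueues that the async channel has finished on the RX virtqueue,
 * decrement the in-flight packet counter and free the completed mbufs.
 */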
static __rte_always_inline void
complete_async_pkts(struct vhost_dev *vdev)
{
	struct rte_mbuf *p_cpl[MAX_PKT_BURST];
	uint16_t complete_count;

	complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
					VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
	if (complete_count) {
		__atomic_sub_fetch(&vdev->nr_async_pkts, complete_count,
			__ATOMIC_SEQ_CST);
		free_pkts(p_cpl, complete_count);
	}
}

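/* Synchronously enqueue one packet into the destination device's RX virtqueue. */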
static __rte_always_inline void
sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
	    struct rte_mbuf *m)
{
	uint16_t ret;

	if (builtin_net_driver) {
		ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
	} else {
		ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
	}

	if (enable_stats) {
		__atomic_add_fetch(&dst_vdev->stats.rx_total_atomic, 1,
				__ATOMIC_SEQ_CST);
		__atomic_add_fetch(&dst_vdev->stats.rx_atomic, ret,
				__ATOMIC_SEQ_CST);
		src_vdev->stats.tx_total++;
		src_vdev->stats.tx += ret;
	}
}

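/*
 * Flush this core's TX buffer for @vdev into its RX virtqueue, via the
 * builtin net driver, the async path or a plain synchronous enqueue.
 */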
static __rte_always_inline void
drain_vhost(struct vhost_dev *vdev)
{
	uint16_t ret;
	uint64_t buff_idx = rte_lcore_id() * MAX_VHOST_DEVICE + vdev->vid;
	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;

	if (builtin_net_driver) {
		ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
	} else if (async_vhost_driver) {
		uint32_t cpu_cpl_nr = 0;
		uint16_t enqueue_fail = 0;
		struct rte_mbuf *m_cpu_cpl[nr_xmit];

		complete_async_pkts(vdev);
		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
					m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
		__atomic_add_fetch(&vdev->nr_async_pkts, ret - cpu_cpl_nr,
				__ATOMIC_SEQ_CST);

		if (cpu_cpl_nr)
			free_pkts(m_cpu_cpl, cpu_cpl_nr);

		enqueue_fail = nr_xmit - ret;
		if (enqueue_fail)
			free_pkts(&m[ret], nr_xmit - ret);
	} else {
		ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
						m, nr_xmit);
	}

	if (enable_stats) {
		__atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
				__ATOMIC_SEQ_CST);
		__atomic_add_fetch(&vdev->stats.rx_atomic, ret,
				__ATOMIC_SEQ_CST);
	}

	if (!async_vhost_driver)
		free_pkts(m, nr_xmit);
}

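/*
 * Walk all vhost devices and flush any of this core's TX buffers that have
 * been pending longer than the drain timeout.
 */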
static __rte_always_inline void
drain_vhost_table(void)
{
	uint16_t lcore_id = rte_lcore_id();
	struct vhost_bufftable *vhost_txq;
	struct vhost_dev *vdev;
	uint64_t cur_tsc;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		vhost_txq = vhost_txbuff[lcore_id * MAX_VHOST_DEVICE
						+ vdev->vid];

		cur_tsc = rte_rdtsc();
		if (unlikely(cur_tsc - vhost_txq->pre_tsc
				> MBUF_TABLE_DRAIN_TSC)) {
			RTE_LOG_DP(DEBUG, VHOST_DATA,
				"Vhost TX queue drained after timeout with burst size %u\n",
				vhost_txq->len);
			drain_vhost(vdev);
			vhost_txq->len = 0;
			vhost_txq->pre_tsc = cur_tsc;
		}
	}
}

/*
 * Check if the packet destination MAC address is for a local device. If so
 * then queue the packet for that device's RX queue. If not then return.
 */
static __rte_always_inline int
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct rte_ether_hdr *pkt_hdr;
	struct vhost_dev *dst_vdev;
	struct vhost_bufftable *vhost_txq;
	uint16_t lcore_id = rte_lcore_id();

	pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return -1;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
			vdev->vid);
		return 0;
	}

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is local\n", dst_vdev->vid);

	if (unlikely(dst_vdev->remove)) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) device is marked for removal\n", dst_vdev->vid);
		return 0;
	}

	vhost_txq = vhost_txbuff[lcore_id * MAX_VHOST_DEVICE + dst_vdev->vid];
	vhost_txq->m_table[vhost_txq->len++] = m;

	if (enable_stats) {
		vdev->stats.tx_total++;
		vdev->stats.tx++;
	}

	if (unlikely(vhost_txq->len == MAX_PKT_BURST)) {
		drain_vhost(dst_vdev);
		vhost_txq->len = 0;
		vhost_txq->pre_tsc = rte_rdtsc();
	}
	return 0;
}

/*
 * Check if the destination MAC of a packet belongs to a local VM; if so,
 * return its VLAN tag and the length offset to apply.
 */
static __rte_always_inline int
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
	uint32_t *offset, uint16_t *vlan_tag)
{
	struct vhost_dev *dst_vdev;
	struct rte_ether_hdr *pkt_hdr =
		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return 0;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
			vdev->vid);
		return -1;
	}

	/*
	 * HW VLAN strip reduces the packet length by the length of the VLAN
	 * tag, so the packet length needs to be restored by adding it back.
	 */
	*offset  = VLAN_HLEN;
	*vlan_tag = vlan_tags[vdev->vid];

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
		vdev->vid, dst_vdev->vid, *vlan_tag);

	return 0;
}

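/* Compute the pseudo-header checksum that TCP checksum offload expects. */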
static uint16_t
get_psd_sum(void *l3_hdr, uint64_t ol_flags)
{
	if (ol_flags & PKT_TX_IPV4)
		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
	else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}

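/*
 * Prepare the checksum fields of a TSO packet: request IPv4 header checksum
 * offload where applicable and seed the TCP checksum with the pseudo-header
 * sum.
 */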
static void virtio_tx_offload(struct rte_mbuf *m)
{
	void *l3_hdr;
	struct rte_ipv4_hdr *ipv4_hdr = NULL;
	struct rte_tcp_hdr *tcp_hdr = NULL;
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	l3_hdr = (char *)eth_hdr + m->l2_len;

	if (m->ol_flags & PKT_TX_IPV4) {
		ipv4_hdr = l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		m->ol_flags |= PKT_TX_IP_CKSUM;
	}

	tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + m->l3_len);
	tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}

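/* Transmit the buffered packets on the physical port and free any not sent. */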
static __rte_always_inline void
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
	uint16_t count;

	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
				 tx_q->m_table, tx_q->len);
	if (unlikely(count < tx_q->len))
		free_pkts(&tx_q->m_table[count], tx_q->len - count);

	tx_q->len = 0;
}

/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
 */
static __rte_always_inline void
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
	struct mbuf_table *tx_q;
	unsigned offset = 0;
	const uint16_t lcore_id = rte_lcore_id();
	struct rte_ether_hdr *nh;

	nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	if (unlikely(rte_is_broadcast_ether_addr(&nh->d_addr))) {
		struct vhost_dev *vdev2;

		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
			if (vdev2 != vdev)
				sync_virtio_xmit(vdev2, vdev, m);
		}
		goto queue2nic;
	}

	/* check if destination is a local VM */
	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0))
		return;

	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
		if (unlikely(find_local_dest(vdev, m, &offset,
					     &vlan_tag) != 0)) {
			rte_pktmbuf_free(m);
			return;
		}
	}

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is external\n", vdev->vid);

queue2nic:

	/* Add packet to the port tx queue */
	tx_q = &lcore_tx_queue[lcore_id];

	nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	if (unlikely(nh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) {
		/* Guest has inserted the vlan tag. */
		struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1);
		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
		if ((vm2vm_mode == VM2VM_HARDWARE) &&
			(vh->vlan_tci != vlan_tag_be))
			vh->vlan_tci = vlan_tag_be;
	} else {
		m->ol_flags |= PKT_TX_VLAN_PKT;

		/*
		 * Find the right seg to adjust the data len when offset is
		 * bigger than tail room size.
		 */
		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
			if (likely(offset <= rte_pktmbuf_tailroom(m)))
				m->data_len += offset;
			else {
				struct rte_mbuf *seg = m;

				while ((seg->next != NULL) &&
					(offset > rte_pktmbuf_tailroom(seg)))
					seg = seg->next;

				seg->data_len += offset;
			}
			m->pkt_len += offset;
		}

		m->vlan_tci = vlan_tag;
	}

	if (m->ol_flags & PKT_TX_TCP_SEG)
		virtio_tx_offload(m);

	tx_q->m_table[tx_q->len++] = m;
	if (enable_stats) {
		vdev->stats.tx_total++;
		vdev->stats.tx++;
	}

	if (unlikely(tx_q->len == MAX_PKT_BURST))
		do_drain_mbuf_table(tx_q);
}

1151c0583d98SJerin Jacob static __rte_always_inline void
1152273ecdbcSYuanhan Liu drain_mbuf_table(struct mbuf_table *tx_q)
1153273ecdbcSYuanhan Liu {
1154273ecdbcSYuanhan Liu 	static uint64_t prev_tsc;
1155273ecdbcSYuanhan Liu 	uint64_t cur_tsc;
1156273ecdbcSYuanhan Liu 
1157273ecdbcSYuanhan Liu 	if (tx_q->len == 0)
1158d19533e8SHuawei Xie 		return;
1159273ecdbcSYuanhan Liu 
1160273ecdbcSYuanhan Liu 	cur_tsc = rte_rdtsc();
1161273ecdbcSYuanhan Liu 	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
1162273ecdbcSYuanhan Liu 		prev_tsc = cur_tsc;
1163273ecdbcSYuanhan Liu 
11645d8f0bafSOlivier Matz 		RTE_LOG_DP(DEBUG, VHOST_DATA,
1165273ecdbcSYuanhan Liu 			"TX queue drained after timeout with burst size %u\n",
1166273ecdbcSYuanhan Liu 			tx_q->len);
1167273ecdbcSYuanhan Liu 		do_drain_mbuf_table(tx_q);
1168d19533e8SHuawei Xie 	}
1169273ecdbcSYuanhan Liu }
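/*
 * A note on the drain period: MBUF_TABLE_DRAIN_TSC is defined outside
 * this file (in main.h) and is assumed to be the BURST_TX_DRAIN_US
 * budget converted into TSC cycles. A minimal sketch of such a
 * conversion with the rte_cycles.h helpers (rte_get_tsc_hz() returns
 * the TSC frequency in Hz; US_PER_S is 1000000):
 *
 *     static uint64_t
 *     us_to_tsc(uint64_t us)
 *     {
 *             return rte_get_tsc_hz() / US_PER_S * us;
 *     }
 *
 * which gives a drain period of roughly us_to_tsc(BURST_TX_DRAIN_US).
 */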
1170273ecdbcSYuanhan Liu 
1171c0583d98SJerin Jacob static __rte_always_inline void
1172273ecdbcSYuanhan Liu drain_eth_rx(struct vhost_dev *vdev)
1173273ecdbcSYuanhan Liu {
1174273ecdbcSYuanhan Liu 	uint16_t rx_count, enqueue_count;
1175*a68ba8e0SCheng Jiang 	struct rte_mbuf *pkts[MAX_PKT_BURST];
1176273ecdbcSYuanhan Liu 
1177273ecdbcSYuanhan Liu 	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
1178273ecdbcSYuanhan Liu 				    pkts, MAX_PKT_BURST);
1179abec60e7SCheng Jiang 
1180273ecdbcSYuanhan Liu 	if (!rx_count)
1181273ecdbcSYuanhan Liu 		return;
1182273ecdbcSYuanhan Liu 
1183d19533e8SHuawei Xie 	/*
1184273ecdbcSYuanhan Liu 	 * When "enable_retry" is set, here we wait and retry when there
1185273ecdbcSYuanhan Liu 	 * are not enough free slots in the queue to hold @rx_count
1186273ecdbcSYuanhan Liu 	 * packets, to diminish packet loss.
1187273ecdbcSYuanhan Liu 	 */
1188273ecdbcSYuanhan Liu 	if (enable_retry &&
11894ecf22e3SYuanhan Liu 	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
1190273ecdbcSYuanhan Liu 			VIRTIO_RXQ))) {
1191273ecdbcSYuanhan Liu 		uint32_t retry;
1192273ecdbcSYuanhan Liu 
1193273ecdbcSYuanhan Liu 		for (retry = 0; retry < burst_rx_retry_num; retry++) {
1194273ecdbcSYuanhan Liu 			rte_delay_us(burst_rx_delay_time);
11954ecf22e3SYuanhan Liu 			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
1196273ecdbcSYuanhan Liu 					VIRTIO_RXQ))
1197273ecdbcSYuanhan Liu 				break;
1198273ecdbcSYuanhan Liu 		}
1199273ecdbcSYuanhan Liu 	}
1200273ecdbcSYuanhan Liu 
1201ca059fa5SYuanhan Liu 	if (builtin_net_driver) {
1202ca059fa5SYuanhan Liu 		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
1203ca059fa5SYuanhan Liu 						pkts, rx_count);
1204abec60e7SCheng Jiang 	} else if (async_vhost_driver) {
1205*a68ba8e0SCheng Jiang 		uint32_t cpu_cpl_nr = 0;
1206*a68ba8e0SCheng Jiang 		uint16_t enqueue_fail = 0;
1207*a68ba8e0SCheng Jiang 		struct rte_mbuf *m_cpu_cpl[MAX_PKT_BURST];
1208*a68ba8e0SCheng Jiang 
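		/* Poll for async copies that have completed and release them. */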
1209*a68ba8e0SCheng Jiang 		complete_async_pkts(vdev);
1210abec60e7SCheng Jiang 		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
1211*a68ba8e0SCheng Jiang 					VIRTIO_RXQ, pkts, rx_count,
1212*a68ba8e0SCheng Jiang 					m_cpu_cpl, &cpu_cpl_nr);
1213*a68ba8e0SCheng Jiang 		__atomic_add_fetch(&vdev->nr_async_pkts,
1214*a68ba8e0SCheng Jiang 					enqueue_count - cpu_cpl_nr,
1215*a68ba8e0SCheng Jiang 					__ATOMIC_SEQ_CST);
1216*a68ba8e0SCheng Jiang 		if (cpu_cpl_nr)
1217*a68ba8e0SCheng Jiang 			free_pkts(m_cpu_cpl, cpu_cpl_nr);
1218*a68ba8e0SCheng Jiang 
1219*a68ba8e0SCheng Jiang 		enqueue_fail = rx_count - enqueue_count;
1220*a68ba8e0SCheng Jiang 		if (enqueue_fail)
1221*a68ba8e0SCheng Jiang 			free_pkts(&pkts[enqueue_count], enqueue_fail);
1222*a68ba8e0SCheng Jiang 
1223ca059fa5SYuanhan Liu 	} else {
12244ecf22e3SYuanhan Liu 		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
1225273ecdbcSYuanhan Liu 						pkts, rx_count);
1226ca059fa5SYuanhan Liu 	}
1227abec60e7SCheng Jiang 
1228273ecdbcSYuanhan Liu 	if (enable_stats) {
1229*a68ba8e0SCheng Jiang 		__atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
1230*a68ba8e0SCheng Jiang 				__ATOMIC_SEQ_CST);
1231*a68ba8e0SCheng Jiang 		__atomic_add_fetch(&vdev->stats.rx_atomic, enqueue_count,
1232*a68ba8e0SCheng Jiang 				__ATOMIC_SEQ_CST);
1233273ecdbcSYuanhan Liu 	}
1234273ecdbcSYuanhan Liu 
1235abec60e7SCheng Jiang 	if (!async_vhost_driver)
1236273ecdbcSYuanhan Liu 		free_pkts(pkts, rx_count);
1237273ecdbcSYuanhan Liu }
1238273ecdbcSYuanhan Liu 
1239c0583d98SJerin Jacob static __rte_always_inline void
1240273ecdbcSYuanhan Liu drain_virtio_tx(struct vhost_dev *vdev)
1241273ecdbcSYuanhan Liu {
1242273ecdbcSYuanhan Liu 	struct rte_mbuf *pkts[MAX_PKT_BURST];
1243273ecdbcSYuanhan Liu 	uint16_t count;
1244273ecdbcSYuanhan Liu 	uint16_t i;
1245273ecdbcSYuanhan Liu 
1246ca059fa5SYuanhan Liu 	if (builtin_net_driver) {
1247ca059fa5SYuanhan Liu 		count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
1248273ecdbcSYuanhan Liu 					pkts, MAX_PKT_BURST);
1249ca059fa5SYuanhan Liu 	} else {
1250ca059fa5SYuanhan Liu 		count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
1251ca059fa5SYuanhan Liu 					mbuf_pool, pkts, MAX_PKT_BURST);
1252ca059fa5SYuanhan Liu 	}
1253273ecdbcSYuanhan Liu 
1254273ecdbcSYuanhan Liu 	/* setup VMDq for the first packet */
1255273ecdbcSYuanhan Liu 	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
1256273ecdbcSYuanhan Liu 		if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
1257273ecdbcSYuanhan Liu 			free_pkts(pkts, count);
1258273ecdbcSYuanhan Liu 	}
1259273ecdbcSYuanhan Liu 
12607f262239SYuanhan Liu 	for (i = 0; i < count; ++i)
1261e2a1dd12SYuanhan Liu 		virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
1262273ecdbcSYuanhan Liu }
1263273ecdbcSYuanhan Liu 
1264273ecdbcSYuanhan Liu /*
1265273ecdbcSYuanhan Liu  * Main function of vhost-switch. It basically does:
1266273ecdbcSYuanhan Liu  *
1267273ecdbcSYuanhan Liu  * for each vhost device {
1268273ecdbcSYuanhan Liu  *    - drain_eth_rx()
1269273ecdbcSYuanhan Liu  *
1270273ecdbcSYuanhan Liu  *      Which drains the host eth Rx queue linked to the vhost device
1271273ecdbcSYuanhan Liu  *      and delivers the packets to the guest virtio Rx ring associated
1272273ecdbcSYuanhan Liu  *      with this vhost device.
1273273ecdbcSYuanhan Liu  *
1274273ecdbcSYuanhan Liu  *    - drain_virtio_tx()
1275273ecdbcSYuanhan Liu  *
1276273ecdbcSYuanhan Liu  *      Which drains the guest virtio Tx queue and delivers the packets
1277273ecdbcSYuanhan Liu  *      to their target, which could be another vhost device or the
1278273ecdbcSYuanhan Liu  *      physical eth dev. The routing is done in "virtio_tx_route".
1279273ecdbcSYuanhan Liu  * }
1280d19533e8SHuawei Xie  */
1281d19533e8SHuawei Xie static int
1282273ecdbcSYuanhan Liu switch_worker(void *arg __rte_unused)
1283d19533e8SHuawei Xie {
1284273ecdbcSYuanhan Liu 	unsigned i;
1285273ecdbcSYuanhan Liu 	unsigned lcore_id = rte_lcore_id();
1286273ecdbcSYuanhan Liu 	struct vhost_dev *vdev;
1287d19533e8SHuawei Xie 	struct mbuf_table *tx_q;
1288d19533e8SHuawei Xie 
1289d19533e8SHuawei Xie 	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
1290d19533e8SHuawei Xie 
1291d19533e8SHuawei Xie 	tx_q = &lcore_tx_queue[lcore_id];
1292273ecdbcSYuanhan Liu 	for (i = 0; i < rte_lcore_count(); i++) {
1293d19533e8SHuawei Xie 		if (lcore_ids[i] == lcore_id) {
1294d19533e8SHuawei Xie 			tx_q->txq_id = i;
1295d19533e8SHuawei Xie 			break;
1296d19533e8SHuawei Xie 		}
1297d19533e8SHuawei Xie 	}
1298d19533e8SHuawei Xie 
1299d19533e8SHuawei Xie 	while(1) {
1300273ecdbcSYuanhan Liu 		drain_mbuf_table(tx_q);
1301*a68ba8e0SCheng Jiang 		drain_vhost_table();
1302d19533e8SHuawei Xie 		/*
130345657a5cSYuanhan Liu 		 * If requested, inform the configuration core that we have
130445657a5cSYuanhan Liu 		 * exited the linked list and that no devices are in use.
1305d19533e8SHuawei Xie 		 */
130645657a5cSYuanhan Liu 		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
130745657a5cSYuanhan Liu 			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
1308d19533e8SHuawei Xie 
1309d19533e8SHuawei Xie 		/*
1310273ecdbcSYuanhan Liu 		 * Process vhost devices
1311d19533e8SHuawei Xie 		 */
131297daf19eSYuanhan Liu 		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
131397daf19eSYuanhan Liu 			      lcore_vdev_entry) {
1314364dddcdSHuawei Xie 			if (unlikely(vdev->remove)) {
1315e571e6b4SHuawei Xie 				unlink_vmdq(vdev);
1316e571e6b4SHuawei Xie 				vdev->ready = DEVICE_SAFE_REMOVE;
1317d19533e8SHuawei Xie 				continue;
1318d19533e8SHuawei Xie 			}
131945657a5cSYuanhan Liu 
1320273ecdbcSYuanhan Liu 			if (likely(vdev->ready == DEVICE_RX))
1321273ecdbcSYuanhan Liu 				drain_eth_rx(vdev);
1322d19533e8SHuawei Xie 
1323273ecdbcSYuanhan Liu 			if (likely(!vdev->remove))
1324273ecdbcSYuanhan Liu 				drain_virtio_tx(vdev);
1325d19533e8SHuawei Xie 		}
1326d19533e8SHuawei Xie 	}
1327d19533e8SHuawei Xie 
1328d19533e8SHuawei Xie 	return 0;
1329d19533e8SHuawei Xie }
1330d19533e8SHuawei Xie 
1331d19533e8SHuawei Xie /*
133245657a5cSYuanhan Liu  * Remove a device from the specific data core linked list and from the
133345657a5cSYuanhan Liu  * main linked list. Synchronization occurs through the use of the
133445657a5cSYuanhan Liu  * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
1335d19533e8SHuawei Xie  * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
1336d19533e8SHuawei Xie  */
1337d19533e8SHuawei Xie static void
13384ecf22e3SYuanhan Liu destroy_device(int vid)
1339d19533e8SHuawei Xie {
134016ae8abeSYuanhan Liu 	struct vhost_dev *vdev = NULL;
1341d19533e8SHuawei Xie 	int lcore;
1342*a68ba8e0SCheng Jiang 	uint16_t i;
1343d19533e8SHuawei Xie 
134416ae8abeSYuanhan Liu 	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
13454ecf22e3SYuanhan Liu 		if (vdev->vid == vid)
134616ae8abeSYuanhan Liu 			break;
134716ae8abeSYuanhan Liu 	}
134816ae8abeSYuanhan Liu 	if (!vdev)
134916ae8abeSYuanhan Liu 		return;
1350d19533e8SHuawei Xie 	/* Set the remove flag. */
1351e571e6b4SHuawei Xie 	vdev->remove = 1;
1352e571e6b4SHuawei Xie 	while(vdev->ready != DEVICE_SAFE_REMOVE) {
1353d19533e8SHuawei Xie 		rte_pause();
1354d19533e8SHuawei Xie 	}
1355d19533e8SHuawei Xie 
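	/*
	 * Free the per-lcore vhost TX buffer tables that new_device()
	 * allocated for this device.
	 */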
1356*a68ba8e0SCheng Jiang 	for (i = 0; i < RTE_MAX_LCORE; i++)
1357*a68ba8e0SCheng Jiang 		rte_free(vhost_txbuff[i * MAX_VHOST_DEVICE + vid]);
1358*a68ba8e0SCheng Jiang 
1359ca059fa5SYuanhan Liu 	if (builtin_net_driver)
1360ca059fa5SYuanhan Liu 		vs_vhost_net_remove(vdev);
1361ca059fa5SYuanhan Liu 
136297daf19eSYuanhan Liu 	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
136397daf19eSYuanhan Liu 		     lcore_vdev_entry);
136497daf19eSYuanhan Liu 	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
136597daf19eSYuanhan Liu 
1366d19533e8SHuawei Xie 
1367d19533e8SHuawei Xie 	/* Set the dev_removal_flag on each lcore. */
1368cb056611SStephen Hemminger 	RTE_LCORE_FOREACH_WORKER(lcore)
136945657a5cSYuanhan Liu 		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
1370d19533e8SHuawei Xie 
1371d19533e8SHuawei Xie 	/*
137245657a5cSYuanhan Liu 	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
137345657a5cSYuanhan Liu 	 * we can be sure that they can no longer access the device removed
137445657a5cSYuanhan Liu 	 * from the linked lists and that the devices are no longer in use.
1375d19533e8SHuawei Xie 	 */
1376cb056611SStephen Hemminger 	RTE_LCORE_FOREACH_WORKER(lcore) {
137745657a5cSYuanhan Liu 		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
1378d19533e8SHuawei Xie 			rte_pause();
1379d19533e8SHuawei Xie 	}
1380d19533e8SHuawei Xie 
138145657a5cSYuanhan Liu 	lcore_info[vdev->coreid].device_num--;
1382d19533e8SHuawei Xie 
138345657a5cSYuanhan Liu 	RTE_LOG(INFO, VHOST_DATA,
1384c08a3490SYuanhan Liu 		"(%d) device has been removed from data core\n",
1385e2a1dd12SYuanhan Liu 		vdev->vid);
1386d19533e8SHuawei Xie 
1387abec60e7SCheng Jiang 	if (async_vhost_driver)
1388abec60e7SCheng Jiang 		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
1389abec60e7SCheng Jiang 
1390e571e6b4SHuawei Xie 	rte_free(vdev);
1391d19533e8SHuawei Xie }
1392d19533e8SHuawei Xie 
1393d19533e8SHuawei Xie /*
1394d19533e8SHuawei Xie  * A new device is added to a data core. First the device is added to the main linked list
139510b4270fSRami Rosen  * and then allocated to a specific data core.
1396d19533e8SHuawei Xie  */
1397d19533e8SHuawei Xie static int
13984ecf22e3SYuanhan Liu new_device(int vid)
1399d19533e8SHuawei Xie {
1400d19533e8SHuawei Xie 	int lcore, core_add = 0;
1401*a68ba8e0SCheng Jiang 	uint16_t i;
1402d19533e8SHuawei Xie 	uint32_t device_num_min = num_devices;
1403e571e6b4SHuawei Xie 	struct vhost_dev *vdev;
1404fdf20fa7SSergio Gonzalez Monroy 	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
1405e571e6b4SHuawei Xie 	if (vdev == NULL) {
1406c08a3490SYuanhan Liu 		RTE_LOG(INFO, VHOST_DATA,
14077f262239SYuanhan Liu 			"(%d) couldn't allocate memory for vhost dev\n",
1408e2a1dd12SYuanhan Liu 			vid);
1409e571e6b4SHuawei Xie 		return -1;
1410e571e6b4SHuawei Xie 	}
1411e2a1dd12SYuanhan Liu 	vdev->vid = vid;
1412d19533e8SHuawei Xie 
1413*a68ba8e0SCheng Jiang 	for (i = 0; i < RTE_MAX_LCORE; i++) {
1414*a68ba8e0SCheng Jiang 		vhost_txbuff[i * MAX_VHOST_DEVICE + vid]
1415*a68ba8e0SCheng Jiang 			= rte_zmalloc("vhost bufftable",
1416*a68ba8e0SCheng Jiang 				sizeof(struct vhost_bufftable),
1417*a68ba8e0SCheng Jiang 				RTE_CACHE_LINE_SIZE);
1418*a68ba8e0SCheng Jiang 
1419*a68ba8e0SCheng Jiang 		if (vhost_txbuff[i * MAX_VHOST_DEVICE + vid] == NULL) {
1420*a68ba8e0SCheng Jiang 			RTE_LOG(INFO, VHOST_DATA,
1421*a68ba8e0SCheng Jiang 			  "(%d) couldn't allocate memory for vhost TX\n", vid);
1422*a68ba8e0SCheng Jiang 			return -1;
1423*a68ba8e0SCheng Jiang 		}
1424*a68ba8e0SCheng Jiang 	}
1425*a68ba8e0SCheng Jiang 
1426ca059fa5SYuanhan Liu 	if (builtin_net_driver)
1427ca059fa5SYuanhan Liu 		vs_vhost_net_setup(vdev);
1428ca059fa5SYuanhan Liu 
142997daf19eSYuanhan Liu 	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
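	/* Map this device to the first RX queue of its dedicated VMDq pool. */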
1430e2a1dd12SYuanhan Liu 	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
1431d19533e8SHuawei Xie 
1432d19533e8SHuawei Xie 	/* Reset the ready flag. */
1433e571e6b4SHuawei Xie 	vdev->ready = DEVICE_MAC_LEARNING;
1434e571e6b4SHuawei Xie 	vdev->remove = 0;
1435d19533e8SHuawei Xie 
1436d19533e8SHuawei Xie 	/* Find a suitable lcore to add the device. */
1437cb056611SStephen Hemminger 	RTE_LCORE_FOREACH_WORKER(lcore) {
143845657a5cSYuanhan Liu 		if (lcore_info[lcore].device_num < device_num_min) {
143945657a5cSYuanhan Liu 			device_num_min = lcore_info[lcore].device_num;
1440d19533e8SHuawei Xie 			core_add = lcore;
1441d19533e8SHuawei Xie 		}
1442d19533e8SHuawei Xie 	}
1443e571e6b4SHuawei Xie 	vdev->coreid = core_add;
1444e571e6b4SHuawei Xie 
144597daf19eSYuanhan Liu 	TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
144697daf19eSYuanhan Liu 			  lcore_vdev_entry);
144745657a5cSYuanhan Liu 	lcore_info[vdev->coreid].device_num++;
1448d19533e8SHuawei Xie 
1449d19533e8SHuawei Xie 	/* Disable guest notifications: the data cores busy-poll the rings. */
14504ecf22e3SYuanhan Liu 	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
14514ecf22e3SYuanhan Liu 	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
1452d19533e8SHuawei Xie 
1453c08a3490SYuanhan Liu 	RTE_LOG(INFO, VHOST_DATA,
1454c08a3490SYuanhan Liu 		"(%d) device has been added to data core %d\n",
1455e2a1dd12SYuanhan Liu 		vid, vdev->coreid);
1456d19533e8SHuawei Xie 
1457abec60e7SCheng Jiang 	if (async_vhost_driver) {
14586e9a9d2aSCheng Jiang 		struct rte_vhost_async_features f;
14596e9a9d2aSCheng Jiang 		struct rte_vhost_async_channel_ops channel_ops;
1460*a68ba8e0SCheng Jiang 
14616e9a9d2aSCheng Jiang 		if (strncmp(dma_type, "ioat", 4) == 0) {
14626e9a9d2aSCheng Jiang 			channel_ops.transfer_data = ioat_transfer_data_cb;
14636e9a9d2aSCheng Jiang 			channel_ops.check_completed_copies =
14646e9a9d2aSCheng Jiang 				ioat_check_completed_copies_cb;
1465*a68ba8e0SCheng Jiang 
1466abec60e7SCheng Jiang 			f.async_inorder = 1;
1467abec60e7SCheng Jiang 			f.async_threshold = 256;
1468*a68ba8e0SCheng Jiang 
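			/*
			 * f.intval is the integer view of the feature
			 * bit-fields set above.
			 */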
1469abec60e7SCheng Jiang 			return rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
1470abec60e7SCheng Jiang 				f.intval, &channel_ops);
1471abec60e7SCheng Jiang 		}
14726e9a9d2aSCheng Jiang 	}
1473abec60e7SCheng Jiang 
1474d19533e8SHuawei Xie 	return 0;
1475d19533e8SHuawei Xie }
1476d19533e8SHuawei Xie 
1477d19533e8SHuawei Xie /*
1478d19533e8SHuawei Xie  * These callbacks allow devices to be added to the data core when
1479d19533e8SHuawei Xie  * configuration has fully completed.
1480d19533e8SHuawei Xie  */
14817c129037SYuanhan Liu static const struct vhost_device_ops virtio_net_device_ops =
1482d19533e8SHuawei Xie {
1483d19533e8SHuawei Xie 	.new_device =  new_device,
1484d19533e8SHuawei Xie 	.destroy_device = destroy_device,
1485d19533e8SHuawei Xie };
1486d19533e8SHuawei Xie 
1487d19533e8SHuawei Xie /*
1488d19533e8SHuawei Xie  * This is a thread that wakes up periodically to print stats if the
1489d19533e8SHuawei Xie  * user has enabled them.
1490d19533e8SHuawei Xie  */
1491fa204854SOlivier Matz static void *
1492fa204854SOlivier Matz print_stats(__rte_unused void *arg)
1493d19533e8SHuawei Xie {
149445657a5cSYuanhan Liu 	struct vhost_dev *vdev;
1495d19533e8SHuawei Xie 	uint64_t tx_dropped, rx_dropped;
1496d19533e8SHuawei Xie 	uint64_t tx, tx_total, rx, rx_total;
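	/* ANSI escape sequences: "\033[2J" clears the screen and
	 * "\033[1;1H" moves the cursor to the top-left corner (27 is ESC).
	 */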
1497d19533e8SHuawei Xie 	const char clr[] = { 27, '[', '2', 'J', '\0' };
1498d19533e8SHuawei Xie 	const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
1499d19533e8SHuawei Xie 
1500d19533e8SHuawei Xie 	while(1) {
1501d19533e8SHuawei Xie 		sleep(enable_stats);
1502d19533e8SHuawei Xie 
1503d19533e8SHuawei Xie 		/* Clear screen and move to top left */
150456fe86f8SYuanhan Liu 		printf("%s%s\n", clr, top_left);
150556fe86f8SYuanhan Liu 		printf("Device statistics =================================\n");
1506d19533e8SHuawei Xie 
150797daf19eSYuanhan Liu 		TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
150856fe86f8SYuanhan Liu 			tx_total   = vdev->stats.tx_total;
150956fe86f8SYuanhan Liu 			tx         = vdev->stats.tx;
1510d19533e8SHuawei Xie 			tx_dropped = tx_total - tx;
151156fe86f8SYuanhan Liu 
1512*a68ba8e0SCheng Jiang 			rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
1513*a68ba8e0SCheng Jiang 				__ATOMIC_SEQ_CST);
1514*a68ba8e0SCheng Jiang 			rx         = __atomic_load_n(&vdev->stats.rx_atomic,
1515*a68ba8e0SCheng Jiang 				__ATOMIC_SEQ_CST);
1516d19533e8SHuawei Xie 			rx_dropped = rx_total - rx;
1517d19533e8SHuawei Xie 
1518c08a3490SYuanhan Liu 			printf("Statistics for device %d\n"
151956fe86f8SYuanhan Liu 				"-----------------------\n"
152056fe86f8SYuanhan Liu 				"TX total:              %" PRIu64 "\n"
152156fe86f8SYuanhan Liu 				"TX dropped:            %" PRIu64 "\n"
152256fe86f8SYuanhan Liu 				"TX successful:         %" PRIu64 "\n"
152356fe86f8SYuanhan Liu 				"RX total:              %" PRIu64 "\n"
152456fe86f8SYuanhan Liu 				"RX dropped:            %" PRIu64 "\n"
152556fe86f8SYuanhan Liu 				"RX successful:         %" PRIu64 "\n",
15264ecf22e3SYuanhan Liu 				vdev->vid,
152756fe86f8SYuanhan Liu 				tx_total, tx_dropped, tx,
152856fe86f8SYuanhan Liu 				rx_total, rx_dropped, rx);
1529d19533e8SHuawei Xie 		}
153056fe86f8SYuanhan Liu 
153156fe86f8SYuanhan Liu 		printf("===================================================\n");
15323ee6f706SGeorgiy Levashov 
15333ee6f706SGeorgiy Levashov 		fflush(stdout);
1534d19533e8SHuawei Xie 	}
1535fa204854SOlivier Matz 
1536fa204854SOlivier Matz 	return NULL;
1537d19533e8SHuawei Xie }
1538d19533e8SHuawei Xie 
1539ad0eef4dSJiayu Hu static void
1540ad0eef4dSJiayu Hu unregister_drivers(int socket_num)
1541ad0eef4dSJiayu Hu {
1542ad0eef4dSJiayu Hu 	int i, ret;
1543ad0eef4dSJiayu Hu 
1544ad0eef4dSJiayu Hu 	for (i = 0; i < socket_num; i++) {
1545ad0eef4dSJiayu Hu 		ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
1546ad0eef4dSJiayu Hu 		if (ret != 0)
1547ad0eef4dSJiayu Hu 			RTE_LOG(ERR, VHOST_CONFIG,
1548ad0eef4dSJiayu Hu 				"Fail to unregister vhost driver for %s.\n",
1549ad0eef4dSJiayu Hu 				socket_files + i * PATH_MAX);
1550ad0eef4dSJiayu Hu 	}
1551ad0eef4dSJiayu Hu }
1552ad0eef4dSJiayu Hu 
1553c83d2d00SOuyang Changchun /* When we receive an INT signal, unregister the vhost driver. */
1554c83d2d00SOuyang Changchun static void
1555c83d2d00SOuyang Changchun sigint_handler(__rte_unused int signum)
1556c83d2d00SOuyang Changchun {
1557c83d2d00SOuyang Changchun 	/* Unregister vhost driver. */
1558ad0eef4dSJiayu Hu 	unregister_drivers(nb_sockets);
1559ad0eef4dSJiayu Hu 
1560c83d2d00SOuyang Changchun 	exit(0);
1561c83d2d00SOuyang Changchun }
1562d19533e8SHuawei Xie 
1563d19533e8SHuawei Xie /*
1564bdb19b77SYuanhan Liu  * While creating an mbuf pool, one key thing is to figure out how
1565bdb19b77SYuanhan Liu  * many mbuf entries are enough for our use. Here are some
1566bdb19b77SYuanhan Liu  * guidelines:
1567bdb19b77SYuanhan Liu  *
1568bdb19b77SYuanhan Liu  * - Each rx queue reserves @nr_rx_desc mbufs at queue setup stage.
1569bdb19b77SYuanhan Liu  *
1570bdb19b77SYuanhan Liu  * - For each switch core (a CPU core that does the packet switching),
1571bdb19b77SYuanhan Liu  *   we also need to make some reservation for receiving packets from
1572bdb19b77SYuanhan Liu  *   the virtio Tx queue. How many are enough depends on the usage.
1573bdb19b77SYuanhan Liu  *   It's normally a simple calculation like the following:
1574bdb19b77SYuanhan Liu  *
1575bdb19b77SYuanhan Liu  *       MAX_PKT_BURST * max packet size / mbuf size
1576bdb19b77SYuanhan Liu  *
1577bdb19b77SYuanhan Liu  *   So, we definitely need to allocate more mbufs when TSO is enabled.
1578bdb19b77SYuanhan Liu  *
1579bdb19b77SYuanhan Liu  * - Similarly, for each switch core, we should reserve @nr_rx_desc
1580bdb19b77SYuanhan Liu  *   mbufs for receiving the packets from the physical NIC device.
1581bdb19b77SYuanhan Liu  *
1582bdb19b77SYuanhan Liu  * - We also need to make sure that, for each switch core, we have
1583bdb19b77SYuanhan Liu  *   allocated enough mbufs to fill up the mbuf cache.
1584bdb19b77SYuanhan Liu  */
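/*
 * A worked example of the sizing above, with illustrative numbers (not
 * necessarily the ones this build runs with): assume mbuf_size = 2176
 * (MBUF_DATA_SIZE with a 128-byte headroom), mtu = 1500,
 * MAX_PKT_BURST = 32, nr_rx_desc = 1024 and nr_mbuf_cache = 128. Then:
 *
 *     nr_mbufs_per_core  = (1500 + 2176) * 32 / (2176 - 128) = 57
 *     nr_mbufs_per_core += 1024                              -> 1081
 *     nr_mbufs_per_core  = RTE_MAX(1081, 128)                -> 1081
 *
 * and for one port, 128 queues and 3 switch cores:
 *
 *     nr_mbufs = 128 * 1024 + 1081 * 3 = 134315
 */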
1585bdb19b77SYuanhan Liu static void
1586bdb19b77SYuanhan Liu create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
1587bdb19b77SYuanhan Liu 	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
1588bdb19b77SYuanhan Liu {
1589bdb19b77SYuanhan Liu 	uint32_t nr_mbufs;
1590bdb19b77SYuanhan Liu 	uint32_t nr_mbufs_per_core;
1591bdb19b77SYuanhan Liu 	uint32_t mtu = 1500;
1592bdb19b77SYuanhan Liu 
1593bdb19b77SYuanhan Liu 	if (mergeable)
1594bdb19b77SYuanhan Liu 		mtu = 9000;
1595bdb19b77SYuanhan Liu 	if (enable_tso)
1596bdb19b77SYuanhan Liu 		mtu = 64 * 1024;
1597bdb19b77SYuanhan Liu 
1598bdb19b77SYuanhan Liu 	nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
159912ee45a3SYong Wang 			(mbuf_size - RTE_PKTMBUF_HEADROOM);
1600bdb19b77SYuanhan Liu 	nr_mbufs_per_core += nr_rx_desc;
1601bdb19b77SYuanhan Liu 	nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
1602bdb19b77SYuanhan Liu 
1603bdb19b77SYuanhan Liu 	nr_mbufs  = nr_queues * nr_rx_desc;
1604bdb19b77SYuanhan Liu 	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
1605bdb19b77SYuanhan Liu 	nr_mbufs *= nr_port;
1606bdb19b77SYuanhan Liu 
1607bdb19b77SYuanhan Liu 	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
1608bdb19b77SYuanhan Liu 					    nr_mbuf_cache, 0, mbuf_size,
1609bdb19b77SYuanhan Liu 					    rte_socket_id());
1610bdb19b77SYuanhan Liu 	if (mbuf_pool == NULL)
1611bdb19b77SYuanhan Liu 		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1612bdb19b77SYuanhan Liu }
1613bdb19b77SYuanhan Liu 
1614bdb19b77SYuanhan Liu /*
1615164a601bSYuanhan Liu  * Main function, does initialisation and calls the per-lcore functions.
1616d19533e8SHuawei Xie  */
1617d19533e8SHuawei Xie int
161898a16481SDavid Marchand main(int argc, char *argv[])
1619d19533e8SHuawei Xie {
1620d19533e8SHuawei Xie 	unsigned lcore_id, core_id = 0;
1621d19533e8SHuawei Xie 	unsigned nb_ports, valid_num_ports;
1622ad0eef4dSJiayu Hu 	int ret, i;
1623f8244c63SZhiyong Yang 	uint16_t portid;
1624d19533e8SHuawei Xie 	static pthread_t tid;
16252345e3beSYuanhan Liu 	uint64_t flags = 0;
1626d19533e8SHuawei Xie 
1627c83d2d00SOuyang Changchun 	signal(SIGINT, sigint_handler);
1628c83d2d00SOuyang Changchun 
1629d19533e8SHuawei Xie 	/* init EAL */
1630d19533e8SHuawei Xie 	ret = rte_eal_init(argc, argv);
1631d19533e8SHuawei Xie 	if (ret < 0)
1632d19533e8SHuawei Xie 		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1633d19533e8SHuawei Xie 	argc -= ret;
1634d19533e8SHuawei Xie 	argv += ret;
1635d19533e8SHuawei Xie 
1636d19533e8SHuawei Xie 	/* parse app arguments */
1637d19533e8SHuawei Xie 	ret = us_vhost_parse_args(argc, argv);
1638d19533e8SHuawei Xie 	if (ret < 0)
1639d19533e8SHuawei Xie 		rte_exit(EXIT_FAILURE, "Invalid argument\n");
1640d19533e8SHuawei Xie 
1641b3bee7d8SYong Wang 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
164245657a5cSYuanhan Liu 		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
164345657a5cSYuanhan Liu 
1644d19533e8SHuawei Xie 		if (rte_lcore_is_enabled(lcore_id))
1645d19533e8SHuawei Xie 			lcore_ids[core_id++] = lcore_id;
1646b3bee7d8SYong Wang 	}
1647d19533e8SHuawei Xie 
1648d19533e8SHuawei Xie 	if (rte_lcore_count() > RTE_MAX_LCORE)
1649d19533e8SHuawei Xie 		rte_exit(EXIT_FAILURE, "Not enough cores\n");
1650d19533e8SHuawei Xie 
1651d19533e8SHuawei Xie 	/* Get the number of physical ports. */
1652d9a42a69SThomas Monjalon 	nb_ports = rte_eth_dev_count_avail();
1653d19533e8SHuawei Xie 
1654d19533e8SHuawei Xie 	/*
1655d19533e8SHuawei Xie 	 * Update the global var NUM_PORTS and global array PORTS
1656d19533e8SHuawei Xie 	 * and get value of var VALID_NUM_PORTS according to system ports number
1657d19533e8SHuawei Xie 	 */
1658d19533e8SHuawei Xie 	valid_num_ports = check_ports_num(nb_ports);
1659d19533e8SHuawei Xie 
1660d19533e8SHuawei Xie 	if ((valid_num_ports ==  0) || (valid_num_ports > MAX_SUP_PORTS)) {
1661d19533e8SHuawei Xie 		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
1662d19533e8SHuawei Xie 			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
1663d19533e8SHuawei Xie 		return -1;
1664d19533e8SHuawei Xie 	}
1665d19533e8SHuawei Xie 
1666bdb19b77SYuanhan Liu 	/*
1667bdb19b77SYuanhan Liu 	 * FIXME: here we are trying to allocate mbufs big enough for
1668bdb19b77SYuanhan Liu 	 * @MAX_QUEUES, but the truth is we're never going to use that
1669bdb19b77SYuanhan Liu 	 * many queues here. We probably should only do allocation for
1670bdb19b77SYuanhan Liu 	 * those queues we are going to use.
1671bdb19b77SYuanhan Liu 	 */
1672bdb19b77SYuanhan Liu 	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
1673bdb19b77SYuanhan Liu 			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
1674d19533e8SHuawei Xie 
1675d19533e8SHuawei Xie 	if (vm2vm_mode == VM2VM_HARDWARE) {
1676d19533e8SHuawei Xie 		/* Enable VT loop back to let L2 switch to do it. */
1677d19533e8SHuawei Xie 		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
16781f49ec15SThomas Monjalon 		RTE_LOG(DEBUG, VHOST_CONFIG,
1679d19533e8SHuawei Xie 			"Enable loop back for L2 switch in vmdq.\n");
1680d19533e8SHuawei Xie 	}
1681d19533e8SHuawei Xie 
1682d19533e8SHuawei Xie 	/* initialize all ports */
16838728ccf3SThomas Monjalon 	RTE_ETH_FOREACH_DEV(portid) {
1684d19533e8SHuawei Xie 		/* skip ports that are not enabled */
1685d19533e8SHuawei Xie 		if ((enabled_port_mask & (1 << portid)) == 0) {
1686d19533e8SHuawei Xie 			RTE_LOG(INFO, VHOST_PORT,
1687d19533e8SHuawei Xie 				"Skipping disabled port %d\n", portid);
1688d19533e8SHuawei Xie 			continue;
1689d19533e8SHuawei Xie 		}
1690d19533e8SHuawei Xie 		if (port_init(portid) != 0)
1691d19533e8SHuawei Xie 			rte_exit(EXIT_FAILURE,
1692d19533e8SHuawei Xie 				"Cannot initialize network ports\n");
1693d19533e8SHuawei Xie 	}
1694d19533e8SHuawei Xie 
1695d19533e8SHuawei Xie 	/* Enable stats if the user option is set. */
169667b6d303SRavi Kerur 	if (enable_stats) {
1697fa204854SOlivier Matz 		ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
1698fa204854SOlivier Matz 					print_stats, NULL);
1699fa204854SOlivier Matz 		if (ret < 0)
170067b6d303SRavi Kerur 			rte_exit(EXIT_FAILURE,
170167b6d303SRavi Kerur 				"Cannot create print-stats thread\n");
170267b6d303SRavi Kerur 	}
1703d19533e8SHuawei Xie 
1704d19533e8SHuawei Xie 	/* Launch all data cores. */
1705cb056611SStephen Hemminger 	RTE_LCORE_FOREACH_WORKER(lcore_id)
170668363d85SYuanhan Liu 		rte_eal_remote_launch(switch_worker, NULL, lcore_id);
1707d19533e8SHuawei Xie 
17082345e3beSYuanhan Liu 	if (client_mode)
17092345e3beSYuanhan Liu 		flags |= RTE_VHOST_USER_CLIENT;
17102345e3beSYuanhan Liu 
1711bde19a4dSJiayu Hu 	/* Register vhost user driver to handle vhost messages. */
1712ad0eef4dSJiayu Hu 	for (i = 0; i < nb_sockets; i++) {
17130917f9d1SYuanhan Liu 		char *file = socket_files + i * PATH_MAX;
1714*a68ba8e0SCheng Jiang 
1715abec60e7SCheng Jiang 		if (async_vhost_driver)
1716abec60e7SCheng Jiang 			flags = flags | RTE_VHOST_USER_ASYNC_COPY;
1717abec60e7SCheng Jiang 
17180917f9d1SYuanhan Liu 		ret = rte_vhost_driver_register(file, flags);
1719ad0eef4dSJiayu Hu 		if (ret != 0) {
1720ad0eef4dSJiayu Hu 			unregister_drivers(i);
1721ad0eef4dSJiayu Hu 			rte_exit(EXIT_FAILURE,
1722ad0eef4dSJiayu Hu 				"vhost driver register failure.\n");
1723ad0eef4dSJiayu Hu 		}
1724ca059fa5SYuanhan Liu 
1725ca059fa5SYuanhan Liu 		if (builtin_net_driver)
1726ca059fa5SYuanhan Liu 			rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);
1727ca059fa5SYuanhan Liu 
17280917f9d1SYuanhan Liu 		if (mergeable == 0) {
17290917f9d1SYuanhan Liu 			rte_vhost_driver_disable_features(file,
17300917f9d1SYuanhan Liu 				1ULL << VIRTIO_NET_F_MRG_RXBUF);
17310917f9d1SYuanhan Liu 		}
17320917f9d1SYuanhan Liu 
17330917f9d1SYuanhan Liu 		if (enable_tx_csum == 0) {
17340917f9d1SYuanhan Liu 			rte_vhost_driver_disable_features(file,
17350917f9d1SYuanhan Liu 				1ULL << VIRTIO_NET_F_CSUM);
17360917f9d1SYuanhan Liu 		}
17370917f9d1SYuanhan Liu 
17380917f9d1SYuanhan Liu 		if (enable_tso == 0) {
17390917f9d1SYuanhan Liu 			rte_vhost_driver_disable_features(file,
17400917f9d1SYuanhan Liu 				1ULL << VIRTIO_NET_F_HOST_TSO4);
17410917f9d1SYuanhan Liu 			rte_vhost_driver_disable_features(file,
17420917f9d1SYuanhan Liu 				1ULL << VIRTIO_NET_F_HOST_TSO6);
17430917f9d1SYuanhan Liu 			rte_vhost_driver_disable_features(file,
17440917f9d1SYuanhan Liu 				1ULL << VIRTIO_NET_F_GUEST_TSO4);
17450917f9d1SYuanhan Liu 			rte_vhost_driver_disable_features(file,
17460917f9d1SYuanhan Liu 				1ULL << VIRTIO_NET_F_GUEST_TSO6);
17470917f9d1SYuanhan Liu 		}
17480917f9d1SYuanhan Liu 
17490917f9d1SYuanhan Liu 		if (promiscuous) {
17500917f9d1SYuanhan Liu 			rte_vhost_driver_enable_features(file,
17510917f9d1SYuanhan Liu 				1ULL << VIRTIO_NET_F_CTRL_RX);
17520917f9d1SYuanhan Liu 		}
1753d19533e8SHuawei Xie 
175493433b63SYuanhan Liu 		ret = rte_vhost_driver_callback_register(file,
175593433b63SYuanhan Liu 			&virtio_net_device_ops);
175693433b63SYuanhan Liu 		if (ret != 0) {
175793433b63SYuanhan Liu 			rte_exit(EXIT_FAILURE,
175893433b63SYuanhan Liu 				"failed to register vhost driver callbacks.\n");
175993433b63SYuanhan Liu 		}
1760af147591SYuanhan Liu 
1761af147591SYuanhan Liu 		if (rte_vhost_driver_start(file) < 0) {
1762af147591SYuanhan Liu 			rte_exit(EXIT_FAILURE,
1763af147591SYuanhan Liu 				"failed to start vhost driver.\n");
1764af147591SYuanhan Liu 		}
176593433b63SYuanhan Liu 	}
1766d19533e8SHuawei Xie 
1767cb056611SStephen Hemminger 	RTE_LCORE_FOREACH_WORKER(lcore_id)
1768af147591SYuanhan Liu 		rte_eal_wait_lcore(lcore_id);
1769af147591SYuanhan Liu 
1770d19533e8SHuawei Xie 	return 0;
1771d19533e8SHuawei Xie 
1772d19533e8SHuawei Xie }
1773