/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <inttypes.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/socket.h>

#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_ethdev_vdev.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>

#include "virtio_ethdev.h"
#include "virtio_logs.h"
#include "virtio_pci.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"

/* Fetch the virtio_user_dev back-pointer stored in the generic virtio_hw. */
#define virtio_user_get_dev(hw) \
	((struct virtio_user_dev *)(hw)->virtio_user_dev)

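/*
 * In server mode the vhost-user backend may come and go. When the listen
 * socket becomes readable again, accept the new connection, re-negotiate
 * features, restart the device and move the LSC interrupt handler over to
 * the new connection fd.
 */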
static int
virtio_user_server_reconnect(struct virtio_user_dev *dev)
{
	int ret;
	int connectfd;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];

	connectfd = accept(dev->listenfd, NULL, NULL);
	if (connectfd < 0)
		return -1;

	dev->vhostfd = connectfd;
	if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
				   &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "get_features failed: %s",
			     strerror(errno));
		return -1;
	}

	dev->device_features |= dev->frontend_features;

	/* mask out features the vhost-user backend does not support */
	dev->device_features &= ~(dev->unsupported_features);

	dev->features &= dev->device_features;

	ret = virtio_user_start_device(dev);
	if (ret < 0)
		return -1;

	if (dev->queue_pairs > 1) {
		ret = virtio_user_handle_mq(dev, dev->queue_pairs);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Failed to enable multi-queue pairs!");
			return -1;
		}
	}
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		if (rte_intr_disable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt disable failed");
			return -1;
		}
		/* Re-register the LSC callback on the new connection fd. */
		rte_intr_callback_unregister(eth_dev->intr_handle,
					     virtio_interrupt_handler,
					     eth_dev);
		eth_dev->intr_handle->fd = connectfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);

		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -1;
		}
	}
	PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeded!");
	return 0;
}

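/*
 * Executed from an EAL alarm rather than from the interrupt thread itself:
 * an interrupt callback cannot unregister itself while it is running, so
 * virtio_user_read_dev_config() defers the teardown of a dead connection
 * to this handler.
 */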
static void
virtio_user_delayed_handler(void *param)
{
	struct virtio_hw *hw = (struct virtio_hw *)param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id];
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (rte_intr_disable(eth_dev->intr_handle) < 0) {
		PMD_DRV_LOG(ERR, "interrupt disable failed");
		return;
	}
	rte_intr_callback_unregister(eth_dev->intr_handle,
				     virtio_interrupt_handler, eth_dev);
	if (dev->is_server) {
		if (dev->vhostfd >= 0) {
			close(dev->vhostfd);
			dev->vhostfd = -1;
		}
		/* Wait for a new client: the LSC interrupt now watches
		 * the listen fd.
		 */
		eth_dev->intr_handle->fd = dev->listenfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);
		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return;
		}
	}
}

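/*
 * Emulate device config-space reads. Reading the status field doubles as
 * link-state detection: peeking at the vhost-user socket tells us whether
 * the backend is still alive.
 */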
static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
		     void *dst, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (offset == offsetof(struct virtio_net_config, mac) &&
	    length == ETHER_ADDR_LEN) {
		for (i = 0; i < ETHER_ADDR_LEN; ++i)
			((uint8_t *)dst)[i] = dev->mac_addr[i];
		return;
	}

	if (offset == offsetof(struct virtio_net_config, status)) {
		char buf[128];

		if (dev->vhostfd >= 0) {
			int r;
			int flags;

			flags = fcntl(dev->vhostfd, F_GETFL);
			if (fcntl(dev->vhostfd, F_SETFL,
					flags | O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag");
				return;
			}
			/* A zero-byte read means the peer closed the socket. */
			r = recv(dev->vhostfd, buf, sizeof(buf), MSG_PEEK);
			if (r == 0 || (r < 0 && errno != EAGAIN)) {
				dev->status &= (~VIRTIO_NET_S_LINK_UP);
				PMD_DRV_LOG(ERR, "virtio-user port %u is down",
					    hw->port_id);

				/* This function could be called in the process
				 * of interrupt handling, so the callback cannot
				 * be unregistered here; set an alarm to do it.
				 */
				rte_eal_alarm_set(1,
						  virtio_user_delayed_handler,
						  (void *)hw);
			} else {
				dev->status |= VIRTIO_NET_S_LINK_UP;
			}
			if (fcntl(dev->vhostfd, F_SETFL,
					flags & ~O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
				return;
			}
		} else if (dev->is_server) {
			dev->status &= (~VIRTIO_NET_S_LINK_UP);
			if (virtio_user_server_reconnect(dev) >= 0)
				dev->status |= VIRTIO_NET_S_LINK_UP;
		}

		*(uint16_t *)dst = dev->status;
	}

	if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
		*(uint16_t *)dst = dev->max_queue_pairs;
}

static void
virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
		      const void *src, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if ((offset == offsetof(struct virtio_net_config, mac)) &&
	    (length == ETHER_ADDR_LEN))
		for (i = 0; i < ETHER_ADDR_LEN; ++i)
			dev->mac_addr[i] = ((const uint8_t *)src)[i];
	else
		PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
			    offset, length);
}

static void
virtio_user_reset(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_stop_device(dev);
}

static void
virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_start_device(dev);
	else if (status == VIRTIO_CONFIG_STATUS_RESET)
		virtio_user_reset(hw);
	dev->status = status;
}

static uint8_t
virtio_user_get_status(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	return dev->status;
}

static uint64_t
virtio_user_get_features(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* mask out feature bits the PMD does not support as a guest */
	return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
}

static void
virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	dev->features = features & dev->device_features;
}

static uint8_t
virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
{
	/* rxq interrupts and the config interrupt are separated in
	 * virtio-user, so here we only report config changes.
	 */
	return VIRTIO_PCI_ISR_CONFIG;
}

static uint16_t
virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
		    uint16_t vec __rte_unused)
{
	return 0;
}

static uint16_t
virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
			  struct virtqueue *vq __rte_unused,
			  uint16_t vec)
{
	/* pretend we have done that */
	return vec;
}

/* This function returns the queue size, i.e. the number of descriptors, of a
 * specified queue. Do not confuse it with VHOST_USER_GET_QUEUE_NUM, which
 * returns the maximum number of supported queues.
 */
static uint16_t
virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* Currently, every queue has the same queue size */
	return dev->queue_size;
}

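/*
 * Lay out the packed ring in the memory the virtqueue allocated: the
 * descriptor array sits at the base, the driver event suppression structure
 * right after the last descriptor, and the device event suppression
 * structure at the next VIRTIO_PCI_VRING_ALIGN boundary.
 */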
static void
virtio_user_setup_queue_packed(struct virtqueue *vq,
			       struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	struct vring_packed *vring;
	uint64_t desc_addr;
	uint64_t avail_addr;
	uint64_t used_addr;
	uint16_t i;

	vring = &dev->packed_vrings[queue_idx];
	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries *
		sizeof(struct vring_packed_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr +
			   sizeof(struct vring_packed_desc_event),
			   VIRTIO_PCI_VRING_ALIGN);
	vring->num = vq->vq_nentries;
	vring->desc_packed = (void *)(uintptr_t)desc_addr;
	vring->driver_event = (void *)(uintptr_t)avail_addr;
	vring->device_event = (void *)(uintptr_t)used_addr;
	dev->packed_queues[queue_idx].avail_wrap_counter = true;
	dev->packed_queues[queue_idx].used_wrap_counter = true;

	for (i = 0; i < vring->num; i++)
		vring->desc_packed[i].flags = 0;
}

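/*
 * Same exercise for a split ring: descriptor table at the base, the avail
 * ring right behind it, and the used ring at the next VIRTIO_PCI_VRING_ALIGN
 * boundary, mirroring the legacy virtio layout.
 */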
static void
virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	uint64_t desc_addr, avail_addr, used_addr;

	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	dev->vrings[queue_idx].num = vq->vq_nentries;
	dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
	dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
	dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
}

static int
virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (vtpci_packed_queue(hw))
		virtio_user_setup_queue_packed(vq, dev);
	else
		virtio_user_setup_queue_split(vq, dev);

	return 0;
}

static void
virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	/* For legacy devices, writing 0 to the VIRTIO_PCI_QUEUE_PFN port makes
	 * QEMU stop the ioeventfds and reset the status of the device.
	 * For modern devices, the queue's desc, avail and used addresses in
	 * the PCI BAR are set to 0, and no further behavior is seen in QEMU.
	 *
	 * Here we just care about what information to deliver to vhost-user
	 * or vhost-kernel, so we just close the ioeventfds for now.
	 */
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	close(dev->callfds[vq->vq_queue_index]);
	close(dev->kickfds[vq->vq_queue_index]);
}

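/*
 * Control-queue commands are handled in-process instead of being kicked to
 * the backend; everything else is signalled by writing to the queue's kick
 * eventfd.
 */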
static void
virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t buf = 1;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (hw->cvq && (hw->cvq->vq == vq)) {
		if (vtpci_packed_queue(vq->hw))
			virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
		else
			virtio_user_handle_cq(dev, vq->vq_queue_index);
		return;
	}

	if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
		PMD_DRV_LOG(ERR, "failed to kick backend: %s",
			    strerror(errno));
}

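/* The vtpci_ops implementation backing virtio-user ports; it is plugged into
 * virtio_hw_internal when the ethdev is allocated.
 */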
const struct virtio_pci_ops virtio_user_ops = {
	.read_dev_cfg	= virtio_user_read_dev_config,
	.write_dev_cfg	= virtio_user_write_dev_config,
	.get_status	= virtio_user_get_status,
	.set_status	= virtio_user_set_status,
	.get_features	= virtio_user_get_features,
	.set_features	= virtio_user_set_features,
	.get_isr	= virtio_user_get_isr,
	.set_config_irq	= virtio_user_set_config_irq,
	.set_queue_irq	= virtio_user_set_queue_irq,
	.get_queue_num	= virtio_user_get_queue_num,
	.setup_queue	= virtio_user_setup_queue,
	.del_queue	= virtio_user_del_queue,
	.notify_queue	= virtio_user_notify_queue,
};

static const char *valid_args[] = {
#define VIRTIO_USER_ARG_QUEUES_NUM     "queues"
	VIRTIO_USER_ARG_QUEUES_NUM,
#define VIRTIO_USER_ARG_CQ_NUM         "cq"
	VIRTIO_USER_ARG_CQ_NUM,
#define VIRTIO_USER_ARG_MAC            "mac"
	VIRTIO_USER_ARG_MAC,
#define VIRTIO_USER_ARG_PATH           "path"
	VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE     "queue_size"
	VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
	VIRTIO_USER_ARG_INTERFACE_NAME,
#define VIRTIO_USER_ARG_SERVER_MODE    "server"
	VIRTIO_USER_ARG_SERVER_MODE,
#define VIRTIO_USER_ARG_MRG_RXBUF      "mrg_rxbuf"
	VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER       "in_order"
	VIRTIO_USER_ARG_IN_ORDER,
#define VIRTIO_USER_ARG_PACKED_VQ      "packed_vq"
	VIRTIO_USER_ARG_PACKED_VQ,
	NULL
};

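/*
 * A typical devargs string consuming these keys (the socket path and values
 * are just examples):
 *
 *   --vdev=net_virtio_user0,path=/tmp/vhost-user.sock,queues=2,queue_size=1024
 */
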
#define VIRTIO_USER_DEF_CQ_EN	0
#define VIRTIO_USER_DEF_Q_NUM	1
#define VIRTIO_USER_DEF_Q_SZ	256
#define VIRTIO_USER_DEF_SERVER_MODE	0

/* rte_kvargs_process() callback: duplicate the value string. */
static int
get_string_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}

/* rte_kvargs_process() callback: parse the value as a uint64_t. */
static int
get_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(uint64_t *)extra_args = strtoull(value, NULL, 0);

	return 0;
}

static struct rte_vdev_driver virtio_user_driver;

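/*
 * Allocate the ethdev with a virtio_hw as its private data, hang a zeroed
 * virtio_user_dev off it and point the per-port vtpci_ops at the virtio-user
 * implementation above.
 */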
static struct rte_eth_dev *
virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	struct rte_eth_dev_data *data;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*hw));
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
		return NULL;
	}

	data = eth_dev->data;
	hw = eth_dev->data->dev_private;

	dev = rte_zmalloc(NULL, sizeof(*dev), 0);
	if (!dev) {
		PMD_INIT_LOG(ERR, "malloc virtio_user_dev failed");
		rte_eth_dev_release_port(eth_dev);
		return NULL;
	}

	hw->port_id = data->port_id;
	dev->port_id = data->port_id;
	virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
	/*
	 * MSI-X is required to enable LSC (see virtio_init_device).
	 * Here we just pretend that we support MSI-X.
	 */
	hw->use_msix = 1;
	hw->modern   = 0;
	hw->use_simple_rx = 0;
	hw->use_inorder_rx = 0;
	hw->use_inorder_tx = 0;
	hw->virtio_user_dev = dev;
	return eth_dev;
}

static void
virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_dev_data *data = eth_dev->data;
	struct virtio_hw *hw = data->dev_private;

	rte_free(hw->virtio_user_dev);
	rte_eth_dev_release_port(eth_dev);
}

/* Device initialization routine. Invoked once for each virtio vdev at
 * EAL init time, see rte_bus_probe().
 * Returns 0 on success.
 */
static int
virtio_user_pmd_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	struct virtio_hw *hw;
	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
	uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
	uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
	uint64_t mrg_rxbuf = 1;
	uint64_t in_order = 1;
	uint64_t packed_vq = 0;
	char *path = NULL;
	char *ifname = NULL;
	char *mac_addr = NULL;
	int ret = -1;

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_args);
	if (!kvlist) {
		PMD_INIT_LOG(ERR, "error when parsing parameters");
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
				       &get_string_arg, &path) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_PATH);
			goto end;
		}
	} else {
		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
			     VIRTIO_USER_ARG_PATH);
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
		if (is_vhost_user_by_type(path)) {
			PMD_INIT_LOG(ERR,
				"arg %s applies only to vhost-kernel backend",
				VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}

		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
				       &get_string_arg, &ifname) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
				       &get_string_arg, &mac_addr) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_MAC);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
				       &get_integer_arg, &queue_size) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_QUEUE_SIZE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
				       &get_integer_arg, &queues) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_QUEUES_NUM);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
				       &get_integer_arg, &server_mode) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_SERVER_MODE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
				       &get_integer_arg, &cq) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_CQ_NUM);
			goto end;
		}
	} else if (queues > 1) {
		/* multi-queue needs the control queue; enable it implicitly */
		cq = 1;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ,
				       &get_integer_arg, &packed_vq) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_PACKED_VQ);
			goto end;
		}
	}

	if (queues > 1 && cq == 0) {
		PMD_INIT_LOG(ERR, "multi-queue requires a control queue");
		goto end;
	}

	if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
		PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
			VIRTIO_USER_ARG_QUEUES_NUM, queues,
			VIRTIO_MAX_VIRTQUEUE_PAIRS);
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
				       &get_integer_arg, &mrg_rxbuf) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_MRG_RXBUF);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER,
				       &get_integer_arg, &in_order) < 0) {
			PMD_INIT_LOG(ERR, "failed to parse %s",
				     VIRTIO_USER_ARG_IN_ORDER);
			goto end;
		}
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		struct virtio_user_dev *vu_dev;

		eth_dev = virtio_user_eth_dev_alloc(dev);
		if (!eth_dev) {
			PMD_INIT_LOG(ERR, "virtio_user failed to alloc device");
			goto end;
		}

		hw = eth_dev->data->dev_private;
		vu_dev = virtio_user_get_dev(hw);
		if (server_mode == 1)
			vu_dev->is_server = true;
		else
			vu_dev->is_server = false;
		if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
				 queue_size, mac_addr, &ifname, mrg_rxbuf,
				 in_order, packed_vq) < 0) {
			PMD_INIT_LOG(ERR, "virtio_user_dev_init failed");
			virtio_user_eth_dev_free(eth_dev);
			goto end;
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(
					rte_vdev_device_name(dev));
		if (!eth_dev)
			goto end;
	}

	/* previously called by rte_pci_probe() for physical devices */
	if (eth_virtio_dev_init(eth_dev) < 0) {
		PMD_INIT_LOG(ERR, "eth_virtio_dev_init failed");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	rte_eth_dev_probing_finish(eth_dev);
	ret = 0;

end:
	if (kvlist)
		rte_kvargs_free(kvlist);
	if (path)
		free(path);
	if (mac_addr)
		free(mac_addr);
	if (ifname)
		free(ifname);
	return ret;
}

static int
virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
	eth_dev = rte_eth_dev_allocated(name);
	if (!eth_dev)
		return -ENODEV;

	/* make sure the device is stopped, queues freed */
	rte_eth_dev_close(eth_dev->data->port_id);

	hw = eth_dev->data->dev_private;
	dev = hw->virtio_user_dev;
	virtio_user_dev_uninit(dev);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver virtio_user_driver = {
	.probe = virtio_user_pmd_probe,
	.remove = virtio_user_pmd_remove,
};

RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
	"path=<path> "
	"mac=<mac addr> "
	"cq=<int> "
	"queue_size=<int> "
	"queues=<int> "
	"iface=<string> "
	"server=<0|1> "
	"mrg_rxbuf=<0|1> "
	"in_order=<0|1> "
	"packed_vq=<0|1>");
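
/*
 * A minimal sketch of attaching a virtio-user port from application code at
 * runtime instead of via the --vdev EAL option; the device name and socket
 * path are example values:
 *
 *	if (rte_vdev_init("net_virtio_user0",
 *			  "path=/tmp/vhost-user.sock,queues=1") < 0)
 *		rte_exit(EXIT_FAILURE, "cannot create virtio-user port\n");
 */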