xref: /dpdk/drivers/net/virtio/virtio_user_ethdev.c (revision 08aa6271c86a561b66c6dd91f9a54fa2f12bc859)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <sys/types.h>
7 #include <unistd.h>
8 #include <fcntl.h>
9 #include <sys/socket.h>
10 
11 #include <rte_malloc.h>
12 #include <rte_kvargs.h>
13 #include <rte_ethdev_vdev.h>
14 #include <rte_bus_vdev.h>
15 #include <rte_alarm.h>
16 
17 #include "virtio_ethdev.h"
18 #include "virtio_logs.h"
19 #include "virtio_pci.h"
20 #include "virtqueue.h"
21 #include "virtio_rxtx.h"
22 #include "virtio_user/virtio_user_dev.h"
23 
24 #define virtio_user_get_dev(hw) \
25 	((struct virtio_user_dev *)(hw)->virtio_user_dev)
26 
27 static int
28 virtio_user_server_reconnect(struct virtio_user_dev *dev)
29 {
30 	int ret;
31 	int flag;
32 	int connectfd;
33 	uint64_t features = dev->device_features;
34 	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
35 
36 	connectfd = accept(dev->listenfd, NULL, NULL);
37 	if (connectfd < 0)
38 		return -1;
39 
40 	dev->vhostfd = connectfd;
41 	if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
42 				   &dev->device_features) < 0) {
43 		PMD_INIT_LOG(ERR, "get_features failed: %s",
44 			     strerror(errno));
45 		return -1;
46 	}
47 
48 	features &= ~dev->device_features;
49 	/* For following bits, vhost-user doesn't really need to know */
50 	features &= ~(1ull << VIRTIO_NET_F_MAC);
51 	features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
52 	features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
53 	features &= ~(1ull << VIRTIO_NET_F_STATUS);
54 	if (features)
55 		PMD_INIT_LOG(ERR, "WARNING: Some features 0x%" PRIx64 " are not supported by vhost-user!",
56 			     features);
57 
58 	dev->features &= dev->device_features;
59 
60 	flag = fcntl(connectfd, F_GETFD);
61 	fcntl(connectfd, F_SETFL, flag | O_NONBLOCK);
62 
63 	ret = virtio_user_start_device(dev);
64 	if (ret < 0)
65 		return -1;
66 
67 	if (dev->queue_pairs > 1) {
68 		ret = virtio_user_handle_mq(dev, dev->queue_pairs);
69 		if (ret != 0) {
70 			PMD_INIT_LOG(ERR, "Fails to enable multi-queue pairs!");
71 			return -1;
72 		}
73 	}
74 	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
75 		if (rte_intr_disable(eth_dev->intr_handle) < 0) {
76 			PMD_DRV_LOG(ERR, "interrupt disable failed");
77 			return -1;
78 		}
79 		rte_intr_callback_unregister(eth_dev->intr_handle,
80 					     virtio_interrupt_handler,
81 					     eth_dev);
82 		eth_dev->intr_handle->fd = connectfd;
83 		rte_intr_callback_register(eth_dev->intr_handle,
84 					   virtio_interrupt_handler, eth_dev);
85 
86 		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
87 			PMD_DRV_LOG(ERR, "interrupt enable failed");
88 			return -1;
89 		}
90 	}
91 	PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
92 	return 0;
93 }
94 
/* Alarm callback scheduled by virtio_user_read_dev_config() when the
 * vhost-user connection is detected as dead.  It runs outside interrupt
 * context, so the interrupt callback can safely be unregistered here
 * (doing so from within the interrupt handler itself is not allowed).
 */
static void
virtio_user_delayed_handler(void *param)
{
	struct virtio_hw *hw = (struct virtio_hw *)param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id];
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (rte_intr_disable(eth_dev->intr_handle) < 0) {
		PMD_DRV_LOG(ERR, "interrupt disable failed");
		return;
	}
	rte_intr_callback_unregister(eth_dev->intr_handle,
				     virtio_interrupt_handler, eth_dev);
	if (dev->is_server) {
		/* Drop the dead connection... */
		if (dev->vhostfd >= 0) {
			close(dev->vhostfd);
			dev->vhostfd = -1;
		}
		/* ...and re-arm the interrupt on the listen socket so a
		 * client reconnection wakes us up again.
		 */
		eth_dev->intr_handle->fd = dev->listenfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);
		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return;
		}
	}
}
122 
/* Device-config-space read op (virtio_pci_ops.read_dev_cfg).
 *
 * There is no real PCI config space behind a virtio-user device, so reads
 * of struct virtio_net_config fields (mac, status, max_virtqueue_pairs)
 * are answered from driver-side state.  Reading the status field doubles
 * as the link-state probe for LSC support.
 */
static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
		     void *dst, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* MAC address lives in dev->mac_addr. */
	if (offset == offsetof(struct virtio_net_config, mac) &&
	    length == ETHER_ADDR_LEN) {
		for (i = 0; i < ETHER_ADDR_LEN; ++i)
			((uint8_t *)dst)[i] = dev->mac_addr[i];
		return;
	}

	if (offset == offsetof(struct virtio_net_config, status)) {
		char buf[128];

		if (dev->vhostfd >= 0) {
			int r;
			int flags;

			/* Probe backend liveness with a non-blocking
			 * MSG_PEEK recv: 0 means the peer closed the
			 * socket; EAGAIN means it is alive but idle.
			 */
			flags = fcntl(dev->vhostfd, F_GETFL);
			if (fcntl(dev->vhostfd, F_SETFL,
					flags | O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag");
				return;
			}
			r = recv(dev->vhostfd, buf, 128, MSG_PEEK);
			if (r == 0 || (r < 0 && errno != EAGAIN)) {
				dev->status &= (~VIRTIO_NET_S_LINK_UP);
				PMD_DRV_LOG(ERR, "virtio-user port %u is down",
					    hw->port_id);

				/* This function could be called in the process
				 * of interrupt handling, callback cannot be
				 * unregistered here, set an alarm to do it.
				 */
				rte_eal_alarm_set(1,
						  virtio_user_delayed_handler,
						  (void *)hw);
			} else {
				dev->status |= VIRTIO_NET_S_LINK_UP;
			}
			/* Restore the original file status flags. */
			if (fcntl(dev->vhostfd, F_SETFL,
					flags & ~O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
				return;
			}
		} else if (dev->is_server) {
			/* Server mode with no client connected: try to
			 * accept a reconnection; link comes up only if it
			 * succeeds.
			 */
			dev->status &= (~VIRTIO_NET_S_LINK_UP);
			if (virtio_user_server_reconnect(dev) >= 0)
				dev->status |= VIRTIO_NET_S_LINK_UP;
		}

		*(uint16_t *)dst = dev->status;
	}

	if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
		*(uint16_t *)dst = dev->max_queue_pairs;
}
183 
184 static void
185 virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
186 		      const void *src, int length)
187 {
188 	int i;
189 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
190 
191 	if ((offset == offsetof(struct virtio_net_config, mac)) &&
192 	    (length == ETHER_ADDR_LEN))
193 		for (i = 0; i < ETHER_ADDR_LEN; ++i)
194 			dev->mac_addr[i] = ((const uint8_t *)src)[i];
195 	else
196 		PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
197 			    offset, length);
198 }
199 
200 static void
201 virtio_user_reset(struct virtio_hw *hw)
202 {
203 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
204 
205 	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
206 		virtio_user_stop_device(dev);
207 }
208 
209 static void
210 virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
211 {
212 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
213 
214 	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
215 		virtio_user_start_device(dev);
216 	else if (status == VIRTIO_CONFIG_STATUS_RESET)
217 		virtio_user_reset(hw);
218 	dev->status = status;
219 }
220 
221 static uint8_t
222 virtio_user_get_status(struct virtio_hw *hw)
223 {
224 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
225 
226 	return dev->status;
227 }
228 
229 static uint64_t
230 virtio_user_get_features(struct virtio_hw *hw)
231 {
232 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
233 
234 	/* unmask feature bits defined in vhost user protocol */
235 	return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
236 }
237 
238 static void
239 virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
240 {
241 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
242 
243 	dev->features = features & dev->device_features;
244 }
245 
246 static uint8_t
247 virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
248 {
249 	/* rxq interrupts and config interrupt are separated in virtio-user,
250 	 * here we only report config change.
251 	 */
252 	return VIRTIO_PCI_ISR_CONFIG;
253 }
254 
255 static uint16_t
256 virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
257 		    uint16_t vec __rte_unused)
258 {
259 	return 0;
260 }
261 
262 static uint16_t
263 virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
264 			  struct virtqueue *vq __rte_unused,
265 			  uint16_t vec)
266 {
267 	/* pretend we have done that */
268 	return vec;
269 }
270 
271 /* This function is to get the queue size, aka, number of descs, of a specified
272  * queue. Different with the VHOST_USER_GET_QUEUE_NUM, which is used to get the
273  * max supported queues.
274  */
275 static uint16_t
276 virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
277 {
278 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
279 
280 	/* Currently, each queue has same queue size */
281 	return dev->queue_size;
282 }
283 
284 static int
285 virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
286 {
287 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
288 	uint16_t queue_idx = vq->vq_queue_index;
289 	uint64_t desc_addr, avail_addr, used_addr;
290 
291 	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
292 	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
293 	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
294 							 ring[vq->vq_nentries]),
295 				   VIRTIO_PCI_VRING_ALIGN);
296 
297 	dev->vrings[queue_idx].num = vq->vq_nentries;
298 	dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
299 	dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
300 	dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
301 
302 	return 0;
303 }
304 
305 static void
306 virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
307 {
308 	/* For legacy devices, write 0 to VIRTIO_PCI_QUEUE_PFN port, QEMU
309 	 * correspondingly stops the ioeventfds, and reset the status of
310 	 * the device.
311 	 * For modern devices, set queue desc, avail, used in PCI bar to 0,
312 	 * not see any more behavior in QEMU.
313 	 *
314 	 * Here we just care about what information to deliver to vhost-user
315 	 * or vhost-kernel. So we just close ioeventfd for now.
316 	 */
317 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
318 
319 	close(dev->callfds[vq->vq_queue_index]);
320 	close(dev->kickfds[vq->vq_queue_index]);
321 }
322 
323 static void
324 virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
325 {
326 	uint64_t buf = 1;
327 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
328 
329 	if (hw->cvq && (hw->cvq->vq == vq)) {
330 		virtio_user_handle_cq(dev, vq->vq_queue_index);
331 		return;
332 	}
333 
334 	if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
335 		PMD_DRV_LOG(ERR, "failed to kick backend: %s",
336 			    strerror(errno));
337 }
338 
/* virtio_pci_ops implementation backed by a vhost (user/kernel) backend
 * instead of real PCI registers; installed per port in
 * virtio_user_eth_dev_alloc().
 */
const struct virtio_pci_ops virtio_user_ops = {
	.read_dev_cfg	= virtio_user_read_dev_config,
	.write_dev_cfg	= virtio_user_write_dev_config,
	.reset		= virtio_user_reset,
	.get_status	= virtio_user_get_status,
	.set_status	= virtio_user_set_status,
	.get_features	= virtio_user_get_features,
	.set_features	= virtio_user_set_features,
	.get_isr	= virtio_user_get_isr,
	.set_config_irq	= virtio_user_set_config_irq,
	.set_queue_irq	= virtio_user_set_queue_irq,
	.get_queue_num	= virtio_user_get_queue_num,
	.setup_queue	= virtio_user_setup_queue,
	.del_queue	= virtio_user_del_queue,
	.notify_queue	= virtio_user_notify_queue,
};
355 
/* Device arguments accepted by this vdev; NULL-terminated for
 * rte_kvargs_parse().  The macros double as the canonical key names used
 * throughout probe().
 */
static const char *valid_args[] = {
#define VIRTIO_USER_ARG_QUEUES_NUM     "queues"
	VIRTIO_USER_ARG_QUEUES_NUM,
#define VIRTIO_USER_ARG_CQ_NUM         "cq"
	VIRTIO_USER_ARG_CQ_NUM,
#define VIRTIO_USER_ARG_MAC            "mac"
	VIRTIO_USER_ARG_MAC,
#define VIRTIO_USER_ARG_PATH           "path"
	VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE     "queue_size"
	VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
	VIRTIO_USER_ARG_INTERFACE_NAME,
#define VIRTIO_USER_ARG_SERVER_MODE "server"
	VIRTIO_USER_ARG_SERVER_MODE,
	NULL
};
373 
374 #define VIRTIO_USER_DEF_CQ_EN	0
375 #define VIRTIO_USER_DEF_Q_NUM	1
376 #define VIRTIO_USER_DEF_Q_SZ	256
377 #define VIRTIO_USER_DEF_SERVER_MODE	0
378 
379 static int
380 get_string_arg(const char *key __rte_unused,
381 	       const char *value, void *extra_args)
382 {
383 	if (!value || !extra_args)
384 		return -EINVAL;
385 
386 	*(char **)extra_args = strdup(value);
387 
388 	if (!*(char **)extra_args)
389 		return -ENOMEM;
390 
391 	return 0;
392 }
393 
394 static int
395 get_integer_arg(const char *key __rte_unused,
396 		const char *value, void *extra_args)
397 {
398 	if (!value || !extra_args)
399 		return -EINVAL;
400 
401 	*(uint64_t *)extra_args = strtoull(value, NULL, 0);
402 
403 	return 0;
404 }
405 
406 static struct rte_vdev_driver virtio_user_driver;
407 
/* Allocate the ethdev, its virtio_hw private data, and the backing
 * virtio_user_dev for a new virtio-user vdev, and wire up the
 * virtio_user_ops vtable for this port.
 *
 * Returns the new ethdev, or NULL on allocation failure (everything
 * already allocated is released).
 */
static struct rte_eth_dev *
virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	struct rte_eth_dev_data *data;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*hw));
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
		return NULL;
	}

	data = eth_dev->data;
	hw = eth_dev->data->dev_private;

	dev = rte_zmalloc(NULL, sizeof(*dev), 0);
	if (!dev) {
		PMD_INIT_LOG(ERR, "malloc virtio_user_dev failed");
		/* NOTE(review): hw is this port's dev_private allocated by
		 * rte_eth_vdev_allocate(); freeing it here after
		 * rte_eth_dev_release_port() assumes release_port does not
		 * free dev_private itself — TODO confirm against the ethdev
		 * version in use to rule out a double free.
		 */
		rte_eth_dev_release_port(eth_dev);
		rte_free(hw);
		return NULL;
	}

	hw->port_id = data->port_id;
	dev->port_id = data->port_id;
	virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
	/*
	 * MSIX is required to enable LSC (see virtio_init_device).
	 * Here just pretend that we support msix.
	 */
	hw->use_msix = 1;
	hw->modern   = 0;
	hw->use_simple_rx = 0;
	hw->use_simple_tx = 0;
	hw->virtio_user_dev = dev;
	return eth_dev;
}
447 
448 static void
449 virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
450 {
451 	struct rte_eth_dev_data *data = eth_dev->data;
452 	struct virtio_hw *hw = data->dev_private;
453 
454 	rte_free(hw->virtio_user_dev);
455 	rte_free(hw);
456 	rte_eth_dev_release_port(eth_dev);
457 }
458 
459 /* Dev initialization routine. Invoked once for each virtio vdev at
460  * EAL init time, see rte_bus_probe().
461  * Returns 0 on success.
462  */
463 static int
464 virtio_user_pmd_probe(struct rte_vdev_device *dev)
465 {
466 	struct rte_kvargs *kvlist = NULL;
467 	struct rte_eth_dev *eth_dev;
468 	struct virtio_hw *hw;
469 	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
470 	uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
471 	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
472 	uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
473 	char *path = NULL;
474 	char *ifname = NULL;
475 	char *mac_addr = NULL;
476 	int ret = -1;
477 
478 	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_args);
479 	if (!kvlist) {
480 		PMD_INIT_LOG(ERR, "error when parsing param");
481 		goto end;
482 	}
483 
484 	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
485 		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
486 				       &get_string_arg, &path) < 0) {
487 			PMD_INIT_LOG(ERR, "error to parse %s",
488 				     VIRTIO_USER_ARG_PATH);
489 			goto end;
490 		}
491 	} else {
492 		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
493 			  VIRTIO_USER_ARG_QUEUE_SIZE);
494 		goto end;
495 	}
496 
497 	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
498 		if (is_vhost_user_by_type(path)) {
499 			PMD_INIT_LOG(ERR,
500 				"arg %s applies only to vhost-kernel backend",
501 				VIRTIO_USER_ARG_INTERFACE_NAME);
502 			goto end;
503 		}
504 
505 		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
506 				       &get_string_arg, &ifname) < 0) {
507 			PMD_INIT_LOG(ERR, "error to parse %s",
508 				     VIRTIO_USER_ARG_INTERFACE_NAME);
509 			goto end;
510 		}
511 	}
512 
513 	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
514 		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
515 				       &get_string_arg, &mac_addr) < 0) {
516 			PMD_INIT_LOG(ERR, "error to parse %s",
517 				     VIRTIO_USER_ARG_MAC);
518 			goto end;
519 		}
520 	}
521 
522 	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
523 		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
524 				       &get_integer_arg, &queue_size) < 0) {
525 			PMD_INIT_LOG(ERR, "error to parse %s",
526 				     VIRTIO_USER_ARG_QUEUE_SIZE);
527 			goto end;
528 		}
529 	}
530 
531 	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
532 		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
533 				       &get_integer_arg, &queues) < 0) {
534 			PMD_INIT_LOG(ERR, "error to parse %s",
535 				     VIRTIO_USER_ARG_QUEUES_NUM);
536 			goto end;
537 		}
538 	}
539 
540 	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
541 		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
542 				       &get_integer_arg, &server_mode) < 0) {
543 			PMD_INIT_LOG(ERR, "error to parse %s",
544 				     VIRTIO_USER_ARG_SERVER_MODE);
545 			goto end;
546 		}
547 	}
548 
549 	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
550 		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
551 				       &get_integer_arg, &cq) < 0) {
552 			PMD_INIT_LOG(ERR, "error to parse %s",
553 				     VIRTIO_USER_ARG_CQ_NUM);
554 			goto end;
555 		}
556 	} else if (queues > 1) {
557 		cq = 1;
558 	}
559 
560 	if (queues > 1 && cq == 0) {
561 		PMD_INIT_LOG(ERR, "multi-q requires ctrl-q");
562 		goto end;
563 	}
564 
565 	if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
566 		PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
567 			VIRTIO_USER_ARG_QUEUES_NUM, queues,
568 			VIRTIO_MAX_VIRTQUEUE_PAIRS);
569 		goto end;
570 	}
571 
572 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
573 		struct virtio_user_dev *vu_dev;
574 
575 		eth_dev = virtio_user_eth_dev_alloc(dev);
576 		if (!eth_dev) {
577 			PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
578 			goto end;
579 		}
580 
581 		hw = eth_dev->data->dev_private;
582 		vu_dev = virtio_user_get_dev(hw);
583 		if (server_mode == 1)
584 			vu_dev->is_server = true;
585 		else
586 			vu_dev->is_server = false;
587 		if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
588 				 queue_size, mac_addr, &ifname) < 0) {
589 			PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
590 			virtio_user_eth_dev_free(eth_dev);
591 			goto end;
592 		}
593 
594 	} else {
595 		eth_dev = rte_eth_dev_attach_secondary(rte_vdev_device_name(dev));
596 		if (!eth_dev)
597 			goto end;
598 	}
599 
600 	/* previously called by rte_pci_probe() for physical dev */
601 	if (eth_virtio_dev_init(eth_dev) < 0) {
602 		PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
603 		virtio_user_eth_dev_free(eth_dev);
604 		goto end;
605 	}
606 
607 	rte_eth_dev_probing_finish(eth_dev);
608 	ret = 0;
609 
610 end:
611 	if (kvlist)
612 		rte_kvargs_free(kvlist);
613 	if (path)
614 		free(path);
615 	if (mac_addr)
616 		free(mac_addr);
617 	if (ifname)
618 		free(ifname);
619 	return ret;
620 }
621 
/** Called by rte_eth_dev_detach().
 *
 * Tear down a virtio-user vdev: close the port (stopping queues), uninit
 * the backing virtio_user_dev, then free the private data and release the
 * ethdev port.
 *
 * Returns 0 on success, -EINVAL if vdev is NULL, -ENODEV if no ethdev
 * with this name exists.
 */
static int
virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
	eth_dev = rte_eth_dev_allocated(name);
	if (!eth_dev)
		return -ENODEV;

	/* make sure the device is stopped, queues freed */
	rte_eth_dev_close(eth_dev->data->port_id);

	hw = eth_dev->data->dev_private;
	dev = hw->virtio_user_dev;
	virtio_user_dev_uninit(dev);

	/* dev (hw->virtio_user_dev) is freed by virtio_user_dev_uninit;
	 * here we free hw itself and give the port back.
	 */
	rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}
652 
/* vdev driver hooks; registered below via RTE_PMD_REGISTER_VDEV. */
static struct rte_vdev_driver virtio_user_driver = {
	.probe = virtio_user_pmd_probe,
	.remove = virtio_user_pmd_remove,
};
657 
658 RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
659 RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
660 RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
661 	"path=<path> "
662 	"mac=<mac addr> "
663 	"cq=<int> "
664 	"queue_size=<int> "
665 	"queues=<int> "
666 	"iface=<string>");
667