xref: /dpdk/lib/vhost/vduse.c (revision 2d9c7e56e52ceb2e14b5134dcd9673dd227e3072)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2023 Red Hat, Inc.
3  */
4 
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <unistd.h>
8 #include <fcntl.h>
9 
10 
11 #include <linux/vduse.h>
12 #include <linux/virtio_net.h>
13 
14 #include <sys/ioctl.h>
15 #include <sys/mman.h>
16 #include <sys/stat.h>
17 
18 #include <rte_common.h>
19 #include <rte_thread.h>
20 
21 #include "fd_man.h"
22 #include "iotlb.h"
23 #include "vduse.h"
24 #include "vhost.h"
25 #include "virtio_net_ctrl.h"
26 
27 #define VHOST_VDUSE_API_VERSION 0
28 #define VDUSE_CTRL_PATH "/dev/vduse/control"
29 
/* Global VDUSE context: a single fdset shared by every VDUSE device,
 * polled by one dispatcher thread. */
struct vduse {
	struct fdset fdset;
};

/* Single global instance; the "vduse-evt" control thread serving this
 * fdset is spawned lazily on first device creation. */
static struct vduse vduse = {
	.fdset = {
		.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
		.fd_mutex = PTHREAD_MUTEX_INITIALIZER,
		.fd_pooling_mutex = PTHREAD_MUTEX_INITIALIZER,
		.sync_mutex = PTHREAD_MUTEX_INITIALIZER,
		.num = 0
	},
};
43 
/* Set once the shared fdset dispatch thread has been created. */
static bool vduse_events_thread;

/* Human-readable names for VDUSE request types, indexed by the
 * request type value received in vduse_events_handler(). */
static const char * const vduse_reqs_str[] = {
	"VDUSE_GET_VQ_STATE",
	"VDUSE_SET_STATUS",
	"VDUSE_UPDATE_IOTLB",
};
51 
/* Map a VDUSE request type to its name, or "Unknown" if out of range.
 * Fix: parenthesize the macro argument so an expression argument with
 * lower precedence than '<' cannot change the comparison's meaning. */
#define vduse_req_id_to_str(id) \
	((id) < RTE_DIM(vduse_reqs_str) ? \
	vduse_reqs_str[(id)] : "Unknown")
55 
/* Backend callback: ask the VDUSE kernel driver to inject an interrupt
 * (used-ring notification) for the given virtqueue. Returns the ioctl
 * status (0 on success, -1 with errno set on failure). */
static int
vduse_inject_irq(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	return ioctl(dev->vduse_dev_fd, VDUSE_VQ_INJECT_IRQ, &vq->index);
}
61 
/*
 * Backend callback invoked when an IOTLB entry is evicted: unmap the
 * region that vduse_iotlb_miss() mapped for it.  The mapping starts at
 * the file base (see the mmap() there), hence offset + size bytes.
 */
static void
vduse_iotlb_remove_notify(uint64_t addr, uint64_t offset, uint64_t size)
{
	void *base = (void *)(uintptr_t)addr;
	uint64_t len = offset + size;

	munmap(base, len);
}
67 
68 static int
69 vduse_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm __rte_unused)
70 {
71 	struct vduse_iotlb_entry entry;
72 	uint64_t size, page_size;
73 	struct stat stat;
74 	void *mmap_addr;
75 	int fd, ret;
76 
77 	entry.start = iova;
78 	entry.last = iova + 1;
79 
80 	ret = ioctl(dev->vduse_dev_fd, VDUSE_IOTLB_GET_FD, &entry);
81 	if (ret < 0) {
82 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to get IOTLB entry for 0x%" PRIx64,
83 				iova);
84 		return -1;
85 	}
86 
87 	fd = ret;
88 
89 	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "New IOTLB entry:");
90 	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "\tIOVA: %" PRIx64 " - %" PRIx64,
91 			(uint64_t)entry.start, (uint64_t)entry.last);
92 	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "\toffset: %" PRIx64, (uint64_t)entry.offset);
93 	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "\tfd: %d", fd);
94 	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "\tperm: %x", entry.perm);
95 
96 	size = entry.last - entry.start + 1;
97 	mmap_addr = mmap(0, size + entry.offset, entry.perm, MAP_SHARED, fd, 0);
98 	if (!mmap_addr) {
99 		VHOST_CONFIG_LOG(dev->ifname, ERR,
100 				"Failed to mmap IOTLB entry for 0x%" PRIx64, iova);
101 		ret = -1;
102 		goto close_fd;
103 	}
104 
105 	ret = fstat(fd, &stat);
106 	if (ret < 0) {
107 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to get page size.");
108 		munmap(mmap_addr, entry.offset + size);
109 		goto close_fd;
110 	}
111 	page_size = (uint64_t)stat.st_blksize;
112 
113 	vhost_user_iotlb_cache_insert(dev, entry.start, (uint64_t)(uintptr_t)mmap_addr,
114 		entry.offset, size, page_size, entry.perm);
115 
116 	ret = 0;
117 close_fd:
118 	close(fd);
119 
120 	return ret;
121 }
122 
/* Backend hooks handed to the generic vhost layer at device creation
 * (see vhost_new_device() call in vduse_device_create()). */
static struct vhost_backend_ops vduse_backend_ops = {
	.iotlb_miss = vduse_iotlb_miss,
	.iotlb_remove_notify = vduse_iotlb_remove_notify,
	.inject_irq = vduse_inject_irq,
};
128 
129 static void
130 vduse_control_queue_event(int fd, void *arg, int *remove __rte_unused)
131 {
132 	struct virtio_net *dev = arg;
133 	uint64_t buf;
134 	int ret;
135 
136 	ret = read(fd, &buf, sizeof(buf));
137 	if (ret < 0) {
138 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to read control queue event: %s",
139 				strerror(errno));
140 		return;
141 	}
142 
143 	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "Control queue kicked");
144 	if (virtio_net_ctrl_handle(dev))
145 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to handle ctrl request");
146 }
147 
148 static void
149 vduse_vring_setup(struct virtio_net *dev, unsigned int index)
150 {
151 	struct vhost_virtqueue *vq = dev->virtqueue[index];
152 	struct vhost_vring_addr *ra = &vq->ring_addrs;
153 	struct vduse_vq_info vq_info;
154 	struct vduse_vq_eventfd vq_efd;
155 	int ret;
156 
157 	vq_info.index = index;
158 	ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_GET_INFO, &vq_info);
159 	if (ret) {
160 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to get VQ %u info: %s",
161 				index, strerror(errno));
162 		return;
163 	}
164 
165 	VHOST_CONFIG_LOG(dev->ifname, INFO, "VQ %u info:", index);
166 	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tnum: %u", vq_info.num);
167 	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tdesc_addr: %llx",
168 			(unsigned long long)vq_info.desc_addr);
169 	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tdriver_addr: %llx",
170 			(unsigned long long)vq_info.driver_addr);
171 	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tdevice_addr: %llx",
172 			(unsigned long long)vq_info.device_addr);
173 	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tavail_idx: %u", vq_info.split.avail_index);
174 	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tready: %u", vq_info.ready);
175 
176 	vq->last_avail_idx = vq_info.split.avail_index;
177 	vq->size = vq_info.num;
178 	vq->ready = true;
179 	vq->enabled = vq_info.ready;
180 	ra->desc_user_addr = vq_info.desc_addr;
181 	ra->avail_user_addr = vq_info.driver_addr;
182 	ra->used_user_addr = vq_info.device_addr;
183 
184 	vq->kickfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
185 	if (vq->kickfd < 0) {
186 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to init kickfd for VQ %u: %s",
187 				index, strerror(errno));
188 		vq->kickfd = VIRTIO_INVALID_EVENTFD;
189 		return;
190 	}
191 	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tkick fd: %d", vq->kickfd);
192 
193 	vq->shadow_used_split = rte_malloc_socket(NULL,
194 				vq->size * sizeof(struct vring_used_elem),
195 				RTE_CACHE_LINE_SIZE, 0);
196 	vq->batch_copy_elems = rte_malloc_socket(NULL,
197 				vq->size * sizeof(struct batch_copy_elem),
198 				RTE_CACHE_LINE_SIZE, 0);
199 
200 	rte_rwlock_write_lock(&vq->access_lock);
201 	vhost_user_iotlb_rd_lock(vq);
202 	if (vring_translate(dev, vq))
203 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to translate vring %d addresses",
204 				index);
205 
206 	if (vhost_enable_guest_notification(dev, vq, 0))
207 		VHOST_CONFIG_LOG(dev->ifname, ERR,
208 				"Failed to disable guest notifications on vring %d",
209 				index);
210 	vhost_user_iotlb_rd_unlock(vq);
211 	rte_rwlock_write_unlock(&vq->access_lock);
212 
213 	vq_efd.index = index;
214 	vq_efd.fd = vq->kickfd;
215 
216 	ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd);
217 	if (ret) {
218 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to setup kickfd for VQ %u: %s",
219 				index, strerror(errno));
220 		close(vq->kickfd);
221 		vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
222 		return;
223 	}
224 
225 	if (vq == dev->cvq) {
226 		ret = fdset_add(&vduse.fdset, vq->kickfd, vduse_control_queue_event, NULL, dev);
227 		if (ret) {
228 			VHOST_CONFIG_LOG(dev->ifname, ERR,
229 					"Failed to setup kickfd handler for VQ %u: %s",
230 					index, strerror(errno));
231 			vq_efd.fd = VDUSE_EVENTFD_DEASSIGN;
232 			ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd);
233 			close(vq->kickfd);
234 			vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
235 		}
236 		fdset_pipe_notify(&vduse.fdset);
237 		vhost_enable_guest_notification(dev, vq, 1);
238 		VHOST_CONFIG_LOG(dev->ifname, INFO, "Ctrl queue event handler installed");
239 	}
240 }
241 
/*
 * Tear down virtqueue @index at device stop: deassign and close the
 * kick eventfd, invalidate address translations and release per-queue
 * metadata.  Mirrors vduse_vring_setup().
 */
static void
vduse_vring_cleanup(struct virtio_net *dev, unsigned int index)
{
	struct vhost_virtqueue *vq = dev->virtqueue[index];
	struct vduse_vq_eventfd vq_efd;
	int ret;

	/* The control queue kickfd was registered in the shared fdset at
	 * setup time; remove it before the fd is closed below. */
	if (vq == dev->cvq && vq->kickfd >= 0) {
		fdset_del(&vduse.fdset, vq->kickfd);
		fdset_pipe_notify(&vduse.fdset);
	}

	vq_efd.index = index;
	vq_efd.fd = VDUSE_EVENTFD_DEASSIGN;

	ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd);
	if (ret)
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to cleanup kickfd for VQ %u: %s",
				index, strerror(errno));

	close(vq->kickfd);
	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

	/* Invalidate vring addresses under the access lock so no datapath
	 * thread can keep using stale translations. */
	rte_rwlock_write_lock(&vq->access_lock);
	vring_invalidate(dev, vq);
	rte_rwlock_write_unlock(&vq->access_lock);

	rte_free(vq->batch_copy_elems);
	vq->batch_copy_elems = NULL;

	rte_free(vq->shadow_used_split);
	vq->shadow_used_split = NULL;

	/* Reset ring state so a later restart re-imports it fully from the
	 * kernel in vduse_vring_setup(). */
	vq->enabled = false;
	vq->ready = false;
	vq->size = 0;
	vq->last_used_idx = 0;
	vq->last_avail_idx = 0;
}
281 
282 static void
283 vduse_device_start(struct virtio_net *dev)
284 {
285 	unsigned int i, ret;
286 
287 	VHOST_CONFIG_LOG(dev->ifname, INFO, "Starting device...");
288 
289 	dev->notify_ops = vhost_driver_callback_get(dev->ifname);
290 	if (!dev->notify_ops) {
291 		VHOST_CONFIG_LOG(dev->ifname, ERR,
292 				"Failed to get callback ops for driver");
293 		return;
294 	}
295 
296 	ret = ioctl(dev->vduse_dev_fd, VDUSE_DEV_GET_FEATURES, &dev->features);
297 	if (ret) {
298 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to get features: %s",
299 				strerror(errno));
300 		return;
301 	}
302 
303 	VHOST_CONFIG_LOG(dev->ifname, INFO, "Negotiated Virtio features: 0x%" PRIx64,
304 		dev->features);
305 
306 	if (dev->features &
307 		((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
308 		 (1ULL << VIRTIO_F_VERSION_1) |
309 		 (1ULL << VIRTIO_F_RING_PACKED))) {
310 		dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
311 	} else {
312 		dev->vhost_hlen = sizeof(struct virtio_net_hdr);
313 	}
314 
315 	for (i = 0; i < dev->nr_vring; i++)
316 		vduse_vring_setup(dev, i);
317 
318 	dev->flags |= VIRTIO_DEV_READY;
319 
320 	if (dev->notify_ops->new_device(dev->vid) == 0)
321 		dev->flags |= VIRTIO_DEV_RUNNING;
322 
323 	for (i = 0; i < dev->nr_vring; i++) {
324 		struct vhost_virtqueue *vq = dev->virtqueue[i];
325 
326 		if (vq == dev->cvq)
327 			continue;
328 
329 		if (dev->notify_ops->vring_state_changed)
330 			dev->notify_ops->vring_state_changed(dev->vid, i, vq->enabled);
331 	}
332 }
333 
/*
 * Stop the device when the driver clears DRIVER_OK: notify the
 * application, clear the READY flag, tear down every ring and drop all
 * cached IOTLB translations.  Ordering matters: the application must be
 * quiesced before ring resources are released.
 */
static void
vduse_device_stop(struct virtio_net *dev)
{
	unsigned int i;

	VHOST_CONFIG_LOG(dev->ifname, INFO, "Stopping device...");

	/* Presumably triggers the application's destroy_device callback
	 * and waits for the datapath to quiesce — see vhost.c. */
	vhost_destroy_device_notify(dev);

	dev->flags &= ~VIRTIO_DEV_READY;

	for (i = 0; i < dev->nr_vring; i++)
		vduse_vring_cleanup(dev, i);

	vhost_user_iotlb_flush_all(dev);
}
350 
/*
 * fdset callback for the VDUSE device fd: read one request from the
 * kernel, handle it, write the response back, and drive device
 * start/stop on DRIVER_OK status transitions.
 */
static void
vduse_events_handler(int fd, void *arg, int *remove __rte_unused)
{
	struct virtio_net *dev = arg;
	struct vduse_dev_request req;
	struct vduse_dev_response resp;
	struct vhost_virtqueue *vq;
	uint8_t old_status = dev->status;
	int ret;

	memset(&resp, 0, sizeof(resp));

	ret = read(fd, &req, sizeof(req));
	if (ret < 0) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to read request: %s",
				strerror(errno));
		return;
	} else if (ret < (int)sizeof(req)) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Incomplete to read request %d", ret);
		return;
	}

	VHOST_CONFIG_LOG(dev->ifname, INFO, "New request: %s (%u)",
			vduse_req_id_to_str(req.type), req.type);

	switch (req.type) {
	case VDUSE_GET_VQ_STATE:
		/* Report the last available index seen by the datapath. */
		vq = dev->virtqueue[req.vq_state.index];
		VHOST_CONFIG_LOG(dev->ifname, INFO, "\tvq index: %u, avail_index: %u",
				req.vq_state.index, vq->last_avail_idx);
		resp.vq_state.split.avail_index = vq->last_avail_idx;
		resp.result = VDUSE_REQ_RESULT_OK;
		break;
	case VDUSE_SET_STATUS:
		VHOST_CONFIG_LOG(dev->ifname, INFO, "\tnew status: 0x%08x",
				req.s.status);
		old_status = dev->status;
		dev->status = req.s.status;
		resp.result = VDUSE_REQ_RESULT_OK;
		break;
	case VDUSE_UPDATE_IOTLB:
		/* Drop cached translations covering the updated IOVA range;
		 * they will be re-faulted through vduse_iotlb_miss(). */
		VHOST_CONFIG_LOG(dev->ifname, INFO, "\tIOVA range: %" PRIx64 " - %" PRIx64,
				(uint64_t)req.iova.start, (uint64_t)req.iova.last);
		vhost_user_iotlb_cache_remove(dev, req.iova.start,
				req.iova.last - req.iova.start + 1);
		resp.result = VDUSE_REQ_RESULT_OK;
		break;
	default:
		resp.result = VDUSE_REQ_RESULT_FAILED;
		break;
	}

	/* The response must echo the ID of the request it answers. */
	resp.request_id = req.request_id;

	ret = write(dev->vduse_dev_fd, &resp, sizeof(resp));
	if (ret != sizeof(resp)) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to write response %s",
				strerror(errno));
		return;
	}

	/* Start or stop the device only on DRIVER_OK bit transitions, and
	 * only after the response has been sent back to the kernel. */
	if ((old_status ^ dev->status) & VIRTIO_DEVICE_STATUS_DRIVER_OK) {
		if (dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)
			vduse_device_start(dev);
		else
			vduse_device_stop(dev);
	}

	VHOST_CONFIG_LOG(dev->ifname, INFO, "Request %s (%u) handled successfully",
			vduse_req_id_to_str(req.type), req.type);
}
422 
/*
 * Create a VDUSE virtio-net device backed by the kernel VDUSE framework
 * and register it with the vhost library.
 *
 * @path: device character node path (expected under /dev/vduse/)
 * @compliant_ol_flags: forwarded to vhost_setup_virtio_net() to select
 *     Virtio-spec-compliant offload flags handling
 *
 * Returns 0 on success, a negative value on failure.  On failure, all
 * acquired resources are released via the goto-cleanup chain below.
 */
int
vduse_device_create(const char *path, bool compliant_ol_flags)
{
	int control_fd, dev_fd, vid, ret;
	rte_thread_t fdset_tid;
	uint32_t i, max_queue_pairs, total_queues;
	struct virtio_net *dev;
	struct virtio_net_config vnet_config = {{ 0 }};
	uint64_t ver = VHOST_VDUSE_API_VERSION;
	uint64_t features;
	struct vduse_dev_config *dev_config = NULL;
	const char *name = path + strlen("/dev/vduse/");

	/* If first device, create events dispatcher thread */
	if (vduse_events_thread == false) {
		/**
		 * create a pipe which will be waited by poll and notified to
		 * rebuild the wait list of poll.
		 */
		if (fdset_pipe_init(&vduse.fdset) < 0) {
			VHOST_CONFIG_LOG(path, ERR, "failed to create pipe for vduse fdset");
			return -1;
		}

		ret = rte_thread_create_internal_control(&fdset_tid, "vduse-evt",
				fdset_event_dispatch, &vduse.fdset);
		if (ret != 0) {
			VHOST_CONFIG_LOG(path, ERR, "failed to create vduse fdset handling thread");
			fdset_pipe_uninit(&vduse.fdset);
			return -1;
		}

		vduse_events_thread = true;
	}

	control_fd = open(VDUSE_CTRL_PATH, O_RDWR);
	if (control_fd < 0) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to open %s: %s",
				VDUSE_CTRL_PATH, strerror(errno));
		return -1;
	}

	if (ioctl(control_fd, VDUSE_SET_API_VERSION, &ver)) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to set API version: %" PRIu64 ": %s",
				ver, strerror(errno));
		ret = -1;
		goto out_ctrl_close;
	}

	/* The virtio-net config space is copied after the flexible array
	 * member at the end of struct vduse_dev_config. */
	dev_config = malloc(offsetof(struct vduse_dev_config, config) +
			sizeof(vnet_config));
	if (!dev_config) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to allocate VDUSE config");
		ret = -1;
		goto out_ctrl_close;
	}

	ret = rte_vhost_driver_get_features(path, &features);
	if (ret < 0) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to get backend features");
		goto out_free;
	}

	ret = rte_vhost_driver_get_queue_num(path, &max_queue_pairs);
	if (ret < 0) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to get max queue pairs");
		goto out_free;
	}

	VHOST_CONFIG_LOG(path, INFO, "VDUSE max queue pairs: %u", max_queue_pairs);
	total_queues = max_queue_pairs * 2;

	/* A control queue is only exposed with multiqueue support. */
	if (max_queue_pairs == 1)
		features &= ~(RTE_BIT64(VIRTIO_NET_F_CTRL_VQ) | RTE_BIT64(VIRTIO_NET_F_MQ));
	else
		total_queues += 1; /* Includes ctrl queue */

	vnet_config.max_virtqueue_pairs = max_queue_pairs;
	memset(dev_config, 0, sizeof(struct vduse_dev_config));

	/* dev_config was just zeroed, so the strncpy result is always
	 * NUL-terminated. */
	strncpy(dev_config->name, name, VDUSE_NAME_MAX - 1);
	dev_config->device_id = VIRTIO_ID_NET;
	dev_config->vendor_id = 0;
	dev_config->features = features;
	dev_config->vq_num = total_queues;
	dev_config->vq_align = sysconf(_SC_PAGE_SIZE);
	dev_config->config_size = sizeof(struct virtio_net_config);
	memcpy(dev_config->config, &vnet_config, sizeof(vnet_config));

	ret = ioctl(control_fd, VDUSE_CREATE_DEV, dev_config);
	if (ret < 0) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to create VDUSE device: %s",
				strerror(errno));
		goto out_free;
	}

	dev_fd = open(path, O_RDWR);
	if (dev_fd < 0) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to open device %s: %s",
				path, strerror(errno));
		ret = -1;
		goto out_dev_close;
	}

	/* Non-blocking so the shared event thread never stalls on this fd. */
	ret = fcntl(dev_fd, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to set chardev as non-blocking: %s",
				strerror(errno));
		goto out_dev_close;
	}

	vid = vhost_new_device(&vduse_backend_ops);
	if (vid < 0) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to create new Vhost device");
		ret = -1;
		goto out_dev_close;
	}

	dev = get_device(vid);
	if (!dev) {
		ret = -1;
		goto out_dev_close;
	}

	strncpy(dev->ifname, path, IF_NAME_SZ - 1);
	dev->vduse_ctrl_fd = control_fd;
	dev->vduse_dev_fd = dev_fd;
	vhost_setup_virtio_net(dev->vid, true, compliant_ol_flags, true, true);

	for (i = 0; i < total_queues; i++) {
		struct vduse_vq_config vq_cfg = { 0 };

		ret = alloc_vring_queue(dev, i);
		if (ret) {
			VHOST_CONFIG_LOG(name, ERR, "Failed to alloc vring %d metadata", i);
			goto out_dev_destroy;
		}

		vq_cfg.index = i;
		vq_cfg.max_size = 1024;

		ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP, &vq_cfg);
		if (ret) {
			VHOST_CONFIG_LOG(name, ERR, "Failed to set-up VQ %d", i);
			goto out_dev_destroy;
		}
	}

	/* NOTE(review): when max_queue_pairs == 1 no ctrl queue was
	 * allocated above, so this indexes an unallocated slot — presumably
	 * a NULL pointer in the virtqueue array; verify against
	 * vhost_new_device()/alloc_vring_queue(). */
	dev->cvq = dev->virtqueue[max_queue_pairs * 2];

	ret = fdset_add(&vduse.fdset, dev->vduse_dev_fd, vduse_events_handler, NULL, dev);
	if (ret) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to add fd %d to vduse fdset",
				dev->vduse_dev_fd);
		goto out_dev_destroy;
	}
	/* Wake the dispatch thread so it rebuilds its poll list. */
	fdset_pipe_notify(&vduse.fdset);

	free(dev_config);

	return 0;

out_dev_destroy:
	vhost_destroy_device(vid);
out_dev_close:
	if (dev_fd >= 0)
		close(dev_fd);
	ioctl(control_fd, VDUSE_DESTROY_DEV, name);
out_free:
	free(dev_config);
out_ctrl_close:
	close(control_fd);

	return ret;
}
598 
599 int
600 vduse_device_destroy(const char *path)
601 {
602 	const char *name = path + strlen("/dev/vduse/");
603 	struct virtio_net *dev;
604 	int vid, ret;
605 
606 	for (vid = 0; vid < RTE_MAX_VHOST_DEVICE; vid++) {
607 		dev = vhost_devices[vid];
608 
609 		if (dev == NULL)
610 			continue;
611 
612 		if (!strcmp(path, dev->ifname))
613 			break;
614 	}
615 
616 	if (vid == RTE_MAX_VHOST_DEVICE)
617 		return -1;
618 
619 	vduse_device_stop(dev);
620 
621 	fdset_del(&vduse.fdset, dev->vduse_dev_fd);
622 	fdset_pipe_notify_sync(&vduse.fdset);
623 
624 	if (dev->vduse_dev_fd >= 0) {
625 		close(dev->vduse_dev_fd);
626 		dev->vduse_dev_fd = -1;
627 	}
628 
629 	if (dev->vduse_ctrl_fd >= 0) {
630 		ret = ioctl(dev->vduse_ctrl_fd, VDUSE_DESTROY_DEV, name);
631 		if (ret)
632 			VHOST_CONFIG_LOG(name, ERR, "Failed to destroy VDUSE device: %s",
633 					strerror(errno));
634 		close(dev->vduse_ctrl_fd);
635 		dev->vduse_ctrl_fd = -1;
636 	}
637 
638 	vhost_destroy_device(vid);
639 
640 	return 0;
641 }
642