xref: /dpdk/lib/vhost/vduse.c (revision 3da59f30a23f2e795d2315f3d949e1b3e0ce0c3d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2023 Red Hat, Inc.
3  */
4 
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <unistd.h>
8 #include <fcntl.h>
9 
10 
11 #include <linux/vduse.h>
12 #include <linux/virtio_net.h>
13 
14 #include <sys/ioctl.h>
15 #include <sys/mman.h>
16 #include <sys/stat.h>
17 
18 #include <rte_common.h>
19 #include <rte_thread.h>
20 
21 #include "fd_man.h"
22 #include "iotlb.h"
23 #include "vduse.h"
24 #include "vhost.h"
25 #include "virtio_net_ctrl.h"
26 
27 #define VHOST_VDUSE_API_VERSION 0
28 #define VDUSE_CTRL_PATH "/dev/vduse/control"
29 
/* Module-wide VDUSE state: a single fdset shared by every VDUSE device,
 * polled by one dedicated dispatcher thread (created lazily on first
 * device creation, see vduse_device_create()).
 */
struct vduse {
	struct fdset fdset;
};

static struct vduse vduse = {
	.fdset = {
		.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
		.fd_mutex = PTHREAD_MUTEX_INITIALIZER,
		.fd_pooling_mutex = PTHREAD_MUTEX_INITIALIZER,
		.num = 0
	},
};

/* Set once the fdset event dispatcher thread has been spawned. */
static bool vduse_events_thread;
44 
/* Human-readable names for the VDUSE requests handled in
 * vduse_events_handler(), indexed by request type. */
static const char * const vduse_reqs_str[] = {
	"VDUSE_GET_VQ_STATE",
	"VDUSE_SET_STATUS",
	"VDUSE_UPDATE_IOTLB",
};

/*
 * Map a request ID to its name, "Unknown" for out-of-range IDs.
 * The argument is parenthesized so that expressions such as 'a + b'
 * or a conditional are evaluated before the '<' comparison and the
 * array subscript. Note: the argument is still evaluated twice, so
 * callers must not pass expressions with side effects.
 */
#define vduse_req_id_to_str(id) \
	((id) < RTE_DIM(vduse_reqs_str) ? \
	vduse_reqs_str[(id)] : "Unknown")
54 
55 static int
56 vduse_inject_irq(struct virtio_net *dev, struct vhost_virtqueue *vq)
57 {
58 	return ioctl(dev->vduse_dev_fd, VDUSE_VQ_INJECT_IRQ, &vq->index);
59 }
60 
61 static void
62 vduse_iotlb_remove_notify(uint64_t addr, uint64_t offset, uint64_t size)
63 {
64 	munmap((void *)(uintptr_t)addr, offset + size);
65 }
66 
67 static int
68 vduse_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm __rte_unused)
69 {
70 	struct vduse_iotlb_entry entry;
71 	uint64_t size, page_size;
72 	struct stat stat;
73 	void *mmap_addr;
74 	int fd, ret;
75 
76 	entry.start = iova;
77 	entry.last = iova + 1;
78 
79 	ret = ioctl(dev->vduse_dev_fd, VDUSE_IOTLB_GET_FD, &entry);
80 	if (ret < 0) {
81 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to get IOTLB entry for 0x%" PRIx64,
82 				iova);
83 		return -1;
84 	}
85 
86 	fd = ret;
87 
88 	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "New IOTLB entry:");
89 	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "\tIOVA: %" PRIx64 " - %" PRIx64,
90 			(uint64_t)entry.start, (uint64_t)entry.last);
91 	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "\toffset: %" PRIx64, (uint64_t)entry.offset);
92 	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "\tfd: %d", fd);
93 	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "\tperm: %x", entry.perm);
94 
95 	size = entry.last - entry.start + 1;
96 	mmap_addr = mmap(0, size + entry.offset, entry.perm, MAP_SHARED, fd, 0);
97 	if (!mmap_addr) {
98 		VHOST_CONFIG_LOG(dev->ifname, ERR,
99 				"Failed to mmap IOTLB entry for 0x%" PRIx64, iova);
100 		ret = -1;
101 		goto close_fd;
102 	}
103 
104 	ret = fstat(fd, &stat);
105 	if (ret < 0) {
106 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to get page size.");
107 		munmap(mmap_addr, entry.offset + size);
108 		goto close_fd;
109 	}
110 	page_size = (uint64_t)stat.st_blksize;
111 
112 	vhost_user_iotlb_cache_insert(dev, entry.start, (uint64_t)(uintptr_t)mmap_addr,
113 		entry.offset, size, page_size, entry.perm);
114 
115 	ret = 0;
116 close_fd:
117 	close(fd);
118 
119 	return ret;
120 }
121 
/* Backend callbacks registered with the generic vhost layer when the
 * device is created (passed to vhost_new_device() below). */
static struct vhost_backend_ops vduse_backend_ops = {
	.iotlb_miss = vduse_iotlb_miss,
	.iotlb_remove_notify = vduse_iotlb_remove_notify,
	.inject_irq = vduse_inject_irq,
};
127 
128 static void
129 vduse_control_queue_event(int fd, void *arg, int *remove __rte_unused)
130 {
131 	struct virtio_net *dev = arg;
132 	uint64_t buf;
133 	int ret;
134 
135 	ret = read(fd, &buf, sizeof(buf));
136 	if (ret < 0) {
137 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to read control queue event: %s",
138 				strerror(errno));
139 		return;
140 	}
141 
142 	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "Control queue kicked");
143 	if (virtio_net_ctrl_handle(dev))
144 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to handle ctrl request");
145 }
146 
147 static void
148 vduse_vring_setup(struct virtio_net *dev, unsigned int index)
149 {
150 	struct vhost_virtqueue *vq = dev->virtqueue[index];
151 	struct vhost_vring_addr *ra = &vq->ring_addrs;
152 	struct vduse_vq_info vq_info;
153 	struct vduse_vq_eventfd vq_efd;
154 	int ret;
155 
156 	vq_info.index = index;
157 	ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_GET_INFO, &vq_info);
158 	if (ret) {
159 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to get VQ %u info: %s",
160 				index, strerror(errno));
161 		return;
162 	}
163 
164 	VHOST_CONFIG_LOG(dev->ifname, INFO, "VQ %u info:", index);
165 	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tnum: %u", vq_info.num);
166 	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tdesc_addr: %llx",
167 			(unsigned long long)vq_info.desc_addr);
168 	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tdriver_addr: %llx",
169 			(unsigned long long)vq_info.driver_addr);
170 	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tdevice_addr: %llx",
171 			(unsigned long long)vq_info.device_addr);
172 	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tavail_idx: %u", vq_info.split.avail_index);
173 	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tready: %u", vq_info.ready);
174 
175 	vq->last_avail_idx = vq_info.split.avail_index;
176 	vq->size = vq_info.num;
177 	vq->ready = true;
178 	vq->enabled = vq_info.ready;
179 	ra->desc_user_addr = vq_info.desc_addr;
180 	ra->avail_user_addr = vq_info.driver_addr;
181 	ra->used_user_addr = vq_info.device_addr;
182 
183 	vq->kickfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
184 	if (vq->kickfd < 0) {
185 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to init kickfd for VQ %u: %s",
186 				index, strerror(errno));
187 		vq->kickfd = VIRTIO_INVALID_EVENTFD;
188 		return;
189 	}
190 	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tkick fd: %d", vq->kickfd);
191 
192 	vq->shadow_used_split = rte_malloc_socket(NULL,
193 				vq->size * sizeof(struct vring_used_elem),
194 				RTE_CACHE_LINE_SIZE, 0);
195 	vq->batch_copy_elems = rte_malloc_socket(NULL,
196 				vq->size * sizeof(struct batch_copy_elem),
197 				RTE_CACHE_LINE_SIZE, 0);
198 
199 	rte_rwlock_write_lock(&vq->access_lock);
200 	vhost_user_iotlb_rd_lock(vq);
201 	if (vring_translate(dev, vq))
202 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to translate vring %d addresses",
203 				index);
204 
205 	if (vhost_enable_guest_notification(dev, vq, 0))
206 		VHOST_CONFIG_LOG(dev->ifname, ERR,
207 				"Failed to disable guest notifications on vring %d",
208 				index);
209 	vhost_user_iotlb_rd_unlock(vq);
210 	rte_rwlock_write_unlock(&vq->access_lock);
211 
212 	vq_efd.index = index;
213 	vq_efd.fd = vq->kickfd;
214 
215 	ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd);
216 	if (ret) {
217 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to setup kickfd for VQ %u: %s",
218 				index, strerror(errno));
219 		close(vq->kickfd);
220 		vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
221 		return;
222 	}
223 
224 	if (vq == dev->cvq) {
225 		ret = fdset_add(&vduse.fdset, vq->kickfd, vduse_control_queue_event, NULL, dev);
226 		if (ret) {
227 			VHOST_CONFIG_LOG(dev->ifname, ERR,
228 					"Failed to setup kickfd handler for VQ %u: %s",
229 					index, strerror(errno));
230 			vq_efd.fd = VDUSE_EVENTFD_DEASSIGN;
231 			ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd);
232 			close(vq->kickfd);
233 			vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
234 		}
235 		fdset_pipe_notify(&vduse.fdset);
236 		vhost_enable_guest_notification(dev, vq, 1);
237 		VHOST_CONFIG_LOG(dev->ifname, INFO, "Ctrl queue event handler installed");
238 	}
239 }
240 
/*
 * Undo vduse_vring_setup() for vring @index: unregister the control
 * queue handler if applicable, deassign and close the kick eventfd,
 * invalidate the translated ring addresses, free the per-vring
 * metadata and reset the vring state.
 */
static void
vduse_vring_cleanup(struct virtio_net *dev, unsigned int index)
{
	struct vhost_virtqueue *vq = dev->virtqueue[index];
	struct vduse_vq_eventfd vq_efd;
	int ret;

	/* The control queue kickfd was polled by the VDUSE events thread;
	 * remove it from the fdset before closing it. */
	if (vq == dev->cvq && vq->kickfd >= 0) {
		fdset_del(&vduse.fdset, vq->kickfd);
		fdset_pipe_notify(&vduse.fdset);
	}

	/* Tell the kernel to stop using the kick eventfd. */
	vq_efd.index = index;
	vq_efd.fd = VDUSE_EVENTFD_DEASSIGN;

	ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd);
	if (ret)
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to cleanup kickfd for VQ %u: %s",
				index, strerror(errno));

	close(vq->kickfd);
	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

	/* Drop the translated ring addresses under the access lock so no
	 * datapath thread observes stale pointers. */
	rte_rwlock_write_lock(&vq->access_lock);
	vring_invalidate(dev, vq);
	rte_rwlock_write_unlock(&vq->access_lock);

	rte_free(vq->batch_copy_elems);
	vq->batch_copy_elems = NULL;

	rte_free(vq->shadow_used_split);
	vq->shadow_used_split = NULL;

	/* Reset so a later vduse_vring_setup() starts from a clean state. */
	vq->enabled = false;
	vq->ready = false;
	vq->size = 0;
	vq->last_used_idx = 0;
	vq->last_avail_idx = 0;
}
280 
281 static void
282 vduse_device_start(struct virtio_net *dev)
283 {
284 	unsigned int i, ret;
285 
286 	VHOST_CONFIG_LOG(dev->ifname, INFO, "Starting device...");
287 
288 	dev->notify_ops = vhost_driver_callback_get(dev->ifname);
289 	if (!dev->notify_ops) {
290 		VHOST_CONFIG_LOG(dev->ifname, ERR,
291 				"Failed to get callback ops for driver");
292 		return;
293 	}
294 
295 	ret = ioctl(dev->vduse_dev_fd, VDUSE_DEV_GET_FEATURES, &dev->features);
296 	if (ret) {
297 		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to get features: %s",
298 				strerror(errno));
299 		return;
300 	}
301 
302 	VHOST_CONFIG_LOG(dev->ifname, INFO, "Negotiated Virtio features: 0x%" PRIx64,
303 		dev->features);
304 
305 	if (dev->features &
306 		((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
307 		 (1ULL << VIRTIO_F_VERSION_1) |
308 		 (1ULL << VIRTIO_F_RING_PACKED))) {
309 		dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
310 	} else {
311 		dev->vhost_hlen = sizeof(struct virtio_net_hdr);
312 	}
313 
314 	for (i = 0; i < dev->nr_vring; i++)
315 		vduse_vring_setup(dev, i);
316 
317 	dev->flags |= VIRTIO_DEV_READY;
318 
319 	if (dev->notify_ops->new_device(dev->vid) == 0)
320 		dev->flags |= VIRTIO_DEV_RUNNING;
321 
322 	for (i = 0; i < dev->nr_vring; i++) {
323 		struct vhost_virtqueue *vq = dev->virtqueue[i];
324 
325 		if (vq == dev->cvq)
326 			continue;
327 
328 		if (dev->notify_ops->vring_state_changed)
329 			dev->notify_ops->vring_state_changed(dev->vid, i, vq->enabled);
330 	}
331 }
332 
333 static void
334 vduse_device_stop(struct virtio_net *dev)
335 {
336 	unsigned int i;
337 
338 	VHOST_CONFIG_LOG(dev->ifname, INFO, "Stopping device...");
339 
340 	vhost_destroy_device_notify(dev);
341 
342 	dev->flags &= ~VIRTIO_DEV_READY;
343 
344 	for (i = 0; i < dev->nr_vring; i++)
345 		vduse_vring_cleanup(dev, i);
346 
347 	vhost_user_iotlb_flush_all(dev);
348 }
349 
/*
 * fdset handler for the VDUSE chardev: read one request from the
 * kernel, handle it, write the response back, then perform any device
 * start/stop implied by a DRIVER_OK status transition.
 */
static void
vduse_events_handler(int fd, void *arg, int *remove __rte_unused)
{
	struct virtio_net *dev = arg;
	struct vduse_dev_request req;
	struct vduse_dev_response resp;
	struct vhost_virtqueue *vq;
	/* Snapshot the status so a VDUSE_SET_STATUS transition can be
	 * detected after the response is written. */
	uint8_t old_status = dev->status;
	int ret;

	memset(&resp, 0, sizeof(resp));

	/* Requests are fixed-size; a short read is treated as an error. */
	ret = read(fd, &req, sizeof(req));
	if (ret < 0) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to read request: %s",
				strerror(errno));
		return;
	} else if (ret < (int)sizeof(req)) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Incomplete to read request %d", ret);
		return;
	}

	VHOST_CONFIG_LOG(dev->ifname, INFO, "New request: %s (%u)",
			vduse_req_id_to_str(req.type), req.type);

	switch (req.type) {
	case VDUSE_GET_VQ_STATE:
		/* Report the backend's last avail index for the vring. */
		vq = dev->virtqueue[req.vq_state.index];
		VHOST_CONFIG_LOG(dev->ifname, INFO, "\tvq index: %u, avail_index: %u",
				req.vq_state.index, vq->last_avail_idx);
		resp.vq_state.split.avail_index = vq->last_avail_idx;
		resp.result = VDUSE_REQ_RESULT_OK;
		break;
	case VDUSE_SET_STATUS:
		VHOST_CONFIG_LOG(dev->ifname, INFO, "\tnew status: 0x%08x",
				req.s.status);
		old_status = dev->status;
		dev->status = req.s.status;
		resp.result = VDUSE_REQ_RESULT_OK;
		break;
	case VDUSE_UPDATE_IOTLB:
		/* Invalidate the cached translations covering the range. */
		VHOST_CONFIG_LOG(dev->ifname, INFO, "\tIOVA range: %" PRIx64 " - %" PRIx64,
				(uint64_t)req.iova.start, (uint64_t)req.iova.last);
		vhost_user_iotlb_cache_remove(dev, req.iova.start,
				req.iova.last - req.iova.start + 1);
		resp.result = VDUSE_REQ_RESULT_OK;
		break;
	default:
		resp.result = VDUSE_REQ_RESULT_FAILED;
		break;
	}

	resp.request_id = req.request_id;

	/* The response must be acknowledged before acting on a status
	 * change, so the kernel is not blocked during start/stop. */
	ret = write(dev->vduse_dev_fd, &resp, sizeof(resp));
	if (ret != sizeof(resp)) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to write response %s",
				strerror(errno));
		return;
	}

	/* Start or stop the device when the DRIVER_OK bit flipped. */
	if ((old_status ^ dev->status) & VIRTIO_DEVICE_STATUS_DRIVER_OK) {
		if (dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)
			vduse_device_start(dev);
		else
			vduse_device_stop(dev);
	}

	VHOST_CONFIG_LOG(dev->ifname, INFO, "Request %s (%u) handled successfully",
			vduse_req_id_to_str(req.type), req.type);
}
421 
/*
 * Create a VDUSE device backed by @path (e.g. "/dev/vduse/<name>"):
 * spawn the shared events thread on first use, register the device
 * with the VDUSE control chardev, allocate the vhost device and its
 * vrings, and hook the device fd into the events fdset.
 *
 * @param path               VDUSE chardev path; must start with
 *                           "/dev/vduse/" (the prefix is skipped to
 *                           derive the device name, without validation)
 * @param compliant_ol_flags forwarded to vhost_setup_virtio_net()
 * @return 0 on success, negative value on failure
 */
int
vduse_device_create(const char *path, bool compliant_ol_flags)
{
	int control_fd, dev_fd, vid, ret;
	rte_thread_t fdset_tid;
	uint32_t i, max_queue_pairs, total_queues;
	struct virtio_net *dev;
	struct virtio_net_config vnet_config = {{ 0 }};
	uint64_t ver = VHOST_VDUSE_API_VERSION;
	uint64_t features;
	struct vduse_dev_config *dev_config = NULL;
	const char *name = path + strlen("/dev/vduse/");

	/* If first device, create events dispatcher thread */
	if (vduse_events_thread == false) {
		/**
		 * create a pipe which will be waited by poll and notified to
		 * rebuild the wait list of poll.
		 */
		if (fdset_pipe_init(&vduse.fdset) < 0) {
			VHOST_CONFIG_LOG(path, ERR, "failed to create pipe for vduse fdset");
			return -1;
		}

		ret = rte_thread_create_internal_control(&fdset_tid, "vduse-evt",
				fdset_event_dispatch, &vduse.fdset);
		if (ret != 0) {
			VHOST_CONFIG_LOG(path, ERR, "failed to create vduse fdset handling thread");
			fdset_pipe_uninit(&vduse.fdset);
			return -1;
		}

		vduse_events_thread = true;
	}

	control_fd = open(VDUSE_CTRL_PATH, O_RDWR);
	if (control_fd < 0) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to open %s: %s",
				VDUSE_CTRL_PATH, strerror(errno));
		return -1;
	}

	if (ioctl(control_fd, VDUSE_SET_API_VERSION, &ver)) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to set API version: %" PRIu64 ": %s",
				ver, strerror(errno));
		ret = -1;
		goto out_ctrl_close;
	}

	/* The config struct carries a flexible trailing 'config' area that
	 * holds the virtio-net config space. */
	dev_config = malloc(offsetof(struct vduse_dev_config, config) +
			sizeof(vnet_config));
	if (!dev_config) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to allocate VDUSE config");
		ret = -1;
		goto out_ctrl_close;
	}

	ret = rte_vhost_driver_get_features(path, &features);
	if (ret < 0) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to get backend features");
		goto out_free;
	}

	ret = rte_vhost_driver_get_queue_num(path, &max_queue_pairs);
	if (ret < 0) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to get max queue pairs");
		goto out_free;
	}

	VHOST_CONFIG_LOG(path, INFO, "VDUSE max queue pairs: %u", max_queue_pairs);
	total_queues = max_queue_pairs * 2;

	/* With a single queue pair, no control queue is exposed and the
	 * related feature bits are masked out. */
	if (max_queue_pairs == 1)
		features &= ~(RTE_BIT64(VIRTIO_NET_F_CTRL_VQ) | RTE_BIT64(VIRTIO_NET_F_MQ));
	else
		total_queues += 1; /* Includes ctrl queue */

	/* NOTE(review): memset covers only the fixed part of the struct;
	 * the trailing config area is fully overwritten by the memcpy
	 * below, so this looks intentional — confirm sizes stay in sync. */
	vnet_config.max_virtqueue_pairs = max_queue_pairs;
	memset(dev_config, 0, sizeof(struct vduse_dev_config));

	strncpy(dev_config->name, name, VDUSE_NAME_MAX - 1);
	dev_config->device_id = VIRTIO_ID_NET;
	dev_config->vendor_id = 0;
	dev_config->features = features;
	dev_config->vq_num = total_queues;
	dev_config->vq_align = sysconf(_SC_PAGE_SIZE);
	dev_config->config_size = sizeof(struct virtio_net_config);
	memcpy(dev_config->config, &vnet_config, sizeof(vnet_config));

	ret = ioctl(control_fd, VDUSE_CREATE_DEV, dev_config);
	if (ret < 0) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to create VDUSE device: %s",
				strerror(errno));
		goto out_free;
	}

	dev_fd = open(path, O_RDWR);
	if (dev_fd < 0) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to open device %s: %s",
				path, strerror(errno));
		ret = -1;
		goto out_dev_close;
	}

	/* NOTE(review): F_SETFL with O_NONBLOCK alone clobbers any other
	 * file status flags; an F_GETFL read-modify-write would be safer —
	 * confirm no other flags are expected on this fd. */
	ret = fcntl(dev_fd, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to set chardev as non-blocking: %s",
				strerror(errno));
		goto out_dev_close;
	}

	vid = vhost_new_device(&vduse_backend_ops);
	if (vid < 0) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to create new Vhost device");
		ret = -1;
		goto out_dev_close;
	}

	dev = get_device(vid);
	if (!dev) {
		/* NOTE(review): this path skips vhost_destroy_device(vid),
		 * leaking the vid slot allocated just above — verify. */
		ret = -1;
		goto out_dev_close;
	}

	strncpy(dev->ifname, path, IF_NAME_SZ - 1);
	dev->vduse_ctrl_fd = control_fd;
	dev->vduse_dev_fd = dev_fd;
	vhost_setup_virtio_net(dev->vid, true, compliant_ol_flags, true, true);

	for (i = 0; i < total_queues; i++) {
		struct vduse_vq_config vq_cfg = { 0 };

		ret = alloc_vring_queue(dev, i);
		if (ret) {
			VHOST_CONFIG_LOG(name, ERR, "Failed to alloc vring %d metadata", i);
			goto out_dev_destroy;
		}

		vq_cfg.index = i;
		vq_cfg.max_size = 1024;

		ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP, &vq_cfg);
		if (ret) {
			VHOST_CONFIG_LOG(name, ERR, "Failed to set-up VQ %d", i);
			goto out_dev_destroy;
		}
	}

	/* NOTE(review): with max_queue_pairs == 1, no ctrl queue was
	 * allocated, so virtqueue[2] is presumably still NULL and cvq ends
	 * up NULL — confirm, or guard with 'max_queue_pairs > 1'. */
	dev->cvq = dev->virtqueue[max_queue_pairs * 2];

	ret = fdset_add(&vduse.fdset, dev->vduse_dev_fd, vduse_events_handler, NULL, dev);
	if (ret) {
		VHOST_CONFIG_LOG(name, ERR, "Failed to add fd %d to vduse fdset",
				dev->vduse_dev_fd);
		goto out_dev_destroy;
	}
	fdset_pipe_notify(&vduse.fdset);

	free(dev_config);

	return 0;

out_dev_destroy:
	vhost_destroy_device(vid);
out_dev_close:
	if (dev_fd >= 0)
		close(dev_fd);
	ioctl(control_fd, VDUSE_DESTROY_DEV, name);
out_free:
	free(dev_config);
out_ctrl_close:
	close(control_fd);

	return ret;
}
597 
/*
 * Destroy the VDUSE device backed by @path: stop it, remove its fd
 * from the events fdset, close the device and control fds, destroy the
 * kernel-side device and release the vhost device.
 *
 * @param path VDUSE chardev path used at creation time
 * @return 0 on success, -1 if no device matches @path
 */
int
vduse_device_destroy(const char *path)
{
	const char *name = path + strlen("/dev/vduse/");
	struct virtio_net *dev;
	int vid, ret;

	/* Look the device up by the path stored in ifname at creation. */
	for (vid = 0; vid < RTE_MAX_VHOST_DEVICE; vid++) {
		dev = vhost_devices[vid];

		if (dev == NULL)
			continue;

		if (!strcmp(path, dev->ifname))
			break;
	}

	if (vid == RTE_MAX_VHOST_DEVICE)
		return -1;

	vduse_device_stop(dev);

	/* Unhook the device fd from the events thread before closing it. */
	fdset_del(&vduse.fdset, dev->vduse_dev_fd);
	fdset_pipe_notify(&vduse.fdset);

	if (dev->vduse_dev_fd >= 0) {
		close(dev->vduse_dev_fd);
		dev->vduse_dev_fd = -1;
	}

	/* Destroy the kernel-side device through the control chardev. */
	if (dev->vduse_ctrl_fd >= 0) {
		ret = ioctl(dev->vduse_ctrl_fd, VDUSE_DESTROY_DEV, name);
		if (ret)
			VHOST_CONFIG_LOG(name, ERR, "Failed to destroy VDUSE device: %s",
					strerror(errno));
		close(dev->vduse_ctrl_fd);
		dev->vduse_ctrl_fd = -1;
	}

	vhost_destroy_device(vid);

	return 0;
}
641