xref: /dpdk/lib/vhost/vduse.c (revision da7e701151ea8b742d4c38ace3e4fefd1b4507fc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2023 Red Hat, Inc.
3  */
4 
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <unistd.h>
8 #include <fcntl.h>
9 
10 
11 #include <linux/vduse.h>
12 #include <linux/virtio_net.h>
13 
14 #include <sys/ioctl.h>
15 #include <sys/mman.h>
16 #include <sys/stat.h>
17 
18 #include <rte_common.h>
19 #include <rte_thread.h>
20 
21 #include "fd_man.h"
22 #include "iotlb.h"
23 #include "vduse.h"
24 #include "vhost.h"
25 #include "virtio_net_ctrl.h"
26 
27 #define VHOST_VDUSE_API_VERSION 0
28 #define VDUSE_CTRL_PATH "/dev/vduse/control"
29 
/* Per-process VDUSE context: a single fdset shared by all VDUSE devices,
 * polled by the "vduse-evt" dispatcher thread created on first device.
 */
struct vduse {
	struct fdset fdset;
};

static struct vduse vduse = {
	.fdset = {
		/* All slots start empty: fd == -1, no callbacks, not busy. */
		.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
		.fd_mutex = PTHREAD_MUTEX_INITIALIZER,
		.fd_pooling_mutex = PTHREAD_MUTEX_INITIALIZER,
		.num = 0
	},
};
42 
/* Set once the fdset event dispatcher thread has been created
 * (done lazily when the first VDUSE device is created).
 */
static bool vduse_events_thread;

/* Human-readable names for the VDUSE kernel request types,
 * indexed by the request 'type' field.
 */
static const char * const vduse_reqs_str[] = {
	"VDUSE_GET_VQ_STATE",
	"VDUSE_SET_STATUS",
	"VDUSE_UPDATE_IOTLB",
};

/* Map a request type to its name, "Unknown" for out-of-range values.
 * The macro argument is parenthesized so that a compound expression
 * (e.g. 'a + b' or a conditional) is evaluated before the comparison.
 */
#define vduse_req_id_to_str(id) \
	((id) < RTE_DIM(vduse_reqs_str) ? \
	vduse_reqs_str[(id)] : "Unknown")
54 
/* Ask the VDUSE kernel driver to inject an interrupt (used-ring
 * notification) for the given virtqueue.
 *
 * Returns the ioctl() result: 0 on success, -1 on failure.
 */
static int
vduse_inject_irq(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	return ioctl(dev->vduse_dev_fd, VDUSE_VQ_INJECT_IRQ, &vq->index);
}
60 
/* IOTLB entry removal callback: unmap the region mapped by
 * vduse_iotlb_miss(). The mapping covered 'offset + size' bytes
 * starting at 'addr' (the mmap() base), so unmap that full length.
 */
static void
vduse_iotlb_remove_notify(uint64_t addr, uint64_t offset, uint64_t size)
{
	munmap((void *)(uintptr_t)addr, offset + size);
}
66 
67 static int
68 vduse_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm __rte_unused)
69 {
70 	struct vduse_iotlb_entry entry;
71 	uint64_t size, page_size;
72 	struct stat stat;
73 	void *mmap_addr;
74 	int fd, ret;
75 
76 	entry.start = iova;
77 	entry.last = iova + 1;
78 
79 	ret = ioctl(dev->vduse_dev_fd, VDUSE_IOTLB_GET_FD, &entry);
80 	if (ret < 0) {
81 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to get IOTLB entry for 0x%" PRIx64 "\n",
82 				iova);
83 		return -1;
84 	}
85 
86 	fd = ret;
87 
88 	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "New IOTLB entry:\n");
89 	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "\tIOVA: %" PRIx64 " - %" PRIx64 "\n",
90 			(uint64_t)entry.start, (uint64_t)entry.last);
91 	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "\toffset: %" PRIx64 "\n", (uint64_t)entry.offset);
92 	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "\tfd: %d\n", fd);
93 	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "\tperm: %x\n", entry.perm);
94 
95 	size = entry.last - entry.start + 1;
96 	mmap_addr = mmap(0, size + entry.offset, entry.perm, MAP_SHARED, fd, 0);
97 	if (!mmap_addr) {
98 		VHOST_LOG_CONFIG(dev->ifname, ERR,
99 				"Failed to mmap IOTLB entry for 0x%" PRIx64 "\n", iova);
100 		ret = -1;
101 		goto close_fd;
102 	}
103 
104 	ret = fstat(fd, &stat);
105 	if (ret < 0) {
106 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to get page size.\n");
107 		munmap(mmap_addr, entry.offset + size);
108 		goto close_fd;
109 	}
110 	page_size = (uint64_t)stat.st_blksize;
111 
112 	vhost_user_iotlb_cache_insert(dev, entry.start, (uint64_t)(uintptr_t)mmap_addr,
113 		entry.offset, size, page_size, entry.perm);
114 
115 	ret = 0;
116 close_fd:
117 	close(fd);
118 
119 	return ret;
120 }
121 
/* Callbacks plugged into the generic vhost library for VDUSE devices:
 * IOTLB miss/removal handling and interrupt injection.
 */
static struct vhost_backend_ops vduse_backend_ops = {
	.iotlb_miss = vduse_iotlb_miss,
	.iotlb_remove_notify = vduse_iotlb_remove_notify,
	.inject_irq = vduse_inject_irq,
};
127 
128 static void
129 vduse_control_queue_event(int fd, void *arg, int *remove __rte_unused)
130 {
131 	struct virtio_net *dev = arg;
132 	uint64_t buf;
133 	int ret;
134 
135 	ret = read(fd, &buf, sizeof(buf));
136 	if (ret < 0) {
137 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to read control queue event: %s\n",
138 				strerror(errno));
139 		return;
140 	}
141 
142 	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "Control queue kicked\n");
143 	if (virtio_net_ctrl_handle(dev))
144 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to handle ctrl request\n");
145 }
146 
147 static void
148 vduse_vring_setup(struct virtio_net *dev, unsigned int index)
149 {
150 	struct vhost_virtqueue *vq = dev->virtqueue[index];
151 	struct vhost_vring_addr *ra = &vq->ring_addrs;
152 	struct vduse_vq_info vq_info;
153 	struct vduse_vq_eventfd vq_efd;
154 	int ret;
155 
156 	vq_info.index = index;
157 	ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_GET_INFO, &vq_info);
158 	if (ret) {
159 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to get VQ %u info: %s\n",
160 				index, strerror(errno));
161 		return;
162 	}
163 
164 	VHOST_LOG_CONFIG(dev->ifname, INFO, "VQ %u info:\n", index);
165 	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tnum: %u\n", vq_info.num);
166 	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdesc_addr: %llx\n",
167 			(unsigned long long)vq_info.desc_addr);
168 	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdriver_addr: %llx\n",
169 			(unsigned long long)vq_info.driver_addr);
170 	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdevice_addr: %llx\n",
171 			(unsigned long long)vq_info.device_addr);
172 	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tavail_idx: %u\n", vq_info.split.avail_index);
173 	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tready: %u\n", vq_info.ready);
174 
175 	vq->last_avail_idx = vq_info.split.avail_index;
176 	vq->size = vq_info.num;
177 	vq->ready = true;
178 	vq->enabled = vq_info.ready;
179 	ra->desc_user_addr = vq_info.desc_addr;
180 	ra->avail_user_addr = vq_info.driver_addr;
181 	ra->used_user_addr = vq_info.device_addr;
182 
183 	vq->kickfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
184 	if (vq->kickfd < 0) {
185 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to init kickfd for VQ %u: %s\n",
186 				index, strerror(errno));
187 		vq->kickfd = VIRTIO_INVALID_EVENTFD;
188 		return;
189 	}
190 	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tkick fd: %d\n", vq->kickfd);
191 
192 	vq->shadow_used_split = rte_malloc_socket(NULL,
193 				vq->size * sizeof(struct vring_used_elem),
194 				RTE_CACHE_LINE_SIZE, 0);
195 	vq->batch_copy_elems = rte_malloc_socket(NULL,
196 				vq->size * sizeof(struct batch_copy_elem),
197 				RTE_CACHE_LINE_SIZE, 0);
198 
199 	vhost_user_iotlb_rd_lock(vq);
200 	if (vring_translate(dev, vq))
201 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to translate vring %d addresses\n",
202 				index);
203 
204 	if (vhost_enable_guest_notification(dev, vq, 0))
205 		VHOST_LOG_CONFIG(dev->ifname, ERR,
206 				"Failed to disable guest notifications on vring %d\n",
207 				index);
208 	vhost_user_iotlb_rd_unlock(vq);
209 
210 	vq_efd.index = index;
211 	vq_efd.fd = vq->kickfd;
212 
213 	ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd);
214 	if (ret) {
215 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to setup kickfd for VQ %u: %s\n",
216 				index, strerror(errno));
217 		close(vq->kickfd);
218 		vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
219 		return;
220 	}
221 
222 	if (vq == dev->cvq) {
223 		ret = fdset_add(&vduse.fdset, vq->kickfd, vduse_control_queue_event, NULL, dev);
224 		if (ret) {
225 			VHOST_LOG_CONFIG(dev->ifname, ERR,
226 					"Failed to setup kickfd handler for VQ %u: %s\n",
227 					index, strerror(errno));
228 			vq_efd.fd = VDUSE_EVENTFD_DEASSIGN;
229 			ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd);
230 			close(vq->kickfd);
231 			vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
232 		}
233 		fdset_pipe_notify(&vduse.fdset);
234 		vhost_enable_guest_notification(dev, vq, 1);
235 		VHOST_LOG_CONFIG(dev->ifname, INFO, "Ctrl queue event handler installed\n");
236 	}
237 }
238 
239 static void
240 vduse_vring_cleanup(struct virtio_net *dev, unsigned int index)
241 {
242 	struct vhost_virtqueue *vq = dev->virtqueue[index];
243 	struct vduse_vq_eventfd vq_efd;
244 	int ret;
245 
246 	if (vq == dev->cvq && vq->kickfd >= 0) {
247 		fdset_del(&vduse.fdset, vq->kickfd);
248 		fdset_pipe_notify(&vduse.fdset);
249 	}
250 
251 	vq_efd.index = index;
252 	vq_efd.fd = VDUSE_EVENTFD_DEASSIGN;
253 
254 	ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd);
255 	if (ret)
256 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to cleanup kickfd for VQ %u: %s\n",
257 				index, strerror(errno));
258 
259 	close(vq->kickfd);
260 	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
261 
262 	vring_invalidate(dev, vq);
263 
264 	rte_free(vq->batch_copy_elems);
265 	vq->batch_copy_elems = NULL;
266 
267 	rte_free(vq->shadow_used_split);
268 	vq->shadow_used_split = NULL;
269 
270 	vq->enabled = false;
271 	vq->ready = false;
272 	vq->size = 0;
273 	vq->last_used_idx = 0;
274 	vq->last_avail_idx = 0;
275 }
276 
277 static void
278 vduse_device_start(struct virtio_net *dev)
279 {
280 	unsigned int i, ret;
281 
282 	VHOST_LOG_CONFIG(dev->ifname, INFO, "Starting device...\n");
283 
284 	dev->notify_ops = vhost_driver_callback_get(dev->ifname);
285 	if (!dev->notify_ops) {
286 		VHOST_LOG_CONFIG(dev->ifname, ERR,
287 				"Failed to get callback ops for driver\n");
288 		return;
289 	}
290 
291 	ret = ioctl(dev->vduse_dev_fd, VDUSE_DEV_GET_FEATURES, &dev->features);
292 	if (ret) {
293 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to get features: %s\n",
294 				strerror(errno));
295 		return;
296 	}
297 
298 	VHOST_LOG_CONFIG(dev->ifname, INFO, "Negotiated Virtio features: 0x%" PRIx64 "\n",
299 		dev->features);
300 
301 	if (dev->features &
302 		((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
303 		 (1ULL << VIRTIO_F_VERSION_1) |
304 		 (1ULL << VIRTIO_F_RING_PACKED))) {
305 		dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
306 	} else {
307 		dev->vhost_hlen = sizeof(struct virtio_net_hdr);
308 	}
309 
310 	for (i = 0; i < dev->nr_vring; i++)
311 		vduse_vring_setup(dev, i);
312 
313 	dev->flags |= VIRTIO_DEV_READY;
314 
315 	if (dev->notify_ops->new_device(dev->vid) == 0)
316 		dev->flags |= VIRTIO_DEV_RUNNING;
317 
318 	for (i = 0; i < dev->nr_vring; i++) {
319 		struct vhost_virtqueue *vq = dev->virtqueue[i];
320 
321 		if (vq == dev->cvq)
322 			continue;
323 
324 		if (dev->notify_ops->vring_state_changed)
325 			dev->notify_ops->vring_state_changed(dev->vid, i, vq->enabled);
326 	}
327 }
328 
329 static void
330 vduse_device_stop(struct virtio_net *dev)
331 {
332 	unsigned int i;
333 
334 	VHOST_LOG_CONFIG(dev->ifname, INFO, "Stopping device...\n");
335 
336 	vhost_destroy_device_notify(dev);
337 
338 	dev->flags &= ~VIRTIO_DEV_READY;
339 
340 	for (i = 0; i < dev->nr_vring; i++)
341 		vduse_vring_cleanup(dev, i);
342 
343 	vhost_user_iotlb_flush_all(dev);
344 }
345 
/* fdset callback for the VDUSE char device fd: read one request from
 * the kernel, handle it and write back the response.
 *
 * Handles VDUSE_GET_VQ_STATE, VDUSE_SET_STATUS and VDUSE_UPDATE_IOTLB;
 * any other request type is answered with VDUSE_REQ_RESULT_FAILED.
 */
static void
vduse_events_handler(int fd, void *arg, int *remove __rte_unused)
{
	struct virtio_net *dev = arg;
	struct vduse_dev_request req;
	struct vduse_dev_response resp;
	struct vhost_virtqueue *vq;
	uint8_t old_status = dev->status;
	int ret;

	memset(&resp, 0, sizeof(resp));

	/* Requests are fixed-size records; a short read is an error. */
	ret = read(fd, &req, sizeof(req));
	if (ret < 0) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to read request: %s\n",
				strerror(errno));
		return;
	} else if (ret < (int)sizeof(req)) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "Incomplete to read request %d\n", ret);
		return;
	}

	VHOST_LOG_CONFIG(dev->ifname, INFO, "New request: %s (%u)\n",
			vduse_req_id_to_str(req.type), req.type);

	switch (req.type) {
	case VDUSE_GET_VQ_STATE:
		/* Report the last available index seen for this vring. */
		vq = dev->virtqueue[req.vq_state.index];
		VHOST_LOG_CONFIG(dev->ifname, INFO, "\tvq index: %u, avail_index: %u\n",
				req.vq_state.index, vq->last_avail_idx);
		resp.vq_state.split.avail_index = vq->last_avail_idx;
		resp.result = VDUSE_REQ_RESULT_OK;
		break;
	case VDUSE_SET_STATUS:
		/* Record the old status so the DRIVER_OK transition can be
		 * detected after the response is sent.
		 */
		VHOST_LOG_CONFIG(dev->ifname, INFO, "\tnew status: 0x%08x\n",
				req.s.status);
		old_status = dev->status;
		dev->status = req.s.status;
		resp.result = VDUSE_REQ_RESULT_OK;
		break;
	case VDUSE_UPDATE_IOTLB:
		/* Invalidate cached translations covering the IOVA range. */
		VHOST_LOG_CONFIG(dev->ifname, INFO, "\tIOVA range: %" PRIx64 " - %" PRIx64 "\n",
				(uint64_t)req.iova.start, (uint64_t)req.iova.last);
		vhost_user_iotlb_cache_remove(dev, req.iova.start,
				req.iova.last - req.iova.start + 1);
		resp.result = VDUSE_REQ_RESULT_OK;
		break;
	default:
		resp.result = VDUSE_REQ_RESULT_FAILED;
		break;
	}

	/* A response must always be sent, even for unknown request types. */
	resp.request_id = req.request_id;

	ret = write(dev->vduse_dev_fd, &resp, sizeof(resp));
	if (ret != sizeof(resp)) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to write response %s\n",
				strerror(errno));
		return;
	}

	/* Start/stop the device when SET_STATUS toggled DRIVER_OK. This is
	 * done only after the response was written back to the kernel.
	 */
	if ((old_status ^ dev->status) & VIRTIO_DEVICE_STATUS_DRIVER_OK) {
		if (dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)
			vduse_device_start(dev);
		else
			vduse_device_stop(dev);
	}

	/* NOTE(review): this logs success even for unknown request types
	 * that were answered with VDUSE_REQ_RESULT_FAILED above.
	 */
	VHOST_LOG_CONFIG(dev->ifname, INFO, "Request %s (%u) handled successfully\n",
			vduse_req_id_to_str(req.type), req.type);
}
417 
/* Create a VDUSE device for the given '/dev/vduse/<name>' path.
 *
 * On first call, spawns the shared fdset event dispatcher thread. Then
 * opens the VDUSE control device, negotiates the API version, creates
 * the kernel-side device, opens its char device, allocates the vhost
 * device and its vrings, configures each VQ, and registers the device
 * fd in the fdset for request handling.
 *
 * @param path
 *   VDUSE char device path ("/dev/vduse/<name>").
 * @param compliant_ol_flags
 *   Whether to use Virtio-compliant offload flags.
 * @return
 *   0 on success, negative on failure (all acquired resources are
 *   released via the goto-based cleanup chain).
 */
int
vduse_device_create(const char *path, bool compliant_ol_flags)
{
	int control_fd, dev_fd, vid, ret;
	rte_thread_t fdset_tid;
	uint32_t i, max_queue_pairs, total_queues;
	struct virtio_net *dev;
	struct virtio_net_config vnet_config = {{ 0 }};
	uint64_t ver = VHOST_VDUSE_API_VERSION;
	uint64_t features;
	struct vduse_dev_config *dev_config = NULL;
	/* Device name is the path component after the VDUSE prefix. */
	const char *name = path + strlen("/dev/vduse/");

	/* If first device, create events dispatcher thread */
	if (vduse_events_thread == false) {
		/**
		 * create a pipe which will be waited by poll and notified to
		 * rebuild the wait list of poll.
		 */
		if (fdset_pipe_init(&vduse.fdset) < 0) {
			VHOST_LOG_CONFIG(path, ERR, "failed to create pipe for vduse fdset\n");
			return -1;
		}

		ret = rte_thread_create_internal_control(&fdset_tid, "vduse-evt",
				fdset_event_dispatch, &vduse.fdset);
		if (ret != 0) {
			VHOST_LOG_CONFIG(path, ERR, "failed to create vduse fdset handling thread\n");
			fdset_pipe_uninit(&vduse.fdset);
			return -1;
		}

		vduse_events_thread = true;
	}

	control_fd = open(VDUSE_CTRL_PATH, O_RDWR);
	if (control_fd < 0) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to open %s: %s\n",
				VDUSE_CTRL_PATH, strerror(errno));
		return -1;
	}

	if (ioctl(control_fd, VDUSE_SET_API_VERSION, &ver)) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to set API version: %" PRIu64 ": %s\n",
				ver, strerror(errno));
		ret = -1;
		goto out_ctrl_close;
	}

	/* 'config' is a flexible array member; allocate room for the
	 * virtio-net config payload after the fixed header.
	 */
	dev_config = malloc(offsetof(struct vduse_dev_config, config) +
			sizeof(vnet_config));
	if (!dev_config) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to allocate VDUSE config\n");
		ret = -1;
		goto out_ctrl_close;
	}

	ret = rte_vhost_driver_get_features(path, &features);
	if (ret < 0) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to get backend features\n");
		goto out_free;
	}

	ret = rte_vhost_driver_get_queue_num(path, &max_queue_pairs);
	if (ret < 0) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to get max queue pairs\n");
		goto out_free;
	}

	VHOST_LOG_CONFIG(path, INFO, "VDUSE max queue pairs: %u\n", max_queue_pairs);
	total_queues = max_queue_pairs * 2;

	/* Single queue pair: no control queue, so strip the related
	 * feature bits. Otherwise account for the extra control queue.
	 */
	if (max_queue_pairs == 1)
		features &= ~(RTE_BIT64(VIRTIO_NET_F_CTRL_VQ) | RTE_BIT64(VIRTIO_NET_F_MQ));
	else
		total_queues += 1; /* Includes ctrl queue */

	vnet_config.max_virtqueue_pairs = max_queue_pairs;
	memset(dev_config, 0, sizeof(struct vduse_dev_config));

	strncpy(dev_config->name, name, VDUSE_NAME_MAX - 1);
	dev_config->device_id = VIRTIO_ID_NET;
	dev_config->vendor_id = 0;
	dev_config->features = features;
	dev_config->vq_num = total_queues;
	dev_config->vq_align = sysconf(_SC_PAGE_SIZE);
	dev_config->config_size = sizeof(struct virtio_net_config);
	memcpy(dev_config->config, &vnet_config, sizeof(vnet_config));

	ret = ioctl(control_fd, VDUSE_CREATE_DEV, dev_config);
	if (ret < 0) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to create VDUSE device: %s\n",
				strerror(errno));
		goto out_free;
	}

	dev_fd = open(path, O_RDWR);
	if (dev_fd < 0) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to open device %s: %s\n",
				path, strerror(errno));
		ret = -1;
		goto out_dev_close;
	}

	/* Non-blocking so the dispatcher thread never stalls on reads. */
	ret = fcntl(dev_fd, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to set chardev as non-blocking: %s\n",
				strerror(errno));
		goto out_dev_close;
	}

	vid = vhost_new_device(&vduse_backend_ops);
	if (vid < 0) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to create new Vhost device\n");
		ret = -1;
		goto out_dev_close;
	}

	dev = get_device(vid);
	if (!dev) {
		ret = -1;
		goto out_dev_close;
	}

	strncpy(dev->ifname, path, IF_NAME_SZ - 1);
	dev->vduse_ctrl_fd = control_fd;
	dev->vduse_dev_fd = dev_fd;
	vhost_setup_virtio_net(dev->vid, true, compliant_ol_flags, true, true);

	for (i = 0; i < total_queues; i++) {
		struct vduse_vq_config vq_cfg = { 0 };

		ret = alloc_vring_queue(dev, i);
		if (ret) {
			VHOST_LOG_CONFIG(name, ERR, "Failed to alloc vring %d metadata\n", i);
			goto out_dev_destroy;
		}

		vq_cfg.index = i;
		vq_cfg.max_size = 1024;

		ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP, &vq_cfg);
		if (ret) {
			VHOST_LOG_CONFIG(name, ERR, "Failed to set-up VQ %d\n", i);
			goto out_dev_destroy;
		}
	}

	/* NOTE(review): with a single queue pair no ctrl vring is allocated,
	 * so this presumably resolves to a NULL virtqueue slot — confirm
	 * the virtqueue array is zero-initialized beyond allocated entries.
	 */
	dev->cvq = dev->virtqueue[max_queue_pairs * 2];

	ret = fdset_add(&vduse.fdset, dev->vduse_dev_fd, vduse_events_handler, NULL, dev);
	if (ret) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to add fd %d to vduse fdset\n",
				dev->vduse_dev_fd);
		goto out_dev_destroy;
	}
	/* Wake the dispatcher so it rebuilds its poll list. */
	fdset_pipe_notify(&vduse.fdset);

	free(dev_config);

	return 0;

out_dev_destroy:
	vhost_destroy_device(vid);
out_dev_close:
	if (dev_fd >= 0)
		close(dev_fd);
	ioctl(control_fd, VDUSE_DESTROY_DEV, name);
out_free:
	free(dev_config);
out_ctrl_close:
	close(control_fd);

	return ret;
}
593 
594 int
595 vduse_device_destroy(const char *path)
596 {
597 	const char *name = path + strlen("/dev/vduse/");
598 	struct virtio_net *dev;
599 	int vid, ret;
600 
601 	for (vid = 0; vid < RTE_MAX_VHOST_DEVICE; vid++) {
602 		dev = vhost_devices[vid];
603 
604 		if (dev == NULL)
605 			continue;
606 
607 		if (!strcmp(path, dev->ifname))
608 			break;
609 	}
610 
611 	if (vid == RTE_MAX_VHOST_DEVICE)
612 		return -1;
613 
614 	vduse_device_stop(dev);
615 
616 	fdset_del(&vduse.fdset, dev->vduse_dev_fd);
617 	fdset_pipe_notify(&vduse.fdset);
618 
619 	if (dev->vduse_dev_fd >= 0) {
620 		close(dev->vduse_dev_fd);
621 		dev->vduse_dev_fd = -1;
622 	}
623 
624 	if (dev->vduse_ctrl_fd >= 0) {
625 		ret = ioctl(dev->vduse_ctrl_fd, VDUSE_DESTROY_DEV, name);
626 		if (ret)
627 			VHOST_LOG_CONFIG(name, ERR, "Failed to destroy VDUSE device: %s\n",
628 					strerror(errno));
629 		close(dev->vduse_ctrl_fd);
630 		dev->vduse_ctrl_fd = -1;
631 	}
632 
633 	vhost_destroy_device(vid);
634 
635 	return 0;
636 }
637