/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <inttypes.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"

#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"

const char * const virtio_user_backend_strings[] = {
	[VIRTIO_USER_BACKEND_UNKNOWN] = "VIRTIO_USER_BACKEND_UNKNOWN",
	[VIRTIO_USER_BACKEND_VHOST_USER] = "VHOST_USER",
	[VIRTIO_USER_BACKEND_VHOST_KERNEL] = "VHOST_NET",
	[VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
};

static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_CALL
	 * comes first, because vhost depends on this message to allocate the
	 * virtqueue pair.
	 */
	struct vhost_vring_file file;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];

	return dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);
}

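/* Push the full per-virtqueue configuration to the backend: ring size
 * (SET_VRING_NUM), start index (SET_VRING_BASE), ring addresses
 * (SET_VRING_ADDR) and finally the kick eventfd (SET_VRING_KICK), which
 * signals the backend that the queue is ready.
 */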
static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings[queue_sel];
	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		addr.desc_user_addr =
			(uint64_t)(uintptr_t)pq_vring->desc;
		addr.avail_user_addr =
			(uint64_t)(uintptr_t)pq_vring->driver;
		addr.used_user_addr =
			(uint64_t)(uintptr_t)pq_vring->device;
	} else {
		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
	}

	state.index = queue_sel;
	state.num = vring->num;
	if (dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state) < 0)
		return -1;

	state.index = queue_sel;
	state.num = 0; /* start from avail index 0 */
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		state.num |= (1 << 15); /* bit 15 encodes the initial avail wrap counter */
	if (dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state) < 0)
		return -1;

	if (dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr) < 0)
		return -1;

	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
	 * comes last, because vhost depends on this message to judge whether
	 * virtio is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];

	return dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);
}

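/* Apply @fn to every RX queue first, then to every TX queue. Queue indexes
 * interleave RX/TX per pair: 2 * pair + VTNET_SQ_RQ_QUEUE_IDX for RX and
 * 2 * pair + VTNET_SQ_TQ_QUEUE_IDX for TX.
 */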
static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, queue_sel;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(ERR, "setup RX vq %u failed", i);
			return -1;
		}
	}
	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(ERR, "setup TX vq %u failed", i);
			return -1;
		}
	}

	return 0;
}

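/* Negotiate features with the backend. Frontend-only features (MAC,
 * control queue, link status) are stripped first, since they are emulated
 * on the virtio-user side and never need to reach the backend.
 */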
int
virtio_user_dev_set_features(struct virtio_user_dev *dev)
{
	uint64_t features;
	int ret = -1;

	pthread_mutex_lock(&dev->mutex);

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
			dev->vhostfd < 0)
		goto out;

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto out;

	features = dev->features;

	/* Strip VIRTIO_NET_F_MAC, as the MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */
	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	/* Strip VIRTIO_NET_F_STATUS, as the link status is emulated by the
	 * frontend.
	 */
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
	if (ret < 0)
		goto out;
	PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
out:
	pthread_mutex_unlock(&dev->mutex);

	return ret;
}

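/* Bring the device up: share the memory table with the backend, set up and
 * kick every virtqueue, and enable the first queue pair. The step numbering
 * below continues from virtio_user_dev_set_features(), which covers queue
 * creation (step 0) and feature negotiation (step 1).
 */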
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	int ret;

	/*
	 * XXX workaround!
	 *
	 * We need to make sure that the locks will be
	 * taken in the correct order to avoid deadlocks.
	 *
	 * Before releasing this lock, this thread should
	 * not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be
	 * replaced when we get proper support from the
	 * memory subsystem in the future.
	 */
	rte_mcfg_mem_read_lock();
	pthread_mutex_lock(&dev->mutex);

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
			dev->vhostfd < 0)
		goto error;

	/* Step 2: share memory regions */
	ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
		goto error;

	/* Step 4: enable queues
	 * We enable the first queue pair by default.
	 */
	dev->ops->enable_qp(dev, 0, 1);

	dev->started = true;
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();
	/* TODO: free resources here, or let the caller check the result */
	return -1;
}

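/* Quiesce the backend: disable all queue pairs and fetch each vring's base
 * index with GET_VRING_BASE, which stops the corresponding ring.
 */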
int
virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint32_t i;
	int error = 0;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

	for (i = 0; i < dev->max_queue_pairs; ++i)
		dev->ops->enable_qp(dev, i, 0);

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		if (dev->ops->send_request(dev, VHOST_USER_GET_VRING_BASE,
					   &state) < 0) {
			PMD_DRV_LOG(ERR, "get_vring_base failed, index=%u", i);
			error = -1;
			goto out;
		}
	}

	dev->started = false;
out:
	pthread_mutex_unlock(&dev->mutex);

	return error;
}

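/* Parse a user-supplied MAC string into dev->mac_addr; on a malformed
 * string the device keeps its randomly generated address.
 */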
static inline void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
	struct rte_ether_addr tmp;

	if (!mac)
		return;

	if (rte_ether_unformat_addr(mac, &tmp) == 0) {
		memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
		dev->mac_specified = 1;
	} else {
		/* Ignore the malformed MAC; the random one is kept. */
		PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
	}
}

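/* Create one callfd/kickfd eventfd pair per active virtqueue; slots beyond
 * max_queue_pairs * 2 are marked unused with -1. On failure, every fd
 * created so far is closed again.
 */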
static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j;
	int callfd;
	int kickfd;

	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
		if (i >= dev->max_queue_pairs * 2) {
			dev->kickfds[i] = -1;
			dev->callfds[i] = -1;
			continue;
		}

		/* We could pass an invalid fd here, but some backends use
		 * kickfd and callfd to judge whether the device is alive,
		 * so we create real eventfds.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
			break;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			close(callfd);
			PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
			break;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	if (i < VIRTIO_MAX_VIRTQUEUES) {
		/* Only close the fds that were successfully stored above. */
		for (j = 0; j < i; ++j) {
			close(dev->callfds[j]);
			close(dev->kickfds[j]);
		}

		return -1;
	}

	return 0;
}

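/* Populate the ethdev interrupt handle so that RX interrupts are driven by
 * the per-queue callfds, with the vhost (or, in server mode, listen) fd
 * used for device-level events.
 */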
static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];

	if (!eth_dev->intr_handle) {
		eth_dev->intr_handle = calloc(1, sizeof(*eth_dev->intr_handle));
		if (!eth_dev->intr_handle) {
			PMD_DRV_LOG(ERR, "fail to allocate intr_handle");
			return -1;
		}
	}

	for (i = 0; i < dev->max_queue_pairs; ++i)
		eth_dev->intr_handle->efds[i] = dev->callfds[i];
	eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
	eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
	eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
	/* For a virtio vdev, there is no need to read the counter on clean-up */
	eth_dev->intr_handle->efd_counter_size = 0;
	eth_dev->intr_handle->fd = -1;
	if (dev->vhostfd >= 0)
		eth_dev->intr_handle->fd = dev->vhostfd;
	else if (dev->is_server)
		eth_dev->intr_handle->fd = dev->listenfd;

	return 0;
}

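/* Memory hotplug callback: when DPDK memory is allocated or freed, the
 * backend's view of guest memory is refreshed by pausing the active queues,
 * resending SET_MEM_TABLE, and resuming the queues.
 */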
static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;

	/* Ignore externally allocated memory, and be defensive about
	 * addresses that belong to no memseg list at all.
	 */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl == NULL || msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (!dev->started)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 0);

	/* Step 2: update memory regions */
	dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);

	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 1);

exit:
	pthread_mutex_unlock(&dev->mutex);
}

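/* Select the backend ops table from the configured backend type and perform
 * backend-specific setup. Only vhost-user supports server mode; the
 * vhost-kernel backend additionally needs one vhost fd and one tap fd per
 * queue pair.
 */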
static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	uint32_t q;

	dev->vhostfd = -1;
	dev->vhostfds = NULL;
	dev->tapfds = NULL;

	if (dev->is_server) {
		if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
			PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
			return -1;
		}
		dev->ops = &virtio_ops_user;
	} else {
		if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
			dev->ops = &virtio_ops_user;
		} else if (dev->backend_type ==
					VIRTIO_USER_BACKEND_VHOST_KERNEL) {
			dev->ops = &virtio_ops_kernel;

			dev->vhostfds = malloc(dev->max_queue_pairs *
					       sizeof(int));
			dev->tapfds = malloc(dev->max_queue_pairs *
					     sizeof(int));
			if (!dev->vhostfds || !dev->tapfds) {
				PMD_INIT_LOG(ERR, "Failed to allocate FD arrays");
				free(dev->vhostfds);
				free(dev->tapfds);
				dev->vhostfds = NULL;
				dev->tapfds = NULL;
				return -1;
			}

			for (q = 0; q < dev->max_queue_pairs; ++q) {
				dev->vhostfds[q] = -1;
				dev->tapfds[q] = -1;
			}
		} else if (dev->backend_type ==
				VIRTIO_USER_BACKEND_VHOST_VDPA) {
			dev->ops = &virtio_ops_vdpa;
		} else {
			PMD_DRV_LOG(ERR, "Unknown backend type");
			return -1;
		}
	}

	if (dev->ops->setup(dev) < 0)
		return -1;

	if (virtio_user_dev_init_notify(dev) < 0)
		return -1;

	if (virtio_user_fill_intr_handle(dev) < 0)
		return -1;

	return 0;
}

/* Use the macro below to filter features advertised by the vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES			\
	(1ULL << VIRTIO_NET_F_MAC		|	\
	 1ULL << VIRTIO_NET_F_STATUS		|	\
	 1ULL << VIRTIO_NET_F_MQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR	|	\
	 1ULL << VIRTIO_NET_F_CTRL_VQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_RX		|	\
	 1ULL << VIRTIO_NET_F_CTRL_VLAN		|	\
	 1ULL << VIRTIO_NET_F_CSUM		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO4		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO6		|	\
	 1ULL << VIRTIO_NET_F_MRG_RXBUF		|	\
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC	|	\
	 1ULL << VIRTIO_NET_F_GUEST_CSUM	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO4	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO6	|	\
	 1ULL << VIRTIO_F_IN_ORDER		|	\
	 1ULL << VIRTIO_F_VERSION_1		|	\
	 1ULL << VIRTIO_F_RING_PACKED		|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

#define VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES		\
	(1ULL << VHOST_USER_PROTOCOL_F_MQ |		\
	 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |	\
	 1ULL << VHOST_USER_PROTOCOL_F_STATUS)

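/* Initialize a virtio-user device: set up the backend, query (or, in
 * server mode, assume) its feature set, and compute the advertised device
 * features as
 *
 *     device_features = (backend | frontend) & ~unsupported
 *
 * For example (illustrative only): with cq=0, VIRTIO_NET_F_CTRL_VQ and
 * everything that depends on it ends up in unsupported_features and is
 * therefore never advertised, whatever the backend reported.
 */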
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int server, int mrg_rxbuf, int in_order, int packed_vq,
		     enum virtio_user_backend_type backend_type)
{
	uint64_t protocol_features = 0;

	pthread_mutex_init(&dev->mutex, NULL);
	strlcpy(dev->path, path, sizeof(dev->path));
	dev->started = 0;
	dev->max_queue_pairs = queues;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->queue_size = queue_size;
	dev->is_server = server;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
	dev->protocol_features = VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES;
	dev->backend_type = backend_type;

	parse_mac(dev, mac);

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "backend setup failed");
		return -1;
	}

	if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER)
		dev->unsupported_features |=
			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES);

	if (!dev->is_server) {
		if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
					   NULL) < 0) {
			PMD_INIT_LOG(ERR, "set_owner failed: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
					   &dev->device_features) < 0) {
			PMD_INIT_LOG(ERR, "get_features failed: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->device_features &
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
			if (dev->ops->send_request(dev,
					VHOST_USER_GET_PROTOCOL_FEATURES,
					&protocol_features))
				return -1;

			dev->protocol_features &= protocol_features;

			if (dev->ops->send_request(dev,
					VHOST_USER_SET_PROTOCOL_FEATURES,
					&dev->protocol_features))
				return -1;

			if (!(dev->protocol_features &
					(1ULL << VHOST_USER_PROTOCOL_F_MQ)))
				dev->unsupported_features |=
					(1ull << VIRTIO_NET_F_MQ);
		}
	} else {
		/* We just pretend that vhost-user supports all of these
		 * features. Note that this can be problematic if a feature
		 * is negotiated here but turns out to be unsupported by the
		 * vhost-user backend that connects later.
		 */
		dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;

		/* We cannot assume VHOST_USER_PROTOCOL_F_STATUS is supported
		 * until it has been negotiated.
		 */
		dev->protocol_features &=
			~(1ULL << VHOST_USER_PROTOCOL_F_STATUS);
	}

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (!packed_vq)
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* The device does not really need to know anything about
		 * the CQ, so if necessary, we just claim to support it.
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend will not report this feature; add it explicitly */
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	/*
	 * Device features =
	 *     (frontend_features | backend_features) & ~unsupported_features;
	 */
	dev->device_features |= dev->frontend_features;
	dev->device_features &= ~dev->unsupported_features;

	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
				virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR,
				     "Failed to register mem event callback");
			return -1;
		}
	}

	return 0;
}

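/* Tear down the device: stop it, unregister the memory event callback, and
 * release every fd and buffer acquired in virtio_user_dev_init().
 */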
void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	uint32_t i;

	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		close(dev->callfds[i]);
		close(dev->kickfds[i]);
	}

	if (dev->vhostfd >= 0)
		close(dev->vhostfd);

	if (dev->is_server && dev->listenfd >= 0) {
		close(dev->listenfd);
		dev->listenfd = -1;
	}

	if (dev->vhostfds) {
		for (i = 0; i < dev->max_queue_pairs; ++i) {
			if (dev->vhostfds[i] >= 0)
				close(dev->vhostfds[i]);
			if (dev->tapfds[i] >= 0)
				close(dev->tapfds[i]);
		}
		free(dev->vhostfds);
		free(dev->tapfds);
	}

	free(dev->ifname);

	if (dev->is_server)
		unlink(dev->path);
}

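/* Handler for the VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET control command: enable
 * the first q_pairs queue pairs and disable the rest. Returns 0 on success,
 * a non-zero virtio_net_ctrl_ack status on failure.
 */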
uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
			     q_pairs, dev->max_queue_pairs);
		return -1; /* truncates to 0xFF, the control-ack error value */
	}

	/* Server mode cannot enable queue pairs while vhostfd is invalid;
	 * always return 0 (success) in that case.
	 */
	if (!dev->is_server || dev->vhostfd >= 0) {
		for (i = 0; i < q_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 1);
		for (i = q_pairs; i < dev->max_queue_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 0);
	}
	dev->queue_pairs = q_pairs;

	return ret;
}

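/* Process one control-queue request from a split ring. The chain is laid
 * out as header desc -> data desc(s) -> status desc; the status byte is
 * written back in place. Returns the number of descriptors consumed.
 */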
static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	i = idx_data;
	/* walk the chain while the NEXT bit is set (other flags may be set too) */
	while (vring->desc[i].flags & VRING_DESC_F_NEXT) {
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX  ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

	return n_descs;
}

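/* A packed-ring descriptor is available when its AVAIL flag matches the
 * driver's wrap counter and its USED flag does not (see the packed
 * virtqueue section of the virtio spec).
 */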
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}

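/* Packed-ring variant of the control-queue handler: descriptors of a chain
 * are laid out consecutively (wrapping at queue_size), so the data and
 * status slots are found by walking forward from the header.
 */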
static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
				   struct vring_packed *vring,
				   uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t idx_data, idx_status;
	/* initialize to one, header is first */
	uint32_t n_descs = 1;

	/* locate desc for header, data, and status */
	idx_data = idx_hdr + 1;
	if (idx_data >= dev->queue_size)
		idx_data -= dev->queue_size;

	n_descs++;

	idx_status = idx_data;
	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
		idx_status++;
		if (idx_status >= dev->queue_size)
			idx_status -= dev->queue_size;
		n_descs++;
	}

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)
				vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX  ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)
		vring->desc[idx_status].addr = status;

	/* Update used descriptor */
	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
	vring->desc[idx_hdr].len = sizeof(status);

	return n_descs;
}

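/* Drain the packed control queue: handle each available chain, then mark
 * its head descriptor as used with a store-release on the flags so the
 * driver never observes a used flag before the written status.
 */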
void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
	uint16_t n_descs, flags;

	/* Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	while (desc_is_avail(&vring->desc[vq->used_idx],
			     vq->used_wrap_counter)) {
		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
				vq->used_idx);

		flags = VRING_DESC_F_WRITE;
		if (vq->used_wrap_counter)
			flags |= VRING_PACKED_DESC_F_AVAIL_USED;

		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
				 __ATOMIC_RELEASE);

		vq->used_idx += n_descs;
		if (vq->used_idx >= dev->queue_size) {
			vq->used_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}
}

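/* Drain the split control queue: the used index doubles as our private
 * consumer index, so requests are handled until it catches up with the
 * driver's avail index.
 */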
void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings[queue_idx];

	/* Consume the avail ring, using the used ring's index as the
	 * consumer position.
	 */
	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
	       != vring->avail->idx) {
		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
			    & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx;
		uep->len = n_descs;

		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
	}
}

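/* Write the virtio device status to the backend. The payload width differs
 * by backend: vhost-user's SET_STATUS carries a 64-bit value, while
 * vhost-vdpa takes the raw 8-bit status.
 */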
int
virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	int ret;
	uint64_t arg = status;

	pthread_mutex_lock(&dev->mutex);
	dev->status = status;
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		/* vhost-user carries the status in a 64-bit payload */
		ret = dev->ops->send_request(dev,
				VHOST_USER_SET_STATUS, &arg);
	else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
		/* vhost-vdpa expects the raw 8-bit status */
		ret = dev->ops->send_request(dev,
				VHOST_USER_SET_STATUS, &status);
	else
		ret = -ENOTSUP;

	if (ret && ret != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "VHOST_USER_SET_STATUS failed (%d): %s", ret,
			     strerror(errno));
	}

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}

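/* Read the device status back from the backend, mirror it into dev->status
 * and log the decoded status bits.
 */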
int
virtio_user_dev_update_status(struct virtio_user_dev *dev)
{
	uint64_t ret;
	uint8_t status;
	int err;

	pthread_mutex_lock(&dev->mutex);
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
		err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS, &ret);
		if (!err && ret > UINT8_MAX) {
			PMD_INIT_LOG(ERR, "Invalid VHOST_USER_GET_STATUS "
					"response 0x%" PRIx64, ret);
			err = -1;
			goto error;
		}

		status = ret;
	} else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA) {
		err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS,
				&status);
	} else {
		err = -ENOTSUP;
	}

	if (!err) {
		dev->status = status;
		PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n"
			"\t-RESET: %u\n"
			"\t-ACKNOWLEDGE: %u\n"
			"\t-DRIVER: %u\n"
			"\t-DRIVER_OK: %u\n"
			"\t-FEATURES_OK: %u\n"
			"\t-DEVICE_NEED_RESET: %u\n"
			"\t-FAILED: %u",
			dev->status,
			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
	} else if (err != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "VHOST_USER_GET_STATUS failed (%d): %s", err,
			     strerror(errno));
	}

error:
	pthread_mutex_unlock(&dev->mutex);
	return err;
}