/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"

#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"

const char * const virtio_user_backend_strings[] = {
	[VIRTIO_USER_BACKEND_UNKNOWN] = "VIRTIO_USER_BACKEND_UNKNOWN",
	[VIRTIO_USER_BACKEND_VHOST_USER] = "VHOST_USER",
	[VIRTIO_USER_BACKEND_VHOST_KERNEL] = "VHOST_NET",
	[VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
};

static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all the per-virtqueue messages, make sure
	 * VHOST_USER_SET_VRING_CALL comes first, because vhost depends on
	 * this message to allocate the virtqueue pair.
	 */
	struct vhost_vring_file file;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);

	return 0;
}

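/*
 * Push the full virtqueue state to the backend. The message order
 * matters: VHOST_USER_SET_VRING_NUM, VHOST_USER_SET_VRING_BASE and
 * VHOST_USER_SET_VRING_ADDR must all precede VHOST_USER_SET_VRING_KICK,
 * since the backend may treat the arrival of the kick fd as the sign
 * that the virtqueue is fully set up.
 */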
static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings[queue_sel];
	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		addr.desc_user_addr =
			(uint64_t)(uintptr_t)pq_vring->desc;
		addr.avail_user_addr =
			(uint64_t)(uintptr_t)pq_vring->driver;
		addr.used_user_addr =
			(uint64_t)(uintptr_t)pq_vring->device;
	} else {
		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
	}

	state.index = queue_sel;
	state.num = vring->num;
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);

	state.index = queue_sel;
	state.num = 0; /* no reservation */
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		state.num |= (1 << 15); /* bit 15: initial avail wrap counter */
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);

	dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);

	/* Of all the per-virtqueue messages, make sure
	 * VHOST_USER_SET_VRING_KICK comes last, because vhost depends on
	 * this message to judge whether virtio is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);

	return 0;
}

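/*
 * Apply @fn to every virtqueue, all RX queues first and then all TX
 * queues, following the virtio convention that queue pair i uses
 * index 2*i for RX and 2*i + 1 for TX.
 */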
static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, queue_sel;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup of rx vq %u failed", i);
			return -1;
		}
	}
	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup of tx vq %u failed", i);
			return -1;
		}
	}

	return 0;
}

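/*
 * Feature negotiation with the backend: the virtqueues are created
 * first (vhost needs VHOST_USER_SET_VRING_CALL to allocate them),
 * the bits handled purely on the frontend side are stripped, and the
 * result is sent with VHOST_USER_SET_FEATURES.
 */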
int
virtio_user_dev_set_features(struct virtio_user_dev *dev)
{
	uint64_t features;
	int ret = -1;

	pthread_mutex_lock(&dev->mutex);

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
			dev->vhostfd < 0)
		goto error;

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto error;

	features = dev->features;

	/* Strip VIRTIO_NET_F_MAC, as the MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ, as the device does not need to know about it */
	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
	if (ret < 0)
		goto error;
	PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
error:
	pthread_mutex_unlock(&dev->mutex);

	return ret;
}

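/*
 * Bring the device up: share the memory regions, configure and kick
 * every virtqueue, and enable the first queue pair. Takes the EAL
 * memory read lock and then the device mutex; see the workaround
 * note inside on why this ordering matters.
 */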
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	int ret;

	/*
	 * XXX workaround!
	 *
	 * We need to make sure that the locks will be
	 * taken in the correct order to avoid deadlocks.
	 *
	 * Before releasing this lock, this thread should
	 * not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be
	 * replaced when we get proper support from the
	 * memory subsystem in the future.
	 */
	rte_mcfg_mem_read_lock();
	pthread_mutex_lock(&dev->mutex);

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
			dev->vhostfd < 0)
		goto error;

	/* Step 2: share memory regions */
	ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
		goto error;

	/* Step 4: enable queues
	 * We enable the first queue pair by default.
	 */
	dev->ops->enable_qp(dev, 0, 1);

	dev->started = true;
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();
	/* TODO: free resources here, or let the caller check the return value */
	return -1;
}

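/*
 * Quiesce the backend: disable every queue pair, then fetch each
 * vring's base index with VHOST_USER_GET_VRING_BASE, which also
 * tells the backend to stop processing that ring. Calling this on a
 * device that was never started is a no-op.
 */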
int virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint32_t i;
	int error = 0;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

	for (i = 0; i < dev->max_queue_pairs; ++i)
		dev->ops->enable_qp(dev, i, 0);

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		if (dev->ops->send_request(dev, VHOST_USER_GET_VRING_BASE,
					   &state) < 0) {
			PMD_DRV_LOG(ERR, "get_vring_base failed, index=%u",
				    i);
			error = -1;
			goto out;
		}
	}

	dev->started = false;
out:
	pthread_mutex_unlock(&dev->mutex);

	return error;
}

static inline void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
	struct rte_ether_addr tmp;

	if (!mac)
		return;

	if (rte_ether_unformat_addr(mac, &tmp) == 0) {
		memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
		dev->mac_specified = 1;
	} else {
		/* ignore the malformed MAC and fall back to a random one */
		PMD_DRV_LOG(ERR, "invalid MAC address format: %s", mac);
	}
}

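/*
 * Allocate a callfd/kickfd eventfd pair for every active virtqueue
 * and mark the unused slots with -1. If any eventfd() call fails,
 * all file descriptors allocated so far are closed and -1 is
 * returned.
 */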
static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j;
	int callfd;
	int kickfd;

	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
		if (i >= dev->max_queue_pairs * 2) {
			dev->kickfds[i] = -1;
			dev->callfds[i] = -1;
			continue;
		}

		/* We could pass an invalid fd here, but some backends use
		 * the kickfd and callfd as criteria to judge whether the
		 * device is alive, so we use real eventfds in the end.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
			break;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			close(callfd);
			PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
			break;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	if (i < VIRTIO_MAX_VIRTQUEUES) {
		for (j = 0; j < i; ++j) {
			close(dev->callfds[j]);
			close(dev->kickfds[j]);
		}

		return -1;
	}

	return 0;
}

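/*
 * Wire the per-queue callfds into the ethdev interrupt handle so
 * that applications can wait on RX interrupts, and pick a backend
 * descriptor (the vhost fd, or the listen fd in server mode) for
 * backend-level events.
 */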
static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];

	if (!eth_dev->intr_handle) {
		eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
		if (!eth_dev->intr_handle) {
			PMD_DRV_LOG(ERR, "failed to allocate intr_handle");
			return -1;
		}
		memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
	}

	for (i = 0; i < dev->max_queue_pairs; ++i)
		eth_dev->intr_handle->efds[i] = dev->callfds[i];
	eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
	eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
	eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
	/* For a virtio vdev, there is no need to read the eventfd counter on clean-up */
	eth_dev->intr_handle->efd_counter_size = 0;
	eth_dev->intr_handle->fd = -1;
	if (dev->vhostfd >= 0)
		eth_dev->intr_handle->fd = dev->vhostfd;
	else if (dev->is_server)
		eth_dev->intr_handle->fd = dev->listenfd;

	return 0;
}

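/*
 * Memory hotplug callback: whenever DPDK maps or unmaps hugepages,
 * the backend's view of guest memory has to be refreshed. Pause the
 * active queue pairs, resend the memory table, then resume.
 */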
static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;

	/* ignore externally allocated memory */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (!dev->started)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 0);

	/* Step 2: update memory regions */
	dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);

	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 1);

exit:
	pthread_mutex_unlock(&dev->mutex);
}

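/*
 * Select the backend ops from the backend type (server mode supports
 * vhost-user only), let the backend set itself up, then create the
 * notification fds and fill the interrupt handle.
 */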
static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	uint32_t q;

	dev->vhostfd = -1;
	dev->vhostfds = NULL;
	dev->tapfds = NULL;

	if (dev->is_server) {
		if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
			PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
			return -1;
		}
		dev->ops = &virtio_ops_user;
	} else {
		if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
			dev->ops = &virtio_ops_user;
		} else if (dev->backend_type ==
					VIRTIO_USER_BACKEND_VHOST_KERNEL) {
			dev->ops = &virtio_ops_kernel;

			dev->vhostfds = malloc(dev->max_queue_pairs *
					       sizeof(int));
			dev->tapfds = malloc(dev->max_queue_pairs *
					     sizeof(int));
			if (!dev->vhostfds || !dev->tapfds) {
				PMD_INIT_LOG(ERR, "Failed to allocate FD arrays");
				return -1;
			}

			for (q = 0; q < dev->max_queue_pairs; ++q) {
				dev->vhostfds[q] = -1;
				dev->tapfds[q] = -1;
			}
		} else if (dev->backend_type ==
				VIRTIO_USER_BACKEND_VHOST_VDPA) {
			dev->ops = &virtio_ops_vdpa;
		} else {
			PMD_DRV_LOG(ERR, "Unknown backend type");
			return -1;
		}
	}

	if (dev->ops->setup(dev) < 0)
		return -1;

	if (virtio_user_dev_init_notify(dev) < 0)
		return -1;

	if (virtio_user_fill_intr_handle(dev) < 0)
		return -1;

	return 0;
}

/* Use the macro below to filter features advertised by the vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES			\
	(1ULL << VIRTIO_NET_F_MAC		|	\
	 1ULL << VIRTIO_NET_F_STATUS		|	\
	 1ULL << VIRTIO_NET_F_MQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR	|	\
	 1ULL << VIRTIO_NET_F_CTRL_VQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_RX		|	\
	 1ULL << VIRTIO_NET_F_CTRL_VLAN		|	\
	 1ULL << VIRTIO_NET_F_CSUM		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO4		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO6		|	\
	 1ULL << VIRTIO_NET_F_MRG_RXBUF		|	\
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC	|	\
	 1ULL << VIRTIO_NET_F_GUEST_CSUM	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO4	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO6	|	\
	 1ULL << VIRTIO_F_IN_ORDER		|	\
	 1ULL << VIRTIO_F_VERSION_1		|	\
	 1ULL << VIRTIO_F_RING_PACKED		|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

#define VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES		\
	(1ULL << VHOST_USER_PROTOCOL_F_MQ |		\
	 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |	\
	 1ULL << VHOST_USER_PROTOCOL_F_STATUS)

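/*
 * One-time device initialization. The feature bookkeeping below
 * follows
 *     device_features = (frontend | backend) & ~unsupported
 * where "unsupported" accumulates everything ruled out by the
 * devargs (mrg_rxbuf, in_order, packed_vq, cq, mac) and by the
 * backend's protocol features.
 */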
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int server, int mrg_rxbuf, int in_order, int packed_vq,
		     enum virtio_user_backend_type backend_type)
{
	uint64_t protocol_features = 0;

	pthread_mutex_init(&dev->mutex, NULL);
	strlcpy(dev->path, path, PATH_MAX);
	dev->started = 0;
	dev->max_queue_pairs = queues;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->queue_size = queue_size;
	dev->is_server = server;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
	dev->protocol_features = VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES;
	dev->backend_type = backend_type;

	parse_mac(dev, mac);

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "backend setup failed");
		return -1;
	}

	if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER)
		dev->unsupported_features |=
			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES);

	if (!dev->is_server) {
		if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
					   NULL) < 0) {
			PMD_INIT_LOG(ERR, "set_owner failed: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
					   &dev->device_features) < 0) {
			PMD_INIT_LOG(ERR, "get_features failed: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->device_features &
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
			if (dev->ops->send_request(dev,
					VHOST_USER_GET_PROTOCOL_FEATURES,
					&protocol_features))
				return -1;

			dev->protocol_features &= protocol_features;

			if (dev->ops->send_request(dev,
					VHOST_USER_SET_PROTOCOL_FEATURES,
					&dev->protocol_features))
				return -1;

			if (!(dev->protocol_features &
					(1ULL << VHOST_USER_PROTOCOL_F_MQ)))
				dev->unsupported_features |=
					(1ull << VIRTIO_NET_F_MQ);
		}
	} else {
		/* We just pretend that vhost-user supports all of these
		 * features. Note that this could be a problem if a feature
		 * is negotiated here but turns out to be unsupported by the
		 * vhost-user backend that connects later.
		 */
		dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;

		/* We cannot assume VHOST_USER_PROTOCOL_F_STATUS is supported
		 * until it has been negotiated.
		 */
		dev->protocol_features &=
			~(1ULL << VHOST_USER_PROTOCOL_F_STATUS);
	}

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (!packed_vq)
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* The device does not really need to know anything about the
		 * control queue, so if necessary we just claim to support it.
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend will not report this feature, so we add it explicitly */
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	/*
	 * Device features =
	 *     (frontend_features | backend_features) & ~unsupported_features;
	 */
	dev->device_features |= dev->frontend_features;
	dev->device_features &= ~dev->unsupported_features;

	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
				virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR,
				     "Failed to register mem event callback");
			return -1;
		}
	}

	return 0;
}

void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	uint32_t i;

	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		close(dev->callfds[i]);
		close(dev->kickfds[i]);
	}

	if (dev->vhostfd >= 0)
		close(dev->vhostfd);

	if (dev->is_server && dev->listenfd >= 0) {
		close(dev->listenfd);
		dev->listenfd = -1;
	}

	if (dev->vhostfds) {
		for (i = 0; i < dev->max_queue_pairs; ++i) {
			close(dev->vhostfds[i]);
			if (dev->tapfds[i] >= 0)
				close(dev->tapfds[i]);
		}
		free(dev->vhostfds);
		free(dev->tapfds);
	}

	free(dev->ifname);

	if (dev->is_server)
		unlink(dev->path);
}

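/*
 * Handle a VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET request. The return value
 * is used as a virtio_net_ctrl_ack, where 0 means success and any
 * non-zero value is treated as an error by the driver; returning -1
 * from this uint8_t function therefore yields the 0xff error ack.
 */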
uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
			     q_pairs, dev->max_queue_pairs);
		return -1;
	}

	/* In server mode, queue pairs cannot be enabled while vhostfd is
	 * invalid, so always return 0 in that case.
	 */
	if (!dev->is_server || dev->vhostfd >= 0) {
		for (i = 0; i < q_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 1);
		for (i = q_pairs; i < dev->max_queue_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 0);
	}
	dev->queue_pairs = q_pairs;

	return ret;
}

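/*
 * Execute one control-queue command from a split ring. The command is
 * a descriptor chain: a header descriptor, one or more data
 * descriptors, and a final write-only status descriptor. Returns the
 * number of descriptors consumed so the caller can fill in the used
 * ring element.
 */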
static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	/* data descriptors carry only the NEXT flag here, so the equality
	 * test ends the walk at the write-only status descriptor
	 */
	i = idx_data;
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX  ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

	return n_descs;
}

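/*
 * Per the packed ring layout in the virtio 1.1 spec, a descriptor is
 * available when its AVAIL flag matches the driver's wrap counter
 * while its USED flag does not; both flags flip meaning on every
 * ring wrap.
 */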
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}

static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
				   struct vring_packed *vring,
				   uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t idx_data, idx_status;
	/* initialize to one, header is first */
	uint32_t n_descs = 1;

	/* locate desc for header, data, and status */
	idx_data = idx_hdr + 1;
	if (idx_data >= dev->queue_size)
		idx_data -= dev->queue_size;

	n_descs++;

	idx_status = idx_data;
	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
		idx_status++;
		if (idx_status >= dev->queue_size)
			idx_status -= dev->queue_size;
		n_descs++;
	}

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)
				vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX  ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)
		vring->desc[idx_status].addr = status;

	/* Update used descriptor */
	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
	vring->desc[idx_hdr].len = sizeof(status);

	return n_descs;
}

void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
	uint16_t n_descs, flags;

	/* Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	while (desc_is_avail(&vring->desc[vq->used_idx],
			     vq->used_wrap_counter)) {

		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
				vq->used_idx);

		flags = VRING_DESC_F_WRITE;
		if (vq->used_wrap_counter)
			flags |= VRING_PACKED_DESC_F_AVAIL_USED;

		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
				 __ATOMIC_RELEASE);

		vq->used_idx += n_descs;
		if (vq->used_idx >= dev->queue_size) {
			vq->used_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}
}

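/*
 * Drain the split-ring control queue. Because this code plays the
 * device role, used->idx doubles as the device's consume pointer
 * into the avail ring.
 */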
void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings[queue_idx];

	/* Consume the avail ring, using the used ring index as the first
	 * available position
	 */
	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
	       != vring->avail->idx) {
		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
			    & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx;
		uep->len = n_descs;

		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
	}
}

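/*
 * Propagate the virtio status byte to the backend. Note the payload
 * width differs: vhost-user expects a 64-bit argument, while
 * vhost-vdpa takes the raw 8-bit status.
 */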
int
virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	int ret;
	uint64_t arg = status;

	pthread_mutex_lock(&dev->mutex);
	dev->status = status;
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		ret = dev->ops->send_request(dev,
				VHOST_USER_SET_STATUS, &arg);
	else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
		ret = dev->ops->send_request(dev,
				VHOST_USER_SET_STATUS, &status);
	else
		ret = -ENOTSUP;

	if (ret && ret != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "VHOST_USER_SET_STATUS failed (%d): %s", ret,
			     strerror(errno));
	}

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}

int
virtio_user_dev_update_status(struct virtio_user_dev *dev)
{
	uint64_t ret;
	uint8_t status;
	int err;

	pthread_mutex_lock(&dev->mutex);
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
		err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS, &ret);
		if (!err && ret > UINT8_MAX) {
			PMD_INIT_LOG(ERR, "Invalid VHOST_USER_GET_STATUS "
					"response 0x%" PRIx64, ret);
			err = -1;
			goto error;
		}

		status = ret;
	} else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA) {
		err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS,
				&status);
	} else {
		err = -ENOTSUP;
	}

	if (!err) {
		dev->status = status;
		PMD_INIT_LOG(DEBUG, "Updated device status (0x%08x):\n"
			"\t-RESET: %u\n"
			"\t-ACKNOWLEDGE: %u\n"
			"\t-DRIVER: %u\n"
			"\t-DRIVER_OK: %u\n"
			"\t-FEATURES_OK: %u\n"
			"\t-DEVICE_NEED_RESET: %u\n"
			"\t-FAILED: %u\n",
			dev->status,
			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
	} else if (err != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "VHOST_USER_GET_STATUS failed (%d): %s", err,
			     strerror(errno));
	}

error:
	pthread_mutex_unlock(&dev->mutex);
	return err;
}
893