/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_alarm.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"

#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"

const char * const virtio_user_backend_strings[] = {
	[VIRTIO_USER_BACKEND_UNKNOWN] = "VIRTIO_USER_BACKEND_UNKNOWN",
	[VIRTIO_USER_BACKEND_VHOST_USER] = "VHOST_USER",
	[VIRTIO_USER_BACKEND_VHOST_KERNEL] = "VHOST_NET",
	[VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
};

static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all per-virtqueue messages, make sure VHOST_SET_VRING_CALL comes
	 * first, because vhost depends on this message to allocate the
	 * virtqueue pair.
	 */
	struct vhost_vring_file file;
	int ret;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];
	ret = dev->ops->set_vring_call(dev, &file);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to create queue %u", dev->path, queue_sel);
		return -1;
	}

	return 0;
}

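/*
 * Configure one virtqueue in the backend: vring size, base index and
 * addresses, then hand over the kick fd. For packed rings, the descriptor,
 * driver and device area addresses are passed instead of desc/avail/used,
 * and (per the vhost message format) bit 15 of the vring base carries the
 * initial available wrap counter, which starts at 1.
 */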
static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	int ret;
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings.split[queue_sel];
	struct vring_packed *pq_vring = &dev->vrings.packed[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	if (queue_sel == dev->max_queue_pairs * 2) {
		if (!dev->scvq) {
			PMD_INIT_LOG(ERR, "(%s) Shadow control queue expected but missing",
					dev->path);
			goto err;
		}

		/* Use shadow control queue information */
		vring = &dev->scvq->vq_split.ring;
		pq_vring = &dev->scvq->vq_packed.ring;
	}

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		addr.desc_user_addr =
			(uint64_t)(uintptr_t)pq_vring->desc;
		addr.avail_user_addr =
			(uint64_t)(uintptr_t)pq_vring->driver;
		addr.used_user_addr =
			(uint64_t)(uintptr_t)pq_vring->device;
	} else {
		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
	}

	state.index = queue_sel;
	state.num = vring->num;
	ret = dev->ops->set_vring_num(dev, &state);
	if (ret < 0)
		goto err;

	state.index = queue_sel;
	state.num = 0; /* no reservation */
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		state.num |= (1 << 15);
	ret = dev->ops->set_vring_base(dev, &state);
	if (ret < 0)
		goto err;

	ret = dev->ops->set_vring_addr(dev, &addr);
	if (ret < 0)
		goto err;

	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
	 * comes last, because vhost relies on this message to judge if
	 * virtio is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];
	ret = dev->ops->set_vring_kick(dev, &file);
	if (ret < 0)
		goto err;

	return 0;
err:
	PMD_INIT_LOG(ERR, "(%s) Failed to kick queue %u", dev->path, queue_sel);

	return -1;
}

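/*
 * Apply fn() to every data virtqueue, plus the control queue when the
 * backend provides one.
 */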
static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, nr_vq;

	nr_vq = dev->max_queue_pairs * 2;
	if (dev->hw_cvq)
		nr_vq++;

	for (i = 0; i < nr_vq; i++) {
		if (fn(dev, i) < 0) {
			PMD_DRV_LOG(ERR, "(%s) setup VQ %u failed", dev->path, i);
			return -1;
		}
	}

	return 0;
}

int
virtio_user_dev_set_features(struct virtio_user_dev *dev)
{
	uint64_t features;
	int ret = -1;

	pthread_mutex_lock(&dev->mutex);

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto error;

	features = dev->features;

	/* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ if the device does not really support control VQ */
	if (!dev->hw_cvq)
		features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	/* VIRTIO_NET_F_STATUS is handled by the frontend (see dev init),
	 * never pass it to the backend.
	 */
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->set_features(dev, features);
	if (ret < 0)
		goto error;
	PMD_DRV_LOG(INFO, "(%s) set features: 0x%" PRIx64, dev->path, features);
error:
	pthread_mutex_unlock(&dev->mutex);

	return ret;
}

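/*
 * Start the backend: share the memory table, configure and kick every
 * virtqueue, then enable the first queue pair. Further queue pairs are
 * enabled through the control queue MQ command.
 */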
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	int ret;

	/*
	 * XXX workaround!
	 *
	 * We need to make sure that the locks will be
	 * taken in the correct order to avoid deadlocks.
	 *
	 * Before releasing this lock, this thread should
	 * not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be
	 * replaced when we get proper support from the
	 * memory subsystem in the future.
	 */
	rte_mcfg_mem_read_lock();
	pthread_mutex_lock(&dev->mutex);

	/* Step 2: share memory regions */
	ret = dev->ops->set_memory_table(dev);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	ret = virtio_user_queue_setup(dev, virtio_user_kick_queue);
	if (ret < 0)
		goto error;

	/* Step 4: enable queues; only the first queue pair is enabled by default */
	ret = dev->ops->enable_qp(dev, 0, 1);
	if (ret < 0)
		goto error;

	dev->started = true;

	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	PMD_INIT_LOG(ERR, "(%s) Failed to start device", dev->path);

	/* TODO: free resource here or caller to check */
	return -1;
}

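/*
 * Stop the backend: disable every queue pair, then retrieve each vring
 * base, which makes the vhost backend stop processing the rings.
 */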
int virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint32_t i;
	int ret;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		ret = dev->ops->enable_qp(dev, i, 0);
		if (ret < 0)
			goto err;
	}

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		ret = dev->ops->get_vring_base(dev, &state);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "(%s) get_vring_base failed, index=%u", dev->path, i);
			goto err;
		}
	}

	dev->started = false;

out:
	pthread_mutex_unlock(&dev->mutex);

	return 0;
err:
	pthread_mutex_unlock(&dev->mutex);

	PMD_INIT_LOG(ERR, "(%s) Failed to stop device", dev->path);

	return -1;
}

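/*
 * Determine the number of queue pairs: a single pair without
 * VIRTIO_NET_F_MQ, the user-requested value when the backend cannot expose
 * its config space, otherwise the device-reported max_virtqueue_pairs.
 */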
static int
virtio_user_dev_init_max_queue_pairs(struct virtio_user_dev *dev, uint32_t user_max_qp)
{
	int ret;

	if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MQ))) {
		dev->max_queue_pairs = 1;
		return 0;
	}

	if (!dev->ops->get_config) {
		dev->max_queue_pairs = user_max_qp;
		return 0;
	}

	ret = dev->ops->get_config(dev, (uint8_t *)&dev->max_queue_pairs,
			offsetof(struct virtio_net_config, max_virtqueue_pairs),
			sizeof(uint16_t));
	if (ret) {
		/*
		 * We need to know the max queue pairs supported by the device
		 * so that the control queue gets the right index.
		 */
		dev->max_queue_pairs = 1;
		PMD_DRV_LOG(ERR, "(%s) Failed to get max queue pairs from device", dev->path);

		return ret;
	}

	return 0;
}

int
virtio_user_dev_set_mac(struct virtio_user_dev *dev)
{
	int ret = 0;

	if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MAC)))
		return -ENOTSUP;

	if (!dev->ops->set_config)
		return -ENOTSUP;

	ret = dev->ops->set_config(dev, dev->mac_addr,
			offsetof(struct virtio_net_config, mac),
			RTE_ETHER_ADDR_LEN);
	if (ret)
		PMD_DRV_LOG(ERR, "(%s) Failed to set MAC address in device", dev->path);

	return ret;
}

int
virtio_user_dev_get_mac(struct virtio_user_dev *dev)
{
	int ret = 0;

	if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MAC)))
		return -ENOTSUP;

	if (!dev->ops->get_config)
		return -ENOTSUP;

	ret = dev->ops->get_config(dev, dev->mac_addr,
			offsetof(struct virtio_net_config, mac),
			RTE_ETHER_ADDR_LEN);
	if (ret)
		PMD_DRV_LOG(ERR, "(%s) Failed to get MAC address from device", dev->path);

	return ret;
}

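/*
 * Resolve the MAC address: a valid devargs MAC is programmed into the
 * device when possible, otherwise the device-provided address is used;
 * if neither is available, the caller falls back to a random address.
 */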
static void
virtio_user_dev_init_mac(struct virtio_user_dev *dev, const char *mac)
{
	struct rte_ether_addr cmdline_mac;
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	if (mac && rte_ether_unformat_addr(mac, &cmdline_mac) == 0) {
		/*
		 * A MAC address was passed on the command line: store it in
		 * the device if supported, otherwise fall back to the
		 * device-provided one.
		 */
		memcpy(dev->mac_addr, &cmdline_mac, RTE_ETHER_ADDR_LEN);
		dev->mac_specified = 1;

		/* Setting the MAC may fail; in that case, get the device one */
		virtio_user_dev_set_mac(dev);
		ret = virtio_user_dev_get_mac(dev);
		if (ret == -ENOTSUP)
			goto out;

		if (memcmp(&cmdline_mac, dev->mac_addr, RTE_ETHER_ADDR_LEN))
			PMD_DRV_LOG(INFO, "(%s) Device MAC update failed", dev->path);
	} else {
		ret = virtio_user_dev_get_mac(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "(%s) No valid MAC in devargs or device, use random",
					dev->path);
			return;
		}

		dev->mac_specified = 1;
	}
out:
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE,
			(struct rte_ether_addr *)dev->mac_addr);
	PMD_DRV_LOG(INFO, "(%s) MAC %s specified", dev->path, buf);
}

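/*
 * Allocate a callfd/kickfd eventfd pair for every virtqueue. On failure,
 * previously opened fds are closed and reset to -1.
 */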
static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j, nr_vq;
	int callfd;
	int kickfd;

	nr_vq = dev->max_queue_pairs * 2;
	if (dev->hw_cvq)
		nr_vq++;

	for (i = 0; i < nr_vq; i++) {
		/* We could pass an invalid-fd flag, but some backends use the
		 * kickfd and callfd as criteria to judge whether the device
		 * is alive, so use real eventfds.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "(%s) callfd error, %s", dev->path, strerror(errno));
			goto err;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			close(callfd);
			PMD_DRV_LOG(ERR, "(%s) kickfd error, %s", dev->path, strerror(errno));
			goto err;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	return 0;
err:
	for (j = 0; j < i; j++) {
		if (dev->kickfds[j] >= 0) {
			close(dev->kickfds[j]);
			dev->kickfds[j] = -1;
		}
		if (dev->callfds[j] >= 0) {
			close(dev->callfds[j]);
			dev->callfds[j] = -1;
		}
	}

	return -1;
}

static void
virtio_user_dev_uninit_notify(struct virtio_user_dev *dev)
{
	uint32_t i, nr_vq;

	/* Mirror virtio_user_dev_init_notify(): also close the control
	 * queue's fds when the backend provides one.
	 */
	nr_vq = dev->max_queue_pairs * 2;
	if (dev->hw_cvq)
		nr_vq++;

	for (i = 0; i < nr_vq; i++) {
		if (dev->kickfds[i] >= 0) {
			close(dev->kickfds[i]);
			dev->kickfds[i] = -1;
		}
		if (dev->callfds[i] >= 0) {
			close(dev->callfds[i]);
			dev->callfds[i] = -1;
		}
	}
}

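/*
 * Plug the per-queue Rx call fds into the ethdev interrupt handle so that
 * Rx interrupts are delivered through the vdev interrupt framework.
 */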
static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	if (eth_dev->intr_handle == NULL) {
		eth_dev->intr_handle =
			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
		if (eth_dev->intr_handle == NULL) {
			PMD_DRV_LOG(ERR, "(%s) failed to allocate intr_handle", dev->path);
			return -1;
		}
	}

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		if (rte_intr_efds_index_set(eth_dev->intr_handle, i,
				dev->callfds[2 * i + VTNET_SQ_RQ_QUEUE_IDX]))
			return -rte_errno;
	}

	if (rte_intr_nb_efd_set(eth_dev->intr_handle, dev->max_queue_pairs))
		return -rte_errno;

	if (rte_intr_max_intr_set(eth_dev->intr_handle,
			dev->max_queue_pairs + 1))
		return -rte_errno;

	if (rte_intr_type_set(eth_dev->intr_handle, RTE_INTR_HANDLE_VDEV))
		return -rte_errno;

	/* For virtio vdev, the eventfd counter does not need to be read to clear the interrupt */
	if (rte_intr_efd_counter_size_set(eth_dev->intr_handle, 0))
		return -rte_errno;

	if (rte_intr_fd_set(eth_dev->intr_handle, dev->ops->get_intr_fd(dev)))
		return -rte_errno;

	return 0;
}

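/*
 * Memory hotplug callback: quiesce the active queue pairs, resend the
 * updated memory table to the backend, then resume the queues.
 */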
static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;
	int ret = 0;

	/* ignore externally allocated memory */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (!dev->started)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++) {
		ret = dev->ops->enable_qp(dev, i, 0);
		if (ret < 0)
			goto exit;
	}

	/* Step 2: update memory regions */
	ret = dev->ops->set_memory_table(dev);
	if (ret < 0)
		goto exit;

	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++) {
		ret = dev->ops->enable_qp(dev, i, 1);
		if (ret < 0)
			goto exit;
	}

exit:
	pthread_mutex_unlock(&dev->mutex);

	if (ret < 0)
		PMD_DRV_LOG(ERR, "(%s) Failed to update memory table", dev->path);
}

static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	if (dev->is_server && dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
		PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
		return -1;
	}

	switch (dev->backend_type) {
	case VIRTIO_USER_BACKEND_VHOST_USER:
		dev->ops = &virtio_ops_user;
		break;
	case VIRTIO_USER_BACKEND_VHOST_KERNEL:
		dev->ops = &virtio_ops_kernel;
		break;
	case VIRTIO_USER_BACKEND_VHOST_VDPA:
		dev->ops = &virtio_ops_vdpa;
		break;
	default:
		PMD_DRV_LOG(ERR, "(%s) Unknown backend type", dev->path);
		return -1;
	}

	if (dev->ops->setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to setup backend", dev->path);
		return -1;
	}

	return 0;
}

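/*
 * Allocate per-vring metadata (fds, ring pointers, queue state). One extra
 * vring is reserved for the control queue when VIRTIO_NET_F_MQ is
 * advertised; all fds start at -1 so cleanup paths only close opened ones.
 */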
static int
virtio_user_alloc_vrings(struct virtio_user_dev *dev)
{
	int i, size, nr_vrings;
	bool packed_ring = !!(dev->device_features & (1ull << VIRTIO_F_RING_PACKED));

	nr_vrings = dev->max_queue_pairs * 2;
	if (dev->device_features & (1ull << VIRTIO_NET_F_MQ))
		nr_vrings++;

	dev->callfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->callfds), 0);
	if (!dev->callfds) {
		PMD_INIT_LOG(ERR, "(%s) Failed to alloc callfds", dev->path);
		return -1;
	}

	dev->kickfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->kickfds), 0);
	if (!dev->kickfds) {
		PMD_INIT_LOG(ERR, "(%s) Failed to alloc kickfds", dev->path);
		goto free_callfds;
	}

	for (i = 0; i < nr_vrings; i++) {
		dev->callfds[i] = -1;
		dev->kickfds[i] = -1;
	}

	if (packed_ring)
		size = sizeof(*dev->vrings.packed);
	else
		size = sizeof(*dev->vrings.split);
	dev->vrings.ptr = rte_zmalloc("virtio_user_dev", nr_vrings * size, 0);
	if (!dev->vrings.ptr) {
		PMD_INIT_LOG(ERR, "(%s) Failed to alloc vrings metadata", dev->path);
		goto free_kickfds;
	}

	if (packed_ring) {
		dev->packed_queues = rte_zmalloc("virtio_user_dev",
				nr_vrings * sizeof(*dev->packed_queues), 0);
		if (!dev->packed_queues) {
			PMD_INIT_LOG(ERR, "(%s) Failed to alloc packed queues metadata",
					dev->path);
			goto free_vrings;
		}
	}

	dev->qp_enabled = rte_zmalloc("virtio_user_dev",
			dev->max_queue_pairs * sizeof(*dev->qp_enabled), 0);
	if (!dev->qp_enabled) {
		PMD_INIT_LOG(ERR, "(%s) Failed to alloc QP enable states", dev->path);
		goto free_packed_queues;
	}

	return 0;

free_packed_queues:
	rte_free(dev->packed_queues);
	dev->packed_queues = NULL;
free_vrings:
	rte_free(dev->vrings.ptr);
	dev->vrings.ptr = NULL;
free_kickfds:
	rte_free(dev->kickfds);
	dev->kickfds = NULL;
free_callfds:
	rte_free(dev->callfds);
	dev->callfds = NULL;

	return -1;
}

static void
virtio_user_free_vrings(struct virtio_user_dev *dev)
{
	rte_free(dev->qp_enabled);
	dev->qp_enabled = NULL;
	rte_free(dev->packed_queues);
	dev->packed_queues = NULL;
	rte_free(dev->vrings.ptr);
	dev->vrings.ptr = NULL;
	rte_free(dev->kickfds);
	dev->kickfds = NULL;
	rte_free(dev->callfds);
	dev->callfds = NULL;
}

/* Use the macro below to filter features from the vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES			\
	(1ULL << VIRTIO_NET_F_MAC		|	\
	 1ULL << VIRTIO_NET_F_STATUS		|	\
	 1ULL << VIRTIO_NET_F_MQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR	|	\
	 1ULL << VIRTIO_NET_F_CTRL_VQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_RX		|	\
	 1ULL << VIRTIO_NET_F_CTRL_VLAN		|	\
	 1ULL << VIRTIO_NET_F_CSUM		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO4		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO6		|	\
	 1ULL << VIRTIO_NET_F_MRG_RXBUF		|	\
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC	|	\
	 1ULL << VIRTIO_NET_F_GUEST_CSUM	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO4	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO6	|	\
	 1ULL << VIRTIO_F_IN_ORDER		|	\
	 1ULL << VIRTIO_F_VERSION_1		|	\
	 1ULL << VIRTIO_F_RING_PACKED)

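/*
 * Device initialization: set up the backend, negotiate and filter features,
 * then allocate vrings, notification fds, the interrupt handle and the
 * memory event callback. Each error path unwinds the steps done so far.
 */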
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int server, int mrg_rxbuf, int in_order, int packed_vq,
		     enum virtio_user_backend_type backend_type)
{
	uint64_t backend_features;

	pthread_mutex_init(&dev->mutex, NULL);
	strlcpy(dev->path, path, PATH_MAX);

	dev->started = 0;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->max_queue_pairs = queues; /* initialize to user requested value for kernel backend */
	dev->queue_size = queue_size;
	dev->is_server = server;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = 0;
	dev->backend_type = backend_type;

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to set up backend", dev->path);
		return -1;
	}

	if (dev->ops->set_owner(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to set backend owner", dev->path);
		goto destroy;
	}

	if (dev->ops->get_backend_features(&backend_features) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get backend features", dev->path);
		goto destroy;
	}

	dev->unsupported_features = ~(VIRTIO_USER_SUPPORTED_FEATURES | backend_features);

	if (dev->ops->get_features(dev, &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get device features", dev->path);
		goto destroy;
	}

	virtio_user_dev_init_mac(dev, mac);

	if (virtio_user_dev_init_max_queue_pairs(dev, queues))
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);

	if (dev->max_queue_pairs > 1)
		cq = 1;

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (!packed_vq)
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* The device does not need to know anything about the CQ,
		 * so if necessary, we just claim to support it.
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend will not report this feature, we add it explicitly */
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	dev->frontend_features &= ~dev->unsupported_features;
	dev->device_features &= ~dev->unsupported_features;

	if (virtio_user_alloc_vrings(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to allocate vring metadata", dev->path);
		goto destroy;
	}

	if (virtio_user_dev_init_notify(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to init notifiers", dev->path);
		goto free_vrings;
	}

	if (virtio_user_fill_intr_handle(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to init interrupt handler", dev->path);
		goto notify_uninit;
	}

	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
				virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR, "(%s) Failed to register mem event callback",
					dev->path);
			goto notify_uninit;
		}
	}

	return 0;

notify_uninit:
	virtio_user_dev_uninit_notify(dev);
free_vrings:
	virtio_user_free_vrings(dev);
destroy:
	dev->ops->destroy(dev);

	return -1;
}

void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	rte_intr_instance_free(eth_dev->intr_handle);
	eth_dev->intr_handle = NULL;

	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	virtio_user_dev_uninit_notify(dev);

	virtio_user_free_vrings(dev);

	free(dev->ifname);

	if (dev->is_server)
		unlink(dev->path);

	dev->ops->destroy(dev);
}

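/*
 * Apply a VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET command: enable the first
 * q_pairs queue pairs and disable the rest. Returns a non-zero
 * virtio_net_ctrl_ack on failure.
 */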
static uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "(%s) multi-q config %u, but only %u supported",
			     dev->path, q_pairs, dev->max_queue_pairs);
		return -1;
	}

	for (i = 0; i < q_pairs; ++i)
		ret |= dev->ops->enable_qp(dev, i, 1);
	for (i = q_pairs; i < dev->max_queue_pairs; ++i)
		ret |= dev->ops->enable_qp(dev, i, 0);

	if (dev->scvq)
		ret |= dev->ops->cvq_enable(dev, 1);

	dev->queue_pairs = q_pairs;

	return ret;
}

#define CVQ_MAX_DATA_DESCS 32

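/*
 * Process one control command from a split virtqueue: the chain is a
 * header descriptor, up to CVQ_MAX_DATA_DESCS data descriptors, then a
 * writable status descriptor. MQ commands are handled locally; other
 * recognized classes are acked and, when a shadow control queue exists,
 * relayed to the device. Returns the number of descriptors consumed.
 */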
static uint32_t
virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;
	int dlen[CVQ_MAX_DATA_DESCS], nb_dlen = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	i = idx_data;
	/* Data descriptors have exactly VRING_DESC_F_NEXT set; the write-only
	 * status descriptor ends the chain.
	 */
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		dlen[nb_dlen++] = vring->desc[i].len;
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX  ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	if (!status && dev->scvq)
		status = virtio_send_command(&dev->scvq->cq,
				(struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

	return n_descs;
}

static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}

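/*
 * Packed ring counterpart of the split control message handler:
 * descriptors are laid out in ring order, so indexes are advanced with
 * manual wraparound, and the used element is written back in place over
 * the header slot.
 */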
static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
				   struct vring_packed *vring,
				   uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t idx_data, idx_status;
	/* initialize to one, header is first */
	uint32_t n_descs = 1;
	int dlen[CVQ_MAX_DATA_DESCS], nb_dlen = 0;

	/* locate desc for header, data, and status */
	idx_data = idx_hdr + 1;
	if (idx_data >= dev->queue_size)
		idx_data -= dev->queue_size;

	n_descs++;

	idx_status = idx_data;
	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
		dlen[nb_dlen++] = vring->desc[idx_status].len;
		idx_status++;
		if (idx_status >= dev->queue_size)
			idx_status -= dev->queue_size;
		n_descs++;
	}

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)
				vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX  ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	if (!status && dev->scvq)
		status = virtio_send_command(&dev->scvq->cq,
				(struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)
		vring->desc[idx_status].addr = status;

	/* Update used descriptor */
	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
	vring->desc[idx_hdr].len = sizeof(status);

	return n_descs;
}

static void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
	struct vring_packed *vring = &dev->vrings.packed[queue_idx];
	uint16_t n_descs, flags;

	/* Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	while (desc_is_avail(&vring->desc[vq->used_idx],
			     vq->used_wrap_counter)) {

		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
				vq->used_idx);

		flags = VRING_DESC_F_WRITE;
		if (vq->used_wrap_counter)
			flags |= VRING_PACKED_DESC_F_AVAIL_USED;

		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
				 __ATOMIC_RELEASE);

		vq->used_idx += n_descs;
		if (vq->used_idx >= dev->queue_size) {
			vq->used_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}
}

static void
virtio_user_handle_cq_split(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings.split[queue_idx];

	/* Consume avail ring, using used ring idx as first one */
	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
	       != vring->avail->idx) {
		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
			    & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg_split(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx;
		uep->len = n_descs;

		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
	}
}

void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	if (virtio_with_packed_queue(&dev->hw))
		virtio_user_handle_cq_packed(dev, queue_idx);
	else
		virtio_user_handle_cq_split(dev, queue_idx);
}

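/*
 * Shadow control queue notify callback: kick the backend by writing to the
 * queue's eventfd.
 */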
static void
virtio_user_control_queue_notify(struct virtqueue *vq, void *cookie)
{
	struct virtio_user_dev *dev = cookie;
	uint64_t buf = 1;

	if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
		PMD_DRV_LOG(ERR, "failed to kick backend: %s",
			    strerror(errno));
}

int
virtio_user_dev_create_shadow_cvq(struct virtio_user_dev *dev, struct virtqueue *vq)
{
	char name[VIRTQUEUE_MAX_NAME_SZ];
	struct virtqueue *scvq;

	snprintf(name, sizeof(name), "port%d_shadow_cvq", vq->hw->port_id);
	scvq = virtqueue_alloc(&dev->hw, vq->vq_queue_index, vq->vq_nentries,
			VTNET_CQ, SOCKET_ID_ANY, name);
	if (!scvq) {
		PMD_INIT_LOG(ERR, "(%s) Failed to alloc shadow control vq", dev->path);
		return -ENOMEM;
	}

	scvq->cq.notify_queue = &virtio_user_control_queue_notify;
	scvq->cq.notify_cookie = dev;
	dev->scvq = scvq;

	return 0;
}

void
virtio_user_dev_destroy_shadow_cvq(struct virtio_user_dev *dev)
{
	if (!dev->scvq)
		return;

	virtqueue_free(dev->scvq);
	dev->scvq = NULL;
}

int
virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	int ret;

	pthread_mutex_lock(&dev->mutex);
	dev->status = status;
	ret = dev->ops->set_status(dev, status);
	if (ret && ret != -ENOTSUP)
		PMD_INIT_LOG(ERR, "(%s) Failed to set backend status", dev->path);

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}

int
virtio_user_dev_update_status(struct virtio_user_dev *dev)
{
	int ret;
	uint8_t status;

	pthread_mutex_lock(&dev->mutex);

	ret = dev->ops->get_status(dev, &status);
	if (!ret) {
		dev->status = status;
		PMD_INIT_LOG(DEBUG, "Updated Device Status (0x%08x):\n"
			"\t-RESET: %u\n"
			"\t-ACKNOWLEDGE: %u\n"
			"\t-DRIVER: %u\n"
			"\t-DRIVER_OK: %u\n"
			"\t-FEATURES_OK: %u\n"
			"\t-DEVICE_NEED_RESET: %u\n"
			"\t-FAILED: %u",
			dev->status,
			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
	} else if (ret != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get backend status", dev->path);
	}

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}

int
virtio_user_dev_update_link_state(struct virtio_user_dev *dev)
{
	if (dev->ops->update_link_state)
		return dev->ops->update_link_state(dev);

	return 0;
}

static void
virtio_user_dev_reset_queues_packed(struct rte_eth_dev *eth_dev)
{
	struct virtio_user_dev *dev = eth_dev->data->dev_private;
	struct virtio_hw *hw = &dev->hw;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq;
	uint16_t i;

	/* Take the lock to avoid queue contention. */
	rte_spinlock_lock(&hw->state_lock);
	hw->started = 0;

	/*
	 * Wait for the datapath to complete before resetting queues.
	 * 1 ms should be enough for the ongoing Tx/Rx functions to finish.
	 */
	rte_delay_ms(1);

	/* Vring reset for each Tx queue and Rx queue. */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxvq = eth_dev->data->rx_queues[i];
		virtqueue_rxvq_reset_packed(virtnet_rxq_to_vq(rxvq));
		virtio_dev_rx_queue_setup_finish(eth_dev, i);
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		txvq = eth_dev->data->tx_queues[i];
		virtqueue_txvq_reset_packed(virtnet_txq_to_vq(txvq));
	}

	hw->started = 1;
	rte_spinlock_unlock(&hw->state_lock);
}

void
virtio_user_dev_delayed_disconnect_handler(void *param)
{
	struct virtio_user_dev *dev = param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	if (rte_intr_disable(eth_dev->intr_handle) < 0) {
		PMD_DRV_LOG(ERR, "interrupt disable failed");
		return;
	}
	PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
		    rte_intr_fd_get(eth_dev->intr_handle));
	if (rte_intr_callback_unregister(eth_dev->intr_handle,
					 virtio_interrupt_handler,
					 eth_dev) != 1)
		PMD_DRV_LOG(ERR, "interrupt unregister failed");

	if (dev->is_server) {
		if (dev->ops->server_disconnect)
			dev->ops->server_disconnect(dev);

		rte_intr_fd_set(eth_dev->intr_handle,
			dev->ops->get_intr_fd(dev));

		PMD_DRV_LOG(DEBUG, "Registering intr fd: %d",
			    rte_intr_fd_get(eth_dev->intr_handle));

		if (rte_intr_callback_register(eth_dev->intr_handle,
					       virtio_interrupt_handler,
					       eth_dev))
			PMD_DRV_LOG(ERR, "interrupt register failed");

		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return;
		}
	}
}

static void
virtio_user_dev_delayed_intr_reconfig_handler(void *param)
{
	struct virtio_user_dev *dev = param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
		    rte_intr_fd_get(eth_dev->intr_handle));

	if (rte_intr_callback_unregister(eth_dev->intr_handle,
					 virtio_interrupt_handler,
					 eth_dev) != 1)
		PMD_DRV_LOG(ERR, "interrupt unregister failed");

	rte_intr_fd_set(eth_dev->intr_handle, dev->ops->get_intr_fd(dev));

	PMD_DRV_LOG(DEBUG, "Registering intr fd: %d",
		    rte_intr_fd_get(eth_dev->intr_handle));

	if (rte_intr_callback_register(eth_dev->intr_handle,
				       virtio_interrupt_handler, eth_dev))
		PMD_DRV_LOG(ERR, "interrupt register failed");

	if (rte_intr_enable(eth_dev->intr_handle) < 0)
		PMD_DRV_LOG(ERR, "interrupt enable failed");
}

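/*
 * Server-mode reconnection: re-establish the backend connection, then
 * replay the virtio initialization sequence (reset, feature negotiation,
 * DRIVER_OK) and restore the multi-queue configuration. Interrupt handler
 * re-registration is deferred via an alarm since this may run from the
 * interrupt context.
 */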
int
virtio_user_dev_server_reconnect(struct virtio_user_dev *dev)
{
	int ret, old_status;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];
	struct virtio_hw *hw = &dev->hw;

	if (!dev->ops->server_reconnect) {
		PMD_DRV_LOG(ERR, "(%s) Missing server reconnect callback", dev->path);
		return -1;
	}

	if (dev->ops->server_reconnect(dev)) {
		PMD_DRV_LOG(ERR, "(%s) Reconnect callback call failed", dev->path);
		return -1;
	}

	old_status = dev->status;

	virtio_reset(hw);

	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);

	if (dev->ops->get_features(dev, &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "get_features failed: %s",
			     strerror(errno));
		return -1;
	}

	/* Mask out features the backend does not support */
	dev->device_features &= ~(dev->unsupported_features);

	dev->features &= (dev->device_features | dev->frontend_features);

	/* For packed ring, resetting queues is required in reconnection. */
	if (virtio_with_packed_queue(hw) &&
	   (old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
		PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
				" when reconnecting with packed ring.");
		virtio_user_dev_reset_queues_packed(eth_dev);
	}

	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);

	/* Start the device */
	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
	if (!dev->started)
		return -1;

	if (dev->queue_pairs > 1) {
		ret = virtio_user_handle_mq(dev, dev->queue_pairs);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Failed to enable multi-queue pairs!");
			return -1;
		}
	}
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		if (rte_intr_disable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt disable failed");
			return -1;
		}
		/*
		 * This function can be called from the interrupt handler, so
		 * we can't unregister the interrupt handler here. Set an
		 * alarm to do it later.
		 */
		rte_eal_alarm_set(1,
			virtio_user_dev_delayed_intr_reconfig_handler,
			(void *)dev);
	}
	PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeded!");
	return 0;
}