/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_alarm.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"

#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"

const char * const virtio_user_backend_strings[] = {
	[VIRTIO_USER_BACKEND_UNKNOWN] = "VIRTIO_USER_BACKEND_UNKNOWN",
	[VIRTIO_USER_BACKEND_VHOST_USER] = "VHOST_USER",
	[VIRTIO_USER_BACKEND_VHOST_KERNEL] = "VHOST_NET",
	[VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
};

static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all per-virtqueue messages, make sure VHOST_SET_VRING_CALL comes
	 * first, because vhost depends on this message to allocate the
	 * virtqueue pair.
	 */
	struct vhost_vring_file file;
	int ret;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];
	ret = dev->ops->set_vring_call(dev, &file);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to create queue %u", dev->path, queue_sel);
		return -1;
	}

	return 0;
}

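/*
 * Configure a single virtqueue in the backend: ring size, base index,
 * ring addresses (split or packed layout depending on the negotiated
 * features), and finally the kick eventfd. Queue index
 * max_queue_pairs * 2 is the shadow control queue, if present.
 */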
static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	int ret;
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings[queue_sel];
	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	if (queue_sel == dev->max_queue_pairs * 2) {
		if (!dev->scvq) {
			PMD_INIT_LOG(ERR, "(%s) Shadow control queue expected but missing",
					dev->path);
			goto err;
		}

		/* Use shadow control queue information */
		vring = &dev->scvq->vq_split.ring;
		pq_vring = &dev->scvq->vq_packed.ring;
	}

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		addr.desc_user_addr =
			(uint64_t)(uintptr_t)pq_vring->desc;
		addr.avail_user_addr =
			(uint64_t)(uintptr_t)pq_vring->driver;
		addr.used_user_addr =
			(uint64_t)(uintptr_t)pq_vring->device;
	} else {
		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
	}

	state.index = queue_sel;
	state.num = vring->num;
	ret = dev->ops->set_vring_num(dev, &state);
	if (ret < 0)
		goto err;

	state.index = queue_sel;
	state.num = 0; /* no reservation */
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		state.num |= (1 << 15);
	ret = dev->ops->set_vring_base(dev, &state);
	if (ret < 0)
		goto err;

	ret = dev->ops->set_vring_addr(dev, &addr);
	if (ret < 0)
		goto err;

	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
	 * comes last, because vhost depends on this message to judge whether
	 * virtio is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];
	ret = dev->ops->set_vring_kick(dev, &file);
	if (ret < 0)
		goto err;

	return 0;
err:
	PMD_INIT_LOG(ERR, "(%s) Failed to kick queue %u", dev->path, queue_sel);

	return -1;
}

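/*
 * Apply fn() to every virtqueue: max_queue_pairs Rx/Tx pairs, plus the
 * control queue when the backend supports one (hw_cvq).
 */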
static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, nr_vq;

	nr_vq = dev->max_queue_pairs * 2;
	if (dev->hw_cvq)
		nr_vq++;

	for (i = 0; i < nr_vq; i++) {
		if (fn(dev, i) < 0) {
			PMD_DRV_LOG(ERR, "(%s) setup VQ %u failed", dev->path, i);
			return -1;
		}
	}

	return 0;
}

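/*
 * Negotiate features with the backend. VIRTIO_NET_F_MAC is stripped
 * because the MAC address is handled during vdev init, VIRTIO_NET_F_STATUS
 * is never passed to the backend, and VIRTIO_NET_F_CTRL_VQ is stripped
 * when the backend has no real control queue.
 */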
int
virtio_user_dev_set_features(struct virtio_user_dev *dev)
{
	uint64_t features;
	int ret = -1;

	pthread_mutex_lock(&dev->mutex);

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto error;

	features = dev->features;

	/* Strip VIRTIO_NET_F_MAC, as the MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ if the device does not really support the control VQ */
	if (!dev->hw_cvq)
		features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->set_features(dev, features);
	if (ret < 0)
		goto error;
	PMD_DRV_LOG(INFO, "(%s) set features: 0x%" PRIx64, dev->path, features);
error:
	pthread_mutex_unlock(&dev->mutex);

	return ret;
}

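/*
 * Start the device: share the memory table with the backend, kick all
 * queues, and enable the first queue pair. The EAL memory read lock is
 * held across the whole sequence so that the memory table cannot change
 * while it is being shared (see the workaround note below).
 */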
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	int ret;

	/*
	 * XXX workaround!
	 *
	 * We need to make sure that the locks will be
	 * taken in the correct order to avoid deadlocks.
	 *
	 * Before releasing this lock, this thread should
	 * not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be
	 * replaced when we get proper support from the
	 * memory subsystem in the future.
	 */
	rte_mcfg_mem_read_lock();
	pthread_mutex_lock(&dev->mutex);

	/* Step 2: share memory regions */
	ret = dev->ops->set_memory_table(dev);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	ret = virtio_user_queue_setup(dev, virtio_user_kick_queue);
	if (ret < 0)
		goto error;

	/* Step 4: enable queues
	 * We enable the 1st queue pair by default.
	 */
	ret = dev->ops->enable_qp(dev, 0, 1);
	if (ret < 0)
		goto error;

	dev->started = true;

	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	PMD_INIT_LOG(ERR, "(%s) Failed to start device", dev->path);

	/* TODO: free resources here or let the caller check */
	return -1;
}

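/*
 * Stop the device: disable all queue pairs, then retrieve the vring base
 * of every ring; for vhost backends this also stops the rings. Stopping
 * an already stopped device is a no-op.
 */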
int virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint32_t i;
	int ret;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		ret = dev->ops->enable_qp(dev, i, 0);
		if (ret < 0)
			goto err;
	}

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		ret = dev->ops->get_vring_base(dev, &state);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "(%s) get_vring_base failed, index=%u", dev->path, i);
			goto err;
		}
	}

	dev->started = false;

out:
	pthread_mutex_unlock(&dev->mutex);

	return 0;
err:
	pthread_mutex_unlock(&dev->mutex);

	PMD_INIT_LOG(ERR, "(%s) Failed to stop device", dev->path);

	return -1;
}

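/*
 * Determine the number of queue pairs. Without VIRTIO_NET_F_MQ a single
 * pair is used. With MQ, the value is read from the device config space
 * when the backend exposes get_config(); otherwise the user-supplied
 * value is trusted.
 */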
static int
virtio_user_dev_init_max_queue_pairs(struct virtio_user_dev *dev, uint32_t user_max_qp)
{
	int ret;

	if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MQ))) {
		dev->max_queue_pairs = 1;
		return 0;
	}

	if (!dev->ops->get_config) {
		dev->max_queue_pairs = user_max_qp;
		return 0;
	}

	ret = dev->ops->get_config(dev, (uint8_t *)&dev->max_queue_pairs,
			offsetof(struct virtio_net_config, max_virtqueue_pairs),
			sizeof(uint16_t));
	if (ret) {
		/*
		 * We need to know the max queue pairs from the device so that
		 * the control queue gets the right index.
		 */
		dev->max_queue_pairs = 1;
		PMD_DRV_LOG(ERR, "(%s) Failed to get max queue pairs from device", dev->path);

		return ret;
	}

	if (dev->max_queue_pairs > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
		/*
		 * If the device supports a control queue, its index is
		 * max_virtqueue_pairs * 2. Disable MQ if the advertised
		 * value exceeds what we support.
		 */
		PMD_DRV_LOG(ERR, "(%s) Device advertises too many queues (%u, max supported %u)",
				dev->path, dev->max_queue_pairs, VIRTIO_MAX_VIRTQUEUE_PAIRS);
		dev->max_queue_pairs = 1;

		return -1;
	}

	return 0;
}

int
virtio_user_dev_set_mac(struct virtio_user_dev *dev)
{
	int ret = 0;

	if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MAC)))
		return -ENOTSUP;

	if (!dev->ops->set_config)
		return -ENOTSUP;

	ret = dev->ops->set_config(dev, dev->mac_addr,
			offsetof(struct virtio_net_config, mac),
			RTE_ETHER_ADDR_LEN);
	if (ret)
		PMD_DRV_LOG(ERR, "(%s) Failed to set MAC address in device", dev->path);

	return ret;
}

int
virtio_user_dev_get_mac(struct virtio_user_dev *dev)
{
	int ret = 0;

	if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MAC)))
		return -ENOTSUP;

	if (!dev->ops->get_config)
		return -ENOTSUP;

	ret = dev->ops->get_config(dev, dev->mac_addr,
			offsetof(struct virtio_net_config, mac),
			RTE_ETHER_ADDR_LEN);
	if (ret)
		PMD_DRV_LOG(ERR, "(%s) Failed to get MAC address from device", dev->path);

	return ret;
}

static void
virtio_user_dev_init_mac(struct virtio_user_dev *dev, const char *mac)
{
	struct rte_ether_addr cmdline_mac;
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	if (mac && rte_ether_unformat_addr(mac, &cmdline_mac) == 0) {
		/*
		 * The MAC address was passed on the command line: try to
		 * store it in the device if it supports it, otherwise fall
		 * back to the device's own address.
		 */
		memcpy(dev->mac_addr, &cmdline_mac, RTE_ETHER_ADDR_LEN);
		dev->mac_specified = 1;

		/* Setting the MAC may fail; read the device one back in that case */
		virtio_user_dev_set_mac(dev);
		ret = virtio_user_dev_get_mac(dev);
		if (ret == -ENOTSUP)
			goto out;

		if (memcmp(&cmdline_mac, dev->mac_addr, RTE_ETHER_ADDR_LEN))
			PMD_DRV_LOG(INFO, "(%s) Device MAC update failed", dev->path);
	} else {
		ret = virtio_user_dev_get_mac(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "(%s) No valid MAC in devargs or device, using a random one",
					dev->path);
			return;
		}

		dev->mac_specified = 1;
	}
out:
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE,
			(struct rte_ether_addr *)dev->mac_addr);
	PMD_DRV_LOG(INFO, "(%s) MAC %s specified", dev->path, buf);
}

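/*
 * Allocate one call eventfd and one kick eventfd per virtqueue. On
 * failure, every eventfd created so far is closed and reset to -1.
 */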
static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j, nr_vq;
	int callfd;
	int kickfd;

	nr_vq = dev->max_queue_pairs * 2;
	if (dev->hw_cvq)
		nr_vq++;

	for (i = 0; i < nr_vq; i++) {
		/* We could get away with an invalid fd, but some backends use
		 * the kickfd and callfd as criteria to judge whether the
		 * device is alive, so we use real eventfds in the end.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "(%s) callfd error, %s", dev->path, strerror(errno));
			goto err;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			close(callfd);
			PMD_DRV_LOG(ERR, "(%s) kickfd error, %s", dev->path, strerror(errno));
			goto err;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	return 0;
err:
	for (j = 0; j < i; j++) {
		if (dev->kickfds[j] >= 0) {
			close(dev->kickfds[j]);
			dev->kickfds[j] = -1;
		}
		if (dev->callfds[j] >= 0) {
			close(dev->callfds[j]);
			dev->callfds[j] = -1;
		}
	}

	return -1;
}

static void
virtio_user_dev_uninit_notify(struct virtio_user_dev *dev)
{
	uint32_t i;

	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		if (dev->kickfds[i] >= 0) {
			close(dev->kickfds[i]);
			dev->kickfds[i] = -1;
		}
		if (dev->callfds[i] >= 0) {
			close(dev->callfds[i]);
			dev->callfds[i] = -1;
		}
	}
}

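/*
 * Plug the per-queue call eventfds (Rx side) into the ethdev interrupt
 * handle so that queue interrupts can be delivered through the standard
 * rte_intr_* API (handle type RTE_INTR_HANDLE_VDEV).
 */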
static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	if (eth_dev->intr_handle == NULL) {
		eth_dev->intr_handle =
			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
		if (eth_dev->intr_handle == NULL) {
			PMD_DRV_LOG(ERR, "(%s) failed to allocate intr_handle", dev->path);
			return -1;
		}
	}

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		if (rte_intr_efds_index_set(eth_dev->intr_handle, i,
				dev->callfds[2 * i + VTNET_SQ_RQ_QUEUE_IDX]))
			return -rte_errno;
	}

	if (rte_intr_nb_efd_set(eth_dev->intr_handle, dev->max_queue_pairs))
		return -rte_errno;

	if (rte_intr_max_intr_set(eth_dev->intr_handle,
			dev->max_queue_pairs + 1))
		return -rte_errno;

	if (rte_intr_type_set(eth_dev->intr_handle, RTE_INTR_HANDLE_VDEV))
		return -rte_errno;

	/* For a virtio vdev, there is no need to read the eventfd counter to clear it */
	if (rte_intr_efd_counter_size_set(eth_dev->intr_handle, 0))
		return -rte_errno;

	if (rte_intr_fd_set(eth_dev->intr_handle, dev->ops->get_intr_fd(dev)))
		return -rte_errno;

	return 0;
}

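/*
 * Memory hotplug callback: when hugepage memory is allocated or freed at
 * runtime, pause the active queue pairs, re-share the updated memory
 * table with the backend, and resume the queues. Externally allocated
 * memory is not shared with the backend and is ignored.
 */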
static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;
	int ret = 0;

	/* ignore externally allocated memory */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (dev->started == false)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++) {
		ret = dev->ops->enable_qp(dev, i, 0);
		if (ret < 0)
			goto exit;
	}

	/* Step 2: update memory regions */
	ret = dev->ops->set_memory_table(dev);
	if (ret < 0)
		goto exit;

	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++) {
		ret = dev->ops->enable_qp(dev, i, 1);
		if (ret < 0)
			goto exit;
	}

exit:
	pthread_mutex_unlock(&dev->mutex);

	if (ret < 0)
		PMD_DRV_LOG(ERR, "(%s) Failed to update memory table", dev->path);
}

static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	if (dev->is_server) {
		if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
			PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
			return -1;
		}
	}

	switch (dev->backend_type) {
	case VIRTIO_USER_BACKEND_VHOST_USER:
		dev->ops = &virtio_ops_user;
		break;
	case VIRTIO_USER_BACKEND_VHOST_KERNEL:
		dev->ops = &virtio_ops_kernel;
		break;
	case VIRTIO_USER_BACKEND_VHOST_VDPA:
		dev->ops = &virtio_ops_vdpa;
		break;
	default:
		PMD_DRV_LOG(ERR, "(%s) Unknown backend type", dev->path);
		return -1;
	}

	if (dev->ops->setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to setup backend", dev->path);
		return -1;
	}

	return 0;
}

/* Use the macro below to filter features from the vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES			\
	(1ULL << VIRTIO_NET_F_MAC		|	\
	 1ULL << VIRTIO_NET_F_STATUS		|	\
	 1ULL << VIRTIO_NET_F_MQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR	|	\
	 1ULL << VIRTIO_NET_F_CTRL_VQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_RX		|	\
	 1ULL << VIRTIO_NET_F_CTRL_VLAN		|	\
	 1ULL << VIRTIO_NET_F_CSUM		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO4		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO6		|	\
	 1ULL << VIRTIO_NET_F_MRG_RXBUF		|	\
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC	|	\
	 1ULL << VIRTIO_NET_F_GUEST_CSUM	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO4	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO6	|	\
	 1ULL << VIRTIO_F_IN_ORDER		|	\
	 1ULL << VIRTIO_F_VERSION_1		|	\
	 1ULL << VIRTIO_F_RING_PACKED)

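/*
 * Initialize a virtio-user device: set up the selected backend, query its
 * features, configure the MAC address and queue pairs, create the
 * kick/call eventfds and interrupt handle, and compute the feature sets
 * (frontend, device, unsupported) used for negotiation. Takes ownership
 * of *ifname.
 */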
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int server, int mrg_rxbuf, int in_order, int packed_vq,
		     enum virtio_user_backend_type backend_type)
{
	uint64_t backend_features;
	int i;

	pthread_mutex_init(&dev->mutex, NULL);
	strlcpy(dev->path, path, PATH_MAX);

	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; i++) {
		dev->kickfds[i] = -1;
		dev->callfds[i] = -1;
	}

	dev->started = 0;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->queue_size = queue_size;
	dev->is_server = server;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = 0;
	dev->backend_type = backend_type;

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to set up backend", dev->path);
		return -1;
	}

	if (dev->ops->set_owner(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to set backend owner", dev->path);
		goto destroy;
	}

	if (dev->ops->get_backend_features(&backend_features) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get backend features", dev->path);
		goto destroy;
	}

	dev->unsupported_features = ~(VIRTIO_USER_SUPPORTED_FEATURES | backend_features);

	if (dev->ops->get_features(dev, &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get device features", dev->path);
		goto destroy;
	}

	virtio_user_dev_init_mac(dev, mac);

	if (virtio_user_dev_init_max_queue_pairs(dev, queues))
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);

	if (dev->max_queue_pairs > 1)
		cq = 1;

	if (virtio_user_dev_init_notify(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to init notifiers", dev->path);
		goto destroy;
	}

	if (virtio_user_fill_intr_handle(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to init interrupt handler", dev->path);
		goto notify_uninit;
	}

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (!packed_vq)
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* The device does not really need to know anything about the
		 * CQ, so if necessary, we just claim to support the CQ.
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend will not report this feature, so we add it explicitly */
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	dev->frontend_features &= ~dev->unsupported_features;
	dev->device_features &= ~dev->unsupported_features;

	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
				virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR, "(%s) Failed to register mem event callback",
					dev->path);
			goto notify_uninit;
		}
	}

	return 0;

notify_uninit:
	virtio_user_dev_uninit_notify(dev);
destroy:
	dev->ops->destroy(dev);

	return -1;
}

void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	rte_intr_instance_free(eth_dev->intr_handle);
	eth_dev->intr_handle = NULL;

	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	virtio_user_dev_uninit_notify(dev);

	free(dev->ifname);

	if (dev->is_server)
		unlink(dev->path);

	dev->ops->destroy(dev);
}

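/*
 * Handle a VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET request: enable the first
 * q_pairs queue pairs (and the shadow control queue if there is one) and
 * disable the rest. Returns 0 on success; any non-zero value is treated
 * as a failed ack by the callers.
 */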
static uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "(%s) multi-q config %u, but only %u supported",
			     dev->path, q_pairs, dev->max_queue_pairs);
		return -1;
	}

	for (i = 0; i < q_pairs; ++i)
		ret |= dev->ops->enable_qp(dev, i, 1);
	for (i = q_pairs; i < dev->max_queue_pairs; ++i)
		ret |= dev->ops->enable_qp(dev, i, 0);

	if (dev->scvq)
		ret |= dev->ops->cvq_enable(dev, 1);

	dev->queue_pairs = q_pairs;

	return ret;
}

#define CVQ_MAX_DATA_DESCS 32

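/*
 * Parse one control command from a split virtqueue: the descriptor chain
 * is header, zero or more data descriptors, then the status byte. MQ
 * commands are handled locally; when a shadow control queue exists, the
 * command is also forwarded to the device. Returns the number of
 * descriptors consumed.
 */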
static uint32_t
virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;
	int dlen[CVQ_MAX_DATA_DESCS], nb_dlen = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	i = idx_data;
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		dlen[nb_dlen++] = vring->desc[i].len;
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX  ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	if (!status && dev->scvq)
		status = virtio_send_command(&dev->scvq->cq,
				(struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

	return n_descs;
}

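/*
 * A packed-ring descriptor is available when its AVAIL flag matches the
 * expected wrap counter and its USED flag does not. The load-acquire on
 * the flags orders the flag check before the descriptor reads.
 */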
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}

static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
				   struct vring_packed *vring,
				   uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t idx_data, idx_status;
	/* initialize to one, header is first */
	uint32_t n_descs = 1;
	int dlen[CVQ_MAX_DATA_DESCS], nb_dlen = 0;

	/* locate desc for header, data, and status */
	idx_data = idx_hdr + 1;
	if (idx_data >= dev->queue_size)
		idx_data -= dev->queue_size;

	n_descs++;

	idx_status = idx_data;
	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
		dlen[nb_dlen++] = vring->desc[idx_status].len;
		idx_status++;
		if (idx_status >= dev->queue_size)
			idx_status -= dev->queue_size;
		n_descs++;
	}

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)
				vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX  ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	if (!status && dev->scvq)
		status = virtio_send_command(&dev->scvq->cq,
				(struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)
		vring->desc[idx_status].addr = status;

	/* Update used descriptor */
	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
	vring->desc[idx_hdr].len = sizeof(status);

	return n_descs;
}

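/*
 * Drain the packed control queue: process every available descriptor
 * chain, then mark it used by publishing the flags with release
 * semantics and advancing used_idx along with its wrap counter.
 */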
static void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
	uint16_t n_descs, flags;

	/* Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	while (desc_is_avail(&vring->desc[vq->used_idx],
			     vq->used_wrap_counter)) {

		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
				vq->used_idx);

		flags = VRING_DESC_F_WRITE;
		if (vq->used_wrap_counter)
			flags |= VRING_PACKED_DESC_F_AVAIL_USED;

		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
				 __ATOMIC_RELEASE);

		vq->used_idx += n_descs;
		if (vq->used_idx >= dev->queue_size) {
			vq->used_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}
}

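/*
 * Drain the split control queue: walk the avail ring starting from the
 * current used index, handle each command, and fill in the used ring
 * entries accordingly.
 */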
static void
virtio_user_handle_cq_split(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings[queue_idx];

	/* Consume the avail ring, using the used ring idx as the first entry to process */
	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
	       != vring->avail->idx) {
		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
			    & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg_split(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx;
		uep->len = n_descs;

		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
	}
}

void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	if (virtio_with_packed_queue(&dev->hw))
		virtio_user_handle_cq_packed(dev, queue_idx);
	else
		virtio_user_handle_cq_split(dev, queue_idx);
}

static void
virtio_user_control_queue_notify(struct virtqueue *vq, void *cookie)
{
	struct virtio_user_dev *dev = cookie;
	uint64_t buf = 1;

	if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
		PMD_DRV_LOG(ERR, "failed to kick backend: %s",
			    strerror(errno));
}

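/*
 * Allocate a shadow control queue mirroring the frontend control queue.
 * Commands received on the frontend queue are replayed on the shadow
 * queue, which notifies the backend through the regular kick eventfd.
 */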
int
virtio_user_dev_create_shadow_cvq(struct virtio_user_dev *dev, struct virtqueue *vq)
{
	char name[VIRTQUEUE_MAX_NAME_SZ];
	struct virtqueue *scvq;

	snprintf(name, sizeof(name), "port%d_shadow_cvq", vq->hw->port_id);
	scvq = virtqueue_alloc(&dev->hw, vq->vq_queue_index, vq->vq_nentries,
			VTNET_CQ, SOCKET_ID_ANY, name);
	if (!scvq) {
		PMD_INIT_LOG(ERR, "(%s) Failed to alloc shadow control vq", dev->path);
		return -ENOMEM;
	}

	scvq->cq.notify_queue = &virtio_user_control_queue_notify;
	scvq->cq.notify_cookie = dev;
	dev->scvq = scvq;

	return 0;
}

void
virtio_user_dev_destroy_shadow_cvq(struct virtio_user_dev *dev)
{
	if (!dev->scvq)
		return;

	virtqueue_free(dev->scvq);
	dev->scvq = NULL;
}

int
virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	int ret;

	pthread_mutex_lock(&dev->mutex);
	dev->status = status;
	ret = dev->ops->set_status(dev, status);
	if (ret && ret != -ENOTSUP)
		PMD_INIT_LOG(ERR, "(%s) Failed to set backend status", dev->path);

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}

int
virtio_user_dev_update_status(struct virtio_user_dev *dev)
{
	int ret;
	uint8_t status;

	pthread_mutex_lock(&dev->mutex);

	ret = dev->ops->get_status(dev, &status);
	if (!ret) {
		dev->status = status;
		PMD_INIT_LOG(DEBUG, "Updated Device Status (0x%08x):\n"
			"\t-RESET: %u\n"
			"\t-ACKNOWLEDGE: %u\n"
			"\t-DRIVER: %u\n"
			"\t-DRIVER_OK: %u\n"
			"\t-FEATURES_OK: %u\n"
			"\t-DEVICE_NEED_RESET: %u\n"
			"\t-FAILED: %u",
			dev->status,
			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
	} else if (ret != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get backend status", dev->path);
	}

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}

int
virtio_user_dev_update_link_state(struct virtio_user_dev *dev)
{
	if (dev->ops->update_link_state)
		return dev->ops->update_link_state(dev);

	return 0;
}

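/*
 * Reset all Rx and Tx packed rings. The datapath is stopped under
 * state_lock while the vrings are re-initialized; packets still in
 * flight are dropped.
 */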
static void
virtio_user_dev_reset_queues_packed(struct rte_eth_dev *eth_dev)
{
	struct virtio_user_dev *dev = eth_dev->data->dev_private;
	struct virtio_hw *hw = &dev->hw;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq;
	uint16_t i;

	/* Add lock to avoid queue contention. */
	rte_spinlock_lock(&hw->state_lock);
	hw->started = 0;

	/*
	 * Wait for the datapath to complete before resetting queues.
	 * 1 ms should be enough for the ongoing Tx/Rx functions to finish.
	 */
	rte_delay_ms(1);

	/* Vring reset for each Tx queue and Rx queue. */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxvq = eth_dev->data->rx_queues[i];
		virtqueue_rxvq_reset_packed(virtnet_rxq_to_vq(rxvq));
		virtio_dev_rx_queue_setup_finish(eth_dev, i);
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		txvq = eth_dev->data->tx_queues[i];
		virtqueue_txvq_reset_packed(virtnet_txq_to_vq(txvq));
	}

	hw->started = 1;
	rte_spinlock_unlock(&hw->state_lock);
}

void
virtio_user_dev_delayed_disconnect_handler(void *param)
{
	struct virtio_user_dev *dev = param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	if (rte_intr_disable(eth_dev->intr_handle) < 0) {
		PMD_DRV_LOG(ERR, "interrupt disable failed");
		return;
	}
	PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
		    rte_intr_fd_get(eth_dev->intr_handle));
	if (rte_intr_callback_unregister(eth_dev->intr_handle,
					 virtio_interrupt_handler,
					 eth_dev) != 1)
		PMD_DRV_LOG(ERR, "interrupt unregister failed");

	if (dev->is_server) {
		if (dev->ops->server_disconnect)
			dev->ops->server_disconnect(dev);

		rte_intr_fd_set(eth_dev->intr_handle,
			dev->ops->get_intr_fd(dev));

		PMD_DRV_LOG(DEBUG, "Registering intr fd: %d",
			    rte_intr_fd_get(eth_dev->intr_handle));

		if (rte_intr_callback_register(eth_dev->intr_handle,
					       virtio_interrupt_handler,
					       eth_dev))
			PMD_DRV_LOG(ERR, "interrupt register failed");

		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return;
		}
	}
}

static void
virtio_user_dev_delayed_intr_reconfig_handler(void *param)
{
	struct virtio_user_dev *dev = param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
		    rte_intr_fd_get(eth_dev->intr_handle));

	if (rte_intr_callback_unregister(eth_dev->intr_handle,
					 virtio_interrupt_handler,
					 eth_dev) != 1)
		PMD_DRV_LOG(ERR, "interrupt unregister failed");

	rte_intr_fd_set(eth_dev->intr_handle, dev->ops->get_intr_fd(dev));

	PMD_DRV_LOG(DEBUG, "Registering intr fd: %d",
		    rte_intr_fd_get(eth_dev->intr_handle));

	if (rte_intr_callback_register(eth_dev->intr_handle,
				       virtio_interrupt_handler, eth_dev))
		PMD_DRV_LOG(ERR, "interrupt register failed");

	if (rte_intr_enable(eth_dev->intr_handle) < 0)
		PMD_DRV_LOG(ERR, "interrupt enable failed");
}

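/*
 * Re-establish the connection to the backend in server mode and replay
 * the initialization sequence: reset, feature negotiation, packed ring
 * reset if the device was running, DRIVER_OK, and multi-queue
 * re-configuration. Interrupt handler re-registration is deferred to an
 * alarm because this may run from interrupt context.
 */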
int
virtio_user_dev_server_reconnect(struct virtio_user_dev *dev)
{
	int ret, old_status;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];
	struct virtio_hw *hw = &dev->hw;

	if (!dev->ops->server_reconnect) {
		PMD_DRV_LOG(ERR, "(%s) Missing server reconnect callback", dev->path);
		return -1;
	}

	if (dev->ops->server_reconnect(dev)) {
		PMD_DRV_LOG(ERR, "(%s) Reconnect callback call failed", dev->path);
		return -1;
	}

	old_status = dev->status;

	virtio_reset(hw);

	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);

	if (dev->ops->get_features(dev, &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "get_features failed: %s",
			     strerror(errno));
		return -1;
	}

	/* Mask out features the backend does not support */
	dev->device_features &= ~(dev->unsupported_features);

	dev->features &= (dev->device_features | dev->frontend_features);

	/* For packed rings, the queues must be reset on reconnection. */
	if (virtio_with_packed_queue(hw) &&
	   (old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
		PMD_INIT_LOG(NOTICE, "Packets in flight will be dropped"
				" when the packed ring reconnects.");
		virtio_user_dev_reset_queues_packed(eth_dev);
	}

	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);

	/* Start the device */
	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
	if (!dev->started)
		return -1;

	if (dev->queue_pairs > 1) {
		ret = virtio_user_handle_mq(dev, dev->queue_pairs);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Failed to enable multi-queue pairs!");
			return -1;
		}
	}
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		if (rte_intr_disable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt disable failed");
			return -1;
		}
		/*
		 * This function can be called from the interrupt handler, so
		 * we can't unregister the interrupt handler here. Set an
		 * alarm to do it later.
		 */
		rte_eal_alarm_set(1,
			virtio_user_dev_delayed_intr_reconfig_handler,
			(void *)dev);
	}
	PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeded!");
	return 0;
}