/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/memory.h"
#include "spdk/barrier.h"
#include "spdk/vhost.h"
#include "vhost_internal.h"
#include <rte_version.h>

#include "spdk_internal/vhost_user.h"

/* Path to the directory where the vhost-user domain sockets will be created. Can be set by user. */
static char g_vhost_user_dev_dirname[PATH_MAX] = "";

static struct spdk_thread *g_vhost_user_init_thread;

/**
 * DPDK calls our callbacks synchronously but the work those callbacks
 * perform needs to be async. Luckily, all DPDK callbacks are called on
 * a DPDK-internal pthread, so we'll just wait on a semaphore in there.
 */
static sem_t g_dpdk_sem;

/** Return code for the current DPDK callback */
static int g_dpdk_response;

struct vhost_session_fn_ctx {
	/** Device pointer obtained before enqueueing the event */
	struct spdk_vhost_dev *vdev;

	/** ID of the session to send event to. */
	uint32_t vsession_id;

	/** User provided function to be executed on session's thread. */
	spdk_vhost_session_fn cb_fn;

	/**
	 * User provided function to be called on the init thread
	 * after iterating through all sessions.
	 */
	spdk_vhost_dev_fn cpl_fn;

	/** Custom user context */
	void *user_ctx;
};

static struct spdk_vhost_user_dev *
to_user_dev(struct spdk_vhost_dev *vdev)
{
	assert(vdev != NULL);
	return vdev->ctxt;
}

static void
__attribute__((constructor))
_vhost_user_sem_init(void)
{
	if (sem_init(&g_dpdk_sem, 0, 0) != 0) {
		SPDK_ERRLOG("Failed to initialize semaphore for rte_vhost pthread.\n");
		abort();
	}
}

static void
__attribute__((destructor))
_vhost_user_sem_destroy(void)
{
	sem_destroy(&g_dpdk_sem);
}

void *
vhost_gpa_to_vva(struct spdk_vhost_session *vsession, uint64_t addr, uint64_t len)
{
	void *vva;
	uint64_t newlen;

	newlen = len;
	vva = (void *)rte_vhost_va_from_guest_pa(vsession->mem, addr, &newlen);
	if (newlen != len) {
		return NULL;
	}

	return vva;
}

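/*
 * A minimal usage sketch for vhost_gpa_to_vva() (illustrative only; the
 * `desc` and `data` names below are hypothetical). A backend that wants to
 * touch a guest buffer described by a vring descriptor must translate the
 * guest-physical address first and treat a NULL result as an unmappable
 * buffer, e.g. one crossing a memory-region boundary:
 *
 *	void *buf = vhost_gpa_to_vva(vsession, desc->addr, desc->len);
 *	if (buf == NULL) {
 *		// [addr, addr + len) does not fit in one contiguous
 *		// mapped region - fail the request.
 *		return -1;
 *	}
 *	memcpy(buf, data, desc->len);
 */
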
static void
vhost_log_req_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *virtqueue,
		   uint16_t req_id)
{
	struct vring_desc *desc, *desc_table;
	uint32_t desc_table_size;
	int rc;

	if (spdk_likely(!vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
		return;
	}

	rc = vhost_vq_get_desc(vsession, virtqueue, req_id, &desc, &desc_table, &desc_table_size);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Can't log used ring descriptors!\n");
		return;
	}

	do {
		if (vhost_vring_desc_is_wr(desc)) {
			/* Strictly speaking, only the pages really touched should be
			 * logged, but doing so would require tracking those writes in
			 * each backend. The backend will most likely touch all or most
			 * of those pages anyway, so assume every page passed to us as
			 * a writable buffer was touched. */
			rte_vhost_log_write(vsession->vid, desc->addr, desc->len);
		}
		vhost_vring_desc_get_next(&desc, desc_table, desc_table_size);
	} while (desc);
}

static void
vhost_log_used_vring_elem(struct spdk_vhost_session *vsession,
			  struct spdk_vhost_virtqueue *virtqueue,
			  uint16_t idx)
{
	uint64_t offset, len;

	if (spdk_likely(!vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
		return;
	}

	if (spdk_unlikely(virtqueue->packed.packed_ring)) {
		offset = idx * sizeof(struct vring_packed_desc);
		len = sizeof(struct vring_packed_desc);
	} else {
		offset = offsetof(struct vring_used, ring[idx]);
		len = sizeof(virtqueue->vring.used->ring[idx]);
	}

	rte_vhost_log_used_vring(vsession->vid, virtqueue->vring_idx, offset, len);
}

static void
vhost_log_used_vring_idx(struct spdk_vhost_session *vsession,
			 struct spdk_vhost_virtqueue *virtqueue)
{
	uint64_t offset, len;
	uint16_t vq_idx;

	if (spdk_likely(!vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
		return;
	}

	offset = offsetof(struct vring_used, idx);
	len = sizeof(virtqueue->vring.used->idx);
	vq_idx = virtqueue - vsession->virtqueue;

	rte_vhost_log_used_vring(vsession->vid, vq_idx, offset, len);
}

/*
 * Get available requests from avail ring.
 */
uint16_t
vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *virtqueue, uint16_t *reqs,
			uint16_t reqs_len)
{
	struct rte_vhost_vring *vring = &virtqueue->vring;
	struct vring_avail *avail = vring->avail;
	uint16_t size_mask = vring->size - 1;
	uint16_t last_idx = virtqueue->last_avail_idx, avail_idx = avail->idx;
	uint16_t count, i;
	int rc;
	uint64_t u64_value;

	spdk_smp_rmb();

	if (virtqueue->vsession && spdk_unlikely(virtqueue->vsession->interrupt_mode)) {
		/* Read to clear vring's kickfd */
		rc = read(vring->kickfd, &u64_value, sizeof(u64_value));
		if (rc < 0) {
			/* This function returns an unsigned count, so a negative errno
			 * cannot be propagated here. Log the failure and report no new
			 * entries for this poll. */
			SPDK_ERRLOG("failed to acknowledge kickfd: %s.\n", spdk_strerror(errno));
			return 0;
		}
	}

	count = avail_idx - last_idx;
	if (spdk_likely(count == 0)) {
		return 0;
	}

	if (spdk_unlikely(count > vring->size)) {
		/* TODO: the queue is unrecoverably broken and should be marked so.
		 * For now we will fail silently and report there are no new avail entries.
		 */
		return 0;
	}

	count = spdk_min(count, reqs_len);

	virtqueue->last_avail_idx += count;
	/* Check whether there are unprocessed reqs in vq, then kick vq manually */
	if (virtqueue->vsession && spdk_unlikely(virtqueue->vsession->interrupt_mode)) {
		/* If avail_idx is larger than virtqueue's last_avail_idx, there are
		 * still unprocessed reqs. avail_idx must be re-read from memory here,
		 * in case of a race condition with the guest.
		 */
		avail_idx = * (volatile uint16_t *) &avail->idx;
		if (avail_idx > virtqueue->last_avail_idx) {
			/* Write to notify vring's kickfd */
			rc = write(vring->kickfd, &u64_value, sizeof(u64_value));
			if (rc < 0) {
				/* The self-kick is a best-effort wakeup; the entries already
				 * claimed above remain valid, so only log the failure. */
				SPDK_ERRLOG("failed to kick vring: %s.\n", spdk_strerror(errno));
			}
		}
	}

	for (i = 0; i < count; i++) {
		reqs[i] = vring->avail->ring[(last_idx + i) & size_mask];
	}

	SPDK_DEBUGLOG(vhost_ring,
		      "AVAIL: last_idx=%"PRIu16" avail_idx=%"PRIu16" count=%"PRIu16"\n",
		      last_idx, avail_idx, count);

	return count;
}

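/*
 * A typical polling loop built on top of vhost_vq_avail_ring_get() looks
 * roughly like the sketch below (illustrative only; process_request() is a
 * hypothetical backend handler):
 *
 *	uint16_t reqs[32];
 *	uint16_t count, i;
 *
 *	count = vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
 *	for (i = 0; i < count; i++) {
 *		process_request(vsession, vq, reqs[i]);
 *	}
 */
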
static bool
vhost_vring_desc_is_indirect(struct vring_desc *cur_desc)
{
	return !!(cur_desc->flags & VRING_DESC_F_INDIRECT);
}

static bool
vhost_vring_packed_desc_is_indirect(struct vring_packed_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_INDIRECT) != 0;
}

static bool
vhost_inflight_packed_desc_is_indirect(spdk_vhost_inflight_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_INDIRECT) != 0;
}

int
vhost_vq_get_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *virtqueue,
		  uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
		  uint32_t *desc_table_size)
{
	if (spdk_unlikely(req_idx >= virtqueue->vring.size)) {
		return -1;
	}

	*desc = &virtqueue->vring.desc[req_idx];

	if (vhost_vring_desc_is_indirect(*desc)) {
		*desc_table_size = (*desc)->len / sizeof(**desc);
		*desc_table = vhost_gpa_to_vva(vsession, (*desc)->addr,
					       sizeof(**desc) * *desc_table_size);
		*desc = *desc_table;
		if (*desc == NULL) {
			return -1;
		}

		return 0;
	}

	*desc_table = virtqueue->vring.desc;
	*desc_table_size = virtqueue->vring.size;

	return 0;
}

static bool
vhost_packed_desc_indirect_to_desc_table(struct spdk_vhost_session *vsession,
		uint64_t addr, uint32_t len,
		struct vring_packed_desc **desc_table,
		uint32_t *desc_table_size)
{
	*desc_table_size = len / sizeof(struct vring_packed_desc);

	*desc_table = vhost_gpa_to_vva(vsession, addr, len);
	if (spdk_unlikely(*desc_table == NULL)) {
		return false;
	}

	return true;
}

int
vhost_vq_get_desc_packed(struct spdk_vhost_session *vsession,
			 struct spdk_vhost_virtqueue *virtqueue,
			 uint16_t req_idx, struct vring_packed_desc **desc,
			 struct vring_packed_desc **desc_table, uint32_t *desc_table_size)
{
	*desc = &virtqueue->vring.desc_packed[req_idx];

	/* In a packed ring, when the desc is non-indirect, the next desc is
	 * found by checking (desc->flags & VRING_DESC_F_NEXT) != 0. When the
	 * desc is indirect, the next desc is found by idx and desc_table_size.
	 * This differs from the split ring.
	 */
	if (vhost_vring_packed_desc_is_indirect(*desc)) {
		if (!vhost_packed_desc_indirect_to_desc_table(vsession, (*desc)->addr, (*desc)->len,
				desc_table, desc_table_size)) {
			return -1;
		}

		*desc = *desc_table;
	} else {
		*desc_table = NULL;
		*desc_table_size = 0;
	}

	return 0;
}

int
vhost_inflight_queue_get_desc(struct spdk_vhost_session *vsession,
			      spdk_vhost_inflight_desc *desc_array,
			      uint16_t req_idx, spdk_vhost_inflight_desc **desc,
			      struct vring_packed_desc **desc_table, uint32_t *desc_table_size)
{
	*desc = &desc_array[req_idx];

	if (vhost_inflight_packed_desc_is_indirect(*desc)) {
		if (!vhost_packed_desc_indirect_to_desc_table(vsession, (*desc)->addr, (*desc)->len,
				desc_table, desc_table_size)) {
			return -1;
		}

		/* This desc is an inflight desc, not a packed desc. When
		 * F_INDIRECT is set, the table entries are packed descs,
		 * so set the inflight desc to NULL.
		 */
		*desc = NULL;
	} else {
		/* When F_INDIRECT is not set, there is no packed desc table */
		*desc_table = NULL;
		*desc_table_size = 0;
	}

	return 0;
}

int
vhost_vq_used_signal(struct spdk_vhost_session *vsession,
		     struct spdk_vhost_virtqueue *virtqueue)
{
	/* The flag is true while the DPDK "vhost-events" thread is holding all
	 * VQ access locks. In that case we skip posting IRQs in this polling
	 * round and try again on the next poll or after the device is started
	 * again.
	 */
	if (spdk_unlikely(vsession->skip_used_signal)) {
		return 0;
	}

	if (virtqueue->used_req_cnt == 0) {
		return 0;
	}

	SPDK_DEBUGLOG(vhost_ring,
		      "Queue %td - USED RING: sending IRQ: last used %"PRIu16"\n",
		      virtqueue - vsession->virtqueue, virtqueue->last_used_idx);

	if (rte_vhost_vring_call(vsession->vid, virtqueue->vring_idx) == 0) {
		/* interrupt signalled */
		virtqueue->req_cnt += virtqueue->used_req_cnt;
		virtqueue->used_req_cnt = 0;
		return 1;
	} else {
		/* interrupt not signalled */
		return 0;
	}
}

static void
session_vq_io_stats_update(struct spdk_vhost_session *vsession,
			   struct spdk_vhost_virtqueue *virtqueue, uint64_t now)
{
	uint32_t irq_delay_base = vsession->coalescing_delay_time_base;
	uint32_t io_threshold = vsession->coalescing_io_rate_threshold;
	int32_t irq_delay;
	uint32_t req_cnt;

	req_cnt = virtqueue->req_cnt + virtqueue->used_req_cnt;
	if (req_cnt <= io_threshold) {
		return;
	}

	irq_delay = (irq_delay_base * (req_cnt - io_threshold)) / io_threshold;
	virtqueue->irq_delay_time = (uint32_t) spdk_max(0, irq_delay);

	virtqueue->req_cnt = 0;
	virtqueue->next_event_time = now;
}

static void
check_session_vq_io_stats(struct spdk_vhost_session *vsession,
			  struct spdk_vhost_virtqueue *virtqueue, uint64_t now)
{
	if (now < vsession->next_stats_check_time) {
		return;
	}

	vsession->next_stats_check_time = now + vsession->stats_check_interval;
	session_vq_io_stats_update(vsession, virtqueue, now);
}

static inline bool
vhost_vq_event_is_suppressed(struct spdk_vhost_virtqueue *vq)
{
	if (spdk_unlikely(vq->packed.packed_ring)) {
		if (vq->vring.driver_event->flags & VRING_PACKED_EVENT_FLAG_DISABLE) {
			return true;
		}
	} else {
		if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
			return true;
		}
	}

	return false;
}

void
vhost_session_vq_used_signal(struct spdk_vhost_virtqueue *virtqueue)
{
	struct spdk_vhost_session *vsession = virtqueue->vsession;
	uint64_t now;

	if (vsession->coalescing_delay_time_base == 0) {
		if (virtqueue->vring.desc == NULL) {
			return;
		}

		if (vhost_vq_event_is_suppressed(virtqueue)) {
			return;
		}

		vhost_vq_used_signal(vsession, virtqueue);
	} else {
		now = spdk_get_ticks();
		check_session_vq_io_stats(vsession, virtqueue, now);

		/* No need for event right now */
		if (now < virtqueue->next_event_time) {
			return;
		}

		if (vhost_vq_event_is_suppressed(virtqueue)) {
			return;
		}

		if (!vhost_vq_used_signal(vsession, virtqueue)) {
			return;
		}

		/* The syscall is quite long, so refresh the time */
		now = spdk_get_ticks();
		virtqueue->next_event_time = now + virtqueue->irq_delay_time;
	}
}

void
vhost_session_used_signal(struct spdk_vhost_session *vsession)
{
	struct spdk_vhost_virtqueue *virtqueue;
	uint16_t q_idx;

	for (q_idx = 0; q_idx < vsession->max_queues; q_idx++) {
		virtqueue = &vsession->virtqueue[q_idx];
		vhost_session_vq_used_signal(virtqueue);
	}
}

/*
 * Enqueue id and len to used ring.
 */
void
vhost_vq_used_ring_enqueue(struct spdk_vhost_session *vsession,
			   struct spdk_vhost_virtqueue *virtqueue,
			   uint16_t id, uint32_t len)
{
	struct rte_vhost_vring *vring = &virtqueue->vring;
	struct vring_used *used = vring->used;
	uint16_t last_idx = virtqueue->last_used_idx & (vring->size - 1);
	uint16_t vq_idx = virtqueue->vring_idx;

	SPDK_DEBUGLOG(vhost_ring,
		      "Queue %td - USED RING: last_idx=%"PRIu16" req id=%"PRIu16" len=%"PRIu32"\n",
		      virtqueue - vsession->virtqueue, virtqueue->last_used_idx, id, len);

	vhost_log_req_desc(vsession, virtqueue, id);

	virtqueue->last_used_idx++;
	used->ring[last_idx].id = id;
	used->ring[last_idx].len = len;

	/* Ensure the used ring is updated before we log it or increment used->idx. */
	spdk_smp_wmb();

	rte_vhost_set_last_inflight_io_split(vsession->vid, vq_idx, id);

	vhost_log_used_vring_elem(vsession, virtqueue, last_idx);
	* (volatile uint16_t *) &used->idx = virtqueue->last_used_idx;
	vhost_log_used_vring_idx(vsession, virtqueue);

	rte_vhost_clr_inflight_desc_split(vsession->vid, vq_idx, virtqueue->last_used_idx, id);

	virtqueue->used_req_cnt++;

	if (vsession->interrupt_mode) {
		if (virtqueue->vring.desc == NULL || vhost_vq_event_is_suppressed(virtqueue)) {
			return;
		}

		vhost_vq_used_signal(vsession, virtqueue);
	}
}

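/*
 * Completing a split-ring request therefore follows a strict order: write
 * the used element, barrier, publish used->idx, then (optionally) signal
 * the guest. From a backend's perspective that whole sequence collapses to
 * (illustrative sketch; req_idx and len are whatever the backend computed):
 *
 *	vhost_vq_used_ring_enqueue(vsession, vq, req_idx, len);
 *	// Later, from the poller, interrupts are coalesced and sent by:
 *	vhost_session_vq_used_signal(vq);
 */
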
void
vhost_vq_packed_ring_enqueue(struct spdk_vhost_session *vsession,
			     struct spdk_vhost_virtqueue *virtqueue,
			     uint16_t num_descs, uint16_t buffer_id,
			     uint32_t length, uint16_t inflight_head)
{
	struct vring_packed_desc *desc = &virtqueue->vring.desc_packed[virtqueue->last_used_idx];
	bool used, avail;

	SPDK_DEBUGLOG(vhost_ring,
		      "Queue %td - RING: buffer_id=%"PRIu16"\n",
		      virtqueue - vsession->virtqueue, buffer_id);

	/* When a descriptor has been used, its avail flag and used flag are
	 * equal, and the used flag value matches used_wrap_counter.
	 */
	used = !!(desc->flags & VRING_DESC_F_USED);
	avail = !!(desc->flags & VRING_DESC_F_AVAIL);
	if (spdk_unlikely(used == virtqueue->packed.used_phase && used == avail)) {
		SPDK_ERRLOG("descriptor has been used before\n");
		return;
	}

	/* In a used desc, addr is unused and len specifies the buffer length
	 * that has been written to by the device.
	 */
	desc->addr = 0;
	desc->len = length;

	/* This bit specifies whether any data has been written by the device */
	if (length != 0) {
		desc->flags |= VRING_DESC_F_WRITE;
	}

	/* Buffer ID is included in the last descriptor in the list.
	 * The driver needs to keep track of the size of the list corresponding
	 * to each buffer ID.
	 */
	desc->id = buffer_id;

	/* A device MUST NOT make the descriptor used before buffer_id is
	 * written to the descriptor.
	 */
	spdk_smp_wmb();

	rte_vhost_set_last_inflight_io_packed(vsession->vid, virtqueue->vring_idx, inflight_head);
	/* To mark a desc as used, the device sets the F_USED bit in flags to match
	 * the internal Device ring wrap counter. It also sets the F_AVAIL bit to
	 * match the same value.
	 */
	if (virtqueue->packed.used_phase) {
		desc->flags |= VRING_DESC_F_AVAIL_USED;
	} else {
		desc->flags &= ~VRING_DESC_F_AVAIL_USED;
	}
	rte_vhost_clr_inflight_desc_packed(vsession->vid, virtqueue->vring_idx, inflight_head);

	vhost_log_used_vring_elem(vsession, virtqueue, virtqueue->last_used_idx);
	virtqueue->last_used_idx += num_descs;
	if (virtqueue->last_used_idx >= virtqueue->vring.size) {
		virtqueue->last_used_idx -= virtqueue->vring.size;
		virtqueue->packed.used_phase = !virtqueue->packed.used_phase;
	}

	virtqueue->used_req_cnt++;
}

bool
vhost_vq_packed_ring_is_avail(struct spdk_vhost_virtqueue *virtqueue)
{
	uint16_t flags = virtqueue->vring.desc_packed[virtqueue->last_avail_idx].flags;

	/* To mark a desc as available, the driver sets the F_AVAIL bit in flags
	 * to match the internal avail wrap counter. It also sets the F_USED bit to
	 * match the inverse value but it's not mandatory.
	 */
	return (!!(flags & VRING_DESC_F_AVAIL) == virtqueue->packed.avail_phase);
}

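/*
 * A small worked example of the wrap-counter logic above, assuming a
 * hypothetical 4-entry packed ring: the avail phase starts at 1, so entries
 * 0..3 are available once the driver sets F_AVAIL=1 in them. After the
 * device consumes entry 3, last_avail_idx wraps from 3 to 0 and avail_phase
 * flips to 0; from then on an entry is available only when F_AVAIL=0. The
 * used side mirrors this with used_phase, which is why "used" is detected
 * above by comparing the flags against the current phase.
 */
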
bool
vhost_vring_packed_desc_is_wr(struct vring_packed_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
}

bool
vhost_vring_inflight_desc_is_wr(spdk_vhost_inflight_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
}

int
vhost_vring_packed_desc_get_next(struct vring_packed_desc **desc, uint16_t *req_idx,
				 struct spdk_vhost_virtqueue *vq,
				 struct vring_packed_desc *desc_table,
				 uint32_t desc_table_size)
{
	if (desc_table != NULL) {
		/* A non-NULL desc_table means the chain is indirect and the next
		 * desc is found by req_idx and desc_table_size. *desc set to NULL
		 * means we reached the last desc of this request.
		 */
		(*req_idx)++;
		if (*req_idx < desc_table_size) {
			*desc = &desc_table[*req_idx];
		} else {
			*desc = NULL;
		}
	} else {
		/* A NULL desc_table means the chain is non-indirect and the next
		 * desc is found by req_idx and the F_NEXT flag. *desc set to NULL
		 * means we reached the last desc of this request. When a new desc
		 * is returned, req_idx is updated as well.
		 */
		if (((*desc)->flags & VRING_DESC_F_NEXT) == 0) {
			*desc = NULL;
			return 0;
		}

		*req_idx = (*req_idx + 1) % vq->vring.size;
		*desc = &vq->vring.desc_packed[*req_idx];
	}

	return 0;
}

static int
vhost_vring_desc_payload_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
				uint16_t *iov_index, uintptr_t payload, uint64_t remaining)
{
	uintptr_t vva;
	uint64_t len;

	do {
		if (*iov_index >= SPDK_VHOST_IOVS_MAX) {
			SPDK_ERRLOG("SPDK_VHOST_IOVS_MAX(%d) reached\n", SPDK_VHOST_IOVS_MAX);
			return -1;
		}
		len = remaining;
		vva = (uintptr_t)rte_vhost_va_from_guest_pa(vsession->mem, payload, &len);
		if (vva == 0 || len == 0) {
			SPDK_ERRLOG("gpa_to_vva(%p) == NULL\n", (void *)payload);
			return -1;
		}
		iov[*iov_index].iov_base = (void *)vva;
		iov[*iov_index].iov_len = len;
		remaining -= len;
		payload += len;
		(*iov_index)++;
	} while (remaining);

	return 0;
}

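/*
 * The loop above splits a guest buffer at memory-region boundaries. As an
 * illustrative sketch (hypothetical sizes), an 8 KiB payload whose first
 * 4 KiB end one mapped region and whose remainder starts the next one
 * produces two iovec entries:
 *
 *	struct iovec iovs[SPDK_VHOST_IOVS_MAX];
 *	uint16_t iovcnt = 0;
 *
 *	if (vhost_vring_desc_payload_to_iov(vsession, iovs, &iovcnt,
 *					    desc->addr, desc->len) == 0) {
 *		// iovcnt == 2, iovs[0].iov_len + iovs[1].iov_len == desc->len
 *	}
 */
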
int
vhost_vring_packed_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
			       uint16_t *iov_index, const struct vring_packed_desc *desc)
{
	return vhost_vring_desc_payload_to_iov(vsession, iov, iov_index,
					       desc->addr, desc->len);
}

int
vhost_vring_inflight_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
				 uint16_t *iov_index, const spdk_vhost_inflight_desc *desc)
{
	return vhost_vring_desc_payload_to_iov(vsession, iov, iov_index,
					       desc->addr, desc->len);
}

/* 1. Traverse the desc chain to get the buffer_id and return it as task_idx.
 * 2. Update vq->last_avail_idx to point to the next available desc chain.
 * 3. Flip the avail_wrap_counter if last_avail_idx wraps around.
 */
uint16_t
vhost_vring_packed_desc_get_buffer_id(struct spdk_vhost_virtqueue *vq, uint16_t req_idx,
				      uint16_t *num_descs)
{
	struct vring_packed_desc *desc;
	uint16_t desc_head = req_idx;

	*num_descs = 1;

	desc = &vq->vring.desc_packed[req_idx];
	if (!vhost_vring_packed_desc_is_indirect(desc)) {
		while ((desc->flags & VRING_DESC_F_NEXT) != 0) {
			req_idx = (req_idx + 1) % vq->vring.size;
			desc = &vq->vring.desc_packed[req_idx];
			(*num_descs)++;
		}
	}

	/* The queue size doesn't have to be a power of 2.
	 * The device maintains last_avail_idx itself, so we can make sure
	 * the value stays valid (0 to vring.size - 1).
	 */
	vq->last_avail_idx = (req_idx + 1) % vq->vring.size;
	if (vq->last_avail_idx < desc_head) {
		vq->packed.avail_phase = !vq->packed.avail_phase;
	}

	return desc->id;
}

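/*
 * Putting the packed-ring helpers together, claiming one request looks
 * roughly like the following sketch (illustrative only; bytes_written and
 * inflight_head are hypothetical values the backend would track):
 *
 *	uint16_t num_descs, buffer_id;
 *
 *	if (vhost_vq_packed_ring_is_avail(vq)) {
 *		uint16_t req_idx = vq->last_avail_idx;
 *
 *		buffer_id = vhost_vring_packed_desc_get_buffer_id(vq, req_idx, &num_descs);
 *		// ... process the chain, then complete it:
 *		vhost_vq_packed_ring_enqueue(vsession, vq, num_descs, buffer_id,
 *					     bytes_written, inflight_head);
 *	}
 */
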
int
vhost_vring_desc_get_next(struct vring_desc **desc,
			  struct vring_desc *desc_table, uint32_t desc_table_size)
{
	struct vring_desc *old_desc = *desc;
	uint16_t next_idx;

	if ((old_desc->flags & VRING_DESC_F_NEXT) == 0) {
		*desc = NULL;
		return 0;
	}

	next_idx = old_desc->next;
	if (spdk_unlikely(next_idx >= desc_table_size)) {
		*desc = NULL;
		return -1;
	}

	*desc = &desc_table[next_idx];
	return 0;
}

int
vhost_vring_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
			uint16_t *iov_index, const struct vring_desc *desc)
{
	return vhost_vring_desc_payload_to_iov(vsession, iov, iov_index,
					       desc->addr, desc->len);
}

static inline void
vhost_session_mem_region_calc(uint64_t *previous_start, uint64_t *start, uint64_t *end,
			      uint64_t *len, struct rte_vhost_mem_region *region)
{
	*start = FLOOR_2MB(region->mmap_addr);
	*end = CEIL_2MB(region->mmap_addr + region->mmap_size);
	if (*start == *previous_start) {
		*start += (size_t) VALUE_2MB;
	}
	*previous_start = *start;
	*len = *end - *start;
}

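/*
 * A quick arithmetic example for the alignment above (hypothetical
 * addresses): a region mmap'ed at 0x7f0000201000 with size 0x300000 is
 * expanded to the 2 MiB-aligned range [0x7f0000200000, 0x7f0000600000),
 * i.e. len = 0x400000. If the previous region ended in the same 2 MiB
 * hugepage, start is bumped by VALUE_2MB so that the overlapping page is
 * not registered twice.
 */
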
void
vhost_session_mem_register(struct rte_vhost_memory *mem)
{
	uint64_t start, end, len;
	uint32_t i;
	uint64_t previous_start = UINT64_MAX;

	for (i = 0; i < mem->nregions; i++) {
		vhost_session_mem_region_calc(&previous_start, &start, &end, &len, &mem->regions[i]);
		SPDK_INFOLOG(vhost, "Registering VM memory for vtophys translation - 0x%jx len:0x%jx\n",
			     start, len);

		if (spdk_mem_register((void *)start, len) != 0) {
			SPDK_WARNLOG("Failed to register memory region %"PRIu32". Future vtophys translation might fail.\n",
				     i);
			continue;
		}
	}
}

void
vhost_session_mem_unregister(struct rte_vhost_memory *mem)
{
	uint64_t start, end, len;
	uint32_t i;
	uint64_t previous_start = UINT64_MAX;

	for (i = 0; i < mem->nregions; i++) {
		vhost_session_mem_region_calc(&previous_start, &start, &end, &len, &mem->regions[i]);
		if (spdk_vtophys((void *) start, NULL) == SPDK_VTOPHYS_ERROR) {
			continue; /* region has not been registered */
		}

		if (spdk_mem_unregister((void *)start, len) != 0) {
			assert(false);
		}
	}
}

static bool
vhost_memory_changed(struct rte_vhost_memory *new,
		     struct rte_vhost_memory *old)
{
	uint32_t i;

	if (new->nregions != old->nregions) {
		return true;
	}

	for (i = 0; i < new->nregions; ++i) {
		struct rte_vhost_mem_region *new_r = &new->regions[i];
		struct rte_vhost_mem_region *old_r = &old->regions[i];

		if (new_r->guest_phys_addr != old_r->guest_phys_addr) {
			return true;
		}
		if (new_r->size != old_r->size) {
			return true;
		}
		if (new_r->guest_user_addr != old_r->guest_user_addr) {
			return true;
		}
		if (new_r->mmap_addr != old_r->mmap_addr) {
			return true;
		}
		if (new_r->fd != old_r->fd) {
			return true;
		}
	}

	return false;
}

static int
vhost_register_memtable_if_required(struct spdk_vhost_session *vsession, int vid)
{
	struct rte_vhost_memory *new_mem;

	if (vhost_get_mem_table(vid, &new_mem) != 0) {
		SPDK_ERRLOG("vhost device %d: Failed to get guest memory table\n", vid);
		return -1;
	}

	if (vsession->mem == NULL) {
		SPDK_INFOLOG(vhost, "Start to set memtable\n");
		vsession->mem = new_mem;
		vhost_session_mem_register(vsession->mem);
		return 0;
	}

	if (vhost_memory_changed(new_mem, vsession->mem)) {
		SPDK_INFOLOG(vhost, "Memtable is changed\n");
		vhost_session_mem_unregister(vsession->mem);
		free(vsession->mem);

		vsession->mem = new_mem;
		vhost_session_mem_register(vsession->mem);
		return 0;
	}

	SPDK_INFOLOG(vhost, "Memtable is unchanged\n");
	free(new_mem);
	return 0;
}

static int
_stop_session(struct spdk_vhost_session *vsession)
{
	struct spdk_vhost_dev *vdev = vsession->vdev;
	struct spdk_vhost_user_dev *user_vdev = to_user_dev(vdev);
	struct spdk_vhost_virtqueue *q;
	int rc;
	uint16_t i;

	rc = user_vdev->user_backend->stop_session(vsession);
	if (rc != 0) {
		SPDK_ERRLOG("Couldn't stop device with vid %d.\n", vsession->vid);
		return rc;
	}

	for (i = 0; i < vsession->max_queues; i++) {
		q = &vsession->virtqueue[i];

		/* vring.desc and vring.desc_packed are in a union struct
		 * so q->vring.desc can replace q->vring.desc_packed.
		 */
		if (q->vring.desc == NULL) {
			continue;
		}

		/* Packed virtqueues support up to 2^15 entries each,
		 * so the leftover bit can be used as the wrap counter.
		 */
		if (q->packed.packed_ring) {
			q->last_avail_idx = q->last_avail_idx |
					    ((uint16_t)q->packed.avail_phase << 15);
			q->last_used_idx = q->last_used_idx |
					   ((uint16_t)q->packed.used_phase << 15);
		}

		rte_vhost_set_vring_base(vsession->vid, i, q->last_avail_idx, q->last_used_idx);
	}

	return 0;
}

static int
new_connection(int vid)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_user_dev *user_dev;
	struct spdk_vhost_session *vsession;
	size_t dev_dirname_len;
	char ifname[PATH_MAX];
	char *ctrlr_name;

	if (rte_vhost_get_ifname(vid, ifname, PATH_MAX) < 0) {
		SPDK_ERRLOG("Couldn't get a valid ifname for device with vid %d\n", vid);
		return -1;
	}

	spdk_vhost_lock();

	ctrlr_name = &ifname[0];
	dev_dirname_len = strlen(g_vhost_user_dev_dirname);
	if (strncmp(ctrlr_name, g_vhost_user_dev_dirname, dev_dirname_len) == 0) {
		ctrlr_name += dev_dirname_len;
	}

	vdev = spdk_vhost_dev_find(ctrlr_name);
	if (vdev == NULL) {
		SPDK_ERRLOG("Couldn't find device with vid %d to create connection for.\n", vid);
		spdk_vhost_unlock();
		return -1;
	}
	user_dev = to_user_dev(vdev);

	/* We expect sessions inside user_dev->vsessions to be sorted in ascending
	 * order with regard to vsession->id. For now we always set id = vsessions_num++
	 * and append each session to the very end of the vsessions list.
	 * This is required for vhost_user_dev_foreach_session() to work.
	 */
	if (user_dev->vsessions_num == UINT_MAX) {
		assert(false);
		spdk_vhost_unlock();
		return -EINVAL;
	}

	if (posix_memalign((void **)&vsession, SPDK_CACHE_LINE_SIZE, sizeof(*vsession) +
			   user_dev->user_backend->session_ctx_size)) {
		SPDK_ERRLOG("vsession alloc failed\n");
		spdk_vhost_unlock();
		return -1;
	}
	memset(vsession, 0, sizeof(*vsession) + user_dev->user_backend->session_ctx_size);

	vsession->vdev = vdev;
	vsession->vid = vid;
	vsession->id = user_dev->vsessions_num++;
	vsession->name = spdk_sprintf_alloc("%ss%u", vdev->name, vsession->vid);
	if (vsession->name == NULL) {
		SPDK_ERRLOG("vsession alloc failed\n");
		spdk_vhost_unlock();
		free(vsession);
		return -1;
	}
	vsession->started = false;
	vsession->initialized = false;
	vsession->next_stats_check_time = 0;
	vsession->stats_check_interval = SPDK_VHOST_STATS_CHECK_INTERVAL_MS *
					 spdk_get_ticks_hz() / 1000UL;
	TAILQ_INSERT_TAIL(&user_dev->vsessions, vsession, tailq);

	vhost_session_install_rte_compat_hooks(vsession);
	spdk_vhost_unlock();
	return 0;
}

static int
vhost_user_session_start_cb(struct spdk_vhost_dev *vdev,
			    struct spdk_vhost_session *vsession, void *unused)
{
	const struct spdk_vhost_user_dev_backend *backend;
	int rc;

	backend = to_user_dev(vdev)->user_backend;
	rc = backend->start_session(vdev, vsession, NULL);
	vhost_user_session_start_done(vsession, rc);
	return rc;
}

static int
vhost_user_session_start(struct spdk_vhost_dev *vdev, struct spdk_vhost_session *vsession)
{
	return vhost_user_session_send_event(vsession, vhost_user_session_start_cb, 3, "start session");
}

static int
start_device(int vid)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	int rc = -1;
	uint16_t i;
	bool packed_ring;

	spdk_vhost_lock();

	vsession = vhost_session_find_by_vid(vid);
	if (vsession == NULL) {
		SPDK_ERRLOG("Couldn't find session with vid %d.\n", vid);
		goto out;
	}

	vdev = vsession->vdev;
	if (vsession->started) {
		/* already started, nothing to do */
		rc = 0;
		goto out;
	}

	if (vhost_get_negotiated_features(vid, &vsession->negotiated_features) != 0) {
		SPDK_ERRLOG("vhost device %d: Failed to get negotiated driver features\n", vid);
		goto out;
	}

	packed_ring = ((vsession->negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) != 0);

	vsession->max_queues = 0;
	memset(vsession->virtqueue, 0, sizeof(vsession->virtqueue));
	for (i = 0; i < SPDK_VHOST_MAX_VQUEUES; i++) {
		struct spdk_vhost_virtqueue *q = &vsession->virtqueue[i];

		q->vsession = vsession;
		q->vring_idx = -1;
		if (rte_vhost_get_vhost_vring(vid, i, &q->vring)) {
			continue;
		}
		q->vring_idx = i;
		rte_vhost_get_vhost_ring_inflight(vid, i, &q->vring_inflight);

		/* vring.desc and vring.desc_packed are in a union struct
		 * so q->vring.desc can replace q->vring.desc_packed.
		 */
		if (q->vring.desc == NULL || q->vring.size == 0) {
			continue;
		}

		if (rte_vhost_get_vring_base(vsession->vid, i, &q->last_avail_idx, &q->last_used_idx)) {
			q->vring.desc = NULL;
			continue;
		}

		if (packed_ring) {
			/* Use the inflight mem to restore last_avail_idx and last_used_idx.
			 * When the vring format is packed, there is no used_idx in the
			 * used ring, so the VM can't resend the used_idx to VHOST when
			 * reconnecting. QEMU 5.2.0 and later support packed-ring inflight;
			 * earlier versions only support split-ring inflight because they
			 * don't send the negotiated features before getting the inflight
			 * fd. Users can use RPC to enable this function.
			 */
			if (spdk_unlikely(vdev->packed_ring_recovery)) {
				rte_vhost_get_vring_base_from_inflight(vsession->vid, i,
								       &q->last_avail_idx,
								       &q->last_used_idx);
			}

			/* Packed virtqueues support up to 2^15 entries each,
			 * so the leftover bit can be used as the wrap counter.
			 */
			q->packed.avail_phase = q->last_avail_idx >> 15;
			q->last_avail_idx = q->last_avail_idx & 0x7FFF;
			q->packed.used_phase = q->last_used_idx >> 15;
			q->last_used_idx = q->last_used_idx & 0x7FFF;

			if (!vsession->interrupt_mode) {
				/* Disable I/O submission notifications, we'll be polling. */
				q->vring.device_event->flags = VRING_PACKED_EVENT_FLAG_DISABLE;
			}
		} else {
			if (!vsession->interrupt_mode) {
				/* Disable I/O submission notifications, we'll be polling. */
				q->vring.used->flags = VRING_USED_F_NO_NOTIFY;
			}
		}

		q->packed.packed_ring = packed_ring;
		vsession->max_queues = i + 1;
	}

	if (!vsession->mem) {
		SPDK_ERRLOG("Session %s doesn't set memory table yet\n", vsession->name);
		goto out;
	}

	/*
	 * Not entirely sure, but this looks like some kind of QEMU bug: guest I/O
	 * might be frozen after live-migration unless all queues are kicked. It
	 * looks like the previous vhost instance failed to effectively deliver all
	 * interrupts before the GET_VRING_BASE message. This shouldn't harm the
	 * guest, since spurious interrupts should be ignored by the guest virtio
	 * driver.
	 *
	 * Tested on QEMU 2.10.91 and 2.11.50.
	 */
	for (i = 0; i < vsession->max_queues; i++) {
		struct spdk_vhost_virtqueue *q = &vsession->virtqueue[i];

		/* vring.desc and vring.desc_packed are in a union struct
		 * so q->vring.desc can replace q->vring.desc_packed.
		 */
		if (q->vring.desc != NULL && q->vring.size > 0) {
			/* Make sure a successful call of
			 * `rte_vhost_vring_call` will happen
			 * after starting the device.
			 */
			q->used_req_cnt += 1;
		}
	}

	vhost_user_session_set_coalescing(vdev, vsession, NULL);
	vsession->initialized = true;
	rc = vhost_user_session_start(vdev, vsession);

out:
	spdk_vhost_unlock();
	return rc;
}

static void
stop_device(int vid)
{
	struct spdk_vhost_session *vsession;

	spdk_vhost_lock();
	vsession = vhost_session_find_by_vid(vid);
	if (vsession == NULL) {
		SPDK_ERRLOG("Couldn't find session with vid %d.\n", vid);
		spdk_vhost_unlock();
		return;
	}

	if (!vsession->started) {
		/* already stopped, nothing to do */
		spdk_vhost_unlock();
		return;
	}

	_stop_session(vsession);
	spdk_vhost_unlock();
}

static void
destroy_connection(int vid)
{
	struct spdk_vhost_session *vsession;

	spdk_vhost_lock();
	vsession = vhost_session_find_by_vid(vid);
	if (vsession == NULL) {
		SPDK_ERRLOG("Couldn't find session with vid %d.\n", vid);
		spdk_vhost_unlock();
		return;
	}

	if (vsession->started) {
		if (_stop_session(vsession) != 0) {
			spdk_vhost_unlock();
			return;
		}
	}

	if (vsession->mem) {
		vhost_session_mem_unregister(vsession->mem);
		free(vsession->mem);
	}

	TAILQ_REMOVE(&to_user_dev(vsession->vdev)->vsessions, vsession, tailq);
	free(vsession->name);
	free(vsession);
	spdk_vhost_unlock();
}

#if RTE_VERSION >= RTE_VERSION_NUM(21, 11, 0, 0)
static const struct rte_vhost_device_ops g_spdk_vhost_ops = {
#else
static const struct vhost_device_ops g_spdk_vhost_ops = {
#endif
	.new_device = start_device,
	.destroy_device = stop_device,
	.new_connection = new_connection,
	.destroy_connection = destroy_connection,
};

static struct spdk_vhost_session *
vhost_session_find_by_id(struct spdk_vhost_dev *vdev, unsigned id)
{
	struct spdk_vhost_session *vsession;

	TAILQ_FOREACH(vsession, &to_user_dev(vdev)->vsessions, tailq) {
		if (vsession->id == id) {
			return vsession;
		}
	}

	return NULL;
}

struct spdk_vhost_session *
vhost_session_find_by_vid(int vid)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;

	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
	     vdev = spdk_vhost_dev_next(vdev)) {
		TAILQ_FOREACH(vsession, &to_user_dev(vdev)->vsessions, tailq) {
			if (vsession->vid == vid) {
				return vsession;
			}
		}
	}

	return NULL;
}

static void
wait_for_semaphore(int timeout_sec, const char *errmsg)
{
	struct timespec timeout;
	int rc;

	clock_gettime(CLOCK_REALTIME, &timeout);
	timeout.tv_sec += timeout_sec;
	rc = sem_timedwait(&g_dpdk_sem, &timeout);
	if (rc != 0) {
		SPDK_ERRLOG("Timeout waiting for event: %s.\n", errmsg);
		sem_wait(&g_dpdk_sem);
	}
}

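/*
 * The wait/post pair around g_dpdk_sem is the bridge between the DPDK
 * callback thread and SPDK threads. A DPDK callback queues work on an SPDK
 * thread and then blocks here; the SPDK side eventually calls
 * vhost_session_cb_done(rc) (defined below), which stores rc in
 * g_dpdk_response and posts g_dpdk_sem. In sketch form (illustrative only;
 * do_work_cb is a hypothetical message handler):
 *
 *	// DPDK callback thread:
 *	spdk_thread_send_msg(vdev->thread, do_work_cb, ctx);
 *	wait_for_semaphore(3, "some event");
 *	return g_dpdk_response;
 *
 *	// SPDK thread, at the end of do_work_cb():
 *	vhost_session_cb_done(rc);
 */
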
static void
vhost_session_cb_done(int rc)
{
	g_dpdk_response = rc;
	sem_post(&g_dpdk_sem);
}

void
vhost_user_session_start_done(struct spdk_vhost_session *vsession, int response)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vsession->vdev);

	if (response == 0) {
		vsession->started = true;

		assert(user_dev->active_session_num < UINT32_MAX);
		user_dev->active_session_num++;
	}

	vhost_session_cb_done(response);
}

void
vhost_user_session_stop_done(struct spdk_vhost_session *vsession, int response)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vsession->vdev);

	if (response == 0) {
		vsession->started = false;

		assert(user_dev->active_session_num > 0);
		user_dev->active_session_num--;
	}

	vhost_session_cb_done(response);
}

static void
vhost_event_cb(void *arg1)
{
	struct vhost_session_fn_ctx *ctx = arg1;
	struct spdk_vhost_session *vsession;

	if (spdk_vhost_trylock() != 0) {
		spdk_thread_send_msg(spdk_get_thread(), vhost_event_cb, arg1);
		return;
	}

	vsession = vhost_session_find_by_id(ctx->vdev, ctx->vsession_id);
	ctx->cb_fn(ctx->vdev, vsession, NULL);
	spdk_vhost_unlock();
}

int
vhost_user_session_send_event(struct spdk_vhost_session *vsession,
			      spdk_vhost_session_fn cb_fn, unsigned timeout_sec,
			      const char *errmsg)
{
	struct vhost_session_fn_ctx ev_ctx = {0};
	struct spdk_vhost_dev *vdev = vsession->vdev;

	ev_ctx.vdev = vdev;
	ev_ctx.vsession_id = vsession->id;
	ev_ctx.cb_fn = cb_fn;

	spdk_thread_send_msg(vdev->thread, vhost_event_cb, &ev_ctx);

	spdk_vhost_unlock();
	wait_for_semaphore(timeout_sec, errmsg);
	spdk_vhost_lock();

	return g_dpdk_response;
}

static void
foreach_session_finish_cb(void *arg1)
{
	struct vhost_session_fn_ctx *ev_ctx = arg1;
	struct spdk_vhost_dev *vdev = ev_ctx->vdev;
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);

	if (spdk_vhost_trylock() != 0) {
		spdk_thread_send_msg(spdk_get_thread(),
				     foreach_session_finish_cb, arg1);
		return;
	}

	assert(user_dev->pending_async_op_num > 0);
	user_dev->pending_async_op_num--;
	if (ev_ctx->cpl_fn != NULL) {
		ev_ctx->cpl_fn(vdev, ev_ctx->user_ctx);
	}

	spdk_vhost_unlock();
	free(ev_ctx);
}

static void
foreach_session(void *arg1)
{
	struct vhost_session_fn_ctx *ev_ctx = arg1;
	struct spdk_vhost_session *vsession;
	struct spdk_vhost_dev *vdev = ev_ctx->vdev;
	int rc;

	if (spdk_vhost_trylock() != 0) {
		spdk_thread_send_msg(spdk_get_thread(), foreach_session, arg1);
		return;
	}

	TAILQ_FOREACH(vsession, &to_user_dev(vdev)->vsessions, tailq) {
		if (vsession->initialized) {
			rc = ev_ctx->cb_fn(vdev, vsession, ev_ctx->user_ctx);
			if (rc < 0) {
				goto out;
			}
		}
	}

out:
	spdk_vhost_unlock();

	spdk_thread_send_msg(g_vhost_user_init_thread, foreach_session_finish_cb, arg1);
}

void
vhost_user_dev_foreach_session(struct spdk_vhost_dev *vdev,
			       spdk_vhost_session_fn fn,
			       spdk_vhost_dev_fn cpl_fn,
			       void *arg)
{
	struct vhost_session_fn_ctx *ev_ctx;
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);

	ev_ctx = calloc(1, sizeof(*ev_ctx));
	if (ev_ctx == NULL) {
		SPDK_ERRLOG("Failed to alloc vhost event.\n");
		assert(false);
		return;
	}

	ev_ctx->vdev = vdev;
	ev_ctx->cb_fn = fn;
	ev_ctx->cpl_fn = cpl_fn;
	ev_ctx->user_ctx = arg;

	assert(user_dev->pending_async_op_num < UINT32_MAX);
	user_dev->pending_async_op_num++;

	spdk_thread_send_msg(vdev->thread, foreach_session, ev_ctx);
}

void
vhost_user_session_set_interrupt_mode(struct spdk_vhost_session *vsession, bool interrupt_mode)
{
	uint16_t i;
	bool packed_ring;
	int rc = 0;

	packed_ring = ((vsession->negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) != 0);

	for (i = 0; i < vsession->max_queues; i++) {
		struct spdk_vhost_virtqueue *q = &vsession->virtqueue[i];
		uint64_t num_events = 1;

		/* vring.desc and vring.desc_packed are in a union struct
		 * so q->vring.desc can replace q->vring.desc_packed.
		 */
		if (q->vring.desc == NULL || q->vring.size == 0) {
			continue;
		}

		if (interrupt_mode) {
			/* Enable I/O submission notifications, we'll be interrupting. */
			if (packed_ring) {
				* (volatile uint16_t *) &q->vring.device_event->flags = VRING_PACKED_EVENT_FLAG_ENABLE;
			} else {
				* (volatile uint16_t *) &q->vring.used->flags = 0;
			}

			/* In case of a race condition, always kick the vring when switching to interrupt mode */
			rc = write(q->vring.kickfd, &num_events, sizeof(num_events));
			if (rc < 0) {
				SPDK_ERRLOG("failed to kick vring: %s.\n", spdk_strerror(errno));
			}

			vsession->interrupt_mode = true;
		} else {
			/* Disable I/O submission notifications, we'll be polling. */
			if (packed_ring) {
				* (volatile uint16_t *) &q->vring.device_event->flags = VRING_PACKED_EVENT_FLAG_DISABLE;
			} else {
				* (volatile uint16_t *) &q->vring.used->flags = VRING_USED_F_NO_NOTIFY;
			}

			vsession->interrupt_mode = false;
		}
	}
}

static int
extern_vhost_pre_msg_handler(int vid, void *_msg)
{
	struct vhost_user_msg *msg = _msg;
	struct spdk_vhost_session *vsession;

	vsession = vhost_session_find_by_vid(vid);
	if (vsession == NULL) {
		SPDK_ERRLOG("Received a message to uninitialized session (vid %d).\n", vid);
		assert(false);
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	switch (msg->request) {
	case VHOST_USER_GET_VRING_BASE:
		if (vsession->forced_polling && vsession->started) {
			/* Our queue is stopped for whatever reason, but we may still
			 * need to poll it after it's initialized again.
			 */
			g_spdk_vhost_ops.destroy_device(vid);
		}
		break;
	case VHOST_USER_SET_VRING_BASE:
	case VHOST_USER_SET_VRING_ADDR:
	case VHOST_USER_SET_VRING_NUM:
		/* For vhost-user socket messages except VHOST_USER_GET_VRING_BASE,
		 * rte_vhost holds all VQ access locks. Since the DPDK 22.07 release,
		 * `rte_vhost_vring_call` also needs to hold the VQ's access lock, so
		 * we can't call it from the DPDK "vhost-events" thread context. While
		 * this flag is true, the SPDK vring poller avoids signalling used
		 * rings.
		 */
		vsession->skip_used_signal = true;
		if (vsession->forced_polling && vsession->started) {
			/* Additional queues are being initialized, so we either processed
			 * enough I/Os and are switching from SeaBIOS to the OS now, or
			 * we were never in SeaBIOS in the first place. Either way, we
			 * don't need our workaround anymore.
			 */
			g_spdk_vhost_ops.destroy_device(vid);
			vsession->forced_polling = false;
		}
		break;
	case VHOST_USER_SET_VRING_KICK:
	/* rte_vhost (after 20.08) calls new_device once the first active vring is
	 * configured, so we may start the session before all vrings are available.
	 * For each new vring, if the session is already started, we need to
	 * restart it.
	 */
	case VHOST_USER_SET_VRING_CALL:
	/* rte_vhost will close the previous callfd and won't notify
	 * us about any change. This will effectively make SPDK fail
	 * to deliver any subsequent interrupts until a session is
	 * restarted. We stop the session here before closing the previous
	 * fd (so that all interrupts must have been delivered by the
	 * time the descriptor is closed) and start right after (which
	 * will make SPDK retrieve the latest, up-to-date callfd from
	 * rte_vhost).
	 */
	case VHOST_USER_SET_MEM_TABLE:
		vsession->skip_used_signal = true;
		/* rte_vhost will unmap previous memory that SPDK may still
		 * have pending DMA operations on. We can't let that happen,
		 * so stop the device before letting rte_vhost unmap anything.
		 * This will block until all pending I/Os are finished.
		 * We will start the device again from the post-processing
		 * message handler.
		 */
		if (vsession->started) {
			g_spdk_vhost_ops.destroy_device(vid);
			vsession->needs_restart = true;
		}
		break;
	case VHOST_USER_GET_CONFIG: {
		int rc = 0;

		spdk_vhost_lock();
		if (vsession->vdev->backend->vhost_get_config) {
			rc = vsession->vdev->backend->vhost_get_config(vsession->vdev,
					msg->payload.cfg.region, msg->payload.cfg.size);
			if (rc != 0) {
				msg->size = 0;
			}
		}
		spdk_vhost_unlock();

		return RTE_VHOST_MSG_RESULT_REPLY;
	}
	case VHOST_USER_SET_CONFIG: {
		int rc = 0;

		spdk_vhost_lock();
		if (vsession->vdev->backend->vhost_set_config) {
			rc = vsession->vdev->backend->vhost_set_config(vsession->vdev,
					msg->payload.cfg.region, msg->payload.cfg.offset,
					msg->payload.cfg.size, msg->payload.cfg.flags);
		}
		spdk_vhost_unlock();

		return rc == 0 ? RTE_VHOST_MSG_RESULT_OK : RTE_VHOST_MSG_RESULT_ERR;
	}
	default:
		break;
	}

	vsession->skip_used_signal = false;
	return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
}

static int
extern_vhost_post_msg_handler(int vid, void *_msg)
{
	struct vhost_user_msg *msg = _msg;
	struct spdk_vhost_session *vsession;

	vsession = vhost_session_find_by_vid(vid);
	if (vsession == NULL) {
		SPDK_ERRLOG("Received a message to uninitialized session (vid %d).\n", vid);
		assert(false);
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	if (msg->request == VHOST_USER_SET_MEM_TABLE) {
		vhost_register_memtable_if_required(vsession, vid);
	}

	if (vsession->needs_restart) {
		g_spdk_vhost_ops.new_device(vid);
		vsession->needs_restart = false;
		return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
	}

	switch (msg->request) {
	case VHOST_USER_SET_FEATURES:
		/* rte_vhost requires all queues to be fully initialized in order
		 * to start I/O processing. This behavior is not compliant with the
		 * vhost-user specification and doesn't work with QEMU 2.12+, which
		 * will only initialize 1 I/O queue for the SeaBIOS boot.
		 * Theoretically, we should start polling each virtqueue individually
		 * after receiving its SET_VRING_KICK message, but rte_vhost is not
		 * designed to poll individual queues. So here we use a workaround
		 * to detect when the vhost session could be potentially at that SeaBIOS
		 * stage and we mark it to start polling as soon as its first virtqueue
		 * gets initialized. This doesn't hurt any non-QEMU vhost slaves
		 * and allows QEMU 2.12+ to boot correctly. SET_FEATURES could be sent
		 * at any time, but QEMU will send it at least once on SeaBIOS
		 * initialization - whenever powered-up or rebooted.
		 */
		vsession->forced_polling = true;
		break;
	case VHOST_USER_SET_VRING_KICK:
		/* The vhost-user spec tells us to start polling a queue after
		 * receiving its SET_VRING_KICK message. Let's do it!
		 */
		if (vsession->forced_polling && !vsession->started) {
			g_spdk_vhost_ops.new_device(vid);
		}
		break;
	default:
		break;
	}

	return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
}

struct rte_vhost_user_extern_ops g_spdk_extern_vhost_ops = {
	.pre_msg_handle = extern_vhost_pre_msg_handler,
	.post_msg_handle = extern_vhost_post_msg_handler,
};

void
vhost_session_install_rte_compat_hooks(struct spdk_vhost_session *vsession)
{
	int rc;

	rc = rte_vhost_extern_callback_register(vsession->vid, &g_spdk_extern_vhost_ops, NULL);
	if (rc != 0) {
		SPDK_ERRLOG("rte_vhost_extern_callback_register() failed for vid = %d\n",
			    vsession->vid);
		return;
	}
}

int
vhost_register_unix_socket(const char *path, const char *ctrl_name,
			   uint64_t virtio_features, uint64_t disabled_features, uint64_t protocol_features)
{
	struct stat file_stat;
	uint64_t features = 0;

	/* Register vhost driver to handle vhost messages. */
	if (stat(path, &file_stat) != -1) {
		if (!S_ISSOCK(file_stat.st_mode)) {
			SPDK_ERRLOG("Cannot create a domain socket at path \"%s\": "
				    "The file already exists and is not a socket.\n",
				    path);
			return -EIO;
		} else if (unlink(path) != 0) {
			SPDK_ERRLOG("Cannot create a domain socket at path \"%s\": "
				    "The socket already exists and could not be unlinked.\n",
				    path);
			return -EIO;
		}
	}

#if RTE_VERSION < RTE_VERSION_NUM(20, 8, 0, 0)
	if (rte_vhost_driver_register(path, 0) != 0) {
#else
	if (rte_vhost_driver_register(path, RTE_VHOST_USER_ASYNC_COPY) != 0) {
#endif
		SPDK_ERRLOG("Could not register controller %s with vhost library\n", ctrl_name);
		SPDK_ERRLOG("Check if domain socket %s already exists\n", path);
		return -EIO;
	}
	if (rte_vhost_driver_set_features(path, virtio_features) ||
	    rte_vhost_driver_disable_features(path, disabled_features)) {
		SPDK_ERRLOG("Couldn't set vhost features for controller %s\n", ctrl_name);

		rte_vhost_driver_unregister(path);
		return -EIO;
	}

	if (rte_vhost_driver_callback_register(path, &g_spdk_vhost_ops) != 0) {
		rte_vhost_driver_unregister(path);
		SPDK_ERRLOG("Couldn't register callbacks for controller %s\n", ctrl_name);
		return -EIO;
	}

	rte_vhost_driver_get_protocol_features(path, &features);
	features |= protocol_features;
	rte_vhost_driver_set_protocol_features(path, features);

	if (rte_vhost_driver_start(path) != 0) {
		SPDK_ERRLOG("Failed to start vhost driver for controller %s (%d): %s\n",
			    ctrl_name, errno, spdk_strerror(errno));
		rte_vhost_driver_unregister(path);
		return -EIO;
	}

	return 0;
}

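/*
 * The registration sequence above mirrors the usual rte_vhost setup order:
 * register the socket, constrain features, install callbacks, adjust the
 * protocol features, then start the driver. A hypothetical caller would do
 * (illustrative only; the path and feature values are made up):
 *
 *	int rc = vhost_register_unix_socket("/var/tmp/vhost.0", "vhost.0",
 *					    virtio_features, disabled_features,
 *					    protocol_features);
 *	if (rc != 0) {
 *		// the socket could not be created or the driver failed to start
 *	}
 */
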
int
vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
{
	return rte_vhost_get_mem_table(vid, mem);
}

int
vhost_driver_unregister(const char *path)
{
	return rte_vhost_driver_unregister(path);
}

int
vhost_get_negotiated_features(int vid, uint64_t *negotiated_features)
{
	return rte_vhost_get_negotiated_features(vid, negotiated_features);
}

int
vhost_user_dev_set_coalescing(struct spdk_vhost_user_dev *user_dev, uint32_t delay_base_us,
			      uint32_t iops_threshold)
{
	uint64_t delay_time_base = delay_base_us * spdk_get_ticks_hz() / 1000000ULL;
	uint32_t io_rate = iops_threshold * SPDK_VHOST_STATS_CHECK_INTERVAL_MS / 1000U;

	if (delay_time_base >= UINT32_MAX) {
		SPDK_ERRLOG("Delay time of %"PRIu32" is too big\n", delay_base_us);
		return -EINVAL;
	} else if (io_rate == 0) {
		SPDK_ERRLOG("IOPS rate of %"PRIu32" is too low. Min is %u\n", io_rate,
			    1000U / SPDK_VHOST_STATS_CHECK_INTERVAL_MS);
		return -EINVAL;
	}

	user_dev->coalescing_delay_us = delay_base_us;
	user_dev->coalescing_iops_threshold = iops_threshold;
	return 0;
}

int
vhost_user_session_set_coalescing(struct spdk_vhost_dev *vdev,
				  struct spdk_vhost_session *vsession, void *ctx)
{
	vsession->coalescing_delay_time_base =
		to_user_dev(vdev)->coalescing_delay_us * spdk_get_ticks_hz() / 1000000ULL;
	vsession->coalescing_io_rate_threshold =
		to_user_dev(vdev)->coalescing_iops_threshold * SPDK_VHOST_STATS_CHECK_INTERVAL_MS / 1000U;
	return 0;
}

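/*
 * A worked example of the conversions above, assuming a 1 GHz tick rate and
 * SPDK_VHOST_STATS_CHECK_INTERVAL_MS == 10 (both are assumptions here):
 * delay_base_us = 50 gives coalescing_delay_time_base =
 * 50 * 1000000000 / 1000000 = 50000 ticks, and iops_threshold = 60000 gives
 * coalescing_io_rate_threshold = 60000 * 10 / 1000 = 600 I/Os per 10 ms
 * stats interval. Only sessions exceeding that per-interval rate have their
 * interrupts delayed.
 */
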
int
spdk_vhost_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
			  uint32_t iops_threshold)
{
	int rc;

	rc = vhost_user_dev_set_coalescing(to_user_dev(vdev), delay_base_us, iops_threshold);
	if (rc != 0) {
		return rc;
	}

	vhost_user_dev_foreach_session(vdev, vhost_user_session_set_coalescing, NULL, NULL);
	return 0;
}

void
spdk_vhost_get_coalescing(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
			  uint32_t *iops_threshold)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);

	if (delay_base_us) {
		*delay_base_us = user_dev->coalescing_delay_us;
	}

	if (iops_threshold) {
		*iops_threshold = user_dev->coalescing_iops_threshold;
	}
}

int
spdk_vhost_set_socket_path(const char *basename)
{
	int ret;

	if (basename && strlen(basename) > 0) {
		ret = snprintf(g_vhost_user_dev_dirname, sizeof(g_vhost_user_dev_dirname) - 2, "%s", basename);
		if (ret <= 0) {
			return -EINVAL;
		}
		if ((size_t)ret >= sizeof(g_vhost_user_dev_dirname) - 2) {
			SPDK_ERRLOG("Char dev dir path length %d is too long\n", ret);
			return -EINVAL;
		}

		if (g_vhost_user_dev_dirname[ret - 1] != '/') {
			g_vhost_user_dev_dirname[ret] = '/';
			g_vhost_user_dev_dirname[ret + 1] = '\0';
		}
	}

	return 0;
}

static void
vhost_dev_thread_exit(void *arg1)
{
	spdk_thread_exit(spdk_get_thread());
}

int
vhost_user_dev_register(struct spdk_vhost_dev *vdev, const char *name, struct spdk_cpuset *cpumask,
			const struct spdk_vhost_user_dev_backend *user_backend)
{
	char path[PATH_MAX];
	struct spdk_vhost_user_dev *user_dev;

	if (snprintf(path, sizeof(path), "%s%s", g_vhost_user_dev_dirname, name) >= (int)sizeof(path)) {
		SPDK_ERRLOG("Resulting socket path for controller %s is too long: %s%s\n",
			    name, g_vhost_user_dev_dirname, name);
		return -EINVAL;
	}

	vdev->path = strdup(path);
	if (vdev->path == NULL) {
		return -EIO;
	}

	user_dev = calloc(1, sizeof(*user_dev));
	if (user_dev == NULL) {
		free(vdev->path);
		return -ENOMEM;
	}
	vdev->ctxt = user_dev;

	vdev->thread = spdk_thread_create(vdev->name, cpumask);
	if (vdev->thread == NULL) {
		free(user_dev);
		free(vdev->path);
		SPDK_ERRLOG("Failed to create thread for vhost controller %s.\n", name);
		return -EIO;
	}

	vdev->registered = true;
	user_dev->user_backend = user_backend;
	user_dev->vdev = vdev;
	TAILQ_INIT(&user_dev->vsessions);

	vhost_user_dev_set_coalescing(user_dev, SPDK_VHOST_COALESCING_DELAY_BASE_US,
				      SPDK_VHOST_VQ_IOPS_COALESCING_THRESHOLD);

	if (vhost_register_unix_socket(path, name, vdev->virtio_features, vdev->disabled_features,
				       vdev->protocol_features)) {
		spdk_thread_send_msg(vdev->thread, vhost_dev_thread_exit, NULL);
		free(user_dev);
		free(vdev->path);
		return -EIO;
	}

	return 0;
}

int
vhost_user_dev_unregister(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);

	if (user_dev->pending_async_op_num) {
		return -EBUSY;
	}

	if (!TAILQ_EMPTY(&user_dev->vsessions)) {
		SPDK_ERRLOG("Controller %s still has valid connections.\n", vdev->name);
		return -EBUSY;
	}

	if (vdev->registered && vhost_driver_unregister(vdev->path) != 0) {
		SPDK_ERRLOG("Could not unregister controller %s with vhost library\n"
			    "Check if domain socket %s still exists\n",
			    vdev->name, vdev->path);
		return -EIO;
	}

	spdk_thread_send_msg(vdev->thread, vhost_dev_thread_exit, NULL);
	free(user_dev);
	free(vdev->path);

	return 0;
}

static bool g_vhost_user_started = false;

int
vhost_user_init(void)
{
	size_t len;

	if (g_vhost_user_started) {
		return 0;
	}

	if (g_vhost_user_dev_dirname[0] == '\0') {
		if (getcwd(g_vhost_user_dev_dirname, sizeof(g_vhost_user_dev_dirname) - 1) == NULL) {
			SPDK_ERRLOG("getcwd failed (%d): %s\n", errno, spdk_strerror(errno));
			return -1;
		}

		len = strlen(g_vhost_user_dev_dirname);
		if (g_vhost_user_dev_dirname[len - 1] != '/') {
			g_vhost_user_dev_dirname[len] = '/';
			g_vhost_user_dev_dirname[len + 1] = '\0';
		}
	}

	g_vhost_user_started = true;

	g_vhost_user_init_thread = spdk_get_thread();
	assert(g_vhost_user_init_thread != NULL);

	return 0;
}

static void
vhost_user_session_shutdown_on_init(void *vhost_cb)
{
	spdk_vhost_fini_cb fn = vhost_cb;

	fn();
}

static void *
vhost_user_session_shutdown(void *vhost_cb)
{
	struct spdk_vhost_dev *vdev = NULL;
	struct spdk_vhost_session *vsession;

	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
	     vdev = spdk_vhost_dev_next(vdev)) {
		spdk_vhost_lock();
		TAILQ_FOREACH(vsession, &to_user_dev(vdev)->vsessions, tailq) {
			if (vsession->started) {
				_stop_session(vsession);
			}
		}
		spdk_vhost_unlock();
		vhost_driver_unregister(vdev->path);
		vdev->registered = false;
	}

	SPDK_INFOLOG(vhost, "Exiting\n");
	spdk_thread_send_msg(g_vhost_user_init_thread, vhost_user_session_shutdown_on_init, vhost_cb);
	return NULL;
}

void
vhost_user_fini(spdk_vhost_fini_cb vhost_cb)
{
	pthread_t tid;
	int rc;

	if (!g_vhost_user_started) {
		vhost_cb();
		return;
	}

	g_vhost_user_started = false;

	/* The rte_vhost API for removing sockets is not asynchronous. Since it may
	 * call SPDK ops for stopping a device or removing a connection, we need to
	 * call it from a separate thread to avoid deadlock.
	 */
	rc = pthread_create(&tid, NULL, &vhost_user_session_shutdown, vhost_cb);
	if (rc != 0) {
		/* pthread_create() returns a positive errno on failure, not -1. */
		SPDK_ERRLOG("Failed to start session shutdown thread (%d): %s\n", rc, spdk_strerror(rc));
		abort();
	}
	pthread_detach(tid);
}