xref: /spdk/lib/vhost/vhost.c (revision 1a00f5c09488e7466a331b8c75cde4969740357f)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk/env.h"
37 #include "spdk/likely.h"
38 #include "spdk/string.h"
39 #include "spdk/util.h"
40 #include "spdk/memory.h"
41 #include "spdk/barrier.h"
42 #include "spdk/vhost.h"
43 #include "vhost_internal.h"
44 
45 bool g_packed_ring_recovery = false;
46 
47 static struct spdk_cpuset g_vhost_core_mask;
48 
49 static TAILQ_HEAD(, spdk_vhost_dev) g_vhost_devices = TAILQ_HEAD_INITIALIZER(
50 			g_vhost_devices);
51 static pthread_mutex_t g_vhost_mutex = PTHREAD_MUTEX_INITIALIZER;
52 
53 void *vhost_gpa_to_vva(struct spdk_vhost_session *vsession, uint64_t addr, uint64_t len)
54 {
55 	void *vva;
56 	uint64_t newlen;
57 
58 	newlen = len;
59 	vva = (void *)rte_vhost_va_from_guest_pa(vsession->mem, addr, &newlen);
60 	if (newlen != len) {
61 		return NULL;
62 	}
63 
64 	return vva;
65 
66 }
67 
/* Dirty-log every writable payload buffer of request "req_id" so a live
 * migration destination sees data the backend may have written into guest
 * memory. No-op unless VHOST_F_LOG_ALL has been negotiated. */
static void
vhost_log_req_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *virtqueue,
		   uint16_t req_id)
{
	struct vring_desc *desc, *desc_table;
	uint32_t desc_table_size;
	int rc;

	if (spdk_likely(!vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
		return;
	}

	rc = vhost_vq_get_desc(vsession, virtqueue, req_id, &desc, &desc_table, &desc_table_size);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Can't log used ring descriptors!\n");
		return;
	}

	do {
		if (vhost_vring_desc_is_wr(desc)) {
			/* Strictly speaking, only the pages actually touched should be
			 * logged, but that would require tracking writes in each backend.
			 * The backend will most likely touch all/most of these pages
			 * anyway, so assume every page passed as a writable buffer was
			 * touched. */
			rte_vhost_log_write(vsession->vid, desc->addr, desc->len);
		}
		vhost_vring_desc_get_next(&desc, desc_table, desc_table_size);
	} while (desc);
}
97 
98 static void
99 vhost_log_used_vring_elem(struct spdk_vhost_session *vsession,
100 			  struct spdk_vhost_virtqueue *virtqueue,
101 			  uint16_t idx)
102 {
103 	uint64_t offset, len;
104 
105 	if (spdk_likely(!vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
106 		return;
107 	}
108 
109 	if (spdk_unlikely(virtqueue->packed.packed_ring)) {
110 		offset = idx * sizeof(struct vring_packed_desc);
111 		len = sizeof(struct vring_packed_desc);
112 	} else {
113 		offset = offsetof(struct vring_used, ring[idx]);
114 		len = sizeof(virtqueue->vring.used->ring[idx]);
115 	}
116 
117 	rte_vhost_log_used_vring(vsession->vid, virtqueue->vring_idx, offset, len);
118 }
119 
120 static void
121 vhost_log_used_vring_idx(struct spdk_vhost_session *vsession,
122 			 struct spdk_vhost_virtqueue *virtqueue)
123 {
124 	uint64_t offset, len;
125 	uint16_t vq_idx;
126 
127 	if (spdk_likely(!vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
128 		return;
129 	}
130 
131 	offset = offsetof(struct vring_used, idx);
132 	len = sizeof(virtqueue->vring.used->idx);
133 	vq_idx = virtqueue - vsession->virtqueue;
134 
135 	rte_vhost_log_used_vring(vsession->vid, vq_idx, offset, len);
136 }
137 
138 /*
139  * Get available requests from avail ring.
140  */
141 uint16_t
142 vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *virtqueue, uint16_t *reqs,
143 			uint16_t reqs_len)
144 {
145 	struct rte_vhost_vring *vring = &virtqueue->vring;
146 	struct vring_avail *avail = vring->avail;
147 	uint16_t size_mask = vring->size - 1;
148 	uint16_t last_idx = virtqueue->last_avail_idx, avail_idx = avail->idx;
149 	uint16_t count, i;
150 	int rc;
151 	uint64_t u64_value;
152 
153 	spdk_smp_rmb();
154 
155 	if (virtqueue->vsession && spdk_unlikely(virtqueue->vsession->interrupt_mode)) {
156 		/* Read to clear vring's kickfd */
157 		rc = read(vring->kickfd, &u64_value, sizeof(u64_value));
158 		if (rc < 0) {
159 			SPDK_ERRLOG("failed to acknowledge kickfd: %s.\n", spdk_strerror(errno));
160 			return -errno;
161 		}
162 	}
163 
164 	count = avail_idx - last_idx;
165 	if (spdk_likely(count == 0)) {
166 		return 0;
167 	}
168 
169 	if (spdk_unlikely(count > vring->size)) {
170 		/* TODO: the queue is unrecoverably broken and should be marked so.
171 		 * For now we will fail silently and report there are no new avail entries.
172 		 */
173 		return 0;
174 	}
175 
176 	count = spdk_min(count, reqs_len);
177 
178 	virtqueue->last_avail_idx += count;
179 	/* Check whether there are unprocessed reqs in vq, then kick vq manually */
180 	if (virtqueue->vsession && spdk_unlikely(virtqueue->vsession->interrupt_mode)) {
181 		/* If avail_idx is larger than virtqueue's last_avail_idx, then there is unprocessed reqs.
182 		 * avail_idx should get updated here from memory, in case of race condition with guest.
183 		 */
184 		avail_idx = * (volatile uint16_t *) &avail->idx;
185 		if (avail_idx > virtqueue->last_avail_idx) {
186 			/* Write to notify vring's kickfd */
187 			rc = write(vring->kickfd, &u64_value, sizeof(u64_value));
188 			if (rc < 0) {
189 				SPDK_ERRLOG("failed to kick vring: %s.\n", spdk_strerror(errno));
190 				return -errno;
191 			}
192 		}
193 	}
194 
195 	for (i = 0; i < count; i++) {
196 		reqs[i] = vring->avail->ring[(last_idx + i) & size_mask];
197 	}
198 
199 	SPDK_DEBUGLOG(vhost_ring,
200 		      "AVAIL: last_idx=%"PRIu16" avail_idx=%"PRIu16" count=%"PRIu16"\n",
201 		      last_idx, avail_idx, count);
202 
203 	return count;
204 }
205 
206 static bool
207 vhost_vring_desc_is_indirect(struct vring_desc *cur_desc)
208 {
209 	return !!(cur_desc->flags & VRING_DESC_F_INDIRECT);
210 }
211 
212 static bool
213 vhost_vring_packed_desc_is_indirect(struct vring_packed_desc *cur_desc)
214 {
215 	return (cur_desc->flags & VRING_DESC_F_INDIRECT) != 0;
216 }
217 
218 static bool
219 vhost_inflight_packed_desc_is_indirect(spdk_vhost_inflight_desc *cur_desc)
220 {
221 	return (cur_desc->flags & VRING_DESC_F_INDIRECT) != 0;
222 }
223 
224 int
225 vhost_vq_get_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *virtqueue,
226 		  uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
227 		  uint32_t *desc_table_size)
228 {
229 	if (spdk_unlikely(req_idx >= virtqueue->vring.size)) {
230 		return -1;
231 	}
232 
233 	*desc = &virtqueue->vring.desc[req_idx];
234 
235 	if (vhost_vring_desc_is_indirect(*desc)) {
236 		*desc_table_size = (*desc)->len / sizeof(**desc);
237 		*desc_table = vhost_gpa_to_vva(vsession, (*desc)->addr,
238 					       sizeof(**desc) * *desc_table_size);
239 		*desc = *desc_table;
240 		if (*desc == NULL) {
241 			return -1;
242 		}
243 
244 		return 0;
245 	}
246 
247 	*desc_table = virtqueue->vring.desc;
248 	*desc_table_size = virtqueue->vring.size;
249 
250 	return 0;
251 }
252 
253 static bool
254 vhost_packed_desc_indirect_to_desc_table(struct spdk_vhost_session *vsession,
255 		uint64_t addr, uint32_t len,
256 		struct vring_packed_desc **desc_table,
257 		uint32_t *desc_table_size)
258 {
259 	*desc_table_size = len / sizeof(struct vring_packed_desc);
260 
261 	*desc_table = vhost_gpa_to_vva(vsession, addr, len);
262 	if (spdk_unlikely(*desc_table == NULL)) {
263 		return false;
264 	}
265 
266 	return true;
267 }
268 
269 int
270 vhost_vq_get_desc_packed(struct spdk_vhost_session *vsession,
271 			 struct spdk_vhost_virtqueue *virtqueue,
272 			 uint16_t req_idx, struct vring_packed_desc **desc,
273 			 struct vring_packed_desc **desc_table, uint32_t *desc_table_size)
274 {
275 	*desc =  &virtqueue->vring.desc_packed[req_idx];
276 
277 	/* In packed ring when the desc is non-indirect we get next desc
278 	 * by judging (desc->flag & VRING_DESC_F_NEXT) != 0. When the desc
279 	 * is indirect we get next desc by idx and desc_table_size. It's
280 	 * different from split ring.
281 	 */
282 	if (vhost_vring_packed_desc_is_indirect(*desc)) {
283 		if (!vhost_packed_desc_indirect_to_desc_table(vsession, (*desc)->addr, (*desc)->len,
284 				desc_table, desc_table_size)) {
285 			return -1;
286 		}
287 
288 		*desc = *desc_table;
289 	} else {
290 		*desc_table = NULL;
291 		*desc_table_size  = 0;
292 	}
293 
294 	return 0;
295 }
296 
297 int
298 vhost_inflight_queue_get_desc(struct spdk_vhost_session *vsession,
299 			      spdk_vhost_inflight_desc *desc_array,
300 			      uint16_t req_idx, spdk_vhost_inflight_desc **desc,
301 			      struct vring_packed_desc  **desc_table, uint32_t *desc_table_size)
302 {
303 	*desc = &desc_array[req_idx];
304 
305 	if (vhost_inflight_packed_desc_is_indirect(*desc)) {
306 		if (!vhost_packed_desc_indirect_to_desc_table(vsession, (*desc)->addr, (*desc)->len,
307 				desc_table, desc_table_size)) {
308 			return -1;
309 		}
310 
311 		/* This desc is the inflight desc not the packed desc.
312 		 * When set the F_INDIRECT the table entry should be the packed desc
313 		 * so set the inflight desc NULL.
314 		 */
315 		*desc = NULL;
316 	} else {
317 		/* When not set the F_INDIRECT means there is no packed desc table */
318 		*desc_table = NULL;
319 		*desc_table_size = 0;
320 	}
321 
322 	return 0;
323 }
324 
325 int
326 vhost_vq_used_signal(struct spdk_vhost_session *vsession,
327 		     struct spdk_vhost_virtqueue *virtqueue)
328 {
329 	if (virtqueue->used_req_cnt == 0) {
330 		return 0;
331 	}
332 
333 	virtqueue->req_cnt += virtqueue->used_req_cnt;
334 	virtqueue->used_req_cnt = 0;
335 
336 	SPDK_DEBUGLOG(vhost_ring,
337 		      "Queue %td - USED RING: sending IRQ: last used %"PRIu16"\n",
338 		      virtqueue - vsession->virtqueue, virtqueue->last_used_idx);
339 
340 	if (rte_vhost_vring_call(vsession->vid, virtqueue->vring_idx) == 0) {
341 		/* interrupt signalled */
342 		return 1;
343 	} else {
344 		/* interrupt not signalled */
345 		return 0;
346 	}
347 }
348 
349 static void
350 session_vq_io_stats_update(struct spdk_vhost_session *vsession,
351 			   struct spdk_vhost_virtqueue *virtqueue, uint64_t now)
352 {
353 	uint32_t irq_delay_base = vsession->coalescing_delay_time_base;
354 	uint32_t io_threshold = vsession->coalescing_io_rate_threshold;
355 	int32_t irq_delay;
356 	uint32_t req_cnt;
357 
358 	req_cnt = virtqueue->req_cnt + virtqueue->used_req_cnt;
359 	if (req_cnt <= io_threshold) {
360 		return;
361 	}
362 
363 	irq_delay = (irq_delay_base * (req_cnt - io_threshold)) / io_threshold;
364 	virtqueue->irq_delay_time = (uint32_t) spdk_max(0, irq_delay);
365 
366 	virtqueue->req_cnt = 0;
367 	virtqueue->next_event_time = now;
368 }
369 
370 static void
371 check_session_vq_io_stats(struct spdk_vhost_session *vsession,
372 			  struct spdk_vhost_virtqueue *virtqueue, uint64_t now)
373 {
374 	if (now < vsession->next_stats_check_time) {
375 		return;
376 	}
377 
378 	vsession->next_stats_check_time = now + vsession->stats_check_interval;
379 	session_vq_io_stats_update(vsession, virtqueue, now);
380 }
381 
382 static inline bool
383 vhost_vq_event_is_suppressed(struct spdk_vhost_virtqueue *vq)
384 {
385 	if (spdk_unlikely(vq->packed.packed_ring)) {
386 		if (vq->vring.driver_event->flags & VRING_PACKED_EVENT_FLAG_DISABLE) {
387 			return true;
388 		}
389 	} else {
390 		if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
391 			return true;
392 		}
393 	}
394 
395 	return false;
396 }
397 
398 void
399 vhost_session_vq_used_signal(struct spdk_vhost_virtqueue *virtqueue)
400 {
401 	struct spdk_vhost_session *vsession = virtqueue->vsession;
402 	uint64_t now;
403 
404 	if (vsession->coalescing_delay_time_base == 0) {
405 		if (virtqueue->vring.desc == NULL) {
406 			return;
407 		}
408 
409 		if (vhost_vq_event_is_suppressed(virtqueue)) {
410 			return;
411 		}
412 
413 		vhost_vq_used_signal(vsession, virtqueue);
414 	} else {
415 		now = spdk_get_ticks();
416 		check_session_vq_io_stats(vsession, virtqueue, now);
417 
418 		/* No need for event right now */
419 		if (now < virtqueue->next_event_time) {
420 			return;
421 		}
422 
423 		if (vhost_vq_event_is_suppressed(virtqueue)) {
424 			return;
425 		}
426 
427 		if (!vhost_vq_used_signal(vsession, virtqueue)) {
428 			return;
429 		}
430 
431 		/* Syscall is quite long so update time */
432 		now = spdk_get_ticks();
433 		virtqueue->next_event_time = now + virtqueue->irq_delay_time;
434 	}
435 }
436 
437 void
438 vhost_session_used_signal(struct spdk_vhost_session *vsession)
439 {
440 	struct spdk_vhost_virtqueue *virtqueue;
441 	uint16_t q_idx;
442 
443 	for (q_idx = 0; q_idx < vsession->max_queues; q_idx++) {
444 		virtqueue = &vsession->virtqueue[q_idx];
445 		vhost_session_vq_used_signal(virtqueue);
446 	}
447 }
448 
/*
 * Enqueue id and len to used ring.
 *
 * Completes the descriptor chain headed by "id" on a split ring: publishes
 * the (id, len) pair into the used ring, bumps used->idx, performs dirty
 * logging for live migration, updates the inflight-IO bookkeeping used for
 * crash recovery, and - in interrupt mode - signals the guest.
 * The statement order around the memory barrier below is load-bearing;
 * do not reorder.
 */
void
vhost_vq_used_ring_enqueue(struct spdk_vhost_session *vsession,
			   struct spdk_vhost_virtqueue *virtqueue,
			   uint16_t id, uint32_t len)
{
	struct rte_vhost_vring *vring = &virtqueue->vring;
	struct vring_used *used = vring->used;
	/* Ring slot for this completion; size is a power of two for split rings. */
	uint16_t last_idx = virtqueue->last_used_idx & (vring->size - 1);
	uint16_t vq_idx = virtqueue->vring_idx;

	SPDK_DEBUGLOG(vhost_ring,
		      "Queue %td - USED RING: last_idx=%"PRIu16" req id=%"PRIu16" len=%"PRIu32"\n",
		      virtqueue - vsession->virtqueue, virtqueue->last_used_idx, id, len);

	/* Dirty-log the request's writable buffers before exposing the completion. */
	vhost_log_req_desc(vsession, virtqueue, id);

	virtqueue->last_used_idx++;
	used->ring[last_idx].id = id;
	used->ring[last_idx].len = len;

	/* Ensure the used ring is updated before we log it or increment used->idx. */
	spdk_smp_wmb();

	rte_vhost_set_last_inflight_io_split(vsession->vid, vq_idx, id);

	vhost_log_used_vring_elem(vsession, virtqueue, last_idx);
	/* volatile store: make the new used->idx visible to the guest now. */
	* (volatile uint16_t *) &used->idx = virtqueue->last_used_idx;
	vhost_log_used_vring_idx(vsession, virtqueue);

	/* The request is fully published; drop it from the inflight region. */
	rte_vhost_clr_inflight_desc_split(vsession->vid, vq_idx, virtqueue->last_used_idx, id);

	virtqueue->used_req_cnt++;

	if (vsession->interrupt_mode) {
		/* Skip signalling if the queue is not started or the driver
		 * suppressed notifications. */
		if (virtqueue->vring.desc == NULL || vhost_vq_event_is_suppressed(virtqueue)) {
			return;
		}

		vhost_vq_used_signal(vsession, virtqueue);
	}
}
493 
/* Complete a packed-ring request: mark the descriptor at last_used_idx used
 * by flipping its F_AVAIL/F_USED bits to match the device's used wrap
 * counter, record buffer_id/length, maintain inflight bookkeeping for crash
 * recovery, dirty-log for live migration, and advance last_used_idx by the
 * chain length (flipping the wrap counter when the ring wraps).
 * The barrier placement below is load-bearing; do not reorder. */
void
vhost_vq_packed_ring_enqueue(struct spdk_vhost_session *vsession,
			     struct spdk_vhost_virtqueue *virtqueue,
			     uint16_t num_descs, uint16_t buffer_id,
			     uint32_t length, uint16_t inflight_head)
{
	struct vring_packed_desc *desc = &virtqueue->vring.desc_packed[virtqueue->last_used_idx];
	bool used, avail;

	SPDK_DEBUGLOG(vhost_ring,
		      "Queue %td - RING: buffer_id=%"PRIu16"\n",
		      virtqueue - vsession->virtqueue, buffer_id);

	/* When the descriptor is used, two flags in descriptor
	 * avail flag and used flag are set to equal
	 * and used flag value == used_wrap_counter.
	 */
	used = !!(desc->flags & VRING_DESC_F_USED);
	avail = !!(desc->flags & VRING_DESC_F_AVAIL);
	if (spdk_unlikely(used == virtqueue->packed.used_phase && used == avail)) {
		SPDK_ERRLOG("descriptor has been used before\n");
		return;
	}

	/* In used desc addr is unused and len specifies the buffer length
	 * that has been written to by the device.
	 */
	desc->addr = 0;
	desc->len = length;

	/* This bit specifies whether any data has been written by the device */
	if (length != 0) {
		desc->flags |= VRING_DESC_F_WRITE;
	}

	/* Buffer ID is included in the last descriptor in the list.
	 * The driver needs to keep track of the size of the list corresponding
	 * to each buffer ID.
	 */
	desc->id = buffer_id;

	/* A device MUST NOT make the descriptor used before buffer_id is
	 * written to the descriptor.
	 */
	spdk_smp_wmb();

	rte_vhost_set_last_inflight_io_packed(vsession->vid, virtqueue->vring_idx, inflight_head);
	/* To mark a desc as used, the device sets the F_USED bit in flags to match
	 * the internal Device ring wrap counter. It also sets the F_AVAIL bit to
	 * match the same value.
	 */
	if (virtqueue->packed.used_phase) {
		desc->flags |= VRING_DESC_F_AVAIL_USED;
	} else {
		desc->flags &= ~VRING_DESC_F_AVAIL_USED;
	}
	/* The completion is visible to the driver; retire the inflight entry. */
	rte_vhost_clr_inflight_desc_packed(vsession->vid, virtqueue->vring_idx, inflight_head);

	/* Dirty-log the used descriptor slot for live migration. */
	vhost_log_used_vring_elem(vsession, virtqueue, virtqueue->last_used_idx);
	/* Advance past the whole chain; packed ring size need not be a power
	 * of two, so wrap explicitly and flip the used wrap counter. */
	virtqueue->last_used_idx += num_descs;
	if (virtqueue->last_used_idx >= virtqueue->vring.size) {
		virtqueue->last_used_idx -= virtqueue->vring.size;
		virtqueue->packed.used_phase = !virtqueue->packed.used_phase;
	}

	virtqueue->used_req_cnt++;
}
561 
562 bool
563 vhost_vq_packed_ring_is_avail(struct spdk_vhost_virtqueue *virtqueue)
564 {
565 	uint16_t flags = virtqueue->vring.desc_packed[virtqueue->last_avail_idx].flags;
566 
567 	/* To mark a desc as available, the driver sets the F_AVAIL bit in flags
568 	 * to match the internal avail wrap counter. It also sets the F_USED bit to
569 	 * match the inverse value but it's not mandatory.
570 	 */
571 	return (!!(flags & VRING_DESC_F_AVAIL) == virtqueue->packed.avail_phase);
572 }
573 
574 bool
575 vhost_vring_packed_desc_is_wr(struct vring_packed_desc *cur_desc)
576 {
577 	return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
578 }
579 
580 bool
581 vhost_vring_inflight_desc_is_wr(spdk_vhost_inflight_desc *cur_desc)
582 {
583 	return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
584 }
585 
586 int
587 vhost_vring_packed_desc_get_next(struct vring_packed_desc **desc, uint16_t *req_idx,
588 				 struct spdk_vhost_virtqueue *vq,
589 				 struct vring_packed_desc *desc_table,
590 				 uint32_t desc_table_size)
591 {
592 	if (desc_table != NULL) {
593 		/* When the desc_table isn't NULL means it's indirect and we get the next
594 		 * desc by req_idx and desc_table_size. The return value is NULL means
595 		 * we reach the last desc of this request.
596 		 */
597 		(*req_idx)++;
598 		if (*req_idx < desc_table_size) {
599 			*desc = &desc_table[*req_idx];
600 		} else {
601 			*desc = NULL;
602 		}
603 	} else {
604 		/* When the desc_table is NULL means it's non-indirect and we get the next
605 		 * desc by req_idx and F_NEXT in flags. The return value is NULL means
606 		 * we reach the last desc of this request. When return new desc
607 		 * we update the req_idx too.
608 		 */
609 		if (((*desc)->flags & VRING_DESC_F_NEXT) == 0) {
610 			*desc = NULL;
611 			return 0;
612 		}
613 
614 		*req_idx = (*req_idx + 1) % vq->vring.size;
615 		*desc = &vq->vring.desc_packed[*req_idx];
616 	}
617 
618 	return 0;
619 }
620 
621 static int
622 vhost_vring_desc_payload_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
623 				uint16_t *iov_index, uintptr_t payload, uint64_t remaining)
624 {
625 	uintptr_t vva;
626 	uint64_t len;
627 
628 	do {
629 		if (*iov_index >= SPDK_VHOST_IOVS_MAX) {
630 			SPDK_ERRLOG("SPDK_VHOST_IOVS_MAX(%d) reached\n", SPDK_VHOST_IOVS_MAX);
631 			return -1;
632 		}
633 		len = remaining;
634 		vva = (uintptr_t)rte_vhost_va_from_guest_pa(vsession->mem, payload, &len);
635 		if (vva == 0 || len == 0) {
636 			SPDK_ERRLOG("gpa_to_vva(%p) == NULL\n", (void *)payload);
637 			return -1;
638 		}
639 		iov[*iov_index].iov_base = (void *)vva;
640 		iov[*iov_index].iov_len = len;
641 		remaining -= len;
642 		payload += len;
643 		(*iov_index)++;
644 	} while (remaining);
645 
646 	return 0;
647 }
648 
649 int
650 vhost_vring_packed_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
651 			       uint16_t *iov_index, const struct vring_packed_desc *desc)
652 {
653 	return vhost_vring_desc_payload_to_iov(vsession, iov, iov_index,
654 					       desc->addr, desc->len);
655 }
656 
657 int
658 vhost_vring_inflight_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
659 				 uint16_t *iov_index, const spdk_vhost_inflight_desc *desc)
660 {
661 	return vhost_vring_desc_payload_to_iov(vsession, iov, iov_index,
662 					       desc->addr, desc->len);
663 }
664 
665 /* 1, Traverse the desc chain to get the buffer_id and return buffer_id as task_idx.
666  * 2, Update the vq->last_avail_idx to point next available desc chain.
667  * 3, Update the avail_wrap_counter if last_avail_idx overturn.
668  */
669 uint16_t
670 vhost_vring_packed_desc_get_buffer_id(struct spdk_vhost_virtqueue *vq, uint16_t req_idx,
671 				      uint16_t *num_descs)
672 {
673 	struct vring_packed_desc *desc;
674 	uint16_t desc_head = req_idx;
675 
676 	*num_descs = 1;
677 
678 	desc =  &vq->vring.desc_packed[req_idx];
679 	if (!vhost_vring_packed_desc_is_indirect(desc)) {
680 		while ((desc->flags & VRING_DESC_F_NEXT) != 0) {
681 			req_idx = (req_idx + 1) % vq->vring.size;
682 			desc = &vq->vring.desc_packed[req_idx];
683 			(*num_descs)++;
684 		}
685 	}
686 
687 	/* Queue Size doesn't have to be a power of 2
688 	 * Device maintains last_avail_idx so we can make sure
689 	 * the value is valid(0 ~ vring.size - 1)
690 	 */
691 	vq->last_avail_idx = (req_idx + 1) % vq->vring.size;
692 	if (vq->last_avail_idx < desc_head) {
693 		vq->packed.avail_phase = !vq->packed.avail_phase;
694 	}
695 
696 	return desc->id;
697 }
698 
699 int
700 vhost_vring_desc_get_next(struct vring_desc **desc,
701 			  struct vring_desc *desc_table, uint32_t desc_table_size)
702 {
703 	struct vring_desc *old_desc = *desc;
704 	uint16_t next_idx;
705 
706 	if ((old_desc->flags & VRING_DESC_F_NEXT) == 0) {
707 		*desc = NULL;
708 		return 0;
709 	}
710 
711 	next_idx = old_desc->next;
712 	if (spdk_unlikely(next_idx >= desc_table_size)) {
713 		*desc = NULL;
714 		return -1;
715 	}
716 
717 	*desc = &desc_table[next_idx];
718 	return 0;
719 }
720 
721 int
722 vhost_vring_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
723 			uint16_t *iov_index, const struct vring_desc *desc)
724 {
725 	return vhost_vring_desc_payload_to_iov(vsession, iov, iov_index,
726 					       desc->addr, desc->len);
727 }
728 
729 struct spdk_vhost_dev *
730 spdk_vhost_dev_next(struct spdk_vhost_dev *vdev)
731 {
732 	if (vdev == NULL) {
733 		return TAILQ_FIRST(&g_vhost_devices);
734 	}
735 
736 	return TAILQ_NEXT(vdev, tailq);
737 }
738 
739 struct spdk_vhost_dev *
740 spdk_vhost_dev_find(const char *ctrlr_name)
741 {
742 	struct spdk_vhost_dev *vdev;
743 
744 	TAILQ_FOREACH(vdev, &g_vhost_devices, tailq) {
745 		if (strcmp(vdev->name, ctrlr_name) == 0) {
746 			return vdev;
747 		}
748 	}
749 
750 	return NULL;
751 }
752 
753 static int
754 vhost_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
755 {
756 	int rc;
757 	struct spdk_cpuset negative_vhost_mask;
758 
759 	if (cpumask == NULL) {
760 		return -1;
761 	}
762 
763 	if (mask == NULL) {
764 		spdk_cpuset_copy(cpumask, &g_vhost_core_mask);
765 		return 0;
766 	}
767 
768 	rc = spdk_cpuset_parse(cpumask, mask);
769 	if (rc < 0) {
770 		SPDK_ERRLOG("invalid cpumask %s\n", mask);
771 		return -1;
772 	}
773 
774 	spdk_cpuset_copy(&negative_vhost_mask, &g_vhost_core_mask);
775 	spdk_cpuset_negate(&negative_vhost_mask);
776 	spdk_cpuset_and(&negative_vhost_mask, cpumask);
777 
778 	if (spdk_cpuset_count(&negative_vhost_mask) != 0) {
779 		SPDK_ERRLOG("one of selected cpu is outside of core mask(=%s)\n",
780 			    spdk_cpuset_fmt(&g_vhost_core_mask));
781 		return -1;
782 	}
783 
784 	spdk_cpuset_and(cpumask, &g_vhost_core_mask);
785 
786 	if (spdk_cpuset_count(cpumask) == 0) {
787 		SPDK_ERRLOG("no cpu is selected among core mask(=%s)\n",
788 			    spdk_cpuset_fmt(&g_vhost_core_mask));
789 		return -1;
790 	}
791 
792 	return 0;
793 }
794 
795 int
796 vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
797 		   const struct spdk_vhost_dev_backend *backend)
798 {
799 	struct spdk_cpuset cpumask = {};
800 	int rc;
801 
802 	assert(vdev);
803 	if (name == NULL) {
804 		SPDK_ERRLOG("Can't register controller with no name\n");
805 		return -EINVAL;
806 	}
807 
808 	if (vhost_parse_core_mask(mask_str, &cpumask) != 0) {
809 		SPDK_ERRLOG("cpumask %s is invalid (core mask is 0x%s)\n",
810 			    mask_str, spdk_cpuset_fmt(&g_vhost_core_mask));
811 		return -EINVAL;
812 	}
813 
814 	if (spdk_vhost_dev_find(name)) {
815 		SPDK_ERRLOG("vhost controller %s already exists.\n", name);
816 		return -EEXIST;
817 	}
818 
819 	vdev->name = strdup(name);
820 	if (vdev->name == NULL) {
821 		return -EIO;
822 	}
823 
824 	rc = vhost_user_dev_register(vdev, name, &cpumask, backend);
825 	if (rc != 0) {
826 		free(vdev->name);
827 		return rc;
828 	}
829 
830 	TAILQ_INSERT_TAIL(&g_vhost_devices, vdev, tailq);
831 
832 	SPDK_INFOLOG(vhost, "Controller %s: new controller added\n", vdev->name);
833 	return 0;
834 }
835 
836 int
837 vhost_dev_unregister(struct spdk_vhost_dev *vdev)
838 {
839 	int rc;
840 
841 	rc = vhost_user_dev_unregister(vdev);
842 	if (rc != 0) {
843 		return rc;
844 	}
845 
846 	SPDK_INFOLOG(vhost, "Controller %s: removed\n", vdev->name);
847 
848 	free(vdev->name);
849 	TAILQ_REMOVE(&g_vhost_devices, vdev, tailq);
850 	return 0;
851 }
852 
853 const char *
854 spdk_vhost_dev_get_name(struct spdk_vhost_dev *vdev)
855 {
856 	assert(vdev != NULL);
857 	return vdev->name;
858 }
859 
860 const struct spdk_cpuset *
861 spdk_vhost_dev_get_cpumask(struct spdk_vhost_dev *vdev)
862 {
863 	assert(vdev != NULL);
864 	return spdk_thread_get_cpumask(vdev->thread);
865 }
866 
867 void
868 vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
869 {
870 	assert(vdev->backend->dump_info_json != NULL);
871 	vdev->backend->dump_info_json(vdev, w);
872 }
873 
874 int
875 spdk_vhost_dev_remove(struct spdk_vhost_dev *vdev)
876 {
877 	if (vdev->pending_async_op_num) {
878 		return -EBUSY;
879 	}
880 
881 	return vdev->backend->remove_device(vdev);
882 }
883 
884 void
885 spdk_vhost_lock(void)
886 {
887 	pthread_mutex_lock(&g_vhost_mutex);
888 }
889 
890 int
891 spdk_vhost_trylock(void)
892 {
893 	return -pthread_mutex_trylock(&g_vhost_mutex);
894 }
895 
896 void
897 spdk_vhost_unlock(void)
898 {
899 	pthread_mutex_unlock(&g_vhost_mutex);
900 }
901 
902 void
903 spdk_vhost_init(spdk_vhost_init_cb init_cb)
904 {
905 	uint32_t i;
906 	int ret = 0;
907 
908 	ret = vhost_user_init();
909 	if (ret != 0) {
910 		init_cb(ret);
911 		return;
912 	}
913 
914 	spdk_cpuset_zero(&g_vhost_core_mask);
915 	SPDK_ENV_FOREACH_CORE(i) {
916 		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
917 	}
918 	init_cb(ret);
919 }
920 
921 static spdk_vhost_fini_cb g_fini_cb;
922 
923 static void
924 vhost_fini(void *arg1)
925 {
926 	struct spdk_vhost_dev *vdev, *tmp;
927 
928 	spdk_vhost_lock();
929 	vdev = spdk_vhost_dev_next(NULL);
930 	while (vdev != NULL) {
931 		tmp = spdk_vhost_dev_next(vdev);
932 		spdk_vhost_dev_remove(vdev);
933 		/* don't care if it fails, there's nothing we can do for now */
934 		vdev = tmp;
935 	}
936 	spdk_vhost_unlock();
937 
938 	g_fini_cb();
939 }
940 
941 void
942 spdk_vhost_fini(spdk_vhost_fini_cb fini_cb)
943 {
944 	g_fini_cb = fini_cb;
945 
946 	vhost_user_fini(vhost_fini);
947 }
948 
949 void
950 spdk_vhost_config_json(struct spdk_json_write_ctx *w)
951 {
952 	struct spdk_vhost_dev *vdev;
953 	uint32_t delay_base_us;
954 	uint32_t iops_threshold;
955 
956 	spdk_json_write_array_begin(w);
957 
958 	spdk_vhost_lock();
959 	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
960 	     vdev = spdk_vhost_dev_next(vdev)) {
961 		vdev->backend->write_config_json(vdev, w);
962 
963 		spdk_vhost_get_coalescing(vdev, &delay_base_us, &iops_threshold);
964 		if (delay_base_us) {
965 			spdk_json_write_object_begin(w);
966 			spdk_json_write_named_string(w, "method", "vhost_controller_set_coalescing");
967 
968 			spdk_json_write_named_object_begin(w, "params");
969 			spdk_json_write_named_string(w, "ctrlr", vdev->name);
970 			spdk_json_write_named_uint32(w, "delay_base_us", delay_base_us);
971 			spdk_json_write_named_uint32(w, "iops_threshold", iops_threshold);
972 			spdk_json_write_object_end(w);
973 
974 			spdk_json_write_object_end(w);
975 		}
976 	}
977 	spdk_vhost_unlock();
978 
979 	spdk_json_write_array_end(w);
980 }
981 
982 SPDK_LOG_REGISTER_COMPONENT(vhost)
983 SPDK_LOG_REGISTER_COMPONENT(vhost_ring)
984