/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "CUnit/Basic.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk_internal/mock.h"
#include "common/lib/test_env.c"
#include "unit/lib/json_mock.c"

#include "vhost/vhost.c"

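/*
 * Stub out the rte_vhost and internal vhost symbols that vhost.c calls into.
 * DEFINE_STUB()/DEFINE_STUB_V() come from spdk_internal/mock.h and generate
 * mock implementations that simply return the given value, so vhost.c can be
 * compiled and exercised here without a live DPDK vhost backend.
 */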
DEFINE_STUB(rte_vhost_set_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx), 0);
DEFINE_STUB(rte_vhost_get_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB_V(vhost_session_install_rte_compat_hooks,
	      (struct spdk_vhost_session *vsession));
DEFINE_STUB(vhost_register_unix_socket, int, (const char *path, const char *name,
		uint64_t virtio_features, uint64_t disabled_features, uint64_t protocol_features), 0);
DEFINE_STUB(vhost_driver_unregister, int, (const char *path), 0);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(rte_vhost_vring_call, int, (int vid, uint16_t vring_idx), 0);
DEFINE_STUB_V(rte_vhost_log_used_vring, (int vid, uint16_t vring_idx,
		uint64_t offset, uint64_t len));

DEFINE_STUB(rte_vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);
DEFINE_STUB(rte_vhost_get_negotiated_features, int, (int vid, uint64_t *features), 0);
DEFINE_STUB(rte_vhost_get_vhost_vring, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_vring *vring), 0);
DEFINE_STUB(rte_vhost_enable_guest_notification, int,
	    (int vid, uint16_t queue_id, int enable), 0);
DEFINE_STUB(rte_vhost_get_ifname, int, (int vid, char *buf, size_t len), 0);
DEFINE_STUB(rte_vhost_driver_start, int, (const char *name), 0);
DEFINE_STUB(rte_vhost_driver_callback_register, int,
	    (const char *path, struct vhost_device_ops const *const ops), 0);
DEFINE_STUB(rte_vhost_driver_disable_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_set_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_register, int, (const char *path, uint64_t flags), 0);

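/*
 * The real spdk_call_unaffinitized() invokes the callback with the thread's
 * CPU affinity temporarily cleared; for these unit tests it is enough to
 * invoke the callback directly.
 */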
void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	return cb(arg);
}

static struct spdk_vhost_dev_backend g_vdev_backend;

static int
test_setup(void)
{
	return 0;
}

static int
alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask)
{
	struct spdk_vhost_dev *vdev = NULL;
	int rc;

	/* spdk_vhost_dev must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vdev, 64, sizeof(*vdev));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vdev != NULL);
	memset(vdev, 0, sizeof(*vdev));
	rc = vhost_dev_register(vdev, name, cpumask, &g_vdev_backend);
	if (rc == 0) {
		*vdev_p = vdev;
	} else {
		free(vdev);
		*vdev_p = NULL;
	}

	return rc;
}

static void
start_vdev(struct spdk_vhost_dev *vdev)
{
	struct rte_vhost_memory *mem;
	struct spdk_vhost_session *vsession = NULL;
	int rc;

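	/*
	 * Build a fake guest memory map with two adjacent 4 MB regions that
	 * translate to disjoint host address ranges.  desc_to_iov_test() relies
	 * on this layout: guest [0x0, 0x400000) maps to host 0x1000000 and
	 * guest [0x400000, 0x800000) maps to host 0x2000000.
	 */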
	mem = calloc(1, sizeof(*mem) + 2 * sizeof(struct rte_vhost_mem_region));
	SPDK_CU_ASSERT_FATAL(mem != NULL);
	mem->nregions = 2;
	mem->regions[0].guest_phys_addr = 0;
	mem->regions[0].size = 0x400000; /* 4 MB */
	mem->regions[0].host_user_addr = 0x1000000;
	mem->regions[1].guest_phys_addr = 0x400000;
	mem->regions[1].size = 0x400000; /* 4 MB */
	mem->regions[1].host_user_addr = 0x2000000;

	assert(TAILQ_EMPTY(&vdev->vsessions));
	/* spdk_vhost_session must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vsession, 64, sizeof(*vsession));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vsession != NULL);
	vsession->started = true;
	vsession->vid = 0;
	vsession->mem = mem;
	TAILQ_INSERT_TAIL(&vdev->vsessions, vsession, tailq);
}

static void
stop_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_session *vsession = TAILQ_FIRST(&vdev->vsessions);

	TAILQ_REMOVE(&vdev->vsessions, vsession, tailq);
	free(vsession->mem);
	free(vsession);
}

static void
cleanup_vdev(struct spdk_vhost_dev *vdev)
{
	if (!TAILQ_EMPTY(&vdev->vsessions)) {
		stop_vdev(vdev);
	}
	vhost_dev_unregister(vdev);
	free(vdev);
}

static void
desc_to_iov_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct iovec iov[SPDK_VHOST_IOVS_MAX];
	uint16_t iov_index;
	struct vring_desc desc;
	int rc;

	spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&vdev->vsessions);

	/* Test simple case where iov falls fully within a 2MB page. */
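	/*
	 * Guest address 0x110000 falls in region 0, which maps guest 0x0 to
	 * host 0x1000000, so the expected host address is 0x1110000.
	 */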
	desc.addr = 0x110000;
	desc.len = 0x1000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[0].iov_len == 0x1000);
	/*
	 * Always memset the iov to ensure each test validates data written by its call
	 * to the function under test.
	 */
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x1000);
	memset(iov, 0, sizeof(iov));

	/* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
	iov_index = SPDK_VHOST_IOVS_MAX;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc != 0);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a 2MB boundary, but does not span a vhost memory region. */
	desc.addr = 0x1F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[0].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a vhost memory region. */
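	/*
	 * desc crosses the region boundary at guest address 0x400000, so the
	 * translation must emit two iovs: 0x10000 bytes at the tail of region 0
	 * (host 0x13F0000) and 0x10000 bytes at the head of region 1 (host
	 * 0x2000000).
	 */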
	desc.addr = 0x3F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 2);
	CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);
	CU_ASSERT(iov[0].iov_len == 0x10000);
	CU_ASSERT(iov[1].iov_base == (void *)0x2000000);
	CU_ASSERT(iov[1].iov_len == 0x10000);
	memset(iov, 0, sizeof(iov));

	cleanup_vdev(vdev);

	CU_ASSERT(true);
}

static void
create_controller_test(void)
{
	struct spdk_vhost_dev *vdev, *vdev2;
	int ret;
	char long_name[PATH_MAX];

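	/*
	 * Only core 0 is enabled in g_vhost_core_mask, so "0x1" is the only
	 * cpumask the registrations below can succeed with.
	 */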
	spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);

	/* Create device with no name */
	ret = alloc_vdev(&vdev, NULL, "0x1");
	CU_ASSERT(ret != 0);

	/* Create device with a cpumask that selects no core from the vhost core mask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x2");
	CU_ASSERT(ret != 0);

	/* Create device whose combined path and name are too long */
	memset(long_name, 'x', sizeof(long_name));
	long_name[PATH_MAX - 1] = 0;
	snprintf(dev_dirname, sizeof(dev_dirname), "some_path/");
	ret = alloc_vdev(&vdev, long_name, "0x1");
	CU_ASSERT(ret != 0);
	dev_dirname[0] = 0;

	/* Create device when the device name is already taken */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	ret = alloc_vdev(&vdev2, "vdev_name_0", "0x1");
	CU_ASSERT(ret != 0);
	cleanup_vdev(vdev);
}

static void
session_find_by_vid_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct spdk_vhost_session *tmp;
	int rc;

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&vdev->vsessions);

	tmp = vhost_session_find_by_vid(vsession->vid);
	CU_ASSERT(tmp == vsession);

	/* Search for a session with a vid that does not exist */
	tmp = vhost_session_find_by_vid(vsession->vid + 0xFF);
	CU_ASSERT(tmp == NULL);

	cleanup_vdev(vdev);
}

static void
remove_controller_test(void)
{
	struct spdk_vhost_dev *vdev;
	int ret;

	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);

	/* Removing the device must fail while the controller has an active session */
	start_vdev(vdev);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&vdev->vsessions));
	ret = vhost_dev_unregister(vdev);
	CU_ASSERT(ret != 0);

	cleanup_vdev(vdev);
}

static void
vq_avail_ring_get_test(void)
{
	struct spdk_vhost_virtqueue vq;
	uint16_t avail_mem[34];
	uint16_t reqs[32];
	uint16_t reqs_len, ret, i;

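	/*
	 * vhost_vq_avail_ring_get() reaps up to reqs_len entries between
	 * last_avail_idx and avail->idx (modulo 65536) and advances
	 * last_avail_idx past the entries it consumed.
	 */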
	/* Basic example: reap all pending requests */
	vq.vring.avail = (struct vring_avail *)avail_mem;
	vq.vring.size = 32;
	vq.last_avail_idx = 24;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	for (i = 0; i < 32; i++) {
		vq.vring.avail->ring[i] = i;
	}

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 29);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 24]);
	}

	/* Basic example: reap only some of the pending requests */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == reqs_len);
	CU_ASSERT(vq.last_avail_idx == 26);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 20]);
	}

	/* Invalid example: avail->idx is further ahead than the ring is long */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 156;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 0);

	/* Test wrap-around of the 16-bit avail->idx counter. */
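	/*
	 * Both indexes are free-running uint16_t values, so
	 * (uint16_t)(4 - 65535) == 5 entries are pending, starting at ring
	 * slot 65535 % 32 == 31 and wrapping around to slots 0 through 3.
	 */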
	vq.last_avail_idx = 65535;
	vq.vring.avail->idx = 4;
	reqs_len = 6;
	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 4);
	CU_ASSERT(reqs[0] == vq.vring.avail->ring[31]);
	for (i = 1; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i - 1]);
	}
}

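/*
 * The helpers below emulate the guest (driver) side of a packed virtqueue.
 * In a packed ring a descriptor's state is encoded by the AVAIL and USED
 * flags relative to the wrap counters (phases): the driver makes a
 * descriptor available by setting AVAIL to its avail phase and USED to the
 * complement, and the device marks it used by setting both flags equal to
 * its used phase.
 */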
static bool
vq_desc_guest_is_used(struct spdk_vhost_virtqueue *vq, int16_t guest_last_used_idx,
		      int16_t guest_used_phase)
{
	return (!!(vq->vring.desc_packed[guest_last_used_idx].flags & VRING_DESC_F_USED) ==
		!!guest_used_phase);
}

static void
vq_desc_guest_set_avail(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_avail_idx,
			int16_t *guest_avail_phase)
{
	if (*guest_avail_phase) {
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_USED;
	} else {
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_USED;
	}

	if (++(*guest_last_avail_idx) >= vq->vring.size) {
		*guest_last_avail_idx -= vq->vring.size;
		*guest_avail_phase = !(*guest_avail_phase);
	}
}

static int16_t
vq_desc_guest_handle_completed_desc(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_used_idx,
				    int16_t *guest_used_phase)
{
	int16_t buffer_id = -1;

	if (vq_desc_guest_is_used(vq, *guest_last_used_idx, *guest_used_phase)) {
		buffer_id = vq->vring.desc_packed[*guest_last_used_idx].id;
		if (++(*guest_last_used_idx) >= vq->vring.size) {
			*guest_last_used_idx -= vq->vring.size;
			*guest_used_phase = !(*guest_used_phase);
		}

		return buffer_id;
	}

	return -1;
}

static void
vq_packed_ring_test(void)
{
	struct spdk_vhost_session vs = {};
	struct spdk_vhost_virtqueue vq = {};
	struct vring_packed_desc descs[4];
	uint16_t guest_last_avail_idx = 0, guest_last_used_idx = 0;
	uint16_t guest_avail_phase = 1, guest_used_phase = 1;
	int i;
	int16_t chain_num;

	vq.vring.desc_packed = descs;
	vq.vring.size = 4;

	/* The avail and used wrap counters are initialized to 1 */
	vq.packed.avail_phase = 1;
	vq.packed.used_phase = 1;
	vq.packed.packed_ring = true;
	memset(descs, 0, sizeof(descs));

	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == false);

	/* Guest sends requests */
	for (i = 0; i < vq.vring.size; i++) {
		descs[guest_last_avail_idx].id = i;
		/* Mark the desc available */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}
	CU_ASSERT(guest_last_avail_idx == 0);
	CU_ASSERT(guest_avail_phase == 0);

	/* Host handles the available descs */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 0;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i++);
		CU_ASSERT(chain_num == 1);
	}

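	/*
	 * Each vhost_vq_packed_ring_enqueue() call below completes a
	 * single-descriptor chain; the fourth argument is the buffer id handed
	 * back to the guest, matching the ids asserted further down.
	 */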
	/* Host completes them out of order: 1, 0, 2. */
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1);

	/* Host has received all four available requests but has completed only three of them */
	CU_ASSERT(vq.last_avail_idx == 0);
	CU_ASSERT(vq.packed.avail_phase == 0);
	CU_ASSERT(vq.last_used_idx == 3);
	CU_ASSERT(vq.packed.used_phase == 1);

	/* Guest handles the completed requests */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);
	CU_ASSERT(guest_last_used_idx == 3);
	CU_ASSERT(guest_used_phase == 1);

	/* Three descs are free again, so the guest can send three more requests */
	for (i = 0; i < 3; i++) {
		descs[guest_last_avail_idx].id = 2 - i;
		/* Mark the desc available */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}

	/* Host handles the available descs */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 2;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i--);
		CU_ASSERT(chain_num == 1);
	}

	/* The host now holds four requests: the three new ones plus the one left uncompleted */
	CU_ASSERT(vq.last_avail_idx == 3);
	/* The available wrap counter should have flipped */
	CU_ASSERT(vq.packed.avail_phase == 0);

	/* Host completes all the requests */
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 3, 1);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1);

	CU_ASSERT(vq.last_used_idx == vq.last_avail_idx);
	CU_ASSERT(vq.packed.used_phase == vq.packed.avail_phase);

	/* Guest handles the completed requests */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 3);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);

	CU_ASSERT(guest_last_avail_idx == guest_last_used_idx);
	CU_ASSERT(guest_avail_phase == guest_used_phase);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("vhost_suite", test_setup, NULL);

	CU_ADD_TEST(suite, desc_to_iov_test);
	CU_ADD_TEST(suite, create_controller_test);
	CU_ADD_TEST(suite, session_find_by_vid_test);
	CU_ADD_TEST(suite, remove_controller_test);
	CU_ADD_TEST(suite, vq_avail_ring_get_test);
	CU_ADD_TEST(suite, vq_packed_ring_test);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}