/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "CUnit/Basic.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk_internal/mock.h"
#include "common/lib/test_env.c"
#include "unit/lib/json_mock.c"

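/*
 * Include vhost.c directly so the tests can reach its static internals.
 * The DPDK rte_vhost API and the few SPDK helpers it would normally link
 * against are replaced with DEFINE_STUB mocks below.
 */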
#include "vhost/vhost.c"

DEFINE_STUB(rte_vhost_set_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx), 0);
DEFINE_STUB(rte_vhost_get_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB_V(vhost_session_install_rte_compat_hooks,
	      (struct spdk_vhost_session *vsession));
DEFINE_STUB(vhost_register_unix_socket, int, (const char *path, const char *name,
		uint64_t virtio_features, uint64_t disabled_features, uint64_t protocol_features), 0);
DEFINE_STUB(vhost_driver_unregister, int, (const char *path), 0);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(rte_vhost_vring_call, int, (int vid, uint16_t vring_idx), 0);
DEFINE_STUB_V(rte_vhost_log_used_vring, (int vid, uint16_t vring_idx,
		uint64_t offset, uint64_t len));

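/*
 * Stub the app core mask with only core 0 set so that cpumask validation in
 * vhost_dev_register() is deterministic: "0x1" is accepted, "0x2" is not.
 */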
static struct spdk_cpuset *g_app_core_mask;

struct spdk_cpuset *
spdk_app_get_core_mask(void)
{
	if (g_app_core_mask == NULL) {
		g_app_core_mask = spdk_cpuset_alloc();
		spdk_cpuset_set_cpu(g_app_core_mask, 0, true);
	}
	return g_app_core_mask;
}

int
spdk_app_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
{
	int ret;
	struct spdk_cpuset *validmask;

	ret = spdk_cpuset_parse(cpumask, mask);
	if (ret < 0) {
		return ret;
	}

	validmask = spdk_app_get_core_mask();
	spdk_cpuset_and(cpumask, validmask);

	return 0;
}

DEFINE_STUB(rte_vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);
DEFINE_STUB(rte_vhost_get_negotiated_features, int, (int vid, uint64_t *features), 0);
DEFINE_STUB(rte_vhost_get_vhost_vring, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_vring *vring), 0);
DEFINE_STUB(rte_vhost_enable_guest_notification, int,
	    (int vid, uint16_t queue_id, int enable), 0);
DEFINE_STUB(rte_vhost_get_ifname, int, (int vid, char *buf, size_t len), 0);
DEFINE_STUB(rte_vhost_driver_start, int, (const char *name), 0);
DEFINE_STUB(rte_vhost_driver_callback_register, int,
	    (const char *path, struct vhost_device_ops const *const ops), 0);
DEFINE_STUB(rte_vhost_driver_disable_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_set_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_register, int, (const char *path, uint64_t flags), 0);
DEFINE_STUB(vhost_nvme_admin_passthrough, int, (int vid, void *cmd, void *cqe, void *buf), 0);
DEFINE_STUB(vhost_nvme_set_cq_call, int, (int vid, uint16_t qid, int fd), 0);
DEFINE_STUB(vhost_nvme_set_bar_mr, int, (int vid, void *bar, uint64_t bar_size), 0);
DEFINE_STUB(vhost_nvme_get_cap, int, (int vid, uint64_t *cap), 0);

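/*
 * The real spdk_call_unaffinitized() temporarily clears the calling thread's
 * CPU affinity before invoking the callback; invoking the callback inline is
 * enough for these tests.
 */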
void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	return cb(arg);
}

static struct spdk_vhost_dev_backend g_vdev_backend;

static int
test_setup(void)
{
	return 0;
}

static int
alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask)
{
	struct spdk_vhost_dev *vdev = NULL;
	int rc;

	/* spdk_vhost_dev must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vdev, 64, sizeof(*vdev));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vdev != NULL);
	memset(vdev, 0, sizeof(*vdev));
	rc = vhost_dev_register(vdev, name, cpumask, &g_vdev_backend);
	if (rc == 0) {
		*vdev_p = vdev;
	} else {
		free(vdev);
		*vdev_p = NULL;
	}

	return rc;
}

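/*
 * Attach a fake, already-started session to the device. The session gets two
 * contiguous 4 MB guest memory regions starting at guest physical address 0,
 * mapped at host virtual addresses 0x1000000 and 0x2000000 respectively.
 */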
static void
start_vdev(struct spdk_vhost_dev *vdev)
{
	struct rte_vhost_memory *mem;
	struct spdk_vhost_session *vsession = NULL;
	int rc;

	mem = calloc(1, sizeof(*mem) + 2 * sizeof(struct rte_vhost_mem_region));
	SPDK_CU_ASSERT_FATAL(mem != NULL);
	mem->nregions = 2;
	mem->regions[0].guest_phys_addr = 0;
	mem->regions[0].size = 0x400000; /* 4 MB */
	mem->regions[0].host_user_addr = 0x1000000;
	mem->regions[1].guest_phys_addr = 0x400000;
	mem->regions[1].size = 0x400000; /* 4 MB */
	mem->regions[1].host_user_addr = 0x2000000;

	assert(TAILQ_EMPTY(&vdev->vsessions));
	/* spdk_vhost_session must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vsession, 64, sizeof(*vsession));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vsession != NULL);
	vsession->started = true;
	vsession->vid = 0;
	vsession->mem = mem;
	TAILQ_INSERT_TAIL(&vdev->vsessions, vsession, tailq);
}

static void
stop_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_session *vsession = TAILQ_FIRST(&vdev->vsessions);

	TAILQ_REMOVE(&vdev->vsessions, vsession, tailq);
	free(vsession->mem);
	free(vsession);
}

static void
cleanup_vdev(struct spdk_vhost_dev *vdev)
{
	if (!TAILQ_EMPTY(&vdev->vsessions)) {
		stop_vdev(vdev);
	}
	vhost_dev_unregister(vdev);
	free(vdev);
}

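/*
 * Exercise vhost_vring_desc_to_iov(): translating a descriptor's guest
 * physical range into host iovecs. With the regions set up by start_vdev(),
 * GPA 0x0-0x400000 maps to HVA 0x1000000 onward, so e.g. desc.addr 0x110000
 * must yield iov_base 0x1110000.
 */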
static void
desc_to_iov_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct iovec iov[SPDK_VHOST_IOVS_MAX];
	uint16_t iov_index;
	struct vring_desc desc;
	int rc;

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&vdev->vsessions);

	/* Test the simple case where the iov falls fully within a 2MB page. */
	desc.addr = 0x110000;
	desc.len = 0x1000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[0].iov_len == 0x1000);
	/*
	 * Always memset the iov afterwards so that each test validates data
	 * written by its own call to the function under test.
	 */
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects a non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x1000);
	memset(iov, 0, sizeof(iov));

	/* Test for failure when iov_index already equals SPDK_VHOST_IOVS_MAX. */
	iov_index = SPDK_VHOST_IOVS_MAX;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc != 0);
	memset(iov, 0, sizeof(iov));

	/* Test the case where the iov spans a 2MB boundary but not a vhost memory region. */
	desc.addr = 0x1F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[0].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects a non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Test the case where the iov spans a vhost memory region boundary and must be split. */
	desc.addr = 0x3F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 2);
	CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);
	CU_ASSERT(iov[0].iov_len == 0x10000);
	CU_ASSERT(iov[1].iov_base == (void *)0x2000000);
	CU_ASSERT(iov[1].iov_len == 0x10000);
	memset(iov, 0, sizeof(iov));

	cleanup_vdev(vdev);
}

static void
create_controller_test(void)
{
	struct spdk_vhost_dev *vdev, *vdev2;
	int ret;
	char long_name[PATH_MAX];

	/* NOTE: the spdk_app_get_core_mask stub above always returns coremask 0x1. */

	/* Creating a device with no name must fail. */
	ret = alloc_vdev(&vdev, NULL, "0x1");
	CU_ASSERT(ret != 0);

	/* Creating a device with a cpumask outside the app core mask must fail. */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x2");
	CU_ASSERT(ret != 0);

	/* Creating a device whose name plus socket path is too long must fail. */
	memset(long_name, 'x', sizeof(long_name));
	long_name[PATH_MAX - 1] = 0;
	snprintf(dev_dirname, sizeof(dev_dirname), "some_path/");
	ret = alloc_vdev(&vdev, long_name, "0x1");
	CU_ASSERT(ret != 0);
	dev_dirname[0] = 0;

	/* Creating a device whose name is already taken must fail. */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	ret = alloc_vdev(&vdev2, "vdev_name_0", "0x1");
	CU_ASSERT(ret != 0);
	cleanup_vdev(vdev);
}

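/* vhost_session_find_by_vid() must match only a session's exact vid. */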
static void
session_find_by_vid_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct spdk_vhost_session *tmp;
	int rc;

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&vdev->vsessions);

	tmp = vhost_session_find_by_vid(vsession->vid);
	CU_ASSERT(tmp == vsession);

	/* Search with a vid that no session owns. */
	tmp = vhost_session_find_by_vid(vsession->vid + 0xFF);
	CU_ASSERT(tmp == NULL);

	cleanup_vdev(vdev);
}

static void
remove_controller_test(void)
{
	struct spdk_vhost_dev *vdev;
	int ret;

	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);

	/* Unregistering must fail while the controller still has an active session. */
	start_vdev(vdev);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&vdev->vsessions));
	ret = vhost_dev_unregister(vdev);
	CU_ASSERT(ret != 0);

	cleanup_vdev(vdev);
}

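/*
 * vhost_vq_avail_ring_get() reaps up to reqs_len entries from a split avail
 * ring. The number available is avail->idx - last_avail_idx in 16-bit
 * arithmetic (so it wraps naturally), and ring slots are indexed modulo the
 * ring size.
 */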
static void
vq_avail_ring_get_test(void)
{
	struct spdk_vhost_virtqueue vq;
	uint16_t avail_mem[34];
	uint16_t reqs[32];
	uint16_t reqs_len, ret, i;

	/* Basic case: reap all outstanding requests (5 available, reqs_len is 6). */
	vq.vring.avail = (struct vring_avail *)avail_mem;
	vq.vring.size = 32;
	vq.last_avail_idx = 24;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	for (i = 0; i < 32; i++) {
		vq.vring.avail->ring[i] = i;
	}

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 29);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 24]);
	}

	/* Basic case: more requests available (9) than reqs_len, so only reqs_len are reaped. */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == reqs_len);
	CU_ASSERT(vq.last_avail_idx == 26);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 20]);
	}

	/* Invalid case: avail->idx claims more entries than the ring can hold, so nothing is reaped. */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 156;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 0);

	/* 16-bit wrap of avail->idx: 65535 to 4 yields 5 entries, starting at ring slot 65535 % 32 == 31. */
	vq.last_avail_idx = 65535;
	vq.vring.avail->idx = 4;
	reqs_len = 6;
	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 4);
	CU_ASSERT(reqs[0] == vq.vring.avail->ring[31]);
	for (i = 1; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i - 1]);
	}
}

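/*
 * Guest-side helpers for the packed ring test below. In a packed ring a
 * descriptor's state is encoded by the VRING_DESC_F_AVAIL/VRING_DESC_F_USED
 * flags relative to each side's wrap counter ("phase"); the phase flips
 * whenever an index wraps past the end of the ring.
 */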
static bool
vq_desc_guest_is_used(struct spdk_vhost_virtqueue *vq, int16_t guest_last_used_idx,
		      int16_t guest_used_phase)
{
	return (!!(vq->vring.desc_packed[guest_last_used_idx].flags & VRING_DESC_F_USED) ==
		!!guest_used_phase);
}

static void
vq_desc_guest_set_avail(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_avail_idx,
			int16_t *guest_avail_phase)
{
	if (*guest_avail_phase) {
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_USED;
	} else {
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_USED;
	}

	if (++(*guest_last_avail_idx) >= vq->vring.size) {
		*guest_last_avail_idx -= vq->vring.size;
		*guest_avail_phase = !(*guest_avail_phase);
	}
}

static int16_t
vq_desc_guest_handle_completed_desc(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_used_idx,
				    int16_t *guest_used_phase)
{
	int16_t buffer_id = -1;

	if (vq_desc_guest_is_used(vq, *guest_last_used_idx, *guest_used_phase)) {
		buffer_id = vq->vring.desc_packed[*guest_last_used_idx].id;
		if (++(*guest_last_used_idx) >= vq->vring.size) {
			*guest_last_used_idx -= vq->vring.size;
			*guest_used_phase = !(*guest_used_phase);
		}

		return buffer_id;
	}

	return -1;
}

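/*
 * Drive a 4-entry packed ring from both sides: the helpers above play the
 * guest, while vhost_vq_packed_ring_is_avail() and
 * vhost_vq_packed_ring_enqueue() play the host, covering out-of-order
 * completion and a full wrap of both phases.
 */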
static void
vq_packed_ring_test(void)
{
	struct spdk_vhost_session vs = {};
	struct spdk_vhost_virtqueue vq = {};
	struct vring_packed_desc descs[4];
	uint16_t guest_last_avail_idx = 0, guest_last_used_idx = 0;
	uint16_t guest_avail_phase = 1, guest_used_phase = 1;
	int i;
	int16_t chain_num;

	vq.vring.desc_packed = descs;
	vq.vring.size = 4;

	/* The avail and used wrap counters are initialized to 1. */
	vq.packed.avail_phase = 1;
	vq.packed.used_phase = 1;
	vq.packed.packed_ring = true;
	memset(descs, 0, sizeof(descs));

	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == false);

	/* The guest sends four requests. */
	for (i = 0; i < vq.vring.size; i++) {
		descs[guest_last_avail_idx].id = i;
		/* Mark the desc available. */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}
	CU_ASSERT(guest_last_avail_idx == 0);
	CU_ASSERT(guest_avail_phase == 0);

	/* The host consumes the available descs. */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 0;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i++);
		CU_ASSERT(chain_num == 1);
	}

	/* The host completes them out of order: buffer ids 1, 0, 2. */
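	/* (The fourth argument to vhost_vq_packed_ring_enqueue() is the buffer id being completed; each request here is a single desc.) */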
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1);

	/* The host has consumed all four available requests but completed only three of them. */
	CU_ASSERT(vq.last_avail_idx == 0);
	CU_ASSERT(vq.packed.avail_phase == 0);
	CU_ASSERT(vq.last_used_idx == 3);
	CU_ASSERT(vq.packed.used_phase == 1);

	/* The guest handles the completed requests. */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);
	CU_ASSERT(guest_last_used_idx == 3);
	CU_ASSERT(guest_used_phase == 1);

	/* Three descs are free again, so the guest can send three more requests. */
	for (i = 0; i < 3; i++) {
		descs[guest_last_avail_idx].id = 2 - i;
		/* Mark the desc available. */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}

	/* The host consumes the available descs. */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 2;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i--);
		CU_ASSERT(chain_num == 1);
	}

	/* The host now holds four requests: the three new ones plus the one left over. */
	CU_ASSERT(vq.last_avail_idx == 3);
	/* The available wrap counter flipped when the first pass wrapped the ring. */
	CU_ASSERT(vq.packed.avail_phase == 0);

	/* The host completes all four requests. */
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 3, 1);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1);

	CU_ASSERT(vq.last_used_idx == vq.last_avail_idx);
	CU_ASSERT(vq.packed.used_phase == vq.packed.avail_phase);

	/* The guest handles the completed requests. */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 3);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);

	CU_ASSERT(guest_last_avail_idx == guest_last_used_idx);
	CU_ASSERT(guest_avail_phase == guest_used_phase);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("vhost_suite", test_setup, NULL);

	CU_ADD_TEST(suite, desc_to_iov_test);
	CU_ADD_TEST(suite, create_controller_test);
	CU_ADD_TEST(suite, session_find_by_vid_test);
	CU_ADD_TEST(suite, remove_controller_test);
	CU_ADD_TEST(suite, vq_avail_ring_get_test);
	CU_ADD_TEST(suite, vq_packed_ring_test);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}