/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "CUnit/Basic.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk_internal/mock.h"
#include "common/lib/test_env.c"
#include "unit/lib/json_mock.c"

#include "vhost/vhost.c"
DEFINE_STUB(rte_vhost_set_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx), 0);
DEFINE_STUB(rte_vhost_get_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB_V(vhost_session_install_rte_compat_hooks,
	      (struct spdk_vhost_session *vsession));
DEFINE_STUB(vhost_register_unix_socket, int, (const char *path, const char *name,
		uint64_t virtio_features, uint64_t disabled_features, uint64_t protocol_features), 0);
DEFINE_STUB(vhost_driver_unregister, int, (const char *path), 0);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(rte_vhost_vring_call, int, (int vid, uint16_t vring_idx), 0);
DEFINE_STUB_V(rte_vhost_log_used_vring, (int vid, uint16_t vring_idx,
		uint64_t offset, uint64_t len));

DEFINE_STUB(rte_vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);
DEFINE_STUB(rte_vhost_get_negotiated_features, int, (int vid, uint64_t *features), 0);
DEFINE_STUB(rte_vhost_get_vhost_vring, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_vring *vring), 0);
DEFINE_STUB(rte_vhost_enable_guest_notification, int,
	    (int vid, uint16_t queue_id, int enable), 0);
DEFINE_STUB(rte_vhost_get_ifname, int, (int vid, char *buf, size_t len), 0);
DEFINE_STUB(rte_vhost_driver_start, int, (const char *name), 0);
DEFINE_STUB(rte_vhost_driver_callback_register, int,
	    (const char *path, struct vhost_device_ops const *const ops), 0);
DEFINE_STUB(rte_vhost_driver_disable_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_set_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_register, int, (const char *path, uint64_t flags), 0);

DEFINE_STUB(rte_vhost_set_last_inflight_io_split, int,
	    (int vid, uint16_t vring_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_clr_inflight_desc_split, int,
	    (int vid, uint16_t vring_idx, uint16_t last_used_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_set_last_inflight_io_packed, int,
	    (int vid, uint16_t vring_idx, uint16_t head), 0);
DEFINE_STUB(rte_vhost_clr_inflight_desc_packed, int,
	    (int vid, uint16_t vring_idx, uint16_t head), 0);
DEFINE_STUB_V(rte_vhost_log_write, (int vid, uint64_t addr, uint64_t len));
DEFINE_STUB_V(vhost_session_mem_register, (struct rte_vhost_memory *mem));
DEFINE_STUB_V(vhost_session_mem_unregister, (struct rte_vhost_memory *mem));
DEFINE_STUB(vhost_get_negotiated_features, int,
	    (int vid, uint64_t *negotiated_features), 0);
DEFINE_STUB(rte_vhost_get_vhost_ring_inflight, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_ring_inflight *vring), 0);
DEFINE_STUB(rte_vhost_get_vring_base_from_inflight, int,
	    (int vid, uint16_t queue_id, uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB(vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);

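/*
 * The library implementation runs the callback with the thread's CPU affinity
 * temporarily cleared; for this unit test it is enough to invoke it inline.
 */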
void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	return cb(arg);
}

static struct spdk_vhost_dev_backend g_vdev_backend;

static int
test_setup(void)
{
	return 0;
}

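/* Allocate a cache-line aligned, zeroed spdk_vhost_dev and register it with
 * the given name and cpumask. On failure the device is freed and *vdev_p is
 * set to NULL.
 */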
static int
alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask)
{
	struct spdk_vhost_dev *vdev = NULL;
	int rc;

	/* spdk_vhost_dev must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vdev, 64, sizeof(*vdev));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vdev != NULL);
	memset(vdev, 0, sizeof(*vdev));
	rc = vhost_dev_register(vdev, name, cpumask, &g_vdev_backend);
	if (rc == 0) {
		*vdev_p = vdev;
	} else {
		free(vdev);
		*vdev_p = NULL;
	}

	return rc;
}

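/* Simulate a guest connecting: build a fake two-region (2 x 4 MB) memory map
 * and attach a single started session to the device.
 */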
static void
start_vdev(struct spdk_vhost_dev *vdev)
{
	struct rte_vhost_memory *mem;
	struct spdk_vhost_session *vsession = NULL;
	int rc;

	mem = calloc(1, sizeof(*mem) + 2 * sizeof(struct rte_vhost_mem_region));
	SPDK_CU_ASSERT_FATAL(mem != NULL);
	mem->nregions = 2;
	mem->regions[0].guest_phys_addr = 0;
	mem->regions[0].size = 0x400000; /* 4 MB */
	mem->regions[0].host_user_addr = 0x1000000;
	mem->regions[1].guest_phys_addr = 0x400000;
	mem->regions[1].size = 0x400000; /* 4 MB */
	mem->regions[1].host_user_addr = 0x2000000;

	assert(TAILQ_EMPTY(&vdev->vsessions));
	/* spdk_vhost_session must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vsession, 64, sizeof(*vsession));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vsession != NULL);
	vsession->started = true;
	vsession->vid = 0;
	vsession->mem = mem;
	TAILQ_INSERT_TAIL(&vdev->vsessions, vsession, tailq);
}

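/* Detach and free the session created by start_vdev(). */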
static void
stop_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_session *vsession = TAILQ_FIRST(&vdev->vsessions);

	TAILQ_REMOVE(&vdev->vsessions, vsession, tailq);
	free(vsession->mem);
	free(vsession);
}

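/* Stop any remaining session, then unregister and free the device. */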
static void
cleanup_vdev(struct spdk_vhost_dev *vdev)
{
	if (!TAILQ_EMPTY(&vdev->vsessions)) {
		stop_vdev(vdev);
	}
	vhost_dev_unregister(vdev);
	free(vdev);
}

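/*
 * Verify that vhost_vring_desc_to_iov() translates guest physical descriptor
 * addresses into host iovecs: within one region, across a 2 MB boundary, and
 * across a memory region boundary.
 */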
static void
desc_to_iov_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct iovec iov[SPDK_VHOST_IOVS_MAX];
	uint16_t iov_index;
	struct vring_desc desc;
	int rc;

	spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&vdev->vsessions);

	/* Test simple case where iov falls fully within a 2MB page. */
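	/* Region 0 maps guest physical [0x0, 0x400000) to host virtual 0x1000000,
	 * so guest address 0x110000 should translate to host 0x1110000.
	 */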
	desc.addr = 0x110000;
	desc.len = 0x1000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[0].iov_len == 0x1000);
	/*
	 * Always memset the iov to ensure each test validates data written by its call
	 * to the function under test.
	 */
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x1000);
	memset(iov, 0, sizeof(iov));

	/* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
	iov_index = SPDK_VHOST_IOVS_MAX;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc != 0);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a 2MB boundary, but does not span a vhost memory region. */
	desc.addr = 0x1F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[0].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a vhost memory region. */
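	/* The descriptor starts 0x10000 bytes below the region boundary at guest
	 * 0x400000, so it must be split into two iovecs: the tail of region 0
	 * (host 0x13F0000) and the start of region 1 (host 0x2000000).
	 */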
	desc.addr = 0x3F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 2);
	CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);
	CU_ASSERT(iov[0].iov_len == 0x10000);
	CU_ASSERT(iov[1].iov_base == (void *)0x2000000);
	CU_ASSERT(iov[1].iov_len == 0x10000);
	memset(iov, 0, sizeof(iov));

	cleanup_vdev(vdev);

	CU_ASSERT(true);
}

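/*
 * Verify name and cpumask validation in vhost_dev_register(): masks within the
 * application core mask succeed, while masks outside it, a missing name, an
 * overly long name/path, and a duplicate name are all rejected.
 */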
static void
create_controller_test(void)
{
	struct spdk_vhost_dev *vdev, *vdev2;
	int ret;
	char long_name[PATH_MAX];

	spdk_cpuset_parse(&g_vhost_core_mask, "0xf");

	/* Create device with a cpumask implicitly matching the whole application */
	ret = alloc_vdev(&vdev, "vdev_name_0", NULL);
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "f"));
	cleanup_vdev(vdev);

	/* Create device with an explicit cpumask matching the whole application */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xf");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "f"));
	cleanup_vdev(vdev);

	/* Create device with a single core in the cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x2");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "2"));
	cleanup_vdev(vdev);

	/* Create device with a cpumask spanning two cores */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x3");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "3"));
	cleanup_vdev(vdev);

	/* Create device with an invalid cpumask entirely outside the application cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xf0");
	SPDK_CU_ASSERT_FATAL(ret != 0);

	/* Create device with an invalid cpumask partially outside the application cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xff");
	SPDK_CU_ASSERT_FATAL(ret != 0);

	/* Create device with no name */
	ret = alloc_vdev(&vdev, NULL, NULL);
	CU_ASSERT(ret != 0);

	/* Create device with a name and path that are too long */
	memset(long_name, 'x', sizeof(long_name));
	long_name[PATH_MAX - 1] = 0;
	snprintf(dev_dirname, sizeof(dev_dirname), "some_path/");
	ret = alloc_vdev(&vdev, long_name, NULL);
	CU_ASSERT(ret != 0);
	dev_dirname[0] = 0;

	/* Create device when the device name is already taken */
	ret = alloc_vdev(&vdev, "vdev_name_0", NULL);
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	ret = alloc_vdev(&vdev2, "vdev_name_0", NULL);
	CU_ASSERT(ret != 0);
	cleanup_vdev(vdev);
}

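/* Verify that vhost_session_find_by_vid() finds the registered session and
 * returns NULL for an unknown vid.
 */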
static void
session_find_by_vid_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct spdk_vhost_session *tmp;
	int rc;

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&vdev->vsessions);

	tmp = vhost_session_find_by_vid(vsession->vid);
	CU_ASSERT(tmp == vsession);

	/* Search for a session with a vid that does not exist */
	tmp = vhost_session_find_by_vid(vsession->vid + 0xFF);
	CU_ASSERT(tmp == NULL);

	cleanup_vdev(vdev);
}

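/* Verify that vhost_dev_unregister() refuses to remove a device that still has
 * an active session.
 */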
static void
remove_controller_test(void)
{
	struct spdk_vhost_dev *vdev;
	int ret;

	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);

	/* Remove device when controller is in use */
	start_vdev(vdev);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&vdev->vsessions));
	ret = vhost_dev_unregister(vdev);
	CU_ASSERT(ret != 0);

	cleanup_vdev(vdev);
}

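/*
 * Exercise vhost_vq_avail_ring_get() on a split ring: reaping everything,
 * reaping limited by reqs_len, rejecting a bogus avail index, and handling
 * wrap-around of the 16-bit index.
 */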
static void
vq_avail_ring_get_test(void)
{
	struct spdk_vhost_virtqueue vq = {};
	uint16_t avail_mem[34];
	uint16_t reqs[32];
	uint16_t reqs_len, ret, i;

	/* Basic example: reap all outstanding requests */
	vq.vring.avail = (struct vring_avail *)avail_mem;
	vq.vring.size = 32;
	vq.last_avail_idx = 24;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	for (i = 0; i < 32; i++) {
		vq.vring.avail->ring[i] = i;
	}

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 29);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 24]);
	}

	/* Basic example: more requests are outstanding than reqs_len, so only reqs_len are reaped */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == reqs_len);
	CU_ASSERT(vq.last_avail_idx == 26);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 20]);
	}

	/* Invalid example: the avail index is more than a full ring ahead of last_avail_idx, so nothing is reaped */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 156;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 0);

	/* Test wrap-around of the 16-bit avail index: 65535 -> 4 yields five entries starting at ring slot 31. */
	vq.last_avail_idx = 65535;
	vq.vring.avail->idx = 4;
	reqs_len = 6;
	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 4);
	CU_ASSERT(reqs[0] == vq.vring.avail->ring[31]);
	for (i = 1; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i - 1]);
	}
}

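/*
 * The helpers below model the guest (driver) side of a packed virtqueue:
 * marking descriptors available with the proper wrap-counter phase and
 * consuming descriptors that the host has marked used.
 */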
static bool
vq_desc_guest_is_used(struct spdk_vhost_virtqueue *vq, int16_t guest_last_used_idx,
		      int16_t guest_used_phase)
{
	return (!!(vq->vring.desc_packed[guest_last_used_idx].flags & VRING_DESC_F_USED) ==
		!!guest_used_phase);
}

static void
vq_desc_guest_set_avail(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_avail_idx,
			int16_t *guest_avail_phase)
{
	if (*guest_avail_phase) {
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_USED;
	} else {
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_USED;
	}

	if (++(*guest_last_avail_idx) >= vq->vring.size) {
		*guest_last_avail_idx -= vq->vring.size;
		*guest_avail_phase = !(*guest_avail_phase);
	}
}

static int16_t
vq_desc_guest_handle_completed_desc(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_used_idx,
				    int16_t *guest_used_phase)
{
	int16_t buffer_id = -1;

	if (vq_desc_guest_is_used(vq, *guest_last_used_idx, *guest_used_phase)) {
		buffer_id = vq->vring.desc_packed[*guest_last_used_idx].id;
		if (++(*guest_last_used_idx) >= vq->vring.size) {
			*guest_last_used_idx -= vq->vring.size;
			*guest_used_phase = !(*guest_used_phase);
		}

		return buffer_id;
	}

	return -1;
}

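/*
 * Drive a 4-entry packed ring through two full passes: the guest posts
 * descriptors, the host fetches and completes them out of order, and the
 * indexes and wrap counters on both sides are checked against each other.
 */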
static void
vq_packed_ring_test(void)
{
	struct spdk_vhost_session vs = {};
	struct spdk_vhost_virtqueue vq = {};
	struct vring_packed_desc descs[4];
	uint16_t guest_last_avail_idx = 0, guest_last_used_idx = 0;
	uint16_t guest_avail_phase = 1, guest_used_phase = 1;
	int i;
	int16_t chain_num;

	vq.vring.desc_packed = descs;
	vq.vring.size = 4;

	/* The avail and used wrap counters are initialized to 1 */
	vq.packed.avail_phase = 1;
	vq.packed.used_phase = 1;
	vq.packed.packed_ring = true;
	memset(descs, 0, sizeof(descs));

	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == false);

	/* Guest sends requests */
	for (i = 0; i < vq.vring.size; i++) {
		descs[guest_last_avail_idx].id = i;
		/* Mark the desc available */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}
	CU_ASSERT(guest_last_avail_idx == 0);
	CU_ASSERT(guest_avail_phase == 0);

	/* Host handles the available descs */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 0;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i++);
		CU_ASSERT(chain_num == 1);
	}

	/* Host completes them out of order: 1, 0, 2. */
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1, 0);

	/* The host has fetched all four available requests but has completed only three of them */
	CU_ASSERT(vq.last_avail_idx == 0);
	CU_ASSERT(vq.packed.avail_phase == 0);
	CU_ASSERT(vq.last_used_idx == 3);
	CU_ASSERT(vq.packed.used_phase == 1);

	/* Guest handles the completed requests */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);
	CU_ASSERT(guest_last_used_idx == 3);
	CU_ASSERT(guest_used_phase == 1);

	/* Three descs have been returned to the guest, so it can post three new requests */
	for (i = 0; i < 3; i++) {
		descs[guest_last_avail_idx].id = 2 - i;
		/* Mark the desc available */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}

	/* Host handles the available descs */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 2;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i--);
		CU_ASSERT(chain_num == 1);
	}

	/* The host now holds four requests: the three new ones plus the one left over from the first pass */
	CU_ASSERT(vq.last_avail_idx == 3);
	/* The available wrap counter should have flipped */
	CU_ASSERT(vq.packed.avail_phase == 0);

	/* Host completes all the requests */
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 3, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1, 0);

	CU_ASSERT(vq.last_used_idx == vq.last_avail_idx);
	CU_ASSERT(vq.packed.used_phase == vq.packed.avail_phase);

	/* Guest handles the completed requests */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 3);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);

	CU_ASSERT(guest_last_avail_idx == guest_last_used_idx);
	CU_ASSERT(guest_avail_phase == guest_used_phase);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("vhost_suite", test_setup, NULL);

	CU_ADD_TEST(suite, desc_to_iov_test);
	CU_ADD_TEST(suite, create_controller_test);
	CU_ADD_TEST(suite, session_find_by_vid_test);
	CU_ADD_TEST(suite, remove_controller_test);
	CU_ADD_TEST(suite, vq_avail_ring_get_test);
	CU_ADD_TEST(suite, vq_packed_ring_test);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}