xref: /spdk/test/unit/lib/vhost/vhost.c/vhost_ut.c (revision cc6920a4763d4b9a43aa40583c8397d8f14fa100)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "CUnit/Basic.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk_internal/mock.h"
#include "common/lib/test_env.c"
#include "unit/lib/json_mock.c"

#include "vhost/vhost.c"
#include <rte_version.h>

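/*
 * vhost.c is compiled directly into this test (see the include above), so every
 * external symbol it references - the rte_vhost_* API plus a few internal vhost
 * helpers - is stubbed out below with SPDK's DEFINE_STUB()/DEFINE_STUB_V()
 * mocks, which return the supplied default value (0 here) or do nothing.
 */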
DEFINE_STUB(rte_vhost_set_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx), 0);
DEFINE_STUB(rte_vhost_get_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB_V(vhost_session_install_rte_compat_hooks,
	      (struct spdk_vhost_session *vsession));
DEFINE_STUB(vhost_register_unix_socket, int, (const char *path, const char *name,
		uint64_t virtio_features, uint64_t disabled_features, uint64_t protocol_features), 0);
DEFINE_STUB(vhost_driver_unregister, int, (const char *path), 0);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(rte_vhost_vring_call, int, (int vid, uint16_t vring_idx), 0);
DEFINE_STUB_V(rte_vhost_log_used_vring, (int vid, uint16_t vring_idx,
		uint64_t offset, uint64_t len));

DEFINE_STUB(rte_vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);
DEFINE_STUB(rte_vhost_get_negotiated_features, int, (int vid, uint64_t *features), 0);
DEFINE_STUB(rte_vhost_get_vhost_vring, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_vring *vring), 0);
DEFINE_STUB(rte_vhost_enable_guest_notification, int,
	    (int vid, uint16_t queue_id, int enable), 0);
DEFINE_STUB(rte_vhost_get_ifname, int, (int vid, char *buf, size_t len), 0);
DEFINE_STUB(rte_vhost_driver_start, int, (const char *name), 0);
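/*
 * DPDK 21.11 renamed struct vhost_device_ops to rte_vhost_device_ops, so the
 * callback_register stub has to match whichever signature the installed DPDK
 * headers declare.
 */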
#if RTE_VERSION >= RTE_VERSION_NUM(21, 11, 0, 0)
DEFINE_STUB(rte_vhost_driver_callback_register, int,
	    (const char *path, struct rte_vhost_device_ops const *const ops), 0);
#else
DEFINE_STUB(rte_vhost_driver_callback_register, int,
	    (const char *path, struct vhost_device_ops const *const ops), 0);
#endif
DEFINE_STUB(rte_vhost_driver_disable_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_set_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_register, int, (const char *path, uint64_t flags), 0);

DEFINE_STUB(rte_vhost_set_last_inflight_io_split, int,
	    (int vid, uint16_t vring_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_clr_inflight_desc_split, int,
	    (int vid, uint16_t vring_idx, uint16_t last_used_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_set_last_inflight_io_packed, int,
	    (int vid, uint16_t vring_idx, uint16_t head), 0);
DEFINE_STUB(rte_vhost_clr_inflight_desc_packed, int,
	    (int vid, uint16_t vring_idx, uint16_t head), 0);
DEFINE_STUB_V(rte_vhost_log_write, (int vid, uint64_t addr, uint64_t len));
DEFINE_STUB_V(vhost_session_mem_register, (struct rte_vhost_memory *mem));
DEFINE_STUB_V(vhost_session_mem_unregister, (struct rte_vhost_memory *mem));
DEFINE_STUB(vhost_get_negotiated_features, int,
	    (int vid, uint64_t *negotiated_features), 0);
DEFINE_STUB(rte_vhost_get_vhost_ring_inflight, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_ring_inflight *vring), 0);
DEFINE_STUB(rte_vhost_get_vring_base_from_inflight, int,
	    (int vid, uint16_t queue_id, uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB(vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);

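/*
 * vhost.c links against spdk_call_unaffinitized(), which normally runs a callback
 * with the caller's CPU affinity temporarily cleared; a simple pass-through that
 * invokes the callback inline is sufficient for these unit tests.
 */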
void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	return cb(arg);
}

static struct spdk_vhost_dev_backend g_vdev_backend;

static int
test_setup(void)
{
	return 0;
}

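/*
 * Allocate a cache-line aligned spdk_vhost_dev and register it under the given
 * name and cpumask. On success *vdev_p owns the device; on failure the memory is
 * freed and *vdev_p is set to NULL, so callers can assert on the return code alone.
 */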
static int
alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask)
{
	struct spdk_vhost_dev *vdev = NULL;
	int rc;

	/* spdk_vhost_dev must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vdev, 64, sizeof(*vdev));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vdev != NULL);
	memset(vdev, 0, sizeof(*vdev));
	rc = vhost_dev_register(vdev, name, cpumask, &g_vdev_backend);
	if (rc == 0) {
		*vdev_p = vdev;
	} else {
		free(vdev);
		*vdev_p = NULL;
	}

	return rc;
}

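/*
 * Fake a started session on the device: build a guest memory map with two 4 MB
 * regions (guest physical 0x0 and 0x400000, mapped to host user addresses
 * 0x1000000 and 0x2000000) and attach a single started vsession with vid 0.
 * The address translations in desc_to_iov_test below rely on this layout.
 */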
static void
start_vdev(struct spdk_vhost_dev *vdev)
{
	struct rte_vhost_memory *mem;
	struct spdk_vhost_session *vsession = NULL;
	int rc;

	mem = calloc(1, sizeof(*mem) + 2 * sizeof(struct rte_vhost_mem_region));
	SPDK_CU_ASSERT_FATAL(mem != NULL);
	mem->nregions = 2;
	mem->regions[0].guest_phys_addr = 0;
	mem->regions[0].size = 0x400000; /* 4 MB */
	mem->regions[0].host_user_addr = 0x1000000;
	mem->regions[1].guest_phys_addr = 0x400000;
	mem->regions[1].size = 0x400000; /* 4 MB */
	mem->regions[1].host_user_addr = 0x2000000;

	assert(TAILQ_EMPTY(&vdev->vsessions));
	/* spdk_vhost_session must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vsession, 64, sizeof(*vsession));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vsession != NULL);
	vsession->started = true;
	vsession->vid = 0;
	vsession->mem = mem;
	TAILQ_INSERT_TAIL(&vdev->vsessions, vsession, tailq);
}

static void
stop_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_session *vsession = TAILQ_FIRST(&vdev->vsessions);

	TAILQ_REMOVE(&vdev->vsessions, vsession, tailq);
	free(vsession->mem);
	free(vsession);
}

static void
cleanup_vdev(struct spdk_vhost_dev *vdev)
{
	if (!TAILQ_EMPTY(&vdev->vsessions)) {
		stop_vdev(vdev);
	}
	vhost_dev_unregister(vdev);
	free(vdev);
}

static void
desc_to_iov_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct iovec iov[SPDK_VHOST_IOVS_MAX];
	uint16_t iov_index;
	struct vring_desc desc;
	int rc;

	spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&vdev->vsessions);

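	/*
	 * With region 0 mapping guest physical 0x0-0x400000 to host address 0x1000000,
	 * a descriptor at guest address X translates to host address 0x1000000 + X,
	 * e.g. 0x110000 -> 0x1110000 below.
	 */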
	/* Test simple case where iov falls fully within a 2MB page. */
	desc.addr = 0x110000;
	desc.len = 0x1000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[0].iov_len == 0x1000);
	/*
	 * Always memset the iov to ensure each test validates data written by its call
	 * to the function under test.
	 */
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x1000);
	memset(iov, 0, sizeof(iov));

	/* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
	iov_index = SPDK_VHOST_IOVS_MAX;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc != 0);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a 2MB boundary, but does not span a vhost memory region. */
	desc.addr = 0x1F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[0].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

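	/*
	 * A descriptor that crosses from region 0 into region 1 must be split into two
	 * iovecs: guest 0x3F0000-0x400000 maps to the end of region 0 (host 0x13F0000)
	 * and guest 0x400000-0x410000 maps to the start of region 1 (host 0x2000000).
	 */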
	/* Test case where iov spans a vhost memory region. */
	desc.addr = 0x3F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 2);
	CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);
	CU_ASSERT(iov[0].iov_len == 0x10000);
	CU_ASSERT(iov[1].iov_base == (void *)0x2000000);
	CU_ASSERT(iov[1].iov_len == 0x10000);
	memset(iov, 0, sizeof(iov));

	cleanup_vdev(vdev);

	CU_ASSERT(true);
}

static void
create_controller_test(void)
{
	struct spdk_vhost_dev *vdev, *vdev2;
	int ret;
	char long_name[PATH_MAX];

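	/*
	 * g_vhost_core_mask plays the role of the application core mask here; as the
	 * cases below exercise, a device's cpumask must be a non-empty subset of it
	 * for registration to succeed.
	 */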
	spdk_cpuset_parse(&g_vhost_core_mask, "0xf");

	/* Create device with cpumask implicitly matching the whole application */
	ret = alloc_vdev(&vdev, "vdev_name_0", NULL);
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "f"));
	cleanup_vdev(vdev);

	/* Create device with cpumask explicitly matching the whole application */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xf");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "f"));
	cleanup_vdev(vdev);

	/* Create device with a single core in the cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x2");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "2"));
	cleanup_vdev(vdev);

	/* Create device with a cpumask spanning two cores */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x3");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "3"));
	cleanup_vdev(vdev);

	/* Create device with an invalid cpumask entirely outside of the application cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xf0");
	SPDK_CU_ASSERT_FATAL(ret != 0);

	/* Create device with an invalid cpumask partially outside of the application cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xff");
	SPDK_CU_ASSERT_FATAL(ret != 0);

	/* Create device with no name */
	ret = alloc_vdev(&vdev, NULL, NULL);
	CU_ASSERT(ret != 0);

	/* Create device with a name and path that are too long */
	memset(long_name, 'x', sizeof(long_name));
	long_name[PATH_MAX - 1] = 0;
	snprintf(dev_dirname, sizeof(dev_dirname), "some_path/");
	ret = alloc_vdev(&vdev, long_name, NULL);
	CU_ASSERT(ret != 0);
	dev_dirname[0] = 0;

	/* Create device when the device name is already taken */
	ret = alloc_vdev(&vdev, "vdev_name_0", NULL);
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	ret = alloc_vdev(&vdev2, "vdev_name_0", NULL);
	CU_ASSERT(ret != 0);
	cleanup_vdev(vdev);
}

static void
session_find_by_vid_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct spdk_vhost_session *tmp;
	int rc;

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&vdev->vsessions);

	tmp = vhost_session_find_by_vid(vsession->vid);
	CU_ASSERT(tmp == vsession);

	/* Search for a session with an incorrect vid */
	tmp = vhost_session_find_by_vid(vsession->vid + 0xFF);
	CU_ASSERT(tmp == NULL);

	cleanup_vdev(vdev);
}

static void
remove_controller_test(void)
{
	struct spdk_vhost_dev *vdev;
	int ret;

	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);

	/* Remove device when controller is in use */
	start_vdev(vdev);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&vdev->vsessions));
	ret = vhost_dev_unregister(vdev);
	CU_ASSERT(ret != 0);

	cleanup_vdev(vdev);
}

static void
vq_avail_ring_get_test(void)
{
	struct spdk_vhost_virtqueue vq = {};
	uint16_t avail_mem[34];
	uint16_t reqs[32];
	uint16_t reqs_len, ret, i;

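	/*
	 * As the cases below exercise, vhost_vq_avail_ring_get() reaps up to reqs_len
	 * request indexes from the split ring: the available count is
	 * avail->idx - last_avail_idx in 16-bit arithmetic, a count larger than the
	 * ring size is treated as a broken ring (nothing is reaped), and entries are
	 * read starting at ring[last_avail_idx & (size - 1)].
	 */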
	/* Basic example: reap all requests */
	vq.vring.avail = (struct vring_avail *)avail_mem;
	vq.vring.size = 32;
	vq.last_avail_idx = 24;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	for (i = 0; i < 32; i++) {
		vq.vring.avail->ring[i] = i;
	}

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 29);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 24]);
	}

	/* Basic example: reap only some of the available requests */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == reqs_len);
	CU_ASSERT(vq.last_avail_idx == 26);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 20]);
	}

	/* Invalid example: avail->idx is further ahead than the ring size allows */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 156;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 0);

	/* Test wrap-around (overflow) of the 16-bit avail index. */
	vq.last_avail_idx = 65535;
	vq.vring.avail->idx = 4;
	reqs_len = 6;
	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 4);
	CU_ASSERT(reqs[0] == vq.vring.avail->ring[31]);
	for (i = 1; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i - 1]);
	}
}

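/*
 * The helpers below emulate the guest driver's view of a packed virtqueue.  In a
 * packed ring each descriptor carries AVAIL and USED flag bits; a slot counts as
 * available or used when those bits match the corresponding wrap counter
 * ("phase"), which flips every time the index wraps around the ring.
 */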
static bool
vq_desc_guest_is_used(struct spdk_vhost_virtqueue *vq, int16_t guest_last_used_idx,
		      int16_t guest_used_phase)
{
	return (!!(vq->vring.desc_packed[guest_last_used_idx].flags & VRING_DESC_F_USED) ==
		!!guest_used_phase);
}

static void
vq_desc_guest_set_avail(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_avail_idx,
			int16_t *guest_avail_phase)
{
	if (*guest_avail_phase) {
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_USED;
	} else {
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_USED;
	}

	if (++(*guest_last_avail_idx) >= vq->vring.size) {
		*guest_last_avail_idx -= vq->vring.size;
		*guest_avail_phase = !(*guest_avail_phase);
	}
}

static int16_t
vq_desc_guest_handle_completed_desc(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_used_idx,
				    int16_t *guest_used_phase)
{
	int16_t buffer_id = -1;

	if (vq_desc_guest_is_used(vq, *guest_last_used_idx, *guest_used_phase)) {
		buffer_id = vq->vring.desc_packed[*guest_last_used_idx].id;
		if (++(*guest_last_used_idx) >= vq->vring.size) {
			*guest_last_used_idx -= vq->vring.size;
			*guest_used_phase = !(*guest_used_phase);
		}

		return buffer_id;
	}

	return -1;
}

static void
vq_packed_ring_test(void)
{
	struct spdk_vhost_session vs = {};
	struct spdk_vhost_virtqueue vq = {};
	struct vring_packed_desc descs[4];
	uint16_t guest_last_avail_idx = 0, guest_last_used_idx = 0;
	uint16_t guest_avail_phase = 1, guest_used_phase = 1;
	int i;
	int16_t chain_num;

	vq.vring.desc_packed = descs;
	vq.vring.size = 4;

	/* The avail and used wrap counters are initialized to 1 */
	vq.packed.avail_phase = 1;
	vq.packed.used_phase = 1;
	vq.packed.packed_ring = true;
	memset(descs, 0, sizeof(descs));

	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == false);

	/* Guest sends requests */
	for (i = 0; i < vq.vring.size; i++) {
		descs[guest_last_avail_idx].id = i;
		/* Set the desc available */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}
	CU_ASSERT(guest_last_avail_idx == 0);
	CU_ASSERT(guest_avail_phase == 0);

	/* Host handles the available descs */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 0;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i++);
		CU_ASSERT(chain_num == 1);
	}

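	/*
	 * The vhost_vq_packed_ring_enqueue() arguments used below appear to be
	 * (session, vq, number of descs in the chain, buffer id, used length,
	 * inflight head); only the buffer id differs between the calls.
	 */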
	/* Host completes them out of order: 1, 0, 2. */
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1, 0);

	/* Host has consumed all the available requests but has only completed three of them */
	CU_ASSERT(vq.last_avail_idx == 0);
	CU_ASSERT(vq.packed.avail_phase == 0);
	CU_ASSERT(vq.last_used_idx == 3);
	CU_ASSERT(vq.packed.used_phase == 1);

	/* Guest handles the completed requests */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);
	CU_ASSERT(guest_last_used_idx == 3);
	CU_ASSERT(guest_used_phase == 1);

	/* Three descs are free again, so the guest can send three more requests */
	for (i = 0; i < 3; i++) {
		descs[guest_last_avail_idx].id = 2 - i;
		/* Set the desc available */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}

	/* Host handles the available descs */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 2;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i--);
		CU_ASSERT(chain_num == 1);
	}

	/* The host now holds four requests: the three new ones plus the one left uncompleted */
	CU_ASSERT(vq.last_avail_idx == 3);
	/* The available wrap counter should have flipped */
	CU_ASSERT(vq.packed.avail_phase == 0);

	/* Host completes all the outstanding requests */
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 3, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1, 0);

	CU_ASSERT(vq.last_used_idx == vq.last_avail_idx);
	CU_ASSERT(vq.packed.used_phase == vq.packed.avail_phase);

	/* Guest handles the completed requests */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 3);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);

	CU_ASSERT(guest_last_avail_idx == guest_last_used_idx);
	CU_ASSERT(guest_avail_phase == guest_used_phase);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("vhost_suite", test_setup, NULL);

	CU_ADD_TEST(suite, desc_to_iov_test);
	CU_ADD_TEST(suite, create_controller_test);
	CU_ADD_TEST(suite, session_find_by_vid_test);
	CU_ADD_TEST(suite, remove_controller_test);
	CU_ADD_TEST(suite, vq_avail_ring_get_test);
	CU_ADD_TEST(suite, vq_packed_ring_test);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}