/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "CUnit/Basic.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk_internal/mock.h"
#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "vhost/vhost.c"
#include "vhost/vhost_blk.c"
#include <rte_version.h>
#include "vhost/rte_vhost_user.c"

DEFINE_STUB(rte_vhost_set_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx), 0);
DEFINE_STUB(rte_vhost_get_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(rte_vhost_vring_call, int, (int vid, uint16_t vring_idx), 0);
DEFINE_STUB_V(rte_vhost_log_used_vring, (int vid, uint16_t vring_idx,
		uint64_t offset, uint64_t len));

DEFINE_STUB(rte_vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);
DEFINE_STUB(rte_vhost_get_negotiated_features, int, (int vid, uint64_t *features), 0);
DEFINE_STUB(rte_vhost_get_vhost_vring, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_vring *vring), 0);
DEFINE_STUB(rte_vhost_enable_guest_notification, int,
	    (int vid, uint16_t queue_id, int enable), 0);
DEFINE_STUB(rte_vhost_get_ifname, int, (int vid, char *buf, size_t len), 0);
DEFINE_STUB(rte_vhost_driver_start, int, (const char *name), 0);
#if RTE_VERSION >= RTE_VERSION_NUM(21, 11, 0, 0)
DEFINE_STUB(rte_vhost_driver_callback_register, int,
	    (const char *path, struct rte_vhost_device_ops const *const ops), 0);
#else
DEFINE_STUB(rte_vhost_driver_callback_register, int,
	    (const char *path, struct vhost_device_ops const *const ops), 0);
#endif
DEFINE_STUB(rte_vhost_driver_disable_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_set_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_register, int, (const char *path, uint64_t flags), 0);
DEFINE_STUB(rte_vhost_driver_unregister, int, (const char *path), 0);
DEFINE_STUB(rte_vhost_driver_get_protocol_features, int,
	    (const char *path, uint64_t *protocol_features), 0);
DEFINE_STUB(rte_vhost_driver_set_protocol_features, int,
	    (const char *path, uint64_t protocol_features), 0);

DEFINE_STUB(rte_vhost_set_last_inflight_io_split, int,
	    (int vid, uint16_t vring_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_clr_inflight_desc_split, int,
	    (int vid, uint16_t vring_idx, uint16_t last_used_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_set_last_inflight_io_packed, int,
	    (int vid, uint16_t vring_idx, uint16_t head), 0);
DEFINE_STUB(rte_vhost_clr_inflight_desc_packed, int,
	    (int vid, uint16_t vring_idx, uint16_t head), 0);
DEFINE_STUB_V(rte_vhost_log_write, (int vid, uint64_t addr, uint64_t len));
DEFINE_STUB(rte_vhost_get_vhost_ring_inflight, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_ring_inflight *vring), 0);
DEFINE_STUB(rte_vhost_get_vring_base_from_inflight, int,
	    (int vid, uint16_t queue_id, uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB(rte_vhost_extern_callback_register, int,
	    (int vid, struct rte_vhost_user_extern_ops const *const ops, void *ctx), 0);

/*
 * rte_vhost_user.c shuts down vhost-user sessions in a separate pthread. The wrapper
 * below runs the start routine synchronously so the test stays single-threaded.
 */
DECLARE_WRAPPER(pthread_create, int, (pthread_t *thread, const pthread_attr_t *attr,
				      void *(*start_routine)(void *), void *arg));
int
pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *),
	       void *arg)
{
	start_routine(arg);
	return 0;
}
DEFINE_STUB(pthread_detach, int, (pthread_t thread), 0);

DEFINE_STUB(spdk_bdev_writev, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset, uint64_t len,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_unmap, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_write_zeroes, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), true);
DEFINE_STUB(spdk_bdev_open_ext, int,
	    (const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
	     void *event_ctx, struct spdk_bdev_desc **desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *,
	    (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB(spdk_bdev_readv, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);
DEFINE_STUB(spdk_bdev_flush, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);
DEFINE_STUB(rte_vhost_set_inflight_desc_split, int, (int vid, uint16_t vring_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_set_inflight_desc_packed, int, (int vid, uint16_t vring_idx, uint16_t head,
		uint16_t last, uint16_t *inflight_entry), 0);
DEFINE_STUB(rte_vhost_slave_config_change, int, (int vid, bool need_reply), 0);
DEFINE_STUB(spdk_json_decode_bool, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_object_relaxed, int,
	    (const struct spdk_json_val *values, const struct spdk_json_object_decoder *decoders,
	     size_t num_decoders, void *out), 0);

void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	return cb(arg);
}

static struct spdk_vhost_dev_backend g_vdev_backend = {.type = VHOST_BACKEND_SCSI};
static struct spdk_vhost_user_dev_backend g_vdev_user_backend;

static bool g_init_fail;
static void
init_cb(int rc)
{
	g_init_fail = rc;
}

static int
test_setup(void)
{
	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	g_init_fail = true;
	spdk_vhost_scsi_init(init_cb);
	assert(g_init_fail == false);

	g_init_fail = true;
	spdk_vhost_blk_init(init_cb);
	assert(g_init_fail == false);

	return 0;
}

static bool g_fini_fail;
static void
fini_cb(void)
{
	g_fini_fail = false;
}

static int
test_cleanup(void)
{
	g_fini_fail = true;
	spdk_vhost_scsi_fini(fini_cb);
	poll_threads();
	assert(g_fini_fail == false);

	g_fini_fail = true;
	spdk_vhost_blk_fini(fini_cb);
	poll_threads();
	assert(g_fini_fail == false);

	free_threads();
	free_cores();

	return 0;
}

static int
alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask)
{
	struct spdk_vhost_dev *vdev = NULL;
	int rc;

	/* spdk_vhost_dev must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vdev, 64, sizeof(*vdev));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vdev != NULL);
	memset(vdev, 0, sizeof(*vdev));
	rc = vhost_dev_register(vdev, name, cpumask, NULL, &g_vdev_backend, &g_vdev_user_backend);
	if (rc == 0) {
		*vdev_p = vdev;
	} else {
		free(vdev);
		*vdev_p = NULL;
	}

	return rc;
}

static void
start_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);
	struct rte_vhost_memory *mem;
	struct spdk_vhost_session *vsession = NULL;
	int rc;

	mem = calloc(1, sizeof(*mem) + 2 * sizeof(struct rte_vhost_mem_region));
	SPDK_CU_ASSERT_FATAL(mem != NULL);
	mem->nregions = 2;
	mem->regions[0].guest_phys_addr = 0;
	mem->regions[0].size = 0x400000; /* 4 MB */
	mem->regions[0].host_user_addr = 0x1000000;
	mem->regions[1].guest_phys_addr = 0x400000;
	mem->regions[1].size = 0x400000; /* 4 MB */
	mem->regions[1].host_user_addr = 0x2000000;
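	/*
	 * With this layout, GPA [0x000000, 0x400000) maps to HVA 0x1000000 and
	 * GPA [0x400000, 0x800000) maps to HVA 0x2000000, i.e. HVA = host_user_addr +
	 * (GPA - guest_phys_addr). desc_to_iov_test below relies on exactly this
	 * translation.
	 */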

	assert(TAILQ_EMPTY(&user_dev->vsessions));
	/* spdk_vhost_session must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vsession, 64, sizeof(*vsession));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vsession != NULL);
	vsession->started = true;
	vsession->vid = 0;
	vsession->mem = mem;
	TAILQ_INSERT_TAIL(&user_dev->vsessions, vsession, tailq);
}

static void
stop_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);
	struct spdk_vhost_session *vsession = TAILQ_FIRST(&user_dev->vsessions);

	TAILQ_REMOVE(&user_dev->vsessions, vsession, tailq);
	free(vsession->mem);
	free(vsession);
}

static void
cleanup_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);

	if (!TAILQ_EMPTY(&user_dev->vsessions)) {
		stop_vdev(vdev);
	}
	vhost_dev_unregister(vdev);
	free(vdev);
}

static void
desc_to_iov_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct iovec iov[SPDK_VHOST_IOVS_MAX];
	uint16_t iov_index;
	struct vring_desc desc;
	int rc;

	spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&to_user_dev(vdev)->vsessions);

	/* Test simple case where iov falls fully within a 2MB page. */
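	/*
	 * desc.addr 0x110000 lies entirely inside memory region 0, so the expected iov
	 * base is host_user_addr + desc.addr = 0x1000000 + 0x110000 = 0x1110000.
	 */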
	desc.addr = 0x110000;
	desc.len = 0x1000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[0].iov_len == 0x1000);
	/*
	 * Always memset the iov to ensure each test validates data written by its call
	 * to the function under test.
	 */
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x1000);
	memset(iov, 0, sizeof(iov));

	/* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
	iov_index = SPDK_VHOST_IOVS_MAX;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc != 0);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a 2MB boundary, but does not cross a vhost memory region boundary. */
	desc.addr = 0x1F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[0].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov crosses a vhost memory region boundary. */
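	/*
	 * 0x3F0000 + 0x20000 crosses the region boundary at GPA 0x400000, so the
	 * descriptor is expected to split into two iovs: 0x10000 bytes at the end of
	 * region 0 (HVA 0x13F0000) and 0x10000 bytes at the start of region 1
	 * (HVA 0x2000000).
	 */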
	desc.addr = 0x3F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 2);
	CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);
	CU_ASSERT(iov[0].iov_len == 0x10000);
	CU_ASSERT(iov[1].iov_base == (void *)0x2000000);
	CU_ASSERT(iov[1].iov_len == 0x10000);
	memset(iov, 0, sizeof(iov));

	cleanup_vdev(vdev);

	CU_ASSERT(true);
}

static void
create_controller_test(void)
{
	struct spdk_vhost_dev *vdev, *vdev2;
	int ret;
	char long_name[PATH_MAX];

	spdk_cpuset_parse(&g_vhost_core_mask, "0xf");

	/* Create device with a cpumask that implicitly matches the whole application mask */
	ret = alloc_vdev(&vdev, "vdev_name_0", NULL);
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "f"));
	cleanup_vdev(vdev);

	/* Create device with a cpumask explicitly matching the whole application mask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xf");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "f"));
	cleanup_vdev(vdev);

	/* Create device with a single core in the cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x2");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "2"));
	cleanup_vdev(vdev);

	/* Create device with a cpumask spanning two cores */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x3");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "3"));
	cleanup_vdev(vdev);

	/* Create device with an invalid cpumask entirely outside of the application cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xf0");
	SPDK_CU_ASSERT_FATAL(ret != 0);

	/* Create device with an invalid cpumask partially outside of the application cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xff");
	SPDK_CU_ASSERT_FATAL(ret != 0);

	/* Create device with no name */
	ret = alloc_vdev(&vdev, NULL, NULL);
	CU_ASSERT(ret != 0);

	/* Create device whose combined directory path and name are too long */
	memset(long_name, 'x', sizeof(long_name));
	long_name[PATH_MAX - 1] = 0;
	snprintf(g_vhost_user_dev_dirname, sizeof(g_vhost_user_dev_dirname), "some_path/");
	ret = alloc_vdev(&vdev, long_name, NULL);
	CU_ASSERT(ret != 0);
	g_vhost_user_dev_dirname[0] = 0;

	/* Create device when the device name is already taken */
	ret = alloc_vdev(&vdev, "vdev_name_0", NULL);
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	ret = alloc_vdev(&vdev2, "vdev_name_0", NULL);
	CU_ASSERT(ret != 0);
	cleanup_vdev(vdev);
}

static void
session_find_by_vid_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct spdk_vhost_session *tmp;
	int rc;

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&to_user_dev(vdev)->vsessions);

	tmp = vhost_session_find_by_vid(vsession->vid);
	CU_ASSERT(tmp == vsession);

	/* Search for a session with a vid that does not exist */
	tmp = vhost_session_find_by_vid(vsession->vid + 0xFF);
	CU_ASSERT(tmp == NULL);

	cleanup_vdev(vdev);
}

static void
remove_controller_test(void)
{
	struct spdk_vhost_dev *vdev;
	int ret;

	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);

	/* Remove device when controller is in use */
	start_vdev(vdev);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&to_user_dev(vdev)->vsessions));
	ret = vhost_dev_unregister(vdev);
	CU_ASSERT(ret != 0);

	cleanup_vdev(vdev);
}

static void
vq_avail_ring_get_test(void)
{
	struct spdk_vhost_virtqueue vq = {};
	uint16_t avail_mem[34];
	uint16_t reqs[32];
	uint16_t reqs_len, ret, i;

	/* Basic example: reap all requests */
	vq.vring.avail = (struct vring_avail *)avail_mem;
	vq.vring.size = 32;
	vq.last_avail_idx = 24;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	for (i = 0; i < 32; i++) {
		vq.vring.avail->ring[i] = i;
	}

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 29);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 24]);
	}

	/* Basic example: reap only some of the requests */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == reqs_len);
	CU_ASSERT(vq.last_avail_idx == 26);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 20]);
	}

	/* Test invalid example */
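	/*
	 * The avail index jumps from 20 to 156, i.e. by 136 entries, which is more than
	 * the ring size of 32, so the ring state is invalid and nothing is reaped.
	 */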
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 156;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 0);

	/* Test wrap-around (overflow) of the 16-bit avail index. */
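	/*
	 * The indices are free-running uint16_t values: (uint16_t)(4 - 65535) = 5 entries
	 * are available. The first one sits in ring slot 65535 % 32 == 31, after which the
	 * ring wraps back to slot 0.
	 */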
	vq.last_avail_idx = 65535;
	vq.vring.avail->idx = 4;
	reqs_len = 6;
	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 4);
	CU_ASSERT(reqs[0] == vq.vring.avail->ring[31]);
	for (i = 1; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i - 1]);
	}
}

static bool
vq_desc_guest_is_used(struct spdk_vhost_virtqueue *vq, int16_t guest_last_used_idx,
		      int16_t guest_used_phase)
{
	return (!!(vq->vring.desc_packed[guest_last_used_idx].flags & VRING_DESC_F_USED) ==
		!!guest_used_phase);
}

static void
vq_desc_guest_set_avail(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_avail_idx,
			int16_t *guest_avail_phase)
{
	if (*guest_avail_phase) {
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_USED;
	} else {
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_USED;
	}

	if (++(*guest_last_avail_idx) >= vq->vring.size) {
		*guest_last_avail_idx -= vq->vring.size;
		*guest_avail_phase = !(*guest_avail_phase);
	}
}

static int16_t
vq_desc_guest_handle_completed_desc(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_used_idx,
				    int16_t *guest_used_phase)
{
	int16_t buffer_id = -1;

	if (vq_desc_guest_is_used(vq, *guest_last_used_idx, *guest_used_phase)) {
		buffer_id = vq->vring.desc_packed[*guest_last_used_idx].id;
		if (++(*guest_last_used_idx) >= vq->vring.size) {
			*guest_last_used_idx -= vq->vring.size;
			*guest_used_phase = !(*guest_used_phase);
		}

		return buffer_id;
	}

	return -1;
}
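
/*
 * Note: the helpers above emulate the guest (driver) side of a packed virtqueue. A
 * descriptor is made available by setting VRING_DESC_F_AVAIL equal to the guest's avail
 * wrap counter (phase) and VRING_DESC_F_USED to its inverse; it counts as completed once
 * the host has flipped VRING_DESC_F_USED to match the guest's used wrap counter. Each
 * wrap counter toggles whenever its index walks past the end of the ring, which is the
 * behavior vq_packed_ring_test exercises below.
 */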

static void
vq_packed_ring_test(void)
{
	struct spdk_vhost_session vs = {};
	struct spdk_vhost_virtqueue vq = {};
	struct vring_packed_desc descs[4];
	uint16_t guest_last_avail_idx = 0, guest_last_used_idx = 0;
	uint16_t guest_avail_phase = 1, guest_used_phase = 1;
	int i;
	int16_t chain_num;

	vq.vring.desc_packed = descs;
	vq.vring.size = 4;

	/* The avail and used wrap counters are initialized to 1. */
	vq.packed.avail_phase = 1;
	vq.packed.used_phase = 1;
	vq.packed.packed_ring = true;
	memset(descs, 0, sizeof(descs));

	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == false);

	/* Guest sends requests. */
	for (i = 0; i < vq.vring.size; i++) {
		descs[guest_last_avail_idx].id = i;
		/* Set the desc available */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}
	CU_ASSERT(guest_last_avail_idx == 0);
	CU_ASSERT(guest_avail_phase == 0);

	/* Host handles the available descs. */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 0;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i++);
		CU_ASSERT(chain_num == 1);
	}

	/* Host completes them out of order: 1, 0, 2. */
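	/*
	 * In each vhost_vq_packed_ring_enqueue() call below, the fourth argument is the
	 * buffer id being completed (a single-descriptor chain with a used length of 1),
	 * so the guest observes the ids 1, 0, 2 in that order further down.
	 */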
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1, 0);

	/* Host has consumed all the available requests but has completed only three of them. */
	CU_ASSERT(vq.last_avail_idx == 0);
	CU_ASSERT(vq.packed.avail_phase == 0);
	CU_ASSERT(vq.last_used_idx == 3);
	CU_ASSERT(vq.packed.used_phase == 1);

	/* Guest handles the completed requests. */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);
	CU_ASSERT(guest_last_used_idx == 3);
	CU_ASSERT(guest_used_phase == 1);

	/* Three descs are free again, so the guest can send three more requests. */
	for (i = 0; i < 3; i++) {
		descs[guest_last_avail_idx].id = 2 - i;
		/* Set the desc available */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}

	/* Host handles the available descs. */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 2;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i--);
		CU_ASSERT(chain_num == 1);
	}

	/* The host now holds four requests: the three new ones plus the one left from before. */
	CU_ASSERT(vq.last_avail_idx == 3);
	/* The avail wrap counter has flipped from its initial value of 1. */
	CU_ASSERT(vq.packed.avail_phase == 0);

	/* Host completes all the requests. */
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 3, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1, 0);

	CU_ASSERT(vq.last_used_idx == vq.last_avail_idx);
	CU_ASSERT(vq.packed.used_phase == vq.packed.avail_phase);

	/* Guest handles the completed requests. */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 3);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);

	CU_ASSERT(guest_last_avail_idx == guest_last_used_idx);
	CU_ASSERT(guest_avail_phase == guest_used_phase);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("vhost_suite", test_setup, test_cleanup);

	CU_ADD_TEST(suite, desc_to_iov_test);
	CU_ADD_TEST(suite, create_controller_test);
	CU_ADD_TEST(suite, session_find_by_vid_test);
	CU_ADD_TEST(suite, remove_controller_test);
	CU_ADD_TEST(suite, vq_avail_ring_get_test);
	CU_ADD_TEST(suite, vq_packed_ring_test);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}