/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "CUnit/Basic.h"
#include "spdk_internal/cunit.h"
#include "spdk/thread.h"
#include "spdk_internal/mock.h"
#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "vhost/vhost.c"
#include "vhost/vhost_blk.c"
#include <rte_version.h>
#include "vhost/rte_vhost_user.c"

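/*
 * The DEFINE_STUB(name, return_type, (args), return_value) macros below come from
 * spdk_internal/mock.h and replace every rte_vhost_* and spdk_bdev_* dependency with
 * a fixed-return stub, so vhost.c, vhost_blk.c and rte_vhost_user.c (compiled in via
 * the #includes above) can be exercised without DPDK's vhost library or a real bdev.
 */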
DEFINE_STUB(rte_vhost_set_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx), 0);
DEFINE_STUB(rte_vhost_get_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
DEFINE_STUB(rte_vhost_vring_call, int, (int vid, uint16_t vring_idx), 0);
#else
DEFINE_STUB(rte_vhost_vring_call_nonblock, int, (int vid, uint16_t vring_idx), 0);
#endif
DEFINE_STUB_V(rte_vhost_log_used_vring, (int vid, uint16_t vring_idx,
		uint64_t offset, uint64_t len));

DEFINE_STUB(rte_vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);
DEFINE_STUB(rte_vhost_get_negotiated_features, int, (int vid, uint64_t *features), 0);
DEFINE_STUB(rte_vhost_get_vhost_vring, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_vring *vring), 0);
DEFINE_STUB(rte_vhost_enable_guest_notification, int,
	    (int vid, uint16_t queue_id, int enable), 0);
DEFINE_STUB(rte_vhost_get_ifname, int, (int vid, char *buf, size_t len), 0);
DEFINE_STUB(rte_vhost_driver_start, int, (const char *name), 0);
#if RTE_VERSION >= RTE_VERSION_NUM(21, 11, 0, 0)
DEFINE_STUB(rte_vhost_driver_callback_register, int,
	    (const char *path, struct rte_vhost_device_ops const *const ops), 0);
#else
DEFINE_STUB(rte_vhost_driver_callback_register, int,
	    (const char *path, struct vhost_device_ops const *const ops), 0);
#endif
DEFINE_STUB(rte_vhost_driver_disable_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_set_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_register, int, (const char *path, uint64_t flags), 0);
DEFINE_STUB(rte_vhost_driver_unregister, int, (const char *path), 0);
DEFINE_STUB(rte_vhost_driver_get_protocol_features, int,
	    (const char *path, uint64_t *protocol_features), 0);
DEFINE_STUB(rte_vhost_driver_set_protocol_features, int,
	    (const char *path, uint64_t protocol_features), 0);

DEFINE_STUB(rte_vhost_set_last_inflight_io_split, int,
	    (int vid, uint16_t vring_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_clr_inflight_desc_split, int,
	    (int vid, uint16_t vring_idx, uint16_t last_used_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_set_last_inflight_io_packed, int,
	    (int vid, uint16_t vring_idx, uint16_t head), 0);
DEFINE_STUB(rte_vhost_clr_inflight_desc_packed, int,
	    (int vid, uint16_t vring_idx, uint16_t head), 0);
DEFINE_STUB_V(rte_vhost_log_write, (int vid, uint64_t addr, uint64_t len));
DEFINE_STUB(rte_vhost_get_vhost_ring_inflight, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_ring_inflight *vring), 0);
DEFINE_STUB(rte_vhost_get_vring_base_from_inflight, int,
	    (int vid, uint16_t queue_id, uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB(rte_vhost_extern_callback_register, int,
	    (int vid, struct rte_vhost_user_extern_ops const *const ops, void *ctx), 0);
DEFINE_STUB(spdk_iommu_is_enabled, bool, (void), 0);

/* rte_vhost_user.c shuts down vhost_user sessions in a separate pthread */
DECLARE_WRAPPER(pthread_create, int, (pthread_t *thread, const pthread_attr_t *attr,
				      void *(*start_routine)(void *), void *arg));
int
pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *),
	       void *arg)
{
	start_routine(arg);
	return 0;
}
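/*
 * pthread_detach can be a plain no-op stub: the "thread" created above already ran
 * start_routine() to completion synchronously inside the pthread_create() wrapper.
 */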
DEFINE_STUB(pthread_detach, int, (pthread_t thread), 0);

DEFINE_STUB(spdk_bdev_writev, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset, uint64_t len,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_unmap, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_write_zeroes, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), true);
DEFINE_STUB(spdk_bdev_open_ext, int,
	    (const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
	     void *event_ctx, struct spdk_bdev_desc **desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *,
	    (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB(spdk_bdev_readv, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);
DEFINE_STUB(spdk_bdev_flush, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);
DEFINE_STUB(rte_vhost_set_inflight_desc_split, int, (int vid, uint16_t vring_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_set_inflight_desc_packed, int, (int vid, uint16_t vring_idx, uint16_t head,
		uint16_t last, uint16_t *inflight_entry), 0);
#if RTE_VERSION >= RTE_VERSION_NUM(23, 03, 0, 0)
DEFINE_STUB(rte_vhost_backend_config_change, int, (int vid, bool need_reply), 0);
#else
DEFINE_STUB(rte_vhost_slave_config_change, int, (int vid, bool need_reply), 0);
#endif
DEFINE_STUB(spdk_json_decode_bool, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_object_relaxed, int,
	    (const struct spdk_json_val *values, const struct spdk_json_object_decoder *decoders,
	     size_t num_decoders, void *out), 0);

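/*
 * In the unit test there is no DPDK EAL thread affinity to escape from, so
 * spdk_call_unaffinitized() can simply invoke the callback on the current thread.
 */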
void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	return cb(arg);
}

static struct spdk_vhost_dev_backend g_vdev_backend = {.type = VHOST_BACKEND_SCSI};
static struct spdk_vhost_user_dev_backend g_vdev_user_backend;

static bool g_init_fail;
static void
init_cb(int rc)
{
	g_init_fail = rc;
}

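/*
 * Suite setup: run everything on a single mocked core/thread and bring up the
 * vhost-scsi and vhost-blk subsystems, verifying that both init callbacks
 * report success (rc == 0 clears g_init_fail).
 */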
static int
test_setup(void)
{
	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	g_init_fail = true;
	spdk_vhost_scsi_init(init_cb);
	assert(g_init_fail == false);

	g_init_fail = true;
	spdk_vhost_blk_init(init_cb);
	assert(g_init_fail == false);

	return 0;
}

static bool g_fini_fail;
static void
fini_cb(void)
{
	g_fini_fail = false;
}

static int
test_cleanup(void)
{
	g_fini_fail = true;
	spdk_vhost_scsi_fini(fini_cb);
	poll_threads();
	assert(g_fini_fail == false);

	g_fini_fail = true;
	spdk_vhost_blk_fini(fini_cb);
	poll_threads();
	assert(g_fini_fail == false);

	free_threads();
	free_cores();

	return 0;
}

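/*
 * Helper: allocate and register a vhost device with the given name and cpumask
 * against the stub SCSI backend. On failure the partially constructed device is
 * freed and *vdev_p is set to NULL so callers can assert on the return code alone.
 */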
static int
alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask)
{
	struct spdk_vhost_dev *vdev = NULL;
	int rc;

	/* spdk_vhost_dev must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vdev, 64, sizeof(*vdev));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vdev != NULL);
	memset(vdev, 0, sizeof(*vdev));
	rc = vhost_dev_register(vdev, name, cpumask, NULL, &g_vdev_backend, &g_vdev_user_backend);
	if (rc == 0) {
		*vdev_p = vdev;
	} else {
		free(vdev);
		*vdev_p = NULL;
	}

	return rc;
}

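/*
 * Helper: attach a single fake session to the device with a two-region guest
 * memory map, each region 4 MB:
 *   region 0: GPA [0x000000, 0x400000) -> host vaddr 0x1000000
 *   region 1: GPA [0x400000, 0x800000) -> host vaddr 0x2000000
 * desc_to_iov_test() below relies on exactly this GPA-to-HVA layout.
 */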
static void
start_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);
	struct rte_vhost_memory *mem;
	struct spdk_vhost_session *vsession = NULL;
	int rc;

	mem = calloc(1, sizeof(*mem) + 2 * sizeof(struct rte_vhost_mem_region));
	SPDK_CU_ASSERT_FATAL(mem != NULL);
	mem->nregions = 2;
	mem->regions[0].guest_phys_addr = 0;
	mem->regions[0].size = 0x400000; /* 4 MB */
	mem->regions[0].host_user_addr = 0x1000000;
	mem->regions[1].guest_phys_addr = 0x400000;
	mem->regions[1].size = 0x400000; /* 4 MB */
	mem->regions[1].host_user_addr = 0x2000000;

	assert(TAILQ_EMPTY(&user_dev->vsessions));
	/* spdk_vhost_session must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vsession, 64, sizeof(*vsession));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vsession != NULL);
	vsession->started = true;
	vsession->vid = 0;
	vsession->mem = mem;
	TAILQ_INSERT_TAIL(&user_dev->vsessions, vsession, tailq);
}

static void
stop_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);
	struct spdk_vhost_session *vsession = TAILQ_FIRST(&user_dev->vsessions);

	TAILQ_REMOVE(&user_dev->vsessions, vsession, tailq);
	free(vsession->mem);
	free(vsession);
}

static void
cleanup_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);

	if (!TAILQ_EMPTY(&user_dev->vsessions)) {
		stop_vdev(vdev);
	}
	vhost_dev_unregister(vdev);
	free(vdev);
}

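/*
 * Exercises vhost_vring_desc_to_iov(), which translates a guest descriptor
 * (guest physical address + length) into host iovecs using the session memory
 * map set up in start_vdev(). Worked example for the first case below:
 * desc.addr 0x110000 lies in region 0 (GPA base 0, HVA base 0x1000000), so the
 * expected iov_base is 0x1000000 + 0x110000 = 0x1110000.
 */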
static void
desc_to_iov_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct iovec iov[SPDK_VHOST_IOVS_MAX];
	uint16_t iov_index;
	struct vring_desc desc;
	int rc;

	spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&to_user_dev(vdev)->vsessions);

	/* Test simple case where iov falls fully within a 2MB page. */
	desc.addr = 0x110000;
	desc.len = 0x1000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[0].iov_len == 0x1000);
	/*
	 * Always memset the iov to ensure each test validates data written by its call
	 * to the function under test.
	 */
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x1000);
	memset(iov, 0, sizeof(iov));

	/* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
	iov_index = SPDK_VHOST_IOVS_MAX;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc != 0);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a 2MB boundary, but does not span a vhost memory region. */
	desc.addr = 0x1F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[0].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a vhost memory region. */
	desc.addr = 0x3F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 2);
	CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);
	CU_ASSERT(iov[0].iov_len == 0x10000);
	CU_ASSERT(iov[1].iov_base == (void *)0x2000000);
	CU_ASSERT(iov[1].iov_len == 0x10000);
	memset(iov, 0, sizeof(iov));

	cleanup_vdev(vdev);

	CU_ASSERT(true);
}

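/*
 * A controller's cpumask must be a subset of the application core mask (0xf here);
 * masks that fall entirely (0xf0) or partially (0xff) outside of it, missing names,
 * over-long socket paths and duplicate names must all be rejected.
 */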
static void
create_controller_test(void)
{
	struct spdk_vhost_dev *vdev, *vdev2;
	int ret;
	char long_name[PATH_MAX];

	spdk_cpuset_parse(&g_vhost_core_mask, "0xf");

	/* Create device with cpumask implicitly matching whole application */
	ret = alloc_vdev(&vdev, "vdev_name_0", NULL);
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "f"));
	cleanup_vdev(vdev);

	/* Create device with cpumask matching whole application */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xf");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "f"));
	cleanup_vdev(vdev);

	/* Create device with a single core in the cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x2");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "2"));
	cleanup_vdev(vdev);

	/* Create device with a cpumask spanning two cores */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x3");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "3"));
	cleanup_vdev(vdev);

	/* Create device with an invalid cpumask entirely outside of the application cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xf0");
	SPDK_CU_ASSERT_FATAL(ret != 0);

	/* Create device with an invalid cpumask partially outside of the application cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xff");
	SPDK_CU_ASSERT_FATAL(ret != 0);

	/* Create device with no name */
	ret = alloc_vdev(&vdev, NULL, NULL);
	CU_ASSERT(ret != 0);

	/* Create device with a name and path that are too long */
	memset(long_name, 'x', sizeof(long_name));
	long_name[PATH_MAX - 1] = 0;
	snprintf(g_vhost_user_dev_dirname, sizeof(g_vhost_user_dev_dirname), "some_path/");
	ret = alloc_vdev(&vdev, long_name, NULL);
	CU_ASSERT(ret != 0);
	g_vhost_user_dev_dirname[0] = 0;

	/* Create device when the device name is already taken */
	ret = alloc_vdev(&vdev, "vdev_name_0", NULL);
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	ret = alloc_vdev(&vdev2, "vdev_name_0", NULL);
	CU_ASSERT(ret != 0);
	cleanup_vdev(vdev);
}

static void
session_find_by_vid_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct spdk_vhost_session *tmp;
	int rc;

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&to_user_dev(vdev)->vsessions);

	tmp = vhost_session_find_by_vid(vsession->vid);
	CU_ASSERT(tmp == vsession);

	/* Search for a session with an incorrect vid */
	tmp = vhost_session_find_by_vid(vsession->vid + 0xFF);
	CU_ASSERT(tmp == NULL);

	cleanup_vdev(vdev);
}

static void
remove_controller_test(void)
{
	struct spdk_vhost_dev *vdev;
	int ret;

	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);

	/* Removing the device must fail while the controller is in use (has an active session) */
	start_vdev(vdev);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&to_user_dev(vdev)->vsessions));
	ret = vhost_dev_unregister(vdev);
	CU_ASSERT(ret != 0);

	cleanup_vdev(vdev);
}

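/*
 * vhost_vq_avail_ring_get() is expected to return
 * min(avail->idx - last_avail_idx, reqs_len) entries (16-bit wrap-around
 * arithmetic) and to advance last_avail_idx by the number returned, e.g.
 * 29 - 24 = 5 available entries in the first case below. An avail->idx that
 * is more than a full ring ahead of last_avail_idx is treated as invalid
 * and yields 0.
 */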
static void
vq_avail_ring_get_test(void)
{
	struct spdk_vhost_virtqueue vq = {};
	uint16_t avail_mem[34];
	uint16_t reqs[32];
	uint16_t reqs_len, ret, i;

	/* Basic example: reap all available requests */
	vq.vring.avail = (struct vring_avail *)avail_mem;
	vq.vring.size = 32;
	vq.last_avail_idx = 24;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	for (i = 0; i < 32; i++) {
		vq.vring.avail->ring[i] = i;
	}

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 29);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 24]);
	}

	/* Basic example: reap only some of the available requests */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == reqs_len);
	CU_ASSERT(vq.last_avail_idx == 26);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 20]);
	}

	/* Invalid example: avail->idx is more than a full ring ahead of last_avail_idx */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 156;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 0);

	/* Test wrap-around of the 16-bit avail index (65535 -> 4). */
	vq.last_avail_idx = 65535;
	vq.vring.avail->idx = 4;
	reqs_len = 6;
	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 4);
	CU_ASSERT(reqs[0] == vq.vring.avail->ring[31]);
	for (i = 1; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i - 1]);
	}
}

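/*
 * The helpers below emulate the guest (driver) side of a packed virtqueue:
 * a descriptor's AVAIL/USED flag bits are compared against a wrap-phase bit
 * that each side flips every time its index wraps past the ring size, which
 * is how availability and completion are signalled without separate rings.
 */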
static bool
vq_desc_guest_is_used(struct spdk_vhost_virtqueue *vq, int16_t guest_last_used_idx,
		      int16_t guest_used_phase)
{
	return (!!(vq->vring.desc_packed[guest_last_used_idx].flags & VRING_DESC_F_USED) ==
		!!guest_used_phase);
}

static void
vq_desc_guest_set_avail(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_avail_idx,
			int16_t *guest_avail_phase)
{
	if (*guest_avail_phase) {
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_USED;
	} else {
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_USED;
	}

	if (++(*guest_last_avail_idx) >= vq->vring.size) {
		*guest_last_avail_idx -= vq->vring.size;
		*guest_avail_phase = !(*guest_avail_phase);
	}
}

static int16_t
vq_desc_guest_handle_completed_desc(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_used_idx,
				    int16_t *guest_used_phase)
{
	int16_t buffer_id = -1;

	if (vq_desc_guest_is_used(vq, *guest_last_used_idx, *guest_used_phase)) {
		buffer_id = vq->vring.desc_packed[*guest_last_used_idx].id;
		if (++(*guest_last_used_idx) >= vq->vring.size) {
			*guest_last_used_idx -= vq->vring.size;
			*guest_used_phase = !(*guest_used_phase);
		}

		return buffer_id;
	}

	return -1;
}

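/*
 * Scenario: a 4-entry packed ring. The guest makes all four descriptors
 * available, the host consumes them and completes three out of order, the
 * guest re-posts those three, and finally the host completes everything;
 * indices and wrap-phase bits on both sides must stay in sync throughout.
 */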
static void
vq_packed_ring_test(void)
{
	struct spdk_vhost_session vs = {};
	struct spdk_vhost_virtqueue vq = {};
	struct vring_packed_desc descs[4];
	uint16_t guest_last_avail_idx = 0, guest_last_used_idx = 0;
	uint16_t guest_avail_phase = 1, guest_used_phase = 1;
	int i;
	int16_t chain_num;

	vq.vring.desc_packed = descs;
	vq.vring.size = 4;

	/* avail and used wrap counters are initialized to 1 */
	vq.packed.avail_phase = 1;
	vq.packed.used_phase = 1;
	vq.packed.packed_ring = true;
	memset(descs, 0, sizeof(descs));

	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == false);

	/* Guest sends requests */
	for (i = 0; i < vq.vring.size; i++) {
		descs[guest_last_avail_idx].id = i;
		/* Set the desc available */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}
	CU_ASSERT(guest_last_avail_idx == 0);
	CU_ASSERT(guest_avail_phase == 0);

	/* Host handles the available descs */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 0;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i++);
		CU_ASSERT(chain_num == 1);
	}

	/* Host completes them out of order: 1, 0, 2. */
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1, 0);

	/* Host has consumed all available requests but completed only three of them */
	CU_ASSERT(vq.last_avail_idx == 0);
	CU_ASSERT(vq.packed.avail_phase == 0);
	CU_ASSERT(vq.last_used_idx == 3);
	CU_ASSERT(vq.packed.used_phase == 1);

	/* Guest handles the completed requests */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);
	CU_ASSERT(guest_last_used_idx == 3);
	CU_ASSERT(guest_used_phase == 1);

	/* Three descs are free again, so the guest can post three more requests */
	for (i = 0; i < 3; i++) {
		descs[guest_last_avail_idx].id = 2 - i;
		/* Set the desc available */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}

	/* Host handles the available descs */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 2;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i--);
		CU_ASSERT(chain_num == 1);
	}

	/* The host now holds four requests: the three new ones plus the one left over */
	CU_ASSERT(vq.last_avail_idx == 3);
	/* The available wrap counter should have flipped */
	CU_ASSERT(vq.packed.avail_phase == 0);

	/* Host completes all the requests */
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 3, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1, 0);

	CU_ASSERT(vq.last_used_idx == vq.last_avail_idx);
	CU_ASSERT(vq.packed.used_phase == vq.packed.avail_phase);

	/* Guest handles the completed requests */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 3);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);

	CU_ASSERT(guest_last_avail_idx == guest_last_used_idx);
	CU_ASSERT(guest_avail_phase == guest_used_phase);
}

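/*
 * spdk_vhost_blk_construct() relies on the bdev stubs above: spdk_bdev_open_ext()
 * is stubbed to return 0, so no real bdev named "Malloc0" needs to exist for the
 * controller to be created, found by name and removed.
 */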
static void
vhost_blk_construct_test(void)
{
	int ret;
	struct spdk_vhost_dev *vdev = NULL;

	ret = spdk_vhost_blk_construct("Malloc0", "0x1", "vhost.blk.0", NULL, NULL);
	CU_ASSERT(ret == 0);

	vdev = spdk_vhost_dev_find("Malloc0");
	CU_ASSERT(vdev != NULL);
	CU_ASSERT(strcmp("Malloc0", spdk_vhost_dev_get_name(vdev)) == 0);

	ret = spdk_vhost_dev_remove(vdev);
	CU_ASSERT(ret == 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("vhost_suite", test_setup, test_cleanup);

	CU_ADD_TEST(suite, desc_to_iov_test);
	CU_ADD_TEST(suite, create_controller_test);
	CU_ADD_TEST(suite, session_find_by_vid_test);
	CU_ADD_TEST(suite, remove_controller_test);
	CU_ADD_TEST(suite, vq_avail_ring_get_test);
	CU_ADD_TEST(suite, vq_packed_ring_test);
	CU_ADD_TEST(suite, vhost_blk_construct_test);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	return num_failures;
}