/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 */
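/* Unit tests for the SPDK vhost layer (vhost.c, vhost_blk.c and rte_vhost_user.c). */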

#include "spdk/stdinc.h"

#include "CUnit/Basic.h"
#include "spdk_internal/cunit.h"
#include "spdk/thread.h"
#include "spdk_internal/mock.h"
#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "vhost/vhost.c"
#include "vhost/vhost_blk.c"
#include <rte_version.h>
#include "vhost/rte_vhost_user.c"

DEFINE_STUB(rte_vhost_set_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx), 0);
DEFINE_STUB(rte_vhost_get_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
DEFINE_STUB(rte_vhost_vring_call, int, (int vid, uint16_t vring_idx), 0);
#else
DEFINE_STUB(rte_vhost_vring_call_nonblock, int, (int vid, uint16_t vring_idx), 0);
#endif
DEFINE_STUB_V(rte_vhost_log_used_vring, (int vid, uint16_t vring_idx,
		uint64_t offset, uint64_t len));

DEFINE_STUB(rte_vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);
DEFINE_STUB(rte_vhost_get_negotiated_features, int, (int vid, uint64_t *features), 0);
DEFINE_STUB(rte_vhost_get_vhost_vring, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_vring *vring), 0);
DEFINE_STUB(rte_vhost_enable_guest_notification, int,
	    (int vid, uint16_t queue_id, int enable), 0);
DEFINE_STUB(rte_vhost_get_ifname, int, (int vid, char *buf, size_t len), 0);
DEFINE_STUB(rte_vhost_driver_start, int, (const char *name), 0);
DEFINE_STUB(rte_vhost_driver_callback_register, int,
	    (const char *path, struct rte_vhost_device_ops const *const ops), 0);
DEFINE_STUB(rte_vhost_driver_disable_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_set_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_register, int, (const char *path, uint64_t flags), 0);
DEFINE_STUB(rte_vhost_driver_unregister, int, (const char *path), 0);
DEFINE_STUB(rte_vhost_driver_get_protocol_features, int,
	    (const char *path, uint64_t *protocol_features), 0);
DEFINE_STUB(rte_vhost_driver_set_protocol_features, int,
	    (const char *path, uint64_t protocol_features), 0);

DEFINE_STUB(rte_vhost_set_last_inflight_io_split, int,
	    (int vid, uint16_t vring_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_clr_inflight_desc_split, int,
	    (int vid, uint16_t vring_idx, uint16_t last_used_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_set_last_inflight_io_packed, int,
	    (int vid, uint16_t vring_idx, uint16_t head), 0);
DEFINE_STUB(rte_vhost_clr_inflight_desc_packed, int,
	    (int vid, uint16_t vring_idx, uint16_t head), 0);
DEFINE_STUB_V(rte_vhost_log_write, (int vid, uint64_t addr, uint64_t len));
DEFINE_STUB(rte_vhost_get_vhost_ring_inflight, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_ring_inflight *vring), 0);
DEFINE_STUB(rte_vhost_get_vring_base_from_inflight, int,
	    (int vid, uint16_t queue_id, uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB(rte_vhost_extern_callback_register, int,
	    (int vid, struct rte_vhost_user_extern_ops const *const ops, void *ctx), 0);
DEFINE_STUB(spdk_iommu_is_enabled, bool, (void), 0);

/* rte_vhost_user.c shuts down vhost_user sessions in a separate pthread */
DECLARE_WRAPPER(pthread_create, int, (pthread_t *thread, const pthread_attr_t *attr,
				      void *(*start_routine)(void *), void *arg));
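/*
 * Mocked pthread_create: rather than spawning a real thread, run the start
 * routine inline on the calling thread so the test stays single-threaded
 * and deterministic.
 */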
int
pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *),
	       void *arg)
{
	*thread = 0;
	start_routine(arg);
	return 0;
}
DEFINE_STUB(pthread_detach, int, (pthread_t thread), 0);

DEFINE_STUB(spdk_bdev_writev, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset, uint64_t len,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_unmap, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_write_zeroes, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), true);
DEFINE_STUB(spdk_bdev_open_ext, int,
	    (const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
	     void *event_ctx, struct spdk_bdev_desc **desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *,
	    (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB(spdk_bdev_readv, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);
DEFINE_STUB(spdk_bdev_flush, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);
DEFINE_STUB(rte_vhost_set_inflight_desc_split, int, (int vid, uint16_t vring_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_set_inflight_desc_packed, int, (int vid, uint16_t vring_idx, uint16_t head,
		uint16_t last, uint16_t *inflight_entry), 0);
#if RTE_VERSION >= RTE_VERSION_NUM(23, 03, 0, 0)
DEFINE_STUB(rte_vhost_backend_config_change, int, (int vid, bool need_reply), 0);
#else
DEFINE_STUB(rte_vhost_slave_config_change, int, (int vid, bool need_reply), 0);
#endif
DEFINE_STUB(spdk_json_decode_bool, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_object_relaxed, int,
	    (const struct spdk_json_val *values, const struct spdk_json_object_decoder *decoders,
	     size_t num_decoders, void *out), 0);

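/*
 * For the unit test, simply invoke the callback on the current thread
 * without touching CPU affinity.
 */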
void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	return cb(arg);
}

static struct spdk_vhost_dev_backend g_vdev_backend = {.type = VHOST_BACKEND_SCSI};
static struct spdk_vhost_user_dev_backend g_vdev_user_backend;

static bool g_init_fail;
static void
init_cb(int rc)
{
	g_init_fail = rc;
}

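/* Suite setup: run everything on a single core/thread and initialize the vhost-scsi and vhost-blk subsystems. */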
static int
test_setup(void)
{
	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	g_init_fail = true;
	spdk_vhost_scsi_init(init_cb);
	assert(g_init_fail == false);

	g_init_fail = true;
	spdk_vhost_blk_init(init_cb);
	assert(g_init_fail == false);

	return 0;
}

static bool g_fini_fail;
static void
fini_cb(void)
{
	g_fini_fail = false;
}

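/* Suite teardown: shut down vhost-scsi and vhost-blk, then release the test threads and cores. */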
static int
test_cleanup(void)
{
	g_fini_fail = true;
	spdk_vhost_scsi_fini(fini_cb);
	poll_threads();
	assert(g_fini_fail == false);

	g_fini_fail = true;
	spdk_vhost_blk_fini(fini_cb);
	poll_threads();
	assert(g_fini_fail == false);

	free_threads();
	free_cores();

	return 0;
}

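/* Allocate a cache-line aligned spdk_vhost_dev and register it under the given name and cpumask. */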
static int
alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask)
{
	struct spdk_vhost_dev *vdev = NULL;
	int rc;

	/* spdk_vhost_dev must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vdev, 64, sizeof(*vdev));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vdev != NULL);
	memset(vdev, 0, sizeof(*vdev));
	rc = vhost_dev_register(vdev, name, cpumask, NULL, &g_vdev_backend, &g_vdev_user_backend, false);
	if (rc == 0) {
		*vdev_p = vdev;
	} else {
		free(vdev);
		*vdev_p = NULL;
	}

	return rc;
}

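/* Attach a fake vhost session with a two-region guest memory map to the device. */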
static void
start_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);
	struct rte_vhost_memory *mem;
	struct spdk_vhost_session *vsession = NULL;
	int rc;

	mem = calloc(1, sizeof(*mem) + 2 * sizeof(struct rte_vhost_mem_region));
	SPDK_CU_ASSERT_FATAL(mem != NULL);
	mem->nregions = 2;
	mem->regions[0].guest_phys_addr = 0;
	mem->regions[0].size = 0x400000; /* 4 MB */
	mem->regions[0].host_user_addr = 0x1000000;
	mem->regions[1].guest_phys_addr = 0x400000;
	mem->regions[1].size = 0x400000; /* 4 MB */
	mem->regions[1].host_user_addr = 0x2000000;

	assert(TAILQ_EMPTY(&user_dev->vsessions));
	/* spdk_vhost_session must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vsession, 64, sizeof(*vsession));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vsession != NULL);
	vsession->started = true;
	vsession->vid = 0;
	vsession->mem = mem;
	TAILQ_INSERT_TAIL(&user_dev->vsessions, vsession, tailq);
}

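/* Detach the device's only session and free it together with its memory map. */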
static void
stop_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);
	struct spdk_vhost_session *vsession = TAILQ_FIRST(&user_dev->vsessions);

	TAILQ_REMOVE(&user_dev->vsessions, vsession, tailq);
	free(vsession->mem);
	free(vsession);
}

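/* Stop any remaining session, unregister the device and free it. */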
static void
cleanup_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);

	if (!TAILQ_EMPTY(&user_dev->vsessions)) {
		stop_vdev(vdev);
	}
	vhost_dev_unregister(vdev);
	free(vdev);
}

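/*
 * Exercise vhost_vring_desc_to_iov() with descriptors that fall within a single
 * page, cross a 2 MB page boundary, and cross a vhost memory region boundary,
 * as well as with an exhausted iov array.
 */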
static void
desc_to_iov_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct iovec iov[SPDK_VHOST_IOVS_MAX];
	uint16_t iov_index;
	struct vring_desc desc;
	int rc;

	spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&to_user_dev(vdev)->vsessions);

	/* Test simple case where iov falls fully within a 2MB page. */
	desc.addr = 0x110000;
	desc.len = 0x1000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[0].iov_len == 0x1000);
	/*
	 * Always memset the iov to ensure each test validates data written by its call
	 * to the function under test.
	 */
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x1000);
	memset(iov, 0, sizeof(iov));

	/* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
	iov_index = SPDK_VHOST_IOVS_MAX;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc != 0);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a 2MB boundary, but does not span a vhost memory region. */
	desc.addr = 0x1F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[0].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a vhost memory region. */
	desc.addr = 0x3F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 2);
	CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);
	CU_ASSERT(iov[0].iov_len == 0x10000);
	CU_ASSERT(iov[1].iov_base == (void *)0x2000000);
	CU_ASSERT(iov[1].iov_len == 0x10000);
	memset(iov, 0, sizeof(iov));

	cleanup_vdev(vdev);

	CU_ASSERT(true);
}

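/*
 * Verify cpumask validation and name handling in vhost_dev_register():
 * valid masks within the application cpumask succeed, masks outside of it fail,
 * and missing, overly long, or duplicate names are rejected.
 */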
static void
create_controller_test(void)
{
	struct spdk_vhost_dev *vdev, *vdev2;
	int ret;
	char long_name[PATH_MAX];

	spdk_cpuset_parse(&g_vhost_core_mask, "0xf");

	/* Create device with cpumask implicitly matching whole application */
	ret = alloc_vdev(&vdev, "vdev_name_0", NULL);
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "f"));
	cleanup_vdev(vdev);

	/* Create device with cpumask matching whole application */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xf");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "f"));
	cleanup_vdev(vdev);

	/* Create device with single core in cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x2");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "2"));
	cleanup_vdev(vdev);

	/* Create device with cpumask spanning two cores */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x3");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "3"));
	cleanup_vdev(vdev);

	/* Create device with incorrect cpumask entirely outside of the application cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xf0");
	SPDK_CU_ASSERT_FATAL(ret != 0);

	/* Create device with incorrect cpumask partially outside of the application cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xff");
	SPDK_CU_ASSERT_FATAL(ret != 0);

	/* Create device with no name */
	ret = alloc_vdev(&vdev, NULL, NULL);
	CU_ASSERT(ret != 0);

	/* Create device with a name and path that are too long */
	memset(long_name, 'x', sizeof(long_name));
	long_name[PATH_MAX - 1] = 0;
	snprintf(g_vhost_user_dev_dirname, sizeof(g_vhost_user_dev_dirname), "some_path/");
	ret = alloc_vdev(&vdev, long_name, NULL);
	CU_ASSERT(ret != 0);
	g_vhost_user_dev_dirname[0] = 0;

	/* Create device when the device name is already taken */
	ret = alloc_vdev(&vdev, "vdev_name_0", NULL);
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	ret = alloc_vdev(&vdev2, "vdev_name_0", NULL);
	CU_ASSERT(ret != 0);
	cleanup_vdev(vdev);
}

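/* Verify that vhost_session_find_by_vid() returns the matching session and NULL for an unknown vid. */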
static void
session_find_by_vid_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct spdk_vhost_session *tmp;
	int rc;

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&to_user_dev(vdev)->vsessions);

	tmp = vhost_session_find_by_vid(vsession->vid);
	CU_ASSERT(tmp == vsession);

	/* Search for a session with an incorrect vid */
	tmp = vhost_session_find_by_vid(vsession->vid + 0xFF);
	CU_ASSERT(tmp == NULL);

	cleanup_vdev(vdev);
}

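/* Verify that a controller with an active session cannot be unregistered. */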
static void
remove_controller_test(void)
{
	struct spdk_vhost_dev *vdev;
	int ret;

	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);

	/* Remove device when controller is in use */
	start_vdev(vdev);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&to_user_dev(vdev)->vsessions));
	ret = vhost_dev_unregister(vdev);
	CU_ASSERT(ret != 0);

	cleanup_vdev(vdev);
}

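/*
 * Exercise vhost_vq_avail_ring_get(): reap all pending requests, reap only as
 * many as the caller asked for, reject an invalid avail index, and handle
 * 16-bit wrap-around of the avail index.
 */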
static void
vq_avail_ring_get_test(void)
{
	struct spdk_vhost_virtqueue vq = {};
	uint16_t avail_mem[34];
	uint16_t reqs[32];
	uint16_t reqs_len, ret, i;

	/* Basic example: reap all requests */
	vq.vring.avail = (struct vring_avail *)avail_mem;
	vq.vring.size = 32;
	vq.last_avail_idx = 24;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	for (i = 0; i < 32; i++) {
		vq.vring.avail->ring[i] = i;
	}

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 29);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 24]);
	}

	/* Basic example: reap only some of the requests */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == reqs_len);
	CU_ASSERT(vq.last_avail_idx == 26);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 20]);
	}

	/* Test an invalid case where avail->idx is too far ahead of last_avail_idx */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 156;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 0);

	/* Test overflow in the avail->idx variable. */
	vq.last_avail_idx = 65535;
	vq.vring.avail->idx = 4;
	reqs_len = 6;
	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 4);
	CU_ASSERT(reqs[0] == vq.vring.avail->ring[31]);
	for (i = 1; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i - 1]);
	}
}

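/*
 * Guest-side helpers for the packed ring test below: check whether a descriptor
 * has been marked used, mark a descriptor available, and reap a completed
 * descriptor, maintaining the guest's own index and wrap-counter (phase) state.
 */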
static bool
vq_desc_guest_is_used(struct spdk_vhost_virtqueue *vq, int16_t guest_last_used_idx,
		      int16_t guest_used_phase)
{
	return (!!(vq->vring.desc_packed[guest_last_used_idx].flags & VRING_DESC_F_USED) ==
		!!guest_used_phase);
}

static void
vq_desc_guest_set_avail(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_avail_idx,
			int16_t *guest_avail_phase)
{
	if (*guest_avail_phase) {
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_USED;
	} else {
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_USED;
	}

	if (++(*guest_last_avail_idx) >= vq->vring.size) {
		*guest_last_avail_idx -= vq->vring.size;
		*guest_avail_phase = !(*guest_avail_phase);
	}
}

static int16_t
vq_desc_guest_handle_completed_desc(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_used_idx,
				    int16_t *guest_used_phase)
{
	int16_t buffer_id = -1;

	if (vq_desc_guest_is_used(vq, *guest_last_used_idx, *guest_used_phase)) {
		buffer_id = vq->vring.desc_packed[*guest_last_used_idx].id;
		if (++(*guest_last_used_idx) >= vq->vring.size) {
			*guest_last_used_idx -= vq->vring.size;
			*guest_used_phase = !(*guest_used_phase);
		}

		return buffer_id;
	}

	return -1;
}

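/*
 * Simulate a guest and host driving a 4-entry packed virtqueue: the guest posts
 * descriptors, the host reaps and completes them (including out of order), and
 * both sides are checked for consistent indexes and wrap counters.
 */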
static void
vq_packed_ring_test(void)
{
	struct spdk_vhost_session *vs;
	struct spdk_vhost_virtqueue *vq;
	struct vring_packed_desc descs[4];
	uint16_t guest_last_avail_idx = 0, guest_last_used_idx = 0;
	uint16_t guest_avail_phase = 1, guest_used_phase = 1;
	int i;
	int rc;
	int16_t chain_num;

	/* See SPDK issues #3004 and #3310. This seems to be a bug with gcc + asan on
	 * Fedora 38, so we need to explicitly align the variables here.
	 */
	rc = posix_memalign((void **)&vs, SPDK_CACHE_LINE_SIZE, sizeof(*vs));
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = posix_memalign((void **)&vq, SPDK_CACHE_LINE_SIZE, sizeof(*vq));
	SPDK_CU_ASSERT_FATAL(rc == 0);
	memset(vs, 0, sizeof(*vs));
	memset(vq, 0, sizeof(*vq));

	vq->vring.desc_packed = descs;
	vq->vring.size = 4;

	/* The avail and used wrap counters are initialized to 1 */
	vq->packed.avail_phase = 1;
	vq->packed.used_phase = 1;
	vq->packed.packed_ring = true;
	memset(descs, 0, sizeof(descs));

	CU_ASSERT(vhost_vq_packed_ring_is_avail(vq) == false);

	/* Guest sends requests */
	for (i = 0; i < vq->vring.size; i++) {
		descs[guest_last_avail_idx].id = i;
		/* Set the desc available */
		vq_desc_guest_set_avail(vq, &guest_last_avail_idx, &guest_avail_phase);
	}
	CU_ASSERT(guest_last_avail_idx == 0);
	CU_ASSERT(guest_avail_phase == 0);

	/* Host handles the available descs */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(vq) == true);
	i = 0;
	while (vhost_vq_packed_ring_is_avail(vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(vq, vq->last_avail_idx, &chain_num) == i++);
		CU_ASSERT(chain_num == 1);
	}

	/* Host completes them out of order: 1, 0, 2. */
	vhost_vq_packed_ring_enqueue(vs, vq, 1, 1, 1, 0);
	vhost_vq_packed_ring_enqueue(vs, vq, 1, 0, 1, 0);
	vhost_vq_packed_ring_enqueue(vs, vq, 1, 2, 1, 0);

	/* Host has got all the available requests but has completed only three of them */
	CU_ASSERT(vq->last_avail_idx == 0);
	CU_ASSERT(vq->packed.avail_phase == 0);
	CU_ASSERT(vq->last_used_idx == 3);
	CU_ASSERT(vq->packed.used_phase == 1);

	/* Guest handles the completed requests */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(vq, &guest_last_used_idx, &guest_used_phase) == 2);
	CU_ASSERT(guest_last_used_idx == 3);
	CU_ASSERT(guest_used_phase == 1);

	/* Three descs are available again, so the guest can send three more requests */
	for (i = 0; i < 3; i++) {
		descs[guest_last_avail_idx].id = 2 - i;
		/* Set the desc available */
		vq_desc_guest_set_avail(vq, &guest_last_avail_idx, &guest_avail_phase);
	}

	/* Host handles the available descs */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(vq) == true);
	i = 2;
	while (vhost_vq_packed_ring_is_avail(vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(vq, vq->last_avail_idx, &chain_num) == i--);
		CU_ASSERT(chain_num == 1);
	}

	/* The host now holds four requests: the three new ones and the one left over */
	CU_ASSERT(vq->last_avail_idx == 3);
	/* The available wrap counter should have flipped */
	CU_ASSERT(vq->packed.avail_phase == 0);

	/* Host completes all the requests */
	vhost_vq_packed_ring_enqueue(vs, vq, 1, 1, 1, 0);
	vhost_vq_packed_ring_enqueue(vs, vq, 1, 0, 1, 0);
	vhost_vq_packed_ring_enqueue(vs, vq, 1, 3, 1, 0);
	vhost_vq_packed_ring_enqueue(vs, vq, 1, 2, 1, 0);

	CU_ASSERT(vq->last_used_idx == vq->last_avail_idx);
	CU_ASSERT(vq->packed.used_phase == vq->packed.avail_phase);

	/* Guest handles the completed requests */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(vq, &guest_last_used_idx, &guest_used_phase) == 3);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(vq, &guest_last_used_idx, &guest_used_phase) == 2);

	CU_ASSERT(guest_last_avail_idx == guest_last_used_idx);
	CU_ASSERT(guest_avail_phase == guest_used_phase);
	free(vq);
	free(vs);
}

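/* Verify that a vhost-blk controller can be constructed, looked up by name, and removed. */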
static void
vhost_blk_construct_test(void)
{
	int ret;
	struct spdk_vhost_dev *vdev = NULL;

	ret = spdk_vhost_blk_construct("Malloc0", "0x1", "vhost.blk.0", NULL, NULL);
	CU_ASSERT(ret == 0);

	vdev = spdk_vhost_dev_find("Malloc0");
	CU_ASSERT(vdev != NULL);
	CU_ASSERT(strcmp("Malloc0", spdk_vhost_dev_get_name(vdev)) == 0);

	ret = spdk_vhost_dev_remove(vdev);
	CU_ASSERT(ret == 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("vhost_suite", test_setup, test_cleanup);

	CU_ADD_TEST(suite, desc_to_iov_test);
	CU_ADD_TEST(suite, create_controller_test);
	CU_ADD_TEST(suite, session_find_by_vid_test);
	CU_ADD_TEST(suite, remove_controller_test);
	CU_ADD_TEST(suite, vq_avail_ring_get_test);
	CU_ADD_TEST(suite, vq_packed_ring_test);
	CU_ADD_TEST(suite, vhost_blk_construct_test);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	return num_failures;
}