/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "CUnit/Basic.h"
#include "spdk_internal/cunit.h"
#include "spdk/thread.h"
#include "spdk_internal/mock.h"
#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "vhost/vhost.c"
#include "vhost/vhost_blk.c"
#include <rte_version.h>
#include "vhost/rte_vhost_user.c"

DEFINE_STUB(rte_vhost_set_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx), 0);
DEFINE_STUB(rte_vhost_get_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
DEFINE_STUB(rte_vhost_vring_call, int, (int vid, uint16_t vring_idx), 0);
#else
DEFINE_STUB(rte_vhost_vring_call_nonblock, int, (int vid, uint16_t vring_idx), 0);
#endif
DEFINE_STUB_V(rte_vhost_log_used_vring, (int vid, uint16_t vring_idx,
		uint64_t offset, uint64_t len));

DEFINE_STUB(rte_vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);
DEFINE_STUB(rte_vhost_get_negotiated_features, int, (int vid, uint64_t *features), 0);
DEFINE_STUB(rte_vhost_get_vhost_vring, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_vring *vring), 0);
DEFINE_STUB(rte_vhost_enable_guest_notification, int,
	    (int vid, uint16_t queue_id, int enable), 0);
DEFINE_STUB(rte_vhost_get_ifname, int, (int vid, char *buf, size_t len), 0);
DEFINE_STUB(rte_vhost_driver_start, int, (const char *name), 0);
DEFINE_STUB(rte_vhost_driver_callback_register, int,
	    (const char *path, struct rte_vhost_device_ops const *const ops), 0);
DEFINE_STUB(rte_vhost_driver_disable_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_set_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_register, int, (const char *path, uint64_t flags), 0);
DEFINE_STUB(rte_vhost_driver_unregister, int, (const char *path), 0);
DEFINE_STUB(rte_vhost_driver_get_protocol_features, int,
	    (const char *path, uint64_t *protocol_features), 0);
DEFINE_STUB(rte_vhost_driver_set_protocol_features, int,
	    (const char *path, uint64_t protocol_features), 0);

DEFINE_STUB(rte_vhost_set_last_inflight_io_split, int,
	    (int vid, uint16_t vring_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_clr_inflight_desc_split, int,
	    (int vid, uint16_t vring_idx, uint16_t last_used_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_set_last_inflight_io_packed, int,
	    (int vid, uint16_t vring_idx, uint16_t head), 0);
DEFINE_STUB(rte_vhost_clr_inflight_desc_packed, int,
	    (int vid, uint16_t vring_idx, uint16_t head), 0);
DEFINE_STUB_V(rte_vhost_log_write, (int vid, uint64_t addr, uint64_t len));
DEFINE_STUB(rte_vhost_get_vhost_ring_inflight, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_ring_inflight *vring), 0);
DEFINE_STUB(rte_vhost_get_vring_base_from_inflight, int,
	    (int vid, uint16_t queue_id, uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB(rte_vhost_extern_callback_register, int,
	    (int vid, struct rte_vhost_user_extern_ops const *const ops, void *ctx), 0);
DEFINE_STUB(spdk_iommu_is_enabled, bool, (void), 0);
/* rte_vhost_user.c shuts down vhost-user sessions in a separate pthread */
DECLARE_WRAPPER(pthread_create, int, (pthread_t *thread, const pthread_attr_t *attr,
				      void *(*start_routine)(void *), void *arg));
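/* The wrapper below runs start_routine() synchronously on the calling thread and reports
 * success, so the shutdown work that rte_vhost_user.c would normally hand off to a new
 * pthread completes inline and remains visible to poll_threads() in the tests.
 */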
int
pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *),
	       void *arg)
{
	start_routine(arg);
	return 0;
}
DEFINE_STUB(pthread_detach, int, (pthread_t thread), 0);

DEFINE_STUB(spdk_bdev_writev, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset, uint64_t len,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_unmap, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_write_zeroes, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), true);
DEFINE_STUB(spdk_bdev_open_ext, int,
	    (const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
	     void *event_ctx, struct spdk_bdev_desc **desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *,
	    (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB(spdk_bdev_readv, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);
DEFINE_STUB(spdk_bdev_flush, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset, uint64_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);
DEFINE_STUB(rte_vhost_set_inflight_desc_split, int, (int vid, uint16_t vring_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_set_inflight_desc_packed, int, (int vid, uint16_t vring_idx, uint16_t head,
		uint16_t last, uint16_t *inflight_entry), 0);
#if RTE_VERSION >= RTE_VERSION_NUM(23, 03, 0, 0)
DEFINE_STUB(rte_vhost_backend_config_change, int, (int vid, bool need_reply), 0);
#else
DEFINE_STUB(rte_vhost_slave_config_change, int, (int vid, bool need_reply), 0);
#endif
DEFINE_STUB(spdk_json_decode_bool, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_object_relaxed, int,
	    (const struct spdk_json_val *values, const struct spdk_json_object_decoder *decoders,
	     size_t num_decoders, void *out), 0);

void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	return cb(arg);
}

static struct spdk_vhost_dev_backend g_vdev_backend = {.type = VHOST_BACKEND_SCSI};
static struct spdk_vhost_user_dev_backend g_vdev_user_backend;

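/* init_cb() records the callback's rc directly in g_init_fail, so rc == 0 reads as
 * "no failure" in the asserts below.
 */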
static bool g_init_fail;
static void
init_cb(int rc)
{
	g_init_fail = rc;
}

static int
test_setup(void)
{
	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	g_init_fail = true;
	spdk_vhost_scsi_init(init_cb);
	assert(g_init_fail == false);

	g_init_fail = true;
	spdk_vhost_blk_init(init_cb);
	assert(g_init_fail == false);

	return 0;
}

static bool g_fini_fail;
static void
fini_cb(void)
{
	g_fini_fail = false;
}

static int
test_cleanup(void)
{
	g_fini_fail = true;
	spdk_vhost_scsi_fini(fini_cb);
	poll_threads();
	assert(g_fini_fail == false);

	g_fini_fail = true;
	spdk_vhost_blk_fini(fini_cb);
	poll_threads();
	assert(g_fini_fail == false);

	free_threads();
	free_cores();

	return 0;
}

static int
alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask)
{
	struct spdk_vhost_dev *vdev = NULL;
	int rc;

	/* spdk_vhost_dev must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vdev, 64, sizeof(*vdev));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vdev != NULL);
	memset(vdev, 0, sizeof(*vdev));
	rc = vhost_dev_register(vdev, name, cpumask, NULL, &g_vdev_backend, &g_vdev_user_backend, false);
	if (rc == 0) {
		*vdev_p = vdev;
	} else {
		free(vdev);
		*vdev_p = NULL;
	}

	return rc;
}

static void
start_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);
	struct rte_vhost_memory *mem;
	struct spdk_vhost_session *vsession = NULL;
	int rc;

	mem = calloc(1, sizeof(*mem) + 2 * sizeof(struct rte_vhost_mem_region));
	SPDK_CU_ASSERT_FATAL(mem != NULL);
	mem->nregions = 2;
	mem->regions[0].guest_phys_addr = 0;
	mem->regions[0].size = 0x400000; /* 4 MB */
	mem->regions[0].host_user_addr = 0x1000000;
	mem->regions[1].guest_phys_addr = 0x400000;
	mem->regions[1].size = 0x400000; /* 4 MB */
	mem->regions[1].host_user_addr = 0x2000000;
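	/* Fake guest memory layout used by desc_to_iov_test:
	 * GPA [0x000000, 0x400000) -> HVA 0x1000000 (region 0)
	 * GPA [0x400000, 0x800000) -> HVA 0x2000000 (region 1)
	 */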

	assert(TAILQ_EMPTY(&user_dev->vsessions));
	/* spdk_vhost_session must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vsession, 64, sizeof(*vsession));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vsession != NULL);
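	/* Build a minimal fake session: vid 0, already marked started and backed by the two
	 * regions above, so tests can reach it via vhost_session_find_by_vid() or by taking
	 * TAILQ_FIRST() of the device's session list.
	 */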
	vsession->started = true;
	vsession->vid = 0;
	vsession->mem = mem;
	TAILQ_INSERT_TAIL(&user_dev->vsessions, vsession, tailq);
}

static void
stop_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);
	struct spdk_vhost_session *vsession = TAILQ_FIRST(&user_dev->vsessions);

	TAILQ_REMOVE(&user_dev->vsessions, vsession, tailq);
	free(vsession->mem);
	free(vsession);
}

static void
cleanup_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);

	if (!TAILQ_EMPTY(&user_dev->vsessions)) {
		stop_vdev(vdev);
	}
	vhost_dev_unregister(vdev);
	free(vdev);
}

static void
desc_to_iov_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct iovec iov[SPDK_VHOST_IOVS_MAX];
	uint16_t iov_index;
	struct vring_desc desc;
	int rc;

	spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&to_user_dev(vdev)->vsessions);

	/* Test simple case where iov falls fully within a 2MB page. */
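	/* desc.addr 0x110000 lies in region 0 (GPA 0 -> HVA 0x1000000), so the expected
	 * translation is 0x1000000 + 0x110000 = 0x1110000.
	 */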
	desc.addr = 0x110000;
	desc.len = 0x1000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[0].iov_len == 0x1000);
	/*
	 * Always memset the iov to ensure each test validates data written by its call
	 * to the function under test.
	 */
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x1000);
	memset(iov, 0, sizeof(iov));

	/* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
	iov_index = SPDK_VHOST_IOVS_MAX;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc != 0);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a 2MB boundary, but does not span a vhost memory region. */
	desc.addr = 0x1F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[0].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a vhost memory region. */
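	/* GPA [0x3F0000, 0x410000) straddles the region boundary at 0x400000: the first
	 * 0x10000 bytes map to HVA 0x13F0000 in region 0 and the remaining 0x10000 bytes
	 * map to HVA 0x2000000 in region 1, so two iovs are expected.
	 */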
	desc.addr = 0x3F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 2);
	CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);
	CU_ASSERT(iov[0].iov_len == 0x10000);
	CU_ASSERT(iov[1].iov_base == (void *)0x2000000);
	CU_ASSERT(iov[1].iov_len == 0x10000);
	memset(iov, 0, sizeof(iov));

	cleanup_vdev(vdev);

	CU_ASSERT(true);
}

static void
create_controller_test(void)
{
	struct spdk_vhost_dev *vdev, *vdev2;
	int ret;
	char long_name[PATH_MAX];

	spdk_cpuset_parse(&g_vhost_core_mask, "0xf");

	/* Create device with cpumask implicitly matching whole application */
	ret = alloc_vdev(&vdev, "vdev_name_0", NULL);
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "f"));
	cleanup_vdev(vdev);

	/* Create device with cpumask matching whole application */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xf");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "f"));
	cleanup_vdev(vdev);

	/* Create device with single core in cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x2");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "2"));
	cleanup_vdev(vdev);

	/* Create device with cpumask spanning two cores */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x3");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "3"));
	cleanup_vdev(vdev);

	/* Create device with incorrect cpumask outside of application cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xf0");
	SPDK_CU_ASSERT_FATAL(ret != 0);

	/* Create device with incorrect cpumask partially outside of application cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0xff");
	SPDK_CU_ASSERT_FATAL(ret != 0);

	/* Create device with no name */
	ret = alloc_vdev(&vdev, NULL, NULL);
	CU_ASSERT(ret != 0);

	/* Create device with too long name and path */
	memset(long_name, 'x', sizeof(long_name));
	long_name[PATH_MAX - 1] = 0;
	snprintf(g_vhost_user_dev_dirname, sizeof(g_vhost_user_dev_dirname), "some_path/");
	ret = alloc_vdev(&vdev, long_name, NULL);
	CU_ASSERT(ret != 0);
	g_vhost_user_dev_dirname[0] = 0;

	/* Create device when device name is already taken */
	ret = alloc_vdev(&vdev, "vdev_name_0", NULL);
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	ret = alloc_vdev(&vdev2, "vdev_name_0", NULL);
	CU_ASSERT(ret != 0);
	cleanup_vdev(vdev);
}

static void
session_find_by_vid_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct spdk_vhost_session *tmp;
	int rc;

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&to_user_dev(vdev)->vsessions);

	tmp = vhost_session_find_by_vid(vsession->vid);
	CU_ASSERT(tmp == vsession);

	/* Search for a session with an invalid vid */
	tmp = vhost_session_find_by_vid(vsession->vid + 0xFF);
	CU_ASSERT(tmp == NULL);

	cleanup_vdev(vdev);
}

static void
remove_controller_test(void)
{
	struct spdk_vhost_dev *vdev;
	int ret;

	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);

	/* Remove device when controller is in use */
	start_vdev(vdev);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&to_user_dev(vdev)->vsessions));
	ret = vhost_dev_unregister(vdev);
	CU_ASSERT(ret != 0);

	cleanup_vdev(vdev);
}

static void
vq_avail_ring_get_test(void)
{
	struct spdk_vhost_virtqueue vq = {};
	uint16_t avail_mem[34];
	uint16_t reqs[32];
	uint16_t reqs_len, ret, i;

	/* Basic example: reap all of the available requests */
	vq.vring.avail = (struct vring_avail *)avail_mem;
	vq.vring.size = 32;
	vq.last_avail_idx = 24;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	for (i = 0; i < 32; i++) {
		vq.vring.avail->ring[i] = i;
	}

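	/* avail->idx (29) - last_avail_idx (24) = 5 new entries, fewer than reqs_len (6),
	 * so all five are reaped and last_avail_idx catches up to 29.
	 */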
	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 29);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 24]);
	}

	/* Basic example: more requests are available than reqs_len, so only some are reaped */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == reqs_len);
	CU_ASSERT(vq.last_avail_idx == 26);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 20]);
	}

	/* Test an invalid ring where avail->idx has run too far ahead */
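	/* avail->idx (156) - last_avail_idx (20) = 136 exceeds the ring size of 32, so the
	 * queue is treated as broken and no entries are returned.
	 */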
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 156;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 0);

	/* Test wrap-around of the 16-bit avail index. */
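	/* In 16-bit arithmetic (uint16_t)(4 - 65535) = 5, so five entries are reaped,
	 * starting at ring slot 65535 % 32 = 31 and wrapping around to slots 0-3.
	 */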
	vq.last_avail_idx = 65535;
	vq.vring.avail->idx = 4;
	reqs_len = 6;
	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 4);
	CU_ASSERT(reqs[0] == vq.vring.avail->ring[31]);
	for (i = 1; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i - 1]);
	}
}

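/* Guest-side helpers that mimic a virtio driver on a packed ring: the driver makes a
 * descriptor available by setting VRING_DESC_F_AVAIL to its avail wrap-counter phase
 * (and VRING_DESC_F_USED to the opposite), and it treats a descriptor as completed once
 * the device has set VRING_DESC_F_USED to match the driver's used wrap-counter phase.
 */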
static bool
vq_desc_guest_is_used(struct spdk_vhost_virtqueue *vq, int16_t guest_last_used_idx,
		      int16_t guest_used_phase)
{
	return (!!(vq->vring.desc_packed[guest_last_used_idx].flags & VRING_DESC_F_USED) ==
		!!guest_used_phase);
}

static void
vq_desc_guest_set_avail(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_avail_idx,
			int16_t *guest_avail_phase)
{
	if (*guest_avail_phase) {
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_USED;
	} else {
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_USED;
	}

	if (++(*guest_last_avail_idx) >= vq->vring.size) {
		*guest_last_avail_idx -= vq->vring.size;
		*guest_avail_phase = !(*guest_avail_phase);
	}
}

static int16_t
vq_desc_guest_handle_completed_desc(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_used_idx,
				    int16_t *guest_used_phase)
{
	int16_t buffer_id = -1;

	if (vq_desc_guest_is_used(vq, *guest_last_used_idx, *guest_used_phase)) {
		buffer_id = vq->vring.desc_packed[*guest_last_used_idx].id;
		if (++(*guest_last_used_idx) >= vq->vring.size) {
			*guest_last_used_idx -= vq->vring.size;
			*guest_used_phase = !(*guest_used_phase);
		}

		return buffer_id;
	}

	return -1;
}

static void
vq_packed_ring_test(void)
{
	struct spdk_vhost_session vs = {};
	struct spdk_vhost_virtqueue vq = {};
	struct vring_packed_desc descs[4];
	uint16_t guest_last_avail_idx = 0, guest_last_used_idx = 0;
	uint16_t guest_avail_phase = 1, guest_used_phase = 1;
	int i;
	int16_t chain_num;

	vq.vring.desc_packed = descs;
	vq.vring.size = 4;

	/* avail and used wrap counters are initialized to 1 */
	vq.packed.avail_phase = 1;
	vq.packed.used_phase = 1;
	vq.packed.packed_ring = true;
	memset(descs, 0, sizeof(descs));

	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == false);

	/* Guest sends requests */
	for (i = 0; i < vq.vring.size; i++) {
		descs[guest_last_avail_idx].id = i;
		/* Set the desc available */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}
	CU_ASSERT(guest_last_avail_idx == 0);
	CU_ASSERT(guest_avail_phase == 0);

	/* Host handles the available descs */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 0;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i++);
		CU_ASSERT(chain_num == 1);
	}

	/* Host completes them out of order: buffer ids 1, 0, 2. */
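	/* Each call below marks a one-descriptor chain as used: the arguments after the queue
	 * give the descriptor count, the buffer id being completed, the used length and the
	 * inflight head index, so the completions land in the order 1, 0, 2.
	 */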
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1, 0);

	/* Host has fetched all the available requests but has completed only three of them */
	CU_ASSERT(vq.last_avail_idx == 0);
	CU_ASSERT(vq.packed.avail_phase == 0);
	CU_ASSERT(vq.last_used_idx == 3);
	CU_ASSERT(vq.packed.used_phase == 1);

	/* Guest handles the completed requests */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);
	CU_ASSERT(guest_last_used_idx == 3);
	CU_ASSERT(guest_used_phase == 1);

	/* Three descs are free again, so the guest can send three more requests */
	for (i = 0; i < 3; i++) {
		descs[guest_last_avail_idx].id = 2 - i;
		/* Set the desc available */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}

	/* Host handles the available descs */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 2;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i--);
		CU_ASSERT(chain_num == 1);
	}

	/* The host now holds four requests: the three new ones plus the one left over from before */
	CU_ASSERT(vq.last_avail_idx == 3);
	/* The available wrap counter should have flipped */
	CU_ASSERT(vq.packed.avail_phase == 0);

	/* Host completes all the requests */
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 3, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1, 0);

	CU_ASSERT(vq.last_used_idx == vq.last_avail_idx);
	CU_ASSERT(vq.packed.used_phase == vq.packed.avail_phase);

	/* Guest handles the completed requests */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 3);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);

	CU_ASSERT(guest_last_avail_idx == guest_last_used_idx);
	CU_ASSERT(guest_avail_phase == guest_used_phase);
}

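/* Construct a vhost-blk controller on top of the stubbed bdev layer, look it up by name
 * and remove it again.
 */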
static void
vhost_blk_construct_test(void)
{
	int ret;
	struct spdk_vhost_dev *vdev = NULL;

	ret = spdk_vhost_blk_construct("Malloc0", "0x1", "vhost.blk.0", NULL, NULL);
	CU_ASSERT(ret == 0);

	vdev = spdk_vhost_dev_find("Malloc0");
	CU_ASSERT(vdev != NULL);
	CU_ASSERT(strcmp("Malloc0", spdk_vhost_dev_get_name(vdev)) == 0);

	ret = spdk_vhost_dev_remove(vdev);
	CU_ASSERT(ret == 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("vhost_suite", test_setup, test_cleanup);

	CU_ADD_TEST(suite, desc_to_iov_test);
	CU_ADD_TEST(suite, create_controller_test);
	CU_ADD_TEST(suite, session_find_by_vid_test);
	CU_ADD_TEST(suite, remove_controller_test);
	CU_ADD_TEST(suite, vq_avail_ring_get_test);
	CU_ADD_TEST(suite, vq_packed_ring_test);
	CU_ADD_TEST(suite, vhost_blk_construct_test);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	return num_failures;
}