/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "CUnit/Basic.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk_internal/mock.h"
#include "common/lib/test_env.c"
#include "unit/lib/json_mock.c"

#include "vhost/vhost.c"

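/*
 * vhost.c is #included directly (rather than linked) so the tests can reach
 * its static functions. The DEFINE_STUB/DEFINE_STUB_V macros below generate
 * stub implementations of the rte_vhost (DPDK) and SPDK dependencies it
 * would otherwise pull in, each simply returning the given value.
 */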
DEFINE_STUB(rte_vhost_set_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx), 0);
DEFINE_STUB(rte_vhost_get_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB_V(vhost_session_install_rte_compat_hooks,
	      (struct spdk_vhost_session *vsession));
DEFINE_STUB_V(vhost_dev_install_rte_compat_hooks,
	      (struct spdk_vhost_dev *vdev));
DEFINE_STUB(rte_vhost_driver_unregister, int, (const char *path), 0);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(rte_vhost_vring_call, int, (int vid, uint16_t vring_idx), 0);

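/*
 * Stub the application core mask to contain only CPU 0. This keeps cpumask
 * validation deterministic: "0x1" is always accepted and "0x2" is always
 * rejected in create_controller_test below.
 */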
static struct spdk_cpuset *g_app_core_mask;

struct spdk_cpuset *
spdk_app_get_core_mask(void)
{
	if (g_app_core_mask == NULL) {
		g_app_core_mask = spdk_cpuset_alloc();
		spdk_cpuset_set_cpu(g_app_core_mask, 0, true);
	}
	return g_app_core_mask;
}

int
spdk_app_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
{
	int ret;
	struct spdk_cpuset *validmask;

	ret = spdk_cpuset_parse(cpumask, mask);
	if (ret < 0) {
		return ret;
	}

	validmask = spdk_app_get_core_mask();
	spdk_cpuset_and(cpumask, validmask);

	return 0;
}

DEFINE_STUB(rte_vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);
DEFINE_STUB(rte_vhost_get_negotiated_features, int, (int vid, uint64_t *features), 0);
DEFINE_STUB(rte_vhost_get_vhost_vring, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_vring *vring), 0);
DEFINE_STUB(rte_vhost_enable_guest_notification, int,
	    (int vid, uint16_t queue_id, int enable), 0);
DEFINE_STUB(rte_vhost_get_ifname, int, (int vid, char *buf, size_t len), 0);
DEFINE_STUB(rte_vhost_driver_start, int, (const char *name), 0);
DEFINE_STUB(rte_vhost_driver_callback_register, int,
	    (const char *path, struct vhost_device_ops const *const ops), 0);
DEFINE_STUB(rte_vhost_driver_disable_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_set_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_register, int, (const char *path, uint64_t flags), 0);
DEFINE_STUB(vhost_nvme_admin_passthrough, int, (int vid, void *cmd, void *cqe, void *buf), 0);
DEFINE_STUB(vhost_nvme_set_cq_call, int, (int vid, uint16_t qid, int fd), 0);
DEFINE_STUB(vhost_nvme_set_bar_mr, int, (int vid, void *bar, uint64_t bar_size), 0);
DEFINE_STUB(vhost_nvme_get_cap, int, (int vid, uint64_t *cap), 0);

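/*
 * The real spdk_call_unaffinitized() temporarily clears the caller's CPU
 * affinity before invoking the callback; for unit tests it is enough to
 * invoke the callback inline.
 */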
void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	return cb(arg);
}

static struct spdk_vhost_dev_backend g_vdev_backend;

static int
test_setup(void)
{
	return 0;
}

static int
alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask)
{
	struct spdk_vhost_dev *vdev = NULL;
	int rc;

	/* spdk_vhost_dev must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vdev, 64, sizeof(*vdev));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vdev != NULL);
	memset(vdev, 0, sizeof(*vdev));
	rc = vhost_dev_register(vdev, name, cpumask, &g_vdev_backend);
	if (rc == 0) {
		*vdev_p = vdev;
	} else {
		free(vdev);
		*vdev_p = NULL;
	}

	return rc;
}

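/*
 * Fake a started session on the device. The two 4 MB memory regions map
 * guest-physical addresses [0, 0x400000) and [0x400000, 0x800000) to
 * host-virtual addresses 0x1000000 and 0x2000000 respectively; the address
 * translation tests below rely on this layout.
 */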
static void
start_vdev(struct spdk_vhost_dev *vdev)
{
	struct rte_vhost_memory *mem;
	struct spdk_vhost_session *vsession = NULL;
	int rc;

	mem = calloc(1, sizeof(*mem) + 2 * sizeof(struct rte_vhost_mem_region));
	SPDK_CU_ASSERT_FATAL(mem != NULL);
	mem->nregions = 2;
	mem->regions[0].guest_phys_addr = 0;
	mem->regions[0].size = 0x400000; /* 4 MB */
	mem->regions[0].host_user_addr = 0x1000000;
	mem->regions[1].guest_phys_addr = 0x400000;
	mem->regions[1].size = 0x400000; /* 4 MB */
	mem->regions[1].host_user_addr = 0x2000000;

	assert(TAILQ_EMPTY(&vdev->vsessions));
	/* spdk_vhost_session must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vsession, 64, sizeof(*vsession));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vsession != NULL);
	vsession->started = true;
	vsession->vid = 0;
	vsession->mem = mem;
	TAILQ_INSERT_TAIL(&vdev->vsessions, vsession, tailq);
}

static void
stop_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_session *vsession = TAILQ_FIRST(&vdev->vsessions);

	TAILQ_REMOVE(&vdev->vsessions, vsession, tailq);
	free(vsession->mem);
	free(vsession);
}

static void
cleanup_vdev(struct spdk_vhost_dev *vdev)
{
	if (!TAILQ_EMPTY(&vdev->vsessions)) {
		stop_vdev(vdev);
	}
	vhost_dev_unregister(vdev);
	free(vdev);
}

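/*
 * Exercise vhost_vring_desc_to_iov(), which translates a guest-physical
 * descriptor into host-virtual iovecs using the session memory map set up
 * by start_vdev(). A descriptor that crosses a region boundary must be
 * split into multiple iovecs.
 */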
static void
desc_to_iov_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct iovec iov[SPDK_VHOST_IOVS_MAX];
	uint16_t iov_index;
	struct vring_desc desc;
	int rc;

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&vdev->vsessions);

	/* Test simple case where iov falls fully within a 2MB page. */
	desc.addr = 0x110000;
	desc.len = 0x1000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[0].iov_len == 0x1000);
	/*
	 * Always memset the iov to ensure each test validates data written by its call
	 * to the function under test.
	 */
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x1000);
	memset(iov, 0, sizeof(iov));

	/* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
	iov_index = SPDK_VHOST_IOVS_MAX;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc != 0);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a 2MB boundary, but does not span a vhost memory region. */
	desc.addr = 0x1F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[0].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a vhost memory region boundary and must be split. */
	desc.addr = 0x3F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 2);
	CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);
	CU_ASSERT(iov[0].iov_len == 0x10000);
	CU_ASSERT(iov[1].iov_base == (void *)0x2000000);
	CU_ASSERT(iov[1].iov_len == 0x10000);
	memset(iov, 0, sizeof(iov));

	cleanup_vdev(vdev);
}

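/* Exercise vhost_dev_register() argument validation through alloc_vdev(). */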
static void
create_controller_test(void)
{
	struct spdk_vhost_dev *vdev, *vdev2;
	int ret;
	char long_name[PATH_MAX];

	/* NOTE: the spdk_app_get_core_mask stub above always reports core mask 0x1. */

	/* Create a device with no name. */
	ret = alloc_vdev(&vdev, NULL, "0x1");
	CU_ASSERT(ret != 0);

	/* Create a device with a cpumask outside the application core mask. */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x2");
	CU_ASSERT(ret != 0);

	/* Create a device whose name plus socket path is too long. */
	memset(long_name, 'x', sizeof(long_name));
	long_name[PATH_MAX - 1] = 0;
	snprintf(dev_dirname, sizeof(dev_dirname), "some_path/");
	ret = alloc_vdev(&vdev, long_name, "0x1");
	CU_ASSERT(ret != 0);
	dev_dirname[0] = 0;

	/* Create a device when the device name is already taken. */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	ret = alloc_vdev(&vdev2, "vdev_name_0", "0x1");
	CU_ASSERT(ret != 0);
	cleanup_vdev(vdev);
}

static void
session_find_by_vid_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct spdk_vhost_session *tmp;
	int rc;

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&vdev->vsessions);

	tmp = vhost_session_find_by_vid(vsession->vid);
	CU_ASSERT(tmp == vsession);

	/* Search with a vid that does not belong to any session. */
	tmp = vhost_session_find_by_vid(vsession->vid + 0xFF);
	CU_ASSERT(tmp == NULL);

	cleanup_vdev(vdev);
}

static void
remove_controller_test(void)
{
	struct spdk_vhost_dev *vdev;
	int ret;

	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);

	/* Removing the device must fail while a session is still active. */
	start_vdev(vdev);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&vdev->vsessions));
	ret = vhost_dev_unregister(vdev);
	CU_ASSERT(ret != 0);

	cleanup_vdev(vdev);
}

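/*
 * avail_mem[] mimics the vring avail layout: a 16-bit flags field and a
 * 16-bit idx field followed by vring.size (32) ring entries. The avail
 * indices are free-running 16-bit counters, so they are taken modulo the
 * ring size and may legitimately wrap around 65535.
 */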
static void
vq_avail_ring_get_test(void)
{
	struct spdk_vhost_virtqueue vq;
	uint16_t avail_mem[34];
	uint16_t reqs[32];
	uint16_t reqs_len, ret, i;

	/* Basic case: all pending requests fit into reqs[], so all are reaped. */
	vq.vring.avail = (struct vring_avail *)avail_mem;
	vq.vring.size = 32;
	vq.last_avail_idx = 24;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	for (i = 0; i < 32; i++) {
		vq.vring.avail->ring[i] = i;
	}

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 29);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 24]);
	}

	/* Basic case: more requests are pending than reqs_len, so only reqs_len are reaped. */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == reqs_len);
	CU_ASSERT(vq.last_avail_idx == 26);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 20]);
	}

	/* Invalid case: more requests appear pending than the ring can hold. */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 156;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 0);

	/* Test wrap-around of the 16-bit avail index. */
	vq.last_avail_idx = 65535;
	vq.vring.avail->idx = 4;
	reqs_len = 6;
	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 4);
	CU_ASSERT(reqs[0] == vq.vring.avail->ring[31]);
	for (i = 1; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i - 1]);
	}
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("vhost_suite", test_setup, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "desc_to_iov", desc_to_iov_test) == NULL ||
		CU_add_test(suite, "create_controller", create_controller_test) == NULL ||
		CU_add_test(suite, "session_find_by_vid", session_find_by_vid_test) == NULL ||
		CU_add_test(suite, "remove_controller", remove_controller_test) == NULL ||
		CU_add_test(suite, "vq_avail_ring_get", vq_avail_ring_get_test) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}