xref: /spdk/test/unit/lib/vhost/vhost.c/vhost_ut.c (revision 3aa204fb3138c43e63b868e488277f13b098cef1)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "CUnit/Basic.h"
37 #include "spdk_cunit.h"
38 #include "spdk/thread.h"
39 #include "spdk_internal/mock.h"
40 #include "common/lib/test_env.c"
41 #include "unit/lib/json_mock.c"
42 
43 #include "vhost/vhost.c"
44 
/*
 * Stub out the rte_vhost and SPDK framework dependencies pulled in by
 * vhost.c so the unit under test links in isolation.  Each stub returns
 * its success value (0 or NULL) unless a test overrides it through the
 * mock framework.
 */
DEFINE_STUB(rte_vhost_set_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx), 0);
DEFINE_STUB(rte_vhost_get_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB_V(spdk_vhost_session_install_rte_compat_hooks,
	      (struct spdk_vhost_session *vsession));
DEFINE_STUB_V(spdk_vhost_dev_install_rte_compat_hooks,
	      (struct spdk_vhost_dev *vdev));
DEFINE_STUB(rte_vhost_driver_unregister, int, (const char *path), 0);
DEFINE_STUB(spdk_event_allocate, struct spdk_event *,
	    (uint32_t lcore, spdk_event_fn fn, void *arg1, void *arg2), NULL);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
58 
59 static struct spdk_cpuset *g_app_core_mask;
60 struct spdk_cpuset *spdk_app_get_core_mask(void)
61 {
62 	if (g_app_core_mask == NULL) {
63 		g_app_core_mask = spdk_cpuset_alloc();
64 		spdk_cpuset_set_cpu(g_app_core_mask, 0, true);
65 	}
66 	return g_app_core_mask;
67 }
68 
/*
 * Stub for spdk_app_parse_core_mask(): parse @mask into @cpumask and
 * clip the result to the cores reported by spdk_app_get_core_mask().
 * Returns 0 on success or the negative error from spdk_cpuset_parse().
 */
int
spdk_app_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
{
	int rc = spdk_cpuset_parse(cpumask, mask);

	if (rc < 0) {
		return rc;
	}

	/* Restrict the parsed mask to the application's core mask (0x1). */
	spdk_cpuset_and(cpumask, spdk_app_get_core_mask());

	return 0;
}
85 
/*
 * Additional environment, event and vhost-nvme stubs required to link
 * vhost.c.  All report success; the core iterators pin everything to
 * core 0, matching the spdk_app_get_core_mask() stub above.
 */
DEFINE_STUB(spdk_env_get_first_core, uint32_t, (void), 0);
DEFINE_STUB(spdk_env_get_next_core, uint32_t, (uint32_t prev_core), 0);
DEFINE_STUB(spdk_env_get_current_core, uint32_t, (void), 0);
DEFINE_STUB_V(spdk_event_call, (struct spdk_event *event));
DEFINE_STUB(rte_vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);
DEFINE_STUB(rte_vhost_get_negotiated_features, int, (int vid, uint64_t *features), 0);
DEFINE_STUB(rte_vhost_get_vhost_vring, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_vring *vring), 0);
DEFINE_STUB(rte_vhost_enable_guest_notification, int,
	    (int vid, uint16_t queue_id, int enable), 0);
DEFINE_STUB(rte_vhost_get_ifname, int, (int vid, char *buf, size_t len), 0);
DEFINE_STUB(rte_vhost_driver_start, int, (const char *name), 0);
DEFINE_STUB(rte_vhost_driver_callback_register, int,
	    (const char *path, struct vhost_device_ops const *const ops), 0);
DEFINE_STUB(rte_vhost_driver_disable_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_set_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_register, int, (const char *path, uint64_t flags), 0);
DEFINE_STUB(spdk_vhost_nvme_admin_passthrough, int, (int vid, void *cmd, void *cqe, void *buf), 0);
DEFINE_STUB(spdk_vhost_nvme_set_cq_call, int, (int vid, uint16_t qid, int fd), 0);
DEFINE_STUB(spdk_vhost_nvme_set_bar_mr, int, (int vid, void *bar, uint64_t bar_size), 0);
DEFINE_STUB(spdk_vhost_nvme_get_cap, int, (int vid, uint64_t *cap), 0);
107 
/*
 * Stub for spdk_call_unaffinitized(): the real implementation runs @cb
 * with the thread's CPU affinity temporarily cleared; for unit tests the
 * callback is simply invoked directly on the current thread.
 */
void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	void *rc;

	rc = cb(arg);

	return rc;
}
113 
/* Zeroed backend; spdk_vhost_dev_register() only needs a valid pointer. */
static struct spdk_vhost_dev_backend g_vdev_backend;

/* CUnit suite initialization hook; nothing to set up. */
static int
test_setup(void)
{
	return 0;
}
121 
122 static int
123 alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask)
124 {
125 	struct spdk_vhost_dev *vdev = NULL;
126 	int rc;
127 
128 	/* spdk_vhost_dev must be allocated on a cache line boundary. */
129 	rc = posix_memalign((void **)&vdev, 64, sizeof(*vdev));
130 	CU_ASSERT(rc == 0);
131 	SPDK_CU_ASSERT_FATAL(vdev != NULL);
132 	memset(vdev, 0, sizeof(*vdev));
133 	rc = spdk_vhost_dev_register(vdev, name, cpumask, &g_vdev_backend);
134 	if (rc == 0) {
135 		*vdev_p = vdev;
136 	} else {
137 		free(vdev);
138 		*vdev_p = NULL;
139 	}
140 
141 	return rc;
142 }
143 
/*
 * Simulate a started device: attach one session to @vdev, backed by two
 * adjacent 4 MB guest memory regions mapped at host addresses 0x1000000
 * and 0x2000000 (used by desc_to_iov_test to exercise region crossing).
 */
static void
start_vdev(struct spdk_vhost_dev *vdev)
{
	struct rte_vhost_memory *mem;
	struct spdk_vhost_session *vsession = NULL;
	int rc;

	mem = calloc(1, sizeof(*mem) + 2 * sizeof(struct rte_vhost_mem_region));
	SPDK_CU_ASSERT_FATAL(mem != NULL);
	mem->nregions = 2;
	mem->regions[0].guest_phys_addr = 0;
	mem->regions[0].size = 0x400000; /* 4 MB */
	mem->regions[0].host_user_addr = 0x1000000;
	mem->regions[1].guest_phys_addr = 0x400000;
	mem->regions[1].size = 0x400000; /* 4 MB */
	mem->regions[1].host_user_addr = 0x2000000;

	assert(TAILQ_EMPTY(&vdev->vsessions));
	/* spdk_vhost_session must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vsession, 64, sizeof(*vsession));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vsession != NULL);
	vsession->lcore = 0;
	vsession->vid = 0;
	vsession->mem = mem;
	TAILQ_INSERT_TAIL(&vdev->vsessions, vsession, tailq);
}
171 
172 static void
173 stop_vdev(struct spdk_vhost_dev *vdev)
174 {
175 	struct spdk_vhost_session *vsession = TAILQ_FIRST(&vdev->vsessions);
176 
177 	TAILQ_REMOVE(&vdev->vsessions, vsession, tailq);
178 	free(vsession->mem);
179 	free(vsession);
180 }
181 
182 static void
183 cleanup_vdev(struct spdk_vhost_dev *vdev)
184 {
185 	if (!TAILQ_EMPTY(&vdev->vsessions)) {
186 		stop_vdev(vdev);
187 	}
188 	spdk_vhost_dev_unregister(vdev);
189 	free(vdev);
190 }
191 
192 static void
193 desc_to_iov_test(void)
194 {
195 	struct spdk_vhost_dev *vdev;
196 	struct spdk_vhost_session *vsession;
197 	struct iovec iov[SPDK_VHOST_IOVS_MAX];
198 	uint16_t iov_index;
199 	struct vring_desc desc;
200 	int rc;
201 
202 	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
203 	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
204 	start_vdev(vdev);
205 
206 	vsession = TAILQ_FIRST(&vdev->vsessions);
207 
208 	/* Test simple case where iov falls fully within a 2MB page. */
209 	desc.addr = 0x110000;
210 	desc.len = 0x1000;
211 	iov_index = 0;
212 	rc = spdk_vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
213 	CU_ASSERT(rc == 0);
214 	CU_ASSERT(iov_index == 1);
215 	CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
216 	CU_ASSERT(iov[0].iov_len == 0x1000);
217 	/*
218 	 * Always memset the iov to ensure each test validates data written by its call
219 	 * to the function under test.
220 	 */
221 	memset(iov, 0, sizeof(iov));
222 
223 	/* Same test, but ensure it respects the non-zero starting iov_index. */
224 	iov_index = SPDK_VHOST_IOVS_MAX - 1;
225 	rc = spdk_vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
226 	CU_ASSERT(rc == 0);
227 	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
228 	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
229 	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x1000);
230 	memset(iov, 0, sizeof(iov));
231 
232 	/* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
233 	iov_index = SPDK_VHOST_IOVS_MAX;
234 	rc = spdk_vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
235 	CU_ASSERT(rc != 0);
236 	memset(iov, 0, sizeof(iov));
237 
238 	/* Test case where iov spans a 2MB boundary, but does not span a vhost memory region. */
239 	desc.addr = 0x1F0000;
240 	desc.len = 0x20000;
241 	iov_index = 0;
242 	rc = spdk_vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
243 	CU_ASSERT(rc == 0);
244 	CU_ASSERT(iov_index == 1);
245 	CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
246 	CU_ASSERT(iov[0].iov_len == 0x20000);
247 	memset(iov, 0, sizeof(iov));
248 
249 	/* Same test, but ensure it respects the non-zero starting iov_index. */
250 	iov_index = SPDK_VHOST_IOVS_MAX - 1;
251 	rc = spdk_vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
252 	CU_ASSERT(rc == 0);
253 	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
254 	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
255 	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x20000);
256 	memset(iov, 0, sizeof(iov));
257 
258 	/* Test case where iov spans a vhost memory region. */
259 	desc.addr = 0x3F0000;
260 	desc.len = 0x20000;
261 	iov_index = 0;
262 	rc = spdk_vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
263 	CU_ASSERT(rc == 0);
264 	CU_ASSERT(iov_index == 2);
265 	CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);
266 	CU_ASSERT(iov[0].iov_len == 0x10000);
267 	CU_ASSERT(iov[1].iov_base == (void *)0x2000000);
268 	CU_ASSERT(iov[1].iov_len == 0x10000);
269 	memset(iov, 0, sizeof(iov));
270 
271 	cleanup_vdev(vdev);
272 
273 	CU_ASSERT(true);
274 }
275 
/*
 * Validate spdk_vhost_dev_register() argument checking: missing name,
 * cpumask outside the application core mask, oversized socket path, and
 * duplicate device name must all be rejected.
 */
static void
create_controller_test(void)
{
	struct spdk_vhost_dev *vdev, *vdev2;
	int ret;
	char long_name[PATH_MAX];

	/* NOTE: spdk_app_get_core_mask stub always sets coremask 0x01 */

	/* Create device with no name */
	ret = alloc_vdev(&vdev, NULL, "0x1");
	CU_ASSERT(ret != 0);

	/* Create device with incorrect cpumask */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x2");
	CU_ASSERT(ret != 0);

	/* Create device with too long name and path */
	memset(long_name, 'x', sizeof(long_name));
	long_name[PATH_MAX - 1] = 0;
	/* dev_dirname is a global from vhost.c; restore it below. */
	snprintf(dev_dirname, sizeof(dev_dirname), "some_path/");
	ret = alloc_vdev(&vdev, long_name, "0x1");
	CU_ASSERT(ret != 0);
	dev_dirname[0] = 0;

	/* Create device when device name is already taken */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	ret = alloc_vdev(&vdev2, "vdev_name_0", "0x1");
	CU_ASSERT(ret != 0);
	cleanup_vdev(vdev);
}
308 
/*
 * Validate spdk_vhost_session_find_by_vid(): an existing vid resolves to
 * its session, an unknown vid returns NULL.
 */
static void
session_find_by_vid_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct spdk_vhost_session *tmp;
	int rc;

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&vdev->vsessions);

	tmp = spdk_vhost_session_find_by_vid(vsession->vid);
	CU_ASSERT(tmp == vsession);

	/* Search for a device with incorrect vid */
	tmp = spdk_vhost_session_find_by_vid(vsession->vid + 0xFF);
	CU_ASSERT(tmp == NULL);

	cleanup_vdev(vdev);
}
332 
/*
 * Validate that spdk_vhost_dev_unregister() refuses to remove a device
 * that still has an active session.
 */
static void
remove_controller_test(void)
{
	struct spdk_vhost_dev *vdev;
	int ret;

	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);

	/* Remove device when controller is in use */
	start_vdev(vdev);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&vdev->vsessions));
	ret = spdk_vhost_dev_unregister(vdev);
	CU_ASSERT(ret != 0);

	cleanup_vdev(vdev);
}
350 
351 int
352 main(int argc, char **argv)
353 {
354 	CU_pSuite	suite = NULL;
355 	unsigned int	num_failures;
356 
357 	if (CU_initialize_registry() != CUE_SUCCESS) {
358 		return CU_get_error();
359 	}
360 
361 	suite = CU_add_suite("vhost_suite", test_setup, NULL);
362 	if (suite == NULL) {
363 		CU_cleanup_registry();
364 		return CU_get_error();
365 	}
366 
367 	if (
368 		CU_add_test(suite, "desc_to_iov", desc_to_iov_test) == NULL ||
369 		CU_add_test(suite, "create_controller", create_controller_test) == NULL ||
370 		CU_add_test(suite, "session_find_by_vid", session_find_by_vid_test) == NULL ||
371 		CU_add_test(suite, "remove_controller", remove_controller_test) == NULL
372 	) {
373 		CU_cleanup_registry();
374 		return CU_get_error();
375 	}
376 
377 	CU_basic_set_mode(CU_BRM_VERBOSE);
378 	CU_basic_run_tests();
379 	num_failures = CU_get_number_of_failures();
380 	CU_cleanup_registry();
381 
382 	return num_failures;
383 }
384