xref: /spdk/lib/vhost/vhost.c (revision 877573897ad52be4fa8989f7617bd655b87e05c4)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation. All rights reserved.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "spdk/env.h"
9 #include "spdk/likely.h"
10 #include "spdk/string.h"
11 #include "spdk/util.h"
12 #include "spdk/memory.h"
13 #include "spdk/barrier.h"
14 #include "spdk/vhost.h"
15 #include "vhost_internal.h"
16 
/* Set of CPU cores vhost may schedule pollers on; populated at subsystem init. */
static struct spdk_cpuset g_vhost_core_mask;

/* All registered vhost devices; protected by g_vhost_mutex. */
static TAILQ_HEAD(, spdk_vhost_dev) g_vhost_devices = TAILQ_HEAD_INITIALIZER(
			g_vhost_devices);
static pthread_mutex_t g_vhost_mutex = PTHREAD_MUTEX_INITIALIZER;

/* All created virtio-blk transport instances (at most one per transport type). */
static TAILQ_HEAD(, spdk_virtio_blk_transport) g_virtio_blk_transports = TAILQ_HEAD_INITIALIZER(
			g_virtio_blk_transports);

/* Completion callback stored during subsystem teardown. */
static spdk_vhost_fini_cb g_fini_cb;
27 
28 struct spdk_vhost_dev *
29 spdk_vhost_dev_next(struct spdk_vhost_dev *vdev)
30 {
31 	if (vdev == NULL) {
32 		return TAILQ_FIRST(&g_vhost_devices);
33 	}
34 
35 	return TAILQ_NEXT(vdev, tailq);
36 }
37 
38 struct spdk_vhost_dev *
39 spdk_vhost_dev_find(const char *ctrlr_name)
40 {
41 	struct spdk_vhost_dev *vdev;
42 
43 	TAILQ_FOREACH(vdev, &g_vhost_devices, tailq) {
44 		if (strcmp(vdev->name, ctrlr_name) == 0) {
45 			return vdev;
46 		}
47 	}
48 
49 	return NULL;
50 }
51 
52 static int
53 vhost_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
54 {
55 	int rc;
56 	struct spdk_cpuset negative_vhost_mask;
57 
58 	if (cpumask == NULL) {
59 		return -1;
60 	}
61 
62 	if (mask == NULL) {
63 		spdk_cpuset_copy(cpumask, &g_vhost_core_mask);
64 		return 0;
65 	}
66 
67 	rc = spdk_cpuset_parse(cpumask, mask);
68 	if (rc < 0) {
69 		SPDK_ERRLOG("invalid cpumask %s\n", mask);
70 		return -1;
71 	}
72 
73 	spdk_cpuset_copy(&negative_vhost_mask, &g_vhost_core_mask);
74 	spdk_cpuset_negate(&negative_vhost_mask);
75 	spdk_cpuset_and(&negative_vhost_mask, cpumask);
76 
77 	if (spdk_cpuset_count(&negative_vhost_mask) != 0) {
78 		SPDK_ERRLOG("one of selected cpu is outside of core mask(=%s)\n",
79 			    spdk_cpuset_fmt(&g_vhost_core_mask));
80 		return -1;
81 	}
82 
83 	spdk_cpuset_and(cpumask, &g_vhost_core_mask);
84 
85 	if (spdk_cpuset_count(cpumask) == 0) {
86 		SPDK_ERRLOG("no cpu is selected among core mask(=%s)\n",
87 			    spdk_cpuset_fmt(&g_vhost_core_mask));
88 		return -1;
89 	}
90 
91 	return 0;
92 }
93 
/* Registry of virtio-blk transport ops, filled by virtio_blk_transport_register(). */
TAILQ_HEAD(, virtio_blk_transport_ops_list_element)
g_spdk_virtio_blk_transport_ops = TAILQ_HEAD_INITIALIZER(g_spdk_virtio_blk_transport_ops);
96 
97 const struct spdk_virtio_blk_transport_ops *
98 virtio_blk_get_transport_ops(const char *transport_name)
99 {
100 	struct virtio_blk_transport_ops_list_element *ops;
101 	TAILQ_FOREACH(ops, &g_spdk_virtio_blk_transport_ops, link) {
102 		if (strcasecmp(transport_name, ops->ops.name) == 0) {
103 			return &ops->ops;
104 		}
105 	}
106 	return NULL;
107 }
108 
109 int
110 vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
111 		   const struct spdk_json_val *params,
112 		   const struct spdk_vhost_dev_backend *backend,
113 		   const struct spdk_vhost_user_dev_backend *user_backend)
114 {
115 	struct spdk_cpuset cpumask = {};
116 	int rc;
117 
118 	assert(vdev);
119 	if (name == NULL) {
120 		SPDK_ERRLOG("Can't register controller with no name\n");
121 		return -EINVAL;
122 	}
123 
124 	if (vhost_parse_core_mask(mask_str, &cpumask) != 0) {
125 		SPDK_ERRLOG("cpumask %s is invalid (core mask is 0x%s)\n",
126 			    mask_str, spdk_cpuset_fmt(&g_vhost_core_mask));
127 		return -EINVAL;
128 	}
129 
130 	if (spdk_vhost_dev_find(name)) {
131 		SPDK_ERRLOG("vhost controller %s already exists.\n", name);
132 		return -EEXIST;
133 	}
134 
135 	vdev->name = strdup(name);
136 	if (vdev->name == NULL) {
137 		return -EIO;
138 	}
139 
140 	vdev->backend = backend;
141 	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
142 		rc = vhost_user_dev_register(vdev, name, &cpumask, user_backend);
143 	} else {
144 		rc = virtio_blk_construct_ctrlr(vdev, name, &cpumask, params, user_backend);
145 	}
146 	if (rc != 0) {
147 		free(vdev->name);
148 		return rc;
149 	}
150 
151 	TAILQ_INSERT_TAIL(&g_vhost_devices, vdev, tailq);
152 
153 	SPDK_INFOLOG(vhost, "Controller %s: new controller added\n", vdev->name);
154 	return 0;
155 }
156 
157 int
158 vhost_dev_unregister(struct spdk_vhost_dev *vdev)
159 {
160 	int rc;
161 
162 	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
163 		rc = vhost_user_dev_unregister(vdev);
164 	} else {
165 		rc = virtio_blk_destroy_ctrlr(vdev);
166 	}
167 	if (rc != 0) {
168 		return rc;
169 	}
170 
171 	SPDK_INFOLOG(vhost, "Controller %s: removed\n", vdev->name);
172 
173 	free(vdev->name);
174 	TAILQ_REMOVE(&g_vhost_devices, vdev, tailq);
175 
176 	if (TAILQ_EMPTY(&g_vhost_devices) && g_fini_cb != NULL) {
177 		g_fini_cb();
178 	}
179 
180 	return 0;
181 }
182 
183 const char *
184 spdk_vhost_dev_get_name(struct spdk_vhost_dev *vdev)
185 {
186 	assert(vdev != NULL);
187 	return vdev->name;
188 }
189 
190 const struct spdk_cpuset *
191 spdk_vhost_dev_get_cpumask(struct spdk_vhost_dev *vdev)
192 {
193 	assert(vdev != NULL);
194 	return spdk_thread_get_cpumask(vdev->thread);
195 }
196 
197 void
198 vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
199 {
200 	assert(vdev->backend->dump_info_json != NULL);
201 	vdev->backend->dump_info_json(vdev, w);
202 }
203 
204 int
205 spdk_vhost_dev_remove(struct spdk_vhost_dev *vdev)
206 {
207 	return vdev->backend->remove_device(vdev);
208 }
209 
210 int
211 spdk_vhost_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
212 			  uint32_t iops_threshold)
213 {
214 	assert(vdev->backend->set_coalescing != NULL);
215 	return vdev->backend->set_coalescing(vdev, delay_base_us, iops_threshold);
216 }
217 
218 void
219 spdk_vhost_get_coalescing(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
220 			  uint32_t *iops_threshold)
221 {
222 	assert(vdev->backend->get_coalescing != NULL);
223 	vdev->backend->get_coalescing(vdev, delay_base_us, iops_threshold);
224 }
225 
/* Acquire the global vhost device-list mutex (blocking). */
void
spdk_vhost_lock(void)
{
	pthread_mutex_lock(&g_vhost_mutex);
}
231 
/*
 * Try to acquire the global vhost device-list mutex without blocking.
 * Returns 0 on success or a negated errno (e.g. -EBUSY) on failure.
 */
int
spdk_vhost_trylock(void)
{
	return -pthread_mutex_trylock(&g_vhost_mutex);
}
237 
/* Release the global vhost device-list mutex. */
void
spdk_vhost_unlock(void)
{
	pthread_mutex_unlock(&g_vhost_mutex);
}
243 
244 void
245 spdk_vhost_scsi_init(spdk_vhost_init_cb init_cb)
246 {
247 	uint32_t i;
248 	int ret = 0;
249 
250 	ret = vhost_user_init();
251 	if (ret != 0) {
252 		init_cb(ret);
253 		return;
254 	}
255 
256 	spdk_cpuset_zero(&g_vhost_core_mask);
257 	SPDK_ENV_FOREACH_CORE(i) {
258 		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
259 	}
260 	init_cb(ret);
261 }
262 
263 static void
264 vhost_fini(void)
265 {
266 	struct spdk_vhost_dev *vdev, *tmp;
267 
268 	spdk_vhost_lock();
269 	if (spdk_vhost_dev_next(NULL) == NULL) {
270 		spdk_vhost_unlock();
271 		g_fini_cb();
272 		return;
273 	}
274 
275 	vdev = spdk_vhost_dev_next(NULL);
276 	while (vdev != NULL) {
277 		tmp = spdk_vhost_dev_next(vdev);
278 		spdk_vhost_dev_remove(vdev);
279 		/* don't care if it fails, there's nothing we can do for now */
280 		vdev = tmp;
281 	}
282 	spdk_vhost_unlock();
283 
284 	/* g_fini_cb will get called when last device is unregistered. */
285 }
286 
287 void
288 spdk_vhost_blk_init(spdk_vhost_init_cb init_cb)
289 {
290 	uint32_t i;
291 	int ret = 0;
292 
293 	ret = virtio_blk_transport_create("vhost_user_blk", NULL);
294 	if (ret != 0) {
295 		goto out;
296 	}
297 
298 	spdk_cpuset_zero(&g_vhost_core_mask);
299 	SPDK_ENV_FOREACH_CORE(i) {
300 		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
301 	}
302 out:
303 	init_cb(ret);
304 }
305 
/*
 * Begin vhost-scsi subsystem teardown.  @fini_cb is stored globally and
 * invoked (possibly asynchronously) once all devices are unregistered;
 * vhost_user_fini() drives the actual device removal via vhost_fini().
 */
void
spdk_vhost_scsi_fini(spdk_vhost_fini_cb fini_cb)
{
	g_fini_cb = fini_cb;

	vhost_user_fini(vhost_fini);
}
313 
/*
 * Destroy virtio-blk transports one at a time.  Each transport's destroy
 * completion re-enters this function (passed as the callback) to handle
 * the next one; once the list is empty the stored fini callback fires.
 */
static void
virtio_blk_transports_destroy(void)
{
	struct spdk_virtio_blk_transport *transport = TAILQ_FIRST(&g_virtio_blk_transports);

	if (transport == NULL) {
		/* All transports gone - teardown is complete. */
		g_fini_cb();
		return;
	}
	/* Unlink before destroy so re-entry sees the next transport. */
	TAILQ_REMOVE(&g_virtio_blk_transports, transport, tailq);
	virtio_blk_transport_destroy(transport, virtio_blk_transports_destroy);
}
326 
/*
 * Begin vhost-blk subsystem teardown.  @fini_cb is stored globally and
 * invoked once every virtio-blk transport has been destroyed.
 */
void
spdk_vhost_blk_fini(spdk_vhost_fini_cb fini_cb)
{
	g_fini_cb = fini_cb;

	virtio_blk_transports_destroy();
}
334 
335 static void
336 vhost_user_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
337 {
338 	uint32_t delay_base_us;
339 	uint32_t iops_threshold;
340 
341 	vdev->backend->write_config_json(vdev, w);
342 
343 	spdk_vhost_get_coalescing(vdev, &delay_base_us, &iops_threshold);
344 	if (delay_base_us) {
345 		spdk_json_write_object_begin(w);
346 		spdk_json_write_named_string(w, "method", "vhost_controller_set_coalescing");
347 
348 		spdk_json_write_named_object_begin(w, "params");
349 		spdk_json_write_named_string(w, "ctrlr", vdev->name);
350 		spdk_json_write_named_uint32(w, "delay_base_us", delay_base_us);
351 		spdk_json_write_named_uint32(w, "iops_threshold", iops_threshold);
352 		spdk_json_write_object_end(w);
353 
354 		spdk_json_write_object_end(w);
355 	}
356 }
357 
358 void
359 spdk_vhost_scsi_config_json(struct spdk_json_write_ctx *w)
360 {
361 	struct spdk_vhost_dev *vdev;
362 
363 	spdk_json_write_array_begin(w);
364 
365 	spdk_vhost_lock();
366 	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
367 	     vdev = spdk_vhost_dev_next(vdev)) {
368 		if (vdev->backend->type == VHOST_BACKEND_SCSI) {
369 			vhost_user_config_json(vdev, w);
370 		}
371 	}
372 	spdk_vhost_unlock();
373 
374 	spdk_json_write_array_end(w);
375 }
376 
377 void
378 spdk_vhost_blk_config_json(struct spdk_json_write_ctx *w)
379 {
380 	struct spdk_vhost_dev *vdev;
381 
382 	spdk_json_write_array_begin(w);
383 
384 	spdk_vhost_lock();
385 	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
386 	     vdev = spdk_vhost_dev_next(vdev)) {
387 		if (vdev->backend->type == VHOST_BACKEND_BLK) {
388 			vhost_user_config_json(vdev, w);
389 		}
390 	}
391 	spdk_vhost_unlock();
392 
393 	spdk_json_write_array_end(w);
394 }
395 
396 void
397 virtio_blk_transport_register(const struct spdk_virtio_blk_transport_ops *ops)
398 {
399 	struct virtio_blk_transport_ops_list_element *new_ops;
400 
401 	if (virtio_blk_get_transport_ops(ops->name) != NULL) {
402 		SPDK_ERRLOG("Double registering virtio blk transport type %s.\n", ops->name);
403 		assert(false);
404 		return;
405 	}
406 
407 	new_ops = calloc(1, sizeof(*new_ops));
408 	if (new_ops == NULL) {
409 		SPDK_ERRLOG("Unable to allocate memory to register new transport type %s.\n", ops->name);
410 		assert(false);
411 		return;
412 	}
413 
414 	new_ops->ops = *ops;
415 
416 	TAILQ_INSERT_TAIL(&g_spdk_virtio_blk_transport_ops, new_ops, link);
417 }
418 
419 int
420 virtio_blk_transport_create(const char *transport_name,
421 			    const struct spdk_json_val *params)
422 {
423 	const struct spdk_virtio_blk_transport_ops *ops = NULL;
424 	struct spdk_virtio_blk_transport *transport;
425 
426 	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
427 		if (strcasecmp(transport->ops->name, transport_name) == 0) {
428 			return -EEXIST;
429 		}
430 	}
431 
432 	ops = virtio_blk_get_transport_ops(transport_name);
433 	if (!ops) {
434 		SPDK_ERRLOG("Transport type '%s' unavailable.\n", transport_name);
435 		return -ENOENT;
436 	}
437 
438 	transport = ops->create(params);
439 	if (!transport) {
440 		SPDK_ERRLOG("Unable to create new transport of type %s\n", transport_name);
441 		return -EPERM;
442 	}
443 
444 	transport->ops = ops;
445 	TAILQ_INSERT_TAIL(&g_virtio_blk_transports, transport, tailq);
446 	return 0;
447 }
448 
449 int
450 virtio_blk_transport_destroy(struct spdk_virtio_blk_transport *transport,
451 			     spdk_vhost_fini_cb cb_fn)
452 {
453 	return transport->ops->destroy(transport, cb_fn);
454 }
455 
/* Register the log flags used by SPDK_INFOLOG()/debug tracing in this library. */
SPDK_LOG_REGISTER_COMPONENT(vhost)
SPDK_LOG_REGISTER_COMPONENT(vhost_ring)
458