/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/memory.h"
#include "spdk/barrier.h"
#include "spdk/vhost.h"
#include "vhost_internal.h"
#include "spdk/queue.h"

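/*
 * Global vhost state: the set of reactor cores available to vhost devices,
 * a mutex serializing access to the device tree, the list of created
 * virtio-blk transports, the pending shutdown callback, and a red-black tree
 * of all registered vhost devices keyed by controller name.
 */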
static struct spdk_cpuset g_vhost_core_mask;

static pthread_mutex_t g_vhost_mutex = PTHREAD_MUTEX_INITIALIZER;

static TAILQ_HEAD(, spdk_virtio_blk_transport) g_virtio_blk_transports = TAILQ_HEAD_INITIALIZER(
			g_virtio_blk_transports);

static spdk_vhost_fini_cb g_fini_cb;

static RB_HEAD(vhost_dev_name_tree,
	       spdk_vhost_dev) g_vhost_devices = RB_INITIALIZER(g_vhost_devices);

static int
vhost_dev_name_cmp(struct spdk_vhost_dev *vdev1, struct spdk_vhost_dev *vdev2)
{
	return strcmp(vdev1->name, vdev2->name);
}

RB_GENERATE_STATIC(vhost_dev_name_tree, spdk_vhost_dev, node, vhost_dev_name_cmp);

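/*
 * Iterate over registered vhost devices in name order. Pass NULL to get the
 * first device; returns NULL when there are no more devices.
 */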
struct spdk_vhost_dev *
spdk_vhost_dev_next(struct spdk_vhost_dev *vdev)
{
	if (vdev == NULL) {
		return RB_MIN(vhost_dev_name_tree, &g_vhost_devices);
	}

	return RB_NEXT(vhost_dev_name_tree, &g_vhost_devices, vdev);
}

struct spdk_vhost_dev *
spdk_vhost_dev_find(const char *ctrlr_name)
{
	struct spdk_vhost_dev find = {};

	find.name = (char *)ctrlr_name;

	return RB_FIND(vhost_dev_name_tree, &g_vhost_devices, &find);
}

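/*
 * Parse a cpumask string into 'cpumask' and validate it against the vhost
 * core mask. A NULL mask selects the full vhost core mask; a mask that falls
 * outside of it, or that selects no cores, is rejected.
 */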
static int
vhost_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
{
	int rc;
	struct spdk_cpuset negative_vhost_mask;

	if (cpumask == NULL) {
		return -1;
	}

	if (mask == NULL) {
		spdk_cpuset_copy(cpumask, &g_vhost_core_mask);
		return 0;
	}

	rc = spdk_cpuset_parse(cpumask, mask);
	if (rc < 0) {
		SPDK_ERRLOG("invalid cpumask %s\n", mask);
		return -1;
	}

	spdk_cpuset_copy(&negative_vhost_mask, &g_vhost_core_mask);
	spdk_cpuset_negate(&negative_vhost_mask);
	spdk_cpuset_and(&negative_vhost_mask, cpumask);

	if (spdk_cpuset_count(&negative_vhost_mask) != 0) {
		SPDK_ERRLOG("one of the selected CPUs is outside of the core mask (=%s)\n",
			    spdk_cpuset_fmt(&g_vhost_core_mask));
		return -1;
	}

	spdk_cpuset_and(cpumask, &g_vhost_core_mask);

	if (spdk_cpuset_count(cpumask) == 0) {
		SPDK_ERRLOG("no CPU is selected from the core mask (=%s)\n",
			    spdk_cpuset_fmt(&g_vhost_core_mask));
		return -1;
	}

	return 0;
}

TAILQ_HEAD(, virtio_blk_transport_ops_list_element)
g_spdk_virtio_blk_transport_ops = TAILQ_HEAD_INITIALIZER(g_spdk_virtio_blk_transport_ops);

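/* Look up registered virtio-blk transport ops by name (case-insensitive). */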
const struct spdk_virtio_blk_transport_ops *
virtio_blk_get_transport_ops(const char *transport_name)
{
	struct virtio_blk_transport_ops_list_element *ops;
	TAILQ_FOREACH(ops, &g_spdk_virtio_blk_transport_ops, link) {
		if (strcasecmp(transport_name, ops->ops.name) == 0) {
			return &ops->ops;
		}
	}
	return NULL;
}

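/*
 * Register a new vhost controller: validate the name and cpumask, reject
 * duplicate names, then hand construction off to the vhost-user SCSI or
 * virtio-blk backend and insert the device into the global name tree.
 */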
int
vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
		   const struct spdk_json_val *params, const struct spdk_vhost_dev_backend *backend,
		   const struct spdk_vhost_user_dev_backend *user_backend, bool delay)
{
	struct spdk_cpuset cpumask = {};
	int rc;

	assert(vdev);
	if (name == NULL) {
		SPDK_ERRLOG("Can't register controller with no name\n");
		return -EINVAL;
	}

	if (vhost_parse_core_mask(mask_str, &cpumask) != 0) {
		SPDK_ERRLOG("cpumask %s is invalid (core mask is 0x%s)\n",
			    mask_str, spdk_cpuset_fmt(&g_vhost_core_mask));
		return -EINVAL;
	}
	vdev->use_default_cpumask = false;
	if (!mask_str) {
		vdev->use_default_cpumask = true;
	}

	spdk_vhost_lock();
	if (spdk_vhost_dev_find(name)) {
		SPDK_ERRLOG("vhost controller %s already exists.\n", name);
		spdk_vhost_unlock();
		return -EEXIST;
	}

	vdev->name = strdup(name);
	if (vdev->name == NULL) {
		spdk_vhost_unlock();
		return -EIO;
	}

	vdev->backend = backend;
	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
		rc = vhost_user_dev_create(vdev, name, &cpumask, user_backend, delay);
	} else {
		/* For VHOST_BACKEND_BLK controllers, delay must not be set. */
		assert(delay == false);
		rc = virtio_blk_construct_ctrlr(vdev, name, &cpumask, params, user_backend);
	}
	if (rc != 0) {
		free(vdev->name);
		spdk_vhost_unlock();
		return rc;
	}

	RB_INSERT(vhost_dev_name_tree, &g_vhost_devices, vdev);
	spdk_vhost_unlock();

	SPDK_INFOLOG(vhost, "Controller %s: new controller added\n", vdev->name);
	return 0;
}

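/*
 * Unregister a vhost controller via its backend and remove it from the
 * global name tree. If this was the last device and a shutdown callback is
 * pending, invoke it.
 */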
int
vhost_dev_unregister(struct spdk_vhost_dev *vdev)
{
	int rc;

	spdk_vhost_lock();
	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
		rc = vhost_user_dev_unregister(vdev);
	} else {
		rc = virtio_blk_destroy_ctrlr(vdev);
	}
	if (rc != 0) {
		spdk_vhost_unlock();
		return rc;
	}

	SPDK_INFOLOG(vhost, "Controller %s: removed\n", vdev->name);

	free(vdev->name);

	RB_REMOVE(vhost_dev_name_tree, &g_vhost_devices, vdev);
	if (RB_EMPTY(&g_vhost_devices) && g_fini_cb != NULL) {
		g_fini_cb();
	}
	spdk_vhost_unlock();

	return 0;
}

const char *
spdk_vhost_dev_get_name(struct spdk_vhost_dev *vdev)
{
	assert(vdev != NULL);
	return vdev->name;
}

const struct spdk_cpuset *
spdk_vhost_dev_get_cpumask(struct spdk_vhost_dev *vdev)
{
	assert(vdev != NULL);
	return spdk_thread_get_cpumask(vdev->thread);
}

void
vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
{
	assert(vdev->backend->dump_info_json != NULL);
	vdev->backend->dump_info_json(vdev, w);
}

int
spdk_vhost_dev_remove(struct spdk_vhost_dev *vdev)
{
	return vdev->backend->remove_device(vdev);
}

int
spdk_vhost_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
			  uint32_t iops_threshold)
{
	assert(vdev->backend->set_coalescing != NULL);
	return vdev->backend->set_coalescing(vdev, delay_base_us, iops_threshold);
}

void
spdk_vhost_get_coalescing(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
			  uint32_t *iops_threshold)
{
	assert(vdev->backend->get_coalescing != NULL);
	vdev->backend->get_coalescing(vdev, delay_base_us, iops_threshold);
}

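/*
 * The global vhost mutex serializes access to the device tree.
 * spdk_vhost_trylock() returns 0 on success or a negative errno value if the
 * lock could not be taken.
 */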
void
spdk_vhost_lock(void)
{
	pthread_mutex_lock(&g_vhost_mutex);
}

int
spdk_vhost_trylock(void)
{
	return -pthread_mutex_trylock(&g_vhost_mutex);
}

void
spdk_vhost_unlock(void)
{
	pthread_mutex_unlock(&g_vhost_mutex);
}

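/*
 * Initialize the vhost-user layer and record the set of reactor cores that
 * vhost devices may be scheduled on, then report the result via init_cb.
 */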
void
spdk_vhost_scsi_init(spdk_vhost_init_cb init_cb)
{
	uint32_t i;
	int ret = 0;

	ret = vhost_user_init();
	if (ret != 0) {
		init_cb(ret);
		return;
	}

	spdk_cpuset_zero(&g_vhost_core_mask);
	SPDK_ENV_FOREACH_CORE(i) {
		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
	}
	init_cb(ret);
}

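/*
 * Remove all remaining vhost devices. If none are registered, complete
 * immediately; otherwise the registered fini callback fires once the last
 * device is unregistered (see vhost_dev_unregister()).
 */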
static void
vhost_fini(void)
{
	struct spdk_vhost_dev *vdev, *tmp;

	if (spdk_vhost_dev_next(NULL) == NULL) {
		g_fini_cb();
		return;
	}

	vdev = spdk_vhost_dev_next(NULL);
	while (vdev != NULL) {
		tmp = spdk_vhost_dev_next(vdev);
		/* Ignore any failure here; there is nothing more we can do at this point. */
		spdk_vhost_dev_remove(vdev);
		vdev = tmp;
	}

	/* g_fini_cb will get called when the last device is unregistered. */
}

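/*
 * Initialize the virtio-blk layer: create the default "vhost_user_blk"
 * transport and record the reactor core mask, then report the result via
 * init_cb.
 */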
void
spdk_vhost_blk_init(spdk_vhost_init_cb init_cb)
{
	uint32_t i;
	int ret = 0;

	ret = virtio_blk_transport_create("vhost_user_blk", NULL);
	if (ret != 0) {
		goto out;
	}

	spdk_cpuset_zero(&g_vhost_core_mask);
	SPDK_ENV_FOREACH_CORE(i) {
		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
	}
out:
	init_cb(ret);
}

void
spdk_vhost_scsi_fini(spdk_vhost_fini_cb fini_cb)
{
	g_fini_cb = fini_cb;

	vhost_user_fini(vhost_fini);
}

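/*
 * Destroy virtio-blk transports one at a time, re-invoking this function as
 * the completion callback until the list is empty, then call g_fini_cb.
 */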
static void
virtio_blk_transports_destroy(void)
{
	struct spdk_virtio_blk_transport *transport = TAILQ_FIRST(&g_virtio_blk_transports);

	if (transport == NULL) {
		g_fini_cb();
		return;
	}
	TAILQ_REMOVE(&g_virtio_blk_transports, transport, tailq);
	virtio_blk_transport_destroy(transport, virtio_blk_transports_destroy);
}

void
spdk_vhost_blk_fini(spdk_vhost_fini_cb fini_cb)
{
	g_fini_cb = fini_cb;

	virtio_blk_transports_destroy();
}

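/*
 * Emit the backend's config JSON for a controller, followed by a
 * vhost_controller_set_coalescing RPC entry when coalescing is enabled.
 */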
static void
vhost_user_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
{
	uint32_t delay_base_us;
	uint32_t iops_threshold;

	vdev->backend->write_config_json(vdev, w);

	spdk_vhost_get_coalescing(vdev, &delay_base_us, &iops_threshold);
	if (delay_base_us) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "vhost_controller_set_coalescing");

		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "ctrlr", vdev->name);
		spdk_json_write_named_uint32(w, "delay_base_us", delay_base_us);
		spdk_json_write_named_uint32(w, "iops_threshold", iops_threshold);
		spdk_json_write_object_end(w);

		spdk_json_write_object_end(w);
	}
}

void
spdk_vhost_scsi_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_vhost_dev *vdev;

	spdk_json_write_array_begin(w);

	spdk_vhost_lock();
	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
	     vdev = spdk_vhost_dev_next(vdev)) {
		if (vdev->backend->type == VHOST_BACKEND_SCSI) {
			vhost_user_config_json(vdev, w);
		}
	}
	spdk_vhost_unlock();

	spdk_json_write_array_end(w);
}

static void
vhost_blk_dump_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_virtio_blk_transport *transport;

	/* Write vhost transports */
	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
		/* Since vhost_user_blk is always added on SPDK startup,
		 * do not emit virtio_blk_create_transport RPC. */
		if (strcasecmp(transport->ops->name, "vhost_user_blk") != 0) {
			spdk_json_write_object_begin(w);
			spdk_json_write_named_string(w, "method", "virtio_blk_create_transport");
			spdk_json_write_named_object_begin(w, "params");
			transport->ops->dump_opts(transport, w);
			spdk_json_write_object_end(w);
			spdk_json_write_object_end(w);
		}
	}
}

void
spdk_vhost_blk_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_vhost_dev *vdev;

	spdk_json_write_array_begin(w);

	spdk_vhost_lock();
	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
	     vdev = spdk_vhost_dev_next(vdev)) {
		if (vdev->backend->type == VHOST_BACKEND_BLK) {
			vhost_user_config_json(vdev, w);
		}
	}
	spdk_vhost_unlock();

	vhost_blk_dump_config_json(w);

	spdk_json_write_array_end(w);
}

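/*
 * Register a virtio-blk transport type. Registering the same name twice is
 * treated as a programming error.
 */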
void
virtio_blk_transport_register(const struct spdk_virtio_blk_transport_ops *ops)
{
	struct virtio_blk_transport_ops_list_element *new_ops;

	if (virtio_blk_get_transport_ops(ops->name) != NULL) {
		SPDK_ERRLOG("Double registering virtio blk transport type %s.\n", ops->name);
		assert(false);
		return;
	}

	new_ops = calloc(1, sizeof(*new_ops));
	if (new_ops == NULL) {
		SPDK_ERRLOG("Unable to allocate memory to register new transport type %s.\n", ops->name);
		assert(false);
		return;
	}

	new_ops->ops = *ops;

	TAILQ_INSERT_TAIL(&g_spdk_virtio_blk_transport_ops, new_ops, link);
}

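/*
 * Create a transport instance by name: -EEXIST if one already exists,
 * -ENOENT if no such transport type is registered, -EPERM if the transport's
 * create callback fails.
 */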
int
virtio_blk_transport_create(const char *transport_name,
			    const struct spdk_json_val *params)
{
	const struct spdk_virtio_blk_transport_ops *ops = NULL;
	struct spdk_virtio_blk_transport *transport;

	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
		if (strcasecmp(transport->ops->name, transport_name) == 0) {
			return -EEXIST;
		}
	}

	ops = virtio_blk_get_transport_ops(transport_name);
	if (!ops) {
		SPDK_ERRLOG("Transport type '%s' unavailable.\n", transport_name);
		return -ENOENT;
	}

	transport = ops->create(params);
	if (!transport) {
		SPDK_ERRLOG("Unable to create new transport of type %s\n", transport_name);
		return -EPERM;
	}

	transport->ops = ops;
	TAILQ_INSERT_TAIL(&g_virtio_blk_transports, transport, tailq);
	return 0;
}

struct spdk_virtio_blk_transport *
virtio_blk_transport_get_first(void)
{
	return TAILQ_FIRST(&g_virtio_blk_transports);
}

struct spdk_virtio_blk_transport *
virtio_blk_transport_get_next(struct spdk_virtio_blk_transport *transport)
{
	return TAILQ_NEXT(transport, tailq);
}

void
virtio_blk_transport_dump_opts(struct spdk_virtio_blk_transport *transport,
			       struct spdk_json_write_ctx *w)
{
	spdk_json_write_object_begin(w);

	spdk_json_write_named_string(w, "name", transport->ops->name);

	if (transport->ops->dump_opts) {
		transport->ops->dump_opts(transport, w);
	}

	spdk_json_write_object_end(w);
}

struct spdk_virtio_blk_transport *
virtio_blk_tgt_get_transport(const char *transport_name)
{
	struct spdk_virtio_blk_transport *transport;

	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
		if (strcasecmp(transport->ops->name, transport_name) == 0) {
			return transport;
		}
	}
	return NULL;
}

int
virtio_blk_transport_destroy(struct spdk_virtio_blk_transport *transport,
			     spdk_vhost_fini_cb cb_fn)
{
	return transport->ops->destroy(transport, cb_fn);
}

SPDK_LOG_REGISTER_COMPONENT(vhost)
SPDK_LOG_REGISTER_COMPONENT(vhost_ring)