xref: /spdk/lib/vhost/vhost.c (revision 8afdeef3becfe9409cc9e7372bd0bc10e8b7d46d)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation. All rights reserved.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "spdk/env.h"
9 #include "spdk/likely.h"
10 #include "spdk/string.h"
11 #include "spdk/util.h"
12 #include "spdk/memory.h"
13 #include "spdk/barrier.h"
14 #include "spdk/vhost.h"
15 #include "vhost_internal.h"
16 #include "spdk/queue.h"
17 
18 
/* Set of all reactor cores vhost may schedule pollers on; filled at init. */
static struct spdk_cpuset g_vhost_core_mask;

/* All registered vhost devices; protected by g_vhost_mutex. */
static TAILQ_HEAD(, spdk_vhost_dev) g_vhost_devices = TAILQ_HEAD_INITIALIZER(
			g_vhost_devices);
static pthread_mutex_t g_vhost_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Instantiated virtio-blk transports (e.g. the built-in vhost_user_blk). */
static TAILQ_HEAD(, spdk_virtio_blk_transport) g_virtio_blk_transports = TAILQ_HEAD_INITIALIZER(
			g_virtio_blk_transports);

/* Subsystem shutdown callback; fires once the last device/transport is gone. */
static spdk_vhost_fini_cb g_fini_cb;
29 
30 struct spdk_vhost_dev *
31 spdk_vhost_dev_next(struct spdk_vhost_dev *vdev)
32 {
33 	if (vdev == NULL) {
34 		return TAILQ_FIRST(&g_vhost_devices);
35 	}
36 
37 	return TAILQ_NEXT(vdev, tailq);
38 }
39 
40 struct spdk_vhost_dev *
41 spdk_vhost_dev_find(const char *ctrlr_name)
42 {
43 	struct spdk_vhost_dev *vdev;
44 
45 	TAILQ_FOREACH(vdev, &g_vhost_devices, tailq) {
46 		if (strcmp(vdev->name, ctrlr_name) == 0) {
47 			return vdev;
48 		}
49 	}
50 
51 	return NULL;
52 }
53 
54 static int
55 vhost_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
56 {
57 	int rc;
58 	struct spdk_cpuset negative_vhost_mask;
59 
60 	if (cpumask == NULL) {
61 		return -1;
62 	}
63 
64 	if (mask == NULL) {
65 		spdk_cpuset_copy(cpumask, &g_vhost_core_mask);
66 		return 0;
67 	}
68 
69 	rc = spdk_cpuset_parse(cpumask, mask);
70 	if (rc < 0) {
71 		SPDK_ERRLOG("invalid cpumask %s\n", mask);
72 		return -1;
73 	}
74 
75 	spdk_cpuset_copy(&negative_vhost_mask, &g_vhost_core_mask);
76 	spdk_cpuset_negate(&negative_vhost_mask);
77 	spdk_cpuset_and(&negative_vhost_mask, cpumask);
78 
79 	if (spdk_cpuset_count(&negative_vhost_mask) != 0) {
80 		SPDK_ERRLOG("one of selected cpu is outside of core mask(=%s)\n",
81 			    spdk_cpuset_fmt(&g_vhost_core_mask));
82 		return -1;
83 	}
84 
85 	spdk_cpuset_and(cpumask, &g_vhost_core_mask);
86 
87 	if (spdk_cpuset_count(cpumask) == 0) {
88 		SPDK_ERRLOG("no cpu is selected among core mask(=%s)\n",
89 			    spdk_cpuset_fmt(&g_vhost_core_mask));
90 		return -1;
91 	}
92 
93 	return 0;
94 }
95 
/* Registry of available virtio-blk transport implementations (ops tables). */
TAILQ_HEAD(, virtio_blk_transport_ops_list_element)
g_spdk_virtio_blk_transport_ops = TAILQ_HEAD_INITIALIZER(g_spdk_virtio_blk_transport_ops);
98 
99 const struct spdk_virtio_blk_transport_ops *
100 virtio_blk_get_transport_ops(const char *transport_name)
101 {
102 	struct virtio_blk_transport_ops_list_element *ops;
103 	TAILQ_FOREACH(ops, &g_spdk_virtio_blk_transport_ops, link) {
104 		if (strcasecmp(transport_name, ops->ops.name) == 0) {
105 			return &ops->ops;
106 		}
107 	}
108 	return NULL;
109 }
110 
111 int
112 vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
113 		   const struct spdk_json_val *params, const struct spdk_vhost_dev_backend *backend,
114 		   const struct spdk_vhost_user_dev_backend *user_backend, bool delay)
115 {
116 	struct spdk_cpuset cpumask = {};
117 	int rc;
118 
119 	assert(vdev);
120 	if (name == NULL) {
121 		SPDK_ERRLOG("Can't register controller with no name\n");
122 		return -EINVAL;
123 	}
124 
125 	if (vhost_parse_core_mask(mask_str, &cpumask) != 0) {
126 		SPDK_ERRLOG("cpumask %s is invalid (core mask is 0x%s)\n",
127 			    mask_str, spdk_cpuset_fmt(&g_vhost_core_mask));
128 		return -EINVAL;
129 	}
130 	vdev->use_default_cpumask = false;
131 	if (!mask_str) {
132 		vdev->use_default_cpumask = true;
133 	}
134 
135 	spdk_vhost_lock();
136 	if (spdk_vhost_dev_find(name)) {
137 		SPDK_ERRLOG("vhost controller %s already exists.\n", name);
138 		spdk_vhost_unlock();
139 		return -EEXIST;
140 	}
141 
142 	vdev->name = strdup(name);
143 	if (vdev->name == NULL) {
144 		spdk_vhost_unlock();
145 		return -EIO;
146 	}
147 
148 	vdev->backend = backend;
149 	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
150 		rc = vhost_user_dev_create(vdev, name, &cpumask, user_backend, delay);
151 	} else {
152 		/* When VHOST_BACKEND_BLK, delay should not be true. */
153 		assert(delay == false);
154 		rc = virtio_blk_construct_ctrlr(vdev, name, &cpumask, params, user_backend);
155 	}
156 	if (rc != 0) {
157 		free(vdev->name);
158 		spdk_vhost_unlock();
159 		return rc;
160 	}
161 
162 	TAILQ_INSERT_TAIL(&g_vhost_devices, vdev, tailq);
163 	spdk_vhost_unlock();
164 
165 	SPDK_INFOLOG(vhost, "Controller %s: new controller added\n", vdev->name);
166 	return 0;
167 }
168 
169 int
170 vhost_dev_unregister(struct spdk_vhost_dev *vdev)
171 {
172 	int rc;
173 
174 	spdk_vhost_lock();
175 	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
176 		rc = vhost_user_dev_unregister(vdev);
177 	} else {
178 		rc = virtio_blk_destroy_ctrlr(vdev);
179 	}
180 	if (rc != 0) {
181 		spdk_vhost_unlock();
182 		return rc;
183 	}
184 
185 	SPDK_INFOLOG(vhost, "Controller %s: removed\n", vdev->name);
186 
187 	free(vdev->name);
188 
189 	TAILQ_REMOVE(&g_vhost_devices, vdev, tailq);
190 	if (TAILQ_EMPTY(&g_vhost_devices) && g_fini_cb != NULL) {
191 		g_fini_cb();
192 	}
193 	spdk_vhost_unlock();
194 
195 	return 0;
196 }
197 
198 const char *
199 spdk_vhost_dev_get_name(struct spdk_vhost_dev *vdev)
200 {
201 	assert(vdev != NULL);
202 	return vdev->name;
203 }
204 
205 const struct spdk_cpuset *
206 spdk_vhost_dev_get_cpumask(struct spdk_vhost_dev *vdev)
207 {
208 	assert(vdev != NULL);
209 	return spdk_thread_get_cpumask(vdev->thread);
210 }
211 
212 void
213 vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
214 {
215 	assert(vdev->backend->dump_info_json != NULL);
216 	vdev->backend->dump_info_json(vdev, w);
217 }
218 
/* Ask the device's backend to remove it; returns the backend's result. */
int
spdk_vhost_dev_remove(struct spdk_vhost_dev *vdev)
{
	return vdev->backend->remove_device(vdev);
}
224 
225 int
226 spdk_vhost_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
227 			  uint32_t iops_threshold)
228 {
229 	assert(vdev->backend->set_coalescing != NULL);
230 	return vdev->backend->set_coalescing(vdev, delay_base_us, iops_threshold);
231 }
232 
233 void
234 spdk_vhost_get_coalescing(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
235 			  uint32_t *iops_threshold)
236 {
237 	assert(vdev->backend->get_coalescing != NULL);
238 	vdev->backend->get_coalescing(vdev, delay_base_us, iops_threshold);
239 }
240 
/* Take the global vhost lock (protects the device list). */
void
spdk_vhost_lock(void)
{
	pthread_mutex_lock(&g_vhost_mutex);
}
246 
/*
 * Try to take the global vhost lock without blocking.
 * Returns 0 on success or a negative errno (e.g. -EBUSY) on contention.
 */
int
spdk_vhost_trylock(void)
{
	return -pthread_mutex_trylock(&g_vhost_mutex);
}
252 
/* Release the global vhost lock. */
void
spdk_vhost_unlock(void)
{
	pthread_mutex_unlock(&g_vhost_mutex);
}
258 
259 void
260 spdk_vhost_scsi_init(spdk_vhost_init_cb init_cb)
261 {
262 	uint32_t i;
263 	int ret = 0;
264 
265 	ret = vhost_user_init();
266 	if (ret != 0) {
267 		init_cb(ret);
268 		return;
269 	}
270 
271 	spdk_cpuset_zero(&g_vhost_core_mask);
272 	SPDK_ENV_FOREACH_CORE(i) {
273 		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
274 	}
275 	init_cb(ret);
276 }
277 
/*
 * Kick off removal of every registered device. g_fini_cb fires from
 * vhost_dev_unregister() once the last device is gone; if there are no
 * devices at all, fire it immediately.
 */
static void
vhost_fini(void)
{
	struct spdk_vhost_dev *vdev, *next;

	vdev = spdk_vhost_dev_next(NULL);
	if (vdev == NULL) {
		g_fini_cb();
		return;
	}

	while (vdev != NULL) {
		/* Grab the successor first; vdev may be freed by the removal. */
		next = spdk_vhost_dev_next(vdev);
		/* don't care if it fails, there's nothing we can do for now */
		spdk_vhost_dev_remove(vdev);
		vdev = next;
	}

	/* g_fini_cb will get called when last device is unregistered. */
}
298 
299 void
300 spdk_vhost_blk_init(spdk_vhost_init_cb init_cb)
301 {
302 	uint32_t i;
303 	int ret = 0;
304 
305 	ret = virtio_blk_transport_create("vhost_user_blk", NULL);
306 	if (ret != 0) {
307 		goto out;
308 	}
309 
310 	spdk_cpuset_zero(&g_vhost_core_mask);
311 	SPDK_ENV_FOREACH_CORE(i) {
312 		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
313 	}
314 out:
315 	init_cb(ret);
316 }
317 
/*
 * Begin vhost-scsi shutdown. fini_cb runs after every device has been
 * removed; device removal proceeds via the vhost-user teardown path,
 * so completion may be asynchronous.
 */
void
spdk_vhost_scsi_fini(spdk_vhost_fini_cb fini_cb)
{
	g_fini_cb = fini_cb;

	vhost_user_fini(vhost_fini);
}
325 
/*
 * Destroy virtio-blk transports one at a time. Each destroy completion
 * re-invokes this function as its callback, so the list unwinds
 * asynchronously; g_fini_cb fires once the list is empty.
 */
static void
virtio_blk_transports_destroy(void)
{
	struct spdk_virtio_blk_transport *transport = TAILQ_FIRST(&g_virtio_blk_transports);

	if (transport == NULL) {
		g_fini_cb();
		return;
	}
	/* Unlink before destroying so the re-entrant call sees a shorter list. */
	TAILQ_REMOVE(&g_virtio_blk_transports, transport, tailq);
	virtio_blk_transport_destroy(transport, virtio_blk_transports_destroy);
}
338 
/*
 * Begin vhost-blk shutdown: tear down all transports. fini_cb runs once
 * the last transport has been destroyed (possibly asynchronously).
 */
void
spdk_vhost_blk_fini(spdk_vhost_fini_cb fini_cb)
{
	g_fini_cb = fini_cb;

	virtio_blk_transports_destroy();
}
346 
347 static void
348 vhost_user_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
349 {
350 	uint32_t delay_base_us;
351 	uint32_t iops_threshold;
352 
353 	vdev->backend->write_config_json(vdev, w);
354 
355 	spdk_vhost_get_coalescing(vdev, &delay_base_us, &iops_threshold);
356 	if (delay_base_us) {
357 		spdk_json_write_object_begin(w);
358 		spdk_json_write_named_string(w, "method", "vhost_controller_set_coalescing");
359 
360 		spdk_json_write_named_object_begin(w, "params");
361 		spdk_json_write_named_string(w, "ctrlr", vdev->name);
362 		spdk_json_write_named_uint32(w, "delay_base_us", delay_base_us);
363 		spdk_json_write_named_uint32(w, "iops_threshold", iops_threshold);
364 		spdk_json_write_object_end(w);
365 
366 		spdk_json_write_object_end(w);
367 	}
368 }
369 
370 void
371 spdk_vhost_scsi_config_json(struct spdk_json_write_ctx *w)
372 {
373 	struct spdk_vhost_dev *vdev;
374 
375 	spdk_json_write_array_begin(w);
376 
377 	spdk_vhost_lock();
378 	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
379 	     vdev = spdk_vhost_dev_next(vdev)) {
380 		if (vdev->backend->type == VHOST_BACKEND_SCSI) {
381 			vhost_user_config_json(vdev, w);
382 		}
383 	}
384 	spdk_vhost_unlock();
385 
386 	spdk_json_write_array_end(w);
387 }
388 
389 static void
390 vhost_blk_dump_config_json(struct spdk_json_write_ctx *w)
391 {
392 	struct spdk_virtio_blk_transport *transport;
393 
394 	/* Write vhost transports */
395 	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
396 		/* Since vhost_user_blk is always added on SPDK startup,
397 		 * do not emit virtio_blk_create_transport RPC. */
398 		if (strcasecmp(transport->ops->name, "vhost_user_blk") != 0) {
399 			spdk_json_write_object_begin(w);
400 			spdk_json_write_named_string(w, "method", "virtio_blk_create_transport");
401 			spdk_json_write_named_object_begin(w, "params");
402 			transport->ops->dump_opts(transport, w);
403 			spdk_json_write_object_end(w);
404 			spdk_json_write_object_end(w);
405 		}
406 	}
407 }
408 
409 void
410 spdk_vhost_blk_config_json(struct spdk_json_write_ctx *w)
411 {
412 	struct spdk_vhost_dev *vdev;
413 
414 	spdk_json_write_array_begin(w);
415 
416 	spdk_vhost_lock();
417 	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
418 	     vdev = spdk_vhost_dev_next(vdev)) {
419 		if (vdev->backend->type == VHOST_BACKEND_BLK) {
420 			vhost_user_config_json(vdev, w);
421 		}
422 	}
423 	spdk_vhost_unlock();
424 
425 	vhost_blk_dump_config_json(w);
426 
427 	spdk_json_write_array_end(w);
428 }
429 
430 void
431 virtio_blk_transport_register(const struct spdk_virtio_blk_transport_ops *ops)
432 {
433 	struct virtio_blk_transport_ops_list_element *new_ops;
434 
435 	if (virtio_blk_get_transport_ops(ops->name) != NULL) {
436 		SPDK_ERRLOG("Double registering virtio blk transport type %s.\n", ops->name);
437 		assert(false);
438 		return;
439 	}
440 
441 	new_ops = calloc(1, sizeof(*new_ops));
442 	if (new_ops == NULL) {
443 		SPDK_ERRLOG("Unable to allocate memory to register new transport type %s.\n", ops->name);
444 		assert(false);
445 		return;
446 	}
447 
448 	new_ops->ops = *ops;
449 
450 	TAILQ_INSERT_TAIL(&g_spdk_virtio_blk_transport_ops, new_ops, link);
451 }
452 
453 int
454 virtio_blk_transport_create(const char *transport_name,
455 			    const struct spdk_json_val *params)
456 {
457 	const struct spdk_virtio_blk_transport_ops *ops = NULL;
458 	struct spdk_virtio_blk_transport *transport;
459 
460 	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
461 		if (strcasecmp(transport->ops->name, transport_name) == 0) {
462 			return -EEXIST;
463 		}
464 	}
465 
466 	ops = virtio_blk_get_transport_ops(transport_name);
467 	if (!ops) {
468 		SPDK_ERRLOG("Transport type '%s' unavailable.\n", transport_name);
469 		return -ENOENT;
470 	}
471 
472 	transport = ops->create(params);
473 	if (!transport) {
474 		SPDK_ERRLOG("Unable to create new transport of type %s\n", transport_name);
475 		return -EPERM;
476 	}
477 
478 	transport->ops = ops;
479 	TAILQ_INSERT_TAIL(&g_virtio_blk_transports, transport, tailq);
480 	return 0;
481 }
482 
/* Return the first created transport, or NULL if none exist. */
struct spdk_virtio_blk_transport *
virtio_blk_transport_get_first(void)
{
	return TAILQ_FIRST(&g_virtio_blk_transports);
}
488 
/* Return the transport following \p transport, or NULL at the end. */
struct spdk_virtio_blk_transport *
virtio_blk_transport_get_next(struct spdk_virtio_blk_transport *transport)
{
	return TAILQ_NEXT(transport, tailq);
}
494 
495 void
496 virtio_blk_transport_dump_opts(struct spdk_virtio_blk_transport *transport,
497 			       struct spdk_json_write_ctx *w)
498 {
499 	spdk_json_write_object_begin(w);
500 
501 	spdk_json_write_named_string(w, "name", transport->ops->name);
502 
503 	if (transport->ops->dump_opts) {
504 		transport->ops->dump_opts(transport, w);
505 	}
506 
507 	spdk_json_write_object_end(w);
508 }
509 
510 struct spdk_virtio_blk_transport *
511 virtio_blk_tgt_get_transport(const char *transport_name)
512 {
513 	struct spdk_virtio_blk_transport *transport;
514 
515 	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
516 		if (strcasecmp(transport->ops->name, transport_name) == 0) {
517 			return transport;
518 		}
519 	}
520 	return NULL;
521 }
522 
/*
 * Destroy a transport via its ops table. cb_fn is invoked by the
 * implementation when teardown completes. Returns the ops' result.
 */
int
virtio_blk_transport_destroy(struct spdk_virtio_blk_transport *transport,
			     spdk_vhost_fini_cb cb_fn)
{
	return transport->ops->destroy(transport, cb_fn);
}
529 
/* Log flag components used by SPDK_INFOLOG/SPDK_DEBUGLOG in this library. */
SPDK_LOG_REGISTER_COMPONENT(vhost)
SPDK_LOG_REGISTER_COMPONENT(vhost_ring)
532