xref: /spdk/lib/vhost/vhost.c (revision b02581a89058ebaebe03bd0e16e3b58adfe406c1)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation. All rights reserved.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "spdk/env.h"
9 #include "spdk/likely.h"
10 #include "spdk/string.h"
11 #include "spdk/util.h"
12 #include "spdk/memory.h"
13 #include "spdk/barrier.h"
14 #include "spdk/vhost.h"
15 #include "vhost_internal.h"
16 #include "spdk/queue.h"
17 
18 
/* Set of all cores vhost is allowed to use; filled in at init time
 * from SPDK_ENV_FOREACH_CORE (see spdk_vhost_scsi_init/spdk_vhost_blk_init). */
static struct spdk_cpuset g_vhost_core_mask;

/* Global list of every registered vhost controller (both SCSI and BLK). */
static TAILQ_HEAD(, spdk_vhost_dev) g_vhost_devices = TAILQ_HEAD_INITIALIZER(
			g_vhost_devices);
/* Protects g_vhost_devices; taken via spdk_vhost_lock()/spdk_vhost_unlock(). */
static pthread_mutex_t g_vhost_mutex = PTHREAD_MUTEX_INITIALIZER;

/* List of created virtio-blk transport instances (e.g. "vhost_user_blk"). */
static TAILQ_HEAD(, spdk_virtio_blk_transport) g_virtio_blk_transports = TAILQ_HEAD_INITIALIZER(
			g_virtio_blk_transports);

/* Completion callback stored by the fini entry points; invoked once the
 * corresponding teardown sequence finishes. */
static spdk_vhost_fini_cb g_fini_cb;
29 
30 struct spdk_vhost_dev *
31 spdk_vhost_dev_next(struct spdk_vhost_dev *vdev)
32 {
33 	if (vdev == NULL) {
34 		return TAILQ_FIRST(&g_vhost_devices);
35 	}
36 
37 	return TAILQ_NEXT(vdev, tailq);
38 }
39 
40 struct spdk_vhost_dev *
41 spdk_vhost_dev_find(const char *ctrlr_name)
42 {
43 	struct spdk_vhost_dev *vdev;
44 
45 	TAILQ_FOREACH(vdev, &g_vhost_devices, tailq) {
46 		if (strcmp(vdev->name, ctrlr_name) == 0) {
47 			return vdev;
48 		}
49 	}
50 
51 	return NULL;
52 }
53 
54 static int
55 vhost_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
56 {
57 	int rc;
58 	struct spdk_cpuset negative_vhost_mask;
59 
60 	if (cpumask == NULL) {
61 		return -1;
62 	}
63 
64 	if (mask == NULL) {
65 		spdk_cpuset_copy(cpumask, &g_vhost_core_mask);
66 		return 0;
67 	}
68 
69 	rc = spdk_cpuset_parse(cpumask, mask);
70 	if (rc < 0) {
71 		SPDK_ERRLOG("invalid cpumask %s\n", mask);
72 		return -1;
73 	}
74 
75 	spdk_cpuset_copy(&negative_vhost_mask, &g_vhost_core_mask);
76 	spdk_cpuset_negate(&negative_vhost_mask);
77 	spdk_cpuset_and(&negative_vhost_mask, cpumask);
78 
79 	if (spdk_cpuset_count(&negative_vhost_mask) != 0) {
80 		SPDK_ERRLOG("one of selected cpu is outside of core mask(=%s)\n",
81 			    spdk_cpuset_fmt(&g_vhost_core_mask));
82 		return -1;
83 	}
84 
85 	spdk_cpuset_and(cpumask, &g_vhost_core_mask);
86 
87 	if (spdk_cpuset_count(cpumask) == 0) {
88 		SPDK_ERRLOG("no cpu is selected among core mask(=%s)\n",
89 			    spdk_cpuset_fmt(&g_vhost_core_mask));
90 		return -1;
91 	}
92 
93 	return 0;
94 }
95 
/* Registry of all virtio-blk transport ops tables added through
 * virtio_blk_transport_register(). */
TAILQ_HEAD(, virtio_blk_transport_ops_list_element)
g_spdk_virtio_blk_transport_ops = TAILQ_HEAD_INITIALIZER(g_spdk_virtio_blk_transport_ops);
98 
99 const struct spdk_virtio_blk_transport_ops *
100 virtio_blk_get_transport_ops(const char *transport_name)
101 {
102 	struct virtio_blk_transport_ops_list_element *ops;
103 	TAILQ_FOREACH(ops, &g_spdk_virtio_blk_transport_ops, link) {
104 		if (strcasecmp(transport_name, ops->ops.name) == 0) {
105 			return &ops->ops;
106 		}
107 	}
108 	return NULL;
109 }
110 
111 int
112 vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
113 		   const struct spdk_json_val *params, const struct spdk_vhost_dev_backend *backend,
114 		   const struct spdk_vhost_user_dev_backend *user_backend, bool delay)
115 {
116 	struct spdk_cpuset cpumask = {};
117 	int rc;
118 
119 	assert(vdev);
120 	if (name == NULL) {
121 		SPDK_ERRLOG("Can't register controller with no name\n");
122 		return -EINVAL;
123 	}
124 
125 	if (vhost_parse_core_mask(mask_str, &cpumask) != 0) {
126 		SPDK_ERRLOG("cpumask %s is invalid (core mask is 0x%s)\n",
127 			    mask_str, spdk_cpuset_fmt(&g_vhost_core_mask));
128 		return -EINVAL;
129 	}
130 
131 	spdk_vhost_lock();
132 	if (spdk_vhost_dev_find(name)) {
133 		SPDK_ERRLOG("vhost controller %s already exists.\n", name);
134 		spdk_vhost_unlock();
135 		return -EEXIST;
136 	}
137 
138 	vdev->name = strdup(name);
139 	if (vdev->name == NULL) {
140 		spdk_vhost_unlock();
141 		return -EIO;
142 	}
143 
144 	vdev->backend = backend;
145 	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
146 		rc = vhost_user_dev_create(vdev, name, &cpumask, user_backend, delay);
147 	} else {
148 		/* When VHOST_BACKEND_BLK, delay should not be true. */
149 		assert(delay == false);
150 		rc = virtio_blk_construct_ctrlr(vdev, name, &cpumask, params, user_backend);
151 	}
152 	if (rc != 0) {
153 		free(vdev->name);
154 		spdk_vhost_unlock();
155 		return rc;
156 	}
157 
158 	TAILQ_INSERT_TAIL(&g_vhost_devices, vdev, tailq);
159 	spdk_vhost_unlock();
160 
161 	SPDK_INFOLOG(vhost, "Controller %s: new controller added\n", vdev->name);
162 	return 0;
163 }
164 
165 int
166 vhost_dev_unregister(struct spdk_vhost_dev *vdev)
167 {
168 	int rc;
169 
170 	spdk_vhost_lock();
171 	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
172 		rc = vhost_user_dev_unregister(vdev);
173 	} else {
174 		rc = virtio_blk_destroy_ctrlr(vdev);
175 	}
176 	if (rc != 0) {
177 		spdk_vhost_unlock();
178 		return rc;
179 	}
180 
181 	SPDK_INFOLOG(vhost, "Controller %s: removed\n", vdev->name);
182 
183 	free(vdev->name);
184 
185 	TAILQ_REMOVE(&g_vhost_devices, vdev, tailq);
186 	if (TAILQ_EMPTY(&g_vhost_devices) && g_fini_cb != NULL) {
187 		g_fini_cb();
188 	}
189 	spdk_vhost_unlock();
190 
191 	return 0;
192 }
193 
/* Return the controller name assigned at registration time.
 * @vdev must be non-NULL; the returned string is owned by the device. */
const char *
spdk_vhost_dev_get_name(struct spdk_vhost_dev *vdev)
{
	assert(vdev != NULL);
	return vdev->name;
}
200 
/* Return the cpumask of the SPDK thread servicing this controller.
 * @vdev must be non-NULL. */
const struct spdk_cpuset *
spdk_vhost_dev_get_cpumask(struct spdk_vhost_dev *vdev)
{
	assert(vdev != NULL);
	return spdk_thread_get_cpumask(vdev->thread);
}
207 
/* Emit backend-specific controller info into JSON write context @w.
 * Pure delegation; every backend must provide dump_info_json. */
void
vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
{
	assert(vdev->backend->dump_info_json != NULL);
	vdev->backend->dump_info_json(vdev, w);
}
214 
215 int
216 spdk_vhost_dev_remove(struct spdk_vhost_dev *vdev)
217 {
218 	return vdev->backend->remove_device(vdev);
219 }
220 
/* Configure interrupt coalescing for @vdev via its backend.
 * Returns the backend's result code. */
int
spdk_vhost_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
			  uint32_t iops_threshold)
{
	assert(vdev->backend->set_coalescing != NULL);
	return vdev->backend->set_coalescing(vdev, delay_base_us, iops_threshold);
}
228 
/* Read the current coalescing settings of @vdev into the out parameters
 * via its backend. */
void
spdk_vhost_get_coalescing(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
			  uint32_t *iops_threshold)
{
	assert(vdev->backend->get_coalescing != NULL);
	vdev->backend->get_coalescing(vdev, delay_base_us, iops_threshold);
}
236 
/* Acquire the global vhost mutex guarding the controller list. */
void
spdk_vhost_lock(void)
{
	pthread_mutex_lock(&g_vhost_mutex);
}
242 
/* Try to acquire the global vhost mutex without blocking.
 * Returns 0 on success, or a negative errno (e.g. -EBUSY) if held. */
int
spdk_vhost_trylock(void)
{
	return -pthread_mutex_trylock(&g_vhost_mutex);
}
248 
/* Release the global vhost mutex. */
void
spdk_vhost_unlock(void)
{
	pthread_mutex_unlock(&g_vhost_mutex);
}
254 
255 void
256 spdk_vhost_scsi_init(spdk_vhost_init_cb init_cb)
257 {
258 	uint32_t i;
259 	int ret = 0;
260 
261 	ret = vhost_user_init();
262 	if (ret != 0) {
263 		init_cb(ret);
264 		return;
265 	}
266 
267 	spdk_cpuset_zero(&g_vhost_core_mask);
268 	SPDK_ENV_FOREACH_CORE(i) {
269 		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
270 	}
271 	init_cb(ret);
272 }
273 
/*
 * Continue shutdown after the vhost-user layer stopped: remove every
 * registered controller.  g_fini_cb fires from vhost_dev_unregister()
 * when the last device disappears; if nothing is registered, fire it here.
 */
static void
vhost_fini(void)
{
	struct spdk_vhost_dev *vdev, *tmp;

	/* Single lookup of the list head (the original code queried it twice). */
	vdev = spdk_vhost_dev_next(NULL);
	if (vdev == NULL) {
		g_fini_cb();
		return;
	}

	while (vdev != NULL) {
		tmp = spdk_vhost_dev_next(vdev);
		spdk_vhost_dev_remove(vdev);
		/* don't care if it fails, there's nothing we can do for now */
		vdev = tmp;
	}

	/* g_fini_cb will get called when last device is unregistered. */
}
294 
295 void
296 spdk_vhost_blk_init(spdk_vhost_init_cb init_cb)
297 {
298 	uint32_t i;
299 	int ret = 0;
300 
301 	ret = virtio_blk_transport_create("vhost_user_blk", NULL);
302 	if (ret != 0) {
303 		goto out;
304 	}
305 
306 	spdk_cpuset_zero(&g_vhost_core_mask);
307 	SPDK_ENV_FOREACH_CORE(i) {
308 		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
309 	}
310 out:
311 	init_cb(ret);
312 }
313 
/* Begin SCSI vhost teardown: stash @fini_cb for later, then stop the
 * vhost-user layer; vhost_fini continues the cleanup once that completes. */
void
spdk_vhost_scsi_fini(spdk_vhost_fini_cb fini_cb)
{
	g_fini_cb = fini_cb;

	vhost_user_fini(vhost_fini);
}
321 
322 static void
323 virtio_blk_transports_destroy(void)
324 {
325 	struct spdk_virtio_blk_transport *transport = TAILQ_FIRST(&g_virtio_blk_transports);
326 
327 	if (transport == NULL) {
328 		g_fini_cb();
329 		return;
330 	}
331 	TAILQ_REMOVE(&g_virtio_blk_transports, transport, tailq);
332 	virtio_blk_transport_destroy(transport, virtio_blk_transports_destroy);
333 }
334 
/* Begin BLK vhost teardown: stash @fini_cb, then start draining the
 * transport list; @fini_cb fires once every transport is destroyed. */
void
spdk_vhost_blk_fini(spdk_vhost_fini_cb fini_cb)
{
	g_fini_cb = fini_cb;

	virtio_blk_transports_destroy();
}
342 
343 static void
344 vhost_user_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
345 {
346 	uint32_t delay_base_us;
347 	uint32_t iops_threshold;
348 
349 	vdev->backend->write_config_json(vdev, w);
350 
351 	spdk_vhost_get_coalescing(vdev, &delay_base_us, &iops_threshold);
352 	if (delay_base_us) {
353 		spdk_json_write_object_begin(w);
354 		spdk_json_write_named_string(w, "method", "vhost_controller_set_coalescing");
355 
356 		spdk_json_write_named_object_begin(w, "params");
357 		spdk_json_write_named_string(w, "ctrlr", vdev->name);
358 		spdk_json_write_named_uint32(w, "delay_base_us", delay_base_us);
359 		spdk_json_write_named_uint32(w, "iops_threshold", iops_threshold);
360 		spdk_json_write_object_end(w);
361 
362 		spdk_json_write_object_end(w);
363 	}
364 }
365 
366 void
367 spdk_vhost_scsi_config_json(struct spdk_json_write_ctx *w)
368 {
369 	struct spdk_vhost_dev *vdev;
370 
371 	spdk_json_write_array_begin(w);
372 
373 	spdk_vhost_lock();
374 	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
375 	     vdev = spdk_vhost_dev_next(vdev)) {
376 		if (vdev->backend->type == VHOST_BACKEND_SCSI) {
377 			vhost_user_config_json(vdev, w);
378 		}
379 	}
380 	spdk_vhost_unlock();
381 
382 	spdk_json_write_array_end(w);
383 }
384 
385 static void
386 vhost_blk_dump_config_json(struct spdk_json_write_ctx *w)
387 {
388 	struct spdk_virtio_blk_transport *transport;
389 
390 	/* Write vhost transports */
391 	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
392 		/* Since vhost_user_blk is always added on SPDK startup,
393 		 * do not emit virtio_blk_create_transport RPC. */
394 		if (strcasecmp(transport->ops->name, "vhost_user_blk") != 0) {
395 			spdk_json_write_object_begin(w);
396 			spdk_json_write_named_string(w, "method", "virtio_blk_create_transport");
397 			spdk_json_write_named_object_begin(w, "params");
398 			transport->ops->dump_opts(transport, w);
399 			spdk_json_write_object_end(w);
400 			spdk_json_write_object_end(w);
401 		}
402 	}
403 }
404 
405 void
406 spdk_vhost_blk_config_json(struct spdk_json_write_ctx *w)
407 {
408 	struct spdk_vhost_dev *vdev;
409 
410 	spdk_json_write_array_begin(w);
411 
412 	spdk_vhost_lock();
413 	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
414 	     vdev = spdk_vhost_dev_next(vdev)) {
415 		if (vdev->backend->type == VHOST_BACKEND_BLK) {
416 			vhost_user_config_json(vdev, w);
417 		}
418 	}
419 	spdk_vhost_unlock();
420 
421 	vhost_blk_dump_config_json(w);
422 
423 	spdk_json_write_array_end(w);
424 }
425 
426 void
427 virtio_blk_transport_register(const struct spdk_virtio_blk_transport_ops *ops)
428 {
429 	struct virtio_blk_transport_ops_list_element *new_ops;
430 
431 	if (virtio_blk_get_transport_ops(ops->name) != NULL) {
432 		SPDK_ERRLOG("Double registering virtio blk transport type %s.\n", ops->name);
433 		assert(false);
434 		return;
435 	}
436 
437 	new_ops = calloc(1, sizeof(*new_ops));
438 	if (new_ops == NULL) {
439 		SPDK_ERRLOG("Unable to allocate memory to register new transport type %s.\n", ops->name);
440 		assert(false);
441 		return;
442 	}
443 
444 	new_ops->ops = *ops;
445 
446 	TAILQ_INSERT_TAIL(&g_spdk_virtio_blk_transport_ops, new_ops, link);
447 }
448 
449 int
450 virtio_blk_transport_create(const char *transport_name,
451 			    const struct spdk_json_val *params)
452 {
453 	const struct spdk_virtio_blk_transport_ops *ops = NULL;
454 	struct spdk_virtio_blk_transport *transport;
455 
456 	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
457 		if (strcasecmp(transport->ops->name, transport_name) == 0) {
458 			return -EEXIST;
459 		}
460 	}
461 
462 	ops = virtio_blk_get_transport_ops(transport_name);
463 	if (!ops) {
464 		SPDK_ERRLOG("Transport type '%s' unavailable.\n", transport_name);
465 		return -ENOENT;
466 	}
467 
468 	transport = ops->create(params);
469 	if (!transport) {
470 		SPDK_ERRLOG("Unable to create new transport of type %s\n", transport_name);
471 		return -EPERM;
472 	}
473 
474 	transport->ops = ops;
475 	TAILQ_INSERT_TAIL(&g_virtio_blk_transports, transport, tailq);
476 	return 0;
477 }
478 
/* Return the first created virtio-blk transport, or NULL if none exist. */
struct spdk_virtio_blk_transport *
virtio_blk_transport_get_first(void)
{
	return TAILQ_FIRST(&g_virtio_blk_transports);
}
484 
/* Return the transport following @transport in creation order,
 * or NULL at the end of the list. */
struct spdk_virtio_blk_transport *
virtio_blk_transport_get_next(struct spdk_virtio_blk_transport *transport)
{
	return TAILQ_NEXT(transport, tailq);
}
490 
491 void
492 virtio_blk_transport_dump_opts(struct spdk_virtio_blk_transport *transport,
493 			       struct spdk_json_write_ctx *w)
494 {
495 	spdk_json_write_object_begin(w);
496 
497 	spdk_json_write_named_string(w, "name", transport->ops->name);
498 
499 	if (transport->ops->dump_opts) {
500 		transport->ops->dump_opts(transport, w);
501 	}
502 
503 	spdk_json_write_object_end(w);
504 }
505 
506 struct spdk_virtio_blk_transport *
507 virtio_blk_tgt_get_transport(const char *transport_name)
508 {
509 	struct spdk_virtio_blk_transport *transport;
510 
511 	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
512 		if (strcasecmp(transport->ops->name, transport_name) == 0) {
513 			return transport;
514 		}
515 	}
516 	return NULL;
517 }
518 
/* Destroy @transport via its ops table; @cb_fn is invoked when the
 * (possibly asynchronous) teardown completes.  Returns the ops' result. */
int
virtio_blk_transport_destroy(struct spdk_virtio_blk_transport *transport,
			     spdk_vhost_fini_cb cb_fn)
{
	return transport->ops->destroy(transport, cb_fn);
}
525 
/* Log flags used by SPDK_INFOLOG/SPDK_DEBUGLOG in the vhost library. */
SPDK_LOG_REGISTER_COMPONENT(vhost)
SPDK_LOG_REGISTER_COMPONENT(vhost_ring)
528