xref: /spdk/lib/vhost/vhost.c (revision 12fbe739a31b09aff0d05f354d4f3bbef99afc55)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation. All rights reserved.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "spdk/env.h"
9 #include "spdk/likely.h"
10 #include "spdk/string.h"
11 #include "spdk/util.h"
12 #include "spdk/memory.h"
13 #include "spdk/barrier.h"
14 #include "spdk/vhost.h"
15 #include "vhost_internal.h"
16 #include "spdk/queue.h"
17 
18 
/* Set of CPU cores vhost controllers are allowed to run on; populated at
 * subsystem init from SPDK_ENV_FOREACH_CORE. */
static struct spdk_cpuset g_vhost_core_mask;

/* Global list of all registered vhost controllers (both SCSI and blk). */
static TAILQ_HEAD(, spdk_vhost_dev) g_vhost_devices = TAILQ_HEAD_INITIALIZER(
			g_vhost_devices);
/* Guards g_vhost_devices; taken via spdk_vhost_lock()/spdk_vhost_unlock(). */
static pthread_mutex_t g_vhost_mutex = PTHREAD_MUTEX_INITIALIZER;

/* List of virtio-blk transports created via virtio_blk_transport_create(). */
static TAILQ_HEAD(, spdk_virtio_blk_transport) g_virtio_blk_transports = TAILQ_HEAD_INITIALIZER(
			g_virtio_blk_transports);

/* Completion callback stashed by the fini entry points; invoked once the
 * last device/transport is torn down. */
static spdk_vhost_fini_cb g_fini_cb;
29 
30 struct spdk_vhost_dev *
31 spdk_vhost_dev_next(struct spdk_vhost_dev *vdev)
32 {
33 	if (vdev == NULL) {
34 		return TAILQ_FIRST(&g_vhost_devices);
35 	}
36 
37 	return TAILQ_NEXT(vdev, tailq);
38 }
39 
40 struct spdk_vhost_dev *
41 spdk_vhost_dev_find(const char *ctrlr_name)
42 {
43 	struct spdk_vhost_dev *vdev;
44 
45 	TAILQ_FOREACH(vdev, &g_vhost_devices, tailq) {
46 		if (strcmp(vdev->name, ctrlr_name) == 0) {
47 			return vdev;
48 		}
49 	}
50 
51 	return NULL;
52 }
53 
54 static int
55 vhost_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
56 {
57 	int rc;
58 	struct spdk_cpuset negative_vhost_mask;
59 
60 	if (cpumask == NULL) {
61 		return -1;
62 	}
63 
64 	if (mask == NULL) {
65 		spdk_cpuset_copy(cpumask, &g_vhost_core_mask);
66 		return 0;
67 	}
68 
69 	rc = spdk_cpuset_parse(cpumask, mask);
70 	if (rc < 0) {
71 		SPDK_ERRLOG("invalid cpumask %s\n", mask);
72 		return -1;
73 	}
74 
75 	spdk_cpuset_copy(&negative_vhost_mask, &g_vhost_core_mask);
76 	spdk_cpuset_negate(&negative_vhost_mask);
77 	spdk_cpuset_and(&negative_vhost_mask, cpumask);
78 
79 	if (spdk_cpuset_count(&negative_vhost_mask) != 0) {
80 		SPDK_ERRLOG("one of selected cpu is outside of core mask(=%s)\n",
81 			    spdk_cpuset_fmt(&g_vhost_core_mask));
82 		return -1;
83 	}
84 
85 	spdk_cpuset_and(cpumask, &g_vhost_core_mask);
86 
87 	if (spdk_cpuset_count(cpumask) == 0) {
88 		SPDK_ERRLOG("no cpu is selected among core mask(=%s)\n",
89 			    spdk_cpuset_fmt(&g_vhost_core_mask));
90 		return -1;
91 	}
92 
93 	return 0;
94 }
95 
/* Registry of all known virtio-blk transport ops, filled in by
 * virtio_blk_transport_register(). */
TAILQ_HEAD(, virtio_blk_transport_ops_list_element)
g_spdk_virtio_blk_transport_ops = TAILQ_HEAD_INITIALIZER(g_spdk_virtio_blk_transport_ops);
98 
99 const struct spdk_virtio_blk_transport_ops *
100 virtio_blk_get_transport_ops(const char *transport_name)
101 {
102 	struct virtio_blk_transport_ops_list_element *ops;
103 	TAILQ_FOREACH(ops, &g_spdk_virtio_blk_transport_ops, link) {
104 		if (strcasecmp(transport_name, ops->ops.name) == 0) {
105 			return &ops->ops;
106 		}
107 	}
108 	return NULL;
109 }
110 
111 int
112 vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
113 		   const struct spdk_json_val *params,
114 		   const struct spdk_vhost_dev_backend *backend,
115 		   const struct spdk_vhost_user_dev_backend *user_backend)
116 {
117 	struct spdk_cpuset cpumask = {};
118 	int rc;
119 
120 	assert(vdev);
121 	if (name == NULL) {
122 		SPDK_ERRLOG("Can't register controller with no name\n");
123 		return -EINVAL;
124 	}
125 
126 	if (vhost_parse_core_mask(mask_str, &cpumask) != 0) {
127 		SPDK_ERRLOG("cpumask %s is invalid (core mask is 0x%s)\n",
128 			    mask_str, spdk_cpuset_fmt(&g_vhost_core_mask));
129 		return -EINVAL;
130 	}
131 
132 	spdk_vhost_lock();
133 	if (spdk_vhost_dev_find(name)) {
134 		SPDK_ERRLOG("vhost controller %s already exists.\n", name);
135 		spdk_vhost_unlock();
136 		return -EEXIST;
137 	}
138 
139 	vdev->name = strdup(name);
140 	if (vdev->name == NULL) {
141 		spdk_vhost_unlock();
142 		return -EIO;
143 	}
144 
145 	vdev->backend = backend;
146 	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
147 		rc = vhost_user_dev_register(vdev, name, &cpumask, user_backend);
148 	} else {
149 		rc = virtio_blk_construct_ctrlr(vdev, name, &cpumask, params, user_backend);
150 	}
151 	if (rc != 0) {
152 		free(vdev->name);
153 		spdk_vhost_unlock();
154 		return rc;
155 	}
156 
157 	TAILQ_INSERT_TAIL(&g_vhost_devices, vdev, tailq);
158 	spdk_vhost_unlock();
159 
160 	SPDK_INFOLOG(vhost, "Controller %s: new controller added\n", vdev->name);
161 	return 0;
162 }
163 
164 int
165 vhost_dev_unregister(struct spdk_vhost_dev *vdev)
166 {
167 	int rc;
168 
169 	spdk_vhost_lock();
170 	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
171 		rc = vhost_user_dev_unregister(vdev);
172 	} else {
173 		rc = virtio_blk_destroy_ctrlr(vdev);
174 	}
175 	if (rc != 0) {
176 		spdk_vhost_unlock();
177 		return rc;
178 	}
179 
180 	SPDK_INFOLOG(vhost, "Controller %s: removed\n", vdev->name);
181 
182 	free(vdev->name);
183 
184 	TAILQ_REMOVE(&g_vhost_devices, vdev, tailq);
185 	if (TAILQ_EMPTY(&g_vhost_devices) && g_fini_cb != NULL) {
186 		g_fini_cb();
187 	}
188 	spdk_vhost_unlock();
189 
190 	return 0;
191 }
192 
/* Return the controller's name. vdev must not be NULL. */
const char *
spdk_vhost_dev_get_name(struct spdk_vhost_dev *vdev)
{
	assert(vdev != NULL);
	return vdev->name;
}
199 
/* Return the cpumask of the thread this controller runs on.
 * vdev must not be NULL. */
const struct spdk_cpuset *
spdk_vhost_dev_get_cpumask(struct spdk_vhost_dev *vdev)
{
	assert(vdev != NULL);
	return spdk_thread_get_cpumask(vdev->thread);
}
206 
/* Delegate controller info dumping to the backend's JSON writer. */
void
vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
{
	assert(vdev->backend->dump_info_json != NULL);
	vdev->backend->dump_info_json(vdev, w);
}
213 
/* Remove a controller via its backend; returns the backend's error code. */
int
spdk_vhost_dev_remove(struct spdk_vhost_dev *vdev)
{
	return vdev->backend->remove_device(vdev);
}
219 
/* Set interrupt-coalescing parameters on a controller via its backend.
 * Returns the backend's error code. */
int
spdk_vhost_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
			  uint32_t iops_threshold)
{
	assert(vdev->backend->set_coalescing != NULL);
	return vdev->backend->set_coalescing(vdev, delay_base_us, iops_threshold);
}
227 
/* Read the controller's current interrupt-coalescing parameters into the
 * output arguments via the backend. */
void
spdk_vhost_get_coalescing(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
			  uint32_t *iops_threshold)
{
	assert(vdev->backend->get_coalescing != NULL);
	vdev->backend->get_coalescing(vdev, delay_base_us, iops_threshold);
}
235 
/* Acquire the global vhost mutex guarding the controller list. */
void
spdk_vhost_lock(void)
{
	pthread_mutex_lock(&g_vhost_mutex);
}
241 
/* Try to acquire the global vhost mutex without blocking.
 * Returns 0 on success, or a negative errno (e.g. -EBUSY) on failure. */
int
spdk_vhost_trylock(void)
{
	return -pthread_mutex_trylock(&g_vhost_mutex);
}
247 
/* Release the global vhost mutex. */
void
spdk_vhost_unlock(void)
{
	pthread_mutex_unlock(&g_vhost_mutex);
}
253 
254 void
255 spdk_vhost_scsi_init(spdk_vhost_init_cb init_cb)
256 {
257 	uint32_t i;
258 	int ret = 0;
259 
260 	ret = vhost_user_init();
261 	if (ret != 0) {
262 		init_cb(ret);
263 		return;
264 	}
265 
266 	spdk_cpuset_zero(&g_vhost_core_mask);
267 	SPDK_ENV_FOREACH_CORE(i) {
268 		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
269 	}
270 	init_cb(ret);
271 }
272 
273 static void
274 vhost_fini(void)
275 {
276 	struct spdk_vhost_dev *vdev, *tmp;
277 
278 	if (spdk_vhost_dev_next(NULL) == NULL) {
279 		g_fini_cb();
280 		return;
281 	}
282 
283 	vdev = spdk_vhost_dev_next(NULL);
284 	while (vdev != NULL) {
285 		tmp = spdk_vhost_dev_next(vdev);
286 		spdk_vhost_dev_remove(vdev);
287 		/* don't care if it fails, there's nothing we can do for now */
288 		vdev = tmp;
289 	}
290 
291 	/* g_fini_cb will get called when last device is unregistered. */
292 }
293 
294 void
295 spdk_vhost_blk_init(spdk_vhost_init_cb init_cb)
296 {
297 	uint32_t i;
298 	int ret = 0;
299 
300 	ret = virtio_blk_transport_create("vhost_user_blk", NULL);
301 	if (ret != 0) {
302 		goto out;
303 	}
304 
305 	spdk_cpuset_zero(&g_vhost_core_mask);
306 	SPDK_ENV_FOREACH_CORE(i) {
307 		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
308 	}
309 out:
310 	init_cb(ret);
311 }
312 
/* Begin vhost-scsi teardown; fini_cb runs once all devices are gone
 * (invoked from vhost_fini / vhost_dev_unregister). */
void
spdk_vhost_scsi_fini(spdk_vhost_fini_cb fini_cb)
{
	g_fini_cb = fini_cb;

	vhost_user_fini(vhost_fini);
}
320 
/* Destroy virtio-blk transports one at a time. Each destroy gets this
 * function as its completion callback, so the chain walks the list
 * asynchronously and calls g_fini_cb once the list is empty. */
static void
virtio_blk_transports_destroy(void)
{
	struct spdk_virtio_blk_transport *transport = TAILQ_FIRST(&g_virtio_blk_transports);

	if (transport == NULL) {
		/* All transports gone - signal overall completion. */
		g_fini_cb();
		return;
	}
	/* Unlink before destroying so re-entry sees the next element. */
	TAILQ_REMOVE(&g_virtio_blk_transports, transport, tailq);
	virtio_blk_transport_destroy(transport, virtio_blk_transports_destroy);
}
333 
/* Begin vhost-blk teardown; fini_cb runs once every transport has been
 * destroyed by the virtio_blk_transports_destroy() chain. */
void
spdk_vhost_blk_fini(spdk_vhost_fini_cb fini_cb)
{
	g_fini_cb = fini_cb;

	virtio_blk_transports_destroy();
}
341 
342 static void
343 vhost_user_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
344 {
345 	uint32_t delay_base_us;
346 	uint32_t iops_threshold;
347 
348 	vdev->backend->write_config_json(vdev, w);
349 
350 	spdk_vhost_get_coalescing(vdev, &delay_base_us, &iops_threshold);
351 	if (delay_base_us) {
352 		spdk_json_write_object_begin(w);
353 		spdk_json_write_named_string(w, "method", "vhost_controller_set_coalescing");
354 
355 		spdk_json_write_named_object_begin(w, "params");
356 		spdk_json_write_named_string(w, "ctrlr", vdev->name);
357 		spdk_json_write_named_uint32(w, "delay_base_us", delay_base_us);
358 		spdk_json_write_named_uint32(w, "iops_threshold", iops_threshold);
359 		spdk_json_write_object_end(w);
360 
361 		spdk_json_write_object_end(w);
362 	}
363 }
364 
365 void
366 spdk_vhost_scsi_config_json(struct spdk_json_write_ctx *w)
367 {
368 	struct spdk_vhost_dev *vdev;
369 
370 	spdk_json_write_array_begin(w);
371 
372 	spdk_vhost_lock();
373 	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
374 	     vdev = spdk_vhost_dev_next(vdev)) {
375 		if (vdev->backend->type == VHOST_BACKEND_SCSI) {
376 			vhost_user_config_json(vdev, w);
377 		}
378 	}
379 	spdk_vhost_unlock();
380 
381 	spdk_json_write_array_end(w);
382 }
383 
384 static void
385 vhost_blk_dump_config_json(struct spdk_json_write_ctx *w)
386 {
387 	struct spdk_virtio_blk_transport *transport;
388 
389 	/* Write vhost transports */
390 	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
391 		/* Since vhost_user_blk is always added on SPDK startup,
392 		 * do not emit virtio_blk_create_transport RPC. */
393 		if (strcasecmp(transport->ops->name, "vhost_user_blk") != 0) {
394 			spdk_json_write_object_begin(w);
395 			spdk_json_write_named_string(w, "method", "virtio_blk_create_transport");
396 			spdk_json_write_named_object_begin(w, "params");
397 			transport->ops->dump_opts(transport, w);
398 			spdk_json_write_object_end(w);
399 			spdk_json_write_object_end(w);
400 		}
401 	}
402 }
403 
404 void
405 spdk_vhost_blk_config_json(struct spdk_json_write_ctx *w)
406 {
407 	struct spdk_vhost_dev *vdev;
408 
409 	spdk_json_write_array_begin(w);
410 
411 	spdk_vhost_lock();
412 	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
413 	     vdev = spdk_vhost_dev_next(vdev)) {
414 		if (vdev->backend->type == VHOST_BACKEND_BLK) {
415 			vhost_user_config_json(vdev, w);
416 		}
417 	}
418 	spdk_vhost_unlock();
419 
420 	vhost_blk_dump_config_json(w);
421 
422 	spdk_json_write_array_end(w);
423 }
424 
425 void
426 virtio_blk_transport_register(const struct spdk_virtio_blk_transport_ops *ops)
427 {
428 	struct virtio_blk_transport_ops_list_element *new_ops;
429 
430 	if (virtio_blk_get_transport_ops(ops->name) != NULL) {
431 		SPDK_ERRLOG("Double registering virtio blk transport type %s.\n", ops->name);
432 		assert(false);
433 		return;
434 	}
435 
436 	new_ops = calloc(1, sizeof(*new_ops));
437 	if (new_ops == NULL) {
438 		SPDK_ERRLOG("Unable to allocate memory to register new transport type %s.\n", ops->name);
439 		assert(false);
440 		return;
441 	}
442 
443 	new_ops->ops = *ops;
444 
445 	TAILQ_INSERT_TAIL(&g_spdk_virtio_blk_transport_ops, new_ops, link);
446 }
447 
448 int
449 virtio_blk_transport_create(const char *transport_name,
450 			    const struct spdk_json_val *params)
451 {
452 	const struct spdk_virtio_blk_transport_ops *ops = NULL;
453 	struct spdk_virtio_blk_transport *transport;
454 
455 	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
456 		if (strcasecmp(transport->ops->name, transport_name) == 0) {
457 			return -EEXIST;
458 		}
459 	}
460 
461 	ops = virtio_blk_get_transport_ops(transport_name);
462 	if (!ops) {
463 		SPDK_ERRLOG("Transport type '%s' unavailable.\n", transport_name);
464 		return -ENOENT;
465 	}
466 
467 	transport = ops->create(params);
468 	if (!transport) {
469 		SPDK_ERRLOG("Unable to create new transport of type %s\n", transport_name);
470 		return -EPERM;
471 	}
472 
473 	transport->ops = ops;
474 	TAILQ_INSERT_TAIL(&g_virtio_blk_transports, transport, tailq);
475 	return 0;
476 }
477 
/* Return the first created transport, or NULL when none exist. */
struct spdk_virtio_blk_transport *
virtio_blk_transport_get_first(void)
{
	return TAILQ_FIRST(&g_virtio_blk_transports);
}
483 
/* Return the transport following the given one, or NULL at the end. */
struct spdk_virtio_blk_transport *
virtio_blk_transport_get_next(struct spdk_virtio_blk_transport *transport)
{
	return TAILQ_NEXT(transport, tailq);
}
489 
490 void
491 virtio_blk_transport_dump_opts(struct spdk_virtio_blk_transport *transport,
492 			       struct spdk_json_write_ctx *w)
493 {
494 	spdk_json_write_object_begin(w);
495 
496 	spdk_json_write_named_string(w, "name", transport->ops->name);
497 
498 	if (transport->ops->dump_opts) {
499 		transport->ops->dump_opts(transport, w);
500 	}
501 
502 	spdk_json_write_object_end(w);
503 }
504 
505 struct spdk_virtio_blk_transport *
506 virtio_blk_tgt_get_transport(const char *transport_name)
507 {
508 	struct spdk_virtio_blk_transport *transport;
509 
510 	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
511 		if (strcasecmp(transport->ops->name, transport_name) == 0) {
512 			return transport;
513 		}
514 	}
515 	return NULL;
516 }
517 
/* Destroy a transport via its ops; cb_fn is invoked on completion.
 * Returns the backend's error code. */
int
virtio_blk_transport_destroy(struct spdk_virtio_blk_transport *transport,
			     spdk_vhost_fini_cb cb_fn)
{
	return transport->ops->destroy(transport, cb_fn);
}
524 
/* Register the "vhost" and "vhost_ring" log flags used by
 * SPDK_INFOLOG/SPDK_DEBUGLOG calls in this library. */
SPDK_LOG_REGISTER_COMPONENT(vhost)
SPDK_LOG_REGISTER_COMPONENT(vhost_ring)
527