xref: /spdk/lib/vhost/vhost.c (revision 60982c759db49b4f4579f16e3b24df0725ba4b94)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation. All rights reserved.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "spdk/env.h"
9 #include "spdk/likely.h"
10 #include "spdk/string.h"
11 #include "spdk/util.h"
12 #include "spdk/memory.h"
13 #include "spdk/barrier.h"
14 #include "spdk/vhost.h"
15 #include "vhost_internal.h"
16 #include "spdk/queue.h"
17 
18 
/* Mask of CPU cores usable by vhost; populated during subsystem init. */
static struct spdk_cpuset g_vhost_core_mask;

/* All registered vhost devices; protected by g_vhost_mutex. */
static TAILQ_HEAD(, spdk_vhost_dev) g_vhost_devices = TAILQ_HEAD_INITIALIZER(
			g_vhost_devices);
static pthread_mutex_t g_vhost_mutex = PTHREAD_MUTEX_INITIALIZER;

/* All created virtio-blk transport instances. */
static TAILQ_HEAD(, spdk_virtio_blk_transport) g_virtio_blk_transports = TAILQ_HEAD_INITIALIZER(
			g_virtio_blk_transports);

/* Shutdown completion callback, invoked once teardown finishes. */
static spdk_vhost_fini_cb g_fini_cb;
29 
30 struct spdk_vhost_dev *
31 spdk_vhost_dev_next(struct spdk_vhost_dev *vdev)
32 {
33 	if (vdev == NULL) {
34 		return TAILQ_FIRST(&g_vhost_devices);
35 	}
36 
37 	return TAILQ_NEXT(vdev, tailq);
38 }
39 
40 struct spdk_vhost_dev *
41 spdk_vhost_dev_find(const char *ctrlr_name)
42 {
43 	struct spdk_vhost_dev *vdev;
44 
45 	TAILQ_FOREACH(vdev, &g_vhost_devices, tailq) {
46 		if (strcmp(vdev->name, ctrlr_name) == 0) {
47 			return vdev;
48 		}
49 	}
50 
51 	return NULL;
52 }
53 
54 static int
55 vhost_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
56 {
57 	int rc;
58 	struct spdk_cpuset negative_vhost_mask;
59 
60 	if (cpumask == NULL) {
61 		return -1;
62 	}
63 
64 	if (mask == NULL) {
65 		spdk_cpuset_copy(cpumask, &g_vhost_core_mask);
66 		return 0;
67 	}
68 
69 	rc = spdk_cpuset_parse(cpumask, mask);
70 	if (rc < 0) {
71 		SPDK_ERRLOG("invalid cpumask %s\n", mask);
72 		return -1;
73 	}
74 
75 	spdk_cpuset_copy(&negative_vhost_mask, &g_vhost_core_mask);
76 	spdk_cpuset_negate(&negative_vhost_mask);
77 	spdk_cpuset_and(&negative_vhost_mask, cpumask);
78 
79 	if (spdk_cpuset_count(&negative_vhost_mask) != 0) {
80 		SPDK_ERRLOG("one of selected cpu is outside of core mask(=%s)\n",
81 			    spdk_cpuset_fmt(&g_vhost_core_mask));
82 		return -1;
83 	}
84 
85 	spdk_cpuset_and(cpumask, &g_vhost_core_mask);
86 
87 	if (spdk_cpuset_count(cpumask) == 0) {
88 		SPDK_ERRLOG("no cpu is selected among core mask(=%s)\n",
89 			    spdk_cpuset_fmt(&g_vhost_core_mask));
90 		return -1;
91 	}
92 
93 	return 0;
94 }
95 
/* Registry of available virtio-blk transport implementations
 * (populated via virtio_blk_transport_register()). */
TAILQ_HEAD(, virtio_blk_transport_ops_list_element)
g_spdk_virtio_blk_transport_ops = TAILQ_HEAD_INITIALIZER(g_spdk_virtio_blk_transport_ops);
98 
99 const struct spdk_virtio_blk_transport_ops *
100 virtio_blk_get_transport_ops(const char *transport_name)
101 {
102 	struct virtio_blk_transport_ops_list_element *ops;
103 	TAILQ_FOREACH(ops, &g_spdk_virtio_blk_transport_ops, link) {
104 		if (strcasecmp(transport_name, ops->ops.name) == 0) {
105 			return &ops->ops;
106 		}
107 	}
108 	return NULL;
109 }
110 
111 int
112 vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
113 		   const struct spdk_json_val *params,
114 		   const struct spdk_vhost_dev_backend *backend,
115 		   const struct spdk_vhost_user_dev_backend *user_backend)
116 {
117 	struct spdk_cpuset cpumask = {};
118 	int rc;
119 
120 	assert(vdev);
121 	if (name == NULL) {
122 		SPDK_ERRLOG("Can't register controller with no name\n");
123 		return -EINVAL;
124 	}
125 
126 	if (vhost_parse_core_mask(mask_str, &cpumask) != 0) {
127 		SPDK_ERRLOG("cpumask %s is invalid (core mask is 0x%s)\n",
128 			    mask_str, spdk_cpuset_fmt(&g_vhost_core_mask));
129 		return -EINVAL;
130 	}
131 
132 	spdk_vhost_lock();
133 	if (spdk_vhost_dev_find(name)) {
134 		SPDK_ERRLOG("vhost controller %s already exists.\n", name);
135 		spdk_vhost_unlock();
136 		return -EEXIST;
137 	}
138 
139 	vdev->name = strdup(name);
140 	if (vdev->name == NULL) {
141 		spdk_vhost_unlock();
142 		return -EIO;
143 	}
144 
145 	vdev->backend = backend;
146 	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
147 		rc = vhost_user_dev_register(vdev, name, &cpumask, user_backend);
148 	} else {
149 		rc = virtio_blk_construct_ctrlr(vdev, name, &cpumask, params, user_backend);
150 	}
151 	if (rc != 0) {
152 		free(vdev->name);
153 		spdk_vhost_unlock();
154 		return rc;
155 	}
156 
157 	TAILQ_INSERT_TAIL(&g_vhost_devices, vdev, tailq);
158 	spdk_vhost_unlock();
159 
160 	SPDK_INFOLOG(vhost, "Controller %s: new controller added\n", vdev->name);
161 	return 0;
162 }
163 
164 int
165 vhost_dev_unregister(struct spdk_vhost_dev *vdev)
166 {
167 	int rc;
168 
169 	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
170 		rc = vhost_user_dev_unregister(vdev);
171 	} else {
172 		rc = virtio_blk_destroy_ctrlr(vdev);
173 	}
174 	if (rc != 0) {
175 		return rc;
176 	}
177 
178 	SPDK_INFOLOG(vhost, "Controller %s: removed\n", vdev->name);
179 
180 	free(vdev->name);
181 
182 	spdk_vhost_lock();
183 	TAILQ_REMOVE(&g_vhost_devices, vdev, tailq);
184 	if (TAILQ_EMPTY(&g_vhost_devices) && g_fini_cb != NULL) {
185 		g_fini_cb();
186 	}
187 	spdk_vhost_unlock();
188 
189 	return 0;
190 }
191 
192 const char *
193 spdk_vhost_dev_get_name(struct spdk_vhost_dev *vdev)
194 {
195 	assert(vdev != NULL);
196 	return vdev->name;
197 }
198 
199 const struct spdk_cpuset *
200 spdk_vhost_dev_get_cpumask(struct spdk_vhost_dev *vdev)
201 {
202 	assert(vdev != NULL);
203 	return spdk_thread_get_cpumask(vdev->thread);
204 }
205 
206 void
207 vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
208 {
209 	assert(vdev->backend->dump_info_json != NULL);
210 	vdev->backend->dump_info_json(vdev, w);
211 }
212 
213 int
214 spdk_vhost_dev_remove(struct spdk_vhost_dev *vdev)
215 {
216 	return vdev->backend->remove_device(vdev);
217 }
218 
219 int
220 spdk_vhost_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
221 			  uint32_t iops_threshold)
222 {
223 	assert(vdev->backend->set_coalescing != NULL);
224 	return vdev->backend->set_coalescing(vdev, delay_base_us, iops_threshold);
225 }
226 
227 void
228 spdk_vhost_get_coalescing(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
229 			  uint32_t *iops_threshold)
230 {
231 	assert(vdev->backend->get_coalescing != NULL);
232 	vdev->backend->get_coalescing(vdev, delay_base_us, iops_threshold);
233 }
234 
/* Acquire the global vhost device-list mutex (blocking). */
void
spdk_vhost_lock(void)
{
	pthread_mutex_lock(&g_vhost_mutex);
}
240 
/* Try to acquire the global vhost mutex without blocking.
 * Returns 0 on success or a negative errno (e.g. -EBUSY) on contention. */
int
spdk_vhost_trylock(void)
{
	return -pthread_mutex_trylock(&g_vhost_mutex);
}
246 
/* Release the global vhost device-list mutex. */
void
spdk_vhost_unlock(void)
{
	pthread_mutex_unlock(&g_vhost_mutex);
}
252 
253 void
254 spdk_vhost_scsi_init(spdk_vhost_init_cb init_cb)
255 {
256 	uint32_t i;
257 	int ret = 0;
258 
259 	ret = vhost_user_init();
260 	if (ret != 0) {
261 		init_cb(ret);
262 		return;
263 	}
264 
265 	spdk_cpuset_zero(&g_vhost_core_mask);
266 	SPDK_ENV_FOREACH_CORE(i) {
267 		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
268 	}
269 	init_cb(ret);
270 }
271 
/* Remove every remaining vhost device.  With no devices left, finish
 * immediately; otherwise g_fini_cb fires from the final unregister. */
static void
vhost_fini(void)
{
	struct spdk_vhost_dev *vdev, *next;

	vdev = spdk_vhost_dev_next(NULL);
	if (vdev == NULL) {
		g_fini_cb();
		return;
	}

	while (vdev != NULL) {
		next = spdk_vhost_dev_next(vdev);
		/* Removal failures are ignored - nothing we can do during
		 * shutdown. */
		spdk_vhost_dev_remove(vdev);
		vdev = next;
	}

	/* g_fini_cb will get called when last device is unregistered. */
}
292 
293 void
294 spdk_vhost_blk_init(spdk_vhost_init_cb init_cb)
295 {
296 	uint32_t i;
297 	int ret = 0;
298 
299 	ret = virtio_blk_transport_create("vhost_user_blk", NULL);
300 	if (ret != 0) {
301 		goto out;
302 	}
303 
304 	spdk_cpuset_zero(&g_vhost_core_mask);
305 	SPDK_ENV_FOREACH_CORE(i) {
306 		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
307 	}
308 out:
309 	init_cb(ret);
310 }
311 
/* Start asynchronous vhost-SCSI shutdown.  fini_cb is stored globally and
 * invoked once all devices and the vhost-user layer are torn down. */
void
spdk_vhost_scsi_fini(spdk_vhost_fini_cb fini_cb)
{
	g_fini_cb = fini_cb;

	vhost_user_fini(vhost_fini);
}
319 
320 static void
321 virtio_blk_transports_destroy(void)
322 {
323 	struct spdk_virtio_blk_transport *transport = TAILQ_FIRST(&g_virtio_blk_transports);
324 
325 	if (transport == NULL) {
326 		g_fini_cb();
327 		return;
328 	}
329 	TAILQ_REMOVE(&g_virtio_blk_transports, transport, tailq);
330 	virtio_blk_transport_destroy(transport, virtio_blk_transports_destroy);
331 }
332 
/* Start asynchronous vhost-blk shutdown.  fini_cb is stored globally and
 * invoked once all transports have been destroyed. */
void
spdk_vhost_blk_fini(spdk_vhost_fini_cb fini_cb)
{
	g_fini_cb = fini_cb;

	virtio_blk_transports_destroy();
}
340 
341 static void
342 vhost_user_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
343 {
344 	uint32_t delay_base_us;
345 	uint32_t iops_threshold;
346 
347 	vdev->backend->write_config_json(vdev, w);
348 
349 	spdk_vhost_get_coalescing(vdev, &delay_base_us, &iops_threshold);
350 	if (delay_base_us) {
351 		spdk_json_write_object_begin(w);
352 		spdk_json_write_named_string(w, "method", "vhost_controller_set_coalescing");
353 
354 		spdk_json_write_named_object_begin(w, "params");
355 		spdk_json_write_named_string(w, "ctrlr", vdev->name);
356 		spdk_json_write_named_uint32(w, "delay_base_us", delay_base_us);
357 		spdk_json_write_named_uint32(w, "iops_threshold", iops_threshold);
358 		spdk_json_write_object_end(w);
359 
360 		spdk_json_write_object_end(w);
361 	}
362 }
363 
364 void
365 spdk_vhost_scsi_config_json(struct spdk_json_write_ctx *w)
366 {
367 	struct spdk_vhost_dev *vdev;
368 
369 	spdk_json_write_array_begin(w);
370 
371 	spdk_vhost_lock();
372 	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
373 	     vdev = spdk_vhost_dev_next(vdev)) {
374 		if (vdev->backend->type == VHOST_BACKEND_SCSI) {
375 			vhost_user_config_json(vdev, w);
376 		}
377 	}
378 	spdk_vhost_unlock();
379 
380 	spdk_json_write_array_end(w);
381 }
382 
383 static void
384 vhost_blk_dump_config_json(struct spdk_json_write_ctx *w)
385 {
386 	struct spdk_virtio_blk_transport *transport;
387 
388 	/* Write vhost transports */
389 	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
390 		/* Since vhost_user_blk is always added on SPDK startup,
391 		 * do not emit virtio_blk_create_transport RPC. */
392 		if (strcasecmp(transport->ops->name, "vhost_user_blk") != 0) {
393 			spdk_json_write_object_begin(w);
394 			spdk_json_write_named_string(w, "method", "virtio_blk_create_transport");
395 			spdk_json_write_named_object_begin(w, "params");
396 			transport->ops->dump_opts(transport, w);
397 			spdk_json_write_object_end(w);
398 			spdk_json_write_object_end(w);
399 		}
400 	}
401 }
402 
403 void
404 spdk_vhost_blk_config_json(struct spdk_json_write_ctx *w)
405 {
406 	struct spdk_vhost_dev *vdev;
407 
408 	spdk_json_write_array_begin(w);
409 
410 	spdk_vhost_lock();
411 	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
412 	     vdev = spdk_vhost_dev_next(vdev)) {
413 		if (vdev->backend->type == VHOST_BACKEND_BLK) {
414 			vhost_user_config_json(vdev, w);
415 		}
416 	}
417 	spdk_vhost_unlock();
418 
419 	vhost_blk_dump_config_json(w);
420 
421 	spdk_json_write_array_end(w);
422 }
423 
424 void
425 virtio_blk_transport_register(const struct spdk_virtio_blk_transport_ops *ops)
426 {
427 	struct virtio_blk_transport_ops_list_element *new_ops;
428 
429 	if (virtio_blk_get_transport_ops(ops->name) != NULL) {
430 		SPDK_ERRLOG("Double registering virtio blk transport type %s.\n", ops->name);
431 		assert(false);
432 		return;
433 	}
434 
435 	new_ops = calloc(1, sizeof(*new_ops));
436 	if (new_ops == NULL) {
437 		SPDK_ERRLOG("Unable to allocate memory to register new transport type %s.\n", ops->name);
438 		assert(false);
439 		return;
440 	}
441 
442 	new_ops->ops = *ops;
443 
444 	TAILQ_INSERT_TAIL(&g_spdk_virtio_blk_transport_ops, new_ops, link);
445 }
446 
447 int
448 virtio_blk_transport_create(const char *transport_name,
449 			    const struct spdk_json_val *params)
450 {
451 	const struct spdk_virtio_blk_transport_ops *ops = NULL;
452 	struct spdk_virtio_blk_transport *transport;
453 
454 	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
455 		if (strcasecmp(transport->ops->name, transport_name) == 0) {
456 			return -EEXIST;
457 		}
458 	}
459 
460 	ops = virtio_blk_get_transport_ops(transport_name);
461 	if (!ops) {
462 		SPDK_ERRLOG("Transport type '%s' unavailable.\n", transport_name);
463 		return -ENOENT;
464 	}
465 
466 	transport = ops->create(params);
467 	if (!transport) {
468 		SPDK_ERRLOG("Unable to create new transport of type %s\n", transport_name);
469 		return -EPERM;
470 	}
471 
472 	transport->ops = ops;
473 	TAILQ_INSERT_TAIL(&g_virtio_blk_transports, transport, tailq);
474 	return 0;
475 }
476 
/* Return the first created transport, or NULL when none exist. */
struct spdk_virtio_blk_transport *
virtio_blk_transport_get_first(void)
{
	return TAILQ_FIRST(&g_virtio_blk_transports);
}
482 
/* Return the transport following the given one, or NULL at end of list. */
struct spdk_virtio_blk_transport *
virtio_blk_transport_get_next(struct spdk_virtio_blk_transport *transport)
{
	return TAILQ_NEXT(transport, tailq);
}
488 
489 void
490 virtio_blk_transport_dump_opts(struct spdk_virtio_blk_transport *transport,
491 			       struct spdk_json_write_ctx *w)
492 {
493 	spdk_json_write_object_begin(w);
494 
495 	spdk_json_write_named_string(w, "name", transport->ops->name);
496 
497 	if (transport->ops->dump_opts) {
498 		transport->ops->dump_opts(transport, w);
499 	}
500 
501 	spdk_json_write_object_end(w);
502 }
503 
504 struct spdk_virtio_blk_transport *
505 virtio_blk_tgt_get_transport(const char *transport_name)
506 {
507 	struct spdk_virtio_blk_transport *transport;
508 
509 	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
510 		if (strcasecmp(transport->ops->name, transport_name) == 0) {
511 			return transport;
512 		}
513 	}
514 	return NULL;
515 }
516 
/* Destroy a transport via its ops; cb_fn runs on completion.  Returns the
 * transport implementation's status code. */
int
virtio_blk_transport_destroy(struct spdk_virtio_blk_transport *transport,
			     spdk_vhost_fini_cb cb_fn)
{
	return transport->ops->destroy(transport, cb_fn);
}
523 
/* Log flags used by SPDK_INFOLOG/SPDK_DEBUGLOG in this library. */
SPDK_LOG_REGISTER_COMPONENT(vhost)
SPDK_LOG_REGISTER_COMPONENT(vhost_ring)
526