/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/memory.h"
#include "spdk/barrier.h"
#include "spdk/vhost.h"
#include "vhost_internal.h"

static struct spdk_cpuset g_vhost_core_mask;

static TAILQ_HEAD(, spdk_vhost_dev) g_vhost_devices = TAILQ_HEAD_INITIALIZER(
			g_vhost_devices);
static pthread_mutex_t g_vhost_mutex = PTHREAD_MUTEX_INITIALIZER;

static TAILQ_HEAD(, spdk_virtio_blk_transport) g_virtio_blk_transports = TAILQ_HEAD_INITIALIZER(
			g_virtio_blk_transports);

static spdk_vhost_fini_cb g_fini_cb;

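/* Iterate the global controller list: pass NULL to get the first device, or
 * a previous result to get the next one.  The list is protected by
 * g_vhost_mutex; callers that walk it (see the config_json helpers below)
 * hold the lock for the whole iteration.
 */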
struct spdk_vhost_dev *
spdk_vhost_dev_next(struct spdk_vhost_dev *vdev)
{
	if (vdev == NULL) {
		return TAILQ_FIRST(&g_vhost_devices);
	}

	return TAILQ_NEXT(vdev, tailq);
}

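/* Look up a registered controller by name.  Returns NULL if no controller
 * with that exact (case-sensitive) name exists.
 */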
struct spdk_vhost_dev *
spdk_vhost_dev_find(const char *ctrlr_name)
{
	struct spdk_vhost_dev *vdev;

	TAILQ_FOREACH(vdev, &g_vhost_devices, tailq) {
		if (strcmp(vdev->name, ctrlr_name) == 0) {
			return vdev;
		}
	}

	return NULL;
}

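/* Resolve a user-supplied cpumask string against the global vhost core
 * mask.  A NULL mask selects every vhost core; otherwise the parsed mask
 * must be a non-empty subset of g_vhost_core_mask.
 */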
static int
vhost_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
{
	int rc;
	struct spdk_cpuset negative_vhost_mask;

	if (cpumask == NULL) {
		return -1;
	}

	if (mask == NULL) {
		spdk_cpuset_copy(cpumask, &g_vhost_core_mask);
		return 0;
	}

	rc = spdk_cpuset_parse(cpumask, mask);
	if (rc < 0) {
		SPDK_ERRLOG("invalid cpumask %s\n", mask);
		return -1;
	}

	spdk_cpuset_copy(&negative_vhost_mask, &g_vhost_core_mask);
	spdk_cpuset_negate(&negative_vhost_mask);
	spdk_cpuset_and(&negative_vhost_mask, cpumask);

	if (spdk_cpuset_count(&negative_vhost_mask) != 0) {
		SPDK_ERRLOG("one of the selected cpus is outside of the core mask (=%s)\n",
			    spdk_cpuset_fmt(&g_vhost_core_mask));
		return -1;
	}

	spdk_cpuset_and(cpumask, &g_vhost_core_mask);

	if (spdk_cpuset_count(cpumask) == 0) {
		SPDK_ERRLOG("no cpu is selected from the core mask (=%s)\n",
			    spdk_cpuset_fmt(&g_vhost_core_mask));
		return -1;
	}

	return 0;
}

TAILQ_HEAD(, virtio_blk_transport_ops_list_element)
g_spdk_virtio_blk_transport_ops = TAILQ_HEAD_INITIALIZER(g_spdk_virtio_blk_transport_ops);

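/* Find a registered virtio-blk transport ops table by name
 * (case-insensitive).  Returns NULL if the transport was never registered.
 */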
const struct spdk_virtio_blk_transport_ops *
virtio_blk_get_transport_ops(const char *transport_name)
{
	struct virtio_blk_transport_ops_list_element *ops;

	TAILQ_FOREACH(ops, &g_spdk_virtio_blk_transport_ops, link) {
		if (strcasecmp(transport_name, ops->ops.name) == 0) {
			return &ops->ops;
		}
	}
	return NULL;
}

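/* Register a new vhost controller.  The name must be unique and the cpumask
 * must resolve to a valid subset of the vhost core mask.  SCSI controllers
 * take the vhost-user path; everything else is constructed as virtio-blk.
 */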
int
vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
		   const struct spdk_json_val *params,
		   const struct spdk_vhost_dev_backend *backend,
		   const struct spdk_vhost_user_dev_backend *user_backend)
{
	struct spdk_cpuset cpumask = {};
	int rc;

	assert(vdev);
	if (name == NULL) {
		SPDK_ERRLOG("Can't register controller with no name\n");
		return -EINVAL;
	}

	if (vhost_parse_core_mask(mask_str, &cpumask) != 0) {
		SPDK_ERRLOG("cpumask %s is invalid (core mask is 0x%s)\n",
			    mask_str, spdk_cpuset_fmt(&g_vhost_core_mask));
		return -EINVAL;
	}

	spdk_vhost_lock();
	if (spdk_vhost_dev_find(name)) {
		SPDK_ERRLOG("vhost controller %s already exists.\n", name);
		spdk_vhost_unlock();
		return -EEXIST;
	}

	vdev->name = strdup(name);
	if (vdev->name == NULL) {
		spdk_vhost_unlock();
		return -EIO;
	}

	vdev->backend = backend;
	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
		rc = vhost_user_dev_register(vdev, name, &cpumask, user_backend);
	} else {
		rc = virtio_blk_construct_ctrlr(vdev, name, &cpumask, params, user_backend);
	}
	if (rc != 0) {
		free(vdev->name);
		spdk_vhost_unlock();
		return rc;
	}

	TAILQ_INSERT_TAIL(&g_vhost_devices, vdev, tailq);
	spdk_vhost_unlock();

	SPDK_INFOLOG(vhost, "Controller %s: new controller added\n", vdev->name);
	return 0;
}

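/* Tear down a controller created by vhost_dev_register().  If this was the
 * last registered device and a fini callback is pending, fire it.
 */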
int
vhost_dev_unregister(struct spdk_vhost_dev *vdev)
{
	int rc;

	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
		rc = vhost_user_dev_unregister(vdev);
	} else {
		rc = virtio_blk_destroy_ctrlr(vdev);
	}
	if (rc != 0) {
		return rc;
	}

	SPDK_INFOLOG(vhost, "Controller %s: removed\n", vdev->name);

	free(vdev->name);

	spdk_vhost_lock();
	TAILQ_REMOVE(&g_vhost_devices, vdev, tailq);
	if (TAILQ_EMPTY(&g_vhost_devices) && g_fini_cb != NULL) {
		g_fini_cb();
	}
	spdk_vhost_unlock();

	return 0;
}

const char *
spdk_vhost_dev_get_name(struct spdk_vhost_dev *vdev)
{
	assert(vdev != NULL);
	return vdev->name;
}

const struct spdk_cpuset *
spdk_vhost_dev_get_cpumask(struct spdk_vhost_dev *vdev)
{
	assert(vdev != NULL);
	return spdk_thread_get_cpumask(vdev->thread);
}

void
vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
{
	assert(vdev->backend->dump_info_json != NULL);
	vdev->backend->dump_info_json(vdev, w);
}

int
spdk_vhost_dev_remove(struct spdk_vhost_dev *vdev)
{
	return vdev->backend->remove_device(vdev);
}

int
spdk_vhost_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
			  uint32_t iops_threshold)
{
	assert(vdev->backend->set_coalescing != NULL);
	return vdev->backend->set_coalescing(vdev, delay_base_us, iops_threshold);
}

void
spdk_vhost_get_coalescing(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
			  uint32_t *iops_threshold)
{
	assert(vdev->backend->get_coalescing != NULL);
	vdev->backend->get_coalescing(vdev, delay_base_us, iops_threshold);
}

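/* g_vhost_mutex guards g_vhost_devices.  spdk_vhost_trylock() mirrors
 * pthread_mutex_trylock(), but negates the result so it returns 0 on
 * success and a negative errno (e.g. -EBUSY) when the lock is contended.
 */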
void
spdk_vhost_lock(void)
{
	pthread_mutex_lock(&g_vhost_mutex);
}

int
spdk_vhost_trylock(void)
{
	return -pthread_mutex_trylock(&g_vhost_mutex);
}

void
spdk_vhost_unlock(void)
{
	pthread_mutex_unlock(&g_vhost_mutex);
}

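/* Bring up the vhost-user (SCSI) layer and snapshot the set of cores SPDK
 * is running on; controller cpumasks passed to vhost_dev_register() are
 * validated against this mask.
 */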
void
spdk_vhost_scsi_init(spdk_vhost_init_cb init_cb)
{
	uint32_t i;
	int ret = 0;

	ret = vhost_user_init();
	if (ret != 0) {
		init_cb(ret);
		return;
	}

	spdk_cpuset_zero(&g_vhost_core_mask);
	SPDK_ENV_FOREACH_CORE(i) {
		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
	}
	init_cb(ret);
}

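/* Shutdown path: remove every remaining controller.  Device removal may
 * complete asynchronously, so g_fini_cb is only invoked directly when the
 * list is already empty; otherwise vhost_dev_unregister() fires it once the
 * last device is gone.
 */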
static void
vhost_fini(void)
{
	struct spdk_vhost_dev *vdev, *tmp;

	vdev = spdk_vhost_dev_next(NULL);
	if (vdev == NULL) {
		g_fini_cb();
		return;
	}

	while (vdev != NULL) {
		tmp = spdk_vhost_dev_next(vdev);
		/* Ignore any failure; there is nothing more we can do at shutdown. */
		spdk_vhost_dev_remove(vdev);
		vdev = tmp;
	}

	/* g_fini_cb will get called when the last device is unregistered. */
}

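/* Bring up the virtio-blk layer: create the built-in "vhost_user_blk"
 * transport and snapshot the usable core mask, as in spdk_vhost_scsi_init().
 */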
void
spdk_vhost_blk_init(spdk_vhost_init_cb init_cb)
{
	uint32_t i;
	int ret = 0;

	ret = virtio_blk_transport_create("vhost_user_blk", NULL);
	if (ret != 0) {
		goto out;
	}

	spdk_cpuset_zero(&g_vhost_core_mask);
	SPDK_ENV_FOREACH_CORE(i) {
		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
	}
out:
	init_cb(ret);
}

void
spdk_vhost_scsi_fini(spdk_vhost_fini_cb fini_cb)
{
	g_fini_cb = fini_cb;

	vhost_user_fini(vhost_fini);
}

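/* Destroy transports one at a time.  Each completed destroy re-enters this
 * function through its callback until the list is empty, at which point
 * g_fini_cb runs.
 */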
static void
virtio_blk_transports_destroy(void)
{
	struct spdk_virtio_blk_transport *transport = TAILQ_FIRST(&g_virtio_blk_transports);

	if (transport == NULL) {
		g_fini_cb();
		return;
	}
	TAILQ_REMOVE(&g_virtio_blk_transports, transport, tailq);
	virtio_blk_transport_destroy(transport, virtio_blk_transports_destroy);
}

void
spdk_vhost_blk_fini(spdk_vhost_fini_cb fini_cb)
{
	g_fini_cb = fini_cb;

	virtio_blk_transports_destroy();
}

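/* Write the JSON-RPC configuration needed to recreate this controller,
 * appending a "vhost_controller_set_coalescing" call whenever coalescing is
 * enabled (delay_base_us != 0).
 */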
static void
vhost_user_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
{
	uint32_t delay_base_us;
	uint32_t iops_threshold;

	vdev->backend->write_config_json(vdev, w);

	spdk_vhost_get_coalescing(vdev, &delay_base_us, &iops_threshold);
	if (delay_base_us) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "vhost_controller_set_coalescing");

		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "ctrlr", vdev->name);
		spdk_json_write_named_uint32(w, "delay_base_us", delay_base_us);
		spdk_json_write_named_uint32(w, "iops_threshold", iops_threshold);
		spdk_json_write_object_end(w);

		spdk_json_write_object_end(w);
	}
}

void
spdk_vhost_scsi_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_vhost_dev *vdev;

	spdk_json_write_array_begin(w);

	spdk_vhost_lock();
	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
	     vdev = spdk_vhost_dev_next(vdev)) {
		if (vdev->backend->type == VHOST_BACKEND_SCSI) {
			vhost_user_config_json(vdev, w);
		}
	}
	spdk_vhost_unlock();

	spdk_json_write_array_end(w);
}

void
spdk_vhost_blk_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_vhost_dev *vdev;

	spdk_json_write_array_begin(w);

	spdk_vhost_lock();
	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
	     vdev = spdk_vhost_dev_next(vdev)) {
		if (vdev->backend->type == VHOST_BACKEND_BLK) {
			vhost_user_config_json(vdev, w);
		}
	}
	spdk_vhost_unlock();

	spdk_json_write_array_end(w);
}

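/* Called by transport implementations (typically from a module constructor)
 * to register their ops table.  Registering the same name twice is a
 * programming error, hence the asserts.
 */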
void
virtio_blk_transport_register(const struct spdk_virtio_blk_transport_ops *ops)
{
	struct virtio_blk_transport_ops_list_element *new_ops;

	if (virtio_blk_get_transport_ops(ops->name) != NULL) {
		SPDK_ERRLOG("Double registering virtio blk transport type %s.\n", ops->name);
		assert(false);
		return;
	}

	new_ops = calloc(1, sizeof(*new_ops));
	if (new_ops == NULL) {
		SPDK_ERRLOG("Unable to allocate memory to register new transport type %s.\n", ops->name);
		assert(false);
		return;
	}

	new_ops->ops = *ops;

	TAILQ_INSERT_TAIL(&g_spdk_virtio_blk_transport_ops, new_ops, link);
}

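/* Instantiate a registered transport by name.  Returns -EEXIST if a
 * transport with that name is already active, -ENOENT if no ops were
 * registered under that name, and -EPERM if the transport's create()
 * callback fails.
 */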
int
virtio_blk_transport_create(const char *transport_name,
			    const struct spdk_json_val *params)
{
	const struct spdk_virtio_blk_transport_ops *ops = NULL;
	struct spdk_virtio_blk_transport *transport;

	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
		if (strcasecmp(transport->ops->name, transport_name) == 0) {
			return -EEXIST;
		}
	}

	ops = virtio_blk_get_transport_ops(transport_name);
	if (!ops) {
		SPDK_ERRLOG("Transport type '%s' unavailable.\n", transport_name);
		return -ENOENT;
	}

	transport = ops->create(params);
	if (!transport) {
		SPDK_ERRLOG("Unable to create new transport of type %s\n", transport_name);
		return -EPERM;
	}

	transport->ops = ops;
	TAILQ_INSERT_TAIL(&g_virtio_blk_transports, transport, tailq);
	return 0;
}

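/* Delegate teardown to the transport; cb_fn is invoked when the (possibly
 * asynchronous) destroy completes.
 */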
int
virtio_blk_transport_destroy(struct spdk_virtio_blk_transport *transport,
			     spdk_vhost_fini_cb cb_fn)
{
	return transport->ops->destroy(transport, cb_fn);
}

SPDK_LOG_REGISTER_COMPONENT(vhost)
SPDK_LOG_REGISTER_COMPONENT(vhost_ring)