xref: /spdk/lib/vhost/vhost.c (revision 99a43e75ed9ac3c87d23e3746173cf5a5a992544)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation. All rights reserved.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "spdk/env.h"
9 #include "spdk/likely.h"
10 #include "spdk/string.h"
11 #include "spdk/util.h"
12 #include "spdk/memory.h"
13 #include "spdk/barrier.h"
14 #include "spdk/vhost.h"
15 #include "vhost_internal.h"
16 
/* Set of reactor cores vhost controllers are allowed to run on;
 * populated at subsystem init from SPDK_ENV_FOREACH_CORE. */
static struct spdk_cpuset g_vhost_core_mask;

/* All registered vhost controllers (SCSI and blk), guarded by g_vhost_mutex. */
static TAILQ_HEAD(, spdk_vhost_dev) g_vhost_devices = TAILQ_HEAD_INITIALIZER(
			g_vhost_devices);
static pthread_mutex_t g_vhost_mutex = PTHREAD_MUTEX_INITIALIZER;

/* All created virtio-blk transport instances (one per transport type). */
static TAILQ_HEAD(, spdk_virtio_blk_transport) g_virtio_blk_transports = TAILQ_HEAD_INITIALIZER(
			g_virtio_blk_transports);

/* Completion callback for subsystem shutdown; invoked once teardown finishes. */
static spdk_vhost_fini_cb g_fini_cb;
27 
28 struct spdk_vhost_dev *
29 spdk_vhost_dev_next(struct spdk_vhost_dev *vdev)
30 {
31 	if (vdev == NULL) {
32 		return TAILQ_FIRST(&g_vhost_devices);
33 	}
34 
35 	return TAILQ_NEXT(vdev, tailq);
36 }
37 
38 struct spdk_vhost_dev *
39 spdk_vhost_dev_find(const char *ctrlr_name)
40 {
41 	struct spdk_vhost_dev *vdev;
42 
43 	TAILQ_FOREACH(vdev, &g_vhost_devices, tailq) {
44 		if (strcmp(vdev->name, ctrlr_name) == 0) {
45 			return vdev;
46 		}
47 	}
48 
49 	return NULL;
50 }
51 
52 static int
53 vhost_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
54 {
55 	int rc;
56 	struct spdk_cpuset negative_vhost_mask;
57 
58 	if (cpumask == NULL) {
59 		return -1;
60 	}
61 
62 	if (mask == NULL) {
63 		spdk_cpuset_copy(cpumask, &g_vhost_core_mask);
64 		return 0;
65 	}
66 
67 	rc = spdk_cpuset_parse(cpumask, mask);
68 	if (rc < 0) {
69 		SPDK_ERRLOG("invalid cpumask %s\n", mask);
70 		return -1;
71 	}
72 
73 	spdk_cpuset_copy(&negative_vhost_mask, &g_vhost_core_mask);
74 	spdk_cpuset_negate(&negative_vhost_mask);
75 	spdk_cpuset_and(&negative_vhost_mask, cpumask);
76 
77 	if (spdk_cpuset_count(&negative_vhost_mask) != 0) {
78 		SPDK_ERRLOG("one of selected cpu is outside of core mask(=%s)\n",
79 			    spdk_cpuset_fmt(&g_vhost_core_mask));
80 		return -1;
81 	}
82 
83 	spdk_cpuset_and(cpumask, &g_vhost_core_mask);
84 
85 	if (spdk_cpuset_count(cpumask) == 0) {
86 		SPDK_ERRLOG("no cpu is selected among core mask(=%s)\n",
87 			    spdk_cpuset_fmt(&g_vhost_core_mask));
88 		return -1;
89 	}
90 
91 	return 0;
92 }
93 
/* Registry of virtio-blk transport ops, matched by ops.name (case-insensitive). */
TAILQ_HEAD(, virtio_blk_transport_ops_list_element)
g_spdk_virtio_blk_transport_ops = TAILQ_HEAD_INITIALIZER(g_spdk_virtio_blk_transport_ops);
96 
97 const struct spdk_virtio_blk_transport_ops *
98 virtio_blk_get_transport_ops(const char *transport_name)
99 {
100 	struct virtio_blk_transport_ops_list_element *ops;
101 	TAILQ_FOREACH(ops, &g_spdk_virtio_blk_transport_ops, link) {
102 		if (strcasecmp(transport_name, ops->ops.name) == 0) {
103 			return &ops->ops;
104 		}
105 	}
106 	return NULL;
107 }
108 
109 int
110 vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
111 		   const struct spdk_json_val *params,
112 		   const struct spdk_vhost_dev_backend *backend,
113 		   const struct spdk_vhost_user_dev_backend *user_backend)
114 {
115 	struct spdk_cpuset cpumask = {};
116 	int rc;
117 
118 	assert(vdev);
119 	if (name == NULL) {
120 		SPDK_ERRLOG("Can't register controller with no name\n");
121 		return -EINVAL;
122 	}
123 
124 	if (vhost_parse_core_mask(mask_str, &cpumask) != 0) {
125 		SPDK_ERRLOG("cpumask %s is invalid (core mask is 0x%s)\n",
126 			    mask_str, spdk_cpuset_fmt(&g_vhost_core_mask));
127 		return -EINVAL;
128 	}
129 
130 	if (spdk_vhost_dev_find(name)) {
131 		SPDK_ERRLOG("vhost controller %s already exists.\n", name);
132 		return -EEXIST;
133 	}
134 
135 	vdev->name = strdup(name);
136 	if (vdev->name == NULL) {
137 		return -EIO;
138 	}
139 
140 	vdev->backend = backend;
141 	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
142 		rc = vhost_user_dev_register(vdev, name, &cpumask, user_backend);
143 	} else {
144 		rc = virtio_blk_construct_ctrlr(vdev, name, &cpumask, params, user_backend);
145 	}
146 	if (rc != 0) {
147 		free(vdev->name);
148 		return rc;
149 	}
150 
151 	TAILQ_INSERT_TAIL(&g_vhost_devices, vdev, tailq);
152 
153 	SPDK_INFOLOG(vhost, "Controller %s: new controller added\n", vdev->name);
154 	return 0;
155 }
156 
157 int
158 vhost_dev_unregister(struct spdk_vhost_dev *vdev)
159 {
160 	int rc;
161 
162 	if (vdev->backend->type == VHOST_BACKEND_SCSI) {
163 		rc = vhost_user_dev_unregister(vdev);
164 	} else {
165 		rc = virtio_blk_destroy_ctrlr(vdev);
166 	}
167 	if (rc != 0) {
168 		return rc;
169 	}
170 
171 	SPDK_INFOLOG(vhost, "Controller %s: removed\n", vdev->name);
172 
173 	free(vdev->name);
174 	TAILQ_REMOVE(&g_vhost_devices, vdev, tailq);
175 
176 	if (TAILQ_EMPTY(&g_vhost_devices) && g_fini_cb != NULL) {
177 		g_fini_cb();
178 	}
179 
180 	return 0;
181 }
182 
183 const char *
184 spdk_vhost_dev_get_name(struct spdk_vhost_dev *vdev)
185 {
186 	assert(vdev != NULL);
187 	return vdev->name;
188 }
189 
190 const struct spdk_cpuset *
191 spdk_vhost_dev_get_cpumask(struct spdk_vhost_dev *vdev)
192 {
193 	assert(vdev != NULL);
194 	return spdk_thread_get_cpumask(vdev->thread);
195 }
196 
197 void
198 vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
199 {
200 	assert(vdev->backend->dump_info_json != NULL);
201 	vdev->backend->dump_info_json(vdev, w);
202 }
203 
204 int
205 spdk_vhost_dev_remove(struct spdk_vhost_dev *vdev)
206 {
207 	return vdev->backend->remove_device(vdev);
208 }
209 
210 void
211 spdk_vhost_lock(void)
212 {
213 	pthread_mutex_lock(&g_vhost_mutex);
214 }
215 
216 int
217 spdk_vhost_trylock(void)
218 {
219 	return -pthread_mutex_trylock(&g_vhost_mutex);
220 }
221 
222 void
223 spdk_vhost_unlock(void)
224 {
225 	pthread_mutex_unlock(&g_vhost_mutex);
226 }
227 
228 void
229 spdk_vhost_scsi_init(spdk_vhost_init_cb init_cb)
230 {
231 	uint32_t i;
232 	int ret = 0;
233 
234 	ret = vhost_user_init();
235 	if (ret != 0) {
236 		init_cb(ret);
237 		return;
238 	}
239 
240 	spdk_cpuset_zero(&g_vhost_core_mask);
241 	SPDK_ENV_FOREACH_CORE(i) {
242 		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
243 	}
244 	init_cb(ret);
245 }
246 
247 static void
248 vhost_fini(void)
249 {
250 	struct spdk_vhost_dev *vdev, *tmp;
251 
252 	spdk_vhost_lock();
253 	if (spdk_vhost_dev_next(NULL) == NULL) {
254 		spdk_vhost_unlock();
255 		g_fini_cb();
256 		return;
257 	}
258 
259 	vdev = spdk_vhost_dev_next(NULL);
260 	while (vdev != NULL) {
261 		tmp = spdk_vhost_dev_next(vdev);
262 		spdk_vhost_dev_remove(vdev);
263 		/* don't care if it fails, there's nothing we can do for now */
264 		vdev = tmp;
265 	}
266 	spdk_vhost_unlock();
267 
268 	/* g_fini_cb will get called when last device is unregistered. */
269 }
270 
271 void
272 spdk_vhost_blk_init(spdk_vhost_init_cb init_cb)
273 {
274 	uint32_t i;
275 	int ret = 0;
276 
277 	ret = virtio_blk_transport_create("vhost_user_blk", NULL);
278 	if (ret != 0) {
279 		goto out;
280 	}
281 
282 	spdk_cpuset_zero(&g_vhost_core_mask);
283 	SPDK_ENV_FOREACH_CORE(i) {
284 		spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
285 	}
286 out:
287 	init_cb(ret);
288 }
289 
290 void
291 spdk_vhost_scsi_fini(spdk_vhost_fini_cb fini_cb)
292 {
293 	g_fini_cb = fini_cb;
294 
295 	vhost_user_fini(vhost_fini);
296 }
297 
298 static void
299 virtio_blk_transports_destroy(void)
300 {
301 	struct spdk_virtio_blk_transport *transport = TAILQ_FIRST(&g_virtio_blk_transports);
302 
303 	if (transport == NULL) {
304 		g_fini_cb();
305 		return;
306 	}
307 	TAILQ_REMOVE(&g_virtio_blk_transports, transport, tailq);
308 	virtio_blk_transport_destroy(transport, virtio_blk_transports_destroy);
309 }
310 
311 void
312 spdk_vhost_blk_fini(spdk_vhost_fini_cb fini_cb)
313 {
314 	g_fini_cb = fini_cb;
315 
316 	virtio_blk_transports_destroy();
317 }
318 
319 static void
320 vhost_user_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
321 {
322 	uint32_t delay_base_us;
323 	uint32_t iops_threshold;
324 
325 	vdev->backend->write_config_json(vdev, w);
326 
327 	spdk_vhost_get_coalescing(vdev, &delay_base_us, &iops_threshold);
328 	if (delay_base_us) {
329 		spdk_json_write_object_begin(w);
330 		spdk_json_write_named_string(w, "method", "vhost_controller_set_coalescing");
331 
332 		spdk_json_write_named_object_begin(w, "params");
333 		spdk_json_write_named_string(w, "ctrlr", vdev->name);
334 		spdk_json_write_named_uint32(w, "delay_base_us", delay_base_us);
335 		spdk_json_write_named_uint32(w, "iops_threshold", iops_threshold);
336 		spdk_json_write_object_end(w);
337 
338 		spdk_json_write_object_end(w);
339 	}
340 }
341 
342 void
343 spdk_vhost_scsi_config_json(struct spdk_json_write_ctx *w)
344 {
345 	struct spdk_vhost_dev *vdev;
346 
347 	spdk_json_write_array_begin(w);
348 
349 	spdk_vhost_lock();
350 	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
351 	     vdev = spdk_vhost_dev_next(vdev)) {
352 		if (vdev->backend->type == VHOST_BACKEND_SCSI) {
353 			vhost_user_config_json(vdev, w);
354 		}
355 	}
356 	spdk_vhost_unlock();
357 
358 	spdk_json_write_array_end(w);
359 }
360 
361 void
362 spdk_vhost_blk_config_json(struct spdk_json_write_ctx *w)
363 {
364 	struct spdk_vhost_dev *vdev;
365 
366 	spdk_json_write_array_begin(w);
367 
368 	spdk_vhost_lock();
369 	for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
370 	     vdev = spdk_vhost_dev_next(vdev)) {
371 		if (vdev->backend->type == VHOST_BACKEND_BLK) {
372 			vhost_user_config_json(vdev, w);
373 		}
374 	}
375 	spdk_vhost_unlock();
376 
377 	spdk_json_write_array_end(w);
378 }
379 
380 void
381 virtio_blk_transport_register(const struct spdk_virtio_blk_transport_ops *ops)
382 {
383 	struct virtio_blk_transport_ops_list_element *new_ops;
384 
385 	if (virtio_blk_get_transport_ops(ops->name) != NULL) {
386 		SPDK_ERRLOG("Double registering virtio blk transport type %s.\n", ops->name);
387 		assert(false);
388 		return;
389 	}
390 
391 	new_ops = calloc(1, sizeof(*new_ops));
392 	if (new_ops == NULL) {
393 		SPDK_ERRLOG("Unable to allocate memory to register new transport type %s.\n", ops->name);
394 		assert(false);
395 		return;
396 	}
397 
398 	new_ops->ops = *ops;
399 
400 	TAILQ_INSERT_TAIL(&g_spdk_virtio_blk_transport_ops, new_ops, link);
401 }
402 
403 int
404 virtio_blk_transport_create(const char *transport_name,
405 			    const struct spdk_json_val *params)
406 {
407 	const struct spdk_virtio_blk_transport_ops *ops = NULL;
408 	struct spdk_virtio_blk_transport *transport;
409 
410 	TAILQ_FOREACH(transport, &g_virtio_blk_transports, tailq) {
411 		if (strcasecmp(transport->ops->name, transport_name) == 0) {
412 			return -EEXIST;
413 		}
414 	}
415 
416 	ops = virtio_blk_get_transport_ops(transport_name);
417 	if (!ops) {
418 		SPDK_ERRLOG("Transport type '%s' unavailable.\n", transport_name);
419 		return -ENOENT;
420 	}
421 
422 	transport = ops->create(params);
423 	if (!transport) {
424 		SPDK_ERRLOG("Unable to create new transport of type %s\n", transport_name);
425 		return -EPERM;
426 	}
427 
428 	transport->ops = ops;
429 	TAILQ_INSERT_TAIL(&g_virtio_blk_transports, transport, tailq);
430 	return 0;
431 }
432 
433 int
434 virtio_blk_transport_destroy(struct spdk_virtio_blk_transport *transport,
435 			     spdk_vhost_fini_cb cb_fn)
436 {
437 	return transport->ops->destroy(transport, cb_fn);
438 }
439 
/* Register log flags so SPDK_INFOLOG(vhost, ...) etc. can be toggled at runtime. */
SPDK_LOG_REGISTER_COMPONENT(vhost)
SPDK_LOG_REGISTER_COMPONENT(vhost_ring)
442