xref: /spdk/module/bdev/ocf/vbdev_ocf.c (revision 03e3fc4f5835983a4e6602b4e770922e798ce263)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <ocf/ocf.h>
35 #include <ocf/ocf_types.h>
36 #include <ocf/ocf_mngt.h>
37 
38 #include "ctx.h"
39 #include "data.h"
40 #include "volume.h"
41 #include "utils.h"
42 #include "vbdev_ocf.h"
43 
44 #include "spdk/bdev_module.h"
45 #include "spdk/conf.h"
46 #include "spdk/thread.h"
47 #include "spdk/string.h"
48 #include "spdk_internal/log.h"
49 #include "spdk/cpuset.h"
50 
/* Module handle; fully defined elsewhere in this file */
static struct spdk_bdev_module ocf_if;

/* Global list of every OCF virtual bdev created by this module */
static TAILQ_HEAD(, vbdev_ocf) g_ocf_vbdev_head
	= TAILQ_HEAD_INITIALIZER(g_ocf_vbdev_head);

/* Bdevs currently under examination; entries are added in examine_start()
 * and removed in examine_done() */
static TAILQ_HEAD(, examining_bdev) g_ocf_examining_bdevs_head
	= TAILQ_HEAD_INITIALIZER(g_ocf_examining_bdevs_head);

/* Set once module shutdown begins; checked in stop_vbdev() to decide
 * whether shared cache instances may be stopped */
bool g_fini_started = false;

/* Structure for keeping list of bdevs that are claimed but not used yet */
struct examining_bdev {
	struct spdk_bdev           *bdev;
	TAILQ_ENTRY(examining_bdev) tailq;
};
66 
67 /* Add bdev to list of claimed */
68 static void
69 examine_start(struct spdk_bdev *bdev)
70 {
71 	struct examining_bdev *entry = malloc(sizeof(*entry));
72 
73 	assert(entry);
74 	entry->bdev = bdev;
75 	TAILQ_INSERT_TAIL(&g_ocf_examining_bdevs_head, entry, tailq);
76 }
77 
78 /* Find bdev on list of claimed bdevs, then remove it,
79  * if it was the last one on list then report examine done */
80 static void
81 examine_done(int status, struct vbdev_ocf *vbdev, void *cb_arg)
82 {
83 	struct spdk_bdev *bdev = cb_arg;
84 	struct examining_bdev *entry, *safe, *found = NULL;
85 
86 	TAILQ_FOREACH_SAFE(entry, &g_ocf_examining_bdevs_head, tailq, safe) {
87 		if (entry->bdev == bdev) {
88 			if (found) {
89 				goto remove;
90 			} else {
91 				found = entry;
92 			}
93 		}
94 	}
95 
96 	assert(found);
97 	spdk_bdev_module_examine_done(&ocf_if);
98 
99 remove:
100 	TAILQ_REMOVE(&g_ocf_examining_bdevs_head, found, tailq);
101 	free(found);
102 }
103 
104 /* Free allocated strings and structure itself
105  * Used at shutdown only */
106 static void
107 free_vbdev(struct vbdev_ocf *vbdev)
108 {
109 	if (!vbdev) {
110 		return;
111 	}
112 
113 	free(vbdev->name);
114 	free(vbdev->cache.name);
115 	free(vbdev->core.name);
116 	free(vbdev);
117 }
118 
119 /* Get existing cache base
120  * that is attached to other vbdev */
121 static struct vbdev_ocf_base *
122 get_other_cache_base(struct vbdev_ocf_base *base)
123 {
124 	struct vbdev_ocf *vbdev;
125 
126 	TAILQ_FOREACH(vbdev, &g_ocf_vbdev_head, tailq) {
127 		if (&vbdev->cache == base || !vbdev->cache.attached) {
128 			continue;
129 		}
130 		if (!strcmp(vbdev->cache.name, base->name)) {
131 			return &vbdev->cache;
132 		}
133 	}
134 
135 	return NULL;
136 }
137 
138 /* Get existing OCF cache instance
139  * that is started by other vbdev */
140 static ocf_cache_t
141 get_other_cache_instance(struct vbdev_ocf *vbdev)
142 {
143 	struct vbdev_ocf *cmp;
144 
145 	TAILQ_FOREACH(cmp, &g_ocf_vbdev_head, tailq) {
146 		if (cmp->state.doing_finish || cmp == vbdev) {
147 			continue;
148 		}
149 		if (strcmp(cmp->cache.name, vbdev->cache.name)) {
150 			continue;
151 		}
152 		if (cmp->ocf_cache) {
153 			return cmp->ocf_cache;
154 		}
155 	}
156 
157 	return NULL;
158 }
159 
160 static void
161 _remove_base_bdev(void *ctx)
162 {
163 	struct spdk_bdev_desc *desc = ctx;
164 
165 	spdk_bdev_close(desc);
166 }
167 
168 /* Close and unclaim base bdev */
169 static void
170 remove_base_bdev(struct vbdev_ocf_base *base)
171 {
172 	if (base->attached) {
173 		if (base->management_channel) {
174 			spdk_put_io_channel(base->management_channel);
175 		}
176 
177 		spdk_bdev_module_release_bdev(base->bdev);
178 		/* Close the underlying bdev on its same opened thread. */
179 		if (base->thread && base->thread != spdk_get_thread()) {
180 			spdk_thread_send_msg(base->thread, _remove_base_bdev, base->desc);
181 		} else {
182 			spdk_bdev_close(base->desc);
183 		}
184 		base->attached = false;
185 	}
186 }
187 
/* Finish unregister operation */
static void
unregister_finish(struct vbdev_ocf *vbdev)
{
	/* Report completion to the bdev layer first, then drop this vbdev's
	 * reference on the shared cache context */
	spdk_bdev_destruct_done(&vbdev->exp_bdev, vbdev->state.stop_status);
	vbdev_ocf_cache_ctx_put(vbdev->cache_ctx);
	vbdev_ocf_mngt_continue(vbdev, 0);
}

/* Close and unclaim the core base bdev, then advance the management pipeline */
static void
close_core_bdev(struct vbdev_ocf *vbdev)
{
	remove_base_bdev(&vbdev->core);
	vbdev_ocf_mngt_continue(vbdev, 0);
}

/* Completion of ocf_mngt_cache_remove_core(); releases the cache lock
 * taken in remove_core_cache_lock_cmpl() and continues the pipeline */
static void
remove_core_cmpl(void *priv, int error)
{
	struct vbdev_ocf *vbdev = priv;

	ocf_mngt_cache_unlock(vbdev->ocf_cache);
	vbdev_ocf_mngt_continue(vbdev, error);
}
212 
213 /* Try to lock cache, then remove core */
214 static void
215 remove_core_cache_lock_cmpl(ocf_cache_t cache, void *priv, int error)
216 {
217 	struct vbdev_ocf *vbdev = (struct vbdev_ocf *)priv;
218 
219 	if (error) {
220 		SPDK_ERRLOG("Error %d, can not lock cache instance %s\n",
221 			    error, vbdev->name);
222 		vbdev_ocf_mngt_continue(vbdev, error);
223 		return;
224 	}
225 
226 	ocf_mngt_cache_remove_core(vbdev->ocf_core, remove_core_cmpl, vbdev);
227 }
228 
229 /* Detach core base */
230 static void
231 detach_core(struct vbdev_ocf *vbdev)
232 {
233 	if (vbdev->ocf_cache && ocf_cache_is_running(vbdev->ocf_cache)) {
234 		ocf_mngt_cache_lock(vbdev->ocf_cache, remove_core_cache_lock_cmpl, vbdev);
235 	} else {
236 		vbdev_ocf_mngt_continue(vbdev, 0);
237 	}
238 }
239 
240 static void
241 close_cache_bdev(struct vbdev_ocf *vbdev)
242 {
243 	remove_base_bdev(&vbdev->cache);
244 	vbdev_ocf_mngt_continue(vbdev, 0);
245 }
246 
247 /* Detach cache base */
248 static void
249 detach_cache(struct vbdev_ocf *vbdev)
250 {
251 	vbdev->state.stop_status = vbdev->mngt_ctx.status;
252 
253 	/* If some other vbdev references this cache bdev,
254 	 * we detach this only by changing the flag, without actual close */
255 	if (get_other_cache_base(&vbdev->cache)) {
256 		vbdev->cache.attached = false;
257 	}
258 
259 	vbdev_ocf_mngt_continue(vbdev, 0);
260 }
261 
262 static void
263 stop_vbdev_cmpl(ocf_cache_t cache, void *priv, int error)
264 {
265 	struct vbdev_ocf *vbdev = priv;
266 
267 	vbdev_ocf_queue_put(vbdev->cache_ctx->mngt_queue);
268 	ocf_mngt_cache_unlock(cache);
269 
270 	vbdev_ocf_mngt_continue(vbdev, error);
271 }
272 
273 /* Try to lock cache, then stop it */
274 static void
275 stop_vbdev_cache_lock_cmpl(ocf_cache_t cache, void *priv, int error)
276 {
277 	struct vbdev_ocf *vbdev = (struct vbdev_ocf *)priv;
278 
279 	if (error) {
280 		SPDK_ERRLOG("Error %d, can not lock cache instance %s\n",
281 			    error, vbdev->name);
282 		vbdev_ocf_mngt_continue(vbdev, error);
283 		return;
284 	}
285 
286 	ocf_mngt_cache_stop(vbdev->ocf_cache, stop_vbdev_cmpl, vbdev);
287 }
288 
289 /* Stop OCF cache object
290  * vbdev_ocf is not operational after this */
291 static void
292 stop_vbdev(struct vbdev_ocf *vbdev)
293 {
294 	if (!ocf_cache_is_running(vbdev->ocf_cache)) {
295 		vbdev_ocf_mngt_continue(vbdev, 0);
296 		return;
297 	}
298 
299 	if (!g_fini_started && get_other_cache_instance(vbdev)) {
300 		SPDK_NOTICELOG("Not stopping cache instance '%s'"
301 			       " because it is referenced by other OCF bdev\n",
302 			       vbdev->cache.name);
303 		vbdev_ocf_mngt_continue(vbdev, 0);
304 		return;
305 	}
306 
307 	ocf_mngt_cache_lock(vbdev->ocf_cache, stop_vbdev_cache_lock_cmpl, vbdev);
308 }
309 
310 static void
311 flush_vbdev_cmpl(ocf_cache_t cache, void *priv, int error)
312 {
313 	struct vbdev_ocf *vbdev = priv;
314 
315 	ocf_mngt_cache_unlock(cache);
316 	vbdev_ocf_mngt_continue(vbdev, error);
317 }
318 
319 static void
320 flush_vbdev_cache_lock_cmpl(ocf_cache_t cache, void *priv, int error)
321 {
322 	struct vbdev_ocf *vbdev = (struct vbdev_ocf *)priv;
323 
324 	if (error) {
325 		SPDK_ERRLOG("Error %d, can not lock cache instance %s\n",
326 			    error, vbdev->name);
327 		vbdev_ocf_mngt_continue(vbdev, error);
328 		return;
329 	}
330 
331 	ocf_mngt_cache_flush(vbdev->ocf_cache, flush_vbdev_cmpl, vbdev);
332 }
333 
334 static void
335 flush_vbdev(struct vbdev_ocf *vbdev)
336 {
337 	if (!ocf_cache_is_running(vbdev->ocf_cache)) {
338 		vbdev_ocf_mngt_continue(vbdev, -EINVAL);
339 		return;
340 	}
341 
342 	ocf_mngt_cache_lock(vbdev->ocf_cache, flush_vbdev_cache_lock_cmpl, vbdev);
343 }
344 
/* Procedures called during dirty unregister
 * Cache is stopped before the core is removed, which (per the destruct-dirty
 * comment below) keeps the instance recoverable in the future */
vbdev_ocf_mngt_fn unregister_path_dirty[] = {
	flush_vbdev,
	stop_vbdev,
	detach_cache,
	close_cache_bdev,
	detach_core,
	close_core_bdev,
	unregister_finish,
	NULL
};

/* Procedures called during clean unregister
 * Core is removed from the running cache first, which (per the
 * destruct-clean comment below) removes the instance permanently */
vbdev_ocf_mngt_fn unregister_path_clean[] = {
	flush_vbdev,
	detach_core,
	close_core_bdev,
	stop_vbdev,
	detach_cache,
	close_cache_bdev,
	unregister_finish,
	NULL
};
368 
369 /* Start asynchronous management operation using unregister_path */
370 static void
371 unregister_cb(void *opaque)
372 {
373 	struct vbdev_ocf *vbdev = opaque;
374 	vbdev_ocf_mngt_fn *unregister_path;
375 	int rc;
376 
377 	unregister_path = vbdev->state.doing_clean_delete ?
378 			  unregister_path_clean : unregister_path_dirty;
379 
380 	rc = vbdev_ocf_mngt_start(vbdev, unregister_path, NULL, NULL);
381 	if (rc) {
382 		SPDK_ERRLOG("Unable to unregister OCF bdev: %d\n", rc);
383 		spdk_bdev_destruct_done(&vbdev->exp_bdev, rc);
384 	}
385 }
386 
/* Clean remove case - remove core and then cache, this order
 * will remove instance permanently */
static void
_vbdev_ocf_destruct_clean(struct vbdev_ocf *vbdev)
{
	if (vbdev->core.attached) {
		detach_core(vbdev);
		close_core_bdev(vbdev);
	}

	if (vbdev->cache.attached) {
		detach_cache(vbdev);
		close_cache_bdev(vbdev);
	}
}

/* Dirty shutdown/hot remove case - remove cache and then core, this order
 * will allow us to recover this instance in the future */
static void
_vbdev_ocf_destruct_dirty(struct vbdev_ocf *vbdev)
{
	if (vbdev->cache.attached) {
		detach_cache(vbdev);
		close_cache_bdev(vbdev);
	}

	if (vbdev->core.attached) {
		detach_core(vbdev);
		close_core_bdev(vbdev);
	}
}
418 
419 /* Unregister io device with callback to unregister_cb
420  * This function is called during spdk_bdev_unregister */
421 static int
422 vbdev_ocf_destruct(void *opaque)
423 {
424 	struct vbdev_ocf *vbdev = opaque;
425 
426 	if (vbdev->state.doing_finish) {
427 		return -EALREADY;
428 	}
429 
430 	if (vbdev->state.starting && !vbdev->state.started) {
431 		/* Prevent before detach cache/core during register path of
432 		  this bdev */
433 		return -EBUSY;
434 	}
435 
436 	vbdev->state.doing_finish = true;
437 
438 	if (vbdev->state.started) {
439 		spdk_io_device_unregister(vbdev, unregister_cb);
440 		/* Return 1 because unregister is delayed */
441 		return 1;
442 	}
443 
444 	if (vbdev->state.doing_clean_delete) {
445 		_vbdev_ocf_destruct_clean(vbdev);
446 	} else {
447 		_vbdev_ocf_destruct_dirty(vbdev);
448 	}
449 
450 	return 0;
451 }
452 
453 /* Stop OCF cache and unregister SPDK bdev */
454 int
455 vbdev_ocf_delete(struct vbdev_ocf *vbdev, void (*cb)(void *, int), void *cb_arg)
456 {
457 	int rc = 0;
458 
459 	if (vbdev->state.started) {
460 		spdk_bdev_unregister(&vbdev->exp_bdev, cb, cb_arg);
461 	} else {
462 		rc = vbdev_ocf_destruct(vbdev);
463 		if (rc == 0 && cb) {
464 			cb(cb_arg, 0);
465 		}
466 	}
467 
468 	return rc;
469 }
470 
471 /* Remove cores permanently and then stop OCF cache and unregister SPDK bdev */
472 int
473 vbdev_ocf_delete_clean(struct vbdev_ocf *vbdev, void (*cb)(void *, int),
474 		       void *cb_arg)
475 {
476 	vbdev->state.doing_clean_delete = true;
477 
478 	return vbdev_ocf_delete(vbdev, cb, cb_arg);
479 }
480 
481 
482 /* If vbdev is online, return its object */
483 struct vbdev_ocf *
484 vbdev_ocf_get_by_name(const char *name)
485 {
486 	struct vbdev_ocf *vbdev;
487 
488 	if (name == NULL) {
489 		assert(false);
490 		return NULL;
491 	}
492 
493 	TAILQ_FOREACH(vbdev, &g_ocf_vbdev_head, tailq) {
494 		if (vbdev->name == NULL || vbdev->state.doing_finish) {
495 			continue;
496 		}
497 		if (strcmp(vbdev->name, name) == 0) {
498 			return vbdev;
499 		}
500 	}
501 	return NULL;
502 }
503 
504 /* Return matching base if parent vbdev is online */
505 struct vbdev_ocf_base *
506 vbdev_ocf_get_base_by_name(const char *name)
507 {
508 	struct vbdev_ocf *vbdev;
509 
510 	if (name == NULL) {
511 		assert(false);
512 		return NULL;
513 	}
514 
515 	TAILQ_FOREACH(vbdev, &g_ocf_vbdev_head, tailq) {
516 		if (vbdev->state.doing_finish) {
517 			continue;
518 		}
519 
520 		if (vbdev->cache.name && strcmp(vbdev->cache.name, name) == 0) {
521 			return &vbdev->cache;
522 		}
523 		if (vbdev->core.name && strcmp(vbdev->core.name, name) == 0) {
524 			return &vbdev->core;
525 		}
526 	}
527 	return NULL;
528 }
529 
530 /* Execute fn for each OCF device that is online or waits for base devices */
531 void
532 vbdev_ocf_foreach(vbdev_ocf_foreach_fn fn, void *ctx)
533 {
534 	struct vbdev_ocf *vbdev;
535 
536 	assert(fn != NULL);
537 
538 	TAILQ_FOREACH(vbdev, &g_ocf_vbdev_head, tailq) {
539 		if (!vbdev->state.doing_finish) {
540 			fn(vbdev, ctx);
541 		}
542 	}
543 }
544 
545 /* Called from OCF when SPDK_IO is completed */
546 static void
547 vbdev_ocf_io_submit_cb(struct ocf_io *io, int error)
548 {
549 	struct spdk_bdev_io *bdev_io = io->priv1;
550 
551 	if (error == 0) {
552 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
553 	} else if (error == -ENOMEM) {
554 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
555 	} else {
556 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
557 	}
558 
559 	ocf_io_put(io);
560 }
561 
562 /* Configure io parameters and send it to OCF */
563 static int
564 io_submit_to_ocf(struct spdk_bdev_io *bdev_io, struct ocf_io *io)
565 {
566 	switch (bdev_io->type) {
567 	case SPDK_BDEV_IO_TYPE_WRITE:
568 	case SPDK_BDEV_IO_TYPE_READ:
569 		ocf_core_submit_io(io);
570 		return 0;
571 	case SPDK_BDEV_IO_TYPE_FLUSH:
572 		ocf_core_submit_flush(io);
573 		return 0;
574 	case SPDK_BDEV_IO_TYPE_UNMAP:
575 		ocf_core_submit_discard(io);
576 		return 0;
577 	case SPDK_BDEV_IO_TYPE_RESET:
578 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
579 	default:
580 		SPDK_ERRLOG("Unsupported IO type: %d\n", bdev_io->type);
581 		return -EINVAL;
582 	}
583 }
584 
/* Submit SPDK-IO to OCF
 * Translates the bdev io into an OCF io on this channel's queue; on any
 * failure the bdev io is completed with NOMEM or FAILED and the OCF io
 * reference (if taken) is dropped */
static void
io_handle(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct vbdev_ocf *vbdev = bdev_io->bdev->ctxt;
	struct ocf_io *io = NULL;
	struct bdev_ocf_data *data = NULL;
	struct vbdev_ocf_qctx *qctx = spdk_io_channel_get_ctx(ch);
	/* Convert block counts to byte lengths/offsets for OCF */
	uint64_t len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
	uint64_t offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->blocklen;
	int dir, flags = 0;
	int err;

	/* Everything except READ (including FLUSH and UNMAP) is a write
	 * from OCF's point of view */
	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		dir = OCF_READ;
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		dir = OCF_WRITE;
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		dir = OCF_WRITE;
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		dir = OCF_WRITE;
		break;
	default:
		err = -EINVAL;
		goto fail;
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) {
		flags = OCF_WRITE_FLUSH;
	}

	io = ocf_core_new_io(vbdev->ocf_core, qctx->queue, offset, len, dir, 0, flags);
	if (!io) {
		err = -ENOMEM;
		goto fail;
	}

	/* Wrap the bdev io buffers so OCF can access them */
	data = vbdev_ocf_data_from_spdk_io(bdev_io);
	if (!data) {
		err = -ENOMEM;
		goto fail;
	}

	err = ocf_io_set_data(io, data, 0);
	if (err) {
		goto fail;
	}

	/* bdev_io is recovered from priv1 in vbdev_ocf_io_submit_cb() */
	ocf_io_set_cmpl(io, bdev_io, NULL, vbdev_ocf_io_submit_cb);

	err = io_submit_to_ocf(bdev_io, io);
	if (err) {
		goto fail;
	}

	return;

fail:
	if (io) {
		ocf_io_put(io);
	}

	if (err == -ENOMEM) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}
657 
658 static void
659 vbdev_ocf_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
660 		     bool success)
661 {
662 	if (!success) {
663 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
664 		return;
665 	}
666 
667 	io_handle(ch, bdev_io);
668 }
669 
670 /* Called from bdev layer when an io to Cache vbdev is submitted */
671 static void
672 vbdev_ocf_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
673 {
674 	switch (bdev_io->type) {
675 	case SPDK_BDEV_IO_TYPE_READ:
676 		/* User does not have to allocate io vectors for the request,
677 		 * so in case they are not allocated, we allocate them here */
678 		spdk_bdev_io_get_buf(bdev_io, vbdev_ocf_get_buf_cb,
679 				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
680 		break;
681 	case SPDK_BDEV_IO_TYPE_WRITE:
682 	case SPDK_BDEV_IO_TYPE_FLUSH:
683 	case SPDK_BDEV_IO_TYPE_UNMAP:
684 		io_handle(ch, bdev_io);
685 		break;
686 	case SPDK_BDEV_IO_TYPE_RESET:
687 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
688 	default:
689 		SPDK_ERRLOG("Unknown I/O type %d\n", bdev_io->type);
690 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
691 		break;
692 	}
693 }
694 
695 /* Called from bdev layer */
696 static bool
697 vbdev_ocf_io_type_supported(void *opaque, enum spdk_bdev_io_type io_type)
698 {
699 	struct vbdev_ocf *vbdev = opaque;
700 
701 	switch (io_type) {
702 	case SPDK_BDEV_IO_TYPE_READ:
703 	case SPDK_BDEV_IO_TYPE_WRITE:
704 	case SPDK_BDEV_IO_TYPE_FLUSH:
705 	case SPDK_BDEV_IO_TYPE_UNMAP:
706 		return spdk_bdev_io_type_supported(vbdev->core.bdev, io_type);
707 	case SPDK_BDEV_IO_TYPE_RESET:
708 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
709 	default:
710 		return false;
711 	}
712 }
713 
714 /* Called from bdev layer */
715 static struct spdk_io_channel *
716 vbdev_ocf_get_io_channel(void *opaque)
717 {
718 	struct vbdev_ocf *bdev = opaque;
719 
720 	return spdk_get_io_channel(bdev);
721 }
722 
/* Dump runtime information about this OCF bdev for "get_bdevs"-style RPCs */
static int
vbdev_ocf_dump_info_json(void *opaque, struct spdk_json_write_ctx *w)
{
	struct vbdev_ocf *vbdev = opaque;

	spdk_json_write_named_string(w, "cache_device", vbdev->cache.name);
	spdk_json_write_named_string(w, "core_device", vbdev->core.name);

	/* Current cache mode as reported by the running OCF instance */
	spdk_json_write_named_string(w, "mode",
				     ocf_get_cache_modename(ocf_cache_get_mode(vbdev->ocf_cache)));
	spdk_json_write_named_uint32(w, "cache_line_size",
				     ocf_cache_get_line_size(vbdev->ocf_cache));
	spdk_json_write_named_bool(w, "metadata_volatile",
				   vbdev->cfg.cache.metadata_volatile);

	return 0;
}

/* Emit the RPC call ("bdev_ocf_create") that would recreate this bdev,
 * used when saving the current configuration */
static void
vbdev_ocf_write_json_config(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	struct vbdev_ocf *vbdev = bdev->ctxt;

	spdk_json_write_object_begin(w);

	spdk_json_write_named_string(w, "method", "bdev_ocf_create");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "name", vbdev->name);
	spdk_json_write_named_string(w, "mode",
				     ocf_get_cache_modename(ocf_cache_get_mode(vbdev->ocf_cache)));
	spdk_json_write_named_string(w, "cache_bdev_name", vbdev->cache.name);
	spdk_json_write_named_string(w, "core_bdev_name", vbdev->core.name);
	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);
}
760 
/* Cache vbdev function table
 * Used by bdev layer; these callbacks implement the exported bdev's
 * destruct, io dispatch, channel and JSON-config behavior */
static struct spdk_bdev_fn_table cache_dev_fn_table = {
	.destruct = vbdev_ocf_destruct,
	.io_type_supported = vbdev_ocf_io_type_supported,
	.submit_request	= vbdev_ocf_submit_request,
	.get_io_channel	= vbdev_ocf_get_io_channel,
	.write_config_json = vbdev_ocf_write_json_config,
	.dump_info_json = vbdev_ocf_dump_info_json,
};
771 
772 /* Poller function for the OCF queue
773  * We execute OCF requests here synchronously */
774 static int
775 queue_poll(void *opaque)
776 {
777 	struct vbdev_ocf_qctx *qctx = opaque;
778 	uint32_t iono = ocf_queue_pending_io(qctx->queue);
779 	int i, max = spdk_min(32, iono);
780 
781 	for (i = 0; i < max; i++) {
782 		ocf_queue_run_single(qctx->queue);
783 	}
784 
785 	if (iono > 0) {
786 		return 1;
787 	} else {
788 		return 0;
789 	}
790 }
791 
/* Called during ocf_submit_io, ocf_purge*
 * and any other requests that need to submit io
 * Intentionally empty: requests are picked up by queue_poll() instead */
static void
vbdev_ocf_ctx_queue_kick(ocf_queue_t q)
{
}

/* OCF queue deinitialization
 * Called at ocf_cache_stop
 * Releases the channels and poller tied to this queue; the context is
 * freed only when it was heap-allocated by io_device_destroy_cb() */
static void
vbdev_ocf_ctx_queue_stop(ocf_queue_t q)
{
	struct vbdev_ocf_qctx *qctx = ocf_queue_get_priv(q);

	if (qctx) {
		spdk_put_io_channel(qctx->cache_ch);
		spdk_put_io_channel(qctx->core_ch);
		spdk_poller_unregister(&qctx->poller);
		if (qctx->allocated) {
			free(qctx);
		}
	}
}

/* Queue ops is an interface for running queue thread
 * stop() operation in called just before queue gets destroyed */
const struct ocf_queue_ops queue_ops = {
	.kick_sync = vbdev_ocf_ctx_queue_kick,
	.kick = vbdev_ocf_ctx_queue_kick,
	.stop = vbdev_ocf_ctx_queue_stop,
};
823 
824 /* Called on cache vbdev creation at every thread
825  * We allocate OCF queues here and SPDK poller for it */
826 static int
827 io_device_create_cb(void *io_device, void *ctx_buf)
828 {
829 	struct vbdev_ocf *vbdev = io_device;
830 	struct vbdev_ocf_qctx *qctx = ctx_buf;
831 	int rc;
832 
833 	rc = vbdev_ocf_queue_create(vbdev->ocf_cache, &qctx->queue, &queue_ops);
834 	if (rc) {
835 		return rc;
836 	}
837 
838 	ocf_queue_set_priv(qctx->queue, qctx);
839 
840 	qctx->vbdev      = vbdev;
841 	qctx->cache_ch   = spdk_bdev_get_io_channel(vbdev->cache.desc);
842 	qctx->core_ch    = spdk_bdev_get_io_channel(vbdev->core.desc);
843 	qctx->poller     = SPDK_POLLER_REGISTER(queue_poll, qctx, 0);
844 
845 	return rc;
846 }
847 
/* Called per thread
 * Put OCF queue and relaunch poller with new context to finish pending requests */
static void
io_device_destroy_cb(void *io_device, void *ctx_buf)
{
	/* Making a copy of context to use it after io channel will be destroyed */
	struct vbdev_ocf_qctx *copy = malloc(sizeof(*copy));
	struct vbdev_ocf_qctx *qctx = ctx_buf;

	if (copy) {
		/* NOTE(review): the queue's priv is switched to `copy` before
		 * memcpy fills it in — presumably safe because this runs on the
		 * queue's own thread; confirm no concurrent priv reader */
		ocf_queue_set_priv(qctx->queue, copy);
		memcpy(copy, qctx, sizeof(*copy));
		spdk_poller_unregister(&qctx->poller);
		/* New poller drains remaining requests; `allocated` makes
		 * vbdev_ocf_ctx_queue_stop() free the heap copy later */
		copy->poller = SPDK_POLLER_REGISTER(queue_poll, copy, 0);
		copy->allocated = true;
	} else {
		SPDK_ERRLOG("Unable to stop OCF queue properly: %s\n",
			    spdk_strerror(ENOMEM));
	}

	/* Drop this channel's reference; queue_ops.stop runs on last put */
	vbdev_ocf_queue_put(qctx->queue);
}
870 
871 /* OCF management queue deinitialization */
872 static void
873 vbdev_ocf_ctx_mngt_queue_stop(ocf_queue_t q)
874 {
875 	struct spdk_poller *poller = ocf_queue_get_priv(q);
876 
877 	if (poller) {
878 		spdk_poller_unregister(&poller);
879 	}
880 }
881 
882 static int
883 mngt_queue_poll(void *opaque)
884 {
885 	ocf_queue_t q = opaque;
886 	uint32_t iono = ocf_queue_pending_io(q);
887 	int i, max = spdk_min(32, iono);
888 
889 	for (i = 0; i < max; i++) {
890 		ocf_queue_run_single(q);
891 	}
892 
893 	if (iono > 0) {
894 		return 1;
895 	} else {
896 		return 0;
897 	}
898 }
899 
/* Management queue kick; intentionally empty - work is picked up by
 * mngt_queue_poll() */
static void
vbdev_ocf_ctx_mngt_queue_kick(ocf_queue_t q)
{
}

/* Queue ops is an interface for running queue thread
 * stop() operation in called just before queue gets destroyed */
const struct ocf_queue_ops mngt_queue_ops = {
	.kick_sync = NULL,
	.kick = vbdev_ocf_ctx_mngt_queue_kick,
	.stop = vbdev_ocf_ctx_mngt_queue_stop,
};
912 
/* Abort the current management pipeline: clear the "starting" flag and
 * run the given rollback path, finishing with status rc */
static void
vbdev_ocf_mngt_exit(struct vbdev_ocf *vbdev, vbdev_ocf_mngt_fn *rollback_path, int rc)
{
	vbdev->state.starting = false;
	vbdev_ocf_mngt_stop(vbdev, rollback_path, rc);
}
919 
920 /* Create exported spdk object */
921 static void
922 finish_register(struct vbdev_ocf *vbdev)
923 {
924 	int result;
925 
926 	/* Copy properties of the base bdev */
927 	vbdev->exp_bdev.blocklen = vbdev->core.bdev->blocklen;
928 	vbdev->exp_bdev.write_cache = vbdev->core.bdev->write_cache;
929 	vbdev->exp_bdev.required_alignment = vbdev->core.bdev->required_alignment;
930 
931 	vbdev->exp_bdev.name = vbdev->name;
932 	vbdev->exp_bdev.product_name = "SPDK OCF";
933 
934 	vbdev->exp_bdev.blockcnt = vbdev->core.bdev->blockcnt;
935 	vbdev->exp_bdev.ctxt = vbdev;
936 	vbdev->exp_bdev.fn_table = &cache_dev_fn_table;
937 	vbdev->exp_bdev.module = &ocf_if;
938 
939 	/* Finally register vbdev in SPDK */
940 	spdk_io_device_register(vbdev, io_device_create_cb, io_device_destroy_cb,
941 				sizeof(struct vbdev_ocf_qctx), vbdev->name);
942 	result = spdk_bdev_register(&vbdev->exp_bdev);
943 	if (result) {
944 		SPDK_ERRLOG("Could not register exposed bdev %s\n",
945 			    vbdev->name);
946 		vbdev_ocf_mngt_exit(vbdev, unregister_path_dirty, result);
947 		return;
948 	} else {
949 		vbdev->state.started = true;
950 	}
951 
952 	vbdev_ocf_mngt_continue(vbdev, result);
953 }
954 
955 static void
956 add_core_cmpl(ocf_cache_t cache, ocf_core_t core, void *priv, int error)
957 {
958 	struct vbdev_ocf *vbdev = priv;
959 
960 	ocf_mngt_cache_unlock(cache);
961 
962 	if (error) {
963 		SPDK_ERRLOG("Error %d, failed to add core device to cache instance %s,"
964 			    "starting rollback\n", error, vbdev->name);
965 		vbdev_ocf_mngt_exit(vbdev, unregister_path_dirty, error);
966 		return;
967 	} else {
968 		vbdev->ocf_core = core;
969 	}
970 
971 	vbdev_ocf_mngt_continue(vbdev, error);
972 }
973 
974 /* Try to lock cache, then add core */
975 static void
976 add_core_cache_lock_cmpl(ocf_cache_t cache, void *priv, int error)
977 {
978 	struct vbdev_ocf *vbdev = (struct vbdev_ocf *)priv;
979 
980 	if (error) {
981 		SPDK_ERRLOG("Error %d, can not lock cache instance %s,"
982 			    "starting rollback\n", error, vbdev->name);
983 		vbdev_ocf_mngt_exit(vbdev, unregister_path_dirty, error);
984 	}
985 	ocf_mngt_cache_add_core(vbdev->ocf_cache, &vbdev->cfg.core, add_core_cmpl, vbdev);
986 }
987 
/* Add core for existing OCF cache instance
 * Takes the cache lock first; the actual add happens in
 * add_core_cache_lock_cmpl() */
static void
add_core(struct vbdev_ocf *vbdev)
{
	ocf_mngt_cache_lock(vbdev->ocf_cache, add_core_cache_lock_cmpl, vbdev);
}
994 
995 static void
996 start_cache_cmpl(ocf_cache_t cache, void *priv, int error)
997 {
998 	struct vbdev_ocf *vbdev = priv;
999 
1000 	ocf_mngt_cache_unlock(cache);
1001 
1002 	if (error) {
1003 		SPDK_ERRLOG("Error %d during start cache %s, starting rollback\n",
1004 			    error, vbdev->name);
1005 		vbdev_ocf_mngt_exit(vbdev, unregister_path_dirty, error);
1006 		return;
1007 	}
1008 
1009 	vbdev_ocf_mngt_continue(vbdev, error);
1010 }
1011 
/* Create the OCF management queue, its SPDK poller (100us period), and
 * attach both to the cache. Returns 0 on success, negative errno on failure */
static int
create_management_queue(struct vbdev_ocf *vbdev)
{
	struct spdk_poller *mngt_poller;
	int rc;

	rc = vbdev_ocf_queue_create(vbdev->ocf_cache, &vbdev->cache_ctx->mngt_queue, &mngt_queue_ops);
	if (rc) {
		SPDK_ERRLOG("Unable to create mngt_queue: %d\n", rc);
		return rc;
	}

	mngt_poller = SPDK_POLLER_REGISTER(mngt_queue_poll, vbdev->cache_ctx->mngt_queue, 100);
	if (mngt_poller == NULL) {
		/* NOTE(review): the queue created above is not put on this
		 * path — confirm the caller's rollback releases it, otherwise
		 * this leaks the mngt_queue reference */
		SPDK_ERRLOG("Unable to initiate mngt request: %s", spdk_strerror(ENOMEM));
		return -ENOMEM;
	}

	/* Poller handle is stored as queue priv so that
	 * vbdev_ocf_ctx_mngt_queue_stop() can unregister it */
	ocf_queue_set_priv(vbdev->cache_ctx->mngt_queue, mngt_poller);
	ocf_mngt_cache_set_mngt_queue(vbdev->ocf_cache, vbdev->cache_ctx->mngt_queue);

	return 0;
}
1035 
/* Start OCF cache, attach caching device
 * Reuses a running cache instance with the same cache bdev when one
 * exists; otherwise starts a new instance and loads or attaches the
 * cache device. Failures start the dirty rollback path */
static void
start_cache(struct vbdev_ocf *vbdev)
{
	ocf_cache_t existing;
	int rc;

	if (vbdev->ocf_cache) {
		/* Cache already set up for this vbdev - nothing to start */
		vbdev_ocf_mngt_stop(vbdev, NULL, -EALREADY);
		return;
	}

	existing = get_other_cache_instance(vbdev);
	if (existing) {
		SPDK_NOTICELOG("OCF bdev %s connects to existing cache device %s\n",
			       vbdev->name, vbdev->cache.name);
		/* Share the instance and take a reference on its context */
		vbdev->ocf_cache = existing;
		vbdev->cache_ctx = ocf_cache_get_priv(existing);
		vbdev_ocf_cache_ctx_get(vbdev->cache_ctx);
		vbdev_ocf_mngt_continue(vbdev, 0);
		return;
	}

	vbdev->cache_ctx = calloc(1, sizeof(struct vbdev_ocf_cache_ctx));
	if (vbdev->cache_ctx == NULL) {
		vbdev_ocf_mngt_exit(vbdev, unregister_path_dirty, -ENOMEM);
		return;
	}

	vbdev_ocf_cache_ctx_get(vbdev->cache_ctx);
	pthread_mutex_init(&vbdev->cache_ctx->lock, NULL);

	rc = ocf_mngt_cache_start(vbdev_ocf_ctx, &vbdev->ocf_cache, &vbdev->cfg.cache);
	if (rc) {
		vbdev_ocf_mngt_exit(vbdev, unregister_path_dirty, rc);
		return;
	}

	/* Make the context reachable from the cache handle (see the
	 * "existing" branch above) */
	ocf_cache_set_priv(vbdev->ocf_cache, vbdev->cache_ctx);

	rc = create_management_queue(vbdev);
	if (rc) {
		SPDK_ERRLOG("Unable to create mngt_queue: %d\n", rc);
		vbdev_ocf_mngt_exit(vbdev, unregister_path_dirty, rc);
		return;
	}

	/* loadq: restore a previously serialized cache instead of attaching fresh */
	if (vbdev->cfg.loadq) {
		ocf_mngt_cache_load(vbdev->ocf_cache, &vbdev->cfg.device, start_cache_cmpl, vbdev);
	} else {
		ocf_mngt_cache_attach(vbdev->ocf_cache, &vbdev->cfg.device, start_cache_cmpl, vbdev);
	}
}
1089 
/* Procedures called during register operation
 * Executed in order by the management pipeline; NULL terminates the list */
vbdev_ocf_mngt_fn register_path[] = {
	start_cache,
	add_core,
	finish_register,
	NULL
};
1097 
1098 /* Start cache instance and register OCF bdev */
1099 static void
1100 register_vbdev(struct vbdev_ocf *vbdev, vbdev_ocf_mngt_callback cb, void *cb_arg)
1101 {
1102 	int rc;
1103 
1104 	if (!(vbdev->core.attached && vbdev->cache.attached) || vbdev->state.started) {
1105 		cb(-EPERM, vbdev, cb_arg);
1106 		return;
1107 	}
1108 
1109 	vbdev->state.starting = true;
1110 	rc = vbdev_ocf_mngt_start(vbdev, register_path, cb, cb_arg);
1111 	if (rc) {
1112 		cb(rc, vbdev, cb_arg);
1113 	}
1114 }
1115 
/* Init OCF configuration options
 * for core and cache devices */
static void
init_vbdev_config(struct vbdev_ocf *vbdev)
{
	struct vbdev_ocf_config *cfg = &vbdev->cfg;

	/* OCF identifies the cache by the vbdev name and the core by its
	 * bdev name */
	snprintf(cfg->cache.name, sizeof(cfg->cache.name), "%s", vbdev->name);
	snprintf(cfg->core.name, sizeof(cfg->core.name), "%s", vbdev->core.name);

	/* TODO [metadata]: make configurable with persistent
	 * metadata support */
	cfg->cache.metadata_volatile = false;

	/* TODO [cache line size]: make cache line size configurable
	 * Using standard 4KiB for now */
	cfg->cache.cache_line_size = ocf_cache_line_size_4;

	/* This are suggested values that
	 * should be sufficient for most use cases */
	cfg->cache.backfill.max_queue_size = 65536;
	cfg->cache.backfill.queue_unblock_size = 60000;

	/* TODO [cache line size] */
	cfg->device.cache_line_size = ocf_cache_line_size_4;
	cfg->device.force = true;
	cfg->device.perform_test = false;
	cfg->device.discard_on_start = false;

	vbdev->cfg.cache.locked = true;

	cfg->core.volume_type = SPDK_OBJECT;
	cfg->device.volume_type = SPDK_OBJECT;

	if (vbdev->cfg.loadq) {
		/* When doing cache_load(), we need to set try_add to true,
		 * otherwise OCF will interpret this core as new
		 * instead of the inactive one */
		vbdev->cfg.core.try_add = true;
	}

	/* Serialize bdev names in OCF UUID to interpret on future loads
	 * Core UUID is a triple of (core name, vbdev name, cache name)
	 * Cache UUID is cache bdev name */
	cfg->device.uuid.size = strlen(vbdev->cache.name) + 1;
	cfg->device.uuid.data = vbdev->cache.name;

	snprintf(vbdev->uuid, VBDEV_OCF_MD_MAX_LEN, "%s %s %s",
		 vbdev->core.name, vbdev->name, vbdev->cache.name);
	cfg->core.uuid.size = strlen(vbdev->uuid) + 1;
	cfg->core.uuid.data = vbdev->uuid;
	/* Split the space-separated triple into three consecutive
	 * NUL-terminated strings by overwriting the separators in place;
	 * uuid.size computed above still covers only the first segment
	 * plus its terminator */
	vbdev->uuid[strlen(vbdev->core.name)] = 0;
	vbdev->uuid[strlen(vbdev->core.name) + 1 + strlen(vbdev->name)] = 0;
}
1170 
1171 /* Allocate vbdev structure object and add it to the global list */
1172 static int
1173 init_vbdev(const char *vbdev_name,
1174 	   const char *cache_mode_name,
1175 	   const char *cache_name,
1176 	   const char *core_name,
1177 	   bool loadq)
1178 {
1179 	struct vbdev_ocf *vbdev;
1180 	int rc = 0;
1181 
1182 	if (spdk_bdev_get_by_name(vbdev_name) || vbdev_ocf_get_by_name(vbdev_name)) {
1183 		SPDK_ERRLOG("Device with name '%s' already exists\n", vbdev_name);
1184 		return -EPERM;
1185 	}
1186 
1187 	vbdev = calloc(1, sizeof(*vbdev));
1188 	if (!vbdev) {
1189 		goto error_mem;
1190 	}
1191 
1192 	vbdev->cache.parent = vbdev;
1193 	vbdev->core.parent = vbdev;
1194 	vbdev->cache.is_cache = true;
1195 	vbdev->core.is_cache = false;
1196 
1197 	if (cache_mode_name) {
1198 		vbdev->cfg.cache.cache_mode
1199 			= ocf_get_cache_mode(cache_mode_name);
1200 	} else if (!loadq) { /* In load path it is OK to pass NULL as cache mode */
1201 		SPDK_ERRLOG("No cache mode specified\n");
1202 		rc = -EINVAL;
1203 		goto error_free;
1204 	}
1205 	if (vbdev->cfg.cache.cache_mode < 0) {
1206 		SPDK_ERRLOG("Incorrect cache mode '%s'\n", cache_mode_name);
1207 		rc = -EINVAL;
1208 		goto error_free;
1209 	}
1210 
1211 	vbdev->name = strdup(vbdev_name);
1212 	if (!vbdev->name) {
1213 		goto error_mem;
1214 	}
1215 
1216 	vbdev->cache.name = strdup(cache_name);
1217 	if (!vbdev->cache.name) {
1218 		goto error_mem;
1219 	}
1220 
1221 	vbdev->core.name = strdup(core_name);
1222 	if (!vbdev->core.name) {
1223 		goto error_mem;
1224 	}
1225 
1226 	vbdev->cfg.loadq = loadq;
1227 	init_vbdev_config(vbdev);
1228 	TAILQ_INSERT_TAIL(&g_ocf_vbdev_head, vbdev, tailq);
1229 	return rc;
1230 
1231 error_mem:
1232 	rc = -ENOMEM;
1233 error_free:
1234 	free_vbdev(vbdev);
1235 	return rc;
1236 }
1237 
1238 /* Read configuration file at the start of SPDK application
1239  * This adds vbdevs to global list if some mentioned in config */
1240 static int
1241 vbdev_ocf_init(void)
1242 {
1243 	const char *vbdev_name, *modename, *cache_name, *core_name;
1244 	struct spdk_conf_section *sp;
1245 	int status;
1246 
1247 	status = vbdev_ocf_ctx_init();
1248 	if (status) {
1249 		SPDK_ERRLOG("OCF ctx initialization failed with=%d\n", status);
1250 		return status;
1251 	}
1252 
1253 	status = vbdev_ocf_volume_init();
1254 	if (status) {
1255 		vbdev_ocf_ctx_cleanup();
1256 		SPDK_ERRLOG("OCF volume initialization failed with=%d\n", status);
1257 		return status;
1258 	}
1259 
1260 	sp = spdk_conf_find_section(NULL, "OCF");
1261 	if (sp == NULL) {
1262 		return 0;
1263 	}
1264 
1265 	for (int i = 0; ; i++) {
1266 		if (!spdk_conf_section_get_nval(sp, "OCF", i)) {
1267 			break;
1268 		}
1269 
1270 		vbdev_name = spdk_conf_section_get_nmval(sp, "OCF", i, 0);
1271 		if (!vbdev_name) {
1272 			SPDK_ERRLOG("No vbdev name specified\n");
1273 			continue;
1274 		}
1275 
1276 		modename = spdk_conf_section_get_nmval(sp, "OCF", i, 1);
1277 		if (!modename) {
1278 			SPDK_ERRLOG("No modename specified for OCF vbdev '%s'\n", vbdev_name);
1279 			continue;
1280 		}
1281 
1282 		cache_name = spdk_conf_section_get_nmval(sp, "OCF", i, 2);
1283 		if (!cache_name) {
1284 			SPDK_ERRLOG("No cache device specified for OCF vbdev '%s'\n", vbdev_name);
1285 			continue;
1286 		}
1287 
1288 		core_name = spdk_conf_section_get_nmval(sp, "OCF", i, 3);
1289 		if (!core_name) {
1290 			SPDK_ERRLOG("No core devices specified for OCF vbdev '%s'\n", vbdev_name);
1291 			continue;
1292 		}
1293 
1294 		status = init_vbdev(vbdev_name, modename, cache_name, core_name, false);
1295 		if (status) {
1296 			SPDK_ERRLOG("Config initialization failed with code: %d\n", status);
1297 		}
1298 	}
1299 
1300 	return status;
1301 }
1302 
1303 /* Called after application shutdown started
1304  * Release memory of allocated structures here */
1305 static void
1306 vbdev_ocf_module_fini(void)
1307 {
1308 	struct vbdev_ocf *vbdev;
1309 
1310 	while ((vbdev = TAILQ_FIRST(&g_ocf_vbdev_head))) {
1311 		TAILQ_REMOVE(&g_ocf_vbdev_head, vbdev, tailq);
1312 		free_vbdev(vbdev);
1313 	}
1314 
1315 	vbdev_ocf_volume_cleanup();
1316 	vbdev_ocf_ctx_cleanup();
1317 }
1318 
1319 /* When base device gets unpluged this is called
1320  * We will unregister cache vbdev here
1321  * When cache device is removed, we delete every OCF bdev that used it */
1322 static void
1323 hotremove_cb(void *ctx)
1324 {
1325 	struct vbdev_ocf_base *base = ctx;
1326 	struct vbdev_ocf *vbdev;
1327 
1328 	if (!base->is_cache) {
1329 		if (base->parent->state.doing_finish) {
1330 			return;
1331 		}
1332 
1333 		SPDK_NOTICELOG("Deinitializing '%s' because its core device '%s' was removed\n",
1334 			       base->parent->name, base->name);
1335 		vbdev_ocf_delete(base->parent, NULL, NULL);
1336 		return;
1337 	}
1338 
1339 	TAILQ_FOREACH(vbdev, &g_ocf_vbdev_head, tailq) {
1340 		if (vbdev->state.doing_finish) {
1341 			continue;
1342 		}
1343 		if (strcmp(base->name, vbdev->cache.name) == 0) {
1344 			SPDK_NOTICELOG("Deinitializing '%s' because"
1345 				       " its cache device '%s' was removed\n",
1346 				       vbdev->name, base->name);
1347 			vbdev_ocf_delete(vbdev, NULL, NULL);
1348 		}
1349 	}
1350 }
1351 
1352 /* Open base SPDK bdev and claim it */
1353 static int
1354 attach_base(struct vbdev_ocf_base *base)
1355 {
1356 	int status;
1357 
1358 	if (base->attached) {
1359 		return -EALREADY;
1360 	}
1361 
1362 	/* If base cache bdev was already opened by other vbdev,
1363 	 * we just copy its descriptor here */
1364 	if (base->is_cache) {
1365 		struct vbdev_ocf_base *existing = get_other_cache_base(base);
1366 		if (existing) {
1367 			base->desc = existing->desc;
1368 			base->management_channel = existing->management_channel;
1369 			base->attached = true;
1370 			return 0;
1371 		}
1372 	}
1373 
1374 	status = spdk_bdev_open(base->bdev, true, hotremove_cb, base, &base->desc);
1375 	if (status) {
1376 		SPDK_ERRLOG("Unable to open device '%s' for writing\n", base->name);
1377 		return status;
1378 	}
1379 
1380 	status = spdk_bdev_module_claim_bdev(base->bdev, base->desc,
1381 					     &ocf_if);
1382 	if (status) {
1383 		SPDK_ERRLOG("Unable to claim device '%s'\n", base->name);
1384 		spdk_bdev_close(base->desc);
1385 		return status;
1386 	}
1387 
1388 	base->management_channel = spdk_bdev_get_io_channel(base->desc);
1389 	if (!base->management_channel) {
1390 		SPDK_ERRLOG("Unable to get io channel '%s'\n", base->name);
1391 		spdk_bdev_module_release_bdev(base->bdev);
1392 		spdk_bdev_close(base->desc);
1393 		return -ENOMEM;
1394 	}
1395 
1396 	/* Save the thread where the base device is opened */
1397 	base->thread = spdk_get_thread();
1398 
1399 	base->attached = true;
1400 	return status;
1401 }
1402 
1403 /* Attach base bdevs */
1404 static int
1405 attach_base_bdevs(struct vbdev_ocf *vbdev,
1406 		  struct spdk_bdev *cache_bdev,
1407 		  struct spdk_bdev *core_bdev)
1408 {
1409 	int rc = 0;
1410 
1411 	if (cache_bdev) {
1412 		vbdev->cache.bdev = cache_bdev;
1413 		rc |= attach_base(&vbdev->cache);
1414 	}
1415 
1416 	if (core_bdev) {
1417 		vbdev->core.bdev = core_bdev;
1418 		rc |= attach_base(&vbdev->core);
1419 	}
1420 
1421 	return rc;
1422 }
1423 
1424 /* Init and then start vbdev if all base devices are present */
1425 void
1426 vbdev_ocf_construct(const char *vbdev_name,
1427 		    const char *cache_mode_name,
1428 		    const char *cache_name,
1429 		    const char *core_name,
1430 		    bool loadq,
1431 		    void (*cb)(int, struct vbdev_ocf *, void *),
1432 		    void *cb_arg)
1433 {
1434 	int rc;
1435 	struct spdk_bdev *cache_bdev = spdk_bdev_get_by_name(cache_name);
1436 	struct spdk_bdev *core_bdev = spdk_bdev_get_by_name(core_name);
1437 	struct vbdev_ocf *vbdev;
1438 
1439 	rc = init_vbdev(vbdev_name, cache_mode_name, cache_name, core_name, loadq);
1440 	if (rc) {
1441 		cb(rc, NULL, cb_arg);
1442 		return;
1443 	}
1444 
1445 	vbdev = vbdev_ocf_get_by_name(vbdev_name);
1446 	if (vbdev == NULL) {
1447 		cb(-ENODEV, NULL, cb_arg);
1448 		return;
1449 	}
1450 
1451 	if (cache_bdev == NULL) {
1452 		SPDK_NOTICELOG("OCF bdev '%s' is waiting for cache device '%s' to connect\n",
1453 			       vbdev->name, cache_name);
1454 	}
1455 	if (core_bdev == NULL) {
1456 		SPDK_NOTICELOG("OCF bdev '%s' is waiting for core device '%s' to connect\n",
1457 			       vbdev->name, core_name);
1458 	}
1459 
1460 	rc = attach_base_bdevs(vbdev, cache_bdev, core_bdev);
1461 	if (rc) {
1462 		cb(rc, vbdev, cb_arg);
1463 		return;
1464 	}
1465 
1466 	if (core_bdev && cache_bdev) {
1467 		register_vbdev(vbdev, cb, cb_arg);
1468 	} else {
1469 		cb(0, vbdev, cb_arg);
1470 	}
1471 }
1472 
1473 /* This called if new device is created in SPDK application
1474  * If that device named as one of base bdevs of OCF vbdev,
1475  * claim and open them */
1476 static void
1477 vbdev_ocf_examine(struct spdk_bdev *bdev)
1478 {
1479 	const char *bdev_name = spdk_bdev_get_name(bdev);
1480 	struct vbdev_ocf *vbdev;
1481 
1482 	TAILQ_FOREACH(vbdev, &g_ocf_vbdev_head, tailq) {
1483 		if (vbdev->state.doing_finish) {
1484 			continue;
1485 		}
1486 
1487 		if (!strcmp(bdev_name, vbdev->cache.name)) {
1488 			attach_base_bdevs(vbdev, bdev, NULL);
1489 			continue;
1490 		}
1491 		if (!strcmp(bdev_name, vbdev->core.name)) {
1492 			attach_base_bdevs(vbdev, NULL, bdev);
1493 			break;
1494 		}
1495 	}
1496 	spdk_bdev_module_examine_done(&ocf_if);
1497 }
1498 
/* Context of an asynchronous OCF metadata probe on one bdev */
struct metadata_probe_ctx {
	struct vbdev_ocf_base base;	/* temporary base used for bottom adapter IO */
	ocf_volume_t volume;		/* temporary OCF volume wrapping the bdev */

	struct ocf_volume_uuid *core_uuids;	/* core UUIDs read from cache metadata */
	unsigned int uuid_count;		/* number of entries in core_uuids */

	int result;	/* first error recorded; 0 means success so far */
	int refcnt;	/* outstanding async references; freed when it reaches 0 */
};
1509 
/* Thread-message handler: close a bdev descriptor on its owning thread */
static void
_examine_ctx_put(void *ctx)
{
	spdk_bdev_close((struct spdk_bdev_desc *)ctx);
}
1517 
/* Drop one reference to the metadata probe context.
 * When the last reference is gone, release all temporary resources
 * (bdev descriptor, OCF volume, UUID buffers), report the result via
 * examine_done() and free the context itself. */
static void
examine_ctx_put(struct metadata_probe_ctx *ctx)
{
	unsigned int i;

	ctx->refcnt--;
	if (ctx->refcnt > 0) {
		return;
	}

	if (ctx->result) {
		SPDK_ERRLOG("OCF metadata probe for bdev '%s' failed with %d\n",
			    spdk_bdev_get_name(ctx->base.bdev), ctx->result);
	}

	if (ctx->base.desc) {
		/* Close the underlying bdev on its same opened thread. */
		if (ctx->base.thread && ctx->base.thread != spdk_get_thread()) {
			spdk_thread_send_msg(ctx->base.thread, _examine_ctx_put, ctx->base.desc);
		} else {
			spdk_bdev_close(ctx->base.desc);
		}
	}

	if (ctx->volume) {
		ocf_volume_destroy(ctx->volume);
	}

	if (ctx->core_uuids) {
		for (i = 0; i < ctx->uuid_count; i++) {
			free(ctx->core_uuids[i].data);
		}
	}
	/* free(NULL) is a no-op, so this is safe even if the array
	 * was never allocated */
	free(ctx->core_uuids);

	examine_done(ctx->result, NULL, ctx->base.bdev);
	free(ctx);
}
1556 
/* Completion of vbdev_ocf_construct() during the metadata probe path;
 * only drops the reference taken before construction started */
static void
metadata_probe_construct_cb(int rc, struct vbdev_ocf *vbdev, void *vctx)
{
	examine_ctx_put((struct metadata_probe_ctx *)vctx);
}
1564 
/* This is second callback for ocf_metadata_probe_cores()
 * Here we create vbdev configurations based on UUIDs */
static void
metadata_probe_cores_construct(void *priv, int error, unsigned int num_cores)
{
	struct metadata_probe_ctx *ctx = priv;
	const char *vbdev_name;
	const char *core_name;
	const char *cache_name;
	unsigned int i;

	if (error) {
		ctx->result = error;
		examine_ctx_put(ctx);
		return;
	}

	for (i = 0; i < num_cores; i++) {
		/* Each core UUID stores three consecutive NUL-terminated
		 * strings (core name, vbdev name, cache name) - the format
		 * written by init_vbdev_config() - so step past each
		 * terminator to reach the next field */
		core_name = ocf_uuid_to_str(&ctx->core_uuids[i]);
		vbdev_name = core_name + strlen(core_name) + 1;
		cache_name = vbdev_name + strlen(vbdev_name) + 1;

		if (strcmp(ctx->base.bdev->name, cache_name)) {
			SPDK_NOTICELOG("OCF metadata found on %s belongs to bdev named '%s'\n",
				       ctx->base.bdev->name, cache_name);
		}

		/* Reference released in metadata_probe_construct_cb() */
		ctx->refcnt++;
		vbdev_ocf_construct(vbdev_name, NULL, cache_name, core_name, true,
				    metadata_probe_construct_cb, ctx);
	}

	/* Drop the reference held by this callback */
	examine_ctx_put(ctx);
}
1599 
1600 /* This callback is called after OCF reads cores UUIDs from cache metadata
1601  * Here we allocate memory for those UUIDs and call ocf_metadata_probe_cores() again */
1602 static void
1603 metadata_probe_cores_get_num(void *priv, int error, unsigned int num_cores)
1604 {
1605 	struct metadata_probe_ctx *ctx = priv;
1606 	unsigned int i;
1607 
1608 	if (error) {
1609 		ctx->result = error;
1610 		examine_ctx_put(ctx);
1611 		return;
1612 	}
1613 
1614 	ctx->uuid_count = num_cores;
1615 	ctx->core_uuids = calloc(num_cores, sizeof(struct ocf_volume_uuid));
1616 	if (!ctx->core_uuids) {
1617 		ctx->result = -ENOMEM;
1618 		examine_ctx_put(ctx);
1619 		return;
1620 	}
1621 
1622 	for (i = 0; i < ctx->uuid_count; i++) {
1623 		ctx->core_uuids[i].size = OCF_VOLUME_UUID_MAX_SIZE;
1624 		ctx->core_uuids[i].data = malloc(OCF_VOLUME_UUID_MAX_SIZE);
1625 		if (!ctx->core_uuids[i].data) {
1626 			ctx->result = -ENOMEM;
1627 			examine_ctx_put(ctx);
1628 			return;
1629 		}
1630 	}
1631 
1632 	ocf_metadata_probe_cores(vbdev_ocf_ctx, ctx->volume, ctx->core_uuids, ctx->uuid_count,
1633 				 metadata_probe_cores_construct, ctx);
1634 }
1635 
/* Completion of ocf_metadata_probe(); on success, continue by querying
 * how many core devices the cache metadata describes */
static void
metadata_probe_cb(void *priv, int rc,
		  struct ocf_metadata_probe_status *status)
{
	struct metadata_probe_ctx *ctx = priv;

	if (rc) {
		/* OCF_ERR_NO_METADATA means the device simply holds no cache
		 * metadata - not an error for examine, so result stays 0 */
		if (rc != -OCF_ERR_NO_METADATA) {
			ctx->result = rc;
		}
		examine_ctx_put(ctx);
		return;
	}

	ocf_metadata_probe_cores(vbdev_ocf_ctx, ctx->volume, NULL, 0,
				 metadata_probe_cores_get_num, ctx);
}
1654 
/* This is called after vbdev_ocf_examine
 * It allows to delay application initialization
 * until all OCF bdevs get registered
 * If vbdev has all of its base devices it starts asynchronously here
 * We first check if bdev appears in configuration,
 * if not we do metadata_probe() to create its configuration from bdev metadata */
static void
vbdev_ocf_examine_disk(struct spdk_bdev *bdev)
{
	const char *bdev_name = spdk_bdev_get_name(bdev);
	struct vbdev_ocf *vbdev;
	struct metadata_probe_ctx *ctx;
	bool created_from_config = false;
	int rc;

	/* Hold the examine open for the duration of this function;
	 * every return path below balances it with examine_done() */
	examine_start(bdev);

	TAILQ_FOREACH(vbdev, &g_ocf_vbdev_head, tailq) {
		if (vbdev->state.doing_finish || vbdev->state.started) {
			continue;
		}

		if (!strcmp(bdev_name, vbdev->cache.name)) {
			/* Extra reference for the async registration;
			 * examine_done is its completion callback */
			examine_start(bdev);
			register_vbdev(vbdev, examine_done, bdev);
			created_from_config = true;
			continue;
		}
		if (!strcmp(bdev_name, vbdev->core.name)) {
			examine_start(bdev);
			register_vbdev(vbdev, examine_done, bdev);
			/* Balance the examine_start() at the top of this function */
			examine_done(0, NULL, bdev);
			return;
		}
	}

	/* If device was discovered during config we do not check for metadata */
	if (created_from_config) {
		examine_done(0, NULL, bdev);
		return;
	}

	/* Metadata probe path
	 * We create temporary OCF volume and a temporary base structure
	 * to use them for ocf_metadata_probe() and for bottom adapter IOs
	 * Then we get UUIDs of core devices and create configurations based on them */
	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		examine_done(-ENOMEM, NULL, bdev);
		return;
	}

	ctx->base.bdev = bdev;
	ctx->refcnt = 1;

	rc = spdk_bdev_open(ctx->base.bdev, true, NULL, NULL, &ctx->base.desc);
	if (rc) {
		ctx->result = rc;
		examine_ctx_put(ctx);
		return;
	}

	rc = ocf_ctx_volume_create(vbdev_ocf_ctx, &ctx->volume, NULL, SPDK_OBJECT);
	if (rc) {
		ctx->result = rc;
		examine_ctx_put(ctx);
		return;
	}

	rc = ocf_volume_open(ctx->volume, &ctx->base);
	if (rc) {
		ctx->result = rc;
		examine_ctx_put(ctx);
		return;
	}

	/* Save the thread where the base device is opened */
	ctx->base.thread = spdk_get_thread();

	ocf_metadata_probe(vbdev_ocf_ctx, ctx->volume, metadata_probe_cb, ctx);
}
1736 
/* get_ctx_size hook of the module table: size of the per-IO context
 * the bdev layer reserves for this module (struct bdev_ocf_data) */
static int
vbdev_ocf_get_ctx_size(void)
{
	return sizeof(struct bdev_ocf_data);
}
1742 
/* fini_start hook of the module table - invoked when application shutdown
 * begins; only records that fact in a global flag */
static void
fini_start(void)
{
	g_fini_started = true;
}
1748 
/* Module-global function table
 * Does not relate to vbdev instances */
static struct spdk_bdev_module ocf_if = {
	.name = "ocf",
	.module_init = vbdev_ocf_init,		/* parse config, init OCF ctx/volume */
	.fini_start = fini_start,		/* mark shutdown in progress */
	.module_fini = vbdev_ocf_module_fini,	/* free vbdev list and OCF state */
	.config_text = NULL,
	.get_ctx_size = vbdev_ocf_get_ctx_size,	/* per-IO context size */
	.examine_config = vbdev_ocf_examine,	/* claim/open matching base bdevs */
	.examine_disk   = vbdev_ocf_examine_disk,	/* register vbdevs / probe metadata */
};
SPDK_BDEV_MODULE_REGISTER(ocf, &ocf_if);

SPDK_LOG_REGISTER_COMPONENT("vbdev_ocf", SPDK_TRACE_VBDEV_OCF)
1764