xref: /spdk/lib/ftl/ftl_init.c (revision 712a3f69d32632bf6c862f00200f7f437d3f7529)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 #include "spdk/nvme.h"
36 #include "spdk/io_channel.h"
37 #include "spdk/string.h"
38 #include "spdk/likely.h"
39 #include "spdk_internal/log.h"
40 #include "spdk/ftl.h"
41 #include "spdk/likely.h"
42 #include "spdk/string.h"
43 #include "spdk/bdev_zone.h"
44 #include "spdk/bdev_module.h"
45 
46 #include "ftl_core.h"
47 #include "ftl_io.h"
48 #include "ftl_reloc.h"
49 #include "ftl_rwb.h"
50 #include "ftl_band.h"
51 #include "ftl_debug.h"
52 
53 #define FTL_CORE_RING_SIZE	4096
54 #define FTL_INIT_TIMEOUT	30
55 #define FTL_NSID		1
56 #define FTL_ZONE_INFO_COUNT	64
57 
/* Dummy bdev module used to claim bdevs (base device and write buffer cache)
 * so no other module can open them while FTL owns them. */
static struct spdk_bdev_module g_ftl_bdev_module = {
	.name	= "ftl_lib",
};
62 
63 typedef void (*spdk_ftl_init_fn)(struct spdk_ftl_dev *, void *, int);
64 
/* Context carried through the asynchronous device init/teardown sequence.
 * Allocated at the start of initialization and freed once the user callback
 * has been invoked (see ftl_dev_free_init_ctx()). */
struct ftl_dev_init_ctx {
	/* Owner */
	struct spdk_ftl_dev		*dev;
	/* Initial arguments */
	struct spdk_ftl_dev_init_opts	opts;
	/* IO channel for zone info retrieving */
	struct spdk_io_channel		*ioch;
	/* Buffer for reading zone info  */
	struct spdk_bdev_zone_info	info[FTL_ZONE_INFO_COUNT];
	/* Currently read zone */
	size_t				zone_id;
	/* User's callback */
	spdk_ftl_init_fn		cb_fn;
	/* Callback's argument */
	void				*cb_arg;
	/* Thread to call the callback on */
	struct spdk_thread		*thread;
	/* Poller to check if the device has been destroyed/initialized */
	struct spdk_poller		*poller;
	/* Status to return for halt completion callback */
	int				halt_complete_status;
};
87 
/* Global registry of initialized FTL devices, protected by g_ftl_queue_lock.
 * Devices are added on init completion and removed in ftl_dev_free_sync(). */
static STAILQ_HEAD(, spdk_ftl_dev)	g_ftl_queue = STAILQ_HEAD_INITIALIZER(g_ftl_queue);
static pthread_mutex_t			g_ftl_queue_lock = PTHREAD_MUTEX_INITIALIZER;
/* Default configuration copied into each device by spdk_ftl_conf_init_defaults(). */
static const struct spdk_ftl_conf	g_default_conf = {
	.limits = {
		/* 5 free bands  / 0 % host writes */
		[SPDK_FTL_LIMIT_CRIT]  = { .thld = 5,  .limit = 0 },
		/* 10 free bands / 5 % host writes */
		[SPDK_FTL_LIMIT_HIGH]  = { .thld = 10, .limit = 5 },
		/* 20 free bands / 40 % host writes */
		[SPDK_FTL_LIMIT_LOW]   = { .thld = 20, .limit = 40 },
		/* 40 free bands / 100 % host writes - defrag starts running */
		[SPDK_FTL_LIMIT_START] = { .thld = 40, .limit = 100 },
	},
	/* 10 percent valid blocks */
	.invalid_thld = 10,
	/* 20% spare blocks */
	.lba_rsvd = 20,
	/* 6M write buffer */
	.rwb_size = 6 * 1024 * 1024,
	/* 90% band fill threshold */
	.band_thld = 90,
	/* Max 32 IO depth per band relocate */
	.max_reloc_qdepth = 32,
	/* Max 3 active band relocates */
	.max_active_relocs = 3,
	/* IO pool size per user thread (this should be adjusted to thread IO qdepth) */
	.user_io_pool_size = 2048,
	/* Number of interleaving units per ws_opt */
	/* 1 for default and 3 for 3D TLC NAND */
	.num_interleave_units = 1,
	/*
	 * If clear ftl will return error when restoring after a dirty shutdown
	 * If set, last band will be padded, ftl will restore based only on closed bands - this
	 * will result in lost data after recovery.
	 */
	.allow_open_bands = false,
	.nv_cache = {
		/* Maximum number of concurrent requests */
		.max_request_cnt = 2048,
		/* Maximum number of blocks per request */
		.max_request_size = 16,
	}
};
131 
132 static int
133 ftl_band_init_md(struct ftl_band *band)
134 {
135 	struct ftl_lba_map *lba_map = &band->lba_map;
136 
137 	lba_map->vld = spdk_bit_array_create(ftl_get_num_blocks_in_band(band->dev));
138 	if (!lba_map->vld) {
139 		return -ENOMEM;
140 	}
141 
142 	pthread_spin_init(&lba_map->lock, PTHREAD_PROCESS_PRIVATE);
143 	ftl_band_md_clear(band);
144 	return 0;
145 }
146 
147 static int
148 ftl_check_conf(const struct spdk_ftl_dev *dev, const struct spdk_ftl_conf *conf)
149 {
150 	size_t i;
151 
152 	if (conf->invalid_thld >= 100) {
153 		return -1;
154 	}
155 	if (conf->lba_rsvd >= 100) {
156 		return -1;
157 	}
158 	if (conf->lba_rsvd == 0) {
159 		return -1;
160 	}
161 	if (conf->rwb_size == 0) {
162 		return -1;
163 	}
164 	if (conf->rwb_size % FTL_BLOCK_SIZE != 0) {
165 		return -1;
166 	}
167 	if (dev->xfer_size % conf->num_interleave_units != 0) {
168 		return -1;
169 	}
170 
171 	for (i = 0; i < SPDK_FTL_LIMIT_MAX; ++i) {
172 		if (conf->limits[i].limit > 100) {
173 			return -1;
174 		}
175 	}
176 
177 	return 0;
178 }
179 
180 static int
181 ftl_dev_init_bands(struct spdk_ftl_dev *dev)
182 {
183 	struct ftl_band *band, *pband;
184 	unsigned int i;
185 	int rc = 0;
186 
187 	LIST_INIT(&dev->free_bands);
188 	LIST_INIT(&dev->shut_bands);
189 
190 	dev->num_free = 0;
191 	dev->bands = calloc(ftl_get_num_bands(dev), sizeof(*dev->bands));
192 	if (!dev->bands) {
193 		return -1;
194 	}
195 
196 	for (i = 0; i < ftl_get_num_bands(dev); ++i) {
197 		band = &dev->bands[i];
198 		band->id = i;
199 		band->dev = dev;
200 		band->state = FTL_BAND_STATE_CLOSED;
201 
202 		if (LIST_EMPTY(&dev->shut_bands)) {
203 			LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
204 		} else {
205 			LIST_INSERT_AFTER(pband, band, list_entry);
206 		}
207 		pband = band;
208 
209 		CIRCLEQ_INIT(&band->zones);
210 		band->zone_buf = calloc(ftl_get_num_punits(dev), sizeof(*band->zone_buf));
211 		if (!band->zone_buf) {
212 			SPDK_ERRLOG("Failed to allocate block state table for band: [%u]\n", i);
213 			rc = -1;
214 			break;
215 		}
216 
217 		rc = ftl_band_init_md(band);
218 		if (rc) {
219 			SPDK_ERRLOG("Failed to initialize metadata structures for band [%u]\n", i);
220 			break;
221 		}
222 
223 		band->reloc_bitmap = spdk_bit_array_create(ftl_get_num_bands(dev));
224 		if (!band->reloc_bitmap) {
225 			SPDK_ERRLOG("Failed to allocate band relocation bitmap\n");
226 			break;
227 		}
228 	}
229 
230 	return rc;
231 }
232 
/* Bdev event callback registered via spdk_bdev_open_ext() for both the base
 * bdev and the NV cache bdev. Hot removal is not supported (hence assert). */
static void
ftl_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_ftl_dev *dev = event_ctx;

	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		assert(0);
		break;
	case SPDK_BDEV_EVENT_MEDIA_MANAGEMENT:
		/* Media management events only make sense on the base bdev. */
		assert(bdev == spdk_bdev_desc_get_bdev(dev->base_bdev_desc));
		ftl_get_media_events(dev);
	/* fallthrough */
	default:
		break;
	}
}
249 
250 static int
251 ftl_dev_init_nv_cache(struct spdk_ftl_dev *dev, const char *bdev_name)
252 {
253 	struct spdk_bdev *bdev;
254 	struct spdk_ftl_conf *conf = &dev->conf;
255 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
256 	char pool_name[128];
257 	int rc;
258 
259 	if (!bdev_name) {
260 		return 0;
261 	}
262 
263 	bdev = spdk_bdev_get_by_name(bdev_name);
264 	if (!bdev) {
265 		SPDK_ERRLOG("Unable to find bdev: %s\n", bdev_name);
266 		return -1;
267 	}
268 
269 	if (spdk_bdev_open_ext(bdev_name, true, ftl_bdev_event_cb,
270 			       dev, &nv_cache->bdev_desc)) {
271 		SPDK_ERRLOG("Unable to open bdev: %s\n", bdev_name);
272 		return -1;
273 	}
274 
275 	if (spdk_bdev_module_claim_bdev(bdev, nv_cache->bdev_desc, &g_ftl_bdev_module)) {
276 		spdk_bdev_close(nv_cache->bdev_desc);
277 		nv_cache->bdev_desc = NULL;
278 		SPDK_ERRLOG("Unable to claim bdev %s\n", bdev_name);
279 		return -1;
280 	}
281 
282 	SPDK_INFOLOG(SPDK_LOG_FTL_INIT, "Using %s as write buffer cache\n",
283 		     spdk_bdev_get_name(bdev));
284 
285 	if (spdk_bdev_get_block_size(bdev) != FTL_BLOCK_SIZE) {
286 		SPDK_ERRLOG("Unsupported block size (%d)\n", spdk_bdev_get_block_size(bdev));
287 		return -1;
288 	}
289 
290 	if (!spdk_bdev_is_md_separate(bdev)) {
291 		SPDK_ERRLOG("Bdev %s doesn't support separate metadata buffer IO\n",
292 			    spdk_bdev_get_name(bdev));
293 		return -1;
294 	}
295 
296 	if (spdk_bdev_get_md_size(bdev) < sizeof(uint64_t)) {
297 		SPDK_ERRLOG("Bdev's %s metadata is too small (%"PRIu32")\n",
298 			    spdk_bdev_get_name(bdev), spdk_bdev_get_md_size(bdev));
299 		return -1;
300 	}
301 
302 	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
303 		SPDK_ERRLOG("Unsupported DIF type used by bdev %s\n",
304 			    spdk_bdev_get_name(bdev));
305 		return -1;
306 	}
307 
308 	/* The cache needs to be capable of storing at least two full bands. This requirement comes
309 	 * from the fact that cache works as a protection against power loss, so before the data
310 	 * inside the cache can be overwritten, the band it's stored on has to be closed. Plus one
311 	 * extra block is needed to store the header.
312 	 */
313 	if (spdk_bdev_get_num_blocks(bdev) < ftl_get_num_blocks_in_band(dev) * 2 + 1) {
314 		SPDK_ERRLOG("Insufficient number of blocks for write buffer cache (available: %"
315 			    PRIu64", required: %"PRIu64")\n", spdk_bdev_get_num_blocks(bdev),
316 			    ftl_get_num_blocks_in_band(dev) * 2 + 1);
317 		return -1;
318 	}
319 
320 	rc = snprintf(pool_name, sizeof(pool_name), "ftl-nvpool-%p", dev);
321 	if (rc < 0 || rc >= 128) {
322 		return -1;
323 	}
324 
325 	nv_cache->md_pool = spdk_mempool_create(pool_name, conf->nv_cache.max_request_cnt,
326 						spdk_bdev_get_md_size(bdev) *
327 						conf->nv_cache.max_request_size,
328 						SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
329 						SPDK_ENV_SOCKET_ID_ANY);
330 	if (!nv_cache->md_pool) {
331 		SPDK_ERRLOG("Failed to initialize non-volatile cache metadata pool\n");
332 		return -1;
333 	}
334 
335 	nv_cache->dma_buf = spdk_dma_zmalloc(FTL_BLOCK_SIZE, spdk_bdev_get_buf_align(bdev), NULL);
336 	if (!nv_cache->dma_buf) {
337 		SPDK_ERRLOG("Memory allocation failure\n");
338 		return -1;
339 	}
340 
341 	if (pthread_spin_init(&nv_cache->lock, PTHREAD_PROCESS_PRIVATE)) {
342 		SPDK_ERRLOG("Failed to initialize cache lock\n");
343 		return -1;
344 	}
345 
346 	nv_cache->current_addr = FTL_NV_CACHE_DATA_OFFSET;
347 	nv_cache->num_data_blocks = spdk_bdev_get_num_blocks(bdev) - 1;
348 	nv_cache->num_available = nv_cache->num_data_blocks;
349 	nv_cache->ready = false;
350 
351 	return 0;
352 }
353 
/* Public API: fill *conf with the library's default configuration. */
void
spdk_ftl_conf_init_defaults(struct spdk_ftl_conf *conf)
{
	*conf = g_default_conf;
}
359 
/* Mempool element constructor for LBA map requests: pre-allocates the
 * per-request segment bit array (one bit per FTL_NUM_LBA_IN_BLOCK LBAs).
 * NOTE(review): spdk_bit_array_create() failure leaves request->segments
 * NULL — constructors cannot report errors, so users of the pool presumably
 * must tolerate/check a NULL bit array. */
static void
ftl_lba_map_request_ctor(struct spdk_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
{
	struct ftl_lba_map_request *request = obj;
	struct spdk_ftl_dev *dev = opaque;

	request->segments = spdk_bit_array_create(spdk_divide_round_up(
				    ftl_get_num_blocks_in_band(dev), FTL_NUM_LBA_IN_BLOCK));
}
369 
370 static int
371 ftl_init_media_events_pool(struct spdk_ftl_dev *dev)
372 {
373 	char pool_name[128];
374 	int rc;
375 
376 	rc = snprintf(pool_name, sizeof(pool_name), "ftl-media-%p", dev);
377 	if (rc < 0 || rc >= (int)sizeof(pool_name)) {
378 		SPDK_ERRLOG("Failed to create media pool name\n");
379 		return -1;
380 	}
381 
382 	dev->media_events_pool = spdk_mempool_create(pool_name, 1024,
383 				 sizeof(struct ftl_media_event),
384 				 SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
385 				 SPDK_ENV_SOCKET_ID_ANY);
386 	if (!dev->media_events_pool) {
387 		SPDK_ERRLOG("Failed to create media events pool\n");
388 		return -1;
389 	}
390 
391 	return 0;
392 }
393 
/* Create the two mempools backing LBA map handling: a buffer pool for band
 * open/close metadata and a request pool sized for the relocation pipeline.
 * Returns 0 on success, negative errno on failure. */
static int
ftl_init_lba_map_pools(struct spdk_ftl_dev *dev)
{
#define POOL_NAME_LEN 128
	char pool_name[POOL_NAME_LEN];
	int rc;

	rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ftl-lba-pool");
	if (rc < 0 || rc >= POOL_NAME_LEN) {
		return -ENAMETOOLONG;
	}

	/* We need to reserve at least 2 buffers for band close / open sequence
	 * alone, plus additional (8) buffers for handling write errors.
	 * TODO: This memory pool is utilized only by core thread - it introduce
	 * unnecessary overhead and should be replaced by different data structure.
	 */
	dev->lba_pool = spdk_mempool_create(pool_name, 2 + 8,
					    ftl_lba_map_pool_elem_size(dev),
					    SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					    SPDK_ENV_SOCKET_ID_ANY);
	if (!dev->lba_pool) {
		return -ENOMEM;
	}

	rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ftl-lbareq-pool");
	if (rc < 0 || rc >= POOL_NAME_LEN) {
		return -ENAMETOOLONG;
	}

	/* Sized for the worst case of all relocations being in flight at once;
	 * each element's segment bit array is allocated by the ctor. */
	dev->lba_request_pool = spdk_mempool_create_ctor(pool_name,
				dev->conf.max_reloc_qdepth * dev->conf.max_active_relocs,
				sizeof(struct ftl_lba_map_request),
				SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
				SPDK_ENV_SOCKET_ID_ANY,
				ftl_lba_map_request_ctor,
				dev);
	if (!dev->lba_request_pool) {
		return -ENOMEM;
	}

	return 0;
}
437 
/* Initialize the write pointer and flush tracking lists to empty. */
static void
ftl_init_wptr_list(struct spdk_ftl_dev *dev)
{
	LIST_INIT(&dev->wptr_list);
	LIST_INIT(&dev->flush_list);
	LIST_INIT(&dev->band_flush_list);
}
445 
446 static size_t
447 ftl_dev_band_max_seq(struct spdk_ftl_dev *dev)
448 {
449 	struct ftl_band *band;
450 	size_t seq = 0;
451 
452 	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
453 		if (band->seq > seq) {
454 			seq = band->seq;
455 		}
456 	}
457 
458 	return seq;
459 }
460 
/* Runs on the core thread: seed the device sequence counter, move bands with
 * no valid data to the free list, and resume relocation. */
static void
_ftl_init_bands_state(void *ctx)
{
	struct ftl_band *band, *temp_band;
	struct spdk_ftl_dev *dev = ctx;

	dev->seq = ftl_dev_band_max_seq(dev);

	/* SAFE variant required: ftl_band_set_state() may move the band off
	 * the shut_bands list while we iterate. */
	LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
		if (!band->lba_map.num_vld) {
			ftl_band_set_state(band, FTL_BAND_STATE_FREE);
		}
	}

	ftl_reloc_resume(dev->reloc);
	/* Clear the limit applications as they're incremented incorrectly by */
	/* the initialization code */
	memset(dev->stats.limits, 0, sizeof(dev->stats.limits));
}
480 
481 static int
482 ftl_init_num_free_bands(struct spdk_ftl_dev *dev)
483 {
484 	struct ftl_band *band;
485 	int cnt = 0;
486 
487 	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
488 		if (band->num_zones && !band->lba_map.num_vld) {
489 			cnt++;
490 		}
491 	}
492 	return cnt;
493 }
494 
/* Verify at least one band is immediately reusable, then hand the rest of the
 * band-state setup off to the core thread. Returns 0 on success, -1 if the
 * device has no free bands at all. */
static int
ftl_init_bands_state(struct spdk_ftl_dev *dev)
{
	/* TODO: Should we abort initialization or expose read only device */
	/* if there is no free bands? */
	/* If we abort initialization should we depend on condition that */
	/* we have no free bands or should we have some minimal number of */
	/* free bands? */
	if (!ftl_init_num_free_bands(dev)) {
		return -1;
	}

	spdk_thread_send_msg(ftl_get_core_thread(dev), _ftl_init_bands_state, dev);
	return 0;
}
510 
/* Runs on the designated core thread: register the main FTL poller and grab
 * an IO channel for the device. Poller registration failure is fatal here
 * (no way to report back), hence the assert. */
static void
_ftl_dev_init_core_thread(void *ctx)
{
	struct ftl_thread *thread = ctx;
	struct spdk_ftl_dev *dev = thread->dev;

	thread->poller = spdk_poller_register(thread->poller_fn, thread, thread->period_us);
	if (!thread->poller) {
		SPDK_ERRLOG("Unable to register poller\n");
		assert(0);
	}

	thread->ioch = spdk_get_io_channel(dev);
}
525 
/* Record the core thread configuration and asynchronously finish its setup
 * (poller + IO channel) on that thread. Returns -1 if no core thread was
 * supplied in the init options. */
static int
ftl_dev_init_core_thread(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
{
	struct ftl_thread *thread = &dev->core_thread;

	if (!opts->core_thread) {
		return -1;
	}

	thread->dev = dev;
	thread->poller_fn = ftl_task_core;
	thread->thread = opts->core_thread;
	/* period_us == 0: poll on every iteration of the thread's loop. */
	thread->period_us = 0;

	spdk_thread_send_msg(opts->core_thread, _ftl_dev_init_core_thread, thread);
	return 0;
}
543 
/* Release a thread's IO channel and clear its bookkeeping. The poller must
 * already have been unregistered by the caller (asserted). */
static void
ftl_dev_free_thread(struct spdk_ftl_dev *dev, struct ftl_thread *thread)
{
	assert(thread->poller == NULL);

	spdk_put_io_channel(thread->ioch);
	thread->thread = NULL;
	thread->ioch = NULL;
}
553 
554 static int
555 ftl_dev_l2p_alloc(struct spdk_ftl_dev *dev)
556 {
557 	size_t addr_size;
558 
559 	if (dev->num_lbas == 0) {
560 		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Invalid l2p table size\n");
561 		return -1;
562 	}
563 
564 	if (dev->l2p) {
565 		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "L2p table already allocated\n");
566 		return -1;
567 	}
568 
569 	addr_size = dev->addr_len >= 32 ? 8 : 4;
570 	dev->l2p = malloc(dev->num_lbas * addr_size);
571 	if (!dev->l2p) {
572 		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Failed to allocate l2p table\n");
573 		return -1;
574 	}
575 
576 	memset(dev->l2p, FTL_ADDR_INVALID, dev->num_lbas * addr_size);
577 
578 	return 0;
579 }
580 
581 static void
582 ftl_dev_free_init_ctx(struct ftl_dev_init_ctx *init_ctx)
583 {
584 	if (!init_ctx) {
585 		return;
586 	}
587 
588 	if (init_ctx->ioch) {
589 		spdk_put_io_channel(init_ctx->ioch);
590 	}
591 
592 	free(init_ctx);
593 }
594 
/* Runs on the user's thread: deliver the success callback and dispose of the
 * init context. */
static void
ftl_call_init_complete_cb(void *ctx)
{
	struct ftl_dev_init_ctx *init_ctx = ctx;
	struct spdk_ftl_dev *dev = init_ctx->dev;

	if (init_ctx->cb_fn != NULL) {
		init_ctx->cb_fn(dev, init_ctx->cb_arg, 0);
	}

	ftl_dev_free_init_ctx(init_ctx);
}
607 
/* Final step of successful initialization: publish the device in the global
 * registry, mark it initialized, and schedule the user callback on the
 * thread that requested the init. */
static void
ftl_init_complete(struct ftl_dev_init_ctx *init_ctx)
{
	struct spdk_ftl_dev *dev = init_ctx->dev;

	pthread_mutex_lock(&g_ftl_queue_lock);
	STAILQ_INSERT_HEAD(&g_ftl_queue, dev, stailq);
	pthread_mutex_unlock(&g_ftl_queue_lock);

	dev->initialized = 1;

	spdk_thread_send_msg(init_ctx->thread, ftl_call_init_complete_cb, init_ctx);
}
621 
/* Completion callback for the device teardown started by ftl_init_fail():
 * report failure to the user (NULL device, -ENODEV regardless of the
 * teardown status) and free the init context. */
static void
ftl_init_fail_cb(struct spdk_ftl_dev *dev, void *ctx, int status)
{
	struct ftl_dev_init_ctx *init_ctx = ctx;

	if (init_ctx->cb_fn != NULL) {
		init_ctx->cb_fn(NULL, init_ctx->cb_arg, -ENODEV);
	}

	ftl_dev_free_init_ctx(init_ctx);
}
633 
634 static int _spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg,
635 			      struct spdk_thread *thread);
636 
/* Abort initialization: tear down the partially constructed device and let
 * ftl_init_fail_cb() notify the user. A teardown that cannot even be started
 * is unrecoverable here, hence the assert. */
static void
ftl_init_fail(struct ftl_dev_init_ctx *init_ctx)
{
	if (_spdk_ftl_dev_free(init_ctx->dev, ftl_init_fail_cb, init_ctx, init_ctx->thread)) {
		SPDK_ERRLOG("Unable to free the device\n");
		assert(0);
	}
}
645 
/* Completion of the NV cache metadata header write — the last step of a
 * fresh (SPDK_FTL_MODE_CREATE) initialization with a cache attached. */
static void
ftl_write_nv_cache_md_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct ftl_dev_init_ctx *init_ctx = cb_arg;
	struct spdk_ftl_dev *dev = init_ctx->dev;

	spdk_bdev_free_io(bdev_io);
	if (spdk_unlikely(!success)) {
		SPDK_ERRLOG("Writing non-volatile cache's metadata header failed\n");
		ftl_init_fail(init_ctx);
		return;
	}

	dev->nv_cache.ready = true;
	ftl_init_complete(init_ctx);
}
662 
/* Completion of the NV cache scrub: start phase 1 and persist the freshly
 * initialized cache header. */
static void
ftl_clear_nv_cache_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct ftl_dev_init_ctx *init_ctx = cb_arg;
	struct spdk_ftl_dev *dev = init_ctx->dev;
	struct ftl_nv_cache *nv_cache = &dev->nv_cache;

	spdk_bdev_free_io(bdev_io);
	if (spdk_unlikely(!success)) {
		SPDK_ERRLOG("Unable to clear the non-volatile cache bdev\n");
		ftl_init_fail(init_ctx);
		return;
	}

	/* A scrubbed cache starts in phase 1. */
	nv_cache->phase = 1;
	if (ftl_nv_cache_write_header(nv_cache, false, ftl_write_nv_cache_md_cb, init_ctx)) {
		SPDK_ERRLOG("Unable to write non-volatile cache metadata header\n");
		ftl_init_fail(init_ctx);
	}
}
683 
684 static void
685 _ftl_nv_cache_scrub(void *ctx)
686 {
687 	struct ftl_dev_init_ctx *init_ctx = ctx;
688 	struct spdk_ftl_dev *dev = init_ctx->dev;
689 	int rc;
690 
691 	rc = ftl_nv_cache_scrub(&dev->nv_cache, ftl_clear_nv_cache_cb, init_ctx);
692 
693 	if (spdk_unlikely(rc != 0)) {
694 		SPDK_ERRLOG("Unable to clear the non-volatile cache bdev: %s\n",
695 			    spdk_strerror(-rc));
696 		ftl_init_fail(init_ctx);
697 	}
698 }
699 
700 static int
701 ftl_setup_initial_state(struct ftl_dev_init_ctx *init_ctx)
702 {
703 	struct spdk_ftl_dev *dev = init_ctx->dev;
704 	struct spdk_ftl_conf *conf = &dev->conf;
705 	size_t i;
706 
707 	spdk_uuid_generate(&dev->uuid);
708 
709 	dev->num_lbas = 0;
710 	for (i = 0; i < ftl_get_num_bands(dev); ++i) {
711 		dev->num_lbas += ftl_band_num_usable_blocks(&dev->bands[i]);
712 	}
713 
714 	dev->num_lbas = (dev->num_lbas * (100 - conf->lba_rsvd)) / 100;
715 
716 	if (ftl_dev_l2p_alloc(dev)) {
717 		SPDK_ERRLOG("Unable to init l2p table\n");
718 		return -1;
719 	}
720 
721 	if (ftl_init_bands_state(dev)) {
722 		SPDK_ERRLOG("Unable to finish the initialization\n");
723 		return -1;
724 	}
725 
726 	if (!ftl_dev_has_nv_cache(dev)) {
727 		ftl_init_complete(init_ctx);
728 	} else {
729 		spdk_thread_send_msg(ftl_get_core_thread(dev), _ftl_nv_cache_scrub, init_ctx);
730 	}
731 
732 	return 0;
733 }
734 
/* Completion of NV cache state restoration — the final step of the restore
 * path for devices with a cache. */
static void
ftl_restore_nv_cache_cb(struct ftl_restore *restore, int status, void *cb_arg)
{
	struct ftl_dev_init_ctx *init_ctx = cb_arg;

	if (spdk_unlikely(status != 0)) {
		SPDK_ERRLOG("Failed to restore the non-volatile cache state\n");
		ftl_init_fail(init_ctx);
		return;
	}

	ftl_init_complete(init_ctx);
}
748 
/* Completion of device data restoration: finish band state setup and, if a
 * NV cache is present, continue with its restoration. */
static void
ftl_restore_device_cb(struct ftl_restore *restore, int status, void *cb_arg)
{
	struct ftl_dev_init_ctx *init_ctx = cb_arg;
	struct spdk_ftl_dev *dev = init_ctx->dev;

	if (status) {
		SPDK_ERRLOG("Failed to restore the device from the SSD\n");
		ftl_init_fail(init_ctx);
		return;
	}

	if (ftl_init_bands_state(dev)) {
		SPDK_ERRLOG("Unable to finish the initialization\n");
		ftl_init_fail(init_ctx);
		return;
	}

	if (!ftl_dev_has_nv_cache(dev)) {
		ftl_init_complete(init_ctx);
		return;
	}

	ftl_restore_nv_cache(restore, ftl_restore_nv_cache_cb, init_ctx);
}
774 
/* Completion of metadata restoration: allocate the L2P (sized from the
 * restored metadata) and chain into full device restoration. Any failure
 * funnels through the single error path. */
static void
ftl_restore_md_cb(struct ftl_restore *restore, int status, void *cb_arg)
{
	struct ftl_dev_init_ctx *init_ctx = cb_arg;

	if (status) {
		SPDK_ERRLOG("Failed to restore the metadata from the SSD\n");
		goto error;
	}

	/* After the metadata is read it should be possible to allocate the L2P */
	if (ftl_dev_l2p_alloc(init_ctx->dev)) {
		SPDK_ERRLOG("Failed to allocate the L2P\n");
		goto error;
	}

	if (ftl_restore_device(restore, ftl_restore_device_cb, init_ctx)) {
		SPDK_ERRLOG("Failed to start device restoration from the SSD\n");
		goto error;
	}

	return;
error:
	ftl_init_fail(init_ctx);
}
800 
801 static int
802 ftl_restore_state(struct ftl_dev_init_ctx *init_ctx)
803 {
804 	struct spdk_ftl_dev *dev = init_ctx->dev;
805 
806 	dev->uuid = init_ctx->opts.uuid;
807 
808 	if (ftl_restore_md(dev, ftl_restore_md_cb, init_ctx)) {
809 		SPDK_ERRLOG("Failed to start metadata restoration from the SSD\n");
810 		return -1;
811 	}
812 
813 	return 0;
814 }
815 
/* Post-zone-info fixups: compute each band's tail metadata address and drop
 * fully-bad bands (no usable zones) from the shut list and the band count. */
static void
ftl_dev_update_bands(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band, *temp_band;
	size_t i;

	for (i = 0; i < ftl_get_num_bands(dev); ++i) {
		band = &dev->bands[i];
		band->tail_md_addr = ftl_band_tail_md_addr(band);
	}

	/* Remove band from shut_bands list to prevent further processing */
	/* if all blocks on this band are bad */
	LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
		if (!band->num_zones) {
			dev->num_bands--;
			LIST_REMOVE(band, list_entry);
		}
	}
}
836 
/* Continue initialization after all zone info has been gathered: fix up the
 * bands, start the core thread, then branch between fresh-create and restore
 * paths based on the requested mode. */
static void
ftl_dev_init_state(struct ftl_dev_init_ctx *init_ctx)
{
	struct spdk_ftl_dev *dev = init_ctx->dev;

	ftl_dev_update_bands(dev);

	if (ftl_dev_init_core_thread(dev, &init_ctx->opts)) {
		SPDK_ERRLOG("Unable to initialize device thread\n");
		ftl_init_fail(init_ctx);
		return;
	}

	if (init_ctx->opts.mode & SPDK_FTL_MODE_CREATE) {
		if (ftl_setup_initial_state(init_ctx)) {
			SPDK_ERRLOG("Failed to setup initial state of the device\n");
			ftl_init_fail(init_ctx);
			return;
		}
	} else {
		if (ftl_restore_state(init_ctx)) {
			SPDK_ERRLOG("Unable to restore device's state from the SSD\n");
			ftl_init_fail(init_ctx);
			return;
		}
	}
}
864 
865 static void ftl_dev_get_zone_info(struct ftl_dev_init_ctx *init_ctx);
866 
/* Completion of one spdk_bdev_get_zone_info() batch: file each reported zone
 * into its band's zone buffer, mark unusable zones offline, then advance the
 * cursor and request the next batch (ftl_dev_get_zone_info() terminates the
 * loop when no zones remain). */
static void
ftl_dev_get_zone_info_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct ftl_dev_init_ctx *init_ctx = cb_arg;
	struct spdk_ftl_dev *dev = init_ctx->dev;
	struct ftl_band *band;
	struct ftl_zone *zone;
	struct ftl_addr addr;
	size_t i, zones_left, num_zones;

	spdk_bdev_free_io(bdev_io);

	if (spdk_unlikely(!success)) {
		SPDK_ERRLOG("Unable to read zone info for zone id: %"PRIu64"\n", init_ctx->zone_id);
		ftl_init_fail(init_ctx);
		return;
	}

	/* zone_id is a block offset; dividing by blocks-per-zone gives the
	 * number of zones already processed. */
	zones_left = ftl_get_num_zones(dev) - (init_ctx->zone_id / ftl_get_num_blocks_in_zone(dev));
	num_zones = spdk_min(zones_left, FTL_ZONE_INFO_COUNT);

	for (i = 0; i < num_zones; ++i) {
		/* Map the zone's start offset to its (band, punit) slot. */
		addr.offset = init_ctx->info[i].zone_id;
		band = &dev->bands[ftl_addr_get_band(dev, addr)];
		zone = &band->zone_buf[ftl_addr_get_punit(dev, addr)];
		zone->info = init_ctx->info[i];

		/* TODO: add support for zone capacity less than zone size */
		if (zone->info.capacity != ftl_get_num_blocks_in_zone(dev)) {
			zone->info.state = SPDK_BDEV_ZONE_STATE_OFFLINE;
			SPDK_ERRLOG("Zone capacity is not equal zone size for "
				    "zone id: %"PRIu64"\n", init_ctx->zone_id);
		}

		/* Only usable zones participate in the band's zone ring. */
		if (zone->info.state != SPDK_BDEV_ZONE_STATE_OFFLINE) {
			band->num_zones++;
			CIRCLEQ_INSERT_TAIL(&band->zones, zone, circleq);
		}
	}

	init_ctx->zone_id = init_ctx->zone_id + num_zones * ftl_get_num_blocks_in_zone(dev);

	ftl_dev_get_zone_info(init_ctx);
}
911 
/* Request the next batch of zone info (up to FTL_ZONE_INFO_COUNT zones) from
 * the base bdev, or proceed to ftl_dev_init_state() once all zones have been
 * read. Forms an asynchronous loop with ftl_dev_get_zone_info_cb(). */
static void
ftl_dev_get_zone_info(struct ftl_dev_init_ctx *init_ctx)
{
	struct spdk_ftl_dev *dev = init_ctx->dev;
	size_t zones_left, num_zones;
	int rc;

	zones_left = ftl_get_num_zones(dev) - (init_ctx->zone_id / ftl_get_num_blocks_in_zone(dev));
	if (zones_left == 0) {
		ftl_dev_init_state(init_ctx);
		return;
	}

	num_zones = spdk_min(zones_left, FTL_ZONE_INFO_COUNT);

	rc = spdk_bdev_get_zone_info(dev->base_bdev_desc, init_ctx->ioch,
				     init_ctx->zone_id, num_zones, init_ctx->info,
				     ftl_dev_get_zone_info_cb, init_ctx);

	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Unable to read zone info for zone id: %"PRIu64"\n", init_ctx->zone_id);
		ftl_init_fail(init_ctx);
	}
}
936 
937 static int
938 ftl_dev_init_zones(struct ftl_dev_init_ctx *init_ctx)
939 {
940 	struct spdk_ftl_dev *dev =  init_ctx->dev;
941 
942 	init_ctx->zone_id = 0;
943 	init_ctx->ioch = spdk_bdev_get_io_channel(dev->base_bdev_desc);
944 	if (!init_ctx->ioch) {
945 		SPDK_ERRLOG("Failed to get base bdev IO channel\n");
946 		return -1;
947 	}
948 
949 	ftl_dev_get_zone_info(init_ctx);
950 
951 	return 0;
952 }
953 
/* Per-thread IO channel constructor: create the IO object pool, grab
 * channels on the base bdev (and NV cache bdev when present), and register
 * the channel poller. The goto chain unwinds acquisitions in reverse order
 * on failure. Returns 0 on success, -1 on failure. */
static int
ftl_io_channel_create_cb(void *io_device, void *ctx)
{
	struct spdk_ftl_dev *dev = io_device;
	struct ftl_io_channel *ioch = ctx;
	char mempool_name[32];
	int rc;

	rc = snprintf(mempool_name, sizeof(mempool_name), "ftl_io_%p", ioch);
	if (rc < 0 || rc >= (int)sizeof(mempool_name)) {
		SPDK_ERRLOG("Failed to create IO channel pool name\n");
		return -1;
	}

	ioch->cache_ioch = NULL;
	ioch->dev = dev;
	/* Pool elements are sized for the largest IO type (metadata IO). */
	ioch->elem_size = sizeof(struct ftl_md_io);
	ioch->io_pool = spdk_mempool_create(mempool_name,
					    dev->conf.user_io_pool_size,
					    ioch->elem_size,
					    0,
					    SPDK_ENV_SOCKET_ID_ANY);
	if (!ioch->io_pool) {
		SPDK_ERRLOG("Failed to create IO channel's IO pool\n");
		return -1;
	}

	ioch->base_ioch = spdk_bdev_get_io_channel(dev->base_bdev_desc);
	if (!ioch->base_ioch) {
		SPDK_ERRLOG("Failed to create base bdev IO channel\n");
		goto fail_ioch;
	}

	if (ftl_dev_has_nv_cache(dev)) {
		ioch->cache_ioch = spdk_bdev_get_io_channel(dev->nv_cache.bdev_desc);
		if (!ioch->cache_ioch) {
			SPDK_ERRLOG("Failed to create cache IO channel\n");
			goto fail_cache;
		}
	}

	TAILQ_INIT(&ioch->write_cmpl_queue);
	TAILQ_INIT(&ioch->retry_queue);
	ioch->poller = spdk_poller_register(ftl_io_channel_poll, ioch, 0);
	if (!ioch->poller) {
		SPDK_ERRLOG("Failed to register IO channel poller\n");
		goto fail_poller;
	}

	return 0;

fail_poller:
	if (ioch->cache_ioch) {
		spdk_put_io_channel(ioch->cache_ioch);
	}
fail_cache:
	spdk_put_io_channel(ioch->base_ioch);
fail_ioch:
	spdk_mempool_free(ioch->io_pool);
	return -1;

}
1016 
1017 static void
1018 ftl_io_channel_destroy_cb(void *io_device, void *ctx)
1019 {
1020 	struct ftl_io_channel *ioch = ctx;
1021 
1022 	spdk_poller_unregister(&ioch->poller);
1023 
1024 	spdk_mempool_free(ioch->io_pool);
1025 
1026 	spdk_put_io_channel(ioch->base_ioch);
1027 
1028 	if (ioch->cache_ioch) {
1029 		spdk_put_io_channel(ioch->cache_ioch);
1030 	}
1031 }
1032 
/* Register the device as an SPDK io_device so per-thread FTL IO channels can
 * be created on demand. Always succeeds. */
static int
ftl_dev_init_io_channel(struct spdk_ftl_dev *dev)
{
	spdk_io_device_register(dev, ftl_io_channel_create_cb, ftl_io_channel_destroy_cb,
				sizeof(struct ftl_io_channel),
				NULL);

	return 0;
}
1042 
1043 static int
1044 ftl_dev_init_base_bdev(struct spdk_ftl_dev *dev, const char *bdev_name)
1045 {
1046 	uint32_t block_size;
1047 	uint64_t num_blocks;
1048 	struct spdk_bdev *bdev;
1049 
1050 	bdev = spdk_bdev_get_by_name(bdev_name);
1051 	if (!bdev) {
1052 		SPDK_ERRLOG("Unable to find bdev: %s\n", bdev_name);
1053 		return -1;
1054 	}
1055 
1056 	if (!spdk_bdev_is_zoned(bdev)) {
1057 		SPDK_ERRLOG("Bdev dosen't support zone capabilities: %s\n",
1058 			    spdk_bdev_get_name(bdev));
1059 		return -1;
1060 	}
1061 
1062 	if (spdk_bdev_open_ext(bdev_name, true, ftl_bdev_event_cb,
1063 			       dev, &dev->base_bdev_desc)) {
1064 		SPDK_ERRLOG("Unable to open bdev: %s\n", bdev_name);
1065 		return -1;
1066 	}
1067 
1068 	if (spdk_bdev_module_claim_bdev(bdev, dev->base_bdev_desc, &g_ftl_bdev_module)) {
1069 		spdk_bdev_close(dev->base_bdev_desc);
1070 		dev->base_bdev_desc = NULL;
1071 		SPDK_ERRLOG("Unable to claim bdev %s\n", bdev_name);
1072 		return -1;
1073 	}
1074 
1075 	dev->xfer_size = spdk_bdev_get_write_unit_size(bdev);
1076 	dev->md_size = spdk_bdev_get_md_size(bdev);
1077 
1078 	block_size = spdk_bdev_get_block_size(bdev);
1079 	if (block_size != FTL_BLOCK_SIZE) {
1080 		SPDK_ERRLOG("Unsupported block size (%"PRIu32")\n", block_size);
1081 		return -1;
1082 	}
1083 
1084 	num_blocks = spdk_bdev_get_num_blocks(bdev);
1085 	if (num_blocks % ftl_get_num_punits(dev)) {
1086 		SPDK_ERRLOG("Unsupported geometry. Base bdev block count must be multiple "
1087 			    "of optimal number of zones.\n");
1088 		return -1;
1089 	}
1090 
1091 	if (ftl_is_append_supported(dev) &&
1092 	    !spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZONE_APPEND)) {
1093 		SPDK_ERRLOG("Bdev dosen't support append: %s\n",
1094 			    spdk_bdev_get_name(bdev));
1095 		return -1;
1096 	}
1097 
1098 	dev->num_bands = num_blocks / (ftl_get_num_punits(dev) * ftl_get_num_blocks_in_zone(dev));
1099 	dev->addr_len = spdk_u64log2(num_blocks) + 1;
1100 
1101 	return 0;
1102 }
1103 
/* Mempool object destructor (spdk_mempool_obj_iter callback): frees the
 * bit array tracking segments of each LBA map request before the pool
 * itself is destroyed. */
static void
ftl_lba_map_request_dtor(struct spdk_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
{
	struct ftl_lba_map_request *request = obj;

	spdk_bit_array_free(&request->segments);
}
1111 
1112 static void
1113 ftl_release_bdev(struct spdk_bdev_desc *bdev_desc)
1114 {
1115 	if (!bdev_desc) {
1116 		return;
1117 	}
1118 
1119 	spdk_bdev_module_release_bdev(spdk_bdev_desc_get_bdev(bdev_desc));
1120 	spdk_bdev_close(bdev_desc);
1121 }
1122 
/* Synchronously free all resources owned by the device.
 *
 * Called both on failed initialization (where only a subset of the fields
 * may be populated - every free below tolerates NULL/empty state) and on
 * the final step of device shutdown.  Must be called after the core thread
 * and all IO channels have quiesced (see the asserts below).
 */
static void
ftl_dev_free_sync(struct spdk_ftl_dev *dev)
{
	struct spdk_ftl_dev *iter;
	size_t i;

	if (!dev) {
		return;
	}

	/* Remove the device from the global registration queue, if present. */
	pthread_mutex_lock(&g_ftl_queue_lock);
	STAILQ_FOREACH(iter, &g_ftl_queue, stailq) {
		if (iter == dev) {
			STAILQ_REMOVE(&g_ftl_queue, dev, spdk_ftl_dev, stailq);
			break;
		}
	}
	pthread_mutex_unlock(&g_ftl_queue_lock);

	/* No writes may be in flight at this point. */
	assert(LIST_EMPTY(&dev->wptr_list));
	assert(ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_INTERNAL) == 0);
	assert(ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_USER) == 0);

	ftl_dev_dump_bands(dev);
	ftl_dev_dump_stats(dev);

	spdk_io_device_unregister(dev, NULL);

	if (dev->core_thread.thread) {
		ftl_dev_free_thread(dev, &dev->core_thread);
	}

	/* Per-band allocations (zone buffers and bitmaps). */
	if (dev->bands) {
		for (i = 0; i < ftl_get_num_bands(dev); ++i) {
			free(dev->bands[i].zone_buf);
			spdk_bit_array_free(&dev->bands[i].lba_map.vld);
			spdk_bit_array_free(&dev->bands[i].reloc_bitmap);
		}
	}

	spdk_dma_free(dev->nv_cache.dma_buf);

	spdk_mempool_free(dev->lba_pool);
	spdk_mempool_free(dev->nv_cache.md_pool);
	spdk_mempool_free(dev->media_events_pool);
	/* Run the per-object destructor before freeing the request pool so
	 * each request's segments bit array is released. */
	if (dev->lba_request_pool) {
		spdk_mempool_obj_iter(dev->lba_request_pool, ftl_lba_map_request_dtor, NULL);
	}
	spdk_mempool_free(dev->lba_request_pool);

	ftl_rwb_free(dev->rwb);
	ftl_reloc_free(dev->reloc);

	/* Release both bdevs last - earlier frees may reference their geometry. */
	ftl_release_bdev(dev->nv_cache.bdev_desc);
	ftl_release_bdev(dev->base_bdev_desc);

	free(dev->name);
	free(dev->bands);
	free(dev->l2p);
	free(dev);
}
1184 
/* Initialize an FTL device on top of the configured base (and optional
 * cache) bdev.
 *
 * Performs the synchronous part of initialization here; zone discovery and
 * the remaining steps continue asynchronously via ftl_dev_init_zones(),
 * with cb_fn invoked on completion.  Returns 0 if the async phase was
 * started, or a negative errno on synchronous failure (in which case cb_fn
 * is NOT called).
 */
int
spdk_ftl_dev_init(const struct spdk_ftl_dev_init_opts *_opts, spdk_ftl_init_fn cb_fn, void *cb_arg)
{
	struct spdk_ftl_dev *dev;
	struct spdk_ftl_dev_init_opts opts = *_opts;
	struct ftl_dev_init_ctx *init_ctx = NULL;
	/* NOTE(review): rc is only ever -ENOMEM or -EINVAL here; other
	 * fail_sync paths (e.g. bdev init failure) also report -ENOMEM. */
	int rc = -ENOMEM;

	dev = calloc(1, sizeof(*dev));
	if (!dev) {
		return -ENOMEM;
	}

	init_ctx = calloc(1, sizeof(*init_ctx));
	if (!init_ctx) {
		goto fail_sync;
	}

	init_ctx->dev = dev;
	/* The context keeps the caller's original opts (before the default
	 * conf substitution below). */
	init_ctx->opts = *_opts;
	init_ctx->cb_fn = cb_fn;
	init_ctx->cb_arg = cb_arg;
	init_ctx->thread = spdk_get_thread();

	if (!opts.conf) {
		opts.conf = &g_default_conf;
	}

	if (!opts.base_bdev) {
		SPDK_ERRLOG("Lack of underlying device in configuration\n");
		rc = -EINVAL;
		goto fail_sync;
	}

	dev->conf = *opts.conf;
	dev->limit = SPDK_FTL_LIMIT_MAX;

	dev->name = strdup(opts.name);
	if (!dev->name) {
		SPDK_ERRLOG("Unable to set device name\n");
		goto fail_sync;
	}

	if (ftl_dev_init_base_bdev(dev, opts.base_bdev)) {
		SPDK_ERRLOG("Unsupported underlying device\n");
		goto fail_sync;
	}

	/* In case of errors, we free all of the memory in ftl_dev_free_sync(), */
	/* so we don't have to clean up in each of the init functions. */
	if (ftl_check_conf(dev, opts.conf)) {
		SPDK_ERRLOG("Invalid device configuration\n");
		goto fail_sync;
	}

	if (ftl_init_lba_map_pools(dev)) {
		SPDK_ERRLOG("Unable to init LBA map pools\n");
		goto fail_sync;
	}

	if (ftl_init_media_events_pool(dev)) {
		SPDK_ERRLOG("Unable to init media events pools\n");
		goto fail_sync;
	}

	ftl_init_wptr_list(dev);

	if (ftl_dev_init_bands(dev)) {
		SPDK_ERRLOG("Unable to initialize band array\n");
		goto fail_sync;
	}

	if (ftl_dev_init_nv_cache(dev, opts.cache_bdev)) {
		SPDK_ERRLOG("Unable to initialize persistent cache\n");
		goto fail_sync;
	}

	dev->rwb = ftl_rwb_init(&dev->conf, dev->xfer_size, dev->md_size, ftl_get_num_punits(dev));
	if (!dev->rwb) {
		SPDK_ERRLOG("Unable to initialize rwb structures\n");
		goto fail_sync;
	}

	dev->reloc = ftl_reloc_init(dev);
	if (!dev->reloc) {
		SPDK_ERRLOG("Unable to initialize reloc structures\n");
		goto fail_sync;
	}

	if (ftl_dev_init_io_channel(dev)) {
		SPDK_ERRLOG("Unable to initialize IO channels\n");
		goto fail_sync;
	}

	/* From here on the device is registered as an IO device; failure must
	 * go through the async teardown path instead of freeing directly. */
	if (ftl_dev_init_zones(init_ctx)) {
		SPDK_ERRLOG("Failed to initialize zones\n");
		goto fail_async;
	}

	return 0;
fail_sync:
	ftl_dev_free_sync(dev);
	ftl_dev_free_init_ctx(init_ctx);
	return rc;
fail_async:
	ftl_init_fail(init_ctx);
	return 0;
}
1293 
1294 static void
1295 _ftl_halt_defrag(void *arg)
1296 {
1297 	ftl_reloc_halt(((struct spdk_ftl_dev *)arg)->reloc);
1298 }
1299 
1300 static void
1301 ftl_halt_complete_cb(void *ctx)
1302 {
1303 	struct ftl_dev_init_ctx *fini_ctx = ctx;
1304 
1305 	ftl_dev_free_sync(fini_ctx->dev);
1306 	if (fini_ctx->cb_fn != NULL) {
1307 		fini_ctx->cb_fn(NULL, fini_ctx->cb_arg, fini_ctx->halt_complete_status);
1308 	}
1309 
1310 	ftl_dev_free_init_ctx(fini_ctx);
1311 }
1312 
1313 static void
1314 ftl_nv_cache_header_fini_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1315 {
1316 	struct ftl_dev_init_ctx *fini_ctx = cb_arg;
1317 	int rc = 0;
1318 
1319 	spdk_bdev_free_io(bdev_io);
1320 	if (spdk_unlikely(!success)) {
1321 		SPDK_ERRLOG("Failed to write non-volatile cache metadata header\n");
1322 		rc = -EIO;
1323 	}
1324 
1325 	fini_ctx->halt_complete_status = rc;
1326 	spdk_thread_send_msg(fini_ctx->thread, ftl_halt_complete_cb, fini_ctx);
1327 }
1328 
1329 static int
1330 ftl_halt_poller(void *ctx)
1331 {
1332 	struct ftl_dev_init_ctx *fini_ctx = ctx;
1333 	struct spdk_ftl_dev *dev = fini_ctx->dev;
1334 
1335 	if (!dev->core_thread.poller) {
1336 		spdk_poller_unregister(&fini_ctx->poller);
1337 
1338 		if (ftl_dev_has_nv_cache(dev)) {
1339 			ftl_nv_cache_write_header(&dev->nv_cache, true,
1340 						  ftl_nv_cache_header_fini_cb, fini_ctx);
1341 		} else {
1342 			fini_ctx->halt_complete_status = 0;
1343 			spdk_thread_send_msg(fini_ctx->thread, ftl_halt_complete_cb, fini_ctx);
1344 		}
1345 	}
1346 
1347 	return 0;
1348 }
1349 
1350 static void
1351 ftl_add_halt_poller(void *ctx)
1352 {
1353 	struct ftl_dev_init_ctx *fini_ctx = ctx;
1354 	struct spdk_ftl_dev *dev = fini_ctx->dev;
1355 
1356 	dev->halt = 1;
1357 
1358 	_ftl_halt_defrag(dev);
1359 
1360 	assert(!fini_ctx->poller);
1361 	fini_ctx->poller = spdk_poller_register(ftl_halt_poller, fini_ctx, 100);
1362 }
1363 
1364 static int
1365 _spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg,
1366 		   struct spdk_thread *thread)
1367 {
1368 	struct ftl_dev_init_ctx *fini_ctx;
1369 
1370 	if (dev->halt_started) {
1371 		dev->halt_started = true;
1372 		return -EBUSY;
1373 	}
1374 
1375 	fini_ctx = calloc(1, sizeof(*fini_ctx));
1376 	if (!fini_ctx) {
1377 		return -ENOMEM;
1378 	}
1379 
1380 	fini_ctx->dev = dev;
1381 	fini_ctx->cb_fn = cb_fn;
1382 	fini_ctx->cb_arg = cb_arg;
1383 	fini_ctx->thread = thread;
1384 
1385 	ftl_rwb_disable_interleaving(dev->rwb);
1386 
1387 	spdk_thread_send_msg(ftl_get_core_thread(dev), ftl_add_halt_poller, fini_ctx);
1388 	return 0;
1389 }
1390 
1391 int
1392 spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg)
1393 {
1394 	return _spdk_ftl_dev_free(dev, cb_fn, cb_arg, spdk_get_thread());
1395 }
1396 
1397 SPDK_LOG_REGISTER_COMPONENT("ftl_init", SPDK_LOG_FTL_INIT)
1398