xref: /spdk/lib/ftl/ftl_init.c (revision 2505b938627faacb6bb99780c3d4595f7ebd323b)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 #include "spdk/nvme.h"
36 #include "spdk/io_channel.h"
37 #include "spdk/string.h"
38 #include "spdk/likely.h"
39 #include "spdk_internal/log.h"
40 #include "spdk/ftl.h"
43 #include "spdk/bdev_zone.h"
44 #include "spdk/bdev_module.h"
45 
46 #include "ftl_core.h"
47 #include "ftl_io.h"
48 #include "ftl_reloc.h"
49 #include "ftl_rwb.h"
50 #include "ftl_band.h"
51 #include "ftl_debug.h"
52 
53 #define FTL_CORE_RING_SIZE	4096
54 #define FTL_INIT_TIMEOUT	30
55 #define FTL_NSID		1
56 #define FTL_ZONE_INFO_COUNT	64
57 
58 /* Dummy bdev module used to claim bdevs. */
59 static struct spdk_bdev_module g_ftl_bdev_module = {
60 	.name	= "ftl_lib",
61 };
62 
63 typedef void (*spdk_ftl_init_fn)(struct spdk_ftl_dev *, void *, int);
64 
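/* Context used to track the asynchronous device initialization; note that the
 * teardown path reuses the same structure as its "fini_ctx" (see
 * ftl_halt_complete_cb() and _spdk_ftl_dev_free() below).
 */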
65 struct ftl_dev_init_ctx {
66 	/* Owner */
67 	struct spdk_ftl_dev		*dev;
68 	/* Initial arguments */
69 	struct spdk_ftl_dev_init_opts	opts;
70 	/* IO channel for retrieving zone info */
71 	struct spdk_io_channel		*ioch;
72 	/* Buffer for reading zone info */
73 	struct spdk_bdev_zone_info	info[FTL_ZONE_INFO_COUNT];
74 	/* Start offset (in blocks) of the next zone to query */
75 	size_t				zone_id;
76 	/* User's callback */
77 	spdk_ftl_init_fn		cb_fn;
78 	/* Callback's argument */
79 	void				*cb_arg;
80 	/* Thread to call the callback on */
81 	struct spdk_thread		*thread;
82 	/* Poller to check if the device has been destroyed/initialized */
83 	struct spdk_poller		*poller;
84 	/* Status to return for halt completion callback */
85 	int				halt_complete_status;
86 };
87 
88 static STAILQ_HEAD(, spdk_ftl_dev)	g_ftl_queue = STAILQ_HEAD_INITIALIZER(g_ftl_queue);
89 static pthread_mutex_t			g_ftl_queue_lock = PTHREAD_MUTEX_INITIALIZER;
90 static const struct spdk_ftl_conf	g_default_conf = {
91 	.limits = {
92 		/* 5 free bands / 0 % host writes */
93 		[SPDK_FTL_LIMIT_CRIT]  = { .thld = 5,  .limit = 0 },
94 		/* 10 free bands / 5 % host writes */
95 		[SPDK_FTL_LIMIT_HIGH]  = { .thld = 10, .limit = 5 },
96 		/* 20 free bands / 40 % host writes */
97 		[SPDK_FTL_LIMIT_LOW]   = { .thld = 20, .limit = 40 },
98 		/* 40 free bands / 100 % host writes - defrag starts running */
99 		[SPDK_FTL_LIMIT_START] = { .thld = 40, .limit = 100 },
100 	},
101 	/* 10 percent valid blocks */
102 	.invalid_thld = 10,
103 	/* 20% spare blocks */
104 	.lba_rsvd = 20,
105 	/* 6M write buffer */
106 	.rwb_size = 6 * 1024 * 1024,
107 	/* 90% band fill threshold */
108 	.band_thld = 90,
109 	/* Max 32 IO depth per band relocate */
110 	.max_reloc_qdepth = 32,
111 	/* Max 3 active band relocates */
112 	.max_active_relocs = 3,
113 	/* IO pool size per user thread (this should be adjusted to thread IO qdepth) */
114 	.user_io_pool_size = 2048,
115 	/* Number of interleaving units per ws_opt */
116 	/* 1 by default, 3 for 3D TLC NAND */
117 	.num_interleave_units = 1,
118 	/*
119 	 * If clear, FTL will return an error when restoring after a dirty shutdown.
120 	 * If set, the last band will be padded and FTL will restore based only on
121 	 * closed bands - data from any open band will be lost after recovery.
122 	 */
123 	.allow_open_bands = false,
124 	.nv_cache = {
125 		/* Maximum number of concurrent requests */
126 		.max_request_cnt = 2048,
127 		/* Maximum number of blocks per request */
128 		.max_request_size = 16,
129 	}
130 };
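
/*
 * Illustrative sketch only (the locals `opts` and `conf` are hypothetical):
 * a caller would normally start from these defaults and override individual
 * fields rather than filling the structure by hand, e.g.:
 *
 *	struct spdk_ftl_dev_init_opts opts = {};
 *	struct spdk_ftl_conf conf;
 *
 *	spdk_ftl_conf_init_defaults(&conf);
 *	conf.lba_rsvd = 25;            (reserve 25% of blocks instead of 20%)
 *	conf.allow_open_bands = true;  (tolerate data loss on dirty shutdown)
 *	opts.conf = &conf;
 */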
131 
132 static int
133 ftl_band_init_md(struct ftl_band *band)
134 {
135 	struct ftl_lba_map *lba_map = &band->lba_map;
136 
137 	lba_map->vld = spdk_bit_array_create(ftl_get_num_blocks_in_band(band->dev));
138 	if (!lba_map->vld) {
139 		return -ENOMEM;
140 	}
141 
142 	pthread_spin_init(&lba_map->lock, PTHREAD_PROCESS_PRIVATE);
143 	ftl_band_md_clear(band);
144 	return 0;
145 }
146 
147 static int
148 ftl_check_conf(const struct spdk_ftl_dev *dev, const struct spdk_ftl_conf *conf)
149 {
150 	size_t i;
151 
152 	if (conf->invalid_thld >= 100) {
153 		return -1;
154 	}
155 	if (conf->lba_rsvd >= 100) {
156 		return -1;
157 	}
158 	if (conf->lba_rsvd == 0) {
159 		return -1;
160 	}
161 	if (conf->rwb_size == 0) {
162 		return -1;
163 	}
164 	if (conf->rwb_size % FTL_BLOCK_SIZE != 0) {
165 		return -1;
166 	}
167 	if (dev->xfer_size % conf->num_interleave_units != 0) {
168 		return -1;
169 	}
170 
171 	for (i = 0; i < SPDK_FTL_LIMIT_MAX; ++i) {
172 		if (conf->limits[i].limit > 100) {
173 			return -1;
174 		}
175 	}
176 
177 	return 0;
178 }
179 
180 static int
181 ftl_dev_init_bands(struct spdk_ftl_dev *dev)
182 {
183 	struct ftl_band *band, *pband;
184 	unsigned int i;
185 	int rc = 0;
186 
187 	LIST_INIT(&dev->free_bands);
188 	LIST_INIT(&dev->shut_bands);
189 
190 	dev->num_free = 0;
191 	dev->bands = calloc(ftl_get_num_bands(dev), sizeof(*dev->bands));
192 	if (!dev->bands) {
193 		return -1;
194 	}
195 
196 	for (i = 0; i < ftl_get_num_bands(dev); ++i) {
197 		band = &dev->bands[i];
198 		band->id = i;
199 		band->dev = dev;
200 		band->state = FTL_BAND_STATE_CLOSED;
201 
202 		if (LIST_EMPTY(&dev->shut_bands)) {
203 			LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
204 		} else {
205 			LIST_INSERT_AFTER(pband, band, list_entry);
206 		}
207 		pband = band;
208 
209 		CIRCLEQ_INIT(&band->zones);
210 		band->zone_buf = calloc(ftl_get_num_punits(dev), sizeof(*band->zone_buf));
211 		if (!band->zone_buf) {
212 			SPDK_ERRLOG("Failed to allocate block state table for band: [%u]\n", i);
213 			rc = -1;
214 			break;
215 		}
216 
217 		rc = ftl_band_init_md(band);
218 		if (rc) {
219 			SPDK_ERRLOG("Failed to initialize metadata structures for band [%u]\n", i);
220 			break;
221 		}
222 
223 		band->reloc_bitmap = spdk_bit_array_create(ftl_get_num_bands(dev));
224 		if (!band->reloc_bitmap) {
225 			SPDK_ERRLOG("Failed to allocate band relocation bitmap\n");
			rc = -1;
226 			break;
227 		}
228 	}
229 
230 	return rc;
231 }
232 
233 static void
234 ftl_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
235 {
236 	struct spdk_ftl_dev *dev = event_ctx;
237 
238 	switch (type) {
239 	case SPDK_BDEV_EVENT_REMOVE:
240 		assert(0);
241 		break;
242 	case SPDK_BDEV_EVENT_MEDIA_MANAGEMENT:
243 		assert(bdev == spdk_bdev_desc_get_bdev(dev->base_bdev_desc));
244 		ftl_get_media_events(dev);
		break;
245 	default:
246 		break;
247 	}
248 }
249 
250 static int
251 ftl_dev_init_nv_cache(struct spdk_ftl_dev *dev, const char *bdev_name)
252 {
253 	struct spdk_bdev *bdev;
254 	struct spdk_ftl_conf *conf = &dev->conf;
255 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
256 	char pool_name[128];
257 	int rc;
258 
259 	if (!bdev_name) {
260 		return 0;
261 	}
262 
263 	bdev = spdk_bdev_get_by_name(bdev_name);
264 	if (!bdev) {
265 		SPDK_ERRLOG("Unable to find bdev: %s\n", bdev_name);
266 		return -1;
267 	}
268 
269 	if (spdk_bdev_open_ext(bdev_name, true, ftl_bdev_event_cb,
270 			       dev, &nv_cache->bdev_desc)) {
271 		SPDK_ERRLOG("Unable to open bdev: %s\n", bdev_name);
272 		return -1;
273 	}
274 
275 	if (spdk_bdev_module_claim_bdev(bdev, nv_cache->bdev_desc, &g_ftl_bdev_module)) {
276 		spdk_bdev_close(nv_cache->bdev_desc);
277 		nv_cache->bdev_desc = NULL;
278 		SPDK_ERRLOG("Unable to claim bdev %s\n", bdev_name);
279 		return -1;
280 	}
281 
282 	SPDK_INFOLOG(SPDK_LOG_FTL_INIT, "Using %s as write buffer cache\n",
283 		     spdk_bdev_get_name(bdev));
284 
285 	if (spdk_bdev_get_block_size(bdev) != FTL_BLOCK_SIZE) {
286 		SPDK_ERRLOG("Unsupported block size (%"PRIu32")\n", spdk_bdev_get_block_size(bdev));
287 		return -1;
288 	}
289 
290 	if (!spdk_bdev_is_md_separate(bdev)) {
291 		SPDK_ERRLOG("Bdev %s doesn't support separate metadata buffer IO\n",
292 			    spdk_bdev_get_name(bdev));
293 		return -1;
294 	}
295 
296 	if (spdk_bdev_get_md_size(bdev) < sizeof(uint64_t)) {
297 		SPDK_ERRLOG("Bdev %s metadata is too small (%"PRIu32")\n",
298 			    spdk_bdev_get_name(bdev), spdk_bdev_get_md_size(bdev));
299 		return -1;
300 	}
301 
302 	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
303 		SPDK_ERRLOG("Unsupported DIF type used by bdev %s\n",
304 			    spdk_bdev_get_name(bdev));
305 		return -1;
306 	}
307 
308 	/* The cache needs to be capable of storing at least two full bands. This requirement comes
309 	 * from the fact that cache works as a protection against power loss, so before the data
310 	 * inside the cache can be overwritten, the band it's stored on has to be closed. Plus one
311 	 * extra block is needed to store the header.
312 	 */
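	/* Worked example with hypothetical numbers: a device with 1024 blocks
	 * per band needs a cache bdev of at least 2 * 1024 + 1 = 2049 blocks
	 * to pass the check below.
	 */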
313 	if (spdk_bdev_get_num_blocks(bdev) < ftl_get_num_blocks_in_band(dev) * 2 + 1) {
314 		SPDK_ERRLOG("Insufficient number of blocks for write buffer cache (available: %"
315 			    PRIu64", required: %"PRIu64")\n", spdk_bdev_get_num_blocks(bdev),
316 			    ftl_get_num_blocks_in_band(dev) * 2 + 1);
317 		return -1;
318 	}
319 
320 	rc = snprintf(pool_name, sizeof(pool_name), "ftl-nvpool-%p", dev);
321 	if (rc < 0 || rc >= (int)sizeof(pool_name)) {
322 		return -1;
323 	}
324 
325 	nv_cache->md_pool = spdk_mempool_create(pool_name, conf->nv_cache.max_request_cnt,
326 						spdk_bdev_get_md_size(bdev) *
327 						conf->nv_cache.max_request_size,
328 						SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
329 						SPDK_ENV_SOCKET_ID_ANY);
330 	if (!nv_cache->md_pool) {
331 		SPDK_ERRLOG("Failed to initialize non-volatile cache metadata pool\n");
332 		return -1;
333 	}
334 
335 	nv_cache->dma_buf = spdk_dma_zmalloc(FTL_BLOCK_SIZE, spdk_bdev_get_buf_align(bdev), NULL);
336 	if (!nv_cache->dma_buf) {
337 		SPDK_ERRLOG("Memory allocation failure\n");
338 		return -1;
339 	}
340 
341 	if (pthread_spin_init(&nv_cache->lock, PTHREAD_PROCESS_PRIVATE)) {
342 		SPDK_ERRLOG("Failed to initialize cache lock\n");
343 		return -1;
344 	}
345 
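	/* The block not counted below is the one holding the metadata header
	 * (the "plus one extra block" from the size check above).
	 */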
346 	nv_cache->current_addr = FTL_NV_CACHE_DATA_OFFSET;
347 	nv_cache->num_data_blocks = spdk_bdev_get_num_blocks(bdev) - 1;
348 	nv_cache->num_available = nv_cache->num_data_blocks;
349 	nv_cache->ready = false;
350 
351 	return 0;
352 }
353 
354 void
355 spdk_ftl_conf_init_defaults(struct spdk_ftl_conf *conf)
356 {
357 	*conf = g_default_conf;
358 }
359 
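/* Element constructor run once per object when lba_request_pool is created
 * (see spdk_mempool_create_ctor() in ftl_init_lba_map_pools()); it
 * preallocates each request's segments bitmap up front so that no allocation
 * is needed when requests are taken from the pool.
 */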
360 static void
361 ftl_lba_map_request_ctor(struct spdk_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
362 {
363 	struct ftl_lba_map_request *request = obj;
364 	struct spdk_ftl_dev *dev = opaque;
365 
366 	request->segments = spdk_bit_array_create(spdk_divide_round_up(
367 				    ftl_get_num_blocks_in_band(dev), FTL_NUM_LBA_IN_BLOCK));
368 }
369 
370 static int
371 ftl_init_media_events_pool(struct spdk_ftl_dev *dev)
372 {
373 	char pool_name[128];
374 	int rc;
375 
376 	rc = snprintf(pool_name, sizeof(pool_name), "ftl-media-%p", dev);
377 	if (rc < 0 || rc >= (int)sizeof(pool_name)) {
378 		SPDK_ERRLOG("Failed to create media pool name\n");
379 		return -1;
380 	}
381 
382 	dev->media_events_pool = spdk_mempool_create(pool_name, 1024,
383 				 sizeof(struct ftl_media_event),
384 				 SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
385 				 SPDK_ENV_SOCKET_ID_ANY);
386 	if (!dev->media_events_pool) {
387 		SPDK_ERRLOG("Failed to create media events pool\n");
388 		return -1;
389 	}
390 
391 	return 0;
392 }
393 
394 static int
395 ftl_init_lba_map_pools(struct spdk_ftl_dev *dev)
396 {
397 #define POOL_NAME_LEN 128
398 	char pool_name[POOL_NAME_LEN];
399 	int rc;
400 
401 	rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ftl-lba-pool");
402 	if (rc < 0 || rc >= POOL_NAME_LEN) {
403 		return -ENAMETOOLONG;
404 	}
405 
406 	/* We need to reserve at least 2 buffers for band close / open sequence
407 	 * alone, plus an additional 8 buffers for handling write errors.
408 	 * TODO: This memory pool is utilized only by the core thread - it introduces
409 	 * unnecessary overhead and should be replaced by a different data structure.
410 	 */
411 	dev->lba_pool = spdk_mempool_create(pool_name, 2 + 8,
412 					    ftl_lba_map_pool_elem_size(dev),
413 					    SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
414 					    SPDK_ENV_SOCKET_ID_ANY);
415 	if (!dev->lba_pool) {
416 		return -ENOMEM;
417 	}
418 
419 	rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ftl-lbareq-pool");
420 	if (rc < 0 || rc >= POOL_NAME_LEN) {
421 		return -ENAMETOOLONG;
422 	}
423 
424 	dev->lba_request_pool = spdk_mempool_create_ctor(pool_name,
425 				dev->conf.max_reloc_qdepth * dev->conf.max_active_relocs,
426 				sizeof(struct ftl_lba_map_request),
427 				SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
428 				SPDK_ENV_SOCKET_ID_ANY,
429 				ftl_lba_map_request_ctor,
430 				dev);
431 	if (!dev->lba_request_pool) {
432 		return -ENOMEM;
433 	}
434 
435 	return 0;
436 }
437 
438 static void
439 ftl_init_wptr_list(struct spdk_ftl_dev *dev)
440 {
441 	LIST_INIT(&dev->wptr_list);
442 	LIST_INIT(&dev->flush_list);
443 	LIST_INIT(&dev->band_flush_list);
444 }
445 
446 static size_t
447 ftl_dev_band_max_seq(struct spdk_ftl_dev *dev)
448 {
449 	struct ftl_band *band;
450 	size_t seq = 0;
451 
452 	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
453 		if (band->seq > seq) {
454 			seq = band->seq;
455 		}
456 	}
457 
458 	return seq;
459 }
460 
461 static void
462 _ftl_init_bands_state(void *ctx)
463 {
464 	struct ftl_band *band, *temp_band;
465 	struct spdk_ftl_dev *dev = ctx;
466 
467 	dev->seq = ftl_dev_band_max_seq(dev);
468 
469 	LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
470 		if (!band->lba_map.num_vld) {
471 			ftl_band_set_state(band, FTL_BAND_STATE_FREE);
472 		}
473 	}
474 
475 	ftl_reloc_resume(dev->reloc);
476 	/* Clear the limit applications as they're incremented incorrectly by */
477 	/* the initialization code */
478 	memset(dev->stats.limits, 0, sizeof(dev->stats.limits));
479 }
480 
481 static int
482 ftl_init_num_free_bands(struct spdk_ftl_dev *dev)
483 {
484 	struct ftl_band *band;
485 	int cnt = 0;
486 
487 	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
488 		if (band->num_zones && !band->lba_map.num_vld) {
489 			cnt++;
490 		}
491 	}
492 	return cnt;
493 }
494 
495 static int
496 ftl_init_bands_state(struct spdk_ftl_dev *dev)
497 {
498 	/* TODO: Should we abort initialization or expose a read only device */
499 	/* if there are no free bands? */
500 	/* If we abort initialization, should we depend on the condition that */
501 	/* we have no free bands or should we require some minimal number of */
502 	/* free bands? */
503 	if (!ftl_init_num_free_bands(dev)) {
504 		return -1;
505 	}
506 
507 	spdk_thread_send_msg(ftl_get_core_thread(dev), _ftl_init_bands_state, dev);
508 	return 0;
509 }
510 
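/* Runs on the designated core thread itself: SPDK pollers are registered on
 * the calling thread, hence the spdk_thread_send_msg() hop from
 * ftl_dev_init_core_thread() below.
 */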
511 static void
512 _ftl_dev_init_core_thread(void *ctx)
513 {
514 	struct ftl_thread *thread = ctx;
515 	struct spdk_ftl_dev *dev = thread->dev;
516 
517 	thread->poller = spdk_poller_register(thread->poller_fn, thread, thread->period_us);
518 	if (!thread->poller) {
519 		SPDK_ERRLOG("Unable to register poller\n");
520 		assert(0);
521 	}
522 
523 	thread->ioch = spdk_get_io_channel(dev);
524 }
525 
526 static int
527 ftl_dev_init_core_thread(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
528 {
529 	struct ftl_thread *thread = &dev->core_thread;
530 
531 	if (!opts->core_thread) {
532 		return -1;
533 	}
534 
535 	thread->dev = dev;
536 	thread->poller_fn = ftl_task_core;
537 	thread->thread = opts->core_thread;
538 	thread->period_us = 0;
539 
540 	spdk_thread_send_msg(opts->core_thread, _ftl_dev_init_core_thread, thread);
541 	return 0;
542 }
543 
544 static void
545 ftl_dev_free_thread(struct spdk_ftl_dev *dev, struct ftl_thread *thread)
546 {
547 	assert(thread->poller == NULL);
548 
549 	spdk_put_io_channel(thread->ioch);
550 	thread->thread = NULL;
551 	thread->ioch = NULL;
552 }
553 
554 static int
555 ftl_dev_l2p_alloc(struct spdk_ftl_dev *dev)
556 {
557 	size_t addr_size;
558 	uint64_t i;
559 
560 	if (dev->num_lbas == 0) {
561 		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Invalid l2p table size\n");
562 		return -1;
563 	}
564 
565 	if (dev->l2p) {
566 		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "L2p table already allocated\n");
567 		return -1;
568 	}
569 
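	/* The L2P is a flat array indexed by LBA; 4-byte entries are enough as
	 * long as physical addresses fit in fewer than 32 bits, otherwise
	 * 8-byte entries are used.
	 */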
570 	addr_size = dev->addr_len >= 32 ? 8 : 4;
571 	dev->l2p = malloc(dev->num_lbas * addr_size);
572 	if (!dev->l2p) {
573 		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Failed to allocate l2p table\n");
574 		return -1;
575 	}
576 
577 	for (i = 0; i < dev->num_lbas; ++i) {
578 		ftl_l2p_set(dev, i, ftl_to_addr(FTL_ADDR_INVALID));
579 	}
580 
581 	return 0;
582 }
583 
584 static void
585 ftl_dev_free_init_ctx(struct ftl_dev_init_ctx *init_ctx)
586 {
587 	if (!init_ctx) {
588 		return;
589 	}
590 
591 	if (init_ctx->ioch) {
592 		spdk_put_io_channel(init_ctx->ioch);
593 	}
594 
595 	free(init_ctx);
596 }
597 
598 static void
599 ftl_call_init_complete_cb(void *ctx)
600 {
601 	struct ftl_dev_init_ctx *init_ctx = ctx;
602 	struct spdk_ftl_dev *dev = init_ctx->dev;
603 
604 	if (init_ctx->cb_fn != NULL) {
605 		init_ctx->cb_fn(dev, init_ctx->cb_arg, 0);
606 	}
607 
608 	ftl_dev_free_init_ctx(init_ctx);
609 }
610 
611 static void
612 ftl_init_complete(struct ftl_dev_init_ctx *init_ctx)
613 {
614 	struct spdk_ftl_dev *dev = init_ctx->dev;
615 
616 	pthread_mutex_lock(&g_ftl_queue_lock);
617 	STAILQ_INSERT_HEAD(&g_ftl_queue, dev, stailq);
618 	pthread_mutex_unlock(&g_ftl_queue_lock);
619 
620 	dev->initialized = 1;
621 
622 	spdk_thread_send_msg(init_ctx->thread, ftl_call_init_complete_cb, init_ctx);
623 }
624 
625 static void
626 ftl_init_fail_cb(struct spdk_ftl_dev *dev, void *ctx, int status)
627 {
628 	struct ftl_dev_init_ctx *init_ctx = ctx;
629 
630 	if (init_ctx->cb_fn != NULL) {
631 		init_ctx->cb_fn(NULL, init_ctx->cb_arg, -ENODEV);
632 	}
633 
634 	ftl_dev_free_init_ctx(init_ctx);
635 }
636 
637 static int _spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg,
638 			      struct spdk_thread *thread);
639 
640 static void
641 ftl_init_fail(struct ftl_dev_init_ctx *init_ctx)
642 {
643 	if (_spdk_ftl_dev_free(init_ctx->dev, ftl_init_fail_cb, init_ctx, init_ctx->thread)) {
644 		SPDK_ERRLOG("Unable to free the device\n");
645 		assert(0);
646 	}
647 }
648 
649 static void
650 ftl_write_nv_cache_md_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
651 {
652 	struct ftl_dev_init_ctx *init_ctx = cb_arg;
653 	struct spdk_ftl_dev *dev = init_ctx->dev;
654 
655 	spdk_bdev_free_io(bdev_io);
656 	if (spdk_unlikely(!success)) {
657 		SPDK_ERRLOG("Writing non-volatile cache's metadata header failed\n");
658 		ftl_init_fail(init_ctx);
659 		return;
660 	}
661 
662 	dev->nv_cache.ready = true;
663 	ftl_init_complete(init_ctx);
664 }
665 
666 static void
667 ftl_clear_nv_cache_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
668 {
669 	struct ftl_dev_init_ctx *init_ctx = cb_arg;
670 	struct spdk_ftl_dev *dev = init_ctx->dev;
671 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
672 
673 	spdk_bdev_free_io(bdev_io);
674 	if (spdk_unlikely(!success)) {
675 		SPDK_ERRLOG("Unable to clear the non-volatile cache bdev\n");
676 		ftl_init_fail(init_ctx);
677 		return;
678 	}
679 
680 	nv_cache->phase = 1;
681 	if (ftl_nv_cache_write_header(nv_cache, false, ftl_write_nv_cache_md_cb, init_ctx)) {
682 		SPDK_ERRLOG("Unable to write non-volatile cache metadata header\n");
683 		ftl_init_fail(init_ctx);
684 	}
685 }
686 
687 static void
688 _ftl_nv_cache_scrub(void *ctx)
689 {
690 	struct ftl_dev_init_ctx *init_ctx = ctx;
691 	struct spdk_ftl_dev *dev = init_ctx->dev;
692 	int rc;
693 
694 	rc = ftl_nv_cache_scrub(&dev->nv_cache, ftl_clear_nv_cache_cb, init_ctx);
695 
696 	if (spdk_unlikely(rc != 0)) {
697 		SPDK_ERRLOG("Unable to clear the non-volatile cache bdev: %s\n",
698 			    spdk_strerror(-rc));
699 		ftl_init_fail(init_ctx);
700 	}
701 }
702 
703 static int
704 ftl_setup_initial_state(struct ftl_dev_init_ctx *init_ctx)
705 {
706 	struct spdk_ftl_dev *dev = init_ctx->dev;
707 	struct spdk_ftl_conf *conf = &dev->conf;
708 	size_t i;
709 
710 	spdk_uuid_generate(&dev->uuid);
711 
712 	dev->num_lbas = 0;
713 	for (i = 0; i < ftl_get_num_bands(dev); ++i) {
714 		dev->num_lbas += ftl_band_num_usable_blocks(&dev->bands[i]);
715 	}
716 
717 	dev->num_lbas = (dev->num_lbas * (100 - conf->lba_rsvd)) / 100;
718 
719 	if (ftl_dev_l2p_alloc(dev)) {
720 		SPDK_ERRLOG("Unable to init l2p table\n");
721 		return -1;
722 	}
723 
724 	if (ftl_init_bands_state(dev)) {
725 		SPDK_ERRLOG("Unable to finish the initialization\n");
726 		return -1;
727 	}
728 
729 	if (!ftl_dev_has_nv_cache(dev)) {
730 		ftl_init_complete(init_ctx);
731 	} else {
732 		spdk_thread_send_msg(ftl_get_core_thread(dev), _ftl_nv_cache_scrub, init_ctx);
733 	}
734 
735 	return 0;
736 }
737 
738 static void
739 ftl_restore_nv_cache_cb(struct ftl_restore *restore, int status, void *cb_arg)
740 {
741 	struct ftl_dev_init_ctx *init_ctx = cb_arg;
742 
743 	if (spdk_unlikely(status != 0)) {
744 		SPDK_ERRLOG("Failed to restore the non-volatile cache state\n");
745 		ftl_init_fail(init_ctx);
746 		return;
747 	}
748 
749 	ftl_init_complete(init_ctx);
750 }
751 
752 static void
753 ftl_restore_device_cb(struct ftl_restore *restore, int status, void *cb_arg)
754 {
755 	struct ftl_dev_init_ctx *init_ctx = cb_arg;
756 	struct spdk_ftl_dev *dev = init_ctx->dev;
757 
758 	if (status) {
759 		SPDK_ERRLOG("Failed to restore the device from the SSD\n");
760 		ftl_init_fail(init_ctx);
761 		return;
762 	}
763 
764 	if (ftl_init_bands_state(dev)) {
765 		SPDK_ERRLOG("Unable to finish the initialization\n");
766 		ftl_init_fail(init_ctx);
767 		return;
768 	}
769 
770 	if (!ftl_dev_has_nv_cache(dev)) {
771 		ftl_init_complete(init_ctx);
772 		return;
773 	}
774 
775 	ftl_restore_nv_cache(restore, ftl_restore_nv_cache_cb, init_ctx);
776 }
777 
778 static void
779 ftl_restore_md_cb(struct ftl_restore *restore, int status, void *cb_arg)
780 {
781 	struct ftl_dev_init_ctx *init_ctx = cb_arg;
782 
783 	if (status) {
784 		SPDK_ERRLOG("Failed to restore the metadata from the SSD\n");
785 		goto error;
786 	}
787 
788 	/* After the metadata is read it should be possible to allocate the L2P */
789 	if (ftl_dev_l2p_alloc(init_ctx->dev)) {
790 		SPDK_ERRLOG("Failed to allocate the L2P\n");
791 		goto error;
792 	}
793 
794 	if (ftl_restore_device(restore, ftl_restore_device_cb, init_ctx)) {
795 		SPDK_ERRLOG("Failed to start device restoration from the SSD\n");
796 		goto error;
797 	}
798 
799 	return;
800 error:
801 	ftl_init_fail(init_ctx);
802 }
803 
804 static int
805 ftl_restore_state(struct ftl_dev_init_ctx *init_ctx)
806 {
807 	struct spdk_ftl_dev *dev = init_ctx->dev;
808 
809 	dev->uuid = init_ctx->opts.uuid;
810 
811 	if (ftl_restore_md(dev, ftl_restore_md_cb, init_ctx)) {
812 		SPDK_ERRLOG("Failed to start metadata restoration from the SSD\n");
813 		return -1;
814 	}
815 
816 	return 0;
817 }
818 
819 static void
820 ftl_dev_update_bands(struct spdk_ftl_dev *dev)
821 {
822 	struct ftl_band *band, *temp_band;
823 	size_t i;
824 
825 	for (i = 0; i < ftl_get_num_bands(dev); ++i) {
826 		band = &dev->bands[i];
827 		band->tail_md_addr = ftl_band_tail_md_addr(band);
828 	}
829 
830 	/* Remove a band from the shut_bands list to prevent further processing */
831 	/* if all of its blocks are bad (i.e. it has no usable zones) */
832 	LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
833 		if (!band->num_zones) {
834 			dev->num_bands--;
835 			LIST_REMOVE(band, list_entry);
836 		}
837 	}
838 }
839 
840 static void
841 ftl_dev_init_state(struct ftl_dev_init_ctx *init_ctx)
842 {
843 	struct spdk_ftl_dev *dev = init_ctx->dev;
844 
845 	ftl_dev_update_bands(dev);
846 
847 	if (ftl_dev_init_core_thread(dev, &init_ctx->opts)) {
848 		SPDK_ERRLOG("Unable to initialize device thread\n");
849 		ftl_init_fail(init_ctx);
850 		return;
851 	}
852 
853 	if (init_ctx->opts.mode & SPDK_FTL_MODE_CREATE) {
854 		if (ftl_setup_initial_state(init_ctx)) {
855 			SPDK_ERRLOG("Failed to setup initial state of the device\n");
856 			ftl_init_fail(init_ctx);
857 			return;
858 		}
859 	} else {
860 		if (ftl_restore_state(init_ctx)) {
861 			SPDK_ERRLOG("Unable to restore device's state from the SSD\n");
862 			ftl_init_fail(init_ctx);
863 			return;
864 		}
865 	}
866 }
867 
868 static void ftl_dev_get_zone_info(struct ftl_dev_init_ctx *init_ctx);
869 
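/* Zone info is gathered in batches of up to FTL_ZONE_INFO_COUNT entries: the
 * callback below files each reported zone into its band's zone_buf and then
 * re-issues ftl_dev_get_zone_info() for the next batch until all zones have
 * been processed.
 */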
870 static void
871 ftl_dev_get_zone_info_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
872 {
873 	struct ftl_dev_init_ctx *init_ctx = cb_arg;
874 	struct spdk_ftl_dev *dev = init_ctx->dev;
875 	struct ftl_band *band;
876 	struct ftl_zone *zone;
877 	struct ftl_addr addr;
878 	size_t i, zones_left, num_zones;
879 
880 	spdk_bdev_free_io(bdev_io);
881 
882 	if (spdk_unlikely(!success)) {
883 		SPDK_ERRLOG("Unable to read zone info for zone id: %"PRIu64"\n", init_ctx->zone_id);
884 		ftl_init_fail(init_ctx);
885 		return;
886 	}
887 
888 	zones_left = ftl_get_num_zones(dev) - (init_ctx->zone_id / ftl_get_num_blocks_in_zone(dev));
889 	num_zones = spdk_min(zones_left, FTL_ZONE_INFO_COUNT);
890 
891 	for (i = 0; i < num_zones; ++i) {
892 		addr.offset = init_ctx->info[i].zone_id;
893 		band = &dev->bands[ftl_addr_get_band(dev, addr)];
894 		zone = &band->zone_buf[ftl_addr_get_punit(dev, addr)];
895 		zone->info = init_ctx->info[i];
896 
897 		/* TODO: add support for zone capacity less than zone size */
898 		if (zone->info.capacity != ftl_get_num_blocks_in_zone(dev)) {
899 			zone->info.state = SPDK_BDEV_ZONE_STATE_OFFLINE;
900 			SPDK_ERRLOG("Zone capacity is not equal to zone size for "
901 				    "zone id: %"PRIu64"\n", init_ctx->zone_id);
902 		}
903 
904 		if (zone->info.state != SPDK_BDEV_ZONE_STATE_OFFLINE) {
905 			band->num_zones++;
906 			CIRCLEQ_INSERT_TAIL(&band->zones, zone, circleq);
907 		}
908 	}
909 
910 	init_ctx->zone_id = init_ctx->zone_id + num_zones * ftl_get_num_blocks_in_zone(dev);
911 
912 	ftl_dev_get_zone_info(init_ctx);
913 }
914 
915 static void
916 ftl_dev_get_zone_info(struct ftl_dev_init_ctx *init_ctx)
917 {
918 	struct spdk_ftl_dev *dev = init_ctx->dev;
919 	size_t zones_left, num_zones;
920 	int rc;
921 
922 	zones_left = ftl_get_num_zones(dev) - (init_ctx->zone_id / ftl_get_num_blocks_in_zone(dev));
923 	if (zones_left == 0) {
924 		ftl_dev_init_state(init_ctx);
925 		return;
926 	}
927 
928 	num_zones = spdk_min(zones_left, FTL_ZONE_INFO_COUNT);
929 
930 	rc = spdk_bdev_get_zone_info(dev->base_bdev_desc, init_ctx->ioch,
931 				     init_ctx->zone_id, num_zones, init_ctx->info,
932 				     ftl_dev_get_zone_info_cb, init_ctx);
933 
934 	if (spdk_unlikely(rc != 0)) {
935 		SPDK_ERRLOG("Unable to read zone info for zone id: %"PRIu64"\n", init_ctx->zone_id);
936 		ftl_init_fail(init_ctx);
937 	}
938 }
939 
940 static int
941 ftl_dev_init_zones(struct ftl_dev_init_ctx *init_ctx)
942 {
943 	struct spdk_ftl_dev *dev = init_ctx->dev;
944 
945 	init_ctx->zone_id = 0;
946 	init_ctx->ioch = spdk_bdev_get_io_channel(dev->base_bdev_desc);
947 	if (!init_ctx->ioch) {
948 		SPDK_ERRLOG("Failed to get base bdev IO channel\n");
949 		return -1;
950 	}
951 
952 	ftl_dev_get_zone_info(init_ctx);
953 
954 	return 0;
955 }
956 
957 static int
958 ftl_io_channel_create_cb(void *io_device, void *ctx)
959 {
960 	struct spdk_ftl_dev *dev = io_device;
961 	struct ftl_io_channel *ioch = ctx;
962 	char mempool_name[32];
963 	int rc;
964 
965 	rc = snprintf(mempool_name, sizeof(mempool_name), "ftl_io_%p", ioch);
966 	if (rc < 0 || rc >= (int)sizeof(mempool_name)) {
967 		SPDK_ERRLOG("Failed to create IO channel pool name\n");
968 		return -1;
969 	}
970 
971 	ioch->cache_ioch = NULL;
972 	ioch->dev = dev;
973 	ioch->elem_size = sizeof(struct ftl_md_io);
974 	ioch->io_pool = spdk_mempool_create(mempool_name,
975 					    dev->conf.user_io_pool_size,
976 					    ioch->elem_size,
977 					    0,
978 					    SPDK_ENV_SOCKET_ID_ANY);
979 	if (!ioch->io_pool) {
980 		SPDK_ERRLOG("Failed to create IO channel's IO pool\n");
981 		return -1;
982 	}
983 
984 	ioch->base_ioch = spdk_bdev_get_io_channel(dev->base_bdev_desc);
985 	if (!ioch->base_ioch) {
986 		SPDK_ERRLOG("Failed to create base bdev IO channel\n");
987 		spdk_mempool_free(ioch->io_pool);
988 		return -1;
989 	}
990 
991 	if (ftl_dev_has_nv_cache(dev)) {
992 		ioch->cache_ioch = spdk_bdev_get_io_channel(dev->nv_cache.bdev_desc);
993 		if (!ioch->cache_ioch) {
994 			SPDK_ERRLOG("Failed to create cache IO channel\n");
995 			spdk_mempool_free(ioch->io_pool);
996 			spdk_put_io_channel(ioch->base_ioch);
997 			return -1;
998 		}
999 	}
1000 
1001 	return 0;
1002 }
1003 
1004 static void
1005 ftl_io_channel_destroy_cb(void *io_device, void *ctx)
1006 {
1007 	struct ftl_io_channel *ioch = ctx;
1008 
1009 	spdk_mempool_free(ioch->io_pool);
1010 
1011 	spdk_put_io_channel(ioch->base_ioch);
1012 
1013 	if (ioch->cache_ioch) {
1014 		spdk_put_io_channel(ioch->cache_ioch);
1015 	}
1016 }
1017 
1018 static int
1019 ftl_dev_init_io_channel(struct spdk_ftl_dev *dev)
1020 {
1021 	spdk_io_device_register(dev, ftl_io_channel_create_cb, ftl_io_channel_destroy_cb,
1022 				sizeof(struct ftl_io_channel),
1023 				NULL);
1024 
1025 	return 0;
1026 }
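
/* A rough sketch of the resulting pattern (hypothetical caller code): any
 * thread that needs to do IO grabs its per-thread channel, which triggers
 * ftl_io_channel_create_cb() above on first use:
 *
 *	struct spdk_io_channel *ioch = spdk_get_io_channel(dev);
 *	...submit IO through ioch...
 *	spdk_put_io_channel(ioch);
 */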
1027 
1028 static int
1029 ftl_dev_init_base_bdev(struct spdk_ftl_dev *dev, const char *bdev_name)
1030 {
1031 	uint32_t block_size;
1032 	uint64_t num_blocks;
1033 	struct spdk_bdev *bdev;
1034 
1035 	bdev = spdk_bdev_get_by_name(bdev_name);
1036 	if (!bdev) {
1037 		SPDK_ERRLOG("Unable to find bdev: %s\n", bdev_name);
1038 		return -1;
1039 	}
1040 
1041 	if (!spdk_bdev_is_zoned(bdev)) {
1042 		SPDK_ERRLOG("Bdev doesn't support zone capabilities: %s\n",
1043 			    spdk_bdev_get_name(bdev));
1044 		return -1;
1045 	}
1046 
1047 	if (spdk_bdev_open_ext(bdev_name, true, ftl_bdev_event_cb,
1048 			       dev, &dev->base_bdev_desc)) {
1049 		SPDK_ERRLOG("Unable to open bdev: %s\n", bdev_name);
1050 		return -1;
1051 	}
1052 
1053 	if (spdk_bdev_module_claim_bdev(bdev, dev->base_bdev_desc, &g_ftl_bdev_module)) {
1054 		spdk_bdev_close(dev->base_bdev_desc);
1055 		dev->base_bdev_desc = NULL;
1056 		SPDK_ERRLOG("Unable to claim bdev %s\n", bdev_name);
1057 		return -1;
1058 	}
1059 
1060 	dev->xfer_size = spdk_bdev_get_write_unit_size(bdev);
1061 	dev->md_size = spdk_bdev_get_md_size(bdev);
1062 
1063 	block_size = spdk_bdev_get_block_size(bdev);
1064 	if (block_size != FTL_BLOCK_SIZE) {
1065 		SPDK_ERRLOG("Unsupported block size (%"PRIu32")\n", block_size);
1066 		return -1;
1067 	}
1068 
1069 	num_blocks = spdk_bdev_get_num_blocks(bdev);
1070 	if (num_blocks % ftl_get_num_punits(dev)) {
1071 		SPDK_ERRLOG("Unsupported geometry. Base bdev block count must be a multiple "
1072 			    "of the optimal number of zones.\n");
1073 		return -1;
1074 	}
1075 
1076 	if (ftl_is_append_supported(dev) &&
1077 	    !spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZONE_APPEND)) {
1078 		SPDK_ERRLOG("Bdev doesn't support append: %s\n",
1079 			    spdk_bdev_get_name(bdev));
1080 		return -1;
1081 	}
1082 
1083 	dev->num_bands = num_blocks / (ftl_get_num_punits(dev) * ftl_get_num_blocks_in_zone(dev));
1084 	dev->addr_len = spdk_u64log2(num_blocks) + 1;
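	/* Hypothetical example: 2^30 blocks spread over 8 punits with 4096-block
	 * zones gives 2^30 / (8 * 4096) = 32768 bands, and addr_len becomes
	 * spdk_u64log2(2^30) + 1 = 31, so ftl_dev_l2p_alloc() picks 4-byte
	 * L2P entries.
	 */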
1085 
1086 	return 0;
1087 }
1088 
1089 static void
1090 ftl_lba_map_request_dtor(struct spdk_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
1091 {
1092 	struct ftl_lba_map_request *request = obj;
1093 
1094 	spdk_bit_array_free(&request->segments);
1095 }
1096 
1097 static void
1098 ftl_release_bdev(struct spdk_bdev_desc *bdev_desc)
1099 {
1100 	if (!bdev_desc) {
1101 		return;
1102 	}
1103 
1104 	spdk_bdev_module_release_bdev(spdk_bdev_desc_get_bdev(bdev_desc));
1105 	spdk_bdev_close(bdev_desc);
1106 }
1107 
1108 static void
1109 ftl_dev_free_sync(struct spdk_ftl_dev *dev)
1110 {
1111 	struct spdk_ftl_dev *iter;
1112 	size_t i;
1113 
1114 	if (!dev) {
1115 		return;
1116 	}
1117 
1118 	pthread_mutex_lock(&g_ftl_queue_lock);
1119 	STAILQ_FOREACH(iter, &g_ftl_queue, stailq) {
1120 		if (iter == dev) {
1121 			STAILQ_REMOVE(&g_ftl_queue, dev, spdk_ftl_dev, stailq);
1122 			break;
1123 		}
1124 	}
1125 	pthread_mutex_unlock(&g_ftl_queue_lock);
1126 
1127 	assert(LIST_EMPTY(&dev->wptr_list));
1128 	assert(ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_INTERNAL) == 0);
1129 	assert(ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_USER) == 0);
1130 
1131 	ftl_dev_dump_bands(dev);
1132 	ftl_dev_dump_stats(dev);
1133 
1134 	spdk_io_device_unregister(dev, NULL);
1135 
1136 	if (dev->core_thread.thread) {
1137 		ftl_dev_free_thread(dev, &dev->core_thread);
1138 	}
1139 
1140 	if (dev->bands) {
1141 		for (i = 0; i < ftl_get_num_bands(dev); ++i) {
1142 			free(dev->bands[i].zone_buf);
1143 			spdk_bit_array_free(&dev->bands[i].lba_map.vld);
1144 			spdk_bit_array_free(&dev->bands[i].reloc_bitmap);
1145 		}
1146 	}
1147 
1148 	spdk_dma_free(dev->nv_cache.dma_buf);
1149 
1150 	spdk_mempool_free(dev->lba_pool);
1151 	spdk_mempool_free(dev->nv_cache.md_pool);
1152 	spdk_mempool_free(dev->media_events_pool);
1153 	if (dev->lba_request_pool) {
1154 		spdk_mempool_obj_iter(dev->lba_request_pool, ftl_lba_map_request_dtor, NULL);
1155 	}
1156 	spdk_mempool_free(dev->lba_request_pool);
1157 
1158 	ftl_rwb_free(dev->rwb);
1159 	ftl_reloc_free(dev->reloc);
1160 
1161 	ftl_release_bdev(dev->nv_cache.bdev_desc);
1162 	ftl_release_bdev(dev->base_bdev_desc);
1163 
1164 	free(dev->name);
1165 	free(dev->bands);
1166 	free(dev->l2p);
1167 	free(dev);
1168 }
1169 
1170 int
1171 spdk_ftl_dev_init(const struct spdk_ftl_dev_init_opts *_opts, spdk_ftl_init_fn cb_fn, void *cb_arg)
1172 {
1173 	struct spdk_ftl_dev *dev;
1174 	struct spdk_ftl_dev_init_opts opts = *_opts;
1175 	struct ftl_dev_init_ctx *init_ctx = NULL;
1176 	int rc = -ENOMEM;
1177 
1178 	dev = calloc(1, sizeof(*dev));
1179 	if (!dev) {
1180 		return -ENOMEM;
1181 	}
1182 
1183 	init_ctx = calloc(1, sizeof(*init_ctx));
1184 	if (!init_ctx) {
1185 		goto fail_sync;
1186 	}
1187 
1188 	init_ctx->dev = dev;
1189 	init_ctx->opts = *_opts;
1190 	init_ctx->cb_fn = cb_fn;
1191 	init_ctx->cb_arg = cb_arg;
1192 	init_ctx->thread = spdk_get_thread();
1193 
1194 	if (!opts.conf) {
1195 		opts.conf = &g_default_conf;
1196 	}
1197 
1198 	if (!opts.base_bdev) {
1199 		SPDK_ERRLOG("No underlying device specified in configuration\n");
1200 		rc = -EINVAL;
1201 		goto fail_sync;
1202 	}
1203 
1204 	TAILQ_INIT(&dev->retry_queue);
1205 	dev->conf = *opts.conf;
1206 	dev->limit = SPDK_FTL_LIMIT_MAX;
1207 
1208 	dev->name = strdup(opts.name);
1209 	if (!dev->name) {
1210 		SPDK_ERRLOG("Unable to set device name\n");
1211 		goto fail_sync;
1212 	}
1213 
1214 	if (ftl_dev_init_base_bdev(dev, opts.base_bdev)) {
1215 		SPDK_ERRLOG("Unsupported underlying device\n");
1216 		goto fail_sync;
1217 	}
1218 
1219 	/* In case of errors, we free all of the memory in ftl_dev_free_sync(), */
1220 	/* so we don't have to clean up in each of the init functions. */
1221 	if (ftl_check_conf(dev, opts.conf)) {
1222 		SPDK_ERRLOG("Invalid device configuration\n");
1223 		goto fail_sync;
1224 	}
1225 
1226 	if (ftl_init_lba_map_pools(dev)) {
1227 		SPDK_ERRLOG("Unable to init LBA map pools\n");
1228 		goto fail_sync;
1229 	}
1230 
1231 	if (ftl_init_media_events_pool(dev)) {
1232 		SPDK_ERRLOG("Unable to init media events pool\n");
1233 		goto fail_sync;
1234 	}
1235 
1236 	ftl_init_wptr_list(dev);
1237 
1238 	if (ftl_dev_init_bands(dev)) {
1239 		SPDK_ERRLOG("Unable to initialize band array\n");
1240 		goto fail_sync;
1241 	}
1242 
1243 	if (ftl_dev_init_nv_cache(dev, opts.cache_bdev)) {
1244 		SPDK_ERRLOG("Unable to initialize persistent cache\n");
1245 		goto fail_sync;
1246 	}
1247 
1248 	dev->rwb = ftl_rwb_init(&dev->conf, dev->xfer_size, dev->md_size, ftl_get_num_punits(dev));
1249 	if (!dev->rwb) {
1250 		SPDK_ERRLOG("Unable to initialize rwb structures\n");
1251 		goto fail_sync;
1252 	}
1253 
1254 	dev->reloc = ftl_reloc_init(dev);
1255 	if (!dev->reloc) {
1256 		SPDK_ERRLOG("Unable to initialize reloc structures\n");
1257 		goto fail_sync;
1258 	}
1259 
1260 	if (ftl_dev_init_io_channel(dev)) {
1261 		SPDK_ERRLOG("Unable to initialize IO channels\n");
1262 		goto fail_sync;
1263 	}
1264 
1265 	if (ftl_dev_init_zones(init_ctx)) {
1266 		SPDK_ERRLOG("Failed to initialize zones\n");
1267 		goto fail_async;
1268 	}
1269 
1270 	return 0;
1271 fail_sync:
1272 	ftl_dev_free_sync(dev);
1273 	ftl_dev_free_init_ctx(init_ctx);
1274 	return rc;
1275 fail_async:
1276 	ftl_init_fail(init_ctx);
1277 	return 0;
1278 }
1279 
1280 static void
1281 _ftl_halt_defrag(void *arg)
1282 {
1283 	ftl_reloc_halt(((struct spdk_ftl_dev *)arg)->reloc);
1284 }
1285 
1286 static void
1287 ftl_halt_complete_cb(void *ctx)
1288 {
1289 	struct ftl_dev_init_ctx *fini_ctx = ctx;
1290 
1291 	ftl_dev_free_sync(fini_ctx->dev);
1292 	if (fini_ctx->cb_fn != NULL) {
1293 		fini_ctx->cb_fn(NULL, fini_ctx->cb_arg, fini_ctx->halt_complete_status);
1294 	}
1295 
1296 	ftl_dev_free_init_ctx(fini_ctx);
1297 }
1298 
1299 static void
1300 ftl_nv_cache_header_fini_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1301 {
1302 	struct ftl_dev_init_ctx *fini_ctx = cb_arg;
1303 	int rc = 0;
1304 
1305 	spdk_bdev_free_io(bdev_io);
1306 	if (spdk_unlikely(!success)) {
1307 		SPDK_ERRLOG("Failed to write non-volatile cache metadata header\n");
1308 		rc = -EIO;
1309 	}
1310 
1311 	fini_ctx->halt_complete_status = rc;
1312 	spdk_thread_send_msg(fini_ctx->thread, ftl_halt_complete_cb, fini_ctx);
1313 }
1314 
1315 static int
1316 ftl_halt_poller(void *ctx)
1317 {
1318 	struct ftl_dev_init_ctx *fini_ctx = ctx;
1319 	struct spdk_ftl_dev *dev = fini_ctx->dev;
1320 
1321 	if (!dev->core_thread.poller) {
1322 		spdk_poller_unregister(&fini_ctx->poller);
1323 
1324 		if (ftl_dev_has_nv_cache(dev)) {
1325 			ftl_nv_cache_write_header(&dev->nv_cache, true,
1326 						  ftl_nv_cache_header_fini_cb, fini_ctx);
1327 		} else {
1328 			fini_ctx->halt_complete_status = 0;
1329 			spdk_thread_send_msg(fini_ctx->thread, ftl_halt_complete_cb, fini_ctx);
1330 		}
1331 	}
1332 
1333 	return 0;
1334 }
1335 
1336 static void
1337 ftl_add_halt_poller(void *ctx)
1338 {
1339 	struct ftl_dev_init_ctx *fini_ctx = ctx;
1340 	struct spdk_ftl_dev *dev = fini_ctx->dev;
1341 
1342 	dev->halt = 1;
1343 
1344 	_ftl_halt_defrag(dev);
1345 
1346 	assert(!fini_ctx->poller);
1347 	fini_ctx->poller = spdk_poller_register(ftl_halt_poller, fini_ctx, 100);
1348 }
1349 
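/* Teardown sequence: flag the device as halting on the core thread, poll until
 * the core poller is gone, write out the non-volatile cache header (when a
 * cache is attached) and finally free all resources on the thread that
 * requested the shutdown.
 */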
1350 static int
1351 _spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg,
1352 		   struct spdk_thread *thread)
1353 {
1354 	struct ftl_dev_init_ctx *fini_ctx;
1355 
1356 	if (dev->halt_started) {
1357 		return -EBUSY;
1358 	}
1359 	dev->halt_started = true;
1360 
1361 	fini_ctx = calloc(1, sizeof(*fini_ctx));
1362 	if (!fini_ctx) {
1363 		return -ENOMEM;
1364 	}
1365 
1366 	fini_ctx->dev = dev;
1367 	fini_ctx->cb_fn = cb_fn;
1368 	fini_ctx->cb_arg = cb_arg;
1369 	fini_ctx->thread = thread;
1370 
1371 	ftl_rwb_disable_interleaving(dev->rwb);
1372 
1373 	spdk_thread_send_msg(ftl_get_core_thread(dev), ftl_add_halt_poller, fini_ctx);
1374 	return 0;
1375 }
1376 
1377 int
1378 spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg)
1379 {
1380 	return _spdk_ftl_dev_free(dev, cb_fn, cb_arg, spdk_get_thread());
1381 }
1382 
1383 SPDK_LOG_REGISTER_COMPONENT("ftl_init", SPDK_LOG_FTL_INIT)
1384