xref: /spdk/lib/ftl/ftl_init.c (revision 9889ab2dc80e40dae92dcef361d53dcba722043d)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 #include "spdk/nvme.h"
36 #include "spdk/io_channel.h"
37 #include "spdk/string.h"
38 #include "spdk/likely.h"
39 #include "spdk_internal/log.h"
40 #include "spdk/ftl.h"
41 #include "spdk/likely.h"
42 #include "spdk/string.h"
43 
44 #include "ftl_core.h"
45 #include "ftl_anm.h"
46 #include "ftl_io.h"
47 #include "ftl_reloc.h"
48 #include "ftl_rwb.h"
49 #include "ftl_band.h"
50 #include "ftl_debug.h"
51 
52 #define FTL_CORE_RING_SIZE	4096
53 #define FTL_INIT_TIMEOUT	30
54 #define FTL_NSID		1
55 
56 #define ftl_range_intersect(s1, e1, s2, e2) \
57 	((s1) <= (e2) && (s2) <= (e1))
58 
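/*
 * Completion context for synchronously polled admin commands. Callers declare
 * instances volatile and spin on `complete`, which ftl_admin_cb() sets from
 * the admin completion path.
 */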
59 struct ftl_admin_cmpl {
60 	struct spdk_nvme_cpl			status;
61 
62 	int					complete;
63 };
64 
65 static STAILQ_HEAD(, spdk_ftl_dev)	g_ftl_queue = STAILQ_HEAD_INITIALIZER(g_ftl_queue);
66 static pthread_mutex_t			g_ftl_queue_lock = PTHREAD_MUTEX_INITIALIZER;
67 static const struct spdk_ftl_conf	g_default_conf = {
68 	.limits = {
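		/*
		 * Each entry pairs a free-band threshold (thld) with the percentage
		 * of host writes still admitted (limit) once the number of free
		 * bands drops to that threshold.
		 */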
69 		/* 5 free bands  / 0 % host writes */
70 		[SPDK_FTL_LIMIT_CRIT]  = { .thld = 5,  .limit = 0 },
71 		/* 10 free bands / 5 % host writes */
72 		[SPDK_FTL_LIMIT_HIGH]  = { .thld = 10, .limit = 5 },
73 		/* 20 free bands / 40 % host writes */
74 		[SPDK_FTL_LIMIT_LOW]   = { .thld = 20, .limit = 40 },
75 		/* 40 free bands / 100 % host writes - defrag starts running */
76 		[SPDK_FTL_LIMIT_START] = { .thld = 40, .limit = 100 },
77 	},
78 	/* 10 percent valid lbks */
79 	.invalid_thld = 10,
80 	/* 20% spare lbks */
81 	.lba_rsvd = 20,
82 	/* 6M write buffer */
83 	.rwb_size = 6 * 1024 * 1024,
84 	/* 90% band fill threshold */
85 	.band_thld = 90,
86 	/* Max 32 IO depth per band relocate */
87 	.max_reloc_qdepth = 32,
88 	/* Max 3 active band relocates */
89 	.max_active_relocs = 3,
90 	/* IO pool size per user thread (this should be adjusted to thread IO qdepth) */
91 	.user_io_pool_size = 2048,
92 	/* Number of interleaving units per ws_opt */
93 	/* 1 by default, 3 for 3D TLC NAND */
94 	.num_interleave_units = 1,
95 	/*
96 	 * If clear, FTL will return an error when restoring after a dirty shutdown.
97 	 * If set, the last band will be padded and FTL will restore based only on
98 	 * closed bands - this will result in data loss after recovery.
99 	 */
100 	.allow_open_bands = false,
101 	.nv_cache = {
102 		/* Maximum number of concurrent requests */
103 		.max_request_cnt = 2048,
104 		/* Maximum number of blocks per request */
105 		.max_request_size = 16,
106 	}
107 };
108 
109 static void ftl_dev_free_sync(struct spdk_ftl_dev *dev);
110 
111 static void
112 ftl_admin_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
113 {
114 	struct ftl_admin_cmpl *cmpl = ctx;
115 
116 	cmpl->complete = 1;
117 	cmpl->status = *cpl;
118 }
119 
120 static int
121 ftl_band_init_md(struct ftl_band *band)
122 {
123 	struct ftl_lba_map *lba_map = &band->lba_map;
124 
125 	lba_map->vld = spdk_bit_array_create(ftl_num_band_lbks(band->dev));
126 	if (!lba_map->vld) {
127 		return -ENOMEM;
128 	}
129 
130 	pthread_spin_init(&lba_map->lock, PTHREAD_PROCESS_PRIVATE);
131 	ftl_band_md_clear(band);
132 	return 0;
133 }
134 
135 static int
136 ftl_check_conf(const struct spdk_ftl_conf *conf,
137 	       const struct spdk_ocssd_geometry_data *geo)
138 {
139 	size_t i;
140 
141 	if (conf->invalid_thld >= 100) {
142 		return -1;
143 	}
144 	if (conf->lba_rsvd >= 100) {
145 		return -1;
146 	}
147 	if (conf->lba_rsvd == 0) {
148 		return -1;
149 	}
150 	if (conf->rwb_size == 0) {
151 		return -1;
152 	}
153 	if (conf->rwb_size % FTL_BLOCK_SIZE != 0) {
154 		return -1;
155 	}
156 	if (geo->ws_opt % conf->num_interleave_units != 0) {
157 		return -1;
158 	}
159 
160 	for (i = 0; i < SPDK_FTL_LIMIT_MAX; ++i) {
161 		if (conf->limits[i].limit > 100) {
162 			return -1;
163 		}
164 	}
165 
166 	return 0;
167 }
168 
169 static int
170 ftl_check_init_opts(const struct spdk_ftl_dev_init_opts *opts,
171 		    const struct spdk_ocssd_geometry_data *geo)
172 {
173 	struct spdk_ftl_dev *dev;
174 	size_t num_punits = geo->num_pu * geo->num_grp;
175 	int rc = 0;
176 
177 	if (opts->range.begin > opts->range.end || opts->range.end >= num_punits) {
178 		return -1;
179 	}
180 
181 	if (ftl_check_conf(opts->conf, geo)) {
182 		return -1;
183 	}
184 
185 	pthread_mutex_lock(&g_ftl_queue_lock);
186 
187 	STAILQ_FOREACH(dev, &g_ftl_queue, stailq) {
188 		if (spdk_nvme_transport_id_compare(&dev->trid, &opts->trid)) {
189 			continue;
190 		}
191 
192 		if (ftl_range_intersect(opts->range.begin, opts->range.end,
193 					dev->range.begin, dev->range.end)) {
194 			rc = -1;
195 			goto out;
196 		}
197 	}
198 
199 out:
200 	pthread_mutex_unlock(&g_ftl_queue_lock);
201 	return rc;
202 }
203 
204 int
205 ftl_retrieve_chunk_info(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
206 			struct spdk_ocssd_chunk_information_entry *info,
207 			unsigned int num_entries)
208 {
209 	volatile struct ftl_admin_cmpl cmpl = {};
210 	uint32_t nsid = spdk_nvme_ns_get_id(dev->ns);
211 	uint64_t offset = (ppa.grp * dev->geo.num_pu + ppa.pu) *
212 			  dev->geo.num_chk + ppa.chk;
213 	int rc;
214 
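	/*
	 * The chunk information log page is a flat array of entries ordered by
	 * (group, parallel unit, chunk), so a given chunk's entry lives at its
	 * flat index (computed above) times the entry size.
	 */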
215 	rc = spdk_nvme_ctrlr_cmd_get_log_page(dev->ctrlr, SPDK_OCSSD_LOG_CHUNK_INFO, nsid,
216 					      info, num_entries * sizeof(*info),
217 					      offset * sizeof(*info),
218 					      ftl_admin_cb, (void *)&cmpl);
219 	if (spdk_unlikely(rc != 0)) {
220 		SPDK_ERRLOG("spdk_nvme_ctrlr_cmd_get_log_page: %s\n", spdk_strerror(-rc));
221 		return -1;
222 	}
223 
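	/* Busy-wait for the completion - this synchronous helper is only used on
	 * the initialization / restore path, where blocking is acceptable.
	 */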
224 	while (!cmpl.complete) {
225 		spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
226 	}
227 
228 	if (spdk_nvme_cpl_is_error(&cmpl.status)) {
229 		SPDK_ERRLOG("Unexpected status code: [%d], status code type: [%d]\n",
230 			    cmpl.status.status.sc, cmpl.status.status.sct);
231 		return -1;
232 	}
233 
234 	return 0;
235 }
236 
237 static int
238 ftl_retrieve_punit_chunk_info(struct spdk_ftl_dev *dev, const struct ftl_punit *punit,
239 			      struct spdk_ocssd_chunk_information_entry *info)
240 {
241 	uint32_t i = 0;
242 	unsigned int num_entries = FTL_BLOCK_SIZE / sizeof(*info);
243 	struct ftl_ppa chunk_ppa = punit->start_ppa;
244 	char ppa_buf[128];
245 
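	/* Fetch the chunk information in block-sized batches, trimming the final
	 * batch to the number of chunks remaining on the parallel unit.
	 */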
246 	for (i = 0; i < dev->geo.num_chk; i += num_entries, chunk_ppa.chk += num_entries) {
247 		if (num_entries > dev->geo.num_chk - i) {
248 			num_entries = dev->geo.num_chk - i;
249 		}
250 
251 		if (ftl_retrieve_chunk_info(dev, chunk_ppa, &info[i], num_entries)) {
252 			SPDK_ERRLOG("Failed to retrieve chunk information @ppa: %s\n",
253 				    ftl_ppa2str(chunk_ppa, ppa_buf, sizeof(ppa_buf)));
254 			return -1;
255 		}
256 	}
257 
258 	return 0;
259 }
260 
261 static unsigned char
262 ftl_get_chunk_state(const struct spdk_ocssd_chunk_information_entry *info)
263 {
264 	if (info->cs.free) {
265 		return FTL_CHUNK_STATE_FREE;
266 	}
267 
268 	if (info->cs.open) {
269 		return FTL_CHUNK_STATE_OPEN;
270 	}
271 
272 	if (info->cs.closed) {
273 		return FTL_CHUNK_STATE_CLOSED;
274 	}
275 
276 	if (info->cs.offline) {
277 		return FTL_CHUNK_STATE_BAD;
278 	}
279 
280 	assert(0 && "Invalid chunk state");
281 	return FTL_CHUNK_STATE_BAD;
282 }
283 
284 static void
285 ftl_remove_empty_bands(struct spdk_ftl_dev *dev)
286 {
287 	struct ftl_band *band, *temp_band;
288 
289 	/* Remove a band from the shut_bands list to prevent further processing */
290 	/* if all of the blocks on that band are bad */
291 	LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
292 		if (!band->num_chunks) {
293 			dev->num_bands--;
294 			LIST_REMOVE(band, list_entry);
295 		}
296 	}
297 }
298 
299 static int
300 ftl_dev_init_bands(struct spdk_ftl_dev *dev)
301 {
302 	struct spdk_ocssd_chunk_information_entry	*info;
303 	struct ftl_band					*band, *pband;
304 	struct ftl_punit				*punit;
305 	struct ftl_chunk				*chunk;
306 	unsigned int					i, j;
307 	char						buf[128];
308 	int						rc = 0;
309 
310 	LIST_INIT(&dev->free_bands);
311 	LIST_INIT(&dev->shut_bands);
312 
313 	dev->num_free = 0;
314 	dev->num_bands = ftl_dev_num_bands(dev);
315 	dev->bands = calloc(ftl_dev_num_bands(dev), sizeof(*dev->bands));
316 	if (!dev->bands) {
317 		return -1;
318 	}
319 
320 	info = calloc(dev->geo.num_chk, sizeof(*info));
321 	if (!info) {
322 		return -1;
323 	}
324 
325 	for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
326 		band = &dev->bands[i];
327 		band->id = i;
328 		band->dev = dev;
329 		band->state = FTL_BAND_STATE_CLOSED;
330 
331 		if (LIST_EMPTY(&dev->shut_bands)) {
332 			LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
333 		} else {
334 			LIST_INSERT_AFTER(pband, band, list_entry);
335 		}
336 		pband = band;
337 
338 		CIRCLEQ_INIT(&band->chunks);
339 		band->chunk_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->chunk_buf));
340 		if (!band->chunk_buf) {
341 			SPDK_ERRLOG("Failed to allocate block state table for band: [%u]\n", i);
342 			rc = -1;
343 			goto out;
344 		}
345 
346 		rc = ftl_band_init_md(band);
347 		if (rc) {
348 			SPDK_ERRLOG("Failed to initialize metadata structures for band [%u]\n", i);
349 			goto out;
350 		}
351 
352 		band->reloc_bitmap = spdk_bit_array_create(ftl_dev_num_bands(dev));
353 		if (!band->reloc_bitmap) {
354 			SPDK_ERRLOG("Failed to allocate band relocation bitmap\n");
355 			goto out;
356 		}
357 	}
358 
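	/* Band n is striped across the parallel units - it is built from chunk n
	 * of every punit, so retrieve each punit's chunk information and slot it
	 * into the corresponding bands.
	 */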
359 	for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
360 		punit = &dev->punits[i];
361 
362 		rc = ftl_retrieve_punit_chunk_info(dev, punit, info);
363 		if (rc) {
364 			SPDK_ERRLOG("Failed to retrieve bbt for @ppa: %s [%lu]\n",
365 				    ftl_ppa2str(punit->start_ppa, buf, sizeof(buf)),
366 				    ftl_ppa_addr_pack(dev, punit->start_ppa));
367 			goto out;
368 		}
369 
370 		for (j = 0; j < ftl_dev_num_bands(dev); ++j) {
371 			band = &dev->bands[j];
372 			chunk = &band->chunk_buf[i];
373 			chunk->pos = i;
374 			chunk->state = ftl_get_chunk_state(&info[j]);
375 			chunk->punit = punit;
376 			chunk->start_ppa = punit->start_ppa;
377 			chunk->start_ppa.chk = band->id;
378 			chunk->write_offset = ftl_dev_lbks_in_chunk(dev);
379 
380 			if (chunk->state != FTL_CHUNK_STATE_BAD) {
381 				band->num_chunks++;
382 				CIRCLEQ_INSERT_TAIL(&band->chunks, chunk, circleq);
383 			}
384 		}
385 	}
386 
387 	for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
388 		band = &dev->bands[i];
389 		band->tail_md_ppa = ftl_band_tail_md_ppa(band);
390 	}
391 
392 	ftl_remove_empty_bands(dev);
393 out:
394 	free(info);
395 	return rc;
396 }
397 
398 static int
399 ftl_dev_init_punits(struct spdk_ftl_dev *dev)
400 {
401 	unsigned int i, punit;
402 
403 	dev->punits = calloc(ftl_dev_num_punits(dev), sizeof(*dev->punits));
404 	if (!dev->punits) {
405 		return -1;
406 	}
407 
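	/* Parallel units are numbered group-first: consecutive punit indices
	 * advance through the groups before moving to the next PU within a
	 * group, hence the modulo / division below.
	 */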
408 	for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
409 		dev->punits[i].dev = dev;
410 		punit = dev->range.begin + i;
411 
412 		dev->punits[i].start_ppa.ppa = 0;
413 		dev->punits[i].start_ppa.grp = punit % dev->geo.num_grp;
414 		dev->punits[i].start_ppa.pu = punit / dev->geo.num_grp;
415 	}
416 
417 	return 0;
418 }
419 
420 static int
421 ftl_dev_retrieve_geo(struct spdk_ftl_dev *dev)
422 {
423 	volatile struct ftl_admin_cmpl cmpl = {};
424 	uint32_t nsid = spdk_nvme_ns_get_id(dev->ns);
425 
426 	if (spdk_nvme_ocssd_ctrlr_cmd_geometry(dev->ctrlr, nsid, &dev->geo, sizeof(dev->geo),
427 					       ftl_admin_cb, (void *)&cmpl)) {
428 		SPDK_ERRLOG("Unable to retrieve geometry\n");
429 		return -1;
430 	}
431 
432 	/* TODO: add a timeout */
433 	while (!cmpl.complete) {
434 		spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
435 	}
436 
437 	if (spdk_nvme_cpl_is_error(&cmpl.status)) {
438 		SPDK_ERRLOG("Unexpected status code: [%d], status code type: [%d]\n",
439 			    cmpl.status.status.sc, cmpl.status.status.sct);
440 		return -1;
441 	}
442 
443 	/* TODO: add sanity checks for the geo */
444 	dev->ppa_len = dev->geo.lbaf.grp_len +
445 		       dev->geo.lbaf.pu_len +
446 		       dev->geo.lbaf.chk_len +
447 		       dev->geo.lbaf.lbk_len;
448 
449 	dev->ppaf.lbk_offset = 0;
450 	dev->ppaf.lbk_mask   = (1 << dev->geo.lbaf.lbk_len) - 1;
451 	dev->ppaf.chk_offset = dev->ppaf.lbk_offset + dev->geo.lbaf.lbk_len;
452 	dev->ppaf.chk_mask   = (1 << dev->geo.lbaf.chk_len) - 1;
453 	dev->ppaf.pu_offset  = dev->ppaf.chk_offset + dev->geo.lbaf.chk_len;
454 	dev->ppaf.pu_mask    = (1 << dev->geo.lbaf.pu_len) - 1;
455 	dev->ppaf.grp_offset = dev->ppaf.pu_offset + dev->geo.lbaf.pu_len;
456 	dev->ppaf.grp_mask   = (1 << dev->geo.lbaf.grp_len) - 1;
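	/*
	 * The packed PPA layout is [grp][pu][chk][lbk], with lbk occupying the
	 * least significant bits. For example (illustrative values only),
	 * lbk_len = 9, chk_len = 10, pu_len = 7 and grp_len = 1 give
	 * ppa_len = 27 and a 27-bit address of the form [grp:1][pu:7][chk:10][lbk:9].
	 */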
457 
458 	/* We're using optimal write size as our xfer size */
459 	dev->xfer_size = dev->geo.ws_opt;
460 
461 	return 0;
462 }
463 
464 static int
465 ftl_dev_nvme_init(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
466 {
467 	uint32_t block_size;
468 
469 	dev->ctrlr = opts->ctrlr;
470 
471 	if (spdk_nvme_ctrlr_get_num_ns(dev->ctrlr) != 1) {
472 		SPDK_ERRLOG("Unsupported number of namespaces\n");
473 		return -1;
474 	}
475 
476 	dev->ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, FTL_NSID);
477 	if (dev->ns == NULL) {
478 		SPDK_ERRLOG("Invalid NS (%"PRIu32")\n", FTL_NSID);
479 		return -1;
480 	}
481 	dev->trid = opts->trid;
482 	dev->md_size = spdk_nvme_ns_get_md_size(dev->ns);
483 
484 	block_size = spdk_nvme_ns_get_extended_sector_size(dev->ns);
485 	if (block_size != FTL_BLOCK_SIZE) {
486 		SPDK_ERRLOG("Unsupported block size (%"PRIu32")\n", block_size);
487 		return -1;
488 	}
489 
490 	if (dev->md_size % sizeof(uint32_t) != 0) {
491 		/* Metadata pointer must be dword aligned */
492 		SPDK_ERRLOG("Unsupported metadata size (%zu)\n", dev->md_size);
493 		return -1;
494 	}
495 
496 	return 0;
497 }
498 
499 static int
500 ftl_dev_init_nv_cache(struct spdk_ftl_dev *dev, struct spdk_bdev_desc *bdev_desc)
501 {
502 	struct spdk_bdev *bdev;
503 	struct spdk_ftl_conf *conf = &dev->conf;
504 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
505 	char pool_name[128];
506 	int rc;
507 
508 	if (!bdev_desc) {
509 		return 0;
510 	}
511 
512 	bdev = spdk_bdev_desc_get_bdev(bdev_desc);
513 	SPDK_INFOLOG(SPDK_LOG_FTL_INIT, "Using %s as write buffer cache\n",
514 		     spdk_bdev_get_name(bdev));
515 
516 	if (spdk_bdev_get_block_size(bdev) != FTL_BLOCK_SIZE) {
517 		SPDK_ERRLOG("Unsupported block size (%d)\n", spdk_bdev_get_block_size(bdev));
518 		return -1;
519 	}
520 
521 	if (!spdk_bdev_is_md_separate(bdev)) {
522 		SPDK_ERRLOG("Bdev %s doesn't support separate metadata buffer IO\n",
523 			    spdk_bdev_get_name(bdev));
524 		return -1;
525 	}
526 
527 	if (spdk_bdev_get_md_size(bdev) < sizeof(uint64_t)) {
528 		SPDK_ERRLOG("Bdev's %s metadata is too small (%"PRIu32")\n",
529 			    spdk_bdev_get_name(bdev), spdk_bdev_get_md_size(bdev));
530 		return -1;
531 	}
532 
533 	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
534 		SPDK_ERRLOG("Unsupported DIF type used by bdev %s\n",
535 			    spdk_bdev_get_name(bdev));
536 		return -1;
537 	}
538 
539 	/* The cache needs to be capable of storing at least two full bands. This requirement comes
540 	 * from the fact that the cache works as protection against power loss, so before the data
541 	 * inside the cache can be overwritten, the band it's stored on has to be closed. Plus one
542 	 * extra block is needed to store the header.
543 	 */
544 	if (spdk_bdev_get_num_blocks(bdev) < ftl_num_band_lbks(dev) * 2 + 1) {
545 		SPDK_ERRLOG("Insufficient number of blocks for write buffer cache (available: %"
546 			    PRIu64", required: %"PRIu64")\n", spdk_bdev_get_num_blocks(bdev),
547 			    ftl_num_band_lbks(dev) * 2 + 1);
548 		return -1;
549 	}
550 
551 	rc = snprintf(pool_name, sizeof(pool_name), "ftl-nvpool-%p", dev);
552 	if (rc < 0 || rc >= (int)sizeof(pool_name)) {
553 		return -1;
554 	}
555 
556 	nv_cache->md_pool = spdk_mempool_create(pool_name, conf->nv_cache.max_request_cnt,
557 						spdk_bdev_get_md_size(bdev) *
558 						conf->nv_cache.max_request_size,
559 						SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
560 						SPDK_ENV_SOCKET_ID_ANY);
561 	if (!nv_cache->md_pool) {
562 		SPDK_ERRLOG("Failed to initialize non-volatile cache metadata pool\n");
563 		return -1;
564 	}
565 
566 	nv_cache->dma_buf = spdk_dma_zmalloc(FTL_BLOCK_SIZE, spdk_bdev_get_buf_align(bdev), NULL);
567 	if (!nv_cache->dma_buf) {
568 		SPDK_ERRLOG("Memory allocation failure\n");
569 		return -1;
570 	}
571 
572 	if (pthread_spin_init(&nv_cache->lock, PTHREAD_PROCESS_PRIVATE)) {
573 		SPDK_ERRLOG("Failed to initialize cache lock\n");
574 		return -1;
575 	}
576 
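	/* The first block of the cache bdev is reserved for the metadata header,
	 * hence the data area starts at FTL_NV_CACHE_DATA_OFFSET and one block
	 * is subtracted from the data block count.
	 */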
577 	nv_cache->bdev_desc = bdev_desc;
578 	nv_cache->current_addr = FTL_NV_CACHE_DATA_OFFSET;
579 	nv_cache->num_data_blocks = spdk_bdev_get_num_blocks(bdev) - 1;
580 	nv_cache->num_available = nv_cache->num_data_blocks;
581 	nv_cache->ready = false;
582 
583 	return 0;
584 }
585 
586 void
587 spdk_ftl_conf_init_defaults(struct spdk_ftl_conf *conf)
588 {
589 	*conf = g_default_conf;
590 }
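
/*
 * A minimal usage sketch (illustrative, with a hypothetical override): callers
 * start from the defaults and tweak selected fields before handing the config
 * over through spdk_ftl_dev_init_opts:
 *
 *	struct spdk_ftl_conf conf;
 *	spdk_ftl_conf_init_defaults(&conf);
 *	conf.lba_rsvd = 25;	// reserve 25% of blocks as spare
 *	opts.conf = &conf;
 */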
591 
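/*
 * Pool element constructor - pre-allocates each request's segment bit array
 * once, so acquiring a request from the pool never allocates at runtime.
 */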
592 static void
593 ftl_lba_map_request_ctor(struct spdk_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
594 {
595 	struct ftl_lba_map_request *request = obj;
596 	struct spdk_ftl_dev *dev = opaque;
597 
598 	request->segments = spdk_bit_array_create(spdk_divide_round_up(
599 				    ftl_num_band_lbks(dev), FTL_NUM_LBA_IN_BLOCK));
600 }
601 
602 static int
603 ftl_init_lba_map_pools(struct spdk_ftl_dev *dev)
604 {
605 #define POOL_NAME_LEN 128
606 	char pool_name[POOL_NAME_LEN];
607 	int rc;
608 
609 	rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ocssd-lba-pool");
610 	if (rc < 0 || rc >= POOL_NAME_LEN) {
611 		return -ENAMETOOLONG;
612 	}
613 
614 	/* We need to reserve at least 2 buffers for the band close / open sequence
615 	 * alone, plus an additional 8 buffers for handling write errors.
616 	 * TODO: This memory pool is utilized only by the core thread - it introduces
617 	 * unnecessary overhead and should be replaced by a different data structure.
618 	 */
619 	dev->lba_pool = spdk_mempool_create(pool_name, 2 + 8,
620 					    ftl_lba_map_pool_elem_size(dev),
621 					    SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
622 					    SPDK_ENV_SOCKET_ID_ANY);
623 	if (!dev->lba_pool) {
624 		return -ENOMEM;
625 	}
626 
627 	rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ocssd-lbareq-pool");
628 	if (rc < 0 || rc >= POOL_NAME_LEN) {
629 		return -ENAMETOOLONG;
630 	}
631 
632 	dev->lba_request_pool = spdk_mempool_create_ctor(pool_name,
633 				dev->conf.max_reloc_qdepth * dev->conf.max_active_relocs,
634 				sizeof(struct ftl_lba_map_request),
635 				SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
636 				SPDK_ENV_SOCKET_ID_ANY,
637 				ftl_lba_map_request_ctor,
638 				dev);
639 	if (!dev->lba_request_pool) {
640 		return -ENOMEM;
641 	}
642 
643 	return 0;
644 }
645 
646 static void
647 ftl_init_wptr_list(struct spdk_ftl_dev *dev)
648 {
649 	LIST_INIT(&dev->wptr_list);
650 	LIST_INIT(&dev->flush_list);
651 	LIST_INIT(&dev->band_flush_list);
652 }
653 
654 static size_t
655 ftl_dev_band_max_seq(struct spdk_ftl_dev *dev)
656 {
657 	struct ftl_band *band;
658 	size_t seq = 0;
659 
660 	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
661 		if (band->seq > seq) {
662 			seq = band->seq;
663 		}
664 	}
665 
666 	return seq;
667 }
668 
669 static void
670 _ftl_init_bands_state(void *ctx)
671 {
672 	struct ftl_band *band, *temp_band;
673 	struct spdk_ftl_dev *dev = ctx;
674 
675 	dev->seq = ftl_dev_band_max_seq(dev);
676 
677 	LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
678 		if (!band->lba_map.num_vld) {
679 			ftl_band_set_state(band, FTL_BAND_STATE_FREE);
680 		}
681 	}
682 
683 	ftl_reloc_resume(dev->reloc);
684 	/* Clear the limit-application counters, as they're incremented */
685 	/* incorrectly by the initialization code */
686 	memset(dev->stats.limits, 0, sizeof(dev->stats.limits));
687 }
688 
689 static int
690 ftl_init_num_free_bands(struct spdk_ftl_dev *dev)
691 {
692 	struct ftl_band *band;
693 	int cnt = 0;
694 
695 	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
696 		if (band->num_chunks && !band->lba_map.num_vld) {
697 			cnt++;
698 		}
699 	}
700 	return cnt;
701 }
702 
703 static int
704 ftl_init_bands_state(struct spdk_ftl_dev *dev)
705 {
706 	/* TODO: Should we abort initialization or expose a read-only device */
707 	/* if there are no free bands? */
708 	/* If we abort initialization, should we depend on the condition that */
709 	/* we have no free bands, or should we require some minimal number of */
710 	/* free bands? */
711 	if (!ftl_init_num_free_bands(dev)) {
712 		return -1;
713 	}
714 
715 	spdk_thread_send_msg(ftl_get_core_thread(dev), _ftl_init_bands_state, dev);
716 	return 0;
717 }
718 
719 static void
720 _ftl_dev_init_thread(void *ctx)
721 {
722 	struct ftl_thread *thread = ctx;
723 	struct spdk_ftl_dev *dev = thread->dev;
724 
725 	thread->poller = spdk_poller_register(thread->poller_fn, thread, thread->period_us);
726 	if (!thread->poller) {
727 		SPDK_ERRLOG("Unable to register poller\n");
728 		assert(0);
729 	}
730 
731 	if (spdk_get_thread() == ftl_get_core_thread(dev)) {
732 		ftl_anm_register_device(dev, ftl_process_anm_event);
733 	}
734 
735 	thread->ioch = spdk_get_io_channel(dev);
736 }
737 
738 static int
739 ftl_dev_init_thread(struct spdk_ftl_dev *dev, struct ftl_thread *thread,
740 		    struct spdk_thread *spdk_thread, spdk_poller_fn fn, uint64_t period_us)
741 {
742 	thread->dev = dev;
743 	thread->poller_fn = fn;
744 	thread->thread = spdk_thread;
745 	thread->period_us = period_us;
746 
747 	thread->qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, NULL, 0);
748 	if (!thread->qpair) {
749 		SPDK_ERRLOG("Unable to initialize qpair\n");
750 		return -1;
751 	}
752 
753 	spdk_thread_send_msg(spdk_thread, _ftl_dev_init_thread, thread);
754 	return 0;
755 }
756 
757 static int
758 ftl_dev_init_threads(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
759 {
760 	if (!opts->core_thread || !opts->read_thread) {
761 		return -1;
762 	}
763 
764 	if (ftl_dev_init_thread(dev, &dev->core_thread, opts->core_thread, ftl_task_core, 0)) {
765 		SPDK_ERRLOG("Unable to initialize core thread\n");
766 		return -1;
767 	}
768 
769 	if (ftl_dev_init_thread(dev, &dev->read_thread, opts->read_thread, ftl_task_read, 0)) {
770 		SPDK_ERRLOG("Unable to initialize read thread\n");
771 		return -1;
772 	}
773 
774 	return 0;
775 }
776 
777 static void
778 ftl_dev_free_thread(struct spdk_ftl_dev *dev, struct ftl_thread *thread)
779 {
780 	assert(thread->poller == NULL);
781 
782 	spdk_put_io_channel(thread->ioch);
783 	spdk_nvme_ctrlr_free_io_qpair(thread->qpair);
784 	thread->thread = NULL;
785 	thread->ioch = NULL;
786 	thread->qpair = NULL;
787 }
788 
789 static int
790 ftl_dev_l2p_alloc(struct spdk_ftl_dev *dev)
791 {
792 	size_t addr_size;
793 	uint64_t i;
794 
795 	if (dev->num_lbas == 0) {
796 		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Invalid l2p table size\n");
797 		return -1;
798 	}
799 
800 	if (dev->l2p) {
801 		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "L2p table already allocated\n");
802 		return -1;
803 	}
804 
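	/* Each L2P entry holds a packed PPA - 32-bit entries suffice when the
	 * device's PPA fits in 32 bits, otherwise 64-bit entries are used.
	 */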
805 	addr_size = dev->ppa_len >= 32 ? 8 : 4;
806 	dev->l2p = malloc(dev->num_lbas * addr_size);
807 	if (!dev->l2p) {
808 		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Failed to allocate l2p table\n");
809 		return -1;
810 	}
811 
812 	for (i = 0; i < dev->num_lbas; ++i) {
813 		ftl_l2p_set(dev, i, ftl_to_ppa(FTL_PPA_INVALID));
814 	}
815 
816 	return 0;
817 }
818 
819 static void
820 ftl_call_init_complete_cb(void *_ctx)
821 {
822 	struct ftl_init_context *ctx = _ctx;
823 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(ctx, struct spdk_ftl_dev, init_ctx);
824 
825 	if (ctx->cb_fn != NULL) {
826 		ctx->cb_fn(dev, ctx->cb_arg, 0);
827 	}
828 }
829 
830 static void
831 ftl_init_complete(struct spdk_ftl_dev *dev)
832 {
833 	pthread_mutex_lock(&g_ftl_queue_lock);
834 	STAILQ_INSERT_HEAD(&g_ftl_queue, dev, stailq);
835 	pthread_mutex_unlock(&g_ftl_queue_lock);
836 
837 	dev->initialized = 1;
838 
839 	spdk_thread_send_msg(dev->init_ctx.thread, ftl_call_init_complete_cb, &dev->init_ctx);
840 }
841 
842 static void
843 ftl_init_fail_cb(struct spdk_ftl_dev *dev, void *_ctx, int status)
844 {
845 	struct ftl_init_context *ctx = _ctx;
846 
847 	if (ctx->cb_fn != NULL) {
848 		ctx->cb_fn(NULL, ctx->cb_arg, -ENODEV);
849 	}
850 
851 	free(ctx);
852 }
853 
854 static int _spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg,
855 			      struct spdk_thread *thread);
856 
857 static void
858 ftl_init_fail(struct spdk_ftl_dev *dev)
859 {
860 	struct ftl_init_context *ctx;
861 
862 	ctx = malloc(sizeof(*ctx));
863 	if (!ctx) {
864 		SPDK_ERRLOG("Unable to allocate context to free the device\n");
865 		return;
866 	}
867 
868 	*ctx = dev->init_ctx;
869 	if (_spdk_ftl_dev_free(dev, ftl_init_fail_cb, ctx, ctx->thread)) {
870 		SPDK_ERRLOG("Unable to free the device\n");
871 		assert(0);
872 	}
873 }
874 
875 static void
876 ftl_write_nv_cache_md_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
877 {
878 	struct spdk_ftl_dev *dev = cb_arg;
879 
880 	spdk_bdev_free_io(bdev_io);
881 	if (spdk_unlikely(!success)) {
882 		SPDK_ERRLOG("Writing non-volatile cache's metadata header failed\n");
883 		ftl_init_fail(dev);
884 		return;
885 	}
886 
887 	dev->nv_cache.ready = true;
888 	ftl_init_complete(dev);
889 }
890 
891 static void
892 ftl_clear_nv_cache_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
893 {
894 	struct spdk_ftl_dev *dev = cb_arg;
895 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
896 
897 	spdk_bdev_free_io(bdev_io);
898 	if (spdk_unlikely(!success)) {
899 		SPDK_ERRLOG("Unable to clear the non-volatile cache bdev\n");
900 		ftl_init_fail(dev);
901 		return;
902 	}
903 
904 	nv_cache->phase = 1;
905 	if (ftl_nv_cache_write_header(nv_cache, false, ftl_write_nv_cache_md_cb, dev)) {
906 		SPDK_ERRLOG("Unable to write non-volatile cache metadata header\n");
907 		ftl_init_fail(dev);
908 	}
909 }
910 
911 static void
912 _ftl_nv_cache_scrub(void *ctx)
913 {
914 	struct spdk_ftl_dev *dev = ctx;
915 	int rc;
916 
917 	rc = ftl_nv_cache_scrub(&dev->nv_cache, ftl_clear_nv_cache_cb, dev);
918 
919 	if (spdk_unlikely(rc != 0)) {
920 		SPDK_ERRLOG("Unable to clear the non-volatile cache bdev: %s\n",
921 			    spdk_strerror(-rc));
922 		ftl_init_fail(dev);
923 	}
924 }
925 
926 static int
927 ftl_setup_initial_state(struct spdk_ftl_dev *dev)
928 {
929 	struct spdk_ftl_conf *conf = &dev->conf;
930 	size_t i;
931 
932 	spdk_uuid_generate(&dev->uuid);
933 
934 	dev->num_lbas = 0;
935 	for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
936 		dev->num_lbas += ftl_band_num_usable_lbks(&dev->bands[i]);
937 	}
938 
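	/* Expose only (100 - lba_rsvd)% of the usable blocks as LBAs - the
	 * reserved spare capacity gives the FTL headroom for garbage collection.
	 */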
939 	dev->num_lbas = (dev->num_lbas * (100 - conf->lba_rsvd)) / 100;
940 
941 	if (ftl_dev_l2p_alloc(dev)) {
942 		SPDK_ERRLOG("Unable to init l2p table\n");
943 		return -1;
944 	}
945 
946 	if (ftl_init_bands_state(dev)) {
947 		SPDK_ERRLOG("Unable to finish the initialization\n");
948 		return -1;
949 	}
950 
951 	if (!ftl_dev_has_nv_cache(dev)) {
952 		ftl_init_complete(dev);
953 	} else {
954 		spdk_thread_send_msg(ftl_get_core_thread(dev), _ftl_nv_cache_scrub, dev);
955 	}
956 
957 	return 0;
958 }
959 
960 static void
961 ftl_restore_nv_cache_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status)
962 {
963 	if (spdk_unlikely(status != 0)) {
964 		SPDK_ERRLOG("Failed to restore the non-volatile cache state\n");
965 		ftl_init_fail(dev);
966 		return;
967 	}
968 
969 	ftl_init_complete(dev);
970 }
971 
972 static void
973 ftl_restore_device_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status)
974 {
975 	if (status) {
976 		SPDK_ERRLOG("Failed to restore the device from the SSD\n");
977 		ftl_init_fail(dev);
978 		return;
979 	}
980 
981 	if (ftl_init_bands_state(dev)) {
982 		SPDK_ERRLOG("Unable to finish the initialization\n");
983 		ftl_init_fail(dev);
984 		return;
985 	}
986 
987 	if (!ftl_dev_has_nv_cache(dev)) {
988 		ftl_init_complete(dev);
989 		return;
990 	}
991 
992 	ftl_restore_nv_cache(restore, ftl_restore_nv_cache_cb);
993 }
994 
995 static void
996 ftl_restore_md_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status)
997 {
998 	if (status) {
999 		SPDK_ERRLOG("Failed to restore the metadata from the SSD\n");
1000 		goto error;
1001 	}
1002 
1003 	/* After the metadata is read it should be possible to allocate the L2P */
1004 	if (ftl_dev_l2p_alloc(dev)) {
1005 		SPDK_ERRLOG("Failed to allocate the L2P\n");
1006 		goto error;
1007 	}
1008 
1009 	if (ftl_restore_device(restore, ftl_restore_device_cb)) {
1010 		SPDK_ERRLOG("Failed to start device restoration from the SSD\n");
1011 		goto error;
1012 	}
1013 
1014 	return;
1015 error:
1016 	ftl_init_fail(dev);
1017 }
1018 
1019 static int
1020 ftl_restore_state(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
1021 {
1022 	dev->uuid = opts->uuid;
1023 
1024 	if (ftl_restore_md(dev, ftl_restore_md_cb)) {
1025 		SPDK_ERRLOG("Failed to start metadata restoration from the SSD\n");
1026 		return -1;
1027 	}
1028 
1029 	return 0;
1030 }
1031 
1032 static int
1033 ftl_io_channel_create_cb(void *io_device, void *ctx)
1034 {
1035 	struct spdk_ftl_dev *dev = io_device;
1036 	struct ftl_io_channel *ioch = ctx;
1037 	char mempool_name[32];
1038 
1039 	snprintf(mempool_name, sizeof(mempool_name), "ftl_io_%p", ioch);
1040 	ioch->cache_ioch = NULL;
1041 	ioch->dev = dev;
1042 	ioch->elem_size = sizeof(struct ftl_md_io);
1043 	ioch->io_pool = spdk_mempool_create(mempool_name,
1044 					    dev->conf.user_io_pool_size,
1045 					    ioch->elem_size,
1046 					    0,
1047 					    SPDK_ENV_SOCKET_ID_ANY);
1048 	if (!ioch->io_pool) {
1049 		SPDK_ERRLOG("Failed to create IO channel's IO pool\n");
1050 		return -1;
1051 	}
1052 
1053 	if (ftl_dev_has_nv_cache(dev)) {
1054 		ioch->cache_ioch = spdk_bdev_get_io_channel(dev->nv_cache.bdev_desc);
1055 		if (!ioch->cache_ioch) {
1056 			SPDK_ERRLOG("Failed to create cache IO channel\n");
1057 			spdk_mempool_free(ioch->io_pool);
1058 			return -1;
1059 		}
1060 	}
1061 
1062 	return 0;
1063 }
1064 
1065 static void
1066 ftl_io_channel_destroy_cb(void *io_device, void *ctx)
1067 {
1068 	struct ftl_io_channel *ioch = ctx;
1069 
1070 	spdk_mempool_free(ioch->io_pool);
1071 
1072 	if (ioch->cache_ioch) {
1073 		spdk_put_io_channel(ioch->cache_ioch);
1074 	}
1075 }
1076 
1077 static int
1078 ftl_dev_init_io_channel(struct spdk_ftl_dev *dev)
1079 {
1080 	spdk_io_device_register(dev, ftl_io_channel_create_cb, ftl_io_channel_destroy_cb,
1081 				sizeof(struct ftl_io_channel),
1082 				NULL);
1083 
1084 	return 0;
1085 }
1086 
1087 int
1088 spdk_ftl_dev_init(const struct spdk_ftl_dev_init_opts *_opts, spdk_ftl_init_fn cb_fn, void *cb_arg)
1089 {
1090 	struct spdk_ftl_dev *dev;
1091 	struct spdk_ftl_dev_init_opts opts = *_opts;
1092 
1093 	dev = calloc(1, sizeof(*dev));
1094 	if (!dev) {
1095 		return -ENOMEM;
1096 	}
1097 
1098 	if (!opts.conf) {
1099 		opts.conf = &g_default_conf;
1100 	}
1101 
1102 	TAILQ_INIT(&dev->retry_queue);
1103 	dev->conf = *opts.conf;
1104 	dev->init_ctx.cb_fn = cb_fn;
1105 	dev->init_ctx.cb_arg = cb_arg;
1106 	dev->init_ctx.thread = spdk_get_thread();
1107 	dev->range = opts.range;
1108 	dev->limit = SPDK_FTL_LIMIT_MAX;
1109 
1110 	dev->name = strdup(opts.name);
1111 	if (!dev->name) {
1112 		SPDK_ERRLOG("Unable to set device name\n");
1113 		goto fail_sync;
1114 	}
1115 
1116 	if (ftl_dev_nvme_init(dev, &opts)) {
1117 		SPDK_ERRLOG("Unable to initialize NVMe structures\n");
1118 		goto fail_sync;
1119 	}
1120 
1121 	/* In case of errors, we free all of the memory in ftl_dev_free_sync(), */
1122 	/* so we don't have to clean up in each of the init functions. */
1123 	if (ftl_dev_retrieve_geo(dev)) {
1124 		SPDK_ERRLOG("Unable to retrieve geometry\n");
1125 		goto fail_sync;
1126 	}
1127 
1128 	if (ftl_check_init_opts(&opts, &dev->geo)) {
1129 		SPDK_ERRLOG("Invalid device configuration\n");
1130 		goto fail_sync;
1131 	}
1132 
1133 	if (ftl_dev_init_punits(dev)) {
1134 		SPDK_ERRLOG("Unable to initialize LUNs\n");
1135 		goto fail_sync;
1136 	}
1137 
1138 	if (ftl_init_lba_map_pools(dev)) {
1139 		SPDK_ERRLOG("Unable to init LBA map pools\n");
1140 		goto fail_sync;
1141 	}
1142 
1143 	ftl_init_wptr_list(dev);
1144 
1145 	if (ftl_dev_init_bands(dev)) {
1146 		SPDK_ERRLOG("Unable to initialize band array\n");
1147 		goto fail_sync;
1148 	}
1149 
1150 	if (ftl_dev_init_nv_cache(dev, opts.cache_bdev_desc)) {
1151 		SPDK_ERRLOG("Unable to initialize persistent cache\n");
1152 		goto fail_sync;
1153 	}
1154 
1155 	dev->rwb = ftl_rwb_init(&dev->conf, dev->geo.ws_opt, dev->md_size, ftl_dev_num_punits(dev));
1156 	if (!dev->rwb) {
1157 		SPDK_ERRLOG("Unable to initialize rwb structures\n");
1158 		goto fail_sync;
1159 	}
1160 
1161 	dev->reloc = ftl_reloc_init(dev);
1162 	if (!dev->reloc) {
1163 		SPDK_ERRLOG("Unable to initialize reloc structures\n");
1164 		goto fail_sync;
1165 	}
1166 
1167 	if (ftl_dev_init_io_channel(dev)) {
1168 		SPDK_ERRLOG("Unable to initialize IO channels\n");
1169 		goto fail_sync;
1170 	}
1171 
1172 	if (ftl_dev_init_threads(dev, &opts)) {
1173 		SPDK_ERRLOG("Unable to initialize device threads\n");
1174 		goto fail_sync;
1175 	}
1176 
1177 	if (opts.mode & SPDK_FTL_MODE_CREATE) {
1178 		if (ftl_setup_initial_state(dev)) {
1179 			SPDK_ERRLOG("Failed to setup initial state of the device\n");
1180 			goto fail_async;
1181 		}
1182 	} else {
1183 		if (ftl_restore_state(dev, &opts)) {
1184 			SPDK_ERRLOG("Unable to restore device's state from the SSD\n");
1185 			goto fail_async;
1186 		}
1187 	}
1188 
1189 	return 0;
1190 fail_sync:
1191 	ftl_dev_free_sync(dev);
1192 	return -ENOMEM;
1193 fail_async:
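	/* The failure is reported through the user's callback by ftl_init_fail(),
	 * so the call itself is considered successful at this point.
	 */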
1194 	ftl_init_fail(dev);
1195 	return 0;
1196 }
1197 
1198 static void
1199 _ftl_halt_defrag(void *arg)
1200 {
1201 	ftl_reloc_halt(((struct spdk_ftl_dev *)arg)->reloc);
1202 }
1203 
1204 static void
1205 ftl_lba_map_request_dtor(struct spdk_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
1206 {
1207 	struct ftl_lba_map_request *request = obj;
1208 
1209 	spdk_bit_array_free(&request->segments);
1210 }
1211 
1212 static void
1213 ftl_dev_free_sync(struct spdk_ftl_dev *dev)
1214 {
1215 	struct spdk_ftl_dev *iter;
1216 	size_t i;
1217 
1218 	if (!dev) {
1219 		return;
1220 	}
1221 
1222 	pthread_mutex_lock(&g_ftl_queue_lock);
1223 	STAILQ_FOREACH(iter, &g_ftl_queue, stailq) {
1224 		if (iter == dev) {
1225 			STAILQ_REMOVE(&g_ftl_queue, dev, spdk_ftl_dev, stailq);
1226 			break;
1227 		}
1228 	}
1229 	pthread_mutex_unlock(&g_ftl_queue_lock);
1230 
1231 	assert(LIST_EMPTY(&dev->wptr_list));
1232 	assert(ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_INTERNAL) == 0);
1233 	assert(ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_USER) == 0);
1234 
1235 	ftl_dev_dump_bands(dev);
1236 	ftl_dev_dump_stats(dev);
1237 
1238 	spdk_io_device_unregister(dev, NULL);
1239 
1240 	if (dev->core_thread.thread) {
1241 		ftl_dev_free_thread(dev, &dev->core_thread);
1242 	}
1243 	if (dev->read_thread.thread) {
1244 		ftl_dev_free_thread(dev, &dev->read_thread);
1245 	}
1246 
1247 	if (dev->bands) {
1248 		for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
1249 			free(dev->bands[i].chunk_buf);
1250 			spdk_bit_array_free(&dev->bands[i].lba_map.vld);
1251 			spdk_bit_array_free(&dev->bands[i].reloc_bitmap);
1252 		}
1253 	}
1254 
1255 	spdk_dma_free(dev->nv_cache.dma_buf);
1256 
1257 	spdk_mempool_free(dev->lba_pool);
1258 	spdk_mempool_free(dev->nv_cache.md_pool);
1259 	if (dev->lba_request_pool) {
1260 		spdk_mempool_obj_iter(dev->lba_request_pool, ftl_lba_map_request_dtor, NULL);
1261 	}
1262 	spdk_mempool_free(dev->lba_request_pool);
1263 
1264 	ftl_rwb_free(dev->rwb);
1265 	ftl_reloc_free(dev->reloc);
1266 
1267 	free(dev->name);
1268 	free(dev->punits);
1269 	free(dev->bands);
1270 	free(dev->l2p);
1271 	free(dev);
1272 }
1273 
1274 static void
1275 ftl_call_fini_complete(struct spdk_ftl_dev *dev, int status)
1276 {
1277 	struct ftl_init_context ctx = dev->fini_ctx;
1278 
1279 	ftl_dev_free_sync(dev);
1280 	if (ctx.cb_fn != NULL) {
1281 		ctx.cb_fn(NULL, ctx.cb_arg, status);
1282 	}
1283 }
1284 
1285 static void
1286 ftl_halt_complete_cb(void *ctx)
1287 {
1288 	struct spdk_ftl_dev *dev = ctx;
1289 
1290 	ftl_call_fini_complete(dev, dev->halt_complete_status);
1291 }
1292 
1293 static void
1294 ftl_nv_cache_header_fini_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1295 {
1296 	struct spdk_ftl_dev *dev = cb_arg;
1297 	int rc = 0;
1298 
1299 	spdk_bdev_free_io(bdev_io);
1300 	if (spdk_unlikely(!success)) {
1301 		SPDK_ERRLOG("Failed to write non-volatile cache metadata header\n");
1302 		rc = -EIO;
1303 	}
1304 
1305 	dev->halt_complete_status = rc;
1306 	spdk_thread_send_msg(dev->fini_ctx.thread, ftl_halt_complete_cb, dev);
1307 }
1308 
1309 static void
1310 _ftl_anm_unregister_cb(void *ctx)
1311 {
1312 	struct spdk_ftl_dev *dev = ctx;
1313 
1314 	if (ftl_dev_has_nv_cache(dev)) {
1315 		ftl_nv_cache_write_header(&dev->nv_cache, true, ftl_nv_cache_header_fini_cb, dev);
1316 	} else {
1317 		dev->halt_complete_status = 0;
1318 		spdk_thread_send_msg(dev->fini_ctx.thread, ftl_halt_complete_cb, dev);
1319 	}
1320 }
1321 
1322 static void
1323 ftl_anm_unregister_cb(void *ctx, int status)
1324 {
1325 	struct spdk_ftl_dev *dev = ctx;
1326 
1327 	spdk_thread_send_msg(ftl_get_core_thread(dev), _ftl_anm_unregister_cb, dev);
1328 }
1329 
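/*
 * Shutdown sequence: once the core and read thread pollers are gone (the
 * ftl_task_* pollers are expected to unregister themselves after dev->halt is
 * set), unregister the device from ANM. The ANM callback then persists the
 * non-volatile cache header (if a cache is present) and completes the free
 * operation on the caller's thread.
 */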
1330 static int
1331 ftl_halt_poller(void *ctx)
1332 {
1333 	struct spdk_ftl_dev *dev = ctx;
1334 	int rc;
1335 
1336 	if (!dev->core_thread.poller && !dev->read_thread.poller) {
1337 		rc = ftl_anm_unregister_device(dev, ftl_anm_unregister_cb);
1338 		if (spdk_unlikely(rc != 0)) {
1339 			SPDK_ERRLOG("Failed to unregister ANM device, will retry later\n");
1340 		} else {
1341 			spdk_poller_unregister(&dev->fini_ctx.poller);
1342 		}
1343 	}
1344 
1345 	return 0;
1346 }
1347 
1348 static void
1349 ftl_add_halt_poller(void *ctx)
1350 {
1351 	struct spdk_ftl_dev *dev = ctx;

1352 	dev->halt = 1;
1353 
1354 	_ftl_halt_defrag(dev);
1355 
1356 	assert(!dev->fini_ctx.poller);
1357 	dev->fini_ctx.poller = spdk_poller_register(ftl_halt_poller, dev, 100);
1358 }
1359 
1360 static int
1361 _spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg,
1362 		   struct spdk_thread *thread)
1363 {
1364 	if (dev->fini_ctx.cb_fn != NULL) {
1365 		return -EBUSY;
1366 	}
1367 
1368 	dev->fini_ctx.cb_fn = cb_fn;
1369 	dev->fini_ctx.cb_arg = cb_arg;
1370 	dev->fini_ctx.thread = thread;
1371 
1372 	ftl_rwb_disable_interleaving(dev->rwb);
1373 
1374 	spdk_thread_send_msg(ftl_get_core_thread(dev), ftl_add_halt_poller, dev);
1375 	return 0;
1376 }
1377 
1378 int
1379 spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg)
1380 {
1381 	return _spdk_ftl_dev_free(dev, cb_fn, cb_arg, spdk_get_thread());
1382 }
1383 
1384 int
1385 spdk_ftl_module_init(const struct ftl_module_init_opts *opts, spdk_ftl_fn cb, void *cb_arg)
1386 {
1387 	return ftl_anm_init(opts->anm_thread, cb, cb_arg);
1388 }
1389 
1390 int
1391 spdk_ftl_module_fini(spdk_ftl_fn cb, void *cb_arg)
1392 {
1393 	return ftl_anm_free(cb, cb_arg);
1394 }
1395 
1396 SPDK_LOG_REGISTER_COMPONENT("ftl_init", SPDK_LOG_FTL_INIT)
1397