/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/nvme.h"
#include "spdk/io_channel.h"
#include "spdk/bdev_module.h"
#include "spdk/string.h"
#include "spdk/likely.h"
#include "spdk_internal/log.h"
#include "spdk/ftl.h"

#include "ftl_core.h"
#include "ftl_anm.h"
#include "ftl_io.h"
#include "ftl_reloc.h"
#include "ftl_rwb.h"
#include "ftl_band.h"
#include "ftl_debug.h"

#define FTL_CORE_RING_SIZE	4096
#define FTL_INIT_TIMEOUT	30
#define FTL_NSID		1

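/* Returns true if the closed ranges [s1, e1] and [s2, e2] overlap */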
#define ftl_range_intersect(s1, e1, s2, e2) \
	((s1) <= (e2) && (s2) <= (e1))

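/* Completion context for synchronously processed admin commands - the callback
 * stores the completion status and signals the busy-waiting submitter.
 */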
struct ftl_admin_cmpl {
	struct spdk_nvme_cpl			status;

	int					complete;
};

static STAILQ_HEAD(, spdk_ftl_dev)	g_ftl_queue = STAILQ_HEAD_INITIALIZER(g_ftl_queue);
static pthread_mutex_t			g_ftl_queue_lock = PTHREAD_MUTEX_INITIALIZER;
static const struct spdk_ftl_conf	g_default_conf = {
	.defrag = {
		.limits = {
			/* 5 free bands  / 0 % host writes */
			[SPDK_FTL_LIMIT_CRIT]  = { .thld = 5,  .limit = 0 },
			/* 10 free bands / 5 % host writes */
			[SPDK_FTL_LIMIT_HIGH]  = { .thld = 10, .limit = 5 },
			/* 20 free bands / 40 % host writes */
			[SPDK_FTL_LIMIT_LOW]   = { .thld = 20, .limit = 40 },
			/* 40 free bands / 100 % host writes - defrag starts running */
			[SPDK_FTL_LIMIT_START] = { .thld = 40, .limit = 100 },
		},
		/* 10 percent valid lbks */
		.invalid_thld = 10,
	},
	/* 20% spare lbks */
	.lba_rsvd = 20,
	/* 6M write buffer */
	.rwb_size = 6 * 1024 * 1024,
	/* 90% band fill threshold */
	.band_thld = 90,
	/* Max 32 IO depth per band relocate */
	.max_reloc_qdepth = 32,
	/* Max 3 active band relocates */
	.max_active_relocs = 3,
	/* IO pool size per user thread (this should be adjusted to thread IO qdepth) */
	.user_io_pool_size = 2048,
	/* Number of interleaving units per ws_opt */
	/* 1 for default and 3 for 3D TLC NAND */
	.num_interleave_units = 1,
	/*
	 * If clear, FTL will return an error when restoring after a dirty shutdown.
	 * If set, the last band will be padded and FTL will restore based only on
	 * closed bands - this will result in lost data after recovery.
	 */
	.allow_open_bands = false,
	.nv_cache = {
		/* Maximum number of concurrent requests */
		.max_request_cnt = 2048,
		/* Maximum number of blocks per request */
		.max_request_size = 16,
	}
};

static void ftl_dev_free_sync(struct spdk_ftl_dev *dev);

static void
ftl_admin_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct ftl_admin_cmpl *cmpl = ctx;

	cmpl->complete = 1;
	cmpl->status = *cpl;
}

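/* Allocate the band's valid block bitmap and initialize its metadata lock */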
static int
ftl_band_init_md(struct ftl_band *band)
{
	struct ftl_lba_map *lba_map = &band->lba_map;

	lba_map->vld = spdk_bit_array_create(ftl_num_band_lbks(band->dev));
	if (!lba_map->vld) {
		return -ENOMEM;
	}

	pthread_spin_init(&lba_map->lock, PTHREAD_PROCESS_PRIVATE);
	ftl_band_md_clear(band);
	return 0;
}

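/* Sanity-check the configuration against the device's geometry */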
static int
ftl_check_conf(const struct spdk_ftl_conf *conf,
	       const struct spdk_ocssd_geometry_data *geo)
{
	size_t i;

	if (conf->defrag.invalid_thld >= 100) {
		return -1;
	}
	if (conf->lba_rsvd >= 100) {
		return -1;
	}
	if (conf->lba_rsvd == 0) {
		return -1;
	}
	if (conf->rwb_size == 0) {
		return -1;
	}
	if (conf->rwb_size % FTL_BLOCK_SIZE != 0) {
		return -1;
	}
	if (geo->ws_opt % conf->num_interleave_units != 0) {
		return -1;
	}

	for (i = 0; i < SPDK_FTL_LIMIT_MAX; ++i) {
		if (conf->defrag.limits[i].limit > 100) {
			return -1;
		}
	}

	return 0;
}

static int
ftl_check_init_opts(const struct spdk_ftl_dev_init_opts *opts,
		    const struct spdk_ocssd_geometry_data *geo)
{
	struct spdk_ftl_dev *dev;
	size_t num_punits = geo->num_pu * geo->num_grp;
	int rc = 0;

	if (opts->range.begin > opts->range.end || opts->range.end >= num_punits) {
		return -1;
	}

	if (ftl_check_conf(opts->conf, geo)) {
		return -1;
	}

	pthread_mutex_lock(&g_ftl_queue_lock);

	STAILQ_FOREACH(dev, &g_ftl_queue, stailq) {
		if (spdk_nvme_transport_id_compare(&dev->trid, &opts->trid)) {
			continue;
		}

		if (ftl_range_intersect(opts->range.begin, opts->range.end,
					dev->range.begin, dev->range.end)) {
			rc = -1;
			goto out;
		}
	}

out:
	pthread_mutex_unlock(&g_ftl_queue_lock);
	return rc;
}

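/* Synchronously read num_entries entries of the OCSSD chunk information log
 * page for the chunk at the given PPA, busy-waiting on admin completions.
 */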
int
ftl_retrieve_chunk_info(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
			struct spdk_ocssd_chunk_information_entry *info,
			unsigned int num_entries)
{
	volatile struct ftl_admin_cmpl cmpl = {};
	uint32_t nsid = spdk_nvme_ns_get_id(dev->ns);
	uint64_t offset = (ppa.grp * dev->geo.num_pu + ppa.pu) *
			  dev->geo.num_chk + ppa.chk;
	int rc;

	rc = spdk_nvme_ctrlr_cmd_get_log_page(dev->ctrlr, SPDK_OCSSD_LOG_CHUNK_INFO, nsid,
					      info, num_entries * sizeof(*info),
					      offset * sizeof(*info),
					      ftl_admin_cb, (void *)&cmpl);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_cmd_get_log_page: %s\n", spdk_strerror(-rc));
		return -1;
	}

	while (!cmpl.complete) {
		spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
	}

	if (spdk_nvme_cpl_is_error(&cmpl.status)) {
		SPDK_ERRLOG("Unexpected status code: [%d], status code type: [%d]\n",
			    cmpl.status.status.sc, cmpl.status.status.sct);
		return -1;
	}

	return 0;
}

static int
ftl_retrieve_punit_chunk_info(struct spdk_ftl_dev *dev, const struct ftl_punit *punit,
			      struct spdk_ocssd_chunk_information_entry *info)
{
	uint32_t i = 0;
	unsigned int num_entries = FTL_BLOCK_SIZE / sizeof(*info);
	struct ftl_ppa chunk_ppa = punit->start_ppa;
	char ppa_buf[128];

	for (i = 0; i < dev->geo.num_chk; i += num_entries, chunk_ppa.chk += num_entries) {
		if (num_entries > dev->geo.num_chk - i) {
			num_entries = dev->geo.num_chk - i;
		}

		if (ftl_retrieve_chunk_info(dev, chunk_ppa, &info[i], num_entries)) {
			SPDK_ERRLOG("Failed to retrieve chunk information @ppa: %s\n",
				    ftl_ppa2str(chunk_ppa, ppa_buf, sizeof(ppa_buf)));
			return -1;
		}
	}

	return 0;
}

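/* Translate the OCSSD chunk state bits into the corresponding FTL chunk state */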
static unsigned char
ftl_get_chunk_state(const struct spdk_ocssd_chunk_information_entry *info)
{
	if (info->cs.free) {
		return FTL_CHUNK_STATE_FREE;
	}

	if (info->cs.open) {
		return FTL_CHUNK_STATE_OPEN;
	}

	if (info->cs.closed) {
		return FTL_CHUNK_STATE_CLOSED;
	}

	if (info->cs.offline) {
		return FTL_CHUNK_STATE_BAD;
	}

	assert(0 && "Invalid chunk state");
	return FTL_CHUNK_STATE_BAD;
}

static void
ftl_remove_empty_bands(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band, *temp_band;

	/* Remove the band from the shut_bands list to prevent further processing
	 * if all blocks on the band are bad.
	 */
	LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
		if (!band->num_chunks) {
			dev->num_bands--;
			LIST_REMOVE(band, list_entry);
		}
	}
}

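/* Allocate the band array and populate each band's chunk list from the
 * per-punit chunk information, skipping chunks reported as offline.
 */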
static int
ftl_dev_init_bands(struct spdk_ftl_dev *dev)
{
	struct spdk_ocssd_chunk_information_entry	*info;
	struct ftl_band					*band, *pband;
	struct ftl_punit				*punit;
	struct ftl_chunk				*chunk;
	unsigned int					i, j;
	char						buf[128];
	int						rc = 0;

	LIST_INIT(&dev->free_bands);
	LIST_INIT(&dev->shut_bands);

	dev->num_free = 0;
	dev->num_bands = ftl_dev_num_bands(dev);
	dev->bands = calloc(ftl_dev_num_bands(dev), sizeof(*dev->bands));
	if (!dev->bands) {
		return -1;
	}

	info = calloc(dev->geo.num_chk, sizeof(*info));
	if (!info) {
		return -1;
	}

	for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
		band = &dev->bands[i];
		band->id = i;
		band->dev = dev;
		band->state = FTL_BAND_STATE_CLOSED;

		if (LIST_EMPTY(&dev->shut_bands)) {
			LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
		} else {
			LIST_INSERT_AFTER(pband, band, list_entry);
		}
		pband = band;

		CIRCLEQ_INIT(&band->chunks);
		band->chunk_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->chunk_buf));
		if (!band->chunk_buf) {
			SPDK_ERRLOG("Failed to allocate block state table for band: [%u]\n", i);
			rc = -1;
			goto out;
		}

		rc = ftl_band_init_md(band);
		if (rc) {
			SPDK_ERRLOG("Failed to initialize metadata structures for band [%u]\n", i);
			goto out;
		}

		band->reloc_bitmap = spdk_bit_array_create(ftl_dev_num_bands(dev));
		if (!band->reloc_bitmap) {
			SPDK_ERRLOG("Failed to allocate band relocation bitmap\n");
			rc = -1;
			goto out;
		}
	}

	for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
		punit = &dev->punits[i];

		rc = ftl_retrieve_punit_chunk_info(dev, punit, info);
		if (rc) {
			SPDK_ERRLOG("Failed to retrieve bbt for @ppa: %s [%lu]\n",
				    ftl_ppa2str(punit->start_ppa, buf, sizeof(buf)),
				    ftl_ppa_addr_pack(dev, punit->start_ppa));
			goto out;
		}

		for (j = 0; j < ftl_dev_num_bands(dev); ++j) {
			band = &dev->bands[j];
			chunk = &band->chunk_buf[i];
			chunk->pos = i;
			chunk->state = ftl_get_chunk_state(&info[j]);
			chunk->punit = punit;
			chunk->start_ppa = punit->start_ppa;
			chunk->start_ppa.chk = band->id;
			chunk->write_offset = ftl_dev_lbks_in_chunk(dev);

			if (chunk->state != FTL_CHUNK_STATE_BAD) {
				band->num_chunks++;
				CIRCLEQ_INSERT_TAIL(&band->chunks, chunk, circleq);
			}
		}
	}

	for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
		band = &dev->bands[i];
		band->tail_md_ppa = ftl_band_tail_md_ppa(band);
	}

	ftl_remove_empty_bands(dev);
out:
	free(info);
	return rc;
}

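/* Assign each parallel unit in the configured range its starting PPA; only the
 * group and parallel unit fields are set, chunk and block start at zero.
 */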
static int
ftl_dev_init_punits(struct spdk_ftl_dev *dev)
{
	unsigned int i, punit;

	dev->punits = calloc(ftl_dev_num_punits(dev), sizeof(*dev->punits));
	if (!dev->punits) {
		return -1;
	}

	for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
		dev->punits[i].dev = dev;
		punit = dev->range.begin + i;

		dev->punits[i].start_ppa.ppa = 0;
		dev->punits[i].start_ppa.grp = punit % dev->geo.num_grp;
		dev->punits[i].start_ppa.pu = punit / dev->geo.num_grp;
	}

	return 0;
}

static int
ftl_dev_retrieve_geo(struct spdk_ftl_dev *dev)
{
	volatile struct ftl_admin_cmpl cmpl = {};
	uint32_t nsid = spdk_nvme_ns_get_id(dev->ns);

	if (spdk_nvme_ocssd_ctrlr_cmd_geometry(dev->ctrlr, nsid, &dev->geo, sizeof(dev->geo),
					       ftl_admin_cb, (void *)&cmpl)) {
		SPDK_ERRLOG("Unable to retrieve geometry\n");
		return -1;
	}

	/* TODO: add a timeout */
	while (!cmpl.complete) {
		spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
	}

	if (spdk_nvme_cpl_is_error(&cmpl.status)) {
		SPDK_ERRLOG("Unexpected status code: [%d], status code type: [%d]\n",
			    cmpl.status.status.sc, cmpl.status.status.sct);
		return -1;
	}

	/* TODO: add sanity checks for the geo */
	dev->ppa_len = dev->geo.lbaf.grp_len +
		       dev->geo.lbaf.pu_len +
		       dev->geo.lbaf.chk_len +
		       dev->geo.lbaf.lbk_len;

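	/* Build the PPA address format: logical block bits occupy the least
	 * significant part of the address, followed by the chunk, parallel
	 * unit and group bits.
	 */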
	dev->ppaf.lbk_offset = 0;
	dev->ppaf.lbk_mask   = (1 << dev->geo.lbaf.lbk_len) - 1;
	dev->ppaf.chk_offset = dev->ppaf.lbk_offset + dev->geo.lbaf.lbk_len;
	dev->ppaf.chk_mask   = (1 << dev->geo.lbaf.chk_len) - 1;
	dev->ppaf.pu_offset  = dev->ppaf.chk_offset + dev->geo.lbaf.chk_len;
	dev->ppaf.pu_mask    = (1 << dev->geo.lbaf.pu_len) - 1;
	dev->ppaf.grp_offset = dev->ppaf.pu_offset + dev->geo.lbaf.pu_len;
	dev->ppaf.grp_mask   = (1 << dev->geo.lbaf.grp_len) - 1;

	/* We're using optimal write size as our xfer size */
	dev->xfer_size = dev->geo.ws_opt;

	return 0;
}

static int
ftl_dev_nvme_init(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
{
	uint32_t block_size;

	dev->ctrlr = opts->ctrlr;

	if (spdk_nvme_ctrlr_get_num_ns(dev->ctrlr) != 1) {
		SPDK_ERRLOG("Unsupported number of namespaces\n");
		return -1;
	}

	dev->ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, FTL_NSID);
	dev->trid = opts->trid;
	dev->md_size = spdk_nvme_ns_get_md_size(dev->ns);

	block_size = spdk_nvme_ns_get_extended_sector_size(dev->ns);
	if (block_size != FTL_BLOCK_SIZE) {
		SPDK_ERRLOG("Unsupported block size (%"PRIu32")\n", block_size);
		return -1;
	}

	if (dev->md_size % sizeof(uint32_t) != 0) {
		/* Metadata pointer must be dword aligned */
		SPDK_ERRLOG("Unsupported metadata size (%zu)\n", dev->md_size);
		return -1;
	}

	return 0;
}

static int
ftl_dev_init_nv_cache(struct spdk_ftl_dev *dev, struct spdk_bdev_desc *bdev_desc)
{
	struct spdk_bdev *bdev;
	struct spdk_ftl_conf *conf = &dev->conf;
	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
	char pool_name[128];
	int rc;

	if (!bdev_desc) {
		return 0;
	}

	bdev = spdk_bdev_desc_get_bdev(bdev_desc);
	SPDK_INFOLOG(SPDK_LOG_FTL_INIT, "Using %s as write buffer cache\n",
		     spdk_bdev_get_name(bdev));

	if (spdk_bdev_get_block_size(bdev) != FTL_BLOCK_SIZE) {
		SPDK_ERRLOG("Unsupported block size (%d)\n", spdk_bdev_get_block_size(bdev));
		return -1;
	}

	if (!spdk_bdev_is_md_separate(bdev)) {
		SPDK_ERRLOG("Bdev %s doesn't support separate metadata buffer IO\n",
			    spdk_bdev_get_name(bdev));
		return -1;
	}

	if (spdk_bdev_get_md_size(bdev) < sizeof(uint64_t)) {
		SPDK_ERRLOG("Bdev's %s metadata is too small (%"PRIu32")\n",
			    spdk_bdev_get_name(bdev), spdk_bdev_get_md_size(bdev));
		return -1;
	}

	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
		SPDK_ERRLOG("Unsupported DIF type used by bdev %s\n",
			    spdk_bdev_get_name(bdev));
		return -1;
	}

	/* The cache needs to be capable of storing at least two full bands. This requirement comes
	 * from the fact that cache works as a protection against power loss, so before the data
	 * inside the cache can be overwritten, the band it's stored on has to be closed. Plus one
	 * extra block is needed to store the header.
	 */
	if (spdk_bdev_get_num_blocks(bdev) < ftl_num_band_lbks(dev) * 2 + 1) {
		SPDK_ERRLOG("Insufficient number of blocks for write buffer cache (available: %"
			    PRIu64", required: %"PRIu64")\n", spdk_bdev_get_num_blocks(bdev),
			    ftl_num_band_lbks(dev) * 2 + 1);
		return -1;
	}

	rc = snprintf(pool_name, sizeof(pool_name), "ftl-nvpool-%p", dev);
	if (rc < 0 || rc >= (int)sizeof(pool_name)) {
		return -1;
	}

	nv_cache->md_pool = spdk_mempool_create(pool_name, conf->nv_cache.max_request_cnt,
						spdk_bdev_get_md_size(bdev) *
						conf->nv_cache.max_request_size,
						SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
						SPDK_ENV_SOCKET_ID_ANY);
	if (!nv_cache->md_pool) {
		SPDK_ERRLOG("Failed to initialize non-volatile cache metadata pool\n");
		return -1;
	}

	nv_cache->dma_buf = spdk_dma_zmalloc(FTL_BLOCK_SIZE, spdk_bdev_get_buf_align(bdev), NULL);
	if (!nv_cache->dma_buf) {
		SPDK_ERRLOG("Memory allocation failure\n");
		return -1;
	}

	if (pthread_spin_init(&nv_cache->lock, PTHREAD_PROCESS_PRIVATE)) {
		SPDK_ERRLOG("Failed to initialize cache lock\n");
		return -1;
	}

	nv_cache->bdev_desc = bdev_desc;
	nv_cache->current_addr = FTL_NV_CACHE_DATA_OFFSET;
	nv_cache->num_data_blocks = spdk_bdev_get_num_blocks(bdev) - 1;
	nv_cache->num_available = nv_cache->num_data_blocks;
	nv_cache->ready = false;

	return 0;
}

void
spdk_ftl_conf_init_defaults(struct spdk_ftl_conf *conf)
{
	*conf = g_default_conf;
}

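/* Mempool constructor; preallocates the per-request bit array used to track
 * LBA map segments.
 */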
static void
ftl_lba_map_request_ctor(struct spdk_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
{
	struct ftl_lba_map_request *request = obj;
	struct spdk_ftl_dev *dev = opaque;

	request->segments = spdk_bit_array_create(spdk_divide_round_up(
				    ftl_num_band_lbks(dev), FTL_NUM_LBA_IN_BLOCK));
}

static int
ftl_init_lba_map_pools(struct spdk_ftl_dev *dev)
{
#define POOL_NAME_LEN 128
	char pool_name[POOL_NAME_LEN];
	int rc;

	rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ocssd-lba-pool");
	if (rc < 0 || rc >= POOL_NAME_LEN) {
		return -ENAMETOOLONG;
	}

	/* We need to reserve at least 2 buffers for the band close / open sequence
	 * alone, plus additional (8) buffers for handling write errors.
	 * TODO: This memory pool is utilized only by the core thread - it introduces
	 * unnecessary overhead and should be replaced with a different data structure.
	 */
	dev->lba_pool = spdk_mempool_create(pool_name, 2 + 8,
					    ftl_lba_map_pool_elem_size(dev),
					    SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					    SPDK_ENV_SOCKET_ID_ANY);
	if (!dev->lba_pool) {
		return -ENOMEM;
	}

	rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ocssd-lbareq-pool");
	if (rc < 0 || rc >= POOL_NAME_LEN) {
		return -ENAMETOOLONG;
	}

	dev->lba_request_pool = spdk_mempool_create_ctor(pool_name,
				dev->conf.max_reloc_qdepth * dev->conf.max_active_relocs,
				sizeof(struct ftl_lba_map_request),
				SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
				SPDK_ENV_SOCKET_ID_ANY,
				ftl_lba_map_request_ctor,
				dev);
	if (!dev->lba_request_pool) {
		return -ENOMEM;
	}

	return 0;
}

static void
ftl_init_wptr_list(struct spdk_ftl_dev *dev)
{
	LIST_INIT(&dev->wptr_list);
	LIST_INIT(&dev->flush_list);
	LIST_INIT(&dev->band_flush_list);
}

static size_t
ftl_dev_band_max_seq(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band;
	size_t seq = 0;

	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
		if (band->seq > seq) {
			seq = band->seq;
		}
	}

	return seq;
}

static void
_ftl_init_bands_state(void *ctx)
{
	struct ftl_band *band, *temp_band;
	struct spdk_ftl_dev *dev = ctx;

	dev->seq = ftl_dev_band_max_seq(dev);

	LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
		if (!band->lba_map.num_vld) {
			ftl_band_set_state(band, FTL_BAND_STATE_FREE);
		}
	}

	ftl_reloc_resume(dev->reloc);
	/* Clear the limit applications as they're incremented incorrectly by
	 * the initialization code.
	 */
	memset(dev->stats.limits, 0, sizeof(dev->stats.limits));
}

static int
ftl_init_num_free_bands(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band;
	int cnt = 0;

	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
		if (band->num_chunks && !band->lba_map.num_vld) {
			cnt++;
		}
	}
	return cnt;
}

static int
ftl_init_bands_state(struct spdk_ftl_dev *dev)
{
	/* TODO: Should we abort initialization or expose a read-only device
	 * if there are no free bands? If we abort initialization, should we
	 * depend on the condition that we have no free bands, or should we
	 * require some minimal number of free bands?
	 */
	if (!ftl_init_num_free_bands(dev)) {
		return -1;
	}

	spdk_thread_send_msg(ftl_get_core_thread(dev), _ftl_init_bands_state, dev);
	return 0;
}

static void
_ftl_dev_init_thread(void *ctx)
{
	struct ftl_thread *thread = ctx;
	struct spdk_ftl_dev *dev = thread->dev;

	thread->poller = spdk_poller_register(thread->poller_fn, thread, thread->period_us);
	if (!thread->poller) {
		SPDK_ERRLOG("Unable to register poller\n");
		assert(0);
	}

	if (spdk_get_thread() == ftl_get_core_thread(dev)) {
		ftl_anm_register_device(dev, ftl_process_anm_event);
	}
}

static int
ftl_dev_init_thread(struct spdk_ftl_dev *dev, struct ftl_thread *thread,
		    struct spdk_thread *spdk_thread, spdk_poller_fn fn, uint64_t period_us)
{
	thread->dev = dev;
	thread->poller_fn = fn;
	thread->thread = spdk_thread;
	thread->period_us = period_us;

	thread->qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, NULL, 0);
	if (!thread->qpair) {
		SPDK_ERRLOG("Unable to initialize qpair\n");
		return -1;
	}

	spdk_thread_send_msg(spdk_thread, _ftl_dev_init_thread, thread);
	return 0;
}

static int
ftl_dev_init_threads(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
{
	if (!opts->core_thread || !opts->read_thread) {
		return -1;
	}

	if (ftl_dev_init_thread(dev, &dev->core_thread, opts->core_thread, ftl_task_core, 0)) {
		SPDK_ERRLOG("Unable to initialize core thread\n");
		return -1;
	}

	if (ftl_dev_init_thread(dev, &dev->read_thread, opts->read_thread, ftl_task_read, 0)) {
		SPDK_ERRLOG("Unable to initialize read thread\n");
		return -1;
	}

	return 0;
}

static void
ftl_dev_free_thread(struct spdk_ftl_dev *dev, struct ftl_thread *thread)
{
	assert(thread->poller == NULL);

	spdk_nvme_ctrlr_free_io_qpair(thread->qpair);
	thread->thread = NULL;
	thread->qpair = NULL;
}

static int
ftl_dev_l2p_alloc(struct spdk_ftl_dev *dev)
{
	size_t addr_size;
	uint64_t i;

	if (dev->num_lbas == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Invalid l2p table size\n");
		return -1;
	}

	if (dev->l2p) {
		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "L2p table already allocated\n");
		return -1;
	}

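	/* Use 8-byte L2P entries when a PPA doesn't fit in 32 bits, 4-byte entries otherwise */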
	addr_size = dev->ppa_len >= 32 ? 8 : 4;
	dev->l2p = malloc(dev->num_lbas * addr_size);
	if (!dev->l2p) {
		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Failed to allocate l2p table\n");
		return -1;
	}

	for (i = 0; i < dev->num_lbas; ++i) {
		ftl_l2p_set(dev, i, ftl_to_ppa(FTL_PPA_INVALID));
	}

	return 0;
}

static void
ftl_call_init_complete_cb(void *_ctx)
{
	struct ftl_init_context *ctx = _ctx;
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(ctx, struct spdk_ftl_dev, init_ctx);

	if (ctx->cb_fn != NULL) {
		ctx->cb_fn(dev, ctx->cb_arg, 0);
	}
}

static void
ftl_init_complete(struct spdk_ftl_dev *dev)
{
	pthread_mutex_lock(&g_ftl_queue_lock);
	STAILQ_INSERT_HEAD(&g_ftl_queue, dev, stailq);
	pthread_mutex_unlock(&g_ftl_queue_lock);

	dev->initialized = 1;

	spdk_thread_send_msg(dev->init_ctx.thread, ftl_call_init_complete_cb, &dev->init_ctx);
}

static void
ftl_init_fail_cb(struct spdk_ftl_dev *dev, void *_ctx, int status)
{
	struct ftl_init_context *ctx = _ctx;

	if (ctx->cb_fn != NULL) {
		ctx->cb_fn(NULL, ctx->cb_arg, -ENODEV);
	}

	free(ctx);
}

static int _spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg,
			      struct spdk_thread *thread);

static void
ftl_init_fail(struct spdk_ftl_dev *dev)
{
	struct ftl_init_context *ctx;

	ctx = malloc(sizeof(*ctx));
	if (!ctx) {
		SPDK_ERRLOG("Unable to allocate context to free the device\n");
		return;
	}

	*ctx = dev->init_ctx;
	if (_spdk_ftl_dev_free(dev, ftl_init_fail_cb, ctx, ctx->thread)) {
		SPDK_ERRLOG("Unable to free the device\n");
		assert(0);
	}
}

static void
ftl_write_nv_cache_md_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_ftl_dev *dev = cb_arg;

	spdk_bdev_free_io(bdev_io);
	if (spdk_unlikely(!success)) {
		SPDK_ERRLOG("Writing non-volatile cache's metadata header failed\n");
		ftl_init_fail(dev);
		return;
	}

	dev->nv_cache.ready = true;
	ftl_init_complete(dev);
}

static void
ftl_clear_nv_cache_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_ftl_dev *dev = cb_arg;
	struct ftl_nv_cache *nv_cache = &dev->nv_cache;

	spdk_bdev_free_io(bdev_io);
	if (spdk_unlikely(!success)) {
		SPDK_ERRLOG("Unable to clear the non-volatile cache bdev\n");
		ftl_init_fail(dev);
		return;
	}

	nv_cache->phase = 1;
	if (ftl_nv_cache_write_header(nv_cache, ftl_write_nv_cache_md_cb, dev)) {
		SPDK_ERRLOG("Unable to write non-volatile cache metadata header\n");
		ftl_init_fail(dev);
	}
}

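/* Build a fresh device state: generate a UUID, size the logical space (minus
 * the lba_rsvd spare percentage), allocate the L2P and set up the band states.
 * If a non-volatile cache is attached, it is scrubbed before init completes.
 */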
static int
ftl_setup_initial_state(struct spdk_ftl_dev *dev)
{
	struct spdk_ftl_conf *conf = &dev->conf;
	size_t i;
	int rc;

	spdk_uuid_generate(&dev->uuid);

	dev->num_lbas = 0;
	for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
		dev->num_lbas += ftl_band_num_usable_lbks(&dev->bands[i]);
	}

	dev->num_lbas = (dev->num_lbas * (100 - conf->lba_rsvd)) / 100;

	if (ftl_dev_l2p_alloc(dev)) {
		SPDK_ERRLOG("Unable to init l2p table\n");
		return -1;
	}

	if (ftl_init_bands_state(dev)) {
		SPDK_ERRLOG("Unable to finish the initialization\n");
		return -1;
	}

	if (!ftl_dev_has_nv_cache(dev)) {
		ftl_init_complete(dev);
	} else {
		rc = ftl_nv_cache_scrub(&dev->nv_cache, ftl_clear_nv_cache_cb, dev);
		if (spdk_unlikely(rc != 0)) {
			SPDK_ERRLOG("Unable to clear the non-volatile cache bdev: %s\n",
				    spdk_strerror(-rc));
			return -1;
		}
	}

	return 0;
}

static void
ftl_restore_nv_cache_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status)
{
	if (spdk_unlikely(status != 0)) {
		SPDK_ERRLOG("Failed to restore the non-volatile cache state\n");
		ftl_init_fail(dev);
		return;
	}

	ftl_init_complete(dev);
}

static void
ftl_restore_device_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status)
{
	if (status) {
		SPDK_ERRLOG("Failed to restore the device from the SSD\n");
		ftl_init_fail(dev);
		return;
	}

	if (ftl_init_bands_state(dev)) {
		SPDK_ERRLOG("Unable to finish the initialization\n");
		ftl_init_fail(dev);
		return;
	}

	if (!ftl_dev_has_nv_cache(dev)) {
		ftl_init_complete(dev);
		return;
	}

	ftl_restore_nv_cache(restore, ftl_restore_nv_cache_cb);
}

static void
ftl_restore_md_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status)
{
	if (status) {
		SPDK_ERRLOG("Failed to restore the metadata from the SSD\n");
		goto error;
	}

	/* After the metadata is read it should be possible to allocate the L2P */
	if (ftl_dev_l2p_alloc(dev)) {
		SPDK_ERRLOG("Failed to allocate the L2P\n");
		goto error;
	}

	if (ftl_restore_device(restore, ftl_restore_device_cb)) {
		SPDK_ERRLOG("Failed to start device restoration from the SSD\n");
		goto error;
	}

	return;
error:
	ftl_init_fail(dev);
}

static int
ftl_restore_state(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
{
	dev->uuid = opts->uuid;

	if (ftl_restore_md(dev, ftl_restore_md_cb)) {
		SPDK_ERRLOG("Failed to start metadata restoration from the SSD\n");
		return -1;
	}

	return 0;
}

static int
ftl_io_channel_create_cb(void *io_device, void *ctx)
{
	struct spdk_ftl_dev *dev = io_device;
	struct ftl_io_channel *ioch = ctx;
	char mempool_name[32];

	snprintf(mempool_name, sizeof(mempool_name), "ftl_io_%p", ioch);
	ioch->cache_ioch = NULL;
	ioch->dev = dev;
	ioch->elem_size = sizeof(struct ftl_md_io);
	ioch->io_pool = spdk_mempool_create(mempool_name,
					    dev->conf.user_io_pool_size,
					    ioch->elem_size,
					    0,
					    SPDK_ENV_SOCKET_ID_ANY);
	if (!ioch->io_pool) {
		SPDK_ERRLOG("Failed to create IO channel's IO pool\n");
		return -1;
	}

	if (ftl_dev_has_nv_cache(dev)) {
		ioch->cache_ioch = spdk_bdev_get_io_channel(dev->nv_cache.bdev_desc);
		if (!ioch->cache_ioch) {
			SPDK_ERRLOG("Failed to create cache IO channel\n");
			spdk_mempool_free(ioch->io_pool);
			return -1;
		}
	}

	return 0;
}

static void
ftl_io_channel_destroy_cb(void *io_device, void *ctx)
{
	struct ftl_io_channel *ioch = ctx;

	spdk_mempool_free(ioch->io_pool);

	if (ioch->cache_ioch) {
		spdk_put_io_channel(ioch->cache_ioch);
	}
}

static int
ftl_dev_init_io_channel(struct spdk_ftl_dev *dev)
{
	spdk_io_device_register(dev, ftl_io_channel_create_cb, ftl_io_channel_destroy_cb,
				sizeof(struct ftl_io_channel),
				NULL);

	dev->ioch = spdk_get_io_channel(dev);
	if (!dev->ioch) {
		spdk_io_device_unregister(dev, NULL);
		return -1;
	}

	return 0;
}

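/* Top-level initialization entry point. Failures during the synchronous setup
 * phase are cleaned up via ftl_dev_free_sync(); once the asynchronous restore /
 * initial state phase has started, failures are reported through the callback.
 */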
int
spdk_ftl_dev_init(const struct spdk_ftl_dev_init_opts *_opts, spdk_ftl_init_fn cb_fn, void *cb_arg)
{
	struct spdk_ftl_dev *dev;
	struct spdk_ftl_dev_init_opts opts = *_opts;

	dev = calloc(1, sizeof(*dev));
	if (!dev) {
		return -ENOMEM;
	}

	if (!opts.conf) {
		opts.conf = &g_default_conf;
	}

	TAILQ_INIT(&dev->retry_queue);
	dev->conf = *opts.conf;
	dev->init_ctx.cb_fn = cb_fn;
	dev->init_ctx.cb_arg = cb_arg;
	dev->init_ctx.thread = spdk_get_thread();
	dev->range = opts.range;
	dev->limit = SPDK_FTL_LIMIT_MAX;

	dev->name = strdup(opts.name);
	if (!dev->name) {
		SPDK_ERRLOG("Unable to set device name\n");
		goto fail_sync;
	}

	if (ftl_dev_nvme_init(dev, &opts)) {
		SPDK_ERRLOG("Unable to initialize NVMe structures\n");
		goto fail_sync;
	}

	/* In case of errors, we free all of the memory in ftl_dev_free_sync(),
	 * so we don't have to clean up in each of the init functions.
	 */
	if (ftl_dev_retrieve_geo(dev)) {
		SPDK_ERRLOG("Unable to retrieve geometry\n");
		goto fail_sync;
	}

	if (ftl_check_init_opts(&opts, &dev->geo)) {
		SPDK_ERRLOG("Invalid device configuration\n");
		goto fail_sync;
	}

	if (ftl_dev_init_punits(dev)) {
		SPDK_ERRLOG("Unable to initialize LUNs\n");
		goto fail_sync;
	}

	if (ftl_init_lba_map_pools(dev)) {
		SPDK_ERRLOG("Unable to init LBA map pools\n");
		goto fail_sync;
	}

	ftl_init_wptr_list(dev);

	if (ftl_dev_init_bands(dev)) {
		SPDK_ERRLOG("Unable to initialize band array\n");
		goto fail_sync;
	}

	if (ftl_dev_init_nv_cache(dev, opts.cache_bdev_desc)) {
		SPDK_ERRLOG("Unable to initialize persistent cache\n");
		goto fail_sync;
	}

	dev->rwb = ftl_rwb_init(&dev->conf, dev->geo.ws_opt, dev->md_size, ftl_dev_num_punits(dev));
	if (!dev->rwb) {
		SPDK_ERRLOG("Unable to initialize rwb structures\n");
		goto fail_sync;
	}

	dev->reloc = ftl_reloc_init(dev);
	if (!dev->reloc) {
		SPDK_ERRLOG("Unable to initialize reloc structures\n");
		goto fail_sync;
	}

	if (ftl_dev_init_io_channel(dev)) {
		SPDK_ERRLOG("Unable to initialize IO channels\n");
		goto fail_sync;
	}

	if (ftl_dev_init_threads(dev, &opts)) {
		SPDK_ERRLOG("Unable to initialize device threads\n");
		goto fail_sync;
	}

	if (opts.mode & SPDK_FTL_MODE_CREATE) {
		if (ftl_setup_initial_state(dev)) {
			SPDK_ERRLOG("Failed to setup initial state of the device\n");
			goto fail_async;
		}
	} else {
		if (ftl_restore_state(dev, &opts)) {
			SPDK_ERRLOG("Unable to restore device's state from the SSD\n");
			goto fail_async;
		}
	}

	return 0;
fail_sync:
	ftl_dev_free_sync(dev);
	return -ENOMEM;
fail_async:
	ftl_init_fail(dev);
	return 0;
}

static void
_ftl_halt_defrag(void *arg)
{
	ftl_reloc_halt(((struct spdk_ftl_dev *)arg)->reloc);
}

static void
ftl_lba_map_request_dtor(struct spdk_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
{
	struct ftl_lba_map_request *request = obj;

	spdk_bit_array_free(&request->segments);
}

static void
ftl_dev_free_sync(struct spdk_ftl_dev *dev)
{
	struct spdk_ftl_dev *iter;
	size_t i;

	if (!dev) {
		return;
	}

	pthread_mutex_lock(&g_ftl_queue_lock);
	STAILQ_FOREACH(iter, &g_ftl_queue, stailq) {
		if (iter == dev) {
			STAILQ_REMOVE(&g_ftl_queue, dev, spdk_ftl_dev, stailq);
			break;
		}
	}
	pthread_mutex_unlock(&g_ftl_queue_lock);

	assert(LIST_EMPTY(&dev->wptr_list));

	ftl_dev_dump_bands(dev);
	ftl_dev_dump_stats(dev);

	if (dev->ioch) {
		spdk_put_io_channel(dev->ioch);
		spdk_io_device_unregister(dev, NULL);
	}

	if (dev->bands) {
		for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
			free(dev->bands[i].chunk_buf);
			spdk_bit_array_free(&dev->bands[i].lba_map.vld);
			spdk_bit_array_free(&dev->bands[i].reloc_bitmap);
		}
	}

	spdk_dma_free(dev->nv_cache.dma_buf);

	spdk_mempool_free(dev->lba_pool);
	spdk_mempool_free(dev->nv_cache.md_pool);
	if (dev->lba_request_pool) {
		spdk_mempool_obj_iter(dev->lba_request_pool, ftl_lba_map_request_dtor, NULL);
	}
	spdk_mempool_free(dev->lba_request_pool);

	ftl_rwb_free(dev->rwb);
	ftl_reloc_free(dev->reloc);

	free(dev->name);
	free(dev->punits);
	free(dev->bands);
	free(dev->l2p);
	free(dev);
}

static void
ftl_call_fini_complete_cb(void *_ctx)
{
	struct spdk_ftl_dev *dev = _ctx;
	struct ftl_init_context ctx = dev->fini_ctx;

	ftl_dev_free_sync(dev);

	if (ctx.cb_fn != NULL) {
		ctx.cb_fn(NULL, ctx.cb_arg, 0);
	}
}

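/* Shutdown poller running on the core thread; once both worker pollers have
 * unregistered themselves, release the per-thread resources and finish the
 * teardown on the thread that requested it.
 */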
static int
ftl_halt_poller(void *ctx)
{
	struct spdk_ftl_dev *dev = ctx;

	if (!dev->core_thread.poller && !dev->read_thread.poller) {
		spdk_poller_unregister(&dev->fini_ctx.poller);

		ftl_dev_free_thread(dev, &dev->read_thread);
		ftl_dev_free_thread(dev, &dev->core_thread);

		ftl_anm_unregister_device(dev);

		spdk_thread_send_msg(dev->fini_ctx.thread, ftl_call_fini_complete_cb, dev);
	}

	return 0;
}

static void
ftl_add_halt_poller(void *ctx)
{
	struct spdk_ftl_dev *dev = ctx;

	_ftl_halt_defrag(dev);

	assert(!dev->fini_ctx.poller);
	dev->fini_ctx.poller = spdk_poller_register(ftl_halt_poller, dev, 100);
}

static int
_spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg,
		   struct spdk_thread *thread)
{
	if (dev->fini_ctx.cb_fn != NULL) {
		return -EBUSY;
	}

	dev->fini_ctx.cb_fn = cb_fn;
	dev->fini_ctx.cb_arg = cb_arg;
	dev->fini_ctx.thread = thread;
	dev->halt = 1;

	ftl_rwb_disable_interleaving(dev->rwb);

	spdk_thread_send_msg(ftl_get_core_thread(dev), ftl_add_halt_poller, dev);
	return 0;
}

int
spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg)
{
	return _spdk_ftl_dev_free(dev, cb_fn, cb_arg, spdk_get_thread());
}

int
spdk_ftl_module_init(const struct ftl_module_init_opts *opts, spdk_ftl_fn cb, void *cb_arg)
{
	return ftl_anm_init(opts->anm_thread, cb, cb_arg);
}

int
spdk_ftl_module_fini(spdk_ftl_fn cb, void *cb_arg)
{
	return ftl_anm_free(cb, cb_arg);
}

SPDK_LOG_REGISTER_COMPONENT("ftl_init", SPDK_LOG_FTL_INIT)