xref: /spdk/lib/ftl/ftl_init.c (revision ae7b5890ef728af40bd233a5011b924c482603bf)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/nvme.h"
#include "spdk/io_channel.h"
#include "spdk/bdev_module.h"
#include "spdk/string.h"
#include "spdk/likely.h"
#include "spdk_internal/log.h"
#include "spdk/ftl.h"

#include "ftl_core.h"
#include "ftl_anm.h"
#include "ftl_io.h"
#include "ftl_reloc.h"
#include "ftl_rwb.h"
#include "ftl_band.h"
#include "ftl_debug.h"

#define FTL_CORE_RING_SIZE	4096
#define FTL_INIT_TIMEOUT	30
#define FTL_NSID		1

#define ftl_range_intersect(s1, e1, s2, e2) \
	((s1) <= (e2) && (s2) <= (e1))
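/*
 * Example: the inclusive punit ranges [0, 7] and [4, 11] overlap, so
 * ftl_range_intersect(0, 7, 4, 11) evaluates to true, while the disjoint
 * ranges [0, 3] and [4, 7] evaluate to false.
 */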

struct ftl_admin_cmpl {
	struct spdk_nvme_cpl			status;

	int					complete;
};

static STAILQ_HEAD(, spdk_ftl_dev)	g_ftl_queue = STAILQ_HEAD_INITIALIZER(g_ftl_queue);
static pthread_mutex_t			g_ftl_queue_lock = PTHREAD_MUTEX_INITIALIZER;
static const struct spdk_ftl_conf	g_default_conf = {
	.limits = {
		/* 5 free bands  / 0 % host writes */
		[SPDK_FTL_LIMIT_CRIT]  = { .thld = 5,  .limit = 0 },
		/* 10 free bands / 5 % host writes */
		[SPDK_FTL_LIMIT_HIGH]  = { .thld = 10, .limit = 5 },
		/* 20 free bands / 40 % host writes */
		[SPDK_FTL_LIMIT_LOW]   = { .thld = 20, .limit = 40 },
		/* 40 free bands / 100 % host writes - defrag starts running */
		[SPDK_FTL_LIMIT_START] = { .thld = 40, .limit = 100 },
	},
	/* 10 percent valid lbks */
	.invalid_thld = 10,
	/* 20% spare lbks */
	.lba_rsvd = 20,
	/* 6M write buffer */
	.rwb_size = 6 * 1024 * 1024,
	/* 90% band fill threshold */
	.band_thld = 90,
	/* Max 32 IO depth per band relocate */
	.max_reloc_qdepth = 32,
	/* Max 3 active band relocates */
	.max_active_relocs = 3,
	/* IO pool size per user thread (this should be adjusted to thread IO qdepth) */
	.user_io_pool_size = 2048,
	/* Number of interleaving units per ws_opt */
	/* 1 for default and 3 for 3D TLC NAND */
	.num_interleave_units = 1,
	/*
	 * If set to false, FTL will return an error when restoring after a
	 * dirty shutdown. If set to true, the last band will be padded and
	 * FTL will restore based only on closed bands - this will result in
	 * data loss after recovery.
	 */
	.allow_open_bands = false,
	.nv_cache = {
		/* Maximum number of concurrent requests */
		.max_request_cnt = 2048,
		/* Maximum number of blocks per request */
		.max_request_size = 16,
	}
};

static void ftl_dev_free_sync(struct spdk_ftl_dev *dev);

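/*
 * Completion callback for admin commands issued during initialization.
 * It records the completion status and signals the caller, which busy-waits
 * on the complete flag while processing admin completions.
 */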
static void
ftl_admin_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct ftl_admin_cmpl *cmpl = ctx;

	cmpl->complete = 1;
	cmpl->status = *cpl;
}

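/* Allocate the band's valid lbk bitmap, initialize its lock and clear the metadata */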
static int
ftl_band_init_md(struct ftl_band *band)
{
	struct ftl_lba_map *lba_map = &band->lba_map;

	lba_map->vld = spdk_bit_array_create(ftl_num_band_lbks(band->dev));
	if (!lba_map->vld) {
		return -ENOMEM;
	}

	pthread_spin_init(&lba_map->lock, PTHREAD_PROCESS_PRIVATE);
	ftl_band_md_clear(band);
	return 0;
}

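/*
 * Sanity-check the configuration against the device geometry: thresholds
 * must be valid percentages, the write buffer size must be a non-zero
 * multiple of FTL_BLOCK_SIZE, and ws_opt must be divisible by the number
 * of interleave units.
 */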
static int
ftl_check_conf(const struct spdk_ftl_conf *conf,
	       const struct spdk_ocssd_geometry_data *geo)
{
	size_t i;

	if (conf->invalid_thld >= 100) {
		return -1;
	}
	if (conf->lba_rsvd >= 100) {
		return -1;
	}
	if (conf->lba_rsvd == 0) {
		return -1;
	}
	if (conf->rwb_size == 0) {
		return -1;
	}
	if (conf->rwb_size % FTL_BLOCK_SIZE != 0) {
		return -1;
	}
	if (geo->ws_opt % conf->num_interleave_units != 0) {
		return -1;
	}

	for (i = 0; i < SPDK_FTL_LIMIT_MAX; ++i) {
		if (conf->limits[i].limit > 100) {
			return -1;
		}
	}

	return 0;
}

static int
ftl_check_init_opts(const struct spdk_ftl_dev_init_opts *opts,
		    const struct spdk_ocssd_geometry_data *geo)
{
	struct spdk_ftl_dev *dev;
	size_t num_punits = geo->num_pu * geo->num_grp;
	int rc = 0;

	if (opts->range.begin > opts->range.end || opts->range.end >= num_punits) {
		return -1;
	}

	if (ftl_check_conf(opts->conf, geo)) {
		return -1;
	}

	pthread_mutex_lock(&g_ftl_queue_lock);

	STAILQ_FOREACH(dev, &g_ftl_queue, stailq) {
		if (spdk_nvme_transport_id_compare(&dev->trid, &opts->trid)) {
			continue;
		}

		if (ftl_range_intersect(opts->range.begin, opts->range.end,
					dev->range.begin, dev->range.end)) {
			rc = -1;
			goto out;
		}
	}

out:
	pthread_mutex_unlock(&g_ftl_queue_lock);
	return rc;
}

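/*
 * Read num_entries chunk information entries, starting at the chunk
 * described by ppa, from the OCSSD chunk information log page. The call
 * is synchronous - it polls the admin queue until the command completes.
 */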
int
ftl_retrieve_chunk_info(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
			struct spdk_ocssd_chunk_information_entry *info,
			unsigned int num_entries)
{
	volatile struct ftl_admin_cmpl cmpl = {};
	uint32_t nsid = spdk_nvme_ns_get_id(dev->ns);
	uint64_t offset = (ppa.grp * dev->geo.num_pu + ppa.pu) *
			  dev->geo.num_chk + ppa.chk;
	int rc;

	rc = spdk_nvme_ctrlr_cmd_get_log_page(dev->ctrlr, SPDK_OCSSD_LOG_CHUNK_INFO, nsid,
					      info, num_entries * sizeof(*info),
					      offset * sizeof(*info),
					      ftl_admin_cb, (void *)&cmpl);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_cmd_get_log_page: %s\n", spdk_strerror(-rc));
		return -1;
	}

	while (!cmpl.complete) {
		spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
	}

	if (spdk_nvme_cpl_is_error(&cmpl.status)) {
		SPDK_ERRLOG("Unexpected status code: [%d], status code type: [%d]\n",
			    cmpl.status.status.sc, cmpl.status.status.sct);
		return -1;
	}

	return 0;
}

static int
ftl_retrieve_punit_chunk_info(struct spdk_ftl_dev *dev, const struct ftl_punit *punit,
			      struct spdk_ocssd_chunk_information_entry *info)
{
	uint32_t i = 0;
	unsigned int num_entries = FTL_BLOCK_SIZE / sizeof(*info);
	struct ftl_ppa chunk_ppa = punit->start_ppa;
	char ppa_buf[128];

	for (i = 0; i < dev->geo.num_chk; i += num_entries, chunk_ppa.chk += num_entries) {
		if (num_entries > dev->geo.num_chk - i) {
			num_entries = dev->geo.num_chk - i;
		}

		if (ftl_retrieve_chunk_info(dev, chunk_ppa, &info[i], num_entries)) {
			SPDK_ERRLOG("Failed to retrieve chunk information @ppa: %s\n",
				    ftl_ppa2str(chunk_ppa, ppa_buf, sizeof(ppa_buf)));
			return -1;
		}
	}

	return 0;
}

static unsigned char
ftl_get_chunk_state(const struct spdk_ocssd_chunk_information_entry *info)
{
	if (info->cs.free) {
		return FTL_CHUNK_STATE_FREE;
	}

	if (info->cs.open) {
		return FTL_CHUNK_STATE_OPEN;
	}

	if (info->cs.closed) {
		return FTL_CHUNK_STATE_CLOSED;
	}

	if (info->cs.offline) {
		return FTL_CHUNK_STATE_BAD;
	}

	assert(0 && "Invalid chunk state");
	return FTL_CHUNK_STATE_BAD;
}

static void
ftl_remove_empty_bands(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band, *temp_band;

	/* Remove a band from the shut_bands list to prevent further processing
	 * if all blocks on this band are bad.
	 */
	LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
		if (!band->num_chunks) {
			dev->num_bands--;
			LIST_REMOVE(band, list_entry);
		}
	}
}

static int
ftl_dev_init_bands(struct spdk_ftl_dev *dev)
{
	struct spdk_ocssd_chunk_information_entry	*info;
	struct ftl_band					*band, *pband;
	struct ftl_punit				*punit;
	struct ftl_chunk				*chunk;
	unsigned int					i, j;
	char						buf[128];
	int						rc = 0;

	LIST_INIT(&dev->free_bands);
	LIST_INIT(&dev->shut_bands);

	dev->num_free = 0;
	dev->num_bands = ftl_dev_num_bands(dev);
	dev->bands = calloc(ftl_dev_num_bands(dev), sizeof(*dev->bands));
	if (!dev->bands) {
		return -1;
	}

	info = calloc(dev->geo.num_chk, sizeof(*info));
	if (!info) {
		return -1;
	}

	for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
		band = &dev->bands[i];
		band->id = i;
		band->dev = dev;
		band->state = FTL_BAND_STATE_CLOSED;

		if (LIST_EMPTY(&dev->shut_bands)) {
			LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
		} else {
			LIST_INSERT_AFTER(pband, band, list_entry);
		}
		pband = band;

		CIRCLEQ_INIT(&band->chunks);
		band->chunk_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->chunk_buf));
		if (!band->chunk_buf) {
			SPDK_ERRLOG("Failed to allocate block state table for band: [%u]\n", i);
			rc = -1;
			goto out;
		}

		rc = ftl_band_init_md(band);
		if (rc) {
			SPDK_ERRLOG("Failed to initialize metadata structures for band [%u]\n", i);
			goto out;
		}

		band->reloc_bitmap = spdk_bit_array_create(ftl_dev_num_bands(dev));
		if (!band->reloc_bitmap) {
			SPDK_ERRLOG("Failed to allocate band relocation bitmap\n");
			rc = -1;
			goto out;
		}
	}

	for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
		punit = &dev->punits[i];

		rc = ftl_retrieve_punit_chunk_info(dev, punit, info);
		if (rc) {
			SPDK_ERRLOG("Failed to retrieve bbt for @ppa: %s [%lu]\n",
				    ftl_ppa2str(punit->start_ppa, buf, sizeof(buf)),
				    ftl_ppa_addr_pack(dev, punit->start_ppa));
			goto out;
		}

		for (j = 0; j < ftl_dev_num_bands(dev); ++j) {
			band = &dev->bands[j];
			chunk = &band->chunk_buf[i];
			chunk->pos = i;
			chunk->state = ftl_get_chunk_state(&info[j]);
			chunk->punit = punit;
			chunk->start_ppa = punit->start_ppa;
			chunk->start_ppa.chk = band->id;
			chunk->write_offset = ftl_dev_lbks_in_chunk(dev);

			if (chunk->state != FTL_CHUNK_STATE_BAD) {
				band->num_chunks++;
				CIRCLEQ_INSERT_TAIL(&band->chunks, chunk, circleq);
			}
		}
	}

	for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
		band = &dev->bands[i];
		band->tail_md_ppa = ftl_band_tail_md_ppa(band);
	}

	ftl_remove_empty_bands(dev);
out:
	free(info);
	return rc;
}

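/*
 * Allocate the parallel unit descriptors and translate each punit's flat
 * index within the device's range into its (group, parallel unit) start PPA.
 */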
static int
ftl_dev_init_punits(struct spdk_ftl_dev *dev)
{
	unsigned int i, punit;

	dev->punits = calloc(ftl_dev_num_punits(dev), sizeof(*dev->punits));
	if (!dev->punits) {
		return -1;
	}

	for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
		dev->punits[i].dev = dev;
		punit = dev->range.begin + i;

		dev->punits[i].start_ppa.ppa = 0;
		dev->punits[i].start_ppa.grp = punit % dev->geo.num_grp;
		dev->punits[i].start_ppa.pu = punit / dev->geo.num_grp;
	}

	return 0;
}

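/*
 * Retrieve the OCSSD geometry via a synchronous admin command and derive
 * the PPA address format (per-field bit offsets and masks) from it.
 */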
static int
ftl_dev_retrieve_geo(struct spdk_ftl_dev *dev)
{
	volatile struct ftl_admin_cmpl cmpl = {};
	uint32_t nsid = spdk_nvme_ns_get_id(dev->ns);

	if (spdk_nvme_ocssd_ctrlr_cmd_geometry(dev->ctrlr, nsid, &dev->geo, sizeof(dev->geo),
					       ftl_admin_cb, (void *)&cmpl)) {
		SPDK_ERRLOG("Unable to retrieve geometry\n");
		return -1;
	}

	/* TODO: add a timeout */
	while (!cmpl.complete) {
		spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
	}

	if (spdk_nvme_cpl_is_error(&cmpl.status)) {
		SPDK_ERRLOG("Unexpected status code: [%d], status code type: [%d]\n",
			    cmpl.status.status.sc, cmpl.status.status.sct);
		return -1;
	}

	/* TODO: add sanity checks for the geo */
	dev->ppa_len = dev->geo.lbaf.grp_len +
		       dev->geo.lbaf.pu_len +
		       dev->geo.lbaf.chk_len +
		       dev->geo.lbaf.lbk_len;

	dev->ppaf.lbk_offset = 0;
	dev->ppaf.lbk_mask   = (1 << dev->geo.lbaf.lbk_len) - 1;
	dev->ppaf.chk_offset = dev->ppaf.lbk_offset + dev->geo.lbaf.lbk_len;
	dev->ppaf.chk_mask   = (1 << dev->geo.lbaf.chk_len) - 1;
	dev->ppaf.pu_offset  = dev->ppaf.chk_offset + dev->geo.lbaf.chk_len;
	dev->ppaf.pu_mask    = (1 << dev->geo.lbaf.pu_len) - 1;
	dev->ppaf.grp_offset = dev->ppaf.pu_offset + dev->geo.lbaf.pu_len;
	dev->ppaf.grp_mask   = (1 << dev->geo.lbaf.grp_len) - 1;
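
	/*
	 * Example with a hypothetical geometry of lbk_len = 9, chk_len = 10,
	 * pu_len = 7 and grp_len = 1: a packed PPA uses bits [8:0] for the
	 * logical block, [18:9] for the chunk, [25:19] for the parallel unit
	 * and bit [26] for the group, giving ppa_len = 27.
	 */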

	/* We're using optimal write size as our xfer size */
	dev->xfer_size = dev->geo.ws_opt;

	return 0;
}

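/*
 * Bind the FTL device to its NVMe controller and namespace and verify the
 * namespace layout: a single namespace, FTL_BLOCK_SIZE sectors and a
 * dword-aligned metadata size are required.
 */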
static int
ftl_dev_nvme_init(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
{
	uint32_t block_size;

	dev->ctrlr = opts->ctrlr;

	if (spdk_nvme_ctrlr_get_num_ns(dev->ctrlr) != 1) {
		SPDK_ERRLOG("Unsupported number of namespaces\n");
		return -1;
	}

	dev->ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, FTL_NSID);
	if (dev->ns == NULL) {
		SPDK_ERRLOG("Invalid NS (%"PRIu32")\n", FTL_NSID);
		return -1;
	}
	dev->trid = opts->trid;
	dev->md_size = spdk_nvme_ns_get_md_size(dev->ns);

	block_size = spdk_nvme_ns_get_extended_sector_size(dev->ns);
	if (block_size != FTL_BLOCK_SIZE) {
		SPDK_ERRLOG("Unsupported block size (%"PRIu32")\n", block_size);
		return -1;
	}

	if (dev->md_size % sizeof(uint32_t) != 0) {
		/* Metadata pointer must be dword aligned */
		SPDK_ERRLOG("Unsupported metadata size (%zu)\n", dev->md_size);
		return -1;
	}

	return 0;
}

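/*
 * Set up the optional non-volatile write buffer cache on top of the given
 * bdev (if any). The bdev must use FTL_BLOCK_SIZE blocks, support separate
 * metadata buffers large enough to hold a 64-bit value and have DIF
 * disabled.
 */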
static int
ftl_dev_init_nv_cache(struct spdk_ftl_dev *dev, struct spdk_bdev_desc *bdev_desc)
{
	struct spdk_bdev *bdev;
	struct spdk_ftl_conf *conf = &dev->conf;
	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
	char pool_name[128];
	int rc;

	if (!bdev_desc) {
		return 0;
	}

	bdev = spdk_bdev_desc_get_bdev(bdev_desc);
	SPDK_INFOLOG(SPDK_LOG_FTL_INIT, "Using %s as write buffer cache\n",
		     spdk_bdev_get_name(bdev));

	if (spdk_bdev_get_block_size(bdev) != FTL_BLOCK_SIZE) {
		SPDK_ERRLOG("Unsupported block size (%d)\n", spdk_bdev_get_block_size(bdev));
		return -1;
	}

	if (!spdk_bdev_is_md_separate(bdev)) {
		SPDK_ERRLOG("Bdev %s doesn't support separate metadata buffer IO\n",
			    spdk_bdev_get_name(bdev));
		return -1;
	}

	if (spdk_bdev_get_md_size(bdev) < sizeof(uint64_t)) {
		SPDK_ERRLOG("Bdev's %s metadata is too small (%"PRIu32")\n",
			    spdk_bdev_get_name(bdev), spdk_bdev_get_md_size(bdev));
		return -1;
	}

	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
		SPDK_ERRLOG("Unsupported DIF type used by bdev %s\n",
			    spdk_bdev_get_name(bdev));
		return -1;
	}

	/* The cache needs to be capable of storing at least two full bands. This requirement comes
	 * from the fact that cache works as a protection against power loss, so before the data
	 * inside the cache can be overwritten, the band it's stored on has to be closed. Plus one
	 * extra block is needed to store the header.
	 */
	if (spdk_bdev_get_num_blocks(bdev) < ftl_num_band_lbks(dev) * 2 + 1) {
		SPDK_ERRLOG("Insufficient number of blocks for write buffer cache (available: %"
			    PRIu64", required: %"PRIu64")\n", spdk_bdev_get_num_blocks(bdev),
			    ftl_num_band_lbks(dev) * 2 + 1);
		return -1;
	}
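
	/*
	 * For example, a hypothetical band size of 262144 lbks would require
	 * the cache bdev to provide at least 2 * 262144 + 1 = 524289 blocks.
	 */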

	rc = snprintf(pool_name, sizeof(pool_name), "ftl-nvpool-%p", dev);
	if (rc < 0 || rc >= (int)sizeof(pool_name)) {
		return -1;
	}

	nv_cache->md_pool = spdk_mempool_create(pool_name, conf->nv_cache.max_request_cnt,
						spdk_bdev_get_md_size(bdev) *
						conf->nv_cache.max_request_size,
						SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
						SPDK_ENV_SOCKET_ID_ANY);
	if (!nv_cache->md_pool) {
		SPDK_ERRLOG("Failed to initialize non-volatile cache metadata pool\n");
		return -1;
	}

	nv_cache->dma_buf = spdk_dma_zmalloc(FTL_BLOCK_SIZE, spdk_bdev_get_buf_align(bdev), NULL);
	if (!nv_cache->dma_buf) {
		SPDK_ERRLOG("Memory allocation failure\n");
		return -1;
	}

	if (pthread_spin_init(&nv_cache->lock, PTHREAD_PROCESS_PRIVATE)) {
		SPDK_ERRLOG("Failed to initialize cache lock\n");
		return -1;
	}

	nv_cache->bdev_desc = bdev_desc;
	nv_cache->current_addr = FTL_NV_CACHE_DATA_OFFSET;
	nv_cache->num_data_blocks = spdk_bdev_get_num_blocks(bdev) - 1;
	nv_cache->num_available = nv_cache->num_data_blocks;
	nv_cache->ready = false;

	return 0;
}

void
spdk_ftl_conf_init_defaults(struct spdk_ftl_conf *conf)
{
	*conf = g_default_conf;
}

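/*
 * Mempool object constructor - preallocates each request's segment bitmap
 * up front, when the pool is populated.
 */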
static void
ftl_lba_map_request_ctor(struct spdk_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
{
	struct ftl_lba_map_request *request = obj;
	struct spdk_ftl_dev *dev = opaque;

	request->segments = spdk_bit_array_create(spdk_divide_round_up(
				    ftl_num_band_lbks(dev), FTL_NUM_LBA_IN_BLOCK));
}

static int
ftl_init_lba_map_pools(struct spdk_ftl_dev *dev)
{
#define POOL_NAME_LEN 128
	char pool_name[POOL_NAME_LEN];
	int rc;

	rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ocssd-lba-pool");
	if (rc < 0 || rc >= POOL_NAME_LEN) {
		return -ENAMETOOLONG;
	}

	/* We need to reserve at least 2 buffers for the band close / open sequence
	 * alone, plus additional (8) buffers for handling write errors.
	 * TODO: This memory pool is utilized only by the core thread - it introduces
	 * unnecessary overhead and should be replaced by a different data structure.
	 */
	dev->lba_pool = spdk_mempool_create(pool_name, 2 + 8,
					    ftl_lba_map_pool_elem_size(dev),
					    SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					    SPDK_ENV_SOCKET_ID_ANY);
	if (!dev->lba_pool) {
		return -ENOMEM;
	}

	rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ocssd-lbareq-pool");
	if (rc < 0 || rc >= POOL_NAME_LEN) {
		return -ENAMETOOLONG;
	}

	dev->lba_request_pool = spdk_mempool_create_ctor(pool_name,
				dev->conf.max_reloc_qdepth * dev->conf.max_active_relocs,
				sizeof(struct ftl_lba_map_request),
				SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
				SPDK_ENV_SOCKET_ID_ANY,
				ftl_lba_map_request_ctor,
				dev);
	if (!dev->lba_request_pool) {
		return -ENOMEM;
	}

	return 0;
}

static void
ftl_init_wptr_list(struct spdk_ftl_dev *dev)
{
	LIST_INIT(&dev->wptr_list);
	LIST_INIT(&dev->flush_list);
	LIST_INIT(&dev->band_flush_list);
}

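/* Find the highest write sequence number among all closed (shut) bands */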
static size_t
ftl_dev_band_max_seq(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band;
	size_t seq = 0;

	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
		if (band->seq > seq) {
			seq = band->seq;
		}
	}

	return seq;
}

static void
_ftl_init_bands_state(void *ctx)
{
	struct ftl_band *band, *temp_band;
	struct spdk_ftl_dev *dev = ctx;

	dev->seq = ftl_dev_band_max_seq(dev);

	LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
		if (!band->lba_map.num_vld) {
			ftl_band_set_state(band, FTL_BAND_STATE_FREE);
		}
	}

	ftl_reloc_resume(dev->reloc);
	/* Clear the limit applications as they're incremented incorrectly by
	 * the initialization code.
	 */
	memset(dev->stats.limits, 0, sizeof(dev->stats.limits));
}

static int
ftl_init_num_free_bands(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band;
	int cnt = 0;

	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
		if (band->num_chunks && !band->lba_map.num_vld) {
			cnt++;
		}
	}
	return cnt;
}

static int
ftl_init_bands_state(struct spdk_ftl_dev *dev)
{
	/* TODO: Should we abort initialization or expose a read-only device
	 * if there are no free bands? If we abort initialization, should that
	 * depend on having no free bands at all, or on having fewer than some
	 * minimal number of free bands?
	 */
	if (!ftl_init_num_free_bands(dev)) {
		return -1;
	}

	spdk_thread_send_msg(ftl_get_core_thread(dev), _ftl_init_bands_state, dev);
	return 0;
}

static void
_ftl_dev_init_thread(void *ctx)
{
	struct ftl_thread *thread = ctx;
	struct spdk_ftl_dev *dev = thread->dev;

	thread->poller = spdk_poller_register(thread->poller_fn, thread, thread->period_us);
	if (!thread->poller) {
		SPDK_ERRLOG("Unable to register poller\n");
		assert(0);
	}

	if (spdk_get_thread() == ftl_get_core_thread(dev)) {
		ftl_anm_register_device(dev, ftl_process_anm_event);
	}
}

static int
ftl_dev_init_thread(struct spdk_ftl_dev *dev, struct ftl_thread *thread,
		    struct spdk_thread *spdk_thread, spdk_poller_fn fn, uint64_t period_us)
{
	thread->dev = dev;
	thread->poller_fn = fn;
	thread->thread = spdk_thread;
	thread->period_us = period_us;

	thread->qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, NULL, 0);
	if (!thread->qpair) {
		SPDK_ERRLOG("Unable to initialize qpair\n");
		return -1;
	}

	spdk_thread_send_msg(spdk_thread, _ftl_dev_init_thread, thread);
	return 0;
}

static int
ftl_dev_init_threads(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
{
	if (!opts->core_thread || !opts->read_thread) {
		return -1;
	}

	if (ftl_dev_init_thread(dev, &dev->core_thread, opts->core_thread, ftl_task_core, 0)) {
		SPDK_ERRLOG("Unable to initialize core thread\n");
		return -1;
	}

	if (ftl_dev_init_thread(dev, &dev->read_thread, opts->read_thread, ftl_task_read, 0)) {
		SPDK_ERRLOG("Unable to initialize read thread\n");
		return -1;
	}

	return 0;
}

static void
ftl_dev_free_thread(struct spdk_ftl_dev *dev, struct ftl_thread *thread)
{
	assert(thread->poller == NULL);

	spdk_nvme_ctrlr_free_io_qpair(thread->qpair);
	thread->thread = NULL;
	thread->qpair = NULL;
}

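/*
 * Allocate the logical-to-physical address table. Entries are 4 bytes when
 * the packed PPA fits in 32 bits and 8 bytes otherwise; all entries start
 * out as FTL_PPA_INVALID.
 */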
static int
ftl_dev_l2p_alloc(struct spdk_ftl_dev *dev)
{
	size_t addr_size;
	uint64_t i;

	if (dev->num_lbas == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Invalid l2p table size\n");
		return -1;
	}

	if (dev->l2p) {
		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "L2p table already allocated\n");
		return -1;
	}

	addr_size = dev->ppa_len >= 32 ? 8 : 4;
	dev->l2p = malloc(dev->num_lbas * addr_size);
	if (!dev->l2p) {
		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Failed to allocate l2p table\n");
		return -1;
	}

	for (i = 0; i < dev->num_lbas; ++i) {
		ftl_l2p_set(dev, i, ftl_to_ppa(FTL_PPA_INVALID));
	}

	return 0;
}

static void
ftl_call_init_complete_cb(void *_ctx)
{
	struct ftl_init_context *ctx = _ctx;
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(ctx, struct spdk_ftl_dev, init_ctx);

	if (ctx->cb_fn != NULL) {
		ctx->cb_fn(dev, ctx->cb_arg, 0);
	}
}

static void
ftl_init_complete(struct spdk_ftl_dev *dev)
{
	pthread_mutex_lock(&g_ftl_queue_lock);
	STAILQ_INSERT_HEAD(&g_ftl_queue, dev, stailq);
	pthread_mutex_unlock(&g_ftl_queue_lock);

	dev->initialized = 1;

	spdk_thread_send_msg(dev->init_ctx.thread, ftl_call_init_complete_cb, &dev->init_ctx);
}

static void
ftl_init_fail_cb(struct spdk_ftl_dev *dev, void *_ctx, int status)
{
	struct ftl_init_context *ctx = _ctx;

	if (ctx->cb_fn != NULL) {
		ctx->cb_fn(NULL, ctx->cb_arg, -ENODEV);
	}

	free(ctx);
}

static int _spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg,
			      struct spdk_thread *thread);

static void
ftl_init_fail(struct spdk_ftl_dev *dev)
{
	struct ftl_init_context *ctx;

	ctx = malloc(sizeof(*ctx));
	if (!ctx) {
		SPDK_ERRLOG("Unable to allocate context to free the device\n");
		return;
	}

	*ctx = dev->init_ctx;
	if (_spdk_ftl_dev_free(dev, ftl_init_fail_cb, ctx, ctx->thread)) {
		SPDK_ERRLOG("Unable to free the device\n");
		assert(0);
	}
}

static void
ftl_write_nv_cache_md_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_ftl_dev *dev = cb_arg;

	spdk_bdev_free_io(bdev_io);
	if (spdk_unlikely(!success)) {
		SPDK_ERRLOG("Writing non-volatile cache's metadata header failed\n");
		ftl_init_fail(dev);
		return;
	}

	dev->nv_cache.ready = true;
	ftl_init_complete(dev);
}

static void
ftl_clear_nv_cache_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_ftl_dev *dev = cb_arg;
	struct ftl_nv_cache *nv_cache = &dev->nv_cache;

	spdk_bdev_free_io(bdev_io);
	if (spdk_unlikely(!success)) {
		SPDK_ERRLOG("Unable to clear the non-volatile cache bdev\n");
		ftl_init_fail(dev);
		return;
	}

	nv_cache->phase = 1;
	if (ftl_nv_cache_write_header(nv_cache, false, ftl_write_nv_cache_md_cb, dev)) {
		SPDK_ERRLOG("Unable to write non-volatile cache metadata header\n");
		ftl_init_fail(dev);
	}
}

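/*
 * Set up a freshly created device: generate its UUID, size the L2P based
 * on the usable (non-reserved) lbks, mark empty bands as free and, if a
 * non-volatile cache is attached, scrub it before use.
 */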
static int
ftl_setup_initial_state(struct spdk_ftl_dev *dev)
{
	struct spdk_ftl_conf *conf = &dev->conf;
	size_t i;
	int rc;

	spdk_uuid_generate(&dev->uuid);

	dev->num_lbas = 0;
	for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
		dev->num_lbas += ftl_band_num_usable_lbks(&dev->bands[i]);
	}

	dev->num_lbas = (dev->num_lbas * (100 - conf->lba_rsvd)) / 100;

	if (ftl_dev_l2p_alloc(dev)) {
		SPDK_ERRLOG("Unable to init l2p table\n");
		return -1;
	}

	if (ftl_init_bands_state(dev)) {
		SPDK_ERRLOG("Unable to finish the initialization\n");
		return -1;
	}

	if (!ftl_dev_has_nv_cache(dev)) {
		ftl_init_complete(dev);
	} else {
		rc = ftl_nv_cache_scrub(&dev->nv_cache, ftl_clear_nv_cache_cb, dev);
		if (spdk_unlikely(rc != 0)) {
			SPDK_ERRLOG("Unable to clear the non-volatile cache bdev: %s\n",
				    spdk_strerror(-rc));
			return -1;
		}
	}

	return 0;
}

static void
ftl_restore_nv_cache_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status)
{
	if (spdk_unlikely(status != 0)) {
		SPDK_ERRLOG("Failed to restore the non-volatile cache state\n");
		ftl_init_fail(dev);
		return;
	}

	ftl_init_complete(dev);
}

static void
ftl_restore_device_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status)
{
	if (status) {
		SPDK_ERRLOG("Failed to restore the device from the SSD\n");
		ftl_init_fail(dev);
		return;
	}

	if (ftl_init_bands_state(dev)) {
		SPDK_ERRLOG("Unable to finish the initialization\n");
		ftl_init_fail(dev);
		return;
	}

	if (!ftl_dev_has_nv_cache(dev)) {
		ftl_init_complete(dev);
		return;
	}

	ftl_restore_nv_cache(restore, ftl_restore_nv_cache_cb);
}

static void
ftl_restore_md_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status)
{
	if (status) {
		SPDK_ERRLOG("Failed to restore the metadata from the SSD\n");
		goto error;
	}

	/* After the metadata is read it should be possible to allocate the L2P */
	if (ftl_dev_l2p_alloc(dev)) {
		SPDK_ERRLOG("Failed to allocate the L2P\n");
		goto error;
	}

	if (ftl_restore_device(restore, ftl_restore_device_cb)) {
		SPDK_ERRLOG("Failed to start device restoration from the SSD\n");
		goto error;
	}

	return;
error:
	ftl_init_fail(dev);
}

static int
ftl_restore_state(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
{
	dev->uuid = opts->uuid;

	if (ftl_restore_md(dev, ftl_restore_md_cb)) {
		SPDK_ERRLOG("Failed to start metadata restoration from the SSD\n");
		return -1;
	}

	return 0;
}

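/*
 * Per-thread IO channel constructor - allocates the channel's IO object
 * pool and, when a non-volatile cache is attached, a channel to its bdev.
 */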
static int
ftl_io_channel_create_cb(void *io_device, void *ctx)
{
	struct spdk_ftl_dev *dev = io_device;
	struct ftl_io_channel *ioch = ctx;
	char mempool_name[32];

	snprintf(mempool_name, sizeof(mempool_name), "ftl_io_%p", ioch);
	ioch->cache_ioch = NULL;
	ioch->dev = dev;
	ioch->elem_size = sizeof(struct ftl_md_io);
	ioch->io_pool = spdk_mempool_create(mempool_name,
					    dev->conf.user_io_pool_size,
					    ioch->elem_size,
					    0,
					    SPDK_ENV_SOCKET_ID_ANY);
	if (!ioch->io_pool) {
		SPDK_ERRLOG("Failed to create IO channel's IO pool\n");
		return -1;
	}

	if (ftl_dev_has_nv_cache(dev)) {
		ioch->cache_ioch = spdk_bdev_get_io_channel(dev->nv_cache.bdev_desc);
		if (!ioch->cache_ioch) {
			SPDK_ERRLOG("Failed to create cache IO channel\n");
			spdk_mempool_free(ioch->io_pool);
			return -1;
		}
	}

	return 0;
}

static void
ftl_io_channel_destroy_cb(void *io_device, void *ctx)
{
	struct ftl_io_channel *ioch = ctx;

	spdk_mempool_free(ioch->io_pool);

	if (ioch->cache_ioch) {
		spdk_put_io_channel(ioch->cache_ioch);
	}
}

static int
ftl_dev_init_io_channel(struct spdk_ftl_dev *dev)
{
	spdk_io_device_register(dev, ftl_io_channel_create_cb, ftl_io_channel_destroy_cb,
				sizeof(struct ftl_io_channel),
				NULL);

	dev->ioch = spdk_get_io_channel(dev);
	if (!dev->ioch) {
		spdk_io_device_unregister(dev, NULL);
		return -1;
	}

	return 0;
}

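/*
 * Create an FTL device on top of an Open Channel SSD. Synchronous setup
 * failures are reported through the return value, while the remaining,
 * asynchronous part of the initialization reports through cb_fn.
 */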
int
spdk_ftl_dev_init(const struct spdk_ftl_dev_init_opts *_opts, spdk_ftl_init_fn cb_fn, void *cb_arg)
{
	struct spdk_ftl_dev *dev;
	struct spdk_ftl_dev_init_opts opts = *_opts;

	dev = calloc(1, sizeof(*dev));
	if (!dev) {
		return -ENOMEM;
	}

	if (!opts.conf) {
		opts.conf = &g_default_conf;
	}

	TAILQ_INIT(&dev->retry_queue);
	dev->conf = *opts.conf;
	dev->init_ctx.cb_fn = cb_fn;
	dev->init_ctx.cb_arg = cb_arg;
	dev->init_ctx.thread = spdk_get_thread();
	dev->range = opts.range;
	dev->limit = SPDK_FTL_LIMIT_MAX;

	dev->name = strdup(opts.name);
	if (!dev->name) {
		SPDK_ERRLOG("Unable to set device name\n");
		goto fail_sync;
	}

	if (ftl_dev_nvme_init(dev, &opts)) {
		SPDK_ERRLOG("Unable to initialize NVMe structures\n");
		goto fail_sync;
	}

	/* In case of errors, we free all of the memory in ftl_dev_free_sync(),
	 * so we don't have to clean up in each of the init functions.
	 */
	if (ftl_dev_retrieve_geo(dev)) {
		SPDK_ERRLOG("Unable to retrieve geometry\n");
		goto fail_sync;
	}

	if (ftl_check_init_opts(&opts, &dev->geo)) {
		SPDK_ERRLOG("Invalid device configuration\n");
		goto fail_sync;
	}

	if (ftl_dev_init_punits(dev)) {
		SPDK_ERRLOG("Unable to initialize LUNs\n");
		goto fail_sync;
	}

	if (ftl_init_lba_map_pools(dev)) {
		SPDK_ERRLOG("Unable to init LBA map pools\n");
		goto fail_sync;
	}

	ftl_init_wptr_list(dev);

	if (ftl_dev_init_bands(dev)) {
		SPDK_ERRLOG("Unable to initialize band array\n");
		goto fail_sync;
	}

	if (ftl_dev_init_nv_cache(dev, opts.cache_bdev_desc)) {
		SPDK_ERRLOG("Unable to initialize persistent cache\n");
		goto fail_sync;
	}

	dev->rwb = ftl_rwb_init(&dev->conf, dev->geo.ws_opt, dev->md_size, ftl_dev_num_punits(dev));
	if (!dev->rwb) {
		SPDK_ERRLOG("Unable to initialize rwb structures\n");
		goto fail_sync;
	}

	dev->reloc = ftl_reloc_init(dev);
	if (!dev->reloc) {
		SPDK_ERRLOG("Unable to initialize reloc structures\n");
		goto fail_sync;
	}

	if (ftl_dev_init_io_channel(dev)) {
		SPDK_ERRLOG("Unable to initialize IO channels\n");
		goto fail_sync;
	}

	if (ftl_dev_init_threads(dev, &opts)) {
		SPDK_ERRLOG("Unable to initialize device threads\n");
		goto fail_sync;
	}

	if (opts.mode & SPDK_FTL_MODE_CREATE) {
		if (ftl_setup_initial_state(dev)) {
			SPDK_ERRLOG("Failed to setup initial state of the device\n");
			goto fail_async;
		}
	} else {
		if (ftl_restore_state(dev, &opts)) {
			SPDK_ERRLOG("Unable to restore device's state from the SSD\n");
			goto fail_async;
		}
	}

	return 0;
fail_sync:
	ftl_dev_free_sync(dev);
	return -ENOMEM;
fail_async:
	ftl_init_fail(dev);
	return 0;
}

static void
_ftl_halt_defrag(void *arg)
{
	ftl_reloc_halt(((struct spdk_ftl_dev *)arg)->reloc);
}

static void
ftl_lba_map_request_dtor(struct spdk_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
{
	struct ftl_lba_map_request *request = obj;

	spdk_bit_array_free(&request->segments);
}

static void
ftl_dev_free_sync(struct spdk_ftl_dev *dev)
{
	struct spdk_ftl_dev *iter;
	size_t i;

	if (!dev) {
		return;
	}

	pthread_mutex_lock(&g_ftl_queue_lock);
	STAILQ_FOREACH(iter, &g_ftl_queue, stailq) {
		if (iter == dev) {
			STAILQ_REMOVE(&g_ftl_queue, dev, spdk_ftl_dev, stailq);
			break;
		}
	}
	pthread_mutex_unlock(&g_ftl_queue_lock);

	assert(LIST_EMPTY(&dev->wptr_list));

	ftl_dev_dump_bands(dev);
	ftl_dev_dump_stats(dev);

	if (dev->ioch) {
		spdk_put_io_channel(dev->ioch);
		spdk_io_device_unregister(dev, NULL);
	}

	if (dev->bands) {
		for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
			free(dev->bands[i].chunk_buf);
			spdk_bit_array_free(&dev->bands[i].lba_map.vld);
			spdk_bit_array_free(&dev->bands[i].reloc_bitmap);
		}
	}

	spdk_dma_free(dev->nv_cache.dma_buf);

	spdk_mempool_free(dev->lba_pool);
	spdk_mempool_free(dev->nv_cache.md_pool);
	if (dev->lba_request_pool) {
		spdk_mempool_obj_iter(dev->lba_request_pool, ftl_lba_map_request_dtor, NULL);
	}
	spdk_mempool_free(dev->lba_request_pool);

	ftl_rwb_free(dev->rwb);
	ftl_reloc_free(dev->reloc);

	free(dev->name);
	free(dev->punits);
	free(dev->bands);
	free(dev->l2p);
	free(dev);
}

static void
ftl_call_fini_complete(struct spdk_ftl_dev *dev, int status)
{
	struct ftl_init_context ctx = dev->fini_ctx;

	ftl_dev_free_sync(dev);
	if (ctx.cb_fn != NULL) {
		ctx.cb_fn(NULL, ctx.cb_arg, status);
	}
}

static void
ftl_nv_cache_header_fini_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	int status = 0;

	spdk_bdev_free_io(bdev_io);
	if (spdk_unlikely(!success)) {
		SPDK_ERRLOG("Failed to write non-volatile cache metadata header\n");
		status = -EIO;
	}

	ftl_call_fini_complete((struct spdk_ftl_dev *)cb_arg, status);
}

static void
ftl_halt_complete_cb(void *ctx)
{
	struct spdk_ftl_dev *dev = ctx;
	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
	int rc = 0;

	if (!ftl_dev_has_nv_cache(dev)) {
		ftl_call_fini_complete(dev, 0);
	} else {
		rc = ftl_nv_cache_write_header(nv_cache, true, ftl_nv_cache_header_fini_cb, dev);
		if (spdk_unlikely(rc != 0)) {
			SPDK_ERRLOG("Failed to write non-volatile cache metadata header: %s\n",
				    spdk_strerror(-rc));
			ftl_call_fini_complete(dev, rc);
		}
	}
}

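/*
 * Wait until both device threads have stopped their pollers, then tear the
 * threads down and finish the shutdown on the fini thread.
 */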
static int
ftl_halt_poller(void *ctx)
{
	struct spdk_ftl_dev *dev = ctx;

	if (!dev->core_thread.poller && !dev->read_thread.poller) {
		spdk_poller_unregister(&dev->fini_ctx.poller);

		ftl_dev_free_thread(dev, &dev->read_thread);
		ftl_dev_free_thread(dev, &dev->core_thread);

		ftl_anm_unregister_device(dev);

		spdk_thread_send_msg(dev->fini_ctx.thread, ftl_halt_complete_cb, dev);
	}

	return 0;
}

static void
ftl_add_halt_poller(void *ctx)
{
	struct spdk_ftl_dev *dev = ctx;

	dev->halt = 1;

	_ftl_halt_defrag(dev);

	assert(!dev->fini_ctx.poller);
	dev->fini_ctx.poller = spdk_poller_register(ftl_halt_poller, dev, 100);
}

static int
_spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg,
		   struct spdk_thread *thread)
{
	if (dev->fini_ctx.cb_fn != NULL) {
		return -EBUSY;
	}

	dev->fini_ctx.cb_fn = cb_fn;
	dev->fini_ctx.cb_arg = cb_arg;
	dev->fini_ctx.thread = thread;

	ftl_rwb_disable_interleaving(dev->rwb);

	spdk_thread_send_msg(ftl_get_core_thread(dev), ftl_add_halt_poller, dev);
	return 0;
}

int
spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg)
{
	return _spdk_ftl_dev_free(dev, cb_fn, cb_arg, spdk_get_thread());
}

int
spdk_ftl_module_init(const struct ftl_module_init_opts *opts, spdk_ftl_fn cb, void *cb_arg)
{
	return ftl_anm_init(opts->anm_thread, cb, cb_arg);
}

int
spdk_ftl_module_fini(spdk_ftl_fn cb, void *cb_arg)
{
	return ftl_anm_free(cb, cb_arg);
}

SPDK_LOG_REGISTER_COMPONENT("ftl_init", SPDK_LOG_FTL_INIT)