/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <spdk/stdinc.h>
#include <spdk/nvme.h>
#include <spdk/io_channel.h>
#include <spdk/bdev_module.h>
#include <spdk_internal/log.h>
#include <spdk/ftl.h>
#include "ftl_core.h"
#include "ftl_anm.h"
#include "ftl_io.h"
#include "ftl_reloc.h"
#include "ftl_rwb.h"
#include "ftl_band.h"
#include "ftl_debug.h"

#define FTL_CORE_RING_SIZE	4096
#define FTL_INIT_TIMEOUT	30
#define FTL_NSID		1

#define ftl_range_intersect(s1, e1, s2, e2) \
	((s1) <= (e2) && (s2) <= (e1))

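/* Completion context for admin commands issued synchronously during init;
 * ftl_admin_cb() fills in the status and sets complete, while the submitter
 * busy-waits on it by polling admin completions.
 */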
struct ftl_admin_cmpl {
	struct spdk_nvme_cpl			status;

	int					complete;
};

static STAILQ_HEAD(, spdk_ftl_dev)	g_ftl_queue = STAILQ_HEAD_INITIALIZER(g_ftl_queue);
static pthread_mutex_t		g_ftl_queue_lock = PTHREAD_MUTEX_INITIALIZER;
static const struct spdk_ftl_conf	g_default_conf = {
	.defrag = {
		.limits = {
			/* 5 free bands  / 0 % host writes */
			[SPDK_FTL_LIMIT_CRIT]  = { .thld = 5,  .limit = 0 },
			/* 10 free bands / 5 % host writes */
			[SPDK_FTL_LIMIT_HIGH]  = { .thld = 10, .limit = 5 },
			/* 20 free bands / 40 % host writes */
			[SPDK_FTL_LIMIT_LOW]   = { .thld = 20, .limit = 40 },
			/* 40 free bands / 100 % host writes - defrag starts running */
			[SPDK_FTL_LIMIT_START] = { .thld = 40, .limit = 100 },
		},
		/* 10 percent valid lbks */
		.invalid_thld = 10,
	},
	/* 20% spare lbks */
	.lba_rsvd = 20,
	/* 6M write buffer */
	.rwb_size = 6 * 1024 * 1024,
	/* 90% band fill threshold */
	.band_thld = 90,
	/* Max 32 IO depth per band relocate */
	.max_reloc_qdepth = 32,
	/* Max 3 active band relocates */
	.max_active_relocs = 3,
	/* IO pool size per user thread (this should be adjusted to the thread's IO queue depth) */
	.user_io_pool_size = 2048,
};

static void ftl_dev_free_sync(struct spdk_ftl_dev *dev);

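/* Admin command completion callback - stores the completion status and marks
 * the ftl_admin_cmpl context as complete for the polling caller.
 */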
static void
ftl_admin_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct ftl_admin_cmpl *cmpl = ctx;

	cmpl->complete = 1;
	cmpl->status = *cpl;
}

static int
ftl_band_init_md(struct ftl_band *band)
{
	struct ftl_md *md = &band->md;

	md->vld_map = spdk_bit_array_create(ftl_num_band_lbks(band->dev));
	if (!md->vld_map) {
		return -ENOMEM;
	}

	pthread_spin_init(&md->lock, PTHREAD_PROCESS_PRIVATE);
	ftl_band_md_clear(&band->md);
	return 0;
}

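/* Validate the requested parallel unit range against the device geometry and
 * make sure it doesn't overlap with any FTL device already registered on the
 * same transport ID.
 */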
static int
ftl_check_init_opts(const struct spdk_ftl_dev_init_opts *opts,
		    const struct spdk_ocssd_geometry_data *geo)
{
	struct spdk_ftl_dev *dev;
	size_t num_punits = geo->num_pu * geo->num_grp;
	int rc = 0;

	if (opts->range.begin > opts->range.end || opts->range.end >= num_punits) {
		return -1;
	}

	pthread_mutex_lock(&g_ftl_queue_lock);

	STAILQ_FOREACH(dev, &g_ftl_queue, stailq) {
		if (spdk_nvme_transport_id_compare(&dev->trid, &opts->trid)) {
			continue;
		}

		if (ftl_range_intersect(opts->range.begin, opts->range.end,
					dev->range.begin, dev->range.end)) {
			rc = -1;
			goto out;
		}
	}

out:
	pthread_mutex_unlock(&g_ftl_queue_lock);
	return rc;
}

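/* Fetch a portion of the OCSSD chunk information log and poll the admin queue
 * until the command completes.
 */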
static int
ftl_retrieve_bbt_page(struct spdk_ftl_dev *dev, uint64_t offset,
		      struct spdk_ocssd_chunk_information_entry *info,
		      unsigned int num_entries)
{
	volatile struct ftl_admin_cmpl cmpl = {};
	uint32_t nsid = spdk_nvme_ns_get_id(dev->ns);

	if (spdk_nvme_ctrlr_cmd_get_log_page(dev->ctrlr, SPDK_OCSSD_LOG_CHUNK_INFO, nsid,
					     info, num_entries * sizeof(*info),
					     offset * sizeof(*info),
					     ftl_admin_cb, (void *)&cmpl)) {
		return -1;
	}

	while (!cmpl.complete) {
		spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
	}

	if (spdk_nvme_cpl_is_error(&cmpl.status)) {
		SPDK_ERRLOG("Unexpected status code: [%d], status code type: [%d]\n",
			    cmpl.status.status.sc, cmpl.status.status.sct);
		return -1;
	}

	return 0;
}

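/* Retrieve the chunk information (bad block table) for every chunk belonging
 * to a single parallel unit, reading at most one page worth of entries at a
 * time.
 */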
static int
ftl_retrieve_bbt(struct spdk_ftl_dev *dev, const struct ftl_punit *punit,
		 struct spdk_ocssd_chunk_information_entry *info)
{
	uint32_t i = 0;
	unsigned int num_entries = PAGE_SIZE / sizeof(*info);
	uint64_t off = (punit->start_ppa.grp * dev->geo.num_pu + punit->start_ppa.pu) *
		       dev->geo.num_chk;

	for (i = 0; i < dev->geo.num_chk; i += num_entries) {
		if (num_entries > dev->geo.num_chk - i) {
			num_entries = dev->geo.num_chk - i;
		}

		if (ftl_retrieve_bbt_page(dev, off + i, &info[i], num_entries)) {
			return -1;
		}
	}

	return 0;
}

static unsigned char
ftl_get_chunk_state(const struct spdk_ocssd_chunk_information_entry *info)
{
	if (info->cs.free) {
		return FTL_CHUNK_STATE_FREE;
	}

	if (info->cs.open) {
		return FTL_CHUNK_STATE_OPEN;
	}

	if (info->cs.closed) {
		return FTL_CHUNK_STATE_CLOSED;
	}

	if (info->cs.offline) {
		return FTL_CHUNK_STATE_BAD;
	}

	assert(0 && "Invalid block state");
	return FTL_CHUNK_STATE_BAD;
}

static void
ftl_remove_empty_bands(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band, *temp_band;

	/* Remove the band from the shut_bands list to prevent further processing
	 * if all of its blocks are bad (i.e. it has no usable chunks).
	 */
	LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
		if (!band->num_chunks) {
			dev->num_bands--;
			LIST_REMOVE(band, list_entry);
		}
	}
}

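/* Allocate the band array, chain the bands onto the shut_bands list, and
 * populate each band's chunk list from the per-punit chunk information log,
 * skipping chunks reported as bad/offline.
 */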
static int
ftl_dev_init_bands(struct spdk_ftl_dev *dev)
{
	struct spdk_ocssd_chunk_information_entry	*info;
	struct ftl_band					*band, *pband;
	struct ftl_punit				*punit;
	struct ftl_chunk				*chunk;
	unsigned int					i, j;
	char						buf[128];
	int						rc = 0;

	LIST_INIT(&dev->free_bands);
	LIST_INIT(&dev->shut_bands);

	dev->num_free = 0;
	dev->num_bands = ftl_dev_num_bands(dev);
	dev->bands = calloc(ftl_dev_num_bands(dev), sizeof(*dev->bands));
	if (!dev->bands) {
		return -1;
	}

	info = calloc(dev->geo.num_chk, sizeof(*info));
	if (!info) {
		return -1;
	}

	for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
		band = &dev->bands[i];
		band->id = i;
		band->dev = dev;
		band->state = FTL_BAND_STATE_CLOSED;

		if (LIST_EMPTY(&dev->shut_bands)) {
			LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
		} else {
			LIST_INSERT_AFTER(pband, band, list_entry);
		}
		pband = band;

		CIRCLEQ_INIT(&band->chunks);
		band->chunk_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->chunk_buf));
		if (!band->chunk_buf) {
			SPDK_ERRLOG("Failed to allocate block state table for band: [%u]\n", i);
			rc = -1;
			goto out;
		}

		rc = ftl_band_init_md(band);
		if (rc) {
			SPDK_ERRLOG("Failed to initialize metadata structures for band [%u]\n", i);
			goto out;
		}
	}

	for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
		punit = &dev->punits[i];

		rc = ftl_retrieve_bbt(dev, punit, info);
		if (rc) {
			SPDK_ERRLOG("Failed to retrieve bbt for @ppa: %s [%lu]\n",
				    ftl_ppa2str(punit->start_ppa, buf, sizeof(buf)),
				    ftl_ppa_addr_pack(dev, punit->start_ppa));
			goto out;
		}

		for (j = 0; j < ftl_dev_num_bands(dev); ++j) {
			band = &dev->bands[j];
			chunk = &band->chunk_buf[i];
			chunk->pos = i;
			chunk->state = ftl_get_chunk_state(&info[j]);
			chunk->punit = punit;
			chunk->start_ppa = punit->start_ppa;
			chunk->start_ppa.chk = band->id;

			if (chunk->state != FTL_CHUNK_STATE_BAD) {
				band->num_chunks++;
				CIRCLEQ_INSERT_TAIL(&band->chunks, chunk, circleq);
			}
		}
	}

	ftl_remove_empty_bands(dev);
out:
	free(info);
	return rc;
}

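/* Translate the configured parallel unit range into per-punit starting PPAs
 * (group / parallel unit coordinates).
 */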
static int
ftl_dev_init_punits(struct spdk_ftl_dev *dev)
{
	unsigned int i, punit;

	dev->punits = calloc(ftl_dev_num_punits(dev), sizeof(*dev->punits));
	if (!dev->punits) {
		return -1;
	}

	for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
		dev->punits[i].dev = dev;
		punit = dev->range.begin + i;

		dev->punits[i].start_ppa.ppa = 0;
		dev->punits[i].start_ppa.grp = punit % dev->geo.num_grp;
		dev->punits[i].start_ppa.pu = punit / dev->geo.num_grp;
	}

	return 0;
}

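/* Issue the OCSSD geometry admin command, busy-wait for its completion and
 * derive the PPA address format (field offsets and masks) as well as the
 * transfer size from the reported geometry.
 */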
static int
ftl_dev_retrieve_geo(struct spdk_ftl_dev *dev)
{
	volatile struct ftl_admin_cmpl cmpl = {};
	struct spdk_ocssd_geometry_data *buf;
	uint32_t nsid = spdk_nvme_ns_get_id(dev->ns);
	int rc = -1;

	buf = malloc(PAGE_SIZE);
	if (!buf) {
		SPDK_ERRLOG("Memory allocation failure\n");
		return -1;
	}

	if (spdk_nvme_ocssd_ctrlr_cmd_geometry(dev->ctrlr, nsid, buf, PAGE_SIZE,
					       ftl_admin_cb, (void *)&cmpl)) {
		SPDK_ERRLOG("Unable to retrieve geometry\n");
		goto out;
	}

	/* TODO: add a timeout */
	while (!cmpl.complete) {
		spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
	}

	if (spdk_nvme_cpl_is_error(&cmpl.status)) {
		SPDK_ERRLOG("Unexpected status code: [%d], status code type: [%d]\n",
			    cmpl.status.status.sc, cmpl.status.status.sct);
		goto out;
	}

	dev->geo = *buf;

	/* TODO: add sanity checks for the geo */
	dev->ppa_len = dev->geo.lbaf.grp_len +
		       dev->geo.lbaf.pu_len +
		       dev->geo.lbaf.chk_len +
		       dev->geo.lbaf.lbk_len;

	dev->ppaf.lbk_offset = 0;
	dev->ppaf.lbk_mask   = (1 << dev->geo.lbaf.lbk_len) - 1;
	dev->ppaf.chk_offset = dev->ppaf.lbk_offset + dev->geo.lbaf.lbk_len;
	dev->ppaf.chk_mask   = (1 << dev->geo.lbaf.chk_len) - 1;
	dev->ppaf.pu_offset  = dev->ppaf.chk_offset + dev->geo.lbaf.chk_len;
	dev->ppaf.pu_mask    = (1 << dev->geo.lbaf.pu_len) - 1;
	dev->ppaf.grp_offset = dev->ppaf.pu_offset + dev->geo.lbaf.pu_len;
	dev->ppaf.grp_mask   = (1 << dev->geo.lbaf.grp_len) - 1;

	/* We're using the optimal write size (ws_opt) as our transfer size */
	dev->xfer_size = dev->geo.ws_opt;

	rc = 0;
out:
	free(buf);
	return rc;
}

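/* Capture the NVMe controller/namespace handles and verify that the controller
 * exposes exactly one namespace with a dword-aligned metadata size.
 */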
static int
ftl_dev_nvme_init(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
{
	dev->ctrlr = opts->ctrlr;

	assert(dev->ctrlr != NULL);

	if (spdk_nvme_ctrlr_get_num_ns(dev->ctrlr) != 1) {
		SPDK_ERRLOG("Unsupported number of namespaces\n");
		return -1;
	}

	dev->ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, FTL_NSID);
	dev->trid = opts->trid;
	dev->md_size = spdk_nvme_ns_get_md_size(dev->ns);
	if (dev->md_size % sizeof(uint32_t) != 0) {
		/* Metadata pointer must be dword aligned */
		SPDK_ERRLOG("Unsupported metadata size (%zu)\n", dev->md_size);
		return -1;
	}

	return 0;
}

static int
ftl_conf_validate(const struct spdk_ftl_conf *conf)
{
	size_t i;

	if (conf->defrag.invalid_thld >= 100) {
		return -1;
	}
	if (conf->lba_rsvd >= 100) {
		return -1;
	}
	if (conf->lba_rsvd == 0) {
		return -1;
	}
	if (conf->rwb_size == 0) {
		return -1;
	}
	if (conf->rwb_size % FTL_BLOCK_SIZE != 0) {
		return -1;
	}

	for (i = 0; i < SPDK_FTL_LIMIT_MAX; ++i) {
		if (conf->defrag.limits[i].limit > 100) {
			return -1;
		}
	}

	return 0;
}

void
spdk_ftl_conf_init_defaults(struct spdk_ftl_conf *conf)
{
	*conf = g_default_conf;
}

static int
ftl_init_wptr_list(struct spdk_ftl_dev *dev)
{
#define POOL_NAME_LEN 128
	char pool_name[POOL_NAME_LEN];
	int rc;

	LIST_INIT(&dev->wptr_list);
	LIST_INIT(&dev->flush_list);

	rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ocssd-lba-pool");
	if (rc < 0 || rc >= POOL_NAME_LEN) {
		return -ENAMETOOLONG;
	}

	/* We need to reserve at least 2 buffers for the band close / open sequence
	 * alone, plus additional (8) buffers for handling write errors.
	 * TODO: This memory pool is only utilized by the core thread - it introduces
	 * unnecessary overhead and should be replaced with a different data structure.
	 */
	dev->lba_pool = spdk_mempool_create(pool_name, 2 + 8,
					    ftl_num_band_lbks(dev) * sizeof(uint64_t),
					    SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					    SPDK_ENV_SOCKET_ID_ANY);
	if (!dev->lba_pool) {
		return -ENOMEM;
	}

	return 0;
}

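/* Return the highest band sequence number found on the shut_bands list. */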
static size_t
ftl_dev_band_max_seq(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band;
	size_t seq = 0;

	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
		if (band->md.seq > seq) {
			seq = band->md.seq;
		}
	}

	return seq;
}

static void
_ftl_init_bands_state(void *ctx)
{
	struct ftl_band *band, *temp_band;
	struct spdk_ftl_dev *dev = ctx;

	dev->seq = ftl_dev_band_max_seq(dev);

	LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
		if (!band->md.num_vld) {
			ftl_band_set_state(band, FTL_BAND_STATE_FREE);
		}
	}

	ftl_reloc_resume(dev->reloc);
	/* Clear the limit applications, as they're incremented incorrectly by
	 * the initialization code.
	 */
	memset(dev->stats.limits, 0, sizeof(dev->stats.limits));
}

static int
ftl_init_num_free_bands(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band;
	int cnt = 0;

	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
		if (band->num_chunks && !band->md.num_vld) {
			cnt++;
		}
	}
	return cnt;
}

static int
ftl_init_bands_state(struct spdk_ftl_dev *dev)
{
	/* TODO: Should we abort initialization or expose a read-only device
	 * if there are no free bands? And if we do abort, should the check be
	 * for having no free bands at all, or for dropping below some minimal
	 * number of free bands?
	 */
	if (!ftl_init_num_free_bands(dev)) {
		return -1;
	}

	spdk_thread_send_msg(ftl_get_core_thread(dev), _ftl_init_bands_state, dev);
	return 0;
}

static void
_ftl_dev_init_thread(void *ctx)
{
	struct ftl_thread *thread = ctx;
	struct spdk_ftl_dev *dev = thread->dev;

	thread->thread = spdk_get_thread();

	thread->poller = spdk_poller_register(thread->poller_fn, thread, thread->period_us);
	if (!thread->poller) {
		SPDK_ERRLOG("Unable to register poller\n");
		assert(0);
	}

	if (spdk_get_thread() == ftl_get_core_thread(dev)) {
		ftl_anm_register_device(dev, ftl_process_anm_event);
	}
}

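/* Set up one of the FTL threads: allocate its IO qpair here and send a message
 * to the target SPDK thread so the poller (and, for the core thread, the ANM
 * event handler) gets registered in that thread's context.
 */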
static int
ftl_dev_init_thread(struct spdk_ftl_dev *dev, struct ftl_thread *thread,
		    struct spdk_thread *spdk_thread, spdk_poller_fn fn, uint64_t period_us)
{
	thread->dev = dev;
	thread->poller_fn = fn;
	thread->thread = spdk_thread;
	thread->period_us = period_us;

	thread->qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, NULL, 0);
	if (!thread->qpair) {
		SPDK_ERRLOG("Unable to initialize qpair\n");
		return -1;
	}

	spdk_thread_send_msg(spdk_thread, _ftl_dev_init_thread, thread);
	return 0;
}

static int
ftl_dev_init_threads(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
{
	if (!opts->core_thread || !opts->read_thread) {
		return -1;
	}

	if (ftl_dev_init_thread(dev, &dev->core_thread, opts->core_thread, ftl_task_core, 0)) {
		SPDK_ERRLOG("Unable to initialize core thread\n");
		return -1;
	}

	if (ftl_dev_init_thread(dev, &dev->read_thread, opts->read_thread, ftl_task_read, 0)) {
		SPDK_ERRLOG("Unable to initialize read thread\n");
		return -1;
	}

	return 0;
}

static void
ftl_dev_free_thread(struct spdk_ftl_dev *dev, struct ftl_thread *thread)
{
	assert(thread->poller == NULL);

	spdk_nvme_ctrlr_free_io_qpair(thread->qpair);
	thread->thread = NULL;
	thread->qpair = NULL;
}

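/* Allocate the logical-to-physical table and mark every entry as invalid.
 * Entries are 8 bytes wide when the PPA length is 32 bits or more and
 * 4 bytes otherwise.
 */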
static int
ftl_dev_l2p_alloc(struct spdk_ftl_dev *dev)
{
	size_t addr_size;
	uint64_t i;

	if (dev->num_lbas == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Invalid l2p table size\n");
		return -1;
	}

	if (dev->l2p) {
		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "L2p table already allocated\n");
		return -1;
	}

	addr_size = dev->ppa_len >= 32 ? 8 : 4;
	dev->l2p = malloc(dev->num_lbas * addr_size);
	if (!dev->l2p) {
		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Failed to allocate l2p table\n");
		return -1;
	}

	for (i = 0; i < dev->num_lbas; ++i) {
		ftl_l2p_set(dev, i, ftl_to_ppa(FTL_PPA_INVALID));
	}

	return 0;
}

static void
ftl_init_complete(struct spdk_ftl_dev *dev)
{
	pthread_mutex_lock(&g_ftl_queue_lock);
	STAILQ_INSERT_HEAD(&g_ftl_queue, dev, stailq);
	pthread_mutex_unlock(&g_ftl_queue_lock);

	dev->initialized = 1;

	if (dev->init_cb) {
		dev->init_cb(dev, dev->init_arg, 0);
	}

	dev->init_cb = NULL;
	dev->init_arg = NULL;
}

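/* Fresh-device (SPDK_FTL_MODE_CREATE) path: generate a new UUID, size the
 * usable LBA space after subtracting the lba_rsvd percentage of spare blocks,
 * allocate the L2P and finish the initialization.
 */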
static int
ftl_setup_initial_state(struct spdk_ftl_dev *dev)
{
	struct spdk_ftl_conf *conf = &dev->conf;
	size_t i;

	spdk_uuid_generate(&dev->uuid);

	dev->num_lbas = 0;
	for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
		dev->num_lbas += ftl_band_num_usable_lbks(&dev->bands[i]);
	}

	dev->num_lbas = (dev->num_lbas * (100 - conf->lba_rsvd)) / 100;

	if (ftl_dev_l2p_alloc(dev)) {
		SPDK_ERRLOG("Unable to init l2p table\n");
		return -1;
	}

	if (ftl_init_bands_state(dev)) {
		SPDK_ERRLOG("Unable to finish the initialization\n");
		return -1;
	}

	ftl_init_complete(dev);
	return 0;
}

struct ftl_init_fail_ctx {
	spdk_ftl_init_fn	cb;
	void			*arg;
};

static void
ftl_init_fail_cb(void *ctx, int status)
{
	struct ftl_init_fail_ctx *fail_cb = ctx;

	fail_cb->cb(NULL, fail_cb->arg, -ENODEV);
	free(fail_cb);
}

static void
ftl_init_fail(struct spdk_ftl_dev *dev)
{
	struct ftl_init_fail_ctx *fail_cb;

	fail_cb = malloc(sizeof(*fail_cb));
	if (!fail_cb) {
		SPDK_ERRLOG("Unable to allocate context to free the device\n");
		return;
	}

	fail_cb->cb = dev->init_cb;
	fail_cb->arg = dev->init_arg;
	dev->halt_cb = NULL;

	if (spdk_ftl_dev_free(dev, ftl_init_fail_cb, fail_cb)) {
		SPDK_ERRLOG("Unable to free the device\n");
		assert(0);
	}
}

static void
ftl_restore_device_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status)
{
	if (status) {
		SPDK_ERRLOG("Failed to restore the device from the SSD\n");
		goto error;
	}

	if (ftl_init_bands_state(dev)) {
		SPDK_ERRLOG("Unable to finish the initialization\n");
		goto error;
	}

	ftl_init_complete(dev);
	return;
error:
	ftl_init_fail(dev);
}

static void
ftl_restore_md_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status)
{
	if (status) {
		SPDK_ERRLOG("Failed to restore the metadata from the SSD\n");
		goto error;
	}

	/* After the metadata is read it should be possible to allocate the L2P */
	if (ftl_dev_l2p_alloc(dev)) {
		SPDK_ERRLOG("Failed to allocate the L2P\n");
		goto error;
	}

	if (ftl_restore_device(restore, ftl_restore_device_cb)) {
		SPDK_ERRLOG("Failed to start device restoration from the SSD\n");
		goto error;
	}

	return;
error:
	ftl_init_fail(dev);
}

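/* Existing-device path: take the UUID from the init opts and kick off the
 * asynchronous restore chain - metadata first (ftl_restore_md_cb), then the
 * device itself (ftl_restore_device_cb), which finishes the initialization.
 */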
static int
ftl_restore_state(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
{
	dev->uuid = opts->uuid;

	if (ftl_restore_md(dev, ftl_restore_md_cb)) {
		SPDK_ERRLOG("Failed to start metadata restoration from the SSD\n");
		return -1;
	}

	return 0;
}

static int
ftl_io_channel_create_cb(void *io_device, void *ctx)
{
	struct ftl_io_channel *ch = ctx;
	char mempool_name[32];
	struct spdk_ftl_dev *dev = io_device;

	snprintf(mempool_name, sizeof(mempool_name), "ftl_io_%p", ch);
	ch->elem_size = sizeof(struct ftl_md_io);
	ch->io_pool = spdk_mempool_create(mempool_name,
					  dev->conf.user_io_pool_size,
					  ch->elem_size,
					  0,
					  SPDK_ENV_SOCKET_ID_ANY);

	if (!ch->io_pool) {
		return -1;
	}

	return 0;
}

static void
ftl_io_channel_destroy_cb(void *io_device, void *ctx)
{
	struct ftl_io_channel *ch = ctx;

	spdk_mempool_free(ch->io_pool);
}

int
spdk_ftl_dev_init(const struct spdk_ftl_dev_init_opts *opts, spdk_ftl_init_fn cb, void *cb_arg)
{
	struct spdk_ftl_dev *dev;

	if (!opts || !opts->ctrlr) {
		return -EINVAL;
	}

	dev = calloc(1, sizeof(*dev));
	if (!dev) {
		return -ENOMEM;
	}

	if (opts->conf) {
		if (ftl_conf_validate(opts->conf)) {
			SPDK_ERRLOG("Invalid configuration\n");
			goto fail_sync;
		}

		memcpy(&dev->conf, opts->conf, sizeof(dev->conf));
	} else {
		spdk_ftl_conf_init_defaults(&dev->conf);
	}

	spdk_io_device_register(dev, ftl_io_channel_create_cb, ftl_io_channel_destroy_cb,
				sizeof(struct ftl_io_channel),
				NULL);

	dev->ioch = spdk_get_io_channel(dev);
	dev->init_cb = cb;
	dev->init_arg = cb_arg;
	dev->range = opts->range;
	dev->limit = SPDK_FTL_LIMIT_MAX;
	dev->name = strdup(opts->name);
	if (!dev->name) {
		SPDK_ERRLOG("Unable to set device name\n");
		goto fail_sync;
	}

	if (ftl_dev_nvme_init(dev, opts)) {
		SPDK_ERRLOG("Unable to initialize NVMe structures\n");
		goto fail_sync;
	}

	/* In case of errors, we free all of the memory in ftl_dev_free_sync(), */
	/* so we don't have to clean up in each of the init functions. */
	if (ftl_dev_retrieve_geo(dev)) {
		SPDK_ERRLOG("Unable to retrieve geometry\n");
		goto fail_sync;
	}

	if (ftl_check_init_opts(opts, &dev->geo)) {
		SPDK_ERRLOG("Invalid device configuration\n");
		goto fail_sync;
	}

	if (ftl_dev_init_punits(dev)) {
		SPDK_ERRLOG("Unable to initialize LUNs\n");
		goto fail_sync;
	}

	if (ftl_init_wptr_list(dev)) {
		SPDK_ERRLOG("Unable to init wptr\n");
		goto fail_sync;
	}

	if (ftl_dev_init_bands(dev)) {
		SPDK_ERRLOG("Unable to initialize band array\n");
		goto fail_sync;
	}

	dev->rwb = ftl_rwb_init(&dev->conf, dev->geo.ws_opt, dev->md_size);
	if (!dev->rwb) {
		SPDK_ERRLOG("Unable to initialize rwb structures\n");
		goto fail_sync;
	}

	dev->reloc = ftl_reloc_init(dev);
	if (!dev->reloc) {
		SPDK_ERRLOG("Unable to initialize reloc structures\n");
		goto fail_sync;
	}

	if (ftl_dev_init_threads(dev, opts)) {
		SPDK_ERRLOG("Unable to initialize device threads\n");
		goto fail_sync;
	}

	if (opts->mode & SPDK_FTL_MODE_CREATE) {
		if (ftl_setup_initial_state(dev)) {
			SPDK_ERRLOG("Failed to setup initial state of the device\n");
			goto fail_async;
		}

	} else {
		if (ftl_restore_state(dev, opts)) {
			SPDK_ERRLOG("Unable to restore device's state from the SSD\n");
			goto fail_async;
		}
	}

	return 0;
fail_sync:
	ftl_dev_free_sync(dev);
	return -ENOMEM;
fail_async:
	ftl_init_fail(dev);
	return 0;
}

static void
_ftl_halt_defrag(void *arg)
{
	ftl_reloc_halt(((struct spdk_ftl_dev *)arg)->reloc);
}

static void
ftl_dev_free_sync(struct spdk_ftl_dev *dev)
{
	struct spdk_ftl_dev *iter;
	size_t i;

	if (!dev) {
		return;
	}

	pthread_mutex_lock(&g_ftl_queue_lock);
	STAILQ_FOREACH(iter, &g_ftl_queue, stailq) {
		if (iter == dev) {
			STAILQ_REMOVE(&g_ftl_queue, dev, spdk_ftl_dev, stailq);
			break;
		}
	}
	pthread_mutex_unlock(&g_ftl_queue_lock);

	ftl_dev_free_thread(dev, &dev->read_thread);
	ftl_dev_free_thread(dev, &dev->core_thread);

	assert(LIST_EMPTY(&dev->wptr_list));

	ftl_dev_dump_bands(dev);
	ftl_dev_dump_stats(dev);

	spdk_put_io_channel(dev->ioch);
	spdk_io_device_unregister(dev, NULL);

	if (dev->bands) {
		for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
			free(dev->bands[i].chunk_buf);
			spdk_bit_array_free(&dev->bands[i].md.vld_map);
		}
	}

	spdk_mempool_free(dev->lba_pool);

	ftl_rwb_free(dev->rwb);
	ftl_reloc_free(dev->reloc);

	free(dev->name);
	free(dev->punits);
	free(dev->bands);
	free(dev->l2p);
	free(dev);
}

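/* Shutdown poller run on the core thread: once both FTL threads have
 * unregistered their pollers, tear down the device and invoke the user's
 * halt callback.
 */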
static int
ftl_halt_poller(void *ctx)
{
	struct spdk_ftl_dev *dev = ctx;
	spdk_ftl_fn halt_cb = dev->halt_cb;
	void *halt_arg = dev->halt_arg;

	if (!dev->core_thread.poller && !dev->read_thread.poller) {
		spdk_poller_unregister(&dev->halt_poller);

		ftl_anm_unregister_device(dev);
		ftl_dev_free_sync(dev);

		if (halt_cb) {
			halt_cb(halt_arg, 0);
		}
	}

	return 0;
}

static void
ftl_add_halt_poller(void *ctx)
{
	struct spdk_ftl_dev *dev = ctx;

	_ftl_halt_defrag(dev);

	assert(!dev->halt_poller);
	dev->halt_poller = spdk_poller_register(ftl_halt_poller, dev, 100);
}

int
spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_fn cb, void *cb_arg)
{
	if (!dev || !cb) {
		return -EINVAL;
	}

	if (dev->halt_cb) {
		return -EBUSY;
	}

	dev->halt_cb = cb;
	dev->halt_arg = cb_arg;
	dev->halt = 1;

	spdk_thread_send_msg(ftl_get_core_thread(dev), ftl_add_halt_poller, dev);
	return 0;
}

int
spdk_ftl_module_init(const struct ftl_module_init_opts *opts, spdk_ftl_fn cb, void *cb_arg)
{
	return ftl_anm_init(opts->anm_thread, cb, cb_arg);
}

int
spdk_ftl_module_fini(spdk_ftl_fn cb, void *cb_arg)
{
	return ftl_anm_free(cb, cb_arg);
}

SPDK_LOG_REGISTER_COMPONENT("ftl_init", SPDK_LOG_FTL_INIT)