/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/bdev_module.h"

#include "ftl_nv_cache.h"
#include "ftl_core.h"
#include "ftl_utils.h"
#include "ftl_band.h"
#include "ftl_internal.h"
#include "ftl_l2p_cache.h"
#include "ftl_mngt.h"
#include "ftl_mngt_steps.h"
#include "utils/ftl_addr_utils.h"

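/*
 * Context of the dirty shutdown recovery process. It lives for the duration of the
 * main recovery process and is shared with the per-iteration subprocesses via
 * ftl_mngt_get_caller_ctx().
 */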
struct ftl_mngt_recovery_ctx {
	/* Main recovery FTL management process */
	struct ftl_mngt_process *main;
	int status;
	TAILQ_HEAD(, ftl_band) open_bands;
	uint64_t open_bands_num;
	struct {
		struct ftl_layout_region region;
		struct ftl_md *md;
		uint64_t *l2p;
		uint64_t *seq_id;
		uint64_t count;
	} l2p_snippet;
	struct {
		uint64_t block_limit;
		uint64_t lba_first;
		uint64_t lba_last;
		uint32_t i;
	} iter;
	uint64_t p2l_ckpt_seq_id[FTL_LAYOUT_REGION_TYPE_P2L_COUNT];
};

static const struct ftl_mngt_process_desc g_desc_recovery_iteration;
static const struct ftl_mngt_process_desc g_desc_recovery;
static const struct ftl_mngt_process_desc g_desc_recovery_shm;

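/* The recovery iteration is complete once the current L2P snippet covers zero blocks */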
static bool
recovery_iter_done(struct spdk_ftl_dev *dev, struct ftl_mngt_recovery_ctx *ctx)
{
	return 0 == ctx->l2p_snippet.region.current.blocks;
}

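/*
 * Slide the L2P snippet window past the blocks already processed and recompute
 * the block count and LBA range [lba_first, lba_last) covered by the next iteration.
 */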
static void
recovery_iter_advance(struct spdk_ftl_dev *dev, struct ftl_mngt_recovery_ctx *ctx)
{
	struct ftl_layout_region *region, *snippet;
	uint64_t first_block, last_blocks;

	ctx->iter.i++;
	region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_L2P];
	snippet = &ctx->l2p_snippet.region;

	/* Advance past the blocks processed in the previous iteration */
	snippet->current.offset += snippet->current.blocks;
	snippet->current.blocks = region->current.offset + region->current.blocks - snippet->current.offset;
	snippet->current.blocks = spdk_min(snippet->current.blocks, ctx->iter.block_limit);

	first_block = snippet->current.offset - region->current.offset;
	ctx->iter.lba_first = first_block * (FTL_BLOCK_SIZE / dev->layout.l2p.addr_size);

	last_blocks = first_block + snippet->current.blocks;
	ctx->iter.lba_last = last_blocks * (FTL_BLOCK_SIZE / dev->layout.l2p.addr_size);

	if (ctx->iter.lba_last > dev->num_lbas) {
		ctx->iter.lba_last = dev->num_lbas;
	}
}

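/*
 * Set up the recovery: size the temporary shared-memory buffers that hold one
 * snippet of the L2P (and its per-LBA sequence IDs) so that the reconstruction
 * stays within the configured l2p_dram_limit, and derive the number of iterations
 * needed to cover the whole L2P.
 */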
static void
ftl_mngt_recovery_init(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_mngt_recovery_ctx *ctx = ftl_mngt_get_process_ctx(mngt);
	const uint64_t lbas_in_block = FTL_BLOCK_SIZE / dev->layout.l2p.addr_size;
	uint64_t mem_limit, lba_limit, l2p_limit, iterations, seq_limit;
	uint64_t l2p_limit_block, seq_limit_block, md_blocks;
	int md_flags;

	ctx->main = mngt;

	if (ftl_fast_recovery(dev)) {
		/* Shared memory fast recovery does not need the temporary buffers */
		ftl_mngt_next_step(mngt);
		return;
	}

	/*
	 * The recovery process allocates temporary buffers. To stay within the memory
	 * limit, free the L2P metadata buffers if they exist; they will be recreated
	 * in the L2P initialization phase.
	 */
	ftl_md_unlink(dev, FTL_L2P_CACHE_MD_NAME_L1, ftl_md_create_shm_flags(dev));
	ftl_md_unlink(dev, FTL_L2P_CACHE_MD_NAME_L2, ftl_md_create_shm_flags(dev));
	ftl_md_unlink(dev, FTL_L2P_CACHE_MD_NAME_L2_CTX, ftl_md_create_shm_flags(dev));

	/* The values below are in bytes */
	mem_limit = dev->conf.l2p_dram_limit * MiB;
	mem_limit = spdk_min(mem_limit, spdk_divide_round_up(dev->num_lbas * dev->layout.l2p.addr_size,
			     MiB) * MiB);

	lba_limit = mem_limit / (sizeof(uint64_t) + dev->layout.l2p.addr_size);
	l2p_limit = lba_limit * dev->layout.l2p.addr_size;
	iterations = spdk_divide_round_up(dev->num_lbas, lba_limit);

	ctx->iter.block_limit = spdk_divide_round_up(l2p_limit, FTL_BLOCK_SIZE);

	/* Round up to the block size */
	ctx->l2p_snippet.count = ctx->iter.block_limit * lbas_in_block;

	seq_limit = ctx->l2p_snippet.count * sizeof(uint64_t);

	FTL_NOTICELOG(dev, "Recovery memory limit: %"PRIu64"MiB\n", (uint64_t)(mem_limit / MiB));
	FTL_NOTICELOG(dev, "L2P resident size: %"PRIu64"MiB\n", (uint64_t)(l2p_limit / MiB));
	FTL_NOTICELOG(dev, "Seq ID resident size: %"PRIu64"MiB\n", (uint64_t)(seq_limit / MiB));
	FTL_NOTICELOG(dev, "Recovery iterations: %"PRIu64"\n", iterations);
	dev->sb->ckpt_seq_id = 0;

	/* Initialize the snippet region from the full L2P region */
	ctx->l2p_snippet.region = dev->layout.region[FTL_LAYOUT_REGION_TYPE_L2P];
	/* Limit the number of blocks in the region; this will be needed for ftl_md_set_region */
	ctx->l2p_snippet.region.current.blocks = ctx->iter.block_limit;

	l2p_limit_block = ctx->iter.block_limit;
	seq_limit_block = spdk_divide_round_up(seq_limit, FTL_BLOCK_SIZE);

	md_blocks = l2p_limit_block + seq_limit_block;
	md_flags = FTL_MD_CREATE_SHM | FTL_MD_CREATE_SHM_NEW;

	/* Initialize the L2P snippet metadata buffer */
	ctx->l2p_snippet.md = ftl_md_create(dev, md_blocks, 0, "l2p_recovery", md_flags,
					    &ctx->l2p_snippet.region);
	if (!ctx->l2p_snippet.md) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	ctx->l2p_snippet.l2p = ftl_md_get_buffer(ctx->l2p_snippet.md);

	/*
	 * Initialize the recovery iterator. Calling it with blocks set to zero (i.e. zero
	 * blocks processed so far) makes it recalculate the offsets and starting LBA to
	 * the initial position.
	 */
	ctx->l2p_snippet.region.current.blocks = 0;
	recovery_iter_advance(dev, ctx);

	/* Initialize the snippet of sequence IDs, located right after the L2P snippet */
	ctx->l2p_snippet.seq_id = (uint64_t *)((char *)ftl_md_get_buffer(ctx->l2p_snippet.md) +
					       (l2p_limit_block * FTL_BLOCK_SIZE));

	TAILQ_INIT(&ctx->open_bands);
	ftl_mngt_next_step(mngt);
}

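/* Release the temporary recovery buffers; also serves as the cleanup handler of the init step */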
static void
ftl_mngt_recovery_deinit(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_mngt_recovery_ctx *ctx = ftl_mngt_get_process_ctx(mngt);

	ftl_md_destroy(ctx->l2p_snippet.md, 0);
	ctx->l2p_snippet.md = NULL;
	ctx->l2p_snippet.seq_id = NULL;

	ftl_mngt_next_step(mngt);
}

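/*
 * Completion of a single recovery iteration: advance the iterator and re-execute
 * the "Recovery iterations" step of the main process until the whole L2P is done.
 */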
static void
recovery_iteration_cb(struct spdk_ftl_dev *dev, void *_ctx, int status)
{
	struct ftl_mngt_recovery_ctx *ctx = _ctx;

	recovery_iter_advance(dev, ctx);

	if (status) {
		ftl_mngt_fail_step(ctx->main);
	} else {
		ftl_mngt_continue_step(ctx->main);
	}
}

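/*
 * Execute one iteration of the L2P recovery subprocess, or move on when either
 * fast (shared memory) recovery is possible or all iterations have completed.
 */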
static void
ftl_mngt_recovery_run_iteration(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_mngt_recovery_ctx *ctx = ftl_mngt_get_process_ctx(mngt);

	if (ftl_fast_recovery(dev)) {
		ftl_mngt_skip_step(mngt);
		return;
	}

	if (recovery_iter_done(dev, ctx)) {
		ftl_mngt_next_step(mngt);
	} else {
		ftl_mngt_process_execute(dev, &g_desc_recovery_iteration, recovery_iteration_cb, ctx);
	}
}

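/*
 * Band metadata restore completion: sort the bands by state - free bands are
 * reinitialized, open bands are queued for P2L-based recovery, and closed bands
 * stay on the shut list.
 */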
static void
restore_band_state_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
	struct ftl_mngt_process *mngt = md->owner.cb_ctx;
	struct ftl_mngt_recovery_ctx *pctx = ftl_mngt_get_process_ctx(mngt);
	struct ftl_band *band;
	uint64_t num_bands = ftl_get_num_bands(dev);
	uint64_t i;

	if (status) {
		/* Restore failed, fail the step */
		ftl_mngt_fail_step(mngt);
		return;
	}

	for (i = 0; i < num_bands; i++) {
		band = &dev->bands[i];

		switch (band->md->state) {
		case FTL_BAND_STATE_FREE:
			ftl_band_initialize_free_state(band);
			break;
		case FTL_BAND_STATE_OPEN:
			TAILQ_REMOVE(&band->dev->shut_bands, band, queue_entry);
			TAILQ_INSERT_HEAD(&pctx->open_bands, band, queue_entry);
			break;
		case FTL_BAND_STATE_CLOSED:
			break;
		default:
			status = -EINVAL;
			break;
		}
	}

	if (status) {
		ftl_mngt_fail_step(mngt);
	} else {
		ftl_mngt_next_step(mngt);
	}
}

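/* Kick off restoration of the band state metadata region */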
static void
ftl_mngt_recovery_restore_band_state(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];

	md->owner.cb_ctx = mngt;
	md->cb = restore_band_state_cb;
	ftl_md_restore(md);
}

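/* Zero the sequence ID snippet before the valid map is rebuilt for this iteration */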
static void
ftl_mngt_recovery_iteration_init_seq_ids(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_mngt_recovery_ctx *ctx = ftl_mngt_get_caller_ctx(mngt);
	size_t size = sizeof(ctx->l2p_snippet.seq_id[0]) * ctx->l2p_snippet.count;

	memset(ctx->l2p_snippet.seq_id, 0, size);

	ftl_mngt_next_step(mngt);
}

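/* Common completion callback for loading and persisting the L2P snippet */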
static void
l2p_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
	struct ftl_mngt_process *mngt = md->owner.cb_ctx;

	if (status) {
		ftl_mngt_fail_step(mngt);
	} else {
		ftl_mngt_next_step(mngt);
	}
}

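/*
 * Point the snippet metadata at the current window of the on-disk L2P region and
 * load that part of the L2P into the temporary buffer.
 */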
static void
ftl_mngt_recovery_iteration_load_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_mngt_recovery_ctx *ctx = ftl_mngt_get_caller_ctx(mngt);
	struct ftl_md *md = ctx->l2p_snippet.md;
	struct ftl_layout_region *region = &ctx->l2p_snippet.region;

	FTL_NOTICELOG(dev, "L2P recovery, iteration %u\n", ctx->iter.i);
	FTL_NOTICELOG(dev, "Load L2P, blocks [%"PRIu64", %"PRIu64"), LBAs [%"PRIu64", %"PRIu64")\n",
		      region->current.offset, region->current.offset + region->current.blocks,
		      ctx->iter.lba_first, ctx->iter.lba_last);

	if (ftl_md_set_region(md, &ctx->l2p_snippet.region)) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	md->owner.cb_ctx = mngt;
	md->cb = l2p_cb;
	ftl_md_restore(md);
}

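/* Persist the recovered part of the L2P back to its metadata region */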
static void
ftl_mngt_recovery_iteration_save_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_mngt_recovery_ctx *ctx = ftl_mngt_get_caller_ctx(mngt);
	struct ftl_md *md = ctx->l2p_snippet.md;

	md->owner.cb_ctx = mngt;
	md->cb = l2p_cb;
	ftl_md_persist(md);
}

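/*
 * Walk the LBA range covered by this iteration and mark every mapped address in
 * the valid map; for addresses on the base device, also bump the owning band's
 * valid block counter. An address that is already marked valid indicates a
 * corrupted L2P and fails the recovery.
 */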
static void
ftl_mngt_recovery_iteration_restore_valid_map(struct spdk_ftl_dev *dev,
		struct ftl_mngt_process *mngt)
{
	struct ftl_mngt_recovery_ctx *pctx = ftl_mngt_get_caller_ctx(mngt);
	uint64_t lba, lba_off;
	ftl_addr addr;

	for (lba = pctx->iter.lba_first; lba < pctx->iter.lba_last; lba++) {
		lba_off = lba - pctx->iter.lba_first;
		addr = ftl_addr_load(dev, pctx->l2p_snippet.l2p, lba_off);

		if (addr == FTL_ADDR_INVALID) {
			continue;
		}

		if (!ftl_addr_in_nvc(dev, addr)) {
			struct ftl_band *band = ftl_band_from_addr(dev, addr);
			band->p2l_map.num_valid++;
		}

		if (ftl_bitmap_get(dev->valid_map, addr)) {
			assert(false);
			ftl_mngt_fail_step(mngt);
			return;
		} else {
			ftl_bitmap_set(dev->valid_map, addr);
		}
	}

	ftl_mngt_next_step(mngt);
}

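/* Collect the sequence ID of every P2L checkpoint region for later band matching */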
static void
p2l_ckpt_preprocess(struct spdk_ftl_dev *dev, struct ftl_mngt_recovery_ctx *pctx)
{
	uint64_t seq_id;
	int md_region, ckpt_id;

	for (md_region = FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN;
	     md_region <= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX; md_region++) {
		ckpt_id = md_region - FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN;
		seq_id = ftl_mngt_p2l_ckpt_get_seq_id(dev, md_region);
		pctx->p2l_ckpt_seq_id[ckpt_id] = seq_id;
		FTL_NOTICELOG(dev, "P2L ckpt_id=%d found seq_id=%"PRIu64"\n", ckpt_id, seq_id);
	}
}

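/*
 * Rebuild an open band's P2L map from the checkpoint whose sequence ID matches the
 * band's; if none matches, the band holds no valid blocks and its write pointer is
 * reset to the beginning of the band.
 */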
static int
p2l_ckpt_restore_p2l(struct ftl_mngt_recovery_ctx *pctx, struct ftl_band *band)
{
	uint64_t seq_id;
	int md_region, ckpt_id;

	memset(band->p2l_map.band_map, -1,
	       FTL_BLOCK_SIZE * ftl_p2l_map_num_blocks(band->dev));

	for (md_region = FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN;
	     md_region <= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX; md_region++) {
		ckpt_id = md_region - FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN;
		seq_id = pctx->p2l_ckpt_seq_id[ckpt_id];
		if (seq_id == band->md->seq) {
			FTL_NOTICELOG(band->dev, "Restore band P2L band_id=%u ckpt_id=%d seq_id=%"
				      PRIu64"\n", band->id, ckpt_id, seq_id);
			return ftl_mngt_p2l_ckpt_restore(band, md_region, seq_id);
		}
	}

	/* The band is open but has no valid blocks in it, set the write pointer to 0 */
	ftl_band_iter_init(band);
	FTL_NOTICELOG(band->dev, "Restore band P2L band_id=%u, band_seq_id=%"PRIu64" does not"
		      " match any P2L checkpoint\n", band->id, band->md->seq);
	return 0;
}

static void
ftl_mngt_recovery_pre_process_p2l(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_mngt_recovery_ctx *pctx = ftl_mngt_get_process_ctx(mngt);

	p2l_ckpt_preprocess(dev, pctx);
	ftl_mngt_next_step(mngt);
}

static void
ftl_mngt_recover_seq_id(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	ftl_recover_max_seq(dev);
	ftl_mngt_next_step(mngt);
}

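/*
 * Recover the open bands one at a time. On the first invocation all open bands get
 * their P2L maps allocated and restored from the checkpoints; each re-execution of
 * the step then moves one recovered band onto the shut list, to be picked up by a
 * writer when band initialization is finalized.
 */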
static void
ftl_mngt_recovery_open_bands_p2l(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_mngt_recovery_ctx *pctx = ftl_mngt_get_process_ctx(mngt);
	struct ftl_band *band;

	if (TAILQ_EMPTY(&pctx->open_bands)) {
		FTL_NOTICELOG(dev, "No more open bands to recover from P2L\n");
		if (pctx->status) {
			ftl_mngt_fail_step(mngt);
		} else {
			ftl_mngt_next_step(mngt);
		}
		return;
	}

	if (!ftl_mngt_get_step_ctx(mngt)) {
		ftl_mngt_alloc_step_ctx(mngt, sizeof(bool));

		/* First time this step is called, initialize all open bands */
		TAILQ_FOREACH(band, &pctx->open_bands, queue_entry) {
			band->md->df_p2l_map = FTL_DF_OBJ_ID_INVALID;
			if (ftl_band_alloc_p2l_map(band)) {
				FTL_ERRLOG(dev, "Open band recovery ERROR, Cannot allocate LBA map\n");
				ftl_mngt_fail_step(mngt);
				return;
			}

			if (p2l_ckpt_restore_p2l(pctx, band)) {
				FTL_ERRLOG(dev, "Open band recovery ERROR, Cannot restore P2L\n");
				ftl_mngt_fail_step(mngt);
				return;
			}

			if (!band->p2l_map.p2l_ckpt) {
				band->p2l_map.p2l_ckpt = ftl_p2l_ckpt_acquire_region_type(dev, band->md->p2l_md_region);
				if (!band->p2l_map.p2l_ckpt) {
					FTL_ERRLOG(dev, "Open band recovery ERROR, Cannot acquire P2L\n");
					ftl_mngt_fail_step(mngt);
					return;
				}
			}
		}
	}

	band = TAILQ_FIRST(&pctx->open_bands);

	if (ftl_band_filled(band, band->md->iter.offset)) {
		band->md->state = FTL_BAND_STATE_FULL;
	}

	/*
	 * In the next step (finalize band initialization) this band will be assigned
	 * to a writer. Temporarily move it to the closed (shut) bands list, from which
	 * that step will pick it up.
	 */
	TAILQ_REMOVE(&pctx->open_bands, band, queue_entry);
	TAILQ_INSERT_TAIL(&dev->shut_bands, band, queue_entry);

	FTL_NOTICELOG(dev, "Open band recovered, id = %u, seq id %"PRIu64", write offset %"PRIu64"\n",
		      band->id, band->md->seq, band->md->iter.offset);

	ftl_mngt_continue_step(mngt);
}

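/* Restore the valid map counters; used by the shared memory recovery path */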
static void
ftl_mngt_restore_valid_counters(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	ftl_valid_map_load_state(dev);
	ftl_mngt_next_step(mngt);
}

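/* Run the SHM-specific recovery subprocess when fast recovery is possible, otherwise skip */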
static void
ftl_mngt_recovery_shm_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	if (ftl_fast_recovery(dev)) {
		ftl_mngt_call_process(mngt, &g_desc_recovery_shm);
	} else {
		ftl_mngt_skip_step(mngt);
	}
}

/*
 * During dirty shutdown recovery, the whole L2P needs to be reconstructed. However,
 * recreating it all at once may take up too much DRAM, so it's done in multiple
 * iterations. This process describes the recovery of one part of the L2P in a single
 * iteration.
 */
static const struct ftl_mngt_process_desc g_desc_recovery_iteration = {
	.name = "FTL recovery iteration",
	.steps = {
		{
			.name = "Load L2P",
			.action = ftl_mngt_recovery_iteration_load_l2p,
		},
		{
			.name = "Initialize sequence IDs",
			.action = ftl_mngt_recovery_iteration_init_seq_ids,
		},
		{
			.name = "Restore valid map",
			.action = ftl_mngt_recovery_iteration_restore_valid_map,
		},
		{
			.name = "Save L2P",
			.action = ftl_mngt_recovery_iteration_save_l2p,
		},
		{}
	}
};

/*
 * Loading of FTL after dirty shutdown. Recovers metadata and the L2P, and decides
 * on the number of recovery iterations to execute (dependent on the ratio of the
 * L2P cache size to the total L2P size).
 */
static const struct ftl_mngt_process_desc g_desc_recovery = {
	.name = "FTL recovery",
	.ctx_size = sizeof(struct ftl_mngt_recovery_ctx),
	.steps = {
		{
			.name = "Initialize recovery",
			.action = ftl_mngt_recovery_init,
			.cleanup = ftl_mngt_recovery_deinit
		},
		{
			.name = "Recover band state",
			.action = ftl_mngt_recovery_restore_band_state,
		},
		{
			.name = "Initialize P2L checkpointing",
			.action = ftl_mngt_p2l_init_ckpt,
			.cleanup = ftl_mngt_p2l_deinit_ckpt
		},
		{
			.name = "Restore P2L checkpoints",
			.action = ftl_mngt_p2l_restore_ckpt
		},
		{
			.name = "Preprocess P2L checkpoints",
			.action = ftl_mngt_recovery_pre_process_p2l
		},
		{
			.name = "Recover open bands P2L",
			.action = ftl_mngt_recovery_open_bands_p2l
		},
		{
			.name = "Recover max seq ID",
			.action = ftl_mngt_recover_seq_id
		},
		{
			.name = "Recovery iterations",
			.action = ftl_mngt_recovery_run_iteration,
		},
		{
			.name = "Deinitialize recovery",
			.action = ftl_mngt_recovery_deinit
		},
		{
			.name = "Initialize L2P",
			.action = ftl_mngt_init_l2p,
			.cleanup = ftl_mngt_deinit_l2p
		},
		{
			.name = "Recover L2P from shared memory",
			.action = ftl_mngt_recovery_shm_l2p,
		},
		{
			.name = "Finalize band initialization",
			.action = ftl_mngt_finalize_init_bands,
		},
		{
			.name = "Free P2L region bufs",
			.action = ftl_mngt_p2l_free_bufs,
		},
		{
			.name = "Start core poller",
			.action = ftl_mngt_start_core_poller,
			.cleanup = ftl_mngt_stop_core_poller
		},
		{
			.name = "Self test on startup",
			.action = ftl_mngt_self_test
		},
		{
			.name = "Finalize initialization",
			.action = ftl_mngt_finalize_startup,
		},
		{}
	}
};

/*
 * Shared-memory-specific steps for dirty shutdown recovery - the main task is
 * rebuilding the state of the L2P cache (paged in/out status, dirtiness etc. of the
 * individual pages).
 */
static const struct ftl_mngt_process_desc g_desc_recovery_shm = {
	.name = "FTL recovery from SHM",
	.ctx_size = sizeof(struct ftl_mngt_recovery_ctx),
	.steps = {
		{
			.name = "Restore L2P from SHM",
			.action = ftl_mngt_restore_l2p,
		},
		{
			.name = "Restore valid maps counters",
			.action = ftl_mngt_restore_valid_counters,
		},
		{}
	}
};

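/* Entry point of dirty shutdown recovery: run the main recovery process */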
void
ftl_mngt_recover(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	ftl_mngt_call_process(mngt, &g_desc_recovery);
}
613