xref: /spdk/lib/ftl/upgrade/ftl_chunk_upgrade.c (revision a5c04e6d852a9f13a1dd2d3cbce4c99e5aaac1d7)
18c41c403SKozlowski Mateusz /*   SPDX-License-Identifier: BSD-3-Clause
2*a5c04e6dSMateusz Kozlowski  *   Copyright 2023 Solidigm All Rights Reserved
3a6dbe372Spaul luse  *   Copyright (C) 2022 Intel Corporation.
48c41c403SKozlowski Mateusz  *   All rights reserved.
58c41c403SKozlowski Mateusz  */
68c41c403SKozlowski Mateusz 
78c41c403SKozlowski Mateusz #include "ftl_nv_cache.h"
88c41c403SKozlowski Mateusz #include "ftl_layout_upgrade.h"
9*a5c04e6dSMateusz Kozlowski #include "ftl_utils.h"
10*a5c04e6dSMateusz Kozlowski 
/* Per-region upgrade state. Allocated by the layout-upgrade framework
 * (sized via nvc_upgrade_desc[FTL_NVC_VERSION_1].ctx_size) and carried
 * in lctx->ctx across the asynchronous upgrade steps.
 */
struct upgrade_ctx {
	struct ftl_md			*md_v2;	/* staging buffer for the new v2 chunk MD; NULL once released */
	struct ftl_layout_region	reg_v2;	/* descriptor of the new v2 metadata region */
};
15*a5c04e6dSMateusz Kozlowski 
16*a5c04e6dSMateusz Kozlowski static void
v1_to_v2_upgrade_cleanup(struct ftl_layout_upgrade_ctx * lctx)17*a5c04e6dSMateusz Kozlowski v1_to_v2_upgrade_cleanup(struct ftl_layout_upgrade_ctx *lctx)
18*a5c04e6dSMateusz Kozlowski {
19*a5c04e6dSMateusz Kozlowski 	struct upgrade_ctx *ctx = lctx->ctx;
20*a5c04e6dSMateusz Kozlowski 
21*a5c04e6dSMateusz Kozlowski 	if (ctx->md_v2) {
22*a5c04e6dSMateusz Kozlowski 		ftl_md_destroy(ctx->md_v2, 0);
23*a5c04e6dSMateusz Kozlowski 		ctx->md_v2 = NULL;
24*a5c04e6dSMateusz Kozlowski 	}
25*a5c04e6dSMateusz Kozlowski }
26*a5c04e6dSMateusz Kozlowski 
27*a5c04e6dSMateusz Kozlowski static void
v1_to_v2_upgrade_finish(struct spdk_ftl_dev * dev,struct ftl_layout_upgrade_ctx * lctx,int status)28*a5c04e6dSMateusz Kozlowski v1_to_v2_upgrade_finish(struct spdk_ftl_dev *dev, struct ftl_layout_upgrade_ctx *lctx, int status)
29*a5c04e6dSMateusz Kozlowski {
30*a5c04e6dSMateusz Kozlowski 	struct upgrade_ctx *ctx = lctx->ctx;
31*a5c04e6dSMateusz Kozlowski 
32*a5c04e6dSMateusz Kozlowski 	v1_to_v2_upgrade_cleanup(lctx);
33*a5c04e6dSMateusz Kozlowski 	ftl_region_upgrade_completed(dev, lctx, ctx->reg_v2.entry_size, ctx->reg_v2.num_entries, status);
34*a5c04e6dSMateusz Kozlowski }
35*a5c04e6dSMateusz Kozlowski 
36*a5c04e6dSMateusz Kozlowski static void
v1_to_v2_upgrade_set(struct ftl_layout_upgrade_ctx * lctx)37*a5c04e6dSMateusz Kozlowski v1_to_v2_upgrade_set(struct ftl_layout_upgrade_ctx *lctx)
38*a5c04e6dSMateusz Kozlowski {
39*a5c04e6dSMateusz Kozlowski 	struct upgrade_ctx *ctx = lctx->ctx;
40*a5c04e6dSMateusz Kozlowski 	struct ftl_nv_cache_chunk_md *md = ftl_md_get_buffer(ctx->md_v2);
41*a5c04e6dSMateusz Kozlowski 
42*a5c04e6dSMateusz Kozlowski 	assert(sizeof(struct ftl_nv_cache_chunk_md) == FTL_BLOCK_SIZE);
43*a5c04e6dSMateusz Kozlowski 	for (uint64_t i = 0; i < ctx->reg_v2.current.blocks; i++, md++) {
44*a5c04e6dSMateusz Kozlowski 		ftl_nv_cache_chunk_md_initialize(md);
45*a5c04e6dSMateusz Kozlowski 	}
46*a5c04e6dSMateusz Kozlowski }
47*a5c04e6dSMateusz Kozlowski 
48*a5c04e6dSMateusz Kozlowski static void
v1_to_v2_upgrade_md_cb(struct spdk_ftl_dev * dev,struct ftl_md * md,int status)49*a5c04e6dSMateusz Kozlowski v1_to_v2_upgrade_md_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
50*a5c04e6dSMateusz Kozlowski {
51*a5c04e6dSMateusz Kozlowski 	struct ftl_layout_upgrade_ctx *lctx = md->owner.cb_ctx;
52*a5c04e6dSMateusz Kozlowski 
53*a5c04e6dSMateusz Kozlowski 	v1_to_v2_upgrade_finish(dev, lctx, status);
54*a5c04e6dSMateusz Kozlowski }
55*a5c04e6dSMateusz Kozlowski 
56*a5c04e6dSMateusz Kozlowski static int
v1_to_v2_upgrade_setup_ctx(struct spdk_ftl_dev * dev,struct ftl_layout_upgrade_ctx * lctx,uint32_t type)57*a5c04e6dSMateusz Kozlowski v1_to_v2_upgrade_setup_ctx(struct spdk_ftl_dev *dev, struct ftl_layout_upgrade_ctx *lctx,
58*a5c04e6dSMateusz Kozlowski 			   uint32_t type)
59*a5c04e6dSMateusz Kozlowski {
60*a5c04e6dSMateusz Kozlowski 	struct upgrade_ctx *ctx = lctx->ctx;
61*a5c04e6dSMateusz Kozlowski 	const struct ftl_md_layout_ops *md_ops = &dev->nv_cache.nvc_type->ops.md_layout_ops;
62*a5c04e6dSMateusz Kozlowski 
63*a5c04e6dSMateusz Kozlowski 	assert(sizeof(struct ftl_nv_cache_chunk_md) == FTL_BLOCK_SIZE);
64*a5c04e6dSMateusz Kozlowski 
65*a5c04e6dSMateusz Kozlowski 	/* Create the new NV cache metadata region - v2 */
66*a5c04e6dSMateusz Kozlowski 	if (md_ops->region_open(dev, type, FTL_NVC_VERSION_2, sizeof(struct ftl_nv_cache_chunk_md),
67*a5c04e6dSMateusz Kozlowski 				dev->layout.nvc.chunk_count, &ctx->reg_v2)) {
68*a5c04e6dSMateusz Kozlowski 		return -1;
69*a5c04e6dSMateusz Kozlowski 	}
70*a5c04e6dSMateusz Kozlowski 	ctx->md_v2 = ftl_md_create(dev, ctx->reg_v2.current.blocks, 0, ctx->reg_v2.name, FTL_MD_CREATE_HEAP,
71*a5c04e6dSMateusz Kozlowski 				   &ctx->reg_v2);
72*a5c04e6dSMateusz Kozlowski 	if (!ctx->md_v2) {
73*a5c04e6dSMateusz Kozlowski 		return -1;
74*a5c04e6dSMateusz Kozlowski 	}
75*a5c04e6dSMateusz Kozlowski 
76*a5c04e6dSMateusz Kozlowski 	ctx->md_v2->owner.cb_ctx = lctx;
77*a5c04e6dSMateusz Kozlowski 	ctx->md_v2->cb = v1_to_v2_upgrade_md_cb;
78*a5c04e6dSMateusz Kozlowski 	v1_to_v2_upgrade_set(lctx);
79*a5c04e6dSMateusz Kozlowski 
80*a5c04e6dSMateusz Kozlowski 	return 0;
81*a5c04e6dSMateusz Kozlowski }
82*a5c04e6dSMateusz Kozlowski 
83*a5c04e6dSMateusz Kozlowski static int
v1_to_v2_upgrade(struct spdk_ftl_dev * dev,struct ftl_layout_upgrade_ctx * lctx)84*a5c04e6dSMateusz Kozlowski v1_to_v2_upgrade(struct spdk_ftl_dev *dev, struct ftl_layout_upgrade_ctx *lctx)
85*a5c04e6dSMateusz Kozlowski {
86*a5c04e6dSMateusz Kozlowski 	struct upgrade_ctx *ctx = lctx->ctx;
87*a5c04e6dSMateusz Kozlowski 
88*a5c04e6dSMateusz Kozlowski 	/*
89*a5c04e6dSMateusz Kozlowski 	 * Chunks at this point should be fully drained of user data (major upgrade). This means that it's safe to reinitialize
90*a5c04e6dSMateusz Kozlowski 	 * the MD and fully change the structure layout (we're not interpreting the metadata contents at this point).
91*a5c04e6dSMateusz Kozlowski 	 * Once we're done the version of the region in the superblock will be updated.
92*a5c04e6dSMateusz Kozlowski 	 */
93*a5c04e6dSMateusz Kozlowski 
94*a5c04e6dSMateusz Kozlowski 	if (v1_to_v2_upgrade_setup_ctx(dev, lctx, lctx->reg->type)) {
95*a5c04e6dSMateusz Kozlowski 		goto error;
96*a5c04e6dSMateusz Kozlowski 	}
97*a5c04e6dSMateusz Kozlowski 	ftl_md_persist(ctx->md_v2);
98*a5c04e6dSMateusz Kozlowski 	return 0;
99*a5c04e6dSMateusz Kozlowski 
100*a5c04e6dSMateusz Kozlowski error:
101*a5c04e6dSMateusz Kozlowski 	v1_to_v2_upgrade_cleanup(lctx);
102*a5c04e6dSMateusz Kozlowski 	return -1;
103*a5c04e6dSMateusz Kozlowski }
104*a5c04e6dSMateusz Kozlowski 
105*a5c04e6dSMateusz Kozlowski static int
v1_to_v2_upgrade_enabled(struct spdk_ftl_dev * dev,struct ftl_layout_region * region)106*a5c04e6dSMateusz Kozlowski v1_to_v2_upgrade_enabled(struct spdk_ftl_dev *dev, struct ftl_layout_region *region)
107*a5c04e6dSMateusz Kozlowski {
108*a5c04e6dSMateusz Kozlowski 	const struct ftl_md_layout_ops *md_ops = &dev->nv_cache.nvc_type->ops.md_layout_ops;
109*a5c04e6dSMateusz Kozlowski 
110*a5c04e6dSMateusz Kozlowski 	assert(sizeof(struct ftl_nv_cache_chunk_md) == FTL_BLOCK_SIZE);
111*a5c04e6dSMateusz Kozlowski 
112*a5c04e6dSMateusz Kozlowski 	if (ftl_region_major_upgrade_enabled(dev, region)) {
113*a5c04e6dSMateusz Kozlowski 		return -1;
114*a5c04e6dSMateusz Kozlowski 	}
115*a5c04e6dSMateusz Kozlowski 
116*a5c04e6dSMateusz Kozlowski 	/* Create the new NV cache metadata region (v2) up front - this allocates a separate entry in the superblock and
117*a5c04e6dSMateusz Kozlowski 	 * area on the cache for us. This is to reserve space for other region upgrades allocating new regions and it
118*a5c04e6dSMateusz Kozlowski 	 * allows us to do an atomic upgrade of the whole region.
119*a5c04e6dSMateusz Kozlowski 	 *
120*a5c04e6dSMateusz Kozlowski 	 * If the upgrade is stopped by power failure/crash after the V2 region has been added, then the upgrade process
121*a5c04e6dSMateusz Kozlowski 	 * will start again (since V1 still exists), but region_create will fail (since the v2 region has already been
122*a5c04e6dSMateusz Kozlowski 	 * created). In such a case only verification of the region length by region_open is needed.
123*a5c04e6dSMateusz Kozlowski 	 *
124*a5c04e6dSMateusz Kozlowski 	 * Once the upgrade is fully done, the old v1 region entry will be removed from the SB and its area on the cache
125*a5c04e6dSMateusz Kozlowski 	 * freed.
126*a5c04e6dSMateusz Kozlowski 	 */
127*a5c04e6dSMateusz Kozlowski 	if (md_ops->region_create(dev, region->type, FTL_NVC_VERSION_2, dev->layout.nvc.chunk_count) &&
128*a5c04e6dSMateusz Kozlowski 	    md_ops->region_open(dev, region->type, FTL_NVC_VERSION_2, sizeof(struct ftl_nv_cache_chunk_md),
129*a5c04e6dSMateusz Kozlowski 				dev->layout.nvc.chunk_count, NULL)) {
130*a5c04e6dSMateusz Kozlowski 		return -1;
131*a5c04e6dSMateusz Kozlowski 	}
132*a5c04e6dSMateusz Kozlowski 
133*a5c04e6dSMateusz Kozlowski 	return 0;
134*a5c04e6dSMateusz Kozlowski }
1358c41c403SKozlowski Mateusz 
/* Upgrade descriptor table for the NV cache chunk MD region, indexed by the
 * on-disk region version found in the superblock. Version 0 cannot be
 * upgraded; version 1 is upgraded in place to version 2 via the handlers
 * above.
 */
struct ftl_region_upgrade_desc nvc_upgrade_desc[] = {
	[FTL_NVC_VERSION_0] = {
		.verify = ftl_region_upgrade_disabled,
	},
	[FTL_NVC_VERSION_1] = {
		.verify = v1_to_v2_upgrade_enabled,
		.ctx_size = sizeof(struct upgrade_ctx),
		.new_version = FTL_NVC_VERSION_2,
		.upgrade = v1_to_v2_upgrade,
	},
};

/* Every version below FTL_NVC_VERSION_CURRENT must have a descriptor entry. */
SPDK_STATIC_ASSERT(SPDK_COUNTOF(nvc_upgrade_desc) == FTL_NVC_VERSION_CURRENT,
		   "Missing NVC region upgrade descriptors");
150