/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

#include "mngt/ftl_mngt.h"
#include "mngt/ftl_mngt_steps.h"
#include "ftl_layout_upgrade.h"

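/* Context of a single P2L region upgrade: the temporary metadata buffer used to
 * initialize the new region and the descriptor of the v2 region being written.
 */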
struct upgrade_ctx {
	struct ftl_md			*md;
	struct ftl_layout_region	reg;
};

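/* Free the temporary metadata buffer once the upgrade no longer needs it */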
static void
v2_upgrade_cleanup(struct ftl_layout_upgrade_ctx *lctx)
{
	struct upgrade_ctx *ctx = lctx->ctx;

	if (ctx->md) {
		ftl_md_destroy(ctx->md, 0);
		ctx->md = NULL;
	}
}

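/* Release the upgrade context and report the outcome of the region upgrade */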
static void
v2_upgrade_finish(struct spdk_ftl_dev *dev, struct ftl_layout_upgrade_ctx *lctx, int status)
{
	struct upgrade_ctx *ctx = lctx->ctx;

	v2_upgrade_cleanup(lctx);
	ftl_region_upgrade_completed(dev, lctx, ctx->reg.entry_size, ctx->reg.num_entries, status);
}

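/* Completion callback for the metadata clear issued in v2_upgrade() */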
static void
v2_upgrade_md_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
	struct ftl_layout_upgrade_ctx *lctx = md->owner.cb_ctx;

	v2_upgrade_finish(dev, lctx, status);
}

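/* Open the v2 P2L checkpoint region and set up the metadata buffer used to clear it */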
static int
v2_upgrade_setup_ctx(struct spdk_ftl_dev *dev, struct ftl_layout_upgrade_ctx *lctx, uint32_t type)
{
	struct upgrade_ctx *ctx = lctx->ctx;
	const struct ftl_md_layout_ops *md_ops = &dev->nv_cache.nvc_type->ops.md_layout_ops;

	/* TODO: Add validation that there are no open bands */

	/* Open the v2 metadata region */
	if (md_ops->region_open(dev, lctx->reg->type, FTL_P2L_VERSION_2,
				sizeof(struct ftl_p2l_ckpt_page_no_vss),
				dev->layout.p2l.ckpt_pages, &ctx->reg)) {
		return -1;
	}

	ctx->md = ftl_md_create(dev, ctx->reg.current.blocks, 0, ctx->reg.name, FTL_MD_CREATE_HEAP,
				&ctx->reg);
	if (!ctx->md) {
		return -1;
	}

	ctx->md->owner.cb_ctx = lctx;
	ctx->md->cb = v2_upgrade_md_cb;

	return 0;
}

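/* Upgrade step: prepare the v2 region and zero out its metadata; completion is
 * reported asynchronously from v2_upgrade_md_cb()
 */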
static int
v2_upgrade(struct spdk_ftl_dev *dev, struct ftl_layout_upgrade_ctx *lctx)
{
	struct upgrade_ctx *ctx = lctx->ctx;

	if (v2_upgrade_setup_ctx(dev, lctx, lctx->reg->type)) {
		goto error;
	}
	ftl_md_clear(ctx->md, 0, NULL);
	return 0;
error:
	v2_upgrade_cleanup(lctx);
	return -1;
}

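/* Verify that the v1 region can be upgraded and reserve the v2 region up front */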
static int
v1_to_v2_upgrade_enabled(struct spdk_ftl_dev *dev, struct ftl_layout_region *region)
{
	const struct ftl_md_layout_ops *md_ops = &dev->nv_cache.nvc_type->ops.md_layout_ops;

	if (ftl_region_major_upgrade_enabled(dev, region)) {
		return -1;
	}

	/* Create the new P2L metadata region (v2) up front - this allocates a separate entry in the superblock and
	 * area on the cache for us. This is to reserve space for other region upgrades allocating new regions and it
	 * allows us to do an atomic upgrade of the whole region.
	 *
	 * If the upgrade is stopped by power failure/crash after the v2 region has been added, then the upgrade process
	 * will start again (since v1 still exists), but region_create will fail (since the v2 region has already been
	 * created). In such a case only verification of the region length by region_open is needed.
	 *
	 * Once the upgrade is fully done, the old v1 region entry will be removed from the SB and its area on the cache
	 * freed.
	 */
	if (md_ops->region_create(dev, region->type, FTL_P2L_VERSION_2, dev->layout.p2l.ckpt_pages) &&
	    md_ops->region_open(dev, region->type, FTL_P2L_VERSION_2, sizeof(struct ftl_p2l_ckpt_page_no_vss),
				dev->layout.p2l.ckpt_pages, NULL)) {
		return -1;
	}

	return 0;
}

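/* Upgrade descriptors for each P2L metadata version up to FTL_P2L_VERSION_CURRENT */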
struct ftl_region_upgrade_desc p2l_upgrade_desc[] = {
	[FTL_P2L_VERSION_0] = {
		.verify = ftl_region_upgrade_disabled
	},
	[FTL_P2L_VERSION_1] = {
		.verify = v1_to_v2_upgrade_enabled,
		.ctx_size = sizeof(struct upgrade_ctx),
		.new_version = FTL_P2L_VERSION_2,
		.upgrade = v2_upgrade,
	},
};

SPDK_STATIC_ASSERT(SPDK_COUNTOF(p2l_upgrade_desc) == FTL_P2L_VERSION_CURRENT,
		   "Missing P2L region upgrade descriptors");