xref: /spdk/lib/ftl/mngt/ftl_mngt_bdev.c (revision 780cb81f62366bd50be32af7aaaa51db1443acf6)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright 2023 Solidigm All Rights Reserved
3  *   Copyright (C) 2022 Intel Corporation.
4  *   All rights reserved.
5  */
6 
7 #include "spdk/bdev_module.h"
8 #include "spdk/ftl.h"
9 
10 #include "ftl_nv_cache.h"
11 #include "ftl_internal.h"
12 #include "ftl_mngt_steps.h"
13 #include "ftl_internal.h"
14 #include "ftl_core.h"
15 #include "utils/ftl_defs.h"
16 
17 #define MINIMUM_CACHE_SIZE_GIB 5
18 #define MINIMUM_BASE_SIZE_GIB 20
19 
/* Dummy bdev module used to claim bdevs. FTL itself is not a registered bdev
 * module, so this stand-in identifies the claim owner when claiming the base
 * and cache bdevs. */
static struct spdk_bdev_module g_ftl_bdev_module = {
	.name   = "ftl_lib",
};
24 
25 static inline uint64_t
26 ftl_calculate_num_blocks_in_band(struct spdk_bdev_desc *desc)
27 {
28 	/* TODO: this should be passed via input parameter */
29 #ifdef SPDK_FTL_ZONE_EMU_BLOCKS
30 	return SPDK_FTL_ZONE_EMU_BLOCKS;
31 #else
32 	return (1ULL << 30) / FTL_BLOCK_SIZE;
33 #endif
34 }
35 
36 static void
37 base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
38 {
39 	switch (type) {
40 	case SPDK_BDEV_EVENT_REMOVE:
41 		assert(0);
42 		break;
43 	default:
44 		break;
45 	}
46 }
47 
/*
 * Management step: open, claim and validate the base (data) bdev.
 *
 * On success the following dev fields are populated: base_bdev_desc,
 * base_ioch, xfer_size, base_type, md_size, num_blocks_in_band, is_zoned;
 * the step then advances via ftl_mngt_next_step(). On any failure the step
 * is failed via ftl_mngt_fail_step() — resources acquired here (descriptor,
 * IO channel) are expected to be released by the corresponding close step
 * (ftl_mngt_close_base_bdev) during rollback.
 */
void
ftl_mngt_open_base_bdev(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	uint32_t block_size;
	uint64_t num_blocks;
	const char *bdev_name = dev->conf.base_bdev;
	struct spdk_bdev *bdev;

	if (spdk_bdev_open_ext(bdev_name, true, base_bdev_event_cb,
			       dev, &dev->base_bdev_desc)) {
		FTL_ERRLOG(dev, "Unable to open bdev: %s\n", bdev_name);
		goto error;
	}

	bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);

	if (spdk_bdev_module_claim_bdev(bdev, dev->base_bdev_desc, &g_ftl_bdev_module)) {
		/* clear the desc so that we don't try to release the claim on cleanup */
		spdk_bdev_close(dev->base_bdev_desc);
		dev->base_bdev_desc = NULL;
		FTL_ERRLOG(dev, "Unable to claim bdev %s\n", bdev_name);
		goto error;
	}

	/* FTL only supports bdevs whose block size matches FTL_BLOCK_SIZE */
	block_size = spdk_bdev_get_block_size(bdev);
	if (block_size != FTL_BLOCK_SIZE) {
		FTL_ERRLOG(dev, "Unsupported block size (%"PRIu32")\n", block_size);
		goto error;
	}

	num_blocks = spdk_bdev_get_num_blocks(bdev);

	/* Enforce the minimum base device capacity (MINIMUM_BASE_SIZE_GIB) */
	if (num_blocks * block_size < MINIMUM_BASE_SIZE_GIB * GiB) {
		FTL_ERRLOG(dev, "Bdev %s is too small, requires, at least %uGiB capacity\n",
			   spdk_bdev_get_name(bdev), MINIMUM_BASE_SIZE_GIB);
		goto error;
	}

	dev->base_ioch = spdk_bdev_get_io_channel(dev->base_bdev_desc);
	if (!dev->base_ioch) {
		FTL_ERRLOG(dev, "Failed to create base bdev IO channel\n");
		goto error;
	}

	/* The write unit size must match the number of LBAs per FTL block */
	dev->xfer_size = ftl_get_write_unit_size(bdev);
	if (dev->xfer_size != FTL_NUM_LBA_IN_BLOCK) {
		FTL_ERRLOG(dev, "Unsupported xfer_size (%"PRIu64")\n", dev->xfer_size);
		goto error;
	}

	/* Resolve the FTL base-device type descriptor matching this bdev */
	dev->base_type = ftl_base_device_get_type_by_bdev(dev, bdev);
	if (!dev->base_type) {
		FTL_ERRLOG(dev, "Failed to get base device type\n");
		goto error;
	}
	/* TODO: validate size when base device VSS usage gets added */
	dev->md_size = spdk_bdev_get_md_size(bdev);

	/* The base device type must provide metadata layout operations */
	if (!dev->base_type->ops.md_layout_ops.region_create) {
		FTL_ERRLOG(dev, "Base device doesn't implement md_layout_ops\n");
		goto error;
	}

	/* Cache frequently used values */
	dev->num_blocks_in_band = ftl_calculate_num_blocks_in_band(dev->base_bdev_desc);
	dev->is_zoned = spdk_bdev_is_zoned(spdk_bdev_desc_get_bdev(dev->base_bdev_desc));

	if (dev->is_zoned) {
		/* TODO - current FTL code isn't fully compatible with ZNS drives */
		FTL_ERRLOG(dev, "Creating FTL on Zoned devices is not supported\n");
		goto error;
	}

	ftl_mngt_next_step(mngt);
	return;
error:
	ftl_mngt_fail_step(mngt);
}
126 
127 void
128 ftl_mngt_close_base_bdev(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
129 {
130 	if (dev->base_ioch) {
131 		spdk_put_io_channel(dev->base_ioch);
132 		dev->base_ioch = NULL;
133 	}
134 
135 	if (dev->base_bdev_desc) {
136 		struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
137 
138 		spdk_bdev_module_release_bdev(bdev);
139 		spdk_bdev_close(dev->base_bdev_desc);
140 
141 		dev->base_bdev_desc = NULL;
142 	}
143 
144 	ftl_mngt_next_step(mngt);
145 }
146 
147 static void
148 nv_cache_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
149 {
150 	switch (type) {
151 	case SPDK_BDEV_EVENT_REMOVE:
152 		assert(0);
153 		break;
154 	default:
155 		break;
156 	}
157 }
158 
159 void
160 ftl_mngt_open_cache_bdev(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
161 {
162 	struct spdk_bdev *bdev;
163 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
164 	const char *bdev_name = dev->conf.cache_bdev;
165 	const struct ftl_md_layout_ops *md_ops;
166 
167 	if (spdk_bdev_open_ext(bdev_name, true, nv_cache_bdev_event_cb, dev,
168 			       &nv_cache->bdev_desc)) {
169 		FTL_ERRLOG(dev, "Unable to open bdev: %s\n", bdev_name);
170 		goto error;
171 	}
172 
173 	bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
174 
175 	if (spdk_bdev_module_claim_bdev(bdev, nv_cache->bdev_desc, &g_ftl_bdev_module)) {
176 		/* clear the desc so that we don't try to release the claim on cleanup */
177 		spdk_bdev_close(nv_cache->bdev_desc);
178 		nv_cache->bdev_desc = NULL;
179 		FTL_ERRLOG(dev, "Unable to claim bdev %s\n", bdev_name);
180 		goto error;
181 	}
182 
183 	FTL_NOTICELOG(dev, "Using %s as write buffer cache\n", spdk_bdev_get_name(bdev));
184 
185 	if (spdk_bdev_get_block_size(bdev) != FTL_BLOCK_SIZE) {
186 		FTL_ERRLOG(dev, "Unsupported block size (%d)\n",
187 			   spdk_bdev_get_block_size(bdev));
188 		goto error;
189 	}
190 
191 	nv_cache->cache_ioch = spdk_bdev_get_io_channel(nv_cache->bdev_desc);
192 	if (!nv_cache->cache_ioch) {
193 		FTL_ERRLOG(dev, "Failed to create cache IO channel for NV Cache\n");
194 		goto error;
195 	}
196 
197 	if (bdev->blockcnt * bdev->blocklen < MINIMUM_CACHE_SIZE_GIB * GiB) {
198 		FTL_ERRLOG(dev, "Bdev %s is too small, requires, at least %uGiB capacity\n",
199 			   spdk_bdev_get_name(bdev), MINIMUM_CACHE_SIZE_GIB);
200 		goto error;
201 	}
202 	nv_cache->md_size = spdk_bdev_get_md_size(bdev);
203 
204 	/* Get FTL NVC bdev descriptor */
205 	nv_cache->nvc_desc = ftl_nv_cache_device_get_desc_by_bdev(dev, bdev);
206 	if (!nv_cache->nvc_desc) {
207 		FTL_ERRLOG(dev, "Failed to get NV Cache device descriptor\n");
208 		goto error;
209 	}
210 	nv_cache->md_size = sizeof(union ftl_md_vss);
211 
212 	md_ops = &nv_cache->nvc_desc->ops.md_layout_ops;
213 	if (!md_ops->region_create) {
214 		FTL_ERRLOG(dev, "NV Cache device doesn't implement md_layout_ops\n");
215 		goto error;
216 	}
217 
218 	FTL_NOTICELOG(dev, "Using %s as NV Cache device\n", nv_cache->nvc_desc->name);
219 	ftl_mngt_next_step(mngt);
220 	return;
221 error:
222 	ftl_mngt_fail_step(mngt);
223 }
224 
225 void
226 ftl_mngt_close_cache_bdev(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
227 {
228 	if (dev->nv_cache.cache_ioch) {
229 		spdk_put_io_channel(dev->nv_cache.cache_ioch);
230 		dev->nv_cache.cache_ioch = NULL;
231 	}
232 
233 	if (dev->nv_cache.bdev_desc) {
234 		struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc);
235 
236 		spdk_bdev_module_release_bdev(bdev);
237 		spdk_bdev_close(dev->nv_cache.bdev_desc);
238 
239 		dev->nv_cache.bdev_desc = NULL;
240 	}
241 
242 	ftl_mngt_next_step(mngt);
243 }
244