xref: /spdk/lib/ftl/mngt/ftl_mngt_misc.c (revision 877573897ad52be4fa8989f7617bd655b87e05c4)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2022 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "ftl_core.h"
7 #include "ftl_utils.h"
8 #include "ftl_mngt.h"
9 #include "ftl_mngt_steps.h"
10 #include "ftl_band.h"
11 #include "ftl_internal.h"
12 #include "ftl_nv_cache.h"
13 #include "ftl_debug.h"
14 
15 void
16 ftl_mngt_check_conf(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
17 {
18 	if (ftl_conf_is_valid(&dev->conf)) {
19 		ftl_mngt_next_step(mngt);
20 	} else {
21 		ftl_mngt_fail_step(mngt);
22 	}
23 }
24 
25 static int
26 init_p2l_map_pool(struct spdk_ftl_dev *dev)
27 {
28 	size_t p2l_pool_el_blks = spdk_divide_round_up(ftl_p2l_map_pool_elem_size(dev), FTL_BLOCK_SIZE);
29 	size_t p2l_pool_buf_blks = P2L_MEMPOOL_SIZE * p2l_pool_el_blks;
30 	void *p2l_pool_buf;
31 
32 	dev->p2l_pool_md = ftl_md_create(dev, p2l_pool_buf_blks, 0, "p2l_pool",
33 					 ftl_md_create_shm_flags(dev), NULL);
34 	if (!dev->p2l_pool_md) {
35 		return -ENOMEM;
36 	}
37 
38 	p2l_pool_buf = ftl_md_get_buffer(dev->p2l_pool_md);
39 	dev->p2l_pool = ftl_mempool_create_ext(p2l_pool_buf, P2L_MEMPOOL_SIZE,
40 					       p2l_pool_el_blks * FTL_BLOCK_SIZE,
41 					       FTL_BLOCK_SIZE);
42 	if (!dev->p2l_pool) {
43 		return -ENOMEM;
44 	}
45 
46 	if (!ftl_fast_startup(dev)) {
47 		ftl_mempool_initialize_ext(dev->p2l_pool);
48 	}
49 
50 	return 0;
51 }
52 
53 static int
54 init_band_md_pool(struct spdk_ftl_dev *dev)
55 {
56 	dev->band_md_pool = ftl_mempool_create(P2L_MEMPOOL_SIZE,
57 					       sizeof(struct ftl_band_md),
58 					       FTL_BLOCK_SIZE,
59 					       SPDK_ENV_SOCKET_ID_ANY);
60 	if (!dev->band_md_pool) {
61 		return -ENOMEM;
62 	}
63 
64 	return 0;
65 }
66 
void
ftl_mngt_init_mem_pools(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	/* Short-circuit keeps the original ordering: the band MD pool is only
	 * attempted once the P2L pool has been created successfully.
	 */
	if (init_p2l_map_pool(dev) || init_band_md_pool(dev)) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	ftl_mngt_next_step(mngt);
}
82 
void
ftl_mngt_deinit_mem_pools(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	/* Rollback/teardown counterpart of ftl_mngt_init_mem_pools(). The NULL
	 * checks make it safe to run after a partially failed init. Order matters:
	 * the P2L pool is an overlay on the p2l_pool_md buffer, so the pool must
	 * be destroyed before its backing metadata region.
	 */
	if (dev->p2l_pool) {
		ftl_mempool_destroy_ext(dev->p2l_pool);
		dev->p2l_pool = NULL;
	}

	if (dev->p2l_pool_md) {
		ftl_md_destroy(dev->p2l_pool_md, ftl_md_destroy_shm_flags(dev));
		dev->p2l_pool_md = NULL;
	}

	if (dev->band_md_pool) {
		ftl_mempool_destroy(dev->band_md_pool);
		dev->band_md_pool = NULL;
	}

	ftl_mngt_next_step(mngt);
}
103 
104 void
105 ftl_mngt_init_reloc(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
106 {
107 	dev->reloc = ftl_reloc_init(dev);
108 	if (!dev->reloc) {
109 		FTL_ERRLOG(dev, "Unable to initialize reloc structures\n");
110 		ftl_mngt_fail_step(mngt);
111 		return;
112 	}
113 
114 	ftl_mngt_next_step(mngt);
115 }
116 
void
ftl_mngt_deinit_reloc(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	/* Release the relocation machinery. Called unconditionally on rollback, so
	 * presumably ftl_reloc_free(NULL) is a no-op — TODO(review): confirm.
	 */
	ftl_reloc_free(dev->reloc);
	ftl_mngt_next_step(mngt);
}
123 
void
ftl_mngt_init_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	/* Bring up the persistent (NV) cache; non-zero means initialization failed. */
	int rc = ftl_nv_cache_init(dev);

	if (rc != 0) {
		FTL_ERRLOG(dev, "Unable to initialize persistent cache\n");
		ftl_mngt_fail_step(mngt);
		return;
	}

	ftl_mngt_next_step(mngt);
}
135 
void
ftl_mngt_deinit_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	/* Release all NV cache resources set up by ftl_mngt_init_nv_cache(). */
	ftl_nv_cache_deinit(dev);
	ftl_mngt_next_step(mngt);
}
142 
143 static void
144 user_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
145 {
146 	struct ftl_mngt_process *mngt = md->owner.cb_ctx;
147 
148 	if (status) {
149 		FTL_ERRLOG(ftl_mngt_get_dev(mngt), "FTL NV Cache: ERROR of clearing user cache data\n");
150 		ftl_mngt_fail_step(mngt);
151 	} else {
152 		ftl_mngt_next_step(mngt);
153 	}
154 }
155 
156 void
157 ftl_mngt_scrub_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
158 {
159 	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_DATA_NVC];
160 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_DATA_NVC];
161 	union ftl_md_vss vss;
162 
163 	FTL_NOTICELOG(dev, "First startup needs to scrub nv cache data region, this may take some time.\n");
164 	FTL_NOTICELOG(dev, "Scrubbing %lluGiB\n", region->current.blocks * FTL_BLOCK_SIZE / GiB);
165 
166 	/* Need to scrub user data, so in case of dirty shutdown the recovery won't
167 	 * pull in data during open chunks recovery from any previous instance (since during short
168 	 * tests it's very likely that chunks seq_id will be in line between new head md and old VSS)
169 	 */
170 	md->cb = user_clear_cb;
171 	md->owner.cb_ctx = mngt;
172 
173 	vss.version.md_version = region->current.version;
174 	vss.nv_cache.lba = FTL_ADDR_INVALID;
175 	ftl_md_clear(md, 0, &vss);
176 }
177 
void
ftl_mngt_finalize_startup(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	/* Any set bit in the trim bitmap means an unmap was interrupted by the
	 * previous shutdown and must be resumed after startup.
	 */
	if (ftl_bitmap_find_first_set(dev->unmap_map, 0, UINT64_MAX) != UINT64_MAX) {
		dev->unmap_in_progress = true;
	}

	/* Clear the limit applications as they're incremented incorrectly by
	 * the initialization code.
	 */
	memset(dev->stats.limits, 0, sizeof(dev->stats.limits));
	dev->initialized = 1;
	/* Mark the shared-memory superblock ready so a future fast startup can reuse it. */
	dev->sb_shm->shm_ready = true;

	/* Unpause background services now that the device is fully initialized. */
	ftl_reloc_resume(dev->reloc);
	ftl_writer_resume(&dev->writer_user);
	ftl_writer_resume(&dev->writer_gc);
	ftl_nv_cache_resume(&dev->nv_cache);

	ftl_mngt_next_step(mngt);
}
199 
200 void
201 ftl_mngt_start_core_poller(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
202 {
203 	dev->core_poller = SPDK_POLLER_REGISTER(ftl_core_poller, dev, 0);
204 	if (!dev->core_poller) {
205 		FTL_ERRLOG(dev, "Unable to register core poller\n");
206 		ftl_mngt_fail_step(mngt);
207 		return;
208 	}
209 
210 	ftl_mngt_next_step(mngt);
211 }
212 
213 void
214 ftl_mngt_stop_core_poller(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
215 {
216 	dev->halt = true;
217 
218 	if (dev->core_poller) {
219 		ftl_mngt_continue_step(mngt);
220 	} else {
221 		ftl_mngt_next_step(mngt);
222 	}
223 }
224 
void
ftl_mngt_dump_stats(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	/* Diagnostic step: log per-band state and aggregate device statistics. */
	ftl_dev_dump_bands(dev);
	ftl_dev_dump_stats(dev);
	ftl_mngt_next_step(mngt);
}
232 
233 void
234 ftl_mngt_init_vld_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
235 {
236 	struct ftl_md *valid_map_md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_VALID_MAP];
237 
238 	dev->valid_map = ftl_bitmap_create(ftl_md_get_buffer(valid_map_md),
239 					   ftl_md_get_buffer_size(valid_map_md));
240 	if (!dev->valid_map) {
241 		FTL_ERRLOG(dev, "Failed to create valid map\n");
242 		ftl_mngt_fail_step(mngt);
243 		return;
244 	}
245 
246 	ftl_mngt_next_step(mngt);
247 }
248 
void
ftl_mngt_deinit_vld_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	/* Destroy only the bitmap wrapper — its backing VALID_MAP metadata buffer
	 * is owned by the layout and released elsewhere. The NULL check keeps this
	 * safe on the rollback path when the map was never created.
	 */
	if (dev->valid_map) {
		ftl_bitmap_destroy(dev->valid_map);
		dev->valid_map = NULL;
	}

	ftl_mngt_next_step(mngt);
}
259 void
260 ftl_mngt_init_unmap_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
261 {
262 	uint64_t num_l2p_pages = spdk_divide_round_up(dev->num_lbas, dev->layout.l2p.lbas_in_page);
263 	uint64_t map_blocks = ftl_bitmap_bits_to_blocks(num_l2p_pages);
264 
265 	dev->unmap_map_md = ftl_md_create(dev,
266 					  map_blocks,
267 					  0,
268 					  "trim_bitmap",
269 					  ftl_md_create_shm_flags(dev), NULL);
270 
271 	if (!dev->unmap_map_md) {
272 		FTL_ERRLOG(dev, "Failed to create trim bitmap md\n");
273 		ftl_mngt_fail_step(mngt);
274 		return;
275 	}
276 
277 	dev->unmap_map = ftl_bitmap_create(ftl_md_get_buffer(dev->unmap_map_md),
278 					   ftl_md_get_buffer_size(dev->unmap_map_md));
279 
280 	if (!dev->unmap_map) {
281 		FTL_ERRLOG(dev, "Failed to create unmap map\n");
282 		ftl_mngt_fail_step(mngt);
283 		return;
284 	}
285 
286 	ftl_mngt_next_step(mngt);
287 }
288 
289 static void
290 unmap_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
291 {
292 	struct ftl_mngt_process *mngt = md->owner.cb_ctx;
293 
294 	if (status) {
295 		FTL_ERRLOG(dev, "ERROR of clearing trim unmap\n");
296 		ftl_mngt_fail_step(mngt);
297 	} else {
298 		ftl_mngt_next_step(mngt);
299 	}
300 }
301 
void
ftl_mngt_unmap_clear(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];

	/* Asynchronous step: completion is signaled in unmap_clear_cb() once the
	 * clear I/O finishes, so neither next nor fail is called here.
	 */
	md->cb = unmap_clear_cb;
	md->owner.cb_ctx = mngt;

	/* NULL VSS — no per-block metadata pattern is stamped here, unlike
	 * ftl_mngt_scrub_nv_cache().
	 */
	ftl_md_clear(md, 0, NULL);
}
312 
313 void
314 ftl_mngt_deinit_unmap_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
315 {
316 	ftl_bitmap_destroy(dev->unmap_map);
317 	dev->unmap_map = NULL;
318 
319 	ftl_md_destroy(dev->unmap_map_md, ftl_md_destroy_shm_flags(dev));
320 	dev->unmap_map_md = NULL;
321 
322 	ftl_mngt_next_step(mngt);
323 }
324