xref: /spdk/lib/ftl/mngt/ftl_mngt_misc.c (revision 60982c759db49b4f4579f16e3b24df0725ba4b94)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2022 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "ftl_core.h"
7 #include "ftl_utils.h"
8 #include "ftl_mngt.h"
9 #include "ftl_mngt_steps.h"
10 #include "ftl_band.h"
11 #include "ftl_internal.h"
12 #include "ftl_nv_cache.h"
13 #include "ftl_debug.h"
14 
15 void
16 ftl_mngt_check_conf(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
17 {
18 	if (ftl_conf_is_valid(&dev->conf)) {
19 		ftl_mngt_next_step(mngt);
20 	} else {
21 		ftl_mngt_fail_step(mngt);
22 	}
23 }
24 
25 static int
26 init_p2l_map_pool(struct spdk_ftl_dev *dev)
27 {
28 	size_t p2l_pool_el_blks = spdk_divide_round_up(ftl_p2l_map_pool_elem_size(dev), FTL_BLOCK_SIZE);
29 	size_t p2l_pool_buf_blks = P2L_MEMPOOL_SIZE * p2l_pool_el_blks;
30 	void *p2l_pool_buf;
31 
32 	dev->p2l_pool_md = ftl_md_create(dev, p2l_pool_buf_blks, 0, "p2l_pool",
33 					 ftl_md_create_shm_flags(dev), NULL);
34 	if (!dev->p2l_pool_md) {
35 		return -ENOMEM;
36 	}
37 
38 	p2l_pool_buf = ftl_md_get_buffer(dev->p2l_pool_md);
39 	dev->p2l_pool = ftl_mempool_create_ext(p2l_pool_buf, P2L_MEMPOOL_SIZE,
40 					       p2l_pool_el_blks * FTL_BLOCK_SIZE,
41 					       FTL_BLOCK_SIZE);
42 	if (!dev->p2l_pool) {
43 		return -ENOMEM;
44 	}
45 
46 	if (!ftl_fast_startup(dev)) {
47 		ftl_mempool_initialize_ext(dev->p2l_pool);
48 	}
49 
50 	return 0;
51 }
52 
53 static int
54 init_band_md_pool(struct spdk_ftl_dev *dev)
55 {
56 	dev->band_md_pool = ftl_mempool_create(P2L_MEMPOOL_SIZE,
57 					       sizeof(struct ftl_band_md),
58 					       FTL_BLOCK_SIZE,
59 					       SPDK_ENV_SOCKET_ID_ANY);
60 	if (!dev->band_md_pool) {
61 		return -ENOMEM;
62 	}
63 
64 	return 0;
65 }
66 
/*
 * Management step: set up the P2L map pool and the band metadata pool.
 * Fails the step on the first allocation error (short-circuit keeps the
 * original "stop at first failure" behavior).
 */
void
ftl_mngt_init_mem_pools(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	if (init_p2l_map_pool(dev) || init_band_md_pool(dev)) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	ftl_mngt_next_step(mngt);
}
82 
/*
 * Teardown counterpart of ftl_mngt_init_mem_pools.
 *
 * Order matters: the P2L mempool is a wrapper over the buffer provided by
 * p2l_pool_md (see init_p2l_map_pool), so the pool must be destroyed before
 * its backing metadata region. Each pointer is NULL-checked and cleared,
 * which makes this step safe to run after a partially failed init.
 */
void
ftl_mngt_deinit_mem_pools(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	if (dev->p2l_pool) {
		ftl_mempool_destroy_ext(dev->p2l_pool);
		dev->p2l_pool = NULL;
	}

	if (dev->p2l_pool_md) {
		/* Releases the buffer the p2l_pool was created on */
		ftl_md_destroy(dev->p2l_pool_md, ftl_md_destroy_shm_flags(dev));
		dev->p2l_pool_md = NULL;
	}

	if (dev->band_md_pool) {
		ftl_mempool_destroy(dev->band_md_pool);
		dev->band_md_pool = NULL;
	}

	ftl_mngt_next_step(mngt);
}
103 
104 void
105 ftl_mngt_init_reloc(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
106 {
107 	dev->reloc = ftl_reloc_init(dev);
108 	if (!dev->reloc) {
109 		FTL_ERRLOG(dev, "Unable to initialize reloc structures\n");
110 		ftl_mngt_fail_step(mngt);
111 		return;
112 	}
113 
114 	ftl_mngt_next_step(mngt);
115 }
116 
117 void
118 ftl_mngt_deinit_reloc(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
119 {
120 	ftl_reloc_free(dev->reloc);
121 	ftl_mngt_next_step(mngt);
122 }
123 
/*
 * Management step: initialize the non-volatile (persistent) cache.
 */
void
ftl_mngt_init_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	int rc = ftl_nv_cache_init(dev);

	if (rc) {
		FTL_ERRLOG(dev, "Unable to initialize persistent cache\n");
		ftl_mngt_fail_step(mngt);
	} else {
		ftl_mngt_next_step(mngt);
	}
}
135 
/*
 * Management step: tear down the non-volatile cache (counterpart of
 * ftl_mngt_init_nv_cache) and advance the process.
 */
void
ftl_mngt_deinit_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	ftl_nv_cache_deinit(dev);
	ftl_mngt_next_step(mngt);
}
142 
143 static void
144 user_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
145 {
146 	struct ftl_mngt_process *mngt = md->owner.cb_ctx;
147 
148 	if (status) {
149 		FTL_ERRLOG(ftl_mngt_get_dev(mngt), "FTL NV Cache: ERROR of clearing user cache data\n");
150 		ftl_mngt_fail_step(mngt);
151 	} else {
152 		ftl_mngt_next_step(mngt);
153 	}
154 }
155 
/*
 * First-startup step: overwrite the entire NVC data region (and its
 * per-block VSS metadata) so that no data from a previous FTL instance can
 * survive. Asynchronous - completion is reported through user_clear_cb.
 */
void
ftl_mngt_scrub_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_DATA_NVC];
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_DATA_NVC];
	union ftl_md_vss vss;

	FTL_NOTICELOG(dev, "First startup needs to scrub nv cache data region, this may take some time.\n");
	FTL_NOTICELOG(dev, "Scrubbing %lluGiB\n", region->current.blocks * FTL_BLOCK_SIZE / GiB);

	/* Need to scrub user data, so in case of dirty shutdown the recovery won't
	 * pull in data during open chunks recovery from any previous instance (since during short
	 * tests it's very likely that chunks seq_id will be in line between new head md and old VSS)
	 */
	md->cb = user_clear_cb;
	md->owner.cb_ctx = mngt;

	/* VSS pattern stamped on every block: the current region version plus an
	 * invalid LBA. NOTE(review): vss is a union - this assumes
	 * version.md_version and nv_cache.lba occupy non-overlapping offsets;
	 * confirm against the ftl_md_vss layout.
	 */
	vss.version.md_version = region->current.version;
	vss.nv_cache.lba = FTL_ADDR_INVALID;
	ftl_md_clear(md, 0, &vss);
}
177 
/*
 * Final startup step: flip the device into its operational state and resume
 * the subsystems that were held back during initialization.
 */
void
ftl_mngt_finalize_startup(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	/* Any bit set in the trim bitmap means an unmap is still outstanding */
	if (ftl_bitmap_find_first_set(dev->unmap_map, 0, UINT64_MAX) != UINT64_MAX) {
		dev->unmap_in_progress = true;
	}

	/* Clear the limit applications as they're incremented incorrectly by
	 * the initialization code.
	 */
	memset(dev->stats.limits, 0, sizeof(dev->stats.limits));
	dev->initialized = 1;
	/* Mark the shared-memory superblock as ready - presumably allows a fast
	 * (SHM) restart to reuse this state; confirm against fast-startup path.
	 */
	dev->sb_shm->shm_ready = true;

	/* Resume L2P, relocation, both writers and the NV cache */
	ftl_l2p_resume(dev);
	ftl_reloc_resume(dev->reloc);
	ftl_writer_resume(&dev->writer_user);
	ftl_writer_resume(&dev->writer_gc);
	ftl_nv_cache_resume(&dev->nv_cache);

	ftl_mngt_next_step(mngt);
}
200 
201 void
202 ftl_mngt_start_core_poller(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
203 {
204 	dev->core_poller = SPDK_POLLER_REGISTER(ftl_core_poller, dev, 0);
205 	if (!dev->core_poller) {
206 		FTL_ERRLOG(dev, "Unable to register core poller\n");
207 		ftl_mngt_fail_step(mngt);
208 		return;
209 	}
210 
211 	ftl_mngt_next_step(mngt);
212 }
213 
214 void
215 ftl_mngt_stop_core_poller(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
216 {
217 	dev->halt = true;
218 
219 	if (dev->core_poller) {
220 		ftl_mngt_continue_step(mngt);
221 	} else {
222 		ftl_mngt_next_step(mngt);
223 	}
224 }
225 
/*
 * Management step: dump band state and device statistics to the log, then
 * advance the process.
 */
void
ftl_mngt_dump_stats(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	ftl_dev_dump_bands(dev);
	ftl_dev_dump_stats(dev);
	ftl_mngt_next_step(mngt);
}
233 
234 void
235 ftl_mngt_init_vld_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
236 {
237 	struct ftl_md *valid_map_md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_VALID_MAP];
238 
239 	dev->valid_map = ftl_bitmap_create(ftl_md_get_buffer(valid_map_md),
240 					   ftl_md_get_buffer_size(valid_map_md));
241 	if (!dev->valid_map) {
242 		FTL_ERRLOG(dev, "Failed to create valid map\n");
243 		ftl_mngt_fail_step(mngt);
244 		return;
245 	}
246 
247 	ftl_mngt_next_step(mngt);
248 }
249 
250 void
251 ftl_mngt_deinit_vld_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
252 {
253 	if (dev->valid_map) {
254 		ftl_bitmap_destroy(dev->valid_map);
255 		dev->valid_map = NULL;
256 	}
257 
258 	ftl_mngt_next_step(mngt);
259 }
260 void
261 ftl_mngt_init_unmap_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
262 {
263 	uint64_t num_l2p_pages = spdk_divide_round_up(dev->num_lbas, dev->layout.l2p.lbas_in_page);
264 	uint64_t map_blocks = ftl_bitmap_bits_to_blocks(num_l2p_pages);
265 
266 	dev->unmap_map_md = ftl_md_create(dev,
267 					  map_blocks,
268 					  0,
269 					  "trim_bitmap",
270 					  ftl_md_create_shm_flags(dev), NULL);
271 
272 	if (!dev->unmap_map_md) {
273 		FTL_ERRLOG(dev, "Failed to create trim bitmap md\n");
274 		ftl_mngt_fail_step(mngt);
275 		return;
276 	}
277 
278 	dev->unmap_map = ftl_bitmap_create(ftl_md_get_buffer(dev->unmap_map_md),
279 					   ftl_md_get_buffer_size(dev->unmap_map_md));
280 
281 	if (!dev->unmap_map) {
282 		FTL_ERRLOG(dev, "Failed to create unmap map\n");
283 		ftl_mngt_fail_step(mngt);
284 		return;
285 	}
286 
287 	ftl_mngt_next_step(mngt);
288 }
289 
290 static void
291 unmap_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
292 {
293 	struct ftl_mngt_process *mngt = md->owner.cb_ctx;
294 
295 	if (status) {
296 		FTL_ERRLOG(dev, "ERROR of clearing trim unmap\n");
297 		ftl_mngt_fail_step(mngt);
298 	} else {
299 		ftl_mngt_next_step(mngt);
300 	}
301 }
302 
303 void
304 ftl_mngt_unmap_clear(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
305 {
306 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];
307 
308 	md->cb = unmap_clear_cb;
309 	md->owner.cb_ctx = mngt;
310 
311 	ftl_md_clear(md, 0, NULL);
312 }
313 
314 void
315 ftl_mngt_deinit_unmap_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
316 {
317 	ftl_bitmap_destroy(dev->unmap_map);
318 	dev->unmap_map = NULL;
319 
320 	ftl_md_destroy(dev->unmap_map_md, ftl_md_destroy_shm_flags(dev));
321 	dev->unmap_map_md = NULL;
322 
323 	ftl_mngt_next_step(mngt);
324 }
325