/* xref: /spdk/lib/ftl/mngt/ftl_mngt_misc.c (revision 712e8cb7ef7046c6928709f211c6cb0ad9ea8711) */
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 */
5 
6 #include "ftl_core.h"
7 #include "ftl_utils.h"
8 #include "ftl_mngt.h"
9 #include "ftl_mngt_steps.h"
10 #include "ftl_band.h"
11 #include "ftl_internal.h"
12 #include "ftl_nv_cache.h"
13 #include "ftl_debug.h"
14 
15 void
16 ftl_mngt_check_conf(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
17 {
18 	if (ftl_conf_is_valid(&dev->conf)) {
19 		ftl_mngt_next_step(mngt);
20 	} else {
21 		ftl_mngt_fail_step(mngt);
22 	}
23 }
24 
25 static int
26 init_p2l_map_pool(struct spdk_ftl_dev *dev)
27 {
28 	size_t p2l_pool_el_blks = spdk_divide_round_up(ftl_p2l_map_pool_elem_size(dev), FTL_BLOCK_SIZE);
29 	size_t p2l_pool_buf_blks = P2L_MEMPOOL_SIZE * p2l_pool_el_blks;
30 	void *p2l_pool_buf;
31 
32 	dev->p2l_pool_md = ftl_md_create(dev, p2l_pool_buf_blks, 0, "p2l_pool",
33 					 ftl_md_create_shm_flags(dev), NULL);
34 	if (!dev->p2l_pool_md) {
35 		return -ENOMEM;
36 	}
37 
38 	p2l_pool_buf = ftl_md_get_buffer(dev->p2l_pool_md);
39 	dev->p2l_pool = ftl_mempool_create_ext(p2l_pool_buf, P2L_MEMPOOL_SIZE,
40 					       p2l_pool_el_blks * FTL_BLOCK_SIZE,
41 					       FTL_BLOCK_SIZE);
42 	if (!dev->p2l_pool) {
43 		return -ENOMEM;
44 	}
45 
46 	if (!ftl_fast_startup(dev)) {
47 		ftl_mempool_initialize_ext(dev->p2l_pool);
48 	}
49 
50 	return 0;
51 }
52 
53 static int
54 init_band_md_pool(struct spdk_ftl_dev *dev)
55 {
56 	dev->band_md_pool = ftl_mempool_create(P2L_MEMPOOL_SIZE,
57 					       sizeof(struct ftl_band_md),
58 					       FTL_BLOCK_SIZE,
59 					       SPDK_ENV_SOCKET_ID_ANY);
60 	if (!dev->band_md_pool) {
61 		return -ENOMEM;
62 	}
63 
64 	return 0;
65 }
66 
/*
 * Allocate the runtime memory pools (P2L map pool and band metadata pool).
 *
 * On any failure the step is failed and the function returns immediately --
 * the original code fell through after ftl_mngt_fail_step(), which could
 * fail the step twice and then additionally call ftl_mngt_next_step(),
 * advancing the management state machine more than once per step.
 */
void
ftl_mngt_init_mem_pools(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	if (init_p2l_map_pool(dev)) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	if (init_band_md_pool(dev)) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	ftl_mngt_next_step(mngt);
}
80 
/*
 * Tear down the pools created by ftl_mngt_init_mem_pools().
 *
 * The p2l_pool must be destroyed before its backing metadata buffer
 * (p2l_pool_md), since the pool's element storage lives inside that buffer
 * (see init_p2l_map_pool()).  Pointers are NULLed after release so this
 * teardown is safe after a partial initialization.
 */
void
ftl_mngt_deinit_mem_pools(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	if (dev->p2l_pool) {
		ftl_mempool_destroy_ext(dev->p2l_pool);
		dev->p2l_pool = NULL;
	}

	if (dev->p2l_pool_md) {
		ftl_md_destroy(dev->p2l_pool_md, ftl_md_destroy_shm_flags(dev));
		dev->p2l_pool_md = NULL;
	}

	if (dev->band_md_pool) {
		ftl_mempool_destroy(dev->band_md_pool);
		dev->band_md_pool = NULL;
	}

	ftl_mngt_next_step(mngt);
}
101 
102 void
103 ftl_mngt_init_reloc(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
104 {
105 	dev->reloc = ftl_reloc_init(dev);
106 	if (!dev->reloc) {
107 		FTL_ERRLOG(dev, "Unable to initialize reloc structures\n");
108 		ftl_mngt_fail_step(mngt);
109 		return;
110 	}
111 
112 	ftl_mngt_next_step(mngt);
113 }
114 
/* Free the relocation machinery and advance the management process. */
void
ftl_mngt_deinit_reloc(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	ftl_reloc_free(dev->reloc);
	ftl_mngt_next_step(mngt);
}
121 
/*
 * Bring up the non-volatile (persistent) cache; fails the step when
 * ftl_nv_cache_init() reports an error.
 */
void
ftl_mngt_init_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	int rc = ftl_nv_cache_init(dev);

	if (rc != 0) {
		FTL_ERRLOG(dev, "Unable to initialize persistent cache\n");
		ftl_mngt_fail_step(mngt);
		return;
	}

	ftl_mngt_next_step(mngt);
}
133 
/* Tear down the non-volatile cache; counterpart of ftl_mngt_init_nv_cache(). */
void
ftl_mngt_deinit_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	ftl_nv_cache_deinit(dev);
	ftl_mngt_next_step(mngt);
}
140 
141 static void
142 user_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
143 {
144 	struct ftl_mngt_process *mngt = md->owner.cb_ctx;
145 
146 	if (status) {
147 		FTL_ERRLOG(ftl_mngt_get_dev(mngt), "FTL NV Cache: ERROR of clearing user cache data\n");
148 		ftl_mngt_fail_step(mngt);
149 	} else {
150 		ftl_mngt_next_step(mngt);
151 	}
152 }
153 
154 void
155 ftl_mngt_scrub_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
156 {
157 	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_DATA_NVC];
158 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_DATA_NVC];
159 	union ftl_md_vss vss;
160 
161 	FTL_NOTICELOG(dev, "First startup needs to scrub nv cache data region, this may take some time.\n");
162 	FTL_NOTICELOG(dev, "Scrubbing %lluGiB\n", region->current.blocks * FTL_BLOCK_SIZE / GiB);
163 
164 	/* Need to scrub user data, so in case of dirty shutdown the recovery won't
165 	 * pull in data during open chunks recovery from any previous instance (since during short
166 	 * tests it's very likely that chunks seq_id will be in line between new head md and old VSS)
167 	 */
168 	md->cb = user_clear_cb;
169 	md->owner.cb_ctx = mngt;
170 
171 	vss.version.md_version = region->current.version;
172 	vss.nv_cache.lba = FTL_ADDR_INVALID;
173 	ftl_md_clear(md, 0, &vss);
174 }
175 
/*
 * Final startup step: flip the device into its fully-operational state and
 * resume the background machinery that was held back during initialization.
 */
void
ftl_mngt_finalize_startup(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	/* Any set bit in the unmap (trim) bitmap means a trim was left
	 * unfinished and still needs processing.
	 */
	if (ftl_bitmap_find_first_set(dev->unmap_map, 0, UINT64_MAX) != UINT64_MAX) {
		dev->unmap_in_progress = true;
	}

	/* Clear the limit applications as they're incremented incorrectly by
	 * the initialization code.
	 */
	memset(dev->stats.limits, 0, sizeof(dev->stats.limits));
	dev->initialized = 1;
	/* Mark the shared-memory superblock state as ready -- presumably consumed
	 * by the fast (shm) startup path; confirm against ftl_fast_startup().
	 */
	dev->sb_shm->shm_ready = true;

	ftl_reloc_resume(dev->reloc);
	ftl_writer_resume(&dev->writer_user);
	ftl_writer_resume(&dev->writer_gc);
	ftl_nv_cache_resume(&dev->nv_cache);

	ftl_mngt_next_step(mngt);
}
197 
198 void
199 ftl_mngt_start_core_poller(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
200 {
201 	dev->core_poller = SPDK_POLLER_REGISTER(ftl_core_poller, dev, 0);
202 	if (!dev->core_poller) {
203 		FTL_ERRLOG(dev, "Unable to register core poller\n");
204 		ftl_mngt_fail_step(mngt);
205 		return;
206 	}
207 
208 	ftl_mngt_next_step(mngt);
209 }
210 
211 void
212 ftl_mngt_stop_core_poller(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
213 {
214 	dev->halt = true;
215 
216 	if (dev->core_poller) {
217 		ftl_mngt_continue_step(mngt);
218 	} else {
219 		ftl_mngt_next_step(mngt);
220 	}
221 }
222 
/* Log band state and device statistics (diagnostic output), then advance. */
void
ftl_mngt_dump_stats(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	ftl_dev_dump_bands(dev);
	ftl_dev_dump_stats(dev);
	ftl_mngt_next_step(mngt);
}
230 
231 void
232 ftl_mngt_init_vld_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
233 {
234 	struct ftl_md *valid_map_md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_VALID_MAP];
235 
236 	dev->valid_map = ftl_bitmap_create(ftl_md_get_buffer(valid_map_md),
237 					   ftl_md_get_buffer_size(valid_map_md));
238 	if (!dev->valid_map) {
239 		FTL_ERRLOG(dev, "Failed to create valid map\n");
240 		ftl_mngt_fail_step(mngt);
241 		return;
242 	}
243 
244 	ftl_mngt_next_step(mngt);
245 }
246 
/*
 * Destroy the valid-map bitmap wrapper.  Only the bitmap object is released;
 * the backing buffer belongs to the VALID_MAP metadata region and is torn
 * down with the layout, not here.
 */
void
ftl_mngt_deinit_vld_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	if (dev->valid_map) {
		ftl_bitmap_destroy(dev->valid_map);
		dev->valid_map = NULL;
	}

	ftl_mngt_next_step(mngt);
}
257 void
258 ftl_mngt_init_unmap_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
259 {
260 	uint64_t num_l2p_pages = spdk_divide_round_up(dev->num_lbas, dev->layout.l2p.lbas_in_page);
261 	uint64_t map_blocks = ftl_bitmap_bits_to_blocks(num_l2p_pages);
262 
263 	dev->unmap_map_md = ftl_md_create(dev,
264 					  map_blocks,
265 					  0,
266 					  "trim_bitmap",
267 					  ftl_md_create_shm_flags(dev), NULL);
268 
269 	if (!dev->unmap_map_md) {
270 		FTL_ERRLOG(dev, "Failed to create trim bitmap md\n");
271 		ftl_mngt_fail_step(mngt);
272 		return;
273 	}
274 
275 	dev->unmap_map = ftl_bitmap_create(ftl_md_get_buffer(dev->unmap_map_md),
276 					   ftl_md_get_buffer_size(dev->unmap_map_md));
277 
278 	if (!dev->unmap_map) {
279 		FTL_ERRLOG(dev, "Failed to create unmap map\n");
280 		ftl_mngt_fail_step(mngt);
281 		return;
282 	}
283 
284 	ftl_mngt_next_step(mngt);
285 }
286 
287 static void
288 unmap_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
289 {
290 	struct ftl_mngt_process *mngt = md->owner.cb_ctx;
291 
292 	if (status) {
293 		FTL_ERRLOG(dev, "ERROR of clearing trim unmap\n");
294 		ftl_mngt_fail_step(mngt);
295 	} else {
296 		ftl_mngt_next_step(mngt);
297 	}
298 }
299 
/*
 * Zero the persistent trim metadata region.  Completion (success or failure)
 * is reported asynchronously through unmap_clear_cb(), which advances or
 * fails the management step.
 */
void
ftl_mngt_unmap_clear(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];

	md->cb = unmap_clear_cb;
	md->owner.cb_ctx = mngt;

	/* NULL VSS: no per-block metadata pattern is written here, in contrast
	 * to ftl_mngt_scrub_nv_cache() which stamps each block's VSS.
	 */
	ftl_md_clear(md, 0, NULL);
}
310 
/*
 * Release the trim bitmap and its backing metadata buffer (created by
 * ftl_mngt_init_unmap_map()).
 *
 * NOTE(review): unlike ftl_mngt_deinit_vld_map() there are no NULL guards
 * here -- presumably ftl_bitmap_destroy()/ftl_md_destroy() tolerate NULL or
 * this step only runs after successful init; verify against those helpers.
 */
void
ftl_mngt_deinit_unmap_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	ftl_bitmap_destroy(dev->unmap_map);
	dev->unmap_map = NULL;

	ftl_md_destroy(dev->unmap_map_md, ftl_md_destroy_shm_flags(dev));
	dev->unmap_map_md = NULL;

	ftl_mngt_next_step(mngt);
}
322