/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 */

#include "ftl_core.h"
#include "ftl_utils.h"
#include "ftl_mngt.h"
#include "ftl_mngt_steps.h"
#include "ftl_band.h"
#include "ftl_internal.h"
#include "ftl_nv_cache.h"
#include "ftl_debug.h"

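/*
 * Management step: validate the device configuration. Advances to the next
 * step when the configuration is valid, fails the step otherwise.
 */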
void
ftl_mngt_check_conf(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	if (ftl_conf_is_valid(&dev->conf)) {
		ftl_mngt_next_step(mngt);
	} else {
		ftl_mngt_fail_step(mngt);
	}
}

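/*
 * Allocate the P2L (physical-to-logical) map pool: a metadata buffer sized for
 * P2L_MEMPOOL_SIZE elements (each rounded up to whole FTL blocks), wrapped in
 * an external mempool. On fast startup the mempool is not re-initialized, as
 * its state is expected to be carried over via the shared-memory backing.
 */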
static int
init_p2l_map_pool(struct spdk_ftl_dev *dev)
{
	size_t p2l_pool_el_blks = spdk_divide_round_up(ftl_p2l_map_pool_elem_size(dev), FTL_BLOCK_SIZE);
	size_t p2l_pool_buf_blks = P2L_MEMPOOL_SIZE * p2l_pool_el_blks;
	void *p2l_pool_buf;

	dev->p2l_pool_md = ftl_md_create(dev, p2l_pool_buf_blks, 0, "p2l_pool",
					 ftl_md_create_shm_flags(dev), NULL);
	if (!dev->p2l_pool_md) {
		return -ENOMEM;
	}

	p2l_pool_buf = ftl_md_get_buffer(dev->p2l_pool_md);
	dev->p2l_pool = ftl_mempool_create_ext(p2l_pool_buf, P2L_MEMPOOL_SIZE,
					       p2l_pool_el_blks * FTL_BLOCK_SIZE,
					       FTL_BLOCK_SIZE);
	if (!dev->p2l_pool) {
		return -ENOMEM;
	}

	if (!ftl_fast_startup(dev)) {
		ftl_mempool_initialize_ext(dev->p2l_pool);
	}

	return 0;
}

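/* Allocate the regular mempool used for per-band metadata. */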
static int
init_band_md_pool(struct spdk_ftl_dev *dev)
{
	dev->band_md_pool = ftl_mempool_create(P2L_MEMPOOL_SIZE,
					       sizeof(struct ftl_band_md),
					       FTL_BLOCK_SIZE,
					       SPDK_ENV_SOCKET_ID_ANY);
	if (!dev->band_md_pool) {
		return -ENOMEM;
	}

	return 0;
}

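/*
 * Management step: create the P2L map and band metadata pools. Any allocation
 * failure fails the step; partially created pools are expected to be released
 * by the matching deinit step during rollback.
 */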
void
ftl_mngt_init_mem_pools(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	if (init_p2l_map_pool(dev)) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	if (init_band_md_pool(dev)) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	ftl_mngt_next_step(mngt);
}

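/* Management step: tear down the memory pools created by ftl_mngt_init_mem_pools. */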
void
ftl_mngt_deinit_mem_pools(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	if (dev->p2l_pool) {
		ftl_mempool_destroy_ext(dev->p2l_pool);
		dev->p2l_pool = NULL;
	}

	if (dev->p2l_pool_md) {
		ftl_md_destroy(dev->p2l_pool_md, ftl_md_destroy_shm_flags(dev));
		dev->p2l_pool_md = NULL;
	}

	if (dev->band_md_pool) {
		ftl_mempool_destroy(dev->band_md_pool);
		dev->band_md_pool = NULL;
	}

	ftl_mngt_next_step(mngt);
}

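/* Management step: set up the band relocation machinery. */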
void
ftl_mngt_init_reloc(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	dev->reloc = ftl_reloc_init(dev);
	if (!dev->reloc) {
		FTL_ERRLOG(dev, "Unable to initialize reloc structures\n");
		ftl_mngt_fail_step(mngt);
		return;
	}

	ftl_mngt_next_step(mngt);
}

void
ftl_mngt_deinit_reloc(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	ftl_reloc_free(dev->reloc);
	ftl_mngt_next_step(mngt);
}

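/* Management step: initialize the non-volatile (persistent) write cache. */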
void
ftl_mngt_init_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	if (ftl_nv_cache_init(dev)) {
		FTL_ERRLOG(dev, "Unable to initialize persistent cache\n");
		ftl_mngt_fail_step(mngt);
		return;
	}

	ftl_mngt_next_step(mngt);
}

void
ftl_mngt_deinit_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	ftl_nv_cache_deinit(dev);
	ftl_mngt_next_step(mngt);
}

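/* Completion callback for clearing the NV cache data region during first startup. */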
static void
user_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
	struct ftl_mngt_process *mngt = md->owner.cb_ctx;

	if (status) {
		FTL_ERRLOG(ftl_mngt_get_dev(mngt), "FTL NV Cache: failed to clear user cache data\n");
		ftl_mngt_fail_step(mngt);
	} else {
		ftl_mngt_next_step(mngt);
	}
}

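/*
 * Management step (first startup only): clear the NV cache data region so that
 * stale per-block metadata from a previous instance cannot be misinterpreted
 * during a later dirty-shutdown recovery. Completion is reported through
 * user_clear_cb.
 */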
void
ftl_mngt_scrub_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_DATA_NVC];
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_DATA_NVC];
	union ftl_md_vss vss;

	FTL_NOTICELOG(dev, "First startup needs to scrub nv cache data region, this may take some time.\n");
	FTL_NOTICELOG(dev, "Scrubbing %lluGiB\n", region->current.blocks * FTL_BLOCK_SIZE / GiB);

	/* The user data region needs to be scrubbed; otherwise, after a dirty shutdown,
	 * open chunk recovery could pull in data left over from a previous instance
	 * (during short tests it is very likely that chunk seq_ids line up between the
	 * new head md and the old VSS).
	 */
	md->cb = user_clear_cb;
	md->owner.cb_ctx = mngt;

	vss.version.md_version = region->current.version;
	vss.nv_cache.lba = FTL_ADDR_INVALID;
	ftl_md_clear(md, 0, &vss);
}

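/*
 * Management step: final bookkeeping before the device is reported ready. If
 * any bit is set in the trim (unmap) bitmap, an interrupted trim is still in
 * progress. Relocation, the user/GC writers and the NV cache are resumed here,
 * presumably because they are held paused until startup completes.
 */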
void
ftl_mngt_finalize_startup(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	if (ftl_bitmap_find_first_set(dev->unmap_map, 0, UINT64_MAX) != UINT64_MAX) {
		dev->unmap_in_progress = true;
	}

	dev->initialized = 1;
	dev->sb_shm->shm_ready = true;

	ftl_reloc_resume(dev->reloc);
	ftl_writer_resume(&dev->writer_user);
	ftl_writer_resume(&dev->writer_gc);
	ftl_nv_cache_resume(&dev->nv_cache);

	ftl_mngt_next_step(mngt);
}

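/* Management step: register the main FTL poller (period 0, i.e. run on every poll iteration). */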
void
ftl_mngt_start_core_poller(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	dev->core_poller = SPDK_POLLER_REGISTER(ftl_core_poller, dev, 0);
	if (!dev->core_poller) {
		FTL_ERRLOG(dev, "Unable to register core poller\n");
		ftl_mngt_fail_step(mngt);
		return;
	}

	ftl_mngt_next_step(mngt);
}

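/*
 * Management step: request shutdown of the core poller. The step is re-queued
 * via ftl_mngt_continue_step until dev->core_poller becomes NULL, which is
 * expected to happen once ftl_core_poller notices dev->halt and unregisters
 * itself.
 */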
void
ftl_mngt_stop_core_poller(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	dev->halt = true;

	if (dev->core_poller) {
		ftl_mngt_continue_step(mngt);
	} else {
		ftl_mngt_next_step(mngt);
	}
}

void
ftl_mngt_dump_stats(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	ftl_dev_dump_bands(dev);
	ftl_dev_dump_stats(dev);
	ftl_mngt_next_step(mngt);
}

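/*
 * Management step: wrap the VALID_MAP metadata buffer in a bitmap used to track
 * which blocks currently hold valid user data.
 */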
void
ftl_mngt_init_vld_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_md *valid_map_md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_VALID_MAP];

	dev->valid_map = ftl_bitmap_create(ftl_md_get_buffer(valid_map_md),
					   ftl_md_get_buffer_size(valid_map_md));
	if (!dev->valid_map) {
		FTL_ERRLOG(dev, "Failed to create valid map\n");
		ftl_mngt_fail_step(mngt);
		return;
	}

	ftl_mngt_next_step(mngt);
}

void
ftl_mngt_deinit_vld_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	if (dev->valid_map) {
		ftl_bitmap_destroy(dev->valid_map);
		dev->valid_map = NULL;
	}

	ftl_mngt_next_step(mngt);
}

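/*
 * Management step: create the trim (unmap) bitmap, one bit per L2P page, backed
 * by a dedicated metadata region and used to track pages affected by a pending
 * trim.
 */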
void
ftl_mngt_init_unmap_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	uint64_t num_l2p_pages = spdk_divide_round_up(dev->num_lbas, dev->layout.l2p.lbas_in_page);
	uint64_t map_blocks = ftl_bitmap_bits_to_blocks(num_l2p_pages);

	dev->unmap_map_md = ftl_md_create(dev,
					  map_blocks,
					  0,
					  "trim_bitmap",
					  ftl_md_create_shm_flags(dev), NULL);

	if (!dev->unmap_map_md) {
		FTL_ERRLOG(dev, "Failed to create trim bitmap md\n");
		ftl_mngt_fail_step(mngt);
		return;
	}

	dev->unmap_map = ftl_bitmap_create(ftl_md_get_buffer(dev->unmap_map_md),
					   ftl_md_get_buffer_size(dev->unmap_map_md));

	if (!dev->unmap_map) {
		FTL_ERRLOG(dev, "Failed to create unmap map\n");
		ftl_mngt_fail_step(mngt);
		return;
	}

	ftl_mngt_next_step(mngt);
}

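/* Completion callback for clearing the trim (unmap) metadata region. */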
static void
unmap_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
	struct ftl_mngt_process *mngt = md->owner.cb_ctx;

	if (status) {
		FTL_ERRLOG(dev, "Failed to clear trim metadata region\n");
		ftl_mngt_fail_step(mngt);
	} else {
		ftl_mngt_next_step(mngt);
	}
}

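/* Management step: clear the TRIM_MD metadata region; completion is handled by unmap_clear_cb. */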
void
ftl_mngt_unmap_clear(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];

	md->cb = unmap_clear_cb;
	md->owner.cb_ctx = mngt;

	ftl_md_clear(md, 0, NULL);
}

void
ftl_mngt_deinit_unmap_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	ftl_bitmap_destroy(dev->unmap_map);
	dev->unmap_map = NULL;

	ftl_md_destroy(dev->unmap_map_md, ftl_md_destroy_shm_flags(dev));
	dev->unmap_map_md = NULL;

	ftl_mngt_next_step(mngt);
}