xref: /spdk/lib/ftl/ftl_core.h (revision a044e19470d20ae1792bedcd820e80d8ab4ad498)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #ifndef FTL_CORE_H
35 #define FTL_CORE_H
36 
37 #include "spdk/stdinc.h"
38 #include "spdk/nvme.h"
39 #include "spdk/nvme_ocssd.h"
40 #include "spdk/uuid.h"
41 #include "spdk/thread.h"
42 #include "spdk/util.h"
43 #include "spdk_internal/log.h"
44 #include "spdk/queue.h"
45 #include "spdk/ftl.h"
46 #include "spdk/bdev.h"
47 
48 #include "ftl_ppa.h"
49 #include "ftl_io.h"
50 #include "ftl_trace.h"
51 
52 struct spdk_ftl_dev;
53 struct ftl_band;
54 struct ftl_chunk;
55 struct ftl_io;
56 struct ftl_restore;
57 struct ftl_wptr;
58 struct ftl_flush;
59 struct ftl_reloc;
60 struct ftl_anm_event;
61 
/* Per-device I/O statistics counters */
struct ftl_stats {
	/* Number of writes scheduled directly by the user */
	uint64_t				write_user;

	/* Total number of writes (user + internal, presumably relocation/metadata — confirm in ftl_core.c) */
	uint64_t				write_total;

	/* Traces */
	struct ftl_trace			trace;

	/* Number of limits applied, one counter per limit level */
	uint64_t				limits[SPDK_FTL_LIMIT_MAX];
};
75 
/* Parallel unit descriptor */
struct ftl_punit {
	/* Owner device */
	struct spdk_ftl_dev			*dev;

	/* First PPA belonging to this parallel unit */
	struct ftl_ppa				start_ppa;
};
81 
/* Per-thread context: the qpair and poller that drive FTL work on one SPDK thread */
struct ftl_thread {
	/* Owner */
	struct spdk_ftl_dev			*dev;
	/* I/O queue pair */
	struct spdk_nvme_qpair			*qpair;

	/* Thread on which the poller is running */
	struct spdk_thread			*thread;

	/* Poller */
	struct spdk_poller			*poller;
	/* Poller's function */
	spdk_poller_fn				poller_fn;
	/* Poller's frequency (period in microseconds) */
	uint64_t				period_us;
};
98 
/* Device-wide metadata (shared by all bands) */
struct ftl_global_md {
	/* Device instance */
	struct spdk_uuid			uuid;
	/* Size of the l2p table */
	uint64_t				num_lbas;
};
105 
/* Non-volatile write buffer cache backed by a bdev */
struct ftl_nv_cache {
	/* Write buffer cache bdev */
	struct spdk_bdev_desc			*bdev_desc;
	/* Write pointer (next block address to write to) */
	uint64_t				current_addr;
	/* Number of available blocks left */
	uint64_t				num_available;
	/* Metadata pool */
	struct spdk_mempool			*md_pool;
	/* Cache lock protecting the fields above */
	pthread_spinlock_t			lock;
};
118 
/* Main FTL device structure */
struct spdk_ftl_dev {
	/* Device instance */
	struct spdk_uuid			uuid;
	/* Device name */
	char					*name;
	/* Configuration */
	struct spdk_ftl_conf			conf;

	/* Indicates the device is fully initialized */
	int					initialized;
	/* Indicates the device is about to be stopped */
	int					halt;

	/* Init callback */
	spdk_ftl_init_fn			init_cb;
	/* Init callback's context */
	void					*init_arg;

	/* Halt callback */
	spdk_ftl_fn				halt_cb;
	/* Halt callback's context */
	void					*halt_arg;
	/* Halt poller, checks if the device has been halted */
	struct spdk_poller			*halt_poller;

	/* IO channel */
	struct spdk_io_channel			*ioch;

	/* NVMe controller */
	struct spdk_nvme_ctrlr			*ctrlr;
	/* NVMe namespace */
	struct spdk_nvme_ns			*ns;
	/* NVMe transport ID */
	struct spdk_nvme_transport_id		trid;

	/* Non-volatile write buffer cache */
	struct ftl_nv_cache			nv_cache;

	/* LBA map memory pool */
	struct spdk_mempool			*lba_pool;

	/* LBA map requests pool */
	struct spdk_mempool			*lba_request_pool;

	/* Statistics */
	struct ftl_stats			stats;

	/* Parallel unit range */
	struct spdk_ftl_punit_range		range;
	/* Array of parallel units */
	struct ftl_punit			*punits;

	/* Current sequence number */
	uint64_t				seq;

	/* Array of bands */
	struct ftl_band				*bands;
	/* Band being currently defraged */
	struct ftl_band				*df_band;
	/* Number of operational bands */
	size_t					num_bands;
	/* Next write band */
	struct ftl_band				*next_band;
	/* Free band list */
	LIST_HEAD(, ftl_band)			free_bands;
	/* Closed bands list */
	LIST_HEAD(, ftl_band)			shut_bands;
	/* Number of free bands */
	size_t					num_free;

	/* List of write pointers */
	LIST_HEAD(, ftl_wptr)			wptr_list;

	/* Logical -> physical table */
	void					*l2p;
	/* Size of the l2p table */
	uint64_t				num_lbas;

	/* PPA format */
	struct ftl_ppa_fmt			ppaf;
	/* PPA address size (in bits; < 32 means the packed format is used) */
	size_t					ppa_len;
	/* Device's geometry */
	struct spdk_ocssd_geometry_data		geo;

	/* Flush list */
	LIST_HEAD(, ftl_flush)			flush_list;

	/* Device specific md buffer */
	struct ftl_global_md			global_md;

	/* Metadata size */
	size_t					md_size;

	/* Transfer unit size */
	size_t					xfer_size;
	/* Ring write buffer */
	struct ftl_rwb				*rwb;

	/* Current user write limit */
	int					limit;

	/* Inflight IO operations */
	uint32_t				num_inflight;
	/* Queue of IO awaiting retry */
	TAILQ_HEAD(, ftl_io)			retry_queue;

	/* Manages data relocation */
	struct ftl_reloc			*reloc;

	/* Threads */
	struct ftl_thread			core_thread;
	struct ftl_thread			read_thread;

	/* Devices' list */
	STAILQ_ENTRY(spdk_ftl_dev)		stailq;
};
236 
/* Completion callback for restore operations: (dev, restore handle, status).
 * NOTE(review): status semantics are defined by the restore implementation,
 * not visible in this header.
 */
typedef void (*ftl_restore_fn)(struct spdk_ftl_dev *, struct ftl_restore *, int);

/* Write limit / throttling helpers */
void	ftl_apply_limits(struct spdk_ftl_dev *dev);
/* I/O submission entry points */
void	ftl_io_read(struct ftl_io *io);
void	ftl_io_write(struct ftl_io *io);
int	ftl_io_erase(struct ftl_io *io);
int	ftl_io_flush(struct ftl_io *io);
int	ftl_current_limit(const struct spdk_ftl_dev *dev);
int	ftl_invalidate_addr(struct spdk_ftl_dev *dev, struct ftl_ppa ppa);
/* Poller task functions for the core/read threads */
int	ftl_task_core(void *ctx);
int	ftl_task_read(void *ctx);
void	ftl_process_anm_event(struct ftl_anm_event *event);
/* Metadata sizing helpers (sizes expressed in logical blocks) */
size_t	ftl_tail_md_num_lbks(const struct spdk_ftl_dev *dev);
size_t	ftl_tail_md_hdr_num_lbks(void);
size_t	ftl_vld_map_num_lbks(const struct spdk_ftl_dev *dev);
size_t	ftl_lba_map_num_lbks(const struct spdk_ftl_dev *dev);
size_t	ftl_head_md_num_lbks(const struct spdk_ftl_dev *dev);
/* Restore paths (metadata first, then device) */
int	ftl_restore_md(struct spdk_ftl_dev *dev, ftl_restore_fn cb);
int	ftl_restore_device(struct ftl_restore *restore, ftl_restore_fn cb);
int	ftl_band_set_direct_access(struct ftl_band *band, bool access);
int	ftl_retrieve_chunk_info(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
				struct spdk_ocssd_chunk_information_entry *info,
				unsigned int num_entries);
bool	ftl_ppa_is_written(struct ftl_band *band, struct ftl_ppa ppa);
261 
/* Build an ftl_ppa whose full 64-bit raw value is (addr) */
#define ftl_to_ppa(addr) \
	(struct ftl_ppa) { .ppa = (uint64_t)(addr) }

/* Build an ftl_ppa whose packed (32-bit) representation is (addr) */
#define ftl_to_ppa_packed(addr) \
	(struct ftl_ppa) { .pack.ppa = (uint32_t)(addr) }
267 
268 static inline struct spdk_thread *
269 ftl_get_core_thread(const struct spdk_ftl_dev *dev)
270 {
271 	return dev->core_thread.thread;
272 }
273 
274 static inline struct spdk_nvme_qpair *
275 ftl_get_write_qpair(const struct spdk_ftl_dev *dev)
276 {
277 	return dev->core_thread.qpair;
278 }
279 
280 static inline struct spdk_thread *
281 ftl_get_read_thread(const struct spdk_ftl_dev *dev)
282 {
283 	return dev->read_thread.thread;
284 }
285 
286 static inline struct spdk_nvme_qpair *
287 ftl_get_read_qpair(const struct spdk_ftl_dev *dev)
288 {
289 	return dev->read_thread.qpair;
290 }
291 
292 static inline int
293 ftl_ppa_packed(const struct spdk_ftl_dev *dev)
294 {
295 	return dev->ppa_len < 32;
296 }
297 
298 static inline int
299 ftl_ppa_invalid(struct ftl_ppa ppa)
300 {
301 	return ppa.ppa == ftl_to_ppa(FTL_PPA_INVALID).ppa;
302 }
303 
304 static inline int
305 ftl_ppa_cached(struct ftl_ppa ppa)
306 {
307 	return !ftl_ppa_invalid(ppa) && ppa.cached;
308 }
309 
310 static inline uint64_t
311 ftl_ppa_addr_pack(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
312 {
313 	uint64_t lbk, chk, pu, grp;
314 
315 	lbk = ppa.lbk;
316 	chk = ppa.chk;
317 	pu = ppa.pu;
318 	grp = ppa.grp;
319 
320 	return (lbk << dev->ppaf.lbk_offset) |
321 	       (chk << dev->ppaf.chk_offset) |
322 	       (pu  << dev->ppaf.pu_offset) |
323 	       (grp << dev->ppaf.grp_offset);
324 }
325 
326 static inline struct ftl_ppa
327 ftl_ppa_addr_unpack(const struct spdk_ftl_dev *dev, uint64_t ppa)
328 {
329 	struct ftl_ppa res = {};
330 
331 	res.lbk = (ppa >> dev->ppaf.lbk_offset) & dev->ppaf.lbk_mask;
332 	res.chk = (ppa >> dev->ppaf.chk_offset) & dev->ppaf.chk_mask;
333 	res.pu  = (ppa >> dev->ppaf.pu_offset)  & dev->ppaf.pu_mask;
334 	res.grp = (ppa >> dev->ppaf.grp_offset) & dev->ppaf.grp_mask;
335 
336 	return res;
337 }
338 
339 static inline struct ftl_ppa
340 ftl_ppa_to_packed(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
341 {
342 	struct ftl_ppa p = {};
343 
344 	if (ftl_ppa_invalid(ppa)) {
345 		p = ftl_to_ppa_packed(FTL_PPA_INVALID);
346 	} else if (ftl_ppa_cached(ppa)) {
347 		p.pack.cached = 1;
348 		p.pack.offset = (uint32_t) ppa.offset;
349 	} else {
350 		p.pack.ppa = (uint32_t) ftl_ppa_addr_pack(dev, ppa);
351 	}
352 
353 	return p;
354 }
355 
356 static inline struct ftl_ppa
357 ftl_ppa_from_packed(const struct spdk_ftl_dev *dev, struct ftl_ppa p)
358 {
359 	struct ftl_ppa ppa = {};
360 
361 	if (p.pack.ppa == (uint32_t)FTL_PPA_INVALID) {
362 		ppa = ftl_to_ppa(FTL_PPA_INVALID);
363 	} else if (p.pack.cached) {
364 		ppa.cached = 1;
365 		ppa.offset = p.pack.offset;
366 	} else {
367 		ppa = ftl_ppa_addr_unpack(dev, p.pack.ppa);
368 	}
369 
370 	return ppa;
371 }
372 
373 static inline unsigned int
374 ftl_ppa_flatten_punit(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
375 {
376 	return ppa.pu * dev->geo.num_grp + ppa.grp - dev->range.begin;
377 }
378 
379 static inline int
380 ftl_ppa_in_range(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
381 {
382 	unsigned int punit = ftl_ppa_flatten_punit(dev, ppa) + dev->range.begin;
383 
384 	if (punit >= dev->range.begin && punit <= dev->range.end) {
385 		return 1;
386 	}
387 
388 	return 0;
389 }
390 
/* Atomically store val into the l2p table, treating it as an array of
 * uint<bits>_t entries (sequentially consistent ordering).
 */
#define _ftl_l2p_set(l2p, off, val, bits) \
	__atomic_store_n(((uint##bits##_t *)(l2p)) + (off), val, __ATOMIC_SEQ_CST)

#define _ftl_l2p_set32(l2p, off, val) \
	_ftl_l2p_set(l2p, off, val, 32)

#define _ftl_l2p_set64(l2p, off, val) \
	_ftl_l2p_set(l2p, off, val, 64)

/* Atomic counterpart load from the l2p table */
#define _ftl_l2p_get(l2p, off, bits) \
	__atomic_load_n(((uint##bits##_t *)(l2p)) + (off), __ATOMIC_SEQ_CST)

#define _ftl_l2p_get32(l2p, off) \
	_ftl_l2p_get(l2p, off, 32)

#define _ftl_l2p_get64(l2p, off) \
	_ftl_l2p_get(l2p, off, 64)

/* Compare two PPAs by their raw 64-bit value */
#define ftl_ppa_cmp(p1, p2) \
	((p1).ppa == (p2).ppa)
411 
412 static inline void
413 ftl_l2p_set(struct spdk_ftl_dev *dev, uint64_t lba, struct ftl_ppa ppa)
414 {
415 	assert(dev->num_lbas > lba);
416 
417 	if (ftl_ppa_packed(dev)) {
418 		_ftl_l2p_set32(dev->l2p, lba, ftl_ppa_to_packed(dev, ppa).ppa);
419 	} else {
420 		_ftl_l2p_set64(dev->l2p, lba, ppa.ppa);
421 	}
422 }
423 
424 static inline struct ftl_ppa
425 ftl_l2p_get(struct spdk_ftl_dev *dev, uint64_t lba)
426 {
427 	assert(dev->num_lbas > lba);
428 
429 	if (ftl_ppa_packed(dev)) {
430 		return ftl_ppa_from_packed(dev, ftl_to_ppa_packed(
431 						   _ftl_l2p_get32(dev->l2p, lba)));
432 	} else {
433 		return ftl_to_ppa(_ftl_l2p_get64(dev->l2p, lba));
434 	}
435 }
436 static inline size_t
437 ftl_dev_num_bands(const struct spdk_ftl_dev *dev)
438 {
439 	return dev->geo.num_chk;
440 }
441 
442 static inline size_t
443 ftl_dev_lbks_in_chunk(const struct spdk_ftl_dev *dev)
444 {
445 	return dev->geo.clba;
446 }
447 
448 static inline size_t
449 ftl_dev_num_punits(const struct spdk_ftl_dev *dev)
450 {
451 	return dev->range.end - dev->range.begin + 1;
452 }
453 
454 static inline uint64_t
455 ftl_num_band_lbks(const struct spdk_ftl_dev *dev)
456 {
457 	return ftl_dev_num_punits(dev) * ftl_dev_lbks_in_chunk(dev);
458 }
459 
460 static inline size_t
461 ftl_vld_map_size(const struct spdk_ftl_dev *dev)
462 {
463 	return (size_t)spdk_divide_round_up(ftl_num_band_lbks(dev), CHAR_BIT);
464 }
465 
466 #endif /* FTL_CORE_H */
467