/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef FTL_CORE_H
#define FTL_CORE_H

#include "spdk/stdinc.h"
#include "spdk/uuid.h"
#include "spdk/thread.h"
#include "spdk/util.h"
#include "spdk_internal/log.h"
#include "spdk/likely.h"
#include "spdk/queue.h"
#include "spdk/ftl.h"
#include "spdk/bdev.h"
#include "spdk/bdev_zone.h"

#include "ftl_addr.h"
#include "ftl_io.h"
#include "ftl_trace.h"

struct spdk_ftl_dev;
struct ftl_band;
struct ftl_zone;
struct ftl_io;
struct ftl_restore;
struct ftl_wptr;
struct ftl_flush;
struct ftl_reloc;
struct ftl_anm_event;
struct ftl_band_flush;

struct ftl_stats {
	/* Number of writes scheduled directly by the user */
	uint64_t				write_user;

	/* Total number of writes */
	uint64_t				write_total;

	/* Traces */
	struct ftl_trace			trace;

	/* Number of limits applied */
	uint64_t				limits[SPDK_FTL_LIMIT_MAX];
};

struct ftl_thread {
	/* Owner */
	struct spdk_ftl_dev			*dev;

	/* Thread on which the poller is running */
	struct spdk_thread			*thread;

	/* IO channel */
	struct spdk_io_channel			*ioch;
	/* Poller */
	struct spdk_poller			*poller;
	/* Poller's function */
	spdk_poller_fn				poller_fn;
	/* Poller's frequency */
	uint64_t				period_us;
};

struct ftl_global_md {
	/* Device instance */
	struct spdk_uuid			uuid;
	/* Size of the l2p table */
	uint64_t				num_lbas;
};

struct ftl_nv_cache {
	/* Write buffer cache bdev */
	struct spdk_bdev_desc			*bdev_desc;
	/* Write pointer */
	uint64_t				current_addr;
	/* Number of blocks still available */
	uint64_t				num_available;
	/* Maximum number of blocks */
	uint64_t				num_data_blocks;
	/*
	 * Phase of the current cycle of writes. Each time the whole cache area is filled, the
	 * phase is advanced. The current phase is saved in every IO's metadata, as well as in the
	 * header saved in the first sector. By looking at the phase of each block, it's possible
	 * to find the oldest block and replay the order of the writes when recovering the data
	 * from the cache.
	 */
	unsigned int				phase;
	/* Indicates that the data can be written to the cache */
	bool					ready;
	/* Metadata pool */
	struct spdk_mempool			*md_pool;
	/* DMA buffer for writing the header */
	void					*dma_buf;
	/* Cache lock */
	pthread_spinlock_t			lock;
};

struct spdk_ftl_dev {
	/* Device instance */
	struct spdk_uuid			uuid;
	/* Device name */
	char					*name;
	/* Configuration */
	struct spdk_ftl_conf			conf;

	/* Indicates the device is fully initialized */
	int					initialized;
	/* Indicates the device is about to be stopped */
	int					halt;
	/* Indicates the device has started stopping - used to handle multiple stop requests */
	bool					halt_started;

	/* Underlying device */
	struct spdk_bdev_desc			*base_bdev_desc;

	/* Non-volatile write buffer cache */
	struct ftl_nv_cache			nv_cache;

	/* LBA map memory pool */
	struct spdk_mempool			*lba_pool;

	/* LBA map requests pool */
	struct spdk_mempool			*lba_request_pool;

	/* Media management events pool */
	struct spdk_mempool			*media_events_pool;

	/* Statistics */
	struct ftl_stats			stats;

	/* Current sequence number */
	uint64_t				seq;

	/* Array of bands */
	struct ftl_band				*bands;
	/* Number of operational bands */
	size_t					num_bands;
	/* Next write band */
	struct ftl_band				*next_band;
	/* Free band list */
	LIST_HEAD(, ftl_band)			free_bands;
	/* Closed bands list */
	LIST_HEAD(, ftl_band)			shut_bands;
	/* Number of free bands */
	size_t					num_free;

	/* List of write pointers */
	LIST_HEAD(, ftl_wptr)			wptr_list;

	/* Logical -> physical table */
	void					*l2p;
	/* Size of the l2p table */
	uint64_t				num_lbas;

	/* Address size */
	size_t					addr_len;

	/* Flush list */
	LIST_HEAD(, ftl_flush)			flush_list;
	/* List of band flush requests */
	LIST_HEAD(, ftl_band_flush)		band_flush_list;

	/* Device specific md buffer */
	struct ftl_global_md			global_md;

	/* Metadata size */
	size_t					md_size;

	/* Transfer unit size */
	size_t					xfer_size;
	/* Ring write buffer */
	struct ftl_rwb				*rwb;

	/* Current user write limit */
	int					limit;

	/* Inflight IO operations */
	uint32_t				num_inflight;
	/* Queue of IO awaiting retry */
	TAILQ_HEAD(, ftl_io)			retry_queue;

	/* Manages data relocation */
	struct ftl_reloc			*reloc;

	/* Threads */
	struct ftl_thread			core_thread;

	/* Devices' list */
	STAILQ_ENTRY(spdk_ftl_dev)		stailq;
};

struct ftl_nv_cache_header {
	/* Version of the header */
	uint32_t				version;
	/* UUID of the FTL device */
	struct spdk_uuid			uuid;
	/* Size of the non-volatile cache (in blocks) */
	uint64_t				size;
	/* Contains the next address to be written after clean shutdown, invalid LBA otherwise */
	uint64_t				current_addr;
	/* Current phase */
	uint8_t					phase;
	/* Checksum of the header, needs to be last element */
	uint32_t				checksum;
} __attribute__((packed));
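
/*
 * Illustrative sketch, not part of this header: because the checksum field
 * is last, everything before it can be summed in a single pass. Assuming a
 * CRC32-C checksum (spdk/crc32.h; the algorithm actually used is decided
 * where the header is written, not here), a writer could fill the field in
 * like so:
 *
 *	hdr->checksum = spdk_crc32c_update(hdr,
 *			offsetof(struct ftl_nv_cache_header, checksum), 0);
 */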

struct ftl_media_event {
	/* Owner */
	struct spdk_ftl_dev			*dev;
	/* Media event */
	struct spdk_bdev_media_event		event;
};

typedef void (*ftl_restore_fn)(struct ftl_restore *, int, void *cb_arg);

void	ftl_apply_limits(struct spdk_ftl_dev *dev);
void	ftl_io_read(struct ftl_io *io);
void	ftl_io_write(struct ftl_io *io);
int	ftl_flush_rwb(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg);
int	ftl_current_limit(const struct spdk_ftl_dev *dev);
int	ftl_invalidate_addr(struct spdk_ftl_dev *dev, struct ftl_addr addr);
int	ftl_task_core(void *ctx);
int	ftl_task_read(void *ctx);
void	ftl_process_anm_event(struct ftl_anm_event *event);
size_t	ftl_tail_md_num_blocks(const struct spdk_ftl_dev *dev);
size_t	ftl_tail_md_hdr_num_blocks(void);
size_t	ftl_vld_map_num_blocks(const struct spdk_ftl_dev *dev);
size_t	ftl_lba_map_num_blocks(const struct spdk_ftl_dev *dev);
size_t	ftl_head_md_num_blocks(const struct spdk_ftl_dev *dev);
int	ftl_restore_md(struct spdk_ftl_dev *dev, ftl_restore_fn cb, void *cb_arg);
int	ftl_restore_device(struct ftl_restore *restore, ftl_restore_fn cb, void *cb_arg);
void	ftl_restore_nv_cache(struct ftl_restore *restore, ftl_restore_fn cb, void *cb_arg);
int	ftl_band_set_direct_access(struct ftl_band *band, bool access);
bool	ftl_addr_is_written(struct ftl_band *band, struct ftl_addr addr);
int	ftl_flush_active_bands(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg);
int	ftl_nv_cache_write_header(struct ftl_nv_cache *nv_cache, bool shutdown,
				  spdk_bdev_io_completion_cb cb_fn, void *cb_arg);
int	ftl_nv_cache_scrub(struct ftl_nv_cache *nv_cache, spdk_bdev_io_completion_cb cb_fn,
			   void *cb_arg);
void	ftl_get_media_events(struct spdk_ftl_dev *dev);

struct spdk_io_channel *
ftl_get_io_channel(const struct spdk_ftl_dev *dev);

#define ftl_to_addr(address) \
	(struct ftl_addr) { .offset = (uint64_t)(address) }

#define ftl_to_addr_packed(address) \
	(struct ftl_addr) { .pack.offset = (uint32_t)(address) }

static inline struct spdk_thread *
ftl_get_core_thread(const struct spdk_ftl_dev *dev)
{
	return dev->core_thread.thread;
}

static inline size_t
ftl_get_num_bands(const struct spdk_ftl_dev *dev)
{
	return dev->num_bands;
}

static inline size_t
ftl_get_num_punits(const struct spdk_ftl_dev *dev)
{
	return spdk_bdev_get_optimal_open_zones(spdk_bdev_desc_get_bdev(dev->base_bdev_desc));
}

static inline size_t
ftl_get_num_zones(const struct spdk_ftl_dev *dev)
{
	return ftl_get_num_bands(dev) * ftl_get_num_punits(dev);
}

static inline size_t
ftl_get_num_blocks_in_zone(const struct spdk_ftl_dev *dev)
{
	return spdk_bdev_get_zone_size(spdk_bdev_desc_get_bdev(dev->base_bdev_desc));
}

static inline uint64_t
ftl_get_num_blocks_in_band(const struct spdk_ftl_dev *dev)
{
	return ftl_get_num_punits(dev) * ftl_get_num_blocks_in_zone(dev);
}
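
/*
 * Worked example of the geometry helpers above (hypothetical numbers): a
 * band stripes one zone across every parallel unit of the base bdev. With
 * 8 optimal open zones (punits) and 1024 blocks per zone:
 *
 *	ftl_get_num_blocks_in_band(dev) == 8 * 1024 == 8192
 *	ftl_get_num_zones(dev) == ftl_get_num_bands(dev) * 8
 */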

static inline uint64_t
ftl_addr_get_zone_slba(const struct spdk_ftl_dev *dev, struct ftl_addr addr)
{
	return addr.offset - (addr.offset % ftl_get_num_blocks_in_zone(dev));
}

static inline uint64_t
ftl_addr_get_band(const struct spdk_ftl_dev *dev, struct ftl_addr addr)
{
	return addr.offset / ftl_get_num_blocks_in_band(dev);
}

static inline uint64_t
ftl_addr_get_punit(const struct spdk_ftl_dev *dev, struct ftl_addr addr)
{
	return (addr.offset / ftl_get_num_blocks_in_zone(dev)) % ftl_get_num_punits(dev);
}

static inline uint64_t
ftl_addr_get_zone_offset(const struct spdk_ftl_dev *dev, struct ftl_addr addr)
{
	return addr.offset % ftl_get_num_blocks_in_zone(dev);
}

static inline size_t
ftl_vld_map_size(const struct spdk_ftl_dev *dev)
{
	return (size_t)spdk_divide_round_up(ftl_get_num_blocks_in_band(dev), CHAR_BIT);
}

static inline int
ftl_addr_packed(const struct spdk_ftl_dev *dev)
{
	return dev->addr_len < 32;
}

static inline int
ftl_addr_invalid(struct ftl_addr addr)
{
	return addr.offset == ftl_to_addr(FTL_ADDR_INVALID).offset;
}

static inline int
ftl_addr_cached(struct ftl_addr addr)
{
	return !ftl_addr_invalid(addr) && addr.cached;
}

static inline struct ftl_addr
ftl_addr_to_packed(const struct spdk_ftl_dev *dev, struct ftl_addr addr)
{
	struct ftl_addr p = {};

	if (ftl_addr_invalid(addr)) {
		p = ftl_to_addr_packed(FTL_ADDR_INVALID);
	} else if (ftl_addr_cached(addr)) {
		p.pack.cached = 1;
		p.pack.cache_offset = (uint32_t) addr.cache_offset;
	} else {
		p.pack.offset = (uint32_t) addr.offset;
	}

	return p;
}

static inline struct ftl_addr
ftl_addr_from_packed(const struct spdk_ftl_dev *dev, struct ftl_addr p)
{
	struct ftl_addr addr = {};

	if (p.pack.offset == (uint32_t)FTL_ADDR_INVALID) {
		addr = ftl_to_addr(FTL_ADDR_INVALID);
	} else if (p.pack.cached) {
		addr.cached = 1;
		addr.cache_offset = p.pack.cache_offset;
	} else {
		addr = p;
	}

	return addr;
}
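
/*
 * Illustrative sketch (hypothetical caller): when dev->addr_len < 32 the L2P
 * stores 32-bit packed entries, so any representable address must survive a
 * pack/unpack round trip:
 *
 *	struct ftl_addr addr = ftl_to_addr(1234);
 *	struct ftl_addr p = ftl_addr_to_packed(dev, addr);
 *	struct ftl_addr back = ftl_addr_from_packed(dev, p);
 *
 *	assert(back.offset == addr.offset);
 */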

#define _ftl_l2p_set(l2p, off, val, bits) \
	__atomic_store_n(((uint##bits##_t *)(l2p)) + (off), val, __ATOMIC_SEQ_CST)

#define _ftl_l2p_set32(l2p, off, val) \
	_ftl_l2p_set(l2p, off, val, 32)

#define _ftl_l2p_set64(l2p, off, val) \
	_ftl_l2p_set(l2p, off, val, 64)

#define _ftl_l2p_get(l2p, off, bits) \
	__atomic_load_n(((uint##bits##_t *)(l2p)) + (off), __ATOMIC_SEQ_CST)

#define _ftl_l2p_get32(l2p, off) \
	_ftl_l2p_get(l2p, off, 32)

#define _ftl_l2p_get64(l2p, off) \
	_ftl_l2p_get(l2p, off, 64)

#define ftl_addr_cmp(p1, p2) \
	((p1).offset == (p2).offset)

static inline void
ftl_l2p_set(struct spdk_ftl_dev *dev, uint64_t lba, struct ftl_addr addr)
{
	assert(dev->num_lbas > lba);

	if (ftl_addr_packed(dev)) {
		_ftl_l2p_set32(dev->l2p, lba, ftl_addr_to_packed(dev, addr).offset);
	} else {
		_ftl_l2p_set64(dev->l2p, lba, addr.offset);
	}
}

static inline struct ftl_addr
ftl_l2p_get(struct spdk_ftl_dev *dev, uint64_t lba)
{
	assert(dev->num_lbas > lba);

	if (ftl_addr_packed(dev)) {
		return ftl_addr_from_packed(dev, ftl_to_addr_packed(
						    _ftl_l2p_get32(dev->l2p, lba)));
	} else {
		return ftl_to_addr(_ftl_l2p_get64(dev->l2p, lba));
	}
}
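
/*
 * Illustrative sketch (hypothetical caller): the accessors above hide the
 * entry width, picking atomic 32-bit packed entries when ftl_addr_packed(dev)
 * and raw 64-bit offsets otherwise, so a lookup sees the last update either
 * way:
 *
 *	struct ftl_addr addr = ftl_to_addr(1234);
 *	uint64_t lba = 42;	// hypothetical, must be below dev->num_lbas
 *
 *	ftl_l2p_set(dev, lba, addr);
 *	assert(ftl_addr_cmp(ftl_l2p_get(dev, lba), addr));
 */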

static inline bool
ftl_dev_has_nv_cache(const struct spdk_ftl_dev *dev)
{
	return dev->nv_cache.bdev_desc != NULL;
}

#define FTL_NV_CACHE_HEADER_VERSION	(1)
#define FTL_NV_CACHE_DATA_OFFSET	(1)
#define FTL_NV_CACHE_PHASE_OFFSET	(62)
#define FTL_NV_CACHE_PHASE_COUNT	(4)
#define FTL_NV_CACHE_PHASE_MASK		(3ULL << FTL_NV_CACHE_PHASE_OFFSET)
#define FTL_NV_CACHE_LBA_INVALID	(FTL_LBA_INVALID & ~FTL_NV_CACHE_PHASE_MASK)

static inline bool
ftl_nv_cache_phase_is_valid(unsigned int phase)
{
	return phase > 0 && phase <= 3;
}

static inline unsigned int
ftl_nv_cache_next_phase(unsigned int current)
{
	static const unsigned int phases[] = { 0, 2, 3, 1 };
	assert(ftl_nv_cache_phase_is_valid(current));
	return phases[current];
}

static inline unsigned int
ftl_nv_cache_prev_phase(unsigned int current)
{
	static const unsigned int phases[] = { 0, 3, 1, 2 };
	assert(ftl_nv_cache_phase_is_valid(current));
	return phases[current];
}
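
/*
 * The lookup tables above encode the cycle 1 -> 2 -> 3 -> 1; phase 0 is
 * never returned, since it marks blocks that were never written. For
 * example:
 *
 *	ftl_nv_cache_next_phase(1) == 2
 *	ftl_nv_cache_next_phase(3) == 1	(wraps around)
 *	ftl_nv_cache_prev_phase(1) == 3
 */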

static inline uint64_t
ftl_nv_cache_pack_lba(uint64_t lba, unsigned int phase)
{
	assert(ftl_nv_cache_phase_is_valid(phase));
	return (lba & ~FTL_NV_CACHE_PHASE_MASK) | ((uint64_t)phase << FTL_NV_CACHE_PHASE_OFFSET);
}

static inline void
ftl_nv_cache_unpack_lba(uint64_t in_lba, uint64_t *out_lba, unsigned int *phase)
{
	*out_lba = in_lba & ~FTL_NV_CACHE_PHASE_MASK;
	*phase = (in_lba & FTL_NV_CACHE_PHASE_MASK) >> FTL_NV_CACHE_PHASE_OFFSET;

	/* If the phase is invalid the block wasn't written yet, so treat the LBA as invalid too */
	if (!ftl_nv_cache_phase_is_valid(*phase) || *out_lba == FTL_NV_CACHE_LBA_INVALID) {
		*out_lba = FTL_LBA_INVALID;
	}
}
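
/*
 * Illustrative round trip: the phase rides in the top two bits of the
 * on-disk LBA (FTL_NV_CACHE_PHASE_OFFSET), so packing and unpacking
 * recovers both fields for any LBA that stays clear of the phase mask:
 *
 *	uint64_t lba;
 *	unsigned int phase;
 *
 *	ftl_nv_cache_unpack_lba(ftl_nv_cache_pack_lba(42, 2), &lba, &phase);
 *	assert(lba == 42 && phase == 2);
 */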

static inline bool
ftl_is_append_supported(const struct spdk_ftl_dev *dev)
{
	return dev->conf.use_append;
}

#endif /* FTL_CORE_H */