/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 */

#ifndef FTL_NV_CACHE_H
#define FTL_NV_CACHE_H

#include "spdk/stdinc.h"
#include "spdk/crc32.h"

#include "ftl_io.h"
#include "ftl_utils.h"

/*
 * The FTL non-volatile cache is divided into groups of blocks called chunks.
 * The size of each chunk is a multiple of the xfer size, plus additional
 * metadata. For each block, the associated LBA is stored in the metadata.
 * Cache space is written sequentially, chunk by chunk. When the number of
 * free chunks reaches a threshold, the oldest chunks are moved from the
 * cache to the backend storage to make room for new user data.
 */
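
/*
 * Illustrative geometry (example numbers, not defaults): with chunk_blocks =
 * 1024 and tail_md_chunk_blocks = 2, a chunk would store 1022 blocks of user
 * data followed by 2 blocks of P2L metadata. Writes fill chunks sequentially;
 * once too few free chunks remain, the oldest full chunks are compacted to
 * the base device and recycled as free chunks.
 */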

#define FTL_NVC_VERSION_0	0
#define FTL_NVC_VERSION_1	1

#define FTL_NVC_VERSION_CURRENT FTL_NVC_VERSION_1

#define FTL_NV_CACHE_NUM_COMPACTORS 8

struct ftl_nvcache_restore;
typedef void (*ftl_nv_cache_restore_fn)(struct ftl_nvcache_restore *, int, void *cb_arg);

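/*
 * Chunk state transitions (a descriptive sketch based on this header, not
 * normative documentation): FREE -> OPEN when a chunk starts accepting user
 * writes, OPEN -> CLOSED when it has been fully written, and CLOSED -> FREE
 * once its data has been compacted to the base storage.
 */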
enum ftl_chunk_state {
	FTL_CHUNK_STATE_FREE,
	FTL_CHUNK_STATE_OPEN,
	FTL_CHUNK_STATE_CLOSED,
	FTL_CHUNK_STATE_MAX
};

struct ftl_nv_cache_chunk_md {
	/* Sequence ID assigned when the chunk is opened for writing */
	uint64_t seq_id;

	/* Sequence ID when chunk was closed */
	uint64_t close_seq_id;

	/* Current write position (block offset within the chunk) */
	uint32_t write_pointer;

	/* Number of blocks written */
	uint32_t blocks_written;

	/* Number of skipped blocks (when an IO is larger than the number of blocks left in the chunk) */
	uint32_t blocks_skipped;

	/* Next block to be compacted */
	uint32_t read_pointer;

	/* Number of compacted (both valid and invalid) blocks */
	uint32_t blocks_compacted;

	/* Chunk state */
	enum ftl_chunk_state state;

	/* CRC32 checksum of the associated P2L map when chunk is in closed state */
	uint32_t p2l_map_checksum;
} __attribute__((aligned(FTL_BLOCK_SIZE)));

#define FTL_NV_CACHE_CHUNK_MD_SIZE sizeof(struct ftl_nv_cache_chunk_md)
SPDK_STATIC_ASSERT(FTL_NV_CACHE_CHUNK_MD_SIZE == FTL_BLOCK_SIZE,
		   "FTL NV Chunk metadata size is invalid");

struct ftl_nv_cache_chunk {
	struct ftl_nv_cache *nv_cache;

	struct ftl_nv_cache_chunk_md *md;

	/* Offset from the start LBA of the cache */
	uint64_t offset;

	/* P2L map */
	struct ftl_p2l_map p2l_map;

	/* Metadata request */
	struct ftl_basic_rq metadata_rq;

	TAILQ_ENTRY(ftl_nv_cache_chunk) entry;

	/* Flag indicating that the chunk is used in recovery */
	bool recovery;

	/* For writing metadata */
	struct ftl_md_io_entry_ctx md_persist_entry_ctx;
};
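
/*
 * Illustrative helper (a sketch based on the field descriptions above; the
 * name is hypothetical and not part of this API). The write pointer advances
 * over skipped blocks as well, so a chunk is fully written once the pointer
 * reaches the chunk size:
 *
 *	static bool
 *	chunk_is_fully_written(const struct ftl_nv_cache_chunk *chunk)
 *	{
 *		return chunk->md->write_pointer == chunk->nv_cache->chunk_blocks;
 *	}
 */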

struct ftl_nv_cache_compactor {
	struct ftl_nv_cache *nv_cache;
	/* Request used to write compacted data to the base device */
	struct ftl_rq *wr;
	/* Request used to read data from the cache */
	struct ftl_rq *rd;
	TAILQ_ENTRY(ftl_nv_cache_compactor) entry;
	/* Entry for waiting on bdev IO resources when submission queues are full */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
};

struct ftl_nv_cache {
	/* Flag indicating halt request */
	bool halt;

	/* Write buffer cache bdev */
	struct spdk_bdev_desc *bdev_desc;

	/* Persistent cache IO channel */
	struct spdk_io_channel *cache_ioch;

	/* Metadata pool */
	struct ftl_mempool *md_pool;

	/* P2L map memory pool */
	struct ftl_mempool *p2l_pool;

	/* Chunk md memory pool */
	struct ftl_mempool *chunk_md_pool;

	/* Chunk md memory pool for chunks being freed */
	struct ftl_mempool *free_chunk_md_pool;

	/* Per-block metadata size */
	uint64_t md_size;

	/* NV cache metadata object handle */
	struct ftl_md *md;

	/* Number of blocks in chunk */
	uint64_t chunk_blocks;

	/* Number of blocks in tail md per chunk */
	uint64_t tail_md_chunk_blocks;

	/* Number of chunks */
	uint64_t chunk_count;

	/* Currently processed chunk */
	struct ftl_nv_cache_chunk *chunk_current;

	/* Free chunks list */
	TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_free_list;
	uint64_t chunk_free_count;

	/* Open chunks list */
	TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_open_list;
	uint64_t chunk_open_count;

	/* Full chunks list */
	TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_full_list;
	uint64_t chunk_full_count;

	/* Chunks being compacted */
	TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_comp_list;
	uint64_t chunk_comp_count;

	/* Chunks whose metadata needs to be persisted before they are freed */
	TAILQ_HEAD(, ftl_nv_cache_chunk) needs_free_persist_list;
	uint64_t chunk_free_persist_count;

	/* Pool of available compactors */
	TAILQ_HEAD(, ftl_nv_cache_compactor) compactor_list;

	/* Number of compaction operations currently in progress */
	uint64_t compaction_active_count;

	/* Threshold (in chunks) that triggers compaction */
	uint64_t chunk_compaction_threshold;

	/* Array of all chunks */
	struct ftl_nv_cache_chunk *chunks;

	/* Last used sequence ID */
	uint64_t last_seq_id;
};
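
/*
 * How chunks move between the lists above (a descriptive sketch derived from
 * the list names and the overview comment; illustrative, not normative):
 *
 *	chunk_free_list -> chunk_open_list -> chunk_full_list
 *	       ^                                    |
 *	       |                                    v
 *	needs_free_persist_list <----------- chunk_comp_list
 */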

int ftl_nv_cache_init(struct spdk_ftl_dev *dev);
void ftl_nv_cache_deinit(struct spdk_ftl_dev *dev);
bool ftl_nv_cache_write(struct ftl_io *io);
void ftl_nv_cache_fill_md(struct ftl_io *io);
int ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
		      spdk_bdev_io_completion_cb cb, void *cb_arg);
bool ftl_nv_cache_full(struct ftl_nv_cache *nv_cache);
void ftl_nv_cache_process(struct spdk_ftl_dev *dev);

void ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk,
			   uint64_t offset, uint64_t lba);
uint64_t ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset);
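
/*
 * Illustrative use of the P2L accessors above (a sketch under the assumption
 * that `offset` is the block offset within the chunk):
 *
 *	ftl_chunk_map_set_lba(chunk, block_offset, lba);
 *	assert(ftl_chunk_map_get_lba(chunk, block_offset) == lba);
 */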

void ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr);

int ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache);

int ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache);

void ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache);

int ftl_nv_cache_chunks_busy(struct ftl_nv_cache *nv_cache);

static inline void
ftl_nv_cache_resume(struct ftl_nv_cache *nv_cache)
{
	nv_cache->halt = false;
}
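
/*
 * Sketch of a halt/resume sequence (illustrative; the polling loop is an
 * assumption, not something this header prescribes):
 *
 *	ftl_nv_cache_halt(nv_cache);
 *	while (!ftl_nv_cache_is_halted(nv_cache)) {
 *		ftl_nv_cache_process(dev);
 *	}
 *	...
 *	ftl_nv_cache_resume(nv_cache);
 */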

bool ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache);

size_t ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache);

uint64_t chunk_tail_md_offset(struct ftl_nv_cache *nv_cache);
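
/*
 * The tail metadata occupies the last blocks of each chunk, so a natural
 * reading of the helper above (an assumption, not guaranteed by this header)
 * is:
 *
 *	chunk_tail_md_offset(nv_cache) ==
 *		nv_cache->chunk_blocks - nv_cache->tail_md_chunk_blocks
 */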

/**
 * @brief Iterates over the NV cache's chunks and returns the max open and closed sequence IDs
 *
 * @param nv_cache FTL NV cache
 * @param[out] open_seq_id Max detected open sequence ID
 * @param[out] close_seq_id Max detected close sequence ID
 */
void ftl_nv_cache_get_max_seq_id(struct ftl_nv_cache *nv_cache, uint64_t *open_seq_id,
				 uint64_t *close_seq_id);
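
/*
 * Illustrative call (variable names are hypothetical):
 *
 *	uint64_t open_seq_id = 0, close_seq_id = 0;
 *
 *	ftl_nv_cache_get_max_seq_id(nv_cache, &open_seq_id, &close_seq_id);
 */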

typedef int (*ftl_chunk_md_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx);

struct ftl_nv_cache_chunk *ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev,
		ftl_addr addr);

#endif  /* FTL_NV_CACHE_H */