/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright 2023 Solidigm All Rights Reserved
 *   Copyright (C) 2022 Intel Corporation.
 *   All rights reserved.
 */

#ifndef FTL_NV_CACHE_H
#define FTL_NV_CACHE_H

#include "spdk/stdinc.h"
#include "spdk/crc32.h"

#include "ftl_io.h"
#include "ftl_utils.h"
#include "nvc/ftl_nvc_dev.h"

/*
 * The FTL non-volatile cache is divided into groups of blocks called chunks.
 * The size of each chunk is a multiple of the xfer size plus additional
 * metadata. For each block, the associated LBA is stored in the metadata.
 * Cache space is written sequentially, chunk by chunk. When the number of
 * free chunks falls below a certain threshold, the oldest chunks are moved
 * from the cache to the backend storage to create space for new user data.
 */
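
/*
 * Illustrative example (hypothetical numbers, not a required geometry): with
 * an xfer size of 64 blocks and 2 tail metadata blocks, a chunk could span
 * 1024 + 2 blocks. The first 1024 blocks hold user data written in 64-block
 * units, and the tail blocks persist the chunk's P2L map (one LBA entry per
 * data block).
 */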

#define FTL_NVC_VERSION_0	0
#define FTL_NVC_VERSION_1	1
#define FTL_NVC_VERSION_2	2

#define FTL_NVC_VERSION_CURRENT FTL_NVC_VERSION_2

#define FTL_NV_CACHE_NUM_COMPACTORS 8

/*
 * Parameters controlling nv cache write throttling.
 *
 * The write throttle limit value is calculated as follows:
 * limit = compaction_average_bw * (1.0 + modifier)
 *
 * The modifier depends on the number of free chunks vs the configured threshold. Its value is
 * zero if the number of free chunks is at the threshold, negative if below and positive if above.
 */

/* Interval in milliseconds between write throttle updates. */
#define FTL_NV_CACHE_THROTTLE_INTERVAL_MS	20
/* Throttle modifier proportional gain */
#define FTL_NV_CACHE_THROTTLE_MODIFIER_KP	20
/* Min and max modifier values */
#define FTL_NV_CACHE_THROTTLE_MODIFIER_MIN	-0.8
#define FTL_NV_CACHE_THROTTLE_MODIFIER_MAX	0.5
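
/*
 * A sketch of the calculation implied by the comment above, with hypothetical
 * names (the exact scaling lives in the .c file):
 *
 *   error    = (double)(free_chunks - free_target) / free_target;
 *   modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_KP * error;
 *   modifier = clamped to [FTL_NV_CACHE_THROTTLE_MODIFIER_MIN,
 *                          FTL_NV_CACHE_THROTTLE_MODIFIER_MAX];
 *   limit    = compaction_average_bw * (1.0 + modifier);
 */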

struct ftl_nvcache_restore;
typedef void (*ftl_nv_cache_restore_fn)(struct ftl_nvcache_restore *, int, void *cb_arg);

enum ftl_chunk_state {
	FTL_CHUNK_STATE_FREE,
	FTL_CHUNK_STATE_OPEN,
	FTL_CHUNK_STATE_CLOSED,
	FTL_CHUNK_STATE_INACTIVE,
	FTL_CHUNK_STATE_MAX
};
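
/*
 * Typical chunk lifecycle implied by the states above and the lists in
 * struct ftl_nv_cache below: a FREE chunk is opened for writing (OPEN),
 * becomes CLOSED once fully written, is then compacted to the base device
 * and eventually returns to FREE. INACTIVE marks chunks excluded from this
 * cycle.
 */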

struct ftl_nv_cache_chunk_md {
	/* Chunk metadata version */
	uint64_t version;

	/* Sequence ID assigned when the chunk was opened for writing */
	uint64_t seq_id;

	/* Sequence ID when the chunk was closed */
	uint64_t close_seq_id;

	/* Current LBA to write */
	uint32_t write_pointer;

	/* Number of blocks written */
	uint32_t blocks_written;

	/* Number of skipped blocks (the case when the IO size is greater than
	 * the number of blocks left in the chunk) */
	uint32_t blocks_skipped;

	/* Next block to be compacted */
	uint32_t read_pointer;

	/* Number of compacted (both valid and invalid) blocks */
	uint32_t blocks_compacted;

	/* Chunk state */
	enum ftl_chunk_state state;

	/* CRC32 checksum of the associated P2L map when the chunk is in the closed state */
	uint32_t p2l_map_checksum;

	/* Reserved padding so the struct fills exactly one FTL block */
	uint8_t reserved[4044];
} __attribute__((packed));

SPDK_STATIC_ASSERT(sizeof(struct ftl_nv_cache_chunk_md) == FTL_BLOCK_SIZE,
		   "FTL NV Chunk metadata size is invalid");
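
/*
 * A sketch of how a P2L map checksum could be produced with the helpers from
 * spdk/crc32.h included above (illustrative only; map_vaddr and map_size are
 * hypothetical names, and the FTL code has its own helper for this):
 *
 *   md->p2l_map_checksum = spdk_crc32c_update(map_vaddr, map_size, 0);
 */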

struct ftl_nv_cache_chunk {
	struct ftl_nv_cache *nv_cache;

	struct ftl_nv_cache_chunk_md *md;

	/* Offset from the start LBA of the cache */
	uint64_t offset;

	/* P2L map */
	struct ftl_p2l_map p2l_map;

	/* Metadata request */
	struct ftl_basic_rq metadata_rq;

	TAILQ_ENTRY(ftl_nv_cache_chunk) entry;

	/* Flag indicating the chunk is being used in recovery */
	bool recovery;

	/* Compaction start time */
	uint64_t compaction_start_tsc;

	/* Compaction duration */
	uint64_t compaction_length_tsc;

	/* For writing metadata */
	struct ftl_md_io_entry_ctx md_persist_entry_ctx;
};

struct ftl_nv_cache_compactor {
	struct ftl_nv_cache *nv_cache;
	struct ftl_rq *rq;
	TAILQ_ENTRY(ftl_nv_cache_compactor) entry;
	struct spdk_bdev_io_wait_entry bdev_io_wait;
};

struct ftl_nv_cache {
	/* Flag indicating halt request */
	bool halt;

	/* NV cache device type */
	const struct ftl_nv_cache_device_type *nvc_type;

	/* Write buffer cache bdev */
	struct spdk_bdev_desc *bdev_desc;

	/* Persistent cache IO channel */
	struct spdk_io_channel *cache_ioch;

	/* Metadata pool */
	struct ftl_mempool *md_pool;

	/* P2L map memory pool */
	struct ftl_mempool *p2l_pool;

	/* Chunk md memory pool */
	struct ftl_mempool *chunk_md_pool;

	/* Chunk md memory pool for freeing chunks */
	struct ftl_mempool *free_chunk_md_pool;

	/* Block metadata size */
	uint64_t md_size;

	/* NV cache metadata object handle */
	struct ftl_md *md;

	/* Number of blocks in a chunk */
	uint64_t chunk_blocks;

	/* Number of tail md blocks per chunk */
	uint64_t tail_md_chunk_blocks;

	/* Number of chunks */
	uint64_t chunk_count;

	/* Currently processed chunk */
	struct ftl_nv_cache_chunk *chunk_current;

	/* Free chunks list */
	TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_free_list;
	uint64_t chunk_free_count;

	/* Open chunks list */
	TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_open_list;
	uint64_t chunk_open_count;

	/* Full chunks list */
	TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_full_list;
	uint64_t chunk_full_count;

	/* Chunks being compacted */
	TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_comp_list;
	uint64_t chunk_comp_count;

	/* Chunks being freed */
	TAILQ_HEAD(, ftl_nv_cache_chunk) needs_free_persist_list;
	uint64_t chunk_free_persist_count;

	/* Inactive chunks list */
	TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_inactive_list;
	uint64_t chunk_inactive_count;

	TAILQ_HEAD(, ftl_nv_cache_compactor) compactor_list;
	uint64_t compaction_active_count;
	uint64_t chunk_compaction_threshold;

	struct ftl_nv_cache_chunk *chunks;

	uint64_t last_seq_id;

	uint64_t chunk_free_target;

	/* Simple moving average of recent compaction velocity values */
	double compaction_sma;

#define FTL_NV_CACHE_COMPACTION_SMA_N (FTL_NV_CACHE_NUM_COMPACTORS * 2)
	/* Circular buffer holding values for calculating compaction SMA */
	struct compaction_bw_stats {
		double buf[FTL_NV_CACHE_COMPACTION_SMA_N];
		ptrdiff_t first;
		size_t count;
		double sum;
	} compaction_recent_bw;
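
	/* Illustrative SMA update over this circular buffer (a sketch, not the
	 * implementation): when a new bandwidth sample arrives and the buffer
	 * is full, the oldest entry at 'first' is subtracted from 'sum' and
	 * overwritten with the sample, and 'first' advances modulo
	 * FTL_NV_CACHE_COMPACTION_SMA_N; compaction_sma is then sum / count.
	 */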

	struct {
		uint64_t interval_tsc;
		uint64_t start_tsc;
		uint64_t blocks_submitted;
		uint64_t blocks_submitted_limit;
	} throttle;
};

typedef void (*nvc_scrub_cb)(struct spdk_ftl_dev *dev, void *cb_ctx, int status);

void ftl_nv_cache_scrub(struct spdk_ftl_dev *dev, nvc_scrub_cb cb, void *cb_ctx);

int ftl_nv_cache_init(struct spdk_ftl_dev *dev);
void ftl_nv_cache_deinit(struct spdk_ftl_dev *dev);
bool ftl_nv_cache_write(struct ftl_io *io);
void ftl_nv_cache_fill_md(struct ftl_io *io);
int ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
		      spdk_bdev_io_completion_cb cb, void *cb_arg);
bool ftl_nv_cache_throttle(struct spdk_ftl_dev *dev);
void ftl_nv_cache_process(struct spdk_ftl_dev *dev);

void ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk,
			   uint64_t offset, uint64_t lba);
uint64_t ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset);
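
/*
 * Usage sketch (illustrative): while filling block 'i' of an open chunk, the
 * cache records the user LBA so compaction can later translate the chunk
 * offset back to an LBA:
 *
 *   ftl_chunk_map_set_lba(chunk, i, lba);
 *   assert(ftl_chunk_map_get_lba(chunk, i) == lba);
 */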

void ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr);

int ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache);

int ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache);

void ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache);

int ftl_nv_cache_chunks_busy(struct ftl_nv_cache *nv_cache);

static inline void
ftl_nv_cache_resume(struct ftl_nv_cache *nv_cache)
{
	nv_cache->halt = false;
}

bool ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache);

size_t ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache);

uint64_t chunk_tail_md_offset(struct ftl_nv_cache *nv_cache);
/**
 * @brief Iterates over the NV cache's chunks and returns the max open and closed sequence IDs
 *
 * @param nv_cache FTL NV cache
 * @param[out] open_seq_id Max detected open sequence ID
 * @param[out] close_seq_id Max detected close sequence ID
 */
void ftl_nv_cache_get_max_seq_id(struct ftl_nv_cache *nv_cache, uint64_t *open_seq_id,
				 uint64_t *close_seq_id);

void ftl_mngt_nv_cache_restore_chunk_state(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);

void ftl_mngt_nv_cache_recover_open_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);

typedef int (*ftl_chunk_md_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx);

void ftl_mngt_nv_cache_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
				   ftl_chunk_md_cb cb, void *cb_ctx);

struct ftl_nv_cache_chunk *ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev,
		ftl_addr addr);

uint64_t ftl_nv_cache_acquire_trim_seq_id(struct ftl_nv_cache *nv_cache);

void ftl_nv_cache_chunk_md_initialize(struct ftl_nv_cache_chunk_md *md);

#endif  /* FTL_NV_CACHE_H */
299