/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   Copyright 2023 Solidigm All Rights Reserved
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/queue.h"
#include "spdk/bdev_module.h"

#include "ftl_core.h"
#include "ftl_band.h"
#include "ftl_internal.h"

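/*
 * Completion callback for band data writes issued via ftl_band_rq_write().
 * Accounts the I/O as compaction or GC traffic (based on the request owner),
 * then issues a P2L checkpoint on success. On failure the request is handed
 * back to its owner only when built with SPDK_FTL_RETRY_ON_ERROR; otherwise
 * the device aborts.
 */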
static void
write_rq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_rq *rq = arg;
	struct spdk_ftl_dev *dev = rq->dev;

	ftl_stats_bdev_io_completed(dev, rq->owner.compaction ? FTL_STATS_TYPE_CMP : FTL_STATS_TYPE_GC,
				    bdev_io);
	spdk_bdev_free_io(bdev_io);

	rq->success = success;
	if (spdk_likely(rq->success)) {
		ftl_p2l_ckpt_issue(rq);
	} else {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		assert(rq->io.band->queue_depth > 0);
		rq->io.band->queue_depth--;
		rq->owner.cb(rq);
#else
		ftl_abort();
#endif
	}
}

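/*
 * Submits the write to the base bdev. On -ENOMEM the request is queued on
 * the bdev's io_wait list so this function is retried once resources free
 * up; any other submission error is fatal.
 */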
static void
ftl_band_rq_bdev_write(void *_rq)
{
	struct ftl_rq *rq = _rq;
	struct ftl_band *band = rq->io.band;
	struct spdk_ftl_dev *dev = band->dev;
	int rc;

	rc = spdk_bdev_write_blocks(dev->base_bdev_desc, dev->base_ioch,
				    rq->io_payload, rq->io.addr, rq->num_blocks,
				    write_rq_end, rq);

	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			rq->io.bdev_io_wait.bdev = bdev;
			rq->io.bdev_io_wait.cb_fn = ftl_band_rq_bdev_write;
			rq->io.bdev_io_wait.cb_arg = rq;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &rq->io.bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

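/*
 * Writes the request at the band's current write pointer and advances the
 * band iterator. Once the band has been completely written it transitions
 * to FTL_BAND_STATE_FULL and the owner is notified via state_change_fn.
 */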
void
ftl_band_rq_write(struct ftl_band *band, struct ftl_rq *rq)
{
	struct spdk_ftl_dev *dev = band->dev;

	rq->success = false;
	rq->io.band = band;
	rq->io.addr = band->md->iter.addr;

	ftl_band_rq_bdev_write(rq);

	band->queue_depth++;
	dev->stats.io_activity_total += rq->num_blocks;

	ftl_band_iter_advance(band, rq->num_blocks);
	if (ftl_band_filled(band, band->md->iter.offset)) {
		ftl_band_set_state(band, FTL_BAND_STATE_FULL);
		band->owner.state_change_fn(band);
	}
}

static void ftl_band_rq_bdev_read(void *_entry);

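/*
 * Completion callback for a single band read entry. A failed read is
 * reissued immediately; the band's queue depth is dropped and the owner
 * notified only on success.
 */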
static void
read_rq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_rq_entry *entry = arg;
	struct ftl_band *band = entry->io.band;
	struct ftl_rq *rq = ftl_rq_from_entry(entry);

	ftl_stats_bdev_io_completed(band->dev, FTL_STATS_TYPE_GC, bdev_io);

	rq->success = success;
	if (spdk_unlikely(!success)) {
		ftl_band_rq_bdev_read(entry);
		spdk_bdev_free_io(bdev_io);
		return;
	}

	assert(band->queue_depth > 0);
	band->queue_depth--;

	rq->owner.cb(rq);
	spdk_bdev_free_io(bdev_io);
}

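/* Read-side counterpart of ftl_band_rq_bdev_write(), with the same -ENOMEM io_wait handling */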
static void
ftl_band_rq_bdev_read(void *_entry)
{
	struct ftl_rq_entry *entry = _entry;
	struct ftl_rq *rq = ftl_rq_from_entry(entry);
	struct spdk_ftl_dev *dev = rq->dev;
	int rc;

	rc = spdk_bdev_read_blocks(dev->base_bdev_desc, dev->base_ioch, entry->io_payload,
				   entry->bdev_io.offset_blocks, entry->bdev_io.num_blocks,
				   read_rq_end, entry);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			entry->bdev_io.wait_entry.bdev = bdev;
			entry->bdev_io.wait_entry.cb_fn = ftl_band_rq_bdev_read;
			entry->bdev_io.wait_entry.cb_arg = entry;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &entry->bdev_io.wait_entry);
		} else {
			ftl_abort();
		}
	}
}

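/*
 * Reads rq->iter.count blocks from the band's current iterator position
 * into the entry at rq->iter.idx. Completions are accounted as GC traffic,
 * so this path appears to serve relocation reads.
 */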
void
ftl_band_rq_read(struct ftl_band *band, struct ftl_rq *rq)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_rq_entry *entry = &rq->entries[rq->iter.idx];

	assert(rq->iter.idx + rq->iter.count <= rq->num_blocks);

	rq->success = false;
	rq->io.band = band;
	rq->io.addr = band->md->iter.addr;
	entry->io.band = band;
	entry->bdev_io.offset_blocks = rq->io.addr;
	entry->bdev_io.num_blocks = rq->iter.count;

	ftl_band_rq_bdev_read(entry);

	dev->stats.io_activity_total += rq->num_blocks;
	band->queue_depth++;
}

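/*
 * Completion callback for basic request (metadata) writes - these carry
 * band metadata such as the P2L map and are accounted as
 * FTL_STATS_TYPE_MD_BASE traffic.
 */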
static void
write_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_basic_rq *brq = arg;
	struct ftl_band *band = brq->io.band;

	ftl_stats_bdev_io_completed(band->dev, FTL_STATS_TYPE_MD_BASE, bdev_io);

	brq->success = success;

	assert(band->queue_depth > 0);
	band->queue_depth--;

	brq->owner.cb(brq);
	spdk_bdev_free_io(bdev_io);
}

static void
ftl_band_brq_bdev_write(void *_brq)
{
	struct ftl_basic_rq *brq = _brq;
	struct spdk_ftl_dev *dev = brq->dev;
	int rc;

	rc = spdk_bdev_write_blocks(dev->base_bdev_desc, dev->base_ioch,
				    brq->io_payload, brq->io.addr,
				    brq->num_blocks, write_brq_end, brq);

	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			brq->io.bdev_io_wait.bdev = bdev;
			brq->io.bdev_io_wait.cb_fn = ftl_band_brq_bdev_write;
			brq->io.bdev_io_wait.cb_arg = brq;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &brq->io.bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

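/*
 * Writes a basic request at the band's current write pointer. Like
 * ftl_band_rq_write(), it advances the band iterator and moves the band to
 * FULL when the last blocks have been submitted.
 */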
void
ftl_band_basic_rq_write(struct ftl_band *band, struct ftl_basic_rq *brq)
{
	struct spdk_ftl_dev *dev = band->dev;

	brq->io.addr = band->md->iter.addr;
	brq->io.band = band;
	brq->success = false;

	ftl_band_brq_bdev_write(brq);

	dev->stats.io_activity_total += brq->num_blocks;
	band->queue_depth++;
	ftl_band_iter_advance(band, brq->num_blocks);
	if (ftl_band_filled(band, band->md->iter.offset)) {
		ftl_band_set_state(band, FTL_BAND_STATE_FULL);
		band->owner.state_change_fn(band);
	}
}

static void
read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_basic_rq *brq = arg;
	struct ftl_band *band = brq->io.band;

	ftl_stats_bdev_io_completed(band->dev, FTL_STATS_TYPE_MD_BASE, bdev_io);

	brq->success = success;

	assert(band->queue_depth > 0);
	band->queue_depth--;

	brq->owner.cb(brq);
	spdk_bdev_free_io(bdev_io);
}

static void
ftl_band_brq_bdev_read(void *_brq)
{
	struct ftl_basic_rq *brq = _brq;
	struct spdk_ftl_dev *dev = brq->dev;
	int rc;

	rc = spdk_bdev_read_blocks(dev->base_bdev_desc, dev->base_ioch,
				   brq->io_payload, brq->io.addr,
				   brq->num_blocks, read_brq_end, brq);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			brq->io.bdev_io_wait.bdev = bdev;
			brq->io.bdev_io_wait.cb_fn = ftl_band_brq_bdev_read;
			brq->io.bdev_io_wait.cb_arg = brq;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &brq->io.bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

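/*
 * Reads brq->num_blocks blocks from brq->io.addr into the basic request's
 * payload. The caller is expected to have initialized the request (see
 * ftl_basic_rq_init()) and set brq->io.addr beforehand.
 */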
void
ftl_band_basic_rq_read(struct ftl_band *band, struct ftl_basic_rq *brq)
{
	struct spdk_ftl_dev *dev = brq->dev;

	brq->io.band = band;

	ftl_band_brq_bdev_read(brq);

	brq->io.band->queue_depth++;
	dev->stats.io_activity_total += brq->num_blocks;
}

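/*
 * Called once the band metadata carrying the OPEN state has been persisted;
 * only then does the band transition to FTL_BAND_STATE_OPEN in memory.
 */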
static void
band_open_cb(int status, void *cb_arg)
{
	struct ftl_band *band = cb_arg;

	if (spdk_unlikely(status)) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
#else
		ftl_abort();
#endif
	}

	ftl_band_set_state(band, FTL_BAND_STATE_OPEN);
}

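/*
 * Opens a band for writing: marks it OPENING, stages a copy of its metadata
 * (state OPEN, checksum cleared) in the DMA-able buffer and persists it.
 * A band with valid blocks must never reach this point.
 */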
void
ftl_band_open(struct ftl_band *band, enum ftl_band_type type)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_BAND_MD);
	struct ftl_p2l_map *p2l_map = &band->p2l_map;

	ftl_band_set_type(band, type);
	ftl_band_set_state(band, FTL_BAND_STATE_OPENING);

	memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
	p2l_map->band_dma_md->state = FTL_BAND_STATE_OPEN;
	p2l_map->band_dma_md->p2l_map_checksum = 0;

	if (spdk_unlikely(0 != band->p2l_map.num_valid)) {
		/*
		 * Inconsistent state: a band with valid blocks should never
		 * have been moved to the free list
		 */
		assert(false && 0 == band->p2l_map.num_valid);
		ftl_abort();
	}

	ftl_md_persist_entries(md, band->id, 1, p2l_map->band_dma_md, NULL,
			       band_open_cb, band, &band->md_persist_entry_ctx);
}

static void
band_close_cb(int status, void *cb_arg)
{
	struct ftl_band *band = cb_arg;

	if (spdk_unlikely(status)) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
#else
		ftl_abort();
#endif
	}

	band->md->p2l_map_checksum = band->p2l_map.band_dma_md->p2l_map_checksum;
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSED);
}

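/*
 * Runs after the band's P2L map has been written to the base device.
 * Computes the map's CRC, stages the band metadata with the CLOSED state
 * and persists it; band_close_cb() finishes the transition.
 */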
static void
band_map_write_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->io.band;
	struct ftl_p2l_map *p2l_map = &band->p2l_map;
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_BAND_MD);
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	uint32_t band_map_crc;

	if (spdk_likely(brq->success)) {
		band_map_crc = spdk_crc32c_update(p2l_map->band_map,
						  ftl_tail_md_num_blocks(dev) * FTL_BLOCK_SIZE, 0);
		memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
		p2l_map->band_dma_md->state = FTL_BAND_STATE_CLOSED;
		p2l_map->band_dma_md->p2l_map_checksum = band_map_crc;

		ftl_md_persist_entries(md, band->id, 1, p2l_map->band_dma_md, NULL,
				       band_close_cb, band, &band->md_persist_entry_ctx);
	} else {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		/* Retry the write in case of failure */
		ftl_band_brq_bdev_write(brq);
		band->queue_depth++;
#else
		ftl_abort();
#endif
	}
}

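/*
 * Starts closing a band: assigns the close sequence id and writes the P2L
 * map through the band's metadata_rq. The transition to CLOSED completes
 * asynchronously in band_map_write_cb()/band_close_cb().
 */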
void
ftl_band_close(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	void *metadata = band->p2l_map.band_map;
	uint64_t num_blocks = ftl_tail_md_num_blocks(dev);

	/* Write the P2L map first; after completion, set the closed state on the nvcache, then internally */
	band->md->close_seq_id = ftl_get_next_seq_id(dev);
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSING);
	ftl_basic_rq_init(dev, &band->metadata_rq, metadata, num_blocks);
	ftl_basic_rq_set_owner(&band->metadata_rq, band_map_write_cb, band);

	ftl_band_basic_rq_write(band, &band->metadata_rq);
}

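/*
 * Called once the FREE state has been persisted; releases the band's P2L
 * map and completes the transition to FTL_BAND_STATE_FREE.
 */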
static void
band_free_cb(int status, void *ctx)
{
	struct ftl_band *band = (struct ftl_band *)ctx;

	if (spdk_unlikely(status)) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
#else
		ftl_abort();
#endif
	}

	ftl_band_release_p2l_map(band);
	FTL_DEBUGLOG(band->dev, "Band moving to free state. Band id: %u\n", band->id);
	ftl_band_set_state(band, FTL_BAND_STATE_FREE);
	assert(0 == band->p2l_map.ref_cnt);
}

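/*
 * Frees a band: persists its metadata with the FREE state and cleared
 * close sequence id and checksum, completing in band_free_cb().
 */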
void
ftl_band_free(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_p2l_map *p2l_map = &band->p2l_map;
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_BAND_MD);

	memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
	p2l_map->band_dma_md->state = FTL_BAND_STATE_FREE;
	p2l_map->band_dma_md->close_seq_id = 0;
	p2l_map->band_dma_md->p2l_map_checksum = 0;

	ftl_md_persist_entries(md, band->id, 1, p2l_map->band_dma_md, NULL,
			       band_free_cb, band, &band->md_persist_entry_ctx);

	/* TODO: The whole band erase code should probably be done here instead */
}

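/*
 * Completion of the P2L map read for GC. Retries the read on I/O error;
 * on success the map is verified against the stored CRC (if one was
 * recorded) and a mismatch is reported to the owner as failure.
 */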
static void
read_md_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->owner.priv;
	struct spdk_ftl_dev *dev = band->dev;
	ftl_band_ops_cb cb;
	uint32_t band_map_crc;
	bool success = true;
	void *priv;

	cb = band->owner.ops_fn;
	priv = band->owner.priv;

	if (!brq->success) {
		ftl_band_basic_rq_read(band, &band->metadata_rq);
		return;
	}

	band_map_crc = spdk_crc32c_update(band->p2l_map.band_map,
					  ftl_tail_md_num_blocks(band->dev) * FTL_BLOCK_SIZE, 0);
	if (band->md->p2l_map_checksum && band->md->p2l_map_checksum != band_map_crc) {
		FTL_ERRLOG(dev, "GC error, inconsistent P2L map CRC\n");
		success = false;

		ftl_stats_crc_error(band->dev, FTL_STATS_TYPE_GC);
	}
	band->owner.ops_fn = NULL;
	band->owner.priv = NULL;
	cb(band, priv, success);
}

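/*
 * Allocates the band's P2L map and starts reading it from the band.
 * Returns -ENOMEM if the map cannot be allocated, letting the caller
 * retry.
 */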
static int
_read_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_basic_rq *rq = &band->metadata_rq;

	if (ftl_band_alloc_p2l_map(band)) {
		return -ENOMEM;
	}

	/* Read P2L map */
	ftl_basic_rq_init(dev, rq, band->p2l_map.band_map, ftl_p2l_map_num_blocks(dev));
	ftl_basic_rq_set_owner(rq, read_md_cb, band);

	rq->io.band = band;
	rq->io.addr = ftl_band_p2l_map_addr(band);

	ftl_band_basic_rq_read(band, &band->metadata_rq);

	return 0;
}

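/*
 * Thread-message wrapper around _read_md(): on allocation failure the call
 * is re-posted to the current thread, retrying until the P2L map can be
 * allocated.
 */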
static void
read_md(void *band)
{
	int rc;

	rc = _read_md(band);
	if (spdk_unlikely(rc)) {
		spdk_thread_send_msg(spdk_get_thread(), read_md, band);
	}
}

static void
read_tail_md_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->owner.priv;
	enum ftl_md_status status = FTL_MD_IO_FAILURE;
	ftl_band_md_cb cb;
	void *priv;

	if (spdk_unlikely(!brq->success)) {
		/* Retry the read in case of error */
		ftl_band_basic_rq_read(band, &band->metadata_rq);
		return;
	}

	cb = band->owner.md_fn;
	band->owner.md_fn = NULL;

	priv = band->owner.priv;
	band->owner.priv = NULL;

	status = FTL_MD_SUCCESS;

	cb(band, priv, status);
}

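/*
 * Reads the band's tail metadata into its P2L map buffer and reports the
 * result through the ftl_band_md_cb callback. Read errors are retried in
 * read_tail_md_cb().
 */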
void
ftl_band_read_tail_brq_md(struct ftl_band *band, ftl_band_md_cb cb, void *cntx)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_basic_rq *rq = &band->metadata_rq;

	ftl_basic_rq_init(dev, rq, band->p2l_map.band_map, ftl_tail_md_num_blocks(dev));
	ftl_basic_rq_set_owner(rq, read_tail_md_cb, band);

	assert(!band->owner.md_fn);
	assert(!band->owner.priv);
	band->owner.md_fn = cb;
	band->owner.priv = cntx;

	rq->io.band = band;
	rq->io.addr = band->tail_md_addr;

	ftl_band_basic_rq_read(band, &band->metadata_rq);
}

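/*
 * Picks the next band to relocate and reads its P2L map before handing it
 * to the GC callback. The band must be idle and unowned when selected.
 */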
void
ftl_band_get_next_gc(struct spdk_ftl_dev *dev, ftl_band_ops_cb cb, void *cntx)
{
	struct ftl_band *band = ftl_band_search_next_to_reloc(dev);

	/* If the disk is very small, GC may start so early that no band is ready for it yet */
	if (spdk_unlikely(!band)) {
		cb(NULL, cntx, false);
		return;
	}

	/* Only one owner is allowed */
	assert(!band->queue_depth);
	assert(!band->owner.ops_fn);
	assert(!band->owner.priv);
	band->owner.ops_fn = cb;
	band->owner.priv = cntx;

	read_md(band);
}