/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/queue.h"
#include "spdk/bdev_module.h"

#include "ftl_core.h"
#include "ftl_band.h"
#include "ftl_internal.h"

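/*
 * Completion callback for band write requests. On success, a P2L checkpoint
 * is issued for the written blocks. On failure, the owner callback is invoked
 * when SPDK_FTL_RETRY_ON_ERROR is enabled; otherwise the device aborts.
 */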
static void
write_rq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_rq *rq = arg;
	struct spdk_ftl_dev *dev = rq->dev;

	ftl_stats_bdev_io_completed(dev, rq->owner.compaction ? FTL_STATS_TYPE_CMP : FTL_STATS_TYPE_GC,
				    bdev_io);
	spdk_bdev_free_io(bdev_io);

	rq->success = success;
	if (spdk_likely(rq->success)) {
		ftl_p2l_ckpt_issue(rq);
	} else {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		assert(rq->io.band->queue_depth > 0);
		rq->io.band->queue_depth--;
		rq->owner.cb(rq);
#else
		ftl_abort();
#endif
	}
}

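/*
 * Submits the request's payload to the base bdev. If the bdev is out of
 * internal resources (-ENOMEM), the submission is queued via
 * spdk_bdev_queue_io_wait() and retried; any other error aborts the device.
 */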
static void
ftl_band_rq_bdev_write(void *_rq)
{
	struct ftl_rq *rq = _rq;
	struct ftl_band *band = rq->io.band;
	struct spdk_ftl_dev *dev = band->dev;
	int rc;

	rc = spdk_bdev_write_blocks(dev->base_bdev_desc, dev->base_ioch,
				    rq->io_payload, rq->io.addr, rq->num_blocks,
				    write_rq_end, rq);

	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			rq->io.bdev_io_wait.bdev = bdev;
			rq->io.bdev_io_wait.cb_fn = ftl_band_rq_bdev_write;
			rq->io.bdev_io_wait.cb_arg = rq;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &rq->io.bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

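/*
 * Writes a request at the band's current iterator position and advances the
 * iterator. Once all of the band's blocks have been written, the band moves
 * to FTL_BAND_STATE_FULL and the owner is notified via state_change_fn.
 */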
void
ftl_band_rq_write(struct ftl_band *band, struct ftl_rq *rq)
{
	struct spdk_ftl_dev *dev = band->dev;

	rq->success = false;
	rq->io.band = band;
	rq->io.addr = band->md->iter.addr;

	ftl_band_rq_bdev_write(rq);

	band->queue_depth++;
	dev->stats.io_activity_total += rq->num_blocks;

	ftl_band_iter_advance(band, rq->num_blocks);
	if (ftl_band_filled(band, band->md->iter.offset)) {
		ftl_band_set_state(band, FTL_BAND_STATE_FULL);
		band->owner.state_change_fn(band);
	}
}

static void ftl_band_rq_bdev_read(void *_entry);

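/*
 * Completion callback for a single band read entry. A failed read is
 * resubmitted; on success the band's queue depth is decremented and the
 * request owner's callback is invoked.
 */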
static void
read_rq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_rq_entry *entry = arg;
	struct ftl_band *band = entry->io.band;
	struct ftl_rq *rq = ftl_rq_from_entry(entry);

	ftl_stats_bdev_io_completed(band->dev, FTL_STATS_TYPE_GC, bdev_io);

	rq->success = success;
	if (spdk_unlikely(!success)) {
		ftl_band_rq_bdev_read(entry);
		spdk_bdev_free_io(bdev_io);
		return;
	}

	assert(band->queue_depth > 0);
	band->queue_depth--;

	rq->owner.cb(rq);
	spdk_bdev_free_io(bdev_io);
}

static void
ftl_band_rq_bdev_read(void *_entry)
{
	struct ftl_rq_entry *entry = _entry;
	struct ftl_rq *rq = ftl_rq_from_entry(entry);
	struct spdk_ftl_dev *dev = rq->dev;
	int rc;

	rc = spdk_bdev_read_blocks(dev->base_bdev_desc, dev->base_ioch, entry->io_payload,
				   entry->bdev_io.offset_blocks, entry->bdev_io.num_blocks,
				   read_rq_end, entry);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			entry->bdev_io.wait_entry.bdev = bdev;
			entry->bdev_io.wait_entry.cb_fn = ftl_band_rq_bdev_read;
			entry->bdev_io.wait_entry.cb_arg = entry;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &entry->bdev_io.wait_entry);
		} else {
			ftl_abort();
		}
	}
}

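/*
 * Reads rq->iter.count blocks starting at the band's current iterator
 * address into the request entry at rq->iter.idx. Completion is reported
 * through rq->owner.cb.
 */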
void
ftl_band_rq_read(struct ftl_band *band, struct ftl_rq *rq)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_rq_entry *entry = &rq->entries[rq->iter.idx];

	assert(rq->iter.idx + rq->iter.count <= rq->num_blocks);

	rq->success = false;
	rq->io.band = band;
	rq->io.addr = band->md->iter.addr;
	entry->io.band = band;
	entry->bdev_io.offset_blocks = rq->io.addr;
	entry->bdev_io.num_blocks = rq->iter.count;

	ftl_band_rq_bdev_read(entry);

	dev->stats.io_activity_total += rq->num_blocks;
	band->queue_depth++;
}

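/*
 * Completion callback for basic request writes issued to the base bdev.
 * Decrements the band's queue depth and hands the result back to the
 * request owner.
 */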
static void
write_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_basic_rq *brq = arg;
	struct ftl_band *band = brq->io.band;

	ftl_stats_bdev_io_completed(band->dev, FTL_STATS_TYPE_MD_BASE, bdev_io);

	brq->success = success;

	assert(band->queue_depth > 0);
	band->queue_depth--;

	brq->owner.cb(brq);
	spdk_bdev_free_io(bdev_io);
}

static void
ftl_band_brq_bdev_write(void *_brq)
{
	struct ftl_basic_rq *brq = _brq;
	struct spdk_ftl_dev *dev = brq->dev;
	int rc;

	rc = spdk_bdev_write_blocks(dev->base_bdev_desc, dev->base_ioch,
				    brq->io_payload, brq->io.addr,
				    brq->num_blocks, write_brq_end, brq);

	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			brq->io.bdev_io_wait.bdev = bdev;
			brq->io.bdev_io_wait.cb_fn = ftl_band_brq_bdev_write;
			brq->io.bdev_io_wait.cb_arg = brq;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &brq->io.bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

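/*
 * Writes a basic request (e.g. the band's P2L map) at the band's current
 * iterator position and advances the iterator, marking the band full once
 * its last block has been written.
 */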
void
ftl_band_basic_rq_write(struct ftl_band *band, struct ftl_basic_rq *brq)
{
	struct spdk_ftl_dev *dev = band->dev;

	brq->io.addr = band->md->iter.addr;
	brq->io.band = band;
	brq->success = false;

	ftl_band_brq_bdev_write(brq);

	dev->stats.io_activity_total += brq->num_blocks;
	band->queue_depth++;
	ftl_band_iter_advance(band, brq->num_blocks);
	if (ftl_band_filled(band, band->md->iter.offset)) {
		ftl_band_set_state(band, FTL_BAND_STATE_FULL);
		band->owner.state_change_fn(band);
	}
}

static void
read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_basic_rq *brq = arg;
	struct ftl_band *band = brq->io.band;

	ftl_stats_bdev_io_completed(band->dev, FTL_STATS_TYPE_MD_BASE, bdev_io);

	brq->success = success;

	assert(band->queue_depth > 0);
	band->queue_depth--;

	brq->owner.cb(brq);
	spdk_bdev_free_io(bdev_io);
}

static void
ftl_band_brq_bdev_read(void *_brq)
{
	struct ftl_basic_rq *brq = _brq;
	struct spdk_ftl_dev *dev = brq->dev;
	int rc;

	rc = spdk_bdev_read_blocks(dev->base_bdev_desc, dev->base_ioch,
				   brq->io_payload, brq->io.addr,
				   brq->num_blocks, read_brq_end, brq);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			brq->io.bdev_io_wait.bdev = bdev;
			brq->io.bdev_io_wait.cb_fn = ftl_band_brq_bdev_read;
			brq->io.bdev_io_wait.cb_arg = brq;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &brq->io.bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

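/*
 * Reads a basic request from the address previously set in brq->io.addr.
 * Completion is reported through the owner callback set via
 * ftl_basic_rq_set_owner().
 */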
void
ftl_band_basic_rq_read(struct ftl_band *band, struct ftl_basic_rq *brq)
{
	struct spdk_ftl_dev *dev = brq->dev;

	brq->io.band = band;

	ftl_band_brq_bdev_read(brq);

	brq->io.band->queue_depth++;
	dev->stats.io_activity_total += brq->num_blocks;
}

static void
band_open_cb(int status, void *cb_arg)
{
	struct ftl_band *band = cb_arg;

	if (spdk_unlikely(status)) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
#else
		ftl_abort();
#endif
	}

	ftl_band_set_state(band, FTL_BAND_STATE_OPEN);
}

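/*
 * Transitions a band to the open state: sets its type, persists the updated
 * band metadata entry with state OPEN, and moves the band to
 * FTL_BAND_STATE_OPEN once the metadata write completes (band_open_cb).
 */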
void
ftl_band_open(struct ftl_band *band, enum ftl_band_type type)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_BAND_MD);
	struct ftl_p2l_map *p2l_map = &band->p2l_map;

	ftl_band_set_type(band, type);
	ftl_band_set_state(band, FTL_BAND_STATE_OPENING);

	memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
	p2l_map->band_dma_md->state = FTL_BAND_STATE_OPEN;
	p2l_map->band_dma_md->p2l_map_checksum = 0;

	if (spdk_unlikely(0 != band->p2l_map.num_valid)) {
		/*
		 * Inconsistent state: a band that still contains valid blocks
		 * should never have been moved to the free list and reopened.
		 */
		assert(false && 0 == band->p2l_map.num_valid);
		ftl_abort();
	}

	ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,
			     band_open_cb, band, &band->md_persist_entry_ctx);
}

static void
band_close_cb(int status, void *cb_arg)
{
	struct ftl_band *band = cb_arg;

	if (spdk_unlikely(status)) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
#else
		ftl_abort();
#endif
	}

	band->md->p2l_map_checksum = band->p2l_map.band_dma_md->p2l_map_checksum;
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSED);
}

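/*
 * Called when the band's P2L map has been written during close. On success,
 * the map's CRC is computed and the band metadata entry is persisted with
 * state CLOSED; band_close_cb finishes the transition.
 */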
static void
band_map_write_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->io.band;
	struct ftl_p2l_map *p2l_map = &band->p2l_map;
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_BAND_MD);
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	uint32_t band_map_crc;

	if (spdk_likely(brq->success)) {
		band_map_crc = spdk_crc32c_update(p2l_map->band_map,
						  ftl_tail_md_num_blocks(dev) * FTL_BLOCK_SIZE, 0);
		memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
		p2l_map->band_dma_md->state = FTL_BAND_STATE_CLOSED;
		p2l_map->band_dma_md->p2l_map_checksum = band_map_crc;

		ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,
				     band_close_cb, band, &band->md_persist_entry_ctx);
	} else {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		/* Retry the P2L map write in case of failure */
		ftl_band_brq_bdev_write(brq);
		band->queue_depth++;
#else
		ftl_abort();
#endif
	}
}

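/*
 * Starts closing a band: assigns the close sequence number, moves the band
 * to FTL_BAND_STATE_CLOSING, and writes the P2L map through a basic request
 * (completion continues in band_map_write_cb).
 */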
void
ftl_band_close(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	void *metadata = band->p2l_map.band_map;
	uint64_t num_blocks = ftl_tail_md_num_blocks(dev);

	/*
	 * Write the P2L map first; once it completes, the closed state is
	 * persisted to the band metadata on the nvcache, then set internally.
	 */
	band->md->close_seq_id = ftl_get_next_seq_id(dev);
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSING);
	ftl_basic_rq_init(dev, &band->metadata_rq, metadata, num_blocks);
	ftl_basic_rq_set_owner(&band->metadata_rq, band_map_write_cb, band);

	ftl_band_basic_rq_write(band, &band->metadata_rq);
}

static void
band_free_cb(int status, void *ctx)
{
	struct ftl_band *band = (struct ftl_band *)ctx;

	if (spdk_unlikely(status)) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
#else
		ftl_abort();
#endif
	}

	ftl_band_release_p2l_map(band);
	FTL_DEBUGLOG(band->dev, "Band is going to free state. Band id: %u\n", band->id);
	ftl_band_set_state(band, FTL_BAND_STATE_FREE);
	assert(0 == band->p2l_map.ref_cnt);
}

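/*
 * Starts freeing a band by persisting its metadata entry with state FREE and
 * cleared sequence/checksum fields; band_free_cb releases the P2L map and
 * completes the state transition.
 */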
void
ftl_band_free(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_p2l_map *p2l_map = &band->p2l_map;
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_BAND_MD);

	memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
	p2l_map->band_dma_md->state = FTL_BAND_STATE_FREE;
	p2l_map->band_dma_md->close_seq_id = 0;
	p2l_map->band_dma_md->p2l_map_checksum = 0;

	ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,
			     band_free_cb, band, &band->md_persist_entry_ctx);

	/* TODO: The whole band erase code should probably be done here instead */
}

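/*
 * Completion callback for the P2L map read issued by _read_md(). Verifies
 * the map's CRC against the stored checksum before reporting the result to
 * the owner; a mismatch is counted as a GC CRC error. A failed read is
 * resubmitted.
 */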
static void
read_md_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->owner.priv;
	struct spdk_ftl_dev *dev = band->dev;
	ftl_band_ops_cb cb;
	uint32_t band_map_crc;
	bool success = true;
	void *priv;

	cb = band->owner.ops_fn;
	priv = band->owner.priv;

	if (!brq->success) {
		ftl_band_basic_rq_read(band, &band->metadata_rq);
		return;
	}

	band_map_crc = spdk_crc32c_update(band->p2l_map.band_map,
					  ftl_tail_md_num_blocks(band->dev) * FTL_BLOCK_SIZE, 0);
	if (band->md->p2l_map_checksum && band->md->p2l_map_checksum != band_map_crc) {
		FTL_ERRLOG(dev, "GC error, inconsistent P2L map CRC\n");
		success = false;

		ftl_stats_crc_error(band->dev, FTL_STATS_TYPE_GC);
	}
	band->owner.ops_fn = NULL;
	band->owner.priv = NULL;
	cb(band, priv, success);
}

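/*
 * Allocates the band's P2L map and issues a read of the map from the base
 * device. Returns -ENOMEM if the map cannot be allocated.
 */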
static int
_read_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_basic_rq *rq = &band->metadata_rq;

	if (ftl_band_alloc_p2l_map(band)) {
		return -ENOMEM;
	}

	/* Read P2L map */
	ftl_basic_rq_init(dev, rq, band->p2l_map.band_map, ftl_p2l_map_num_blocks(dev));
	ftl_basic_rq_set_owner(rq, read_md_cb, band);

	rq->io.band = band;
	rq->io.addr = ftl_band_p2l_map_addr(band);

	ftl_band_basic_rq_read(band, &band->metadata_rq);

	return 0;
}

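/*
 * Thread-message wrapper around _read_md(); when P2L map allocation fails,
 * the read is rescheduled on the current thread until it succeeds.
 */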
static void
read_md(void *band)
{
	int rc;

	rc = _read_md(band);
	if (spdk_unlikely(rc)) {
		spdk_thread_send_msg(spdk_get_thread(), read_md, band);
	}
}

static void
read_tail_md_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->owner.priv;
	enum ftl_md_status status = FTL_MD_IO_FAILURE;
	ftl_band_md_cb cb;
	void *priv;

	if (spdk_unlikely(!brq->success)) {
		/* Retry the read in case of error */
		ftl_band_basic_rq_read(band, &band->metadata_rq);
		return;
	}

	cb = band->owner.md_fn;
	band->owner.md_fn = NULL;

	priv = band->owner.priv;
	band->owner.priv = NULL;

	status = FTL_MD_SUCCESS;

	cb(band, priv, status);
}

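/*
 * Reads the band's tail metadata into its P2L map buffer and reports the
 * result through the given callback; read_tail_md_cb retries failed reads.
 */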
void
ftl_band_read_tail_brq_md(struct ftl_band *band, ftl_band_md_cb cb, void *cntx)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_basic_rq *rq = &band->metadata_rq;

	ftl_basic_rq_init(dev, rq, band->p2l_map.band_map, ftl_tail_md_num_blocks(dev));
	ftl_basic_rq_set_owner(rq, read_tail_md_cb, band);

	assert(!band->owner.md_fn);
	assert(!band->owner.priv);
	band->owner.md_fn = cb;
	band->owner.priv = cntx;

	rq->io.band = band;
	rq->io.addr = band->tail_md_addr;

	ftl_band_basic_rq_read(band, &band->metadata_rq);
}

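/*
 * Picks the next band to garbage collect and reads its P2L map; cb is
 * invoked with the band once the map has been loaded, or with NULL when no
 * band is eligible for relocation.
 */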
void
ftl_band_get_next_gc(struct spdk_ftl_dev *dev, ftl_band_ops_cb cb, void *cntx)
{
	struct ftl_band *band = ftl_band_search_next_to_reloc(dev);

	/* If the disk is very small, GC may start so early that no band is ready for it yet */
	if (spdk_unlikely(!band)) {
		cb(NULL, cntx, false);
		return;
	}

	/* Only one owner is allowed */
	assert(!band->queue_depth);
	assert(!band->owner.ops_fn);
	assert(!band->owner.priv);
	band->owner.ops_fn = cb;
	band->owner.priv = cntx;

	read_md(band);
}
547