/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/queue.h"
#include "spdk/bdev_module.h"

#include "ftl_core.h"
#include "ftl_band.h"
#include "ftl_internal.h"

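/*
 * Band operations: user-data and metadata I/O to the base bdev through
 * ftl_rq / ftl_basic_rq requests, and the band state transitions (open,
 * close, free) persisted via the band metadata layout region.
 */
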
static void
write_rq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_rq *rq = arg;
	struct spdk_ftl_dev *dev = rq->dev;

	ftl_stats_bdev_io_completed(dev, rq->owner.compaction ? FTL_STATS_TYPE_CMP : FTL_STATS_TYPE_GC,
				    bdev_io);

	rq->success = success;

	ftl_p2l_ckpt_issue(rq);

	spdk_bdev_free_io(bdev_io);
}

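/*
 * Submit the request's payload as a write to the base bdev. On -ENOMEM
 * (the bdev layer ran out of spdk_bdev_io objects) the request is parked
 * on the bdev's io_wait queue and this function is re-invoked once an
 * spdk_bdev_io is freed; any other error is fatal. The same pattern is
 * used by all the bdev submission helpers in this file.
 */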
static void
ftl_band_rq_bdev_write(void *_rq)
{
	struct ftl_rq *rq = _rq;
	struct ftl_band *band = rq->io.band;
	struct spdk_ftl_dev *dev = band->dev;
	int rc;

	rc = spdk_bdev_write_blocks(dev->base_bdev_desc, dev->base_ioch,
				    rq->io_payload, rq->io.addr, rq->num_blocks,
				    write_rq_end, rq);

	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			rq->io.bdev_io_wait.bdev = bdev;
			rq->io.bdev_io_wait.cb_fn = ftl_band_rq_bdev_write;
			rq->io.bdev_io_wait.cb_arg = rq;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &rq->io.bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

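/*
 * Write one ftl_rq of data at the band's current write position: bump the
 * band's queue depth, advance the write iterator, and once the band is
 * filled move it to FTL_BAND_STATE_FULL and notify the owner through
 * state_change_fn.
 */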
void
ftl_band_rq_write(struct ftl_band *band, struct ftl_rq *rq)
{
	struct spdk_ftl_dev *dev = band->dev;

	rq->success = false;
	rq->io.band = band;
	rq->io.addr = band->md->iter.addr;

	ftl_band_rq_bdev_write(rq);

	band->queue_depth++;
	dev->stats.io_activity_total += rq->num_blocks;

	ftl_band_iter_advance(band, rq->num_blocks);
	if (ftl_band_filled(band, band->md->iter.offset)) {
		ftl_band_set_state(band, FTL_BAND_STATE_FULL);
		band->owner.state_change_fn(band);
	}
}

static void ftl_band_rq_bdev_read(void *_entry);

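/*
 * Read completion for a single ftl_rq entry. A failed read is resubmitted
 * immediately instead of completing the request; only a successful read
 * drops the band's queue depth and invokes the owner's callback.
 */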
static void
read_rq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_rq_entry *entry = arg;
	struct ftl_band *band = entry->io.band;
	struct ftl_rq *rq = ftl_rq_from_entry(entry);

	ftl_stats_bdev_io_completed(band->dev, FTL_STATS_TYPE_GC, bdev_io);

	rq->success = success;
	if (spdk_unlikely(!success)) {
		ftl_band_rq_bdev_read(entry);
		spdk_bdev_free_io(bdev_io);
		return;
	}

	assert(band->queue_depth > 0);
	band->queue_depth--;

	rq->owner.cb(rq);
	spdk_bdev_free_io(bdev_io);
}

static void
ftl_band_rq_bdev_read(void *_entry)
{
	struct ftl_rq_entry *entry = _entry;
	struct ftl_rq *rq = ftl_rq_from_entry(entry);
	struct spdk_ftl_dev *dev = rq->dev;
	int rc;

	rc = spdk_bdev_read_blocks(dev->base_bdev_desc, dev->base_ioch, entry->io_payload,
				   entry->bdev_io.offset_blocks, entry->bdev_io.num_blocks,
				   read_rq_end, entry);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			entry->bdev_io.wait_entry.bdev = bdev;
			entry->bdev_io.wait_entry.cb_fn = ftl_band_rq_bdev_read;
			entry->bdev_io.wait_entry.cb_arg = entry;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &entry->bdev_io.wait_entry);
		} else {
			ftl_abort();
		}
	}
}

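/*
 * Read rq->iter.count blocks from the band's current iterator address into
 * the entry at rq->iter.idx. Unlike the write path, the band iterator is
 * not advanced here; the caller drives iteration between submissions.
 */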
void
ftl_band_rq_read(struct ftl_band *band, struct ftl_rq *rq)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_rq_entry *entry = &rq->entries[rq->iter.idx];

	assert(rq->iter.idx + rq->iter.count <= rq->num_blocks);

	rq->success = false;
	rq->io.band = band;
	rq->io.addr = band->md->iter.addr;
	entry->io.band = band;
	entry->bdev_io.offset_blocks = rq->io.addr;
	entry->bdev_io.num_blocks = rq->iter.count;

	ftl_band_rq_bdev_read(entry);

	dev->stats.io_activity_total += rq->num_blocks;
	band->queue_depth++;
}

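/*
 * Basic request (ftl_basic_rq) path below: a single contiguous buffer I/O
 * used for band metadata such as the P2L map, accounted under
 * FTL_STATS_TYPE_MD_BASE rather than the compaction/GC data counters.
 */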
static void
write_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_basic_rq *brq = arg;
	struct ftl_band *band = brq->io.band;

	ftl_stats_bdev_io_completed(band->dev, FTL_STATS_TYPE_MD_BASE, bdev_io);

	brq->success = success;

	assert(band->queue_depth > 0);
	band->queue_depth--;

	brq->owner.cb(brq);
	spdk_bdev_free_io(bdev_io);
}

static void
ftl_band_brq_bdev_write(void *_brq)
{
	struct ftl_basic_rq *brq = _brq;
	struct spdk_ftl_dev *dev = brq->dev;
	int rc;

	rc = spdk_bdev_write_blocks(dev->base_bdev_desc, dev->base_ioch,
				    brq->io_payload, brq->io.addr,
				    brq->num_blocks, write_brq_end, brq);

	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			brq->io.bdev_io_wait.bdev = bdev;
			brq->io.bdev_io_wait.cb_fn = ftl_band_brq_bdev_write;
			brq->io.bdev_io_wait.cb_arg = brq;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &brq->io.bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

void
ftl_band_basic_rq_write(struct ftl_band *band, struct ftl_basic_rq *brq)
{
	struct spdk_ftl_dev *dev = band->dev;

	brq->io.addr = band->md->iter.addr;
	brq->io.band = band;
	brq->success = false;

	ftl_band_brq_bdev_write(brq);

	dev->stats.io_activity_total += brq->num_blocks;
	band->queue_depth++;
	ftl_band_iter_advance(band, brq->num_blocks);
	if (ftl_band_filled(band, band->md->iter.offset)) {
		ftl_band_set_state(band, FTL_BAND_STATE_FULL);
		band->owner.state_change_fn(band);
	}
}

static void
read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_basic_rq *brq = arg;
	struct ftl_band *band = brq->io.band;

	ftl_stats_bdev_io_completed(band->dev, FTL_STATS_TYPE_MD_BASE, bdev_io);

	brq->success = success;

	assert(band->queue_depth > 0);
	band->queue_depth--;

	brq->owner.cb(brq);
	spdk_bdev_free_io(bdev_io);
}

static void
ftl_band_brq_bdev_read(void *_brq)
{
	struct ftl_basic_rq *brq = _brq;
	struct spdk_ftl_dev *dev = brq->dev;
	int rc;

	rc = spdk_bdev_read_blocks(dev->base_bdev_desc, dev->base_ioch,
				   brq->io_payload, brq->io.addr,
				   brq->num_blocks, read_brq_end, brq);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			brq->io.bdev_io_wait.bdev = bdev;
			brq->io.bdev_io_wait.cb_fn = ftl_band_brq_bdev_read;
			brq->io.bdev_io_wait.cb_arg = brq;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &brq->io.bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

void
ftl_band_basic_rq_read(struct ftl_band *band, struct ftl_basic_rq *brq)
{
	struct spdk_ftl_dev *dev = brq->dev;

	brq->io.band = band;

	ftl_band_brq_bdev_read(brq);

	brq->io.band->queue_depth++;
	dev->stats.io_activity_total += brq->num_blocks;
}

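/*
 * Completion of the band-open metadata persist: the band only transitions
 * OPENING -> OPEN after its updated MD entry (OPEN state, zeroed P2L
 * checksum) has been persisted. With SPDK_FTL_RETRY_ON_ERROR the persist
 * is retried on failure; otherwise failure aborts.
 */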
static void
band_open_cb(int status, void *cb_arg)
{
	struct ftl_band *band = cb_arg;

	if (spdk_unlikely(status)) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
#else
		ftl_abort();
#endif
	}

	ftl_band_set_state(band, FTL_BAND_STATE_OPEN);
}

void
ftl_band_open(struct ftl_band *band, enum ftl_band_type type)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_p2l_map *p2l_map = &band->p2l_map;

	ftl_band_set_type(band, type);
	ftl_band_set_state(band, FTL_BAND_STATE_OPENING);

	memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
	p2l_map->band_dma_md->state = FTL_BAND_STATE_OPEN;
	p2l_map->band_dma_md->p2l_map_checksum = 0;

	if (spdk_unlikely(0 != band->p2l_map.num_valid)) {
		/*
		 * Inconsistent state: a band that still has valid blocks
		 * should never have been moved to the free list.
		 */
		assert(false && 0 == band->p2l_map.num_valid);
		ftl_abort();
	}

	ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,
			     band_open_cb, band, &band->md_persist_entry_ctx);
}

static void
band_close_cb(int status, void *cb_arg)
{
	struct ftl_band *band = cb_arg;

	if (spdk_unlikely(status)) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
#else
		ftl_abort();
#endif
	}

	band->md->p2l_map_checksum = band->p2l_map.band_dma_md->p2l_map_checksum;
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSED);
}

static void
band_map_write_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->io.band;
	struct ftl_p2l_map *p2l_map = &band->p2l_map;
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	uint32_t band_map_crc;

	if (spdk_likely(brq->success)) {

		band_map_crc = spdk_crc32c_update(p2l_map->band_map,
						  ftl_tail_md_num_blocks(dev) * FTL_BLOCK_SIZE, 0);
		memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
		p2l_map->band_dma_md->state = FTL_BAND_STATE_CLOSED;
		p2l_map->band_dma_md->p2l_map_checksum = band_map_crc;

		ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,
				     band_close_cb, band, &band->md_persist_entry_ctx);
	} else {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		/* Retry the P2L map write in case of failure */
		ftl_band_brq_bdev_write(brq);
		band->queue_depth++;
#else
		ftl_abort();
#endif
	}
}

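/*
 * Closing a band is a two-step sequence: first the P2L map is written to
 * the band's tail metadata blocks (completed in band_map_write_cb), then
 * the CLOSED state and the freshly computed P2L map CRC are persisted to
 * the band MD region (completed in band_close_cb).
 */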
void
ftl_band_close(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	void *metadata = band->p2l_map.band_map;
	uint64_t num_blocks = ftl_tail_md_num_blocks(dev);

	/* Write the P2L map first; on completion, persist the closed state on the nvcache, then set it internally */
	band->md->close_seq_id = ftl_get_next_seq_id(dev);
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSING);
	ftl_basic_rq_init(dev, &band->metadata_rq, metadata, num_blocks);
	ftl_basic_rq_set_owner(&band->metadata_rq, band_map_write_cb, band);

	ftl_band_basic_rq_write(band, &band->metadata_rq);
}

static void
band_free_cb(int status, void *ctx)
{
	struct ftl_band *band = (struct ftl_band *)ctx;

	if (spdk_unlikely(status)) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
#else
		ftl_abort();
#endif
	}

	ftl_band_release_p2l_map(band);
	FTL_DEBUGLOG(band->dev, "Band is transitioning to free state. Band id: %u\n", band->id);
	ftl_band_set_state(band, FTL_BAND_STATE_FREE);
	assert(0 == band->p2l_map.ref_cnt);
}

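/*
 * Free a band: persist the FREE state with cleared close_seq_id and P2L
 * checksum; the P2L map is released and the band enters
 * FTL_BAND_STATE_FREE only in band_free_cb.
 */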
void
ftl_band_free(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_p2l_map *p2l_map = &band->p2l_map;
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_BAND_MD];

	memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
	p2l_map->band_dma_md->state = FTL_BAND_STATE_FREE;
	p2l_map->band_dma_md->close_seq_id = 0;
	p2l_map->band_dma_md->p2l_map_checksum = 0;

	ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,
			     band_free_cb, band, &band->md_persist_entry_ctx);

	/* TODO: The whole band erase code should probably be done here instead */
}

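/*
 * GC read path: once the band's P2L map has been read, verify it against
 * the checksum stored in the band metadata (when one is set). A CRC
 * mismatch is counted and reported to the owner as success == false
 * rather than aborting.
 */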
static void
read_md_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->owner.priv;
	struct spdk_ftl_dev *dev = band->dev;
	ftl_band_ops_cb cb;
	uint32_t band_map_crc;
	bool success = true;
	void *priv;

	cb = band->owner.ops_fn;
	priv = band->owner.priv;

	if (!brq->success) {
		ftl_band_basic_rq_read(band, &band->metadata_rq);
		return;
	}

	band_map_crc = spdk_crc32c_update(band->p2l_map.band_map,
					  ftl_tail_md_num_blocks(band->dev) * FTL_BLOCK_SIZE, 0);
	if (band->md->p2l_map_checksum && band->md->p2l_map_checksum != band_map_crc) {
		FTL_ERRLOG(dev, "GC error, inconsistent P2L map CRC\n");
		success = false;

		ftl_stats_crc_error(band->dev, FTL_STATS_TYPE_GC);
	}
	band->owner.ops_fn = NULL;
	band->owner.priv = NULL;
	cb(band, priv, success);
}

static int
_read_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_basic_rq *rq = &band->metadata_rq;

	if (ftl_band_alloc_p2l_map(band)) {
		return -ENOMEM;
	}

	/* Read P2L map */
	ftl_basic_rq_init(dev, rq, band->p2l_map.band_map, ftl_p2l_map_num_blocks(dev));
	ftl_basic_rq_set_owner(rq, read_md_cb, band);

	rq->io.band = band;
	rq->io.addr = ftl_band_p2l_map_addr(band);

	ftl_band_basic_rq_read(band, &band->metadata_rq);

	return 0;
}

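/*
 * Retry wrapper: if _read_md() fails to allocate the P2L map (-ENOMEM),
 * re-send this function as a message to the current thread, effectively
 * retrying until the allocation succeeds.
 */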
static void
read_md(void *band)
{
	int rc;

	rc = _read_md(band);
	if (spdk_unlikely(rc)) {
		spdk_thread_send_msg(spdk_get_thread(), read_md, band);
	}
}

static void
read_tail_md_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->owner.priv;
	enum ftl_md_status status = FTL_MD_IO_FAILURE;
	ftl_band_md_cb cb;
	void *priv;

	if (spdk_unlikely(!brq->success)) {
		/* Retry the read in case of error */
		ftl_band_basic_rq_read(band, &band->metadata_rq);
		return;
	}

	cb = band->owner.md_fn;
	band->owner.md_fn = NULL;

	priv = band->owner.priv;
	band->owner.priv = NULL;

	status = FTL_MD_SUCCESS;

	cb(band, priv, status);
}

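/*
 * Read the band's tail metadata into the P2L map buffer and invoke cb with
 * FTL_MD_SUCCESS once the read completes; failed reads are resubmitted
 * until they succeed. Only one outstanding owner (md_fn/priv) per band is
 * allowed, which the asserts below enforce.
 *
 * A minimal usage sketch (caller and callback names are hypothetical):
 *
 *	static void
 *	tail_md_done(struct ftl_band *band, void *priv, enum ftl_md_status status)
 *	{
 *		assert(status == FTL_MD_SUCCESS);
 *	}
 *
 *	ftl_band_read_tail_brq_md(band, tail_md_done, ctx);
 */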
void
ftl_band_read_tail_brq_md(struct ftl_band *band, ftl_band_md_cb cb, void *cntx)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_basic_rq *rq = &band->metadata_rq;

	ftl_basic_rq_init(dev, rq, band->p2l_map.band_map, ftl_tail_md_num_blocks(dev));
	ftl_basic_rq_set_owner(rq, read_tail_md_cb, band);

	assert(!band->owner.md_fn);
	assert(!band->owner.priv);
	band->owner.md_fn = cb;
	band->owner.priv = cntx;

	rq->io.band = band;
	rq->io.addr = band->tail_md_addr;

	ftl_band_basic_rq_read(band, &band->metadata_rq);
}

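/*
 * Pick the next band to relocate and hand it to the callback with its P2L
 * map loaded (via read_md()). If no band is ready for relocation, the
 * callback is invoked immediately with band == NULL and success == false.
 */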
void
ftl_band_get_next_gc(struct spdk_ftl_dev *dev, ftl_band_ops_cb cb, void *cntx)
{
	struct ftl_band *band = ftl_band_search_next_to_reloc(dev);

	/* If the disk is very small, GC may start so early that no band is ready for it yet */
	if (spdk_unlikely(!band)) {
		cb(NULL, cntx, false);
		return;
	}

	/* Only one owner is allowed */
	assert(!band->queue_depth);
	assert(!band->owner.ops_fn);
	assert(!band->owner.priv);
	band->owner.ops_fn = cb;
	band->owner.priv = cntx;

	read_md(band);
}