xref: /spdk/lib/ftl/ftl_reloc.c (revision b02581a89058ebaebe03bd0e16e3b58adfe406c1)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2018 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "ftl_band.h"
7 #include "ftl_core.h"
8 #include "ftl_debug.h"
9 #include "ftl_io.h"
10 #include "ftl_internal.h"
11 #include "spdk/ftl.h"
12 #include "spdk/likely.h"
13 
14 struct ftl_reloc;
15 struct ftl_band_reloc;
16 
17 /* TODO: Should probably change the move naming nomenclature to something more descriptive */
/* TODO: Should probably change the move naming nomenclature to something more descriptive */
enum ftl_reloc_move_state {
	/* Reading valid blocks out of the band being relocated */
	FTL_RELOC_STATE_READ = 0,
	/* Pinning the L2P entries for the blocks just read */
	FTL_RELOC_STATE_PIN,
	/* Submitting the relocated blocks to the GC writer */
	FTL_RELOC_STATE_WRITE,
	/* Waiting for an asynchronous completion (read, pin or write) */
	FTL_RELOC_STATE_WAIT,
	/* Mover parked while relocation is halted */
	FTL_RELOC_STATE_HALT,

	FTL_RELOC_STATE_MAX
};
27 
/*
 * A single relocation worker ("mover"). Each mover owns one IO request and
 * cycles through the ftl_reloc_move_state machine above.
 */
struct ftl_reloc_move {
	/* FTL device */
	struct spdk_ftl_dev *dev;

	/* Parent relocation context */
	struct ftl_reloc *reloc;

	/* Request for doing IO */
	struct ftl_rq *rq;

	/* Move state (read, write) */
	enum ftl_reloc_move_state state;

	/* Link on the per-state move_queue[] list */
	TAILQ_ENTRY(ftl_reloc_move) qentry;
};
43 
struct ftl_reloc {
	/* Device associated with relocate */
	struct spdk_ftl_dev *dev;

	/* Indicates relocate is about to halt */
	bool halt;

	/* Band currently being read for relocation (NULL when none) */
	struct ftl_band *band;

	/* Bands already read, but waiting for finishing GC */
	TAILQ_HEAD(, ftl_band) band_done;
	size_t band_done_count;

	/* Flag indicating reloc is waiting for a new band */
	bool band_waiting;

	/* Maximum number of IOs per band */
	size_t max_qdepth;

	/* Pool of max_qdepth mover objects */
	struct ftl_reloc_move *move_buffer;

	/* Array of movers queue for each state */
	TAILQ_HEAD(, ftl_reloc_move) move_queue[FTL_RELOC_STATE_MAX];

};
71 
72 static void move_read_cb(struct ftl_rq *rq);
73 static void move_write_cb(struct ftl_rq *rq);
74 static void move_set_state(struct ftl_reloc_move *mv, enum ftl_reloc_move_state state);
75 static void move_write(struct ftl_reloc *reloc, struct ftl_reloc_move *mv);
76 static void move_read_error_cb(struct ftl_rq *rq, struct ftl_band *band, uint64_t idx,
77 			       uint64_t count);
78 
79 static void
80 move_deinit(struct ftl_reloc_move *mv)
81 {
82 	assert(mv);
83 	ftl_rq_del(mv->rq);
84 }
85 
86 static int
87 move_init(struct ftl_reloc *reloc, struct ftl_reloc_move *mv)
88 {
89 	mv->state = FTL_RELOC_STATE_HALT;
90 	TAILQ_INSERT_TAIL(&reloc->move_queue[FTL_RELOC_STATE_HALT], mv, qentry);
91 
92 	mv->reloc = reloc;
93 	mv->dev = reloc->dev;
94 	mv->rq = ftl_rq_new(mv->dev, mv->dev->md_size);
95 
96 	if (!mv->rq) {
97 		return -ENOMEM;
98 	}
99 	mv->rq->owner.priv = mv;
100 
101 	return 0;
102 }
103 
104 struct ftl_reloc *
105 ftl_reloc_init(struct spdk_ftl_dev *dev)
106 {
107 	struct ftl_reloc *reloc;
108 	struct ftl_reloc_move *move;
109 	size_t i, count;
110 
111 	reloc = calloc(1, sizeof(*reloc));
112 	if (!reloc) {
113 		return NULL;
114 	}
115 
116 	reloc->dev = dev;
117 	reloc->halt = true;
118 	reloc->max_qdepth = dev->sb->max_reloc_qdepth;
119 
120 	reloc->move_buffer = calloc(reloc->max_qdepth, sizeof(*reloc->move_buffer));
121 	if (!reloc->move_buffer) {
122 		FTL_ERRLOG(dev, "Failed to initialize reloc moves pool");
123 		goto error;
124 	}
125 
126 	/* Initialize movers queues */
127 	count = SPDK_COUNTOF(reloc->move_queue);
128 	for (i = 0; i < count; ++i) {
129 		TAILQ_INIT(&reloc->move_queue[i]);
130 	}
131 
132 	for (i = 0; i < reloc->max_qdepth; ++i) {
133 		move = &reloc->move_buffer[i];
134 
135 		if (move_init(reloc, move)) {
136 			goto error;
137 		}
138 	}
139 
140 	TAILQ_INIT(&reloc->band_done);
141 
142 	return reloc;
143 error:
144 	ftl_reloc_free(reloc);
145 	return NULL;
146 }
147 
/*
 * Context for finishing a reloc task through a thread message.
 * NOTE(review): struct ftl_reloc_task is only forward-referenced and this
 * type is not used anywhere in this file - possibly left over from an earlier
 * design; verify against the rest of the module before removing.
 */
struct ftl_reloc_task_fini {
	struct ftl_reloc_task *task;
	spdk_msg_fn cb;
	void *cb_arg;
};
153 
154 void
155 ftl_reloc_free(struct ftl_reloc *reloc)
156 {
157 	size_t i;
158 
159 	if (!reloc) {
160 		return;
161 	}
162 
163 	if (reloc->move_buffer) {
164 		for (i = 0; i < reloc->max_qdepth; ++i) {
165 			move_deinit(&reloc->move_buffer[i]);
166 		}
167 	}
168 
169 	free(reloc->move_buffer);
170 	free(reloc);
171 }
172 
173 void
174 ftl_reloc_halt(struct ftl_reloc *reloc)
175 {
176 	struct spdk_ftl_dev *dev = reloc->dev;
177 
178 	if (dev->conf.prep_upgrade_on_shutdown && 0 == dev->num_free) {
179 		/*
180 		 * In shutdown upgrade procedure, it is required to have
181 		 * at least one free band. Keep reloc running to reclaim
182 		 * the band.
183 		 */
184 		return;
185 	}
186 
187 	reloc->halt = true;
188 }
189 
190 void
191 ftl_reloc_resume(struct ftl_reloc *reloc)
192 {
193 	struct ftl_reloc_move *mv, *next;
194 	reloc->halt = false;
195 
196 	TAILQ_FOREACH_SAFE(mv, &reloc->move_queue[FTL_RELOC_STATE_HALT], qentry,
197 			   next) {
198 		move_set_state(mv, FTL_RELOC_STATE_READ);
199 	}
200 }
201 
/*
 * Transition a mover to the given state: set the request callbacks
 * appropriate for that state and relocate the mover onto the matching
 * per-state queue.
 */
static void
move_set_state(struct ftl_reloc_move *mv, enum ftl_reloc_move_state state)
{
	struct ftl_reloc *reloc = mv->reloc;

	switch (state) {
	case FTL_RELOC_STATE_READ:
		/* Starting a fresh read cycle - reset the request iterator */
		mv->rq->owner.cb = move_read_cb;
		mv->rq->owner.error = move_read_error_cb;
		mv->rq->iter.idx = 0;
		mv->rq->iter.count = 0;
		mv->rq->success = true;
		break;

	case FTL_RELOC_STATE_WRITE:
		mv->rq->owner.cb = move_write_cb;
		mv->rq->owner.error = NULL;
		break;

	case FTL_RELOC_STATE_PIN:
	case FTL_RELOC_STATE_WAIT:
	case FTL_RELOC_STATE_HALT:
		/* No request setup needed for these states */
		break;

	default:
		ftl_abort();
		break;
	}

	if (mv->state != state) {
		/* Remove the mover from previous queue */
		TAILQ_REMOVE(&reloc->move_queue[mv->state], mv, qentry);
		/* Insert the mover to the new queue */
		TAILQ_INSERT_TAIL(&reloc->move_queue[state], mv, qentry);
		/* Update state */
		mv->state = state;
	}
}
240 
241 static void
242 move_get_band_cb(struct ftl_band *band, void *cntx, bool status)
243 {
244 	struct ftl_reloc *reloc = cntx;
245 
246 	if (spdk_likely(status)) {
247 		reloc->band = band;
248 		ftl_band_iter_init(band);
249 	}
250 	reloc->band_waiting = false;
251 }
252 
253 static void
254 move_grab_new_band(struct ftl_reloc *reloc)
255 {
256 	if (!reloc->band_waiting) {
257 		if (!ftl_needs_reloc(reloc->dev)) {
258 			return;
259 		}
260 
261 		/* Limit number of simultaneously relocated bands */
262 		if (reloc->band_done_count > 2) {
263 			return;
264 		}
265 
266 		reloc->band_waiting = true;
267 		ftl_band_get_next_gc(reloc->dev, move_get_band_cb, reloc);
268 	}
269 }
270 
271 static struct ftl_band *
272 move_get_band(struct ftl_reloc *reloc)
273 {
274 	struct ftl_band *band = reloc->band;
275 
276 	if (!band) {
277 		move_grab_new_band(reloc);
278 		return NULL;
279 	}
280 
281 	if (!ftl_band_filled(band, band->md->iter.offset)) {
282 		/* Band still not read, we can continue reading */
283 		return band;
284 	}
285 
286 	TAILQ_INSERT_TAIL(&reloc->band_done, band, queue_entry);
287 	reloc->band_done_count++;
288 	reloc->band = NULL;
289 
290 	return NULL;
291 }
292 
/*
 * Populate the next iter.count request entries (LBA, source address, seq_id)
 * from the band's P2L map starting at iter.idx, taking one band reference per
 * entry. Called right after the read for those entries has been submitted.
 */
static void
move_advance_rq(struct ftl_rq *rq)
{
	struct ftl_band *band = rq->io.band;
	uint64_t offset, i;
	struct ftl_rq_entry *entry = &rq->entries[rq->iter.idx];

	assert(rq->iter.idx + rq->iter.count <= rq->num_blocks);

	for (i = 0; i < rq->iter.count; i++) {
		offset = ftl_band_block_offset_from_addr(band, rq->io.addr);

		assert(offset < ftl_get_num_blocks_in_band(band->dev));
		assert(ftl_band_block_offset_valid(band, offset));

		entry->lba = band->p2l_map.band_map[offset].lba;
		entry->addr = rq->io.addr;
		entry->owner.priv = band;
		entry->seq_id = band->p2l_map.band_map[offset].seq_id;

		entry++;
		rq->io.addr = ftl_band_next_addr(band, rq->io.addr, 1);
		/* Keep the band alive until this entry has been written out */
		band->owner.cnt++;
	}

	/* Increase QD for the request */
	rq->iter.qd++;

	/* Advanced request iterator */
	rq->iter.idx += rq->iter.count;
}
324 
325 static void
326 move_init_entries(struct ftl_rq *rq, uint64_t idx, uint64_t count)
327 {
328 	uint64_t i = 0;
329 	struct ftl_rq_entry *iter = &rq->entries[idx];
330 
331 	assert(idx + count <= rq->num_blocks);
332 
333 	i = 0;
334 	while (i < count) {
335 		iter->addr = FTL_ADDR_INVALID;
336 		iter->owner.priv = NULL;
337 		iter->lba = FTL_LBA_INVALID;
338 		iter->seq_id = 0;
339 		iter++;
340 		i++;
341 	}
342 }
343 
/*
 * Error path for a failed band read: reset the affected request entries and
 * drop the per-band references taken for them in move_advance_rq().
 */
static void
move_read_error_cb(struct ftl_rq *rq, struct ftl_band *band, uint64_t idx, uint64_t count)
{
	move_init_entries(rq, idx, count);
	band->owner.cnt -= count;
}
350 
351 static void
352 move_read_cb(struct ftl_rq *rq)
353 {
354 	struct ftl_reloc_move *mv = rq->owner.priv;
355 
356 	/* Decrease QD of the request */
357 	assert(rq->iter.qd > 0);
358 	rq->iter.qd--;
359 
360 	if (rq->iter.idx != rq->num_blocks || rq->iter.qd) {
361 		return;
362 	}
363 
364 	move_set_state(mv, FTL_RELOC_STATE_PIN);
365 }
366 
367 static void
368 move_rq_pad(struct ftl_rq *rq, struct ftl_band *band)
369 {
370 	struct ftl_rq_entry *entry = &rq->entries[rq->iter.idx];
371 
372 	for (; rq->iter.idx < rq->num_blocks; ++rq->iter.idx) {
373 		entry->addr = rq->io.addr;
374 		entry->owner.priv = band;
375 		entry->lba = FTL_LBA_INVALID;
376 		entry->seq_id = 0;
377 		entry++;
378 		rq->io.addr = ftl_band_next_addr(band, rq->io.addr, 1);
379 		band->owner.cnt++;
380 	}
381 
382 	assert(rq->iter.idx == rq->num_blocks);
383 }
384 
/*
 * Schedule a read of the next run of valid blocks from the band into the
 * mover's request. Invalid blocks ahead of the iterator are skipped; when the
 * band runs out of valid data the request is padded and completed through the
 * read callback. The mover switches to WAIT once the request is fully
 * scheduled.
 */
static void
move_read(struct ftl_reloc *reloc, struct ftl_reloc_move *mv, struct ftl_band *band)
{
	struct ftl_rq *rq = mv->rq;
	uint64_t blocks = ftl_get_num_blocks_in_band(band->dev);
	uint64_t pos = band->md->iter.offset;
	uint64_t begin = ftl_bitmap_find_first_set(band->p2l_map.valid, pos, UINT64_MAX);
	uint64_t end, band_left, rq_left;

	if (spdk_likely(begin < blocks)) {
		if (begin > pos) {
			/* Skip over the invalid blocks between the iterator
			 * and the first valid one */
			ftl_band_iter_advance(band, begin - pos);
		} else if (begin == pos) {
			/* Valid block at the position of iterator */
		} else {
			/* Inconsistent state */
			ftl_abort();
		}
	} else if (UINT64_MAX == begin) {
		/* No more valid LBAs in the band */
		band_left = ftl_band_user_blocks_left(band, pos);
		ftl_band_iter_advance(band, band_left);

		assert(ftl_band_filled(band, band->md->iter.offset));

		if (rq->iter.idx) {
			/* Request partially filled - pad it and drive it to
			 * completion through the read callback */
			move_rq_pad(rq, band);
			move_set_state(mv, FTL_RELOC_STATE_WAIT);
			rq->iter.qd++;
			rq->owner.cb(rq);
		}

		return;
	} else {
		/* Inconsistent state */
		ftl_abort();
	}

	rq_left = rq->num_blocks - rq->iter.idx;
	assert(rq_left > 0);

	/* Find next clear bit, but no further than max request count */
	end = ftl_bitmap_find_first_clear(band->p2l_map.valid, begin + 1, begin + rq_left);
	if (end != UINT64_MAX) {
		rq_left = end - begin;
	}

	band_left = ftl_band_user_blocks_left(band, band->md->iter.offset);
	rq->iter.count = spdk_min(rq_left, band_left);

	ftl_band_rq_read(band, rq);

	move_advance_rq(rq);

	/* Advance band iterator */
	ftl_band_iter_advance(band, rq->iter.count);

	/* If band is fully written pad rest of request */
	if (ftl_band_filled(band, band->md->iter.offset)) {
		move_rq_pad(rq, band);
	}

	if (rq->iter.idx == rq->num_blocks) {
		/*
		 * All request entries scheduled for reading,
		 * We can change state to waiting
		 */
		move_set_state(mv, FTL_RELOC_STATE_WAIT);
	}
}
455 
/*
 * L2P pin completion for a single request entry. When the last pin completes
 * (remaining reaches zero): on any failure unpin everything and retry the
 * whole pin phase, otherwise advance the mover to the write phase.
 */
static void
move_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
{
	struct ftl_reloc_move *mv = pin_ctx->cb_ctx;
	struct ftl_rq *rq = mv->rq;

	if (status) {
		/* Record the failure and invalidate this entry so the later
		 * ftl_rq_unpin() skips it */
		rq->iter.status = status;
		pin_ctx->lba = FTL_LBA_INVALID;
	}

	if (--rq->iter.remaining == 0) {
		if (rq->iter.status) {
			/* unpin and try again */
			ftl_rq_unpin(rq);
			move_set_state(mv, FTL_RELOC_STATE_PIN);
			return;
		}

		move_set_state(mv, FTL_RELOC_STATE_WRITE);
	}
}
478 
479 static void
480 move_pin(struct ftl_reloc_move *mv)
481 {
482 	struct ftl_rq *rq = mv->rq;
483 	struct ftl_rq_entry *entry = rq->entries;
484 	uint64_t i;
485 
486 	move_set_state(mv, FTL_RELOC_STATE_WAIT);
487 
488 	rq->iter.remaining = rq->iter.count = rq->num_blocks;
489 	rq->iter.status = 0;
490 
491 	for (i = 0; i < rq->num_blocks; i++) {
492 		if (entry->lba != FTL_LBA_INVALID) {
493 			ftl_l2p_pin(rq->dev, entry->lba, 1, move_pin_cb, mv, &entry->l2p_pin_ctx);
494 		} else {
495 			ftl_l2p_pin_skip(rq->dev, move_pin_cb, mv, &entry->l2p_pin_ctx);
496 		}
497 		entry++;
498 	}
499 }
500 
/*
 * Post-write bookkeeping for a completed relocation request: for every entry,
 * drop the source-band reference taken at read time and, for real (non-pad)
 * entries, point the L2P at the new address and release the pin.
 */
static void
move_finish_write(struct ftl_rq *rq)
{
	uint64_t i;
	struct spdk_ftl_dev *dev = rq->dev;
	struct ftl_rq_entry *iter = rq->entries;
	ftl_addr addr = rq->io.addr;
	struct ftl_band *rq_band = rq->io.band;
	struct ftl_band *band;

	for (i = 0; i < rq->num_blocks; ++i, ++iter) {
		/* Source band the entry came from; may differ from the band
		 * the request was written to (rq_band) */
		band = iter->owner.priv;

		if (band) {
			assert(band->owner.cnt > 0);
			band->owner.cnt--;
		}
		if (iter->lba != FTL_LBA_INVALID) {
			/* Update L2P table */
			ftl_l2p_update_base(dev, iter->lba, addr, iter->addr);
			ftl_l2p_unpin(dev, iter->lba, 1);
		}
		addr = ftl_band_next_addr(rq_band, addr, 1);
	}
}
526 
527 static void
528 move_write_cb(struct ftl_rq *rq)
529 {
530 	struct ftl_reloc_move *mv = rq->owner.priv;
531 
532 	assert(rq->iter.qd == 1);
533 	rq->iter.qd--;
534 
535 	if (spdk_likely(rq->success)) {
536 		move_finish_write(rq);
537 		move_set_state(mv, FTL_RELOC_STATE_READ);
538 	} else {
539 		/* Write failed, repeat write */
540 		move_set_state(mv, FTL_RELOC_STATE_WRITE);
541 	}
542 }
543 
544 static void
545 move_write(struct ftl_reloc *reloc, struct ftl_reloc_move *mv)
546 {
547 	struct spdk_ftl_dev *dev = mv->dev;
548 	struct ftl_rq *rq = mv->rq;
549 
550 	assert(rq->iter.idx == rq->num_blocks);
551 
552 	/* Request contains data to be placed on a new location, submit it */
553 	ftl_writer_queue_rq(&dev->writer_gc, rq);
554 	rq->iter.qd++;
555 
556 	move_set_state(mv, FTL_RELOC_STATE_WAIT);
557 }
558 
/*
 * Drive one mover through its state machine. READ and WRITE honor the halt
 * flag by parking the mover (WRITE additionally releases its L2P pins);
 * PIN kicks off L2P pinning; WAIT and HALT are passive states advanced only
 * by completion callbacks.
 */
static void
move_run(struct ftl_reloc *reloc, struct ftl_reloc_move *mv)
{
	struct ftl_band *band;

	switch (mv->state) {
	case FTL_RELOC_STATE_READ: {
		if (spdk_unlikely(reloc->halt)) {
			move_set_state(mv, FTL_RELOC_STATE_HALT);
			break;
		}

		band = move_get_band(reloc);
		if (!band) {
			/* No band ready yet - try again on the next poll */
			break;
		}

		move_read(reloc, mv, band);
	}
	break;

	case FTL_RELOC_STATE_PIN:
		move_pin(mv);
		ftl_add_io_activity(reloc->dev);
		break;

	case FTL_RELOC_STATE_WRITE:
		if (spdk_unlikely(reloc->halt)) {
			/* Data was read but won't be written - release the
			 * pins taken during the PIN phase before parking */
			ftl_rq_unpin(mv->rq);
			move_set_state(mv, FTL_RELOC_STATE_HALT);
			break;
		}

		ftl_add_io_activity(reloc->dev);
		move_write(reloc, mv);
		break;

	case FTL_RELOC_STATE_HALT:
	case FTL_RELOC_STATE_WAIT:
		break;

	default:
		assert(0);
		ftl_abort();
		break;
	}
}
606 
/*
 * A read error left valid data in the band: take the band off the done list
 * and cycle its state back to CLOSED so it becomes eligible for another
 * relocation attempt.
 */
static void
move_handle_band_error(struct ftl_band *band)
{
	struct ftl_reloc *reloc = band->dev->reloc;
	/*
	 * Handle band error, it's because an error occurred during reading,
	 * Add band to the close band list, will try reloc it in a moment
	 */
	TAILQ_REMOVE(&reloc->band_done, band, queue_entry);
	reloc->band_done_count--;

	band->md->state = FTL_BAND_STATE_CLOSING;
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSED);
}
621 
622 static void
623 move_release_bands(struct ftl_reloc *reloc)
624 {
625 	struct ftl_band *band;
626 
627 	if (TAILQ_EMPTY(&reloc->band_done)) {
628 		return;
629 	}
630 
631 	band = TAILQ_FIRST(&reloc->band_done);
632 
633 	if (band->owner.cnt || ftl_band_qd(band)) {
634 		/* Band still in use */
635 		return;
636 	}
637 
638 	if (ftl_band_empty(band)) {
639 		assert(ftl_band_filled(band, band->md->iter.offset));
640 		TAILQ_REMOVE(&reloc->band_done, band, queue_entry);
641 		reloc->band_done_count--;
642 		ftl_band_free(band);
643 	} else {
644 		move_handle_band_error(band);
645 	}
646 }
647 
648 bool
649 ftl_reloc_is_halted(const struct ftl_reloc *reloc)
650 {
651 	size_t i, count;
652 
653 	count = SPDK_COUNTOF(reloc->move_queue);
654 	for (i = 0; i < count; ++i) {
655 		if (i == FTL_RELOC_STATE_HALT) {
656 			continue;
657 		}
658 
659 		if (!TAILQ_EMPTY(&reloc->move_queue[i])) {
660 			return false;
661 		}
662 	}
663 
664 	return true;
665 }
666 
667 void
668 ftl_reloc(struct ftl_reloc *reloc)
669 {
670 	size_t i, count;
671 
672 	count = SPDK_COUNTOF(reloc->move_queue);
673 	for (i = 0; i < count; ++i) {
674 		if (TAILQ_EMPTY(&reloc->move_queue[i])) {
675 			continue;
676 		}
677 
678 		move_run(reloc, TAILQ_FIRST(&reloc->move_queue[i]));
679 	}
680 
681 	move_release_bands(reloc);
682 }
683