xref: /spdk/lib/ftl/ftl_reloc.c (revision 60982c759db49b4f4579f16e3b24df0725ba4b94)
1  /*   SPDX-License-Identifier: BSD-3-Clause
2   *   Copyright (C) 2018 Intel Corporation.
3   *   All rights reserved.
4   */
5  
6  #include "ftl_band.h"
7  #include "ftl_core.h"
8  #include "ftl_debug.h"
9  #include "ftl_io.h"
10  #include "ftl_internal.h"
11  #include "spdk/ftl.h"
12  #include "spdk/likely.h"
13  
14  struct ftl_reloc;
15  struct ftl_band_reloc;
16  
/* TODO: Should probably change the move naming nomenclature to something more descriptive */
enum ftl_reloc_move_state {
	/* Schedule reads of valid blocks from the band being relocated */
	FTL_RELOC_STATE_READ = 0,
	/* Pin the L2P pages of the LBAs that were read */
	FTL_RELOC_STATE_PIN,
	/* Submit the filled request to the GC writer */
	FTL_RELOC_STATE_WRITE,
	/* Parked until asynchronous IO / pin callbacks complete */
	FTL_RELOC_STATE_WAIT,
	/* Relocation halted; mover is idle until ftl_reloc_resume() */
	FTL_RELOC_STATE_HALT,

	/* Number of states - sizes the per-state mover queues */
	FTL_RELOC_STATE_MAX
};
27  
/* A single relocation "mover": owns one IO request and cycles through the
 * ftl_reloc_move_state machine (read -> pin -> write -> wait).
 */
struct ftl_reloc_move {
	/* FTL device */
	struct spdk_ftl_dev *dev;

	/* Parent relocation context */
	struct ftl_reloc *reloc;

	/* Request for doing IO */
	struct ftl_rq *rq;

	/* Current state; also selects which reloc->move_queue[] the mover sits on */
	enum ftl_reloc_move_state state;

	/* Entry on the per-state queue (reloc->move_queue[state]) */
	TAILQ_ENTRY(ftl_reloc_move) qentry;
};
43  
/* Top-level relocation (GC data movement) context for one device */
struct ftl_reloc {
	/* Device associated with relocate */
	struct spdk_ftl_dev *dev;

	/* Indicates relocate is about to halt */
	bool halt;

	/* Band currently being read for relocation (NULL when none) */
	struct ftl_band *band;

	/* Bands already read, but waiting for finishing GC */
	TAILQ_HEAD(, ftl_band) band_done;
	size_t band_done_count;

	/* Flag indicating reloc is waiting for a new band (get_next_gc in flight) */
	bool band_waiting;

	/* Number of movers, i.e. maximum number of parallel IO requests */
	size_t max_qdepth;

	/* Backing array of max_qdepth mover objects */
	struct ftl_reloc_move *move_buffer;

	/* Per-state queues of movers; each mover is always on exactly one */
	TAILQ_HEAD(, ftl_reloc_move) move_queue[FTL_RELOC_STATE_MAX];

};
71  
/* Forward declarations for the mover state-machine helpers defined below */
static void move_read_cb(struct ftl_rq *rq);
static void move_write_cb(struct ftl_rq *rq);
static void move_set_state(struct ftl_reloc_move *mv, enum ftl_reloc_move_state state);
static void move_write(struct ftl_reloc *reloc, struct ftl_reloc_move *mv);
static void move_read_error_cb(struct ftl_rq *rq, struct ftl_band *band, uint64_t idx,
			       uint64_t count);
78  
79  static void
80  move_deinit(struct ftl_reloc_move *mv)
81  {
82  	assert(mv);
83  	ftl_rq_del(mv->rq);
84  }
85  
86  static int
87  move_init(struct ftl_reloc *reloc, struct ftl_reloc_move *mv)
88  {
89  	mv->state = FTL_RELOC_STATE_HALT;
90  	TAILQ_INSERT_TAIL(&reloc->move_queue[FTL_RELOC_STATE_HALT], mv, qentry);
91  
92  	mv->reloc = reloc;
93  	mv->dev = reloc->dev;
94  	mv->rq = ftl_rq_new(mv->dev, mv->dev->md_size);
95  
96  	if (!mv->rq) {
97  		return -ENOMEM;
98  	}
99  	mv->rq->owner.priv = mv;
100  
101  	return 0;
102  }
103  
104  struct ftl_reloc *
105  ftl_reloc_init(struct spdk_ftl_dev *dev)
106  {
107  	struct ftl_reloc *reloc;
108  	struct ftl_reloc_move *move;
109  	size_t i, count;
110  
111  	reloc = calloc(1, sizeof(*reloc));
112  	if (!reloc) {
113  		return NULL;
114  	}
115  
116  	reloc->dev = dev;
117  	reloc->halt = true;
118  	reloc->max_qdepth = dev->sb->max_reloc_qdepth;
119  
120  	reloc->move_buffer = calloc(reloc->max_qdepth, sizeof(*reloc->move_buffer));
121  	if (!reloc->move_buffer) {
122  		FTL_ERRLOG(dev, "Failed to initialize reloc moves pool");
123  		goto error;
124  	}
125  
126  	/* Initialize movers queues */
127  	count = SPDK_COUNTOF(reloc->move_queue);
128  	for (i = 0; i < count; ++i) {
129  		TAILQ_INIT(&reloc->move_queue[i]);
130  	}
131  
132  	for (i = 0; i < reloc->max_qdepth; ++i) {
133  		move = &reloc->move_buffer[i];
134  
135  		if (move_init(reloc, move)) {
136  			goto error;
137  		}
138  	}
139  
140  	TAILQ_INIT(&reloc->band_done);
141  
142  	return reloc;
143  error:
144  	ftl_reloc_free(reloc);
145  	return NULL;
146  }
147  
/* Context for completing a reloc task.
 * NOTE(review): not referenced anywhere in this file - possibly dead code
 * or consumed by another translation unit; confirm before removing.
 */
struct ftl_reloc_task_fini {
	struct ftl_reloc_task *task;
	spdk_msg_fn cb;
	void *cb_arg;
};
153  
154  void
155  ftl_reloc_free(struct ftl_reloc *reloc)
156  {
157  	size_t i;
158  
159  	if (!reloc) {
160  		return;
161  	}
162  
163  	if (reloc->move_buffer) {
164  		for (i = 0; i < reloc->max_qdepth; ++i) {
165  			move_deinit(&reloc->move_buffer[i]);
166  		}
167  	}
168  
169  	free(reloc->move_buffer);
170  	free(reloc);
171  }
172  
/*
 * Request that relocation stop.  Movers observe the flag in move_run()
 * and transition themselves into the HALT state.
 */
void
ftl_reloc_halt(struct ftl_reloc *reloc)
{
	reloc->halt = true;
}
178  
179  void
180  ftl_reloc_resume(struct ftl_reloc *reloc)
181  {
182  	struct ftl_reloc_move *mv, *next;
183  	reloc->halt = false;
184  
185  	TAILQ_FOREACH_SAFE(mv, &reloc->move_queue[FTL_RELOC_STATE_HALT], qentry,
186  			   next) {
187  		move_set_state(mv, FTL_RELOC_STATE_READ);
188  	}
189  }
190  
191  static void
192  move_set_state(struct ftl_reloc_move *mv, enum ftl_reloc_move_state state)
193  {
194  	struct ftl_reloc *reloc = mv->reloc;
195  
196  	switch (state) {
197  	case FTL_RELOC_STATE_READ:
198  		mv->rq->owner.cb = move_read_cb;
199  		mv->rq->owner.error = move_read_error_cb;
200  		mv->rq->iter.idx = 0;
201  		mv->rq->iter.count = 0;
202  		mv->rq->success = true;
203  		break;
204  
205  	case FTL_RELOC_STATE_WRITE:
206  		mv->rq->owner.cb = move_write_cb;
207  		mv->rq->owner.error = NULL;
208  		break;
209  
210  	case FTL_RELOC_STATE_PIN:
211  	case FTL_RELOC_STATE_WAIT:
212  	case FTL_RELOC_STATE_HALT:
213  		break;
214  
215  	default:
216  		ftl_abort();
217  		break;
218  	}
219  
220  	if (mv->state != state) {
221  		/* Remove the mover from previous queue */
222  		TAILQ_REMOVE(&reloc->move_queue[mv->state], mv, qentry);
223  		/* Insert the mover to the new queue */
224  		TAILQ_INSERT_TAIL(&reloc->move_queue[state], mv, qentry);
225  		/* Update state */
226  		mv->state = state;
227  	}
228  }
229  
230  static void
231  move_get_band_cb(struct ftl_band *band, void *cntx, bool status)
232  {
233  	struct ftl_reloc *reloc = cntx;
234  
235  	if (spdk_likely(status)) {
236  		reloc->band = band;
237  		ftl_band_iter_init(band);
238  	}
239  	reloc->band_waiting = false;
240  }
241  
242  static void
243  move_grab_new_band(struct ftl_reloc *reloc)
244  {
245  	if (!reloc->band_waiting) {
246  		if (!ftl_needs_reloc(reloc->dev)) {
247  			return;
248  		}
249  
250  		/* Limit number of simultaneously relocated bands */
251  		if (reloc->band_done_count > 2) {
252  			return;
253  		}
254  
255  		reloc->band_waiting = true;
256  		ftl_band_get_next_gc(reloc->dev, move_get_band_cb, reloc);
257  	}
258  }
259  
260  static struct ftl_band *
261  move_get_band(struct ftl_reloc *reloc)
262  {
263  	struct ftl_band *band = reloc->band;
264  
265  	if (!band) {
266  		move_grab_new_band(reloc);
267  		return NULL;
268  	}
269  
270  	if (!ftl_band_filled(band, band->md->iter.offset)) {
271  		/* Band still not read, we can continue reading */
272  		return band;
273  	}
274  
275  	TAILQ_INSERT_TAIL(&reloc->band_done, band, queue_entry);
276  	reloc->band_done_count++;
277  	reloc->band = NULL;
278  
279  	return NULL;
280  }
281  
282  static void
283  move_advance_rq(struct ftl_rq *rq)
284  {
285  	struct ftl_band *band = rq->io.band;
286  	uint64_t offset, i;
287  	struct ftl_rq_entry *entry = &rq->entries[rq->iter.idx];
288  
289  	assert(rq->iter.idx + rq->iter.count <= rq->num_blocks);
290  
291  	for (i = 0; i < rq->iter.count; i++) {
292  		offset = ftl_band_block_offset_from_addr(band, rq->io.addr);
293  
294  		assert(offset < ftl_get_num_blocks_in_band(band->dev));
295  		assert(ftl_band_block_offset_valid(band, offset));
296  
297  		entry->lba = band->p2l_map.band_map[offset].lba;
298  		entry->addr = rq->io.addr;
299  		entry->owner.priv = band;
300  		entry->seq_id = band->p2l_map.band_map[offset].seq_id;
301  
302  		entry++;
303  		rq->io.addr = ftl_band_next_addr(band, rq->io.addr, 1);
304  		band->owner.cnt++;
305  	}
306  
307  	/* Increase QD for the request */
308  	rq->iter.qd++;
309  
310  	/* Advanced request iterator */
311  	rq->iter.idx += rq->iter.count;
312  }
313  
314  static void
315  move_init_entries(struct ftl_rq *rq, uint64_t idx, uint64_t count)
316  {
317  	uint64_t i = 0;
318  	struct ftl_rq_entry *iter = &rq->entries[idx];
319  
320  	assert(idx + count <= rq->num_blocks);
321  
322  	i = 0;
323  	while (i < count) {
324  		iter->addr = FTL_ADDR_INVALID;
325  		iter->owner.priv = NULL;
326  		iter->lba = FTL_LBA_INVALID;
327  		iter->seq_id = 0;
328  		iter++;
329  		i++;
330  	}
331  }
332  
/*
 * Read-error callback: reset the failed entries back to the empty state
 * and drop the band references taken for them in move_advance_rq().
 */
static void
move_read_error_cb(struct ftl_rq *rq, struct ftl_band *band, uint64_t idx, uint64_t count)
{
	move_init_entries(rq, idx, count);
	band->owner.cnt -= count;
}
339  
340  static void
341  move_read_cb(struct ftl_rq *rq)
342  {
343  	struct ftl_reloc_move *mv = rq->owner.priv;
344  
345  	/* Decrease QD of the request */
346  	assert(rq->iter.qd > 0);
347  	rq->iter.qd--;
348  
349  	if (rq->iter.idx != rq->num_blocks || rq->iter.qd) {
350  		return;
351  	}
352  
353  	move_set_state(mv, FTL_RELOC_STATE_PIN);
354  }
355  
356  static void
357  move_rq_pad(struct ftl_rq *rq, struct ftl_band *band)
358  {
359  	struct ftl_rq_entry *entry = &rq->entries[rq->iter.idx];
360  
361  	for (; rq->iter.idx < rq->num_blocks; ++rq->iter.idx) {
362  		entry->addr = rq->io.addr;
363  		entry->owner.priv = band;
364  		entry->lba = FTL_LBA_INVALID;
365  		entry->seq_id = 0;
366  		entry++;
367  		rq->io.addr = ftl_band_next_addr(band, rq->io.addr, 1);
368  		band->owner.cnt++;
369  	}
370  
371  	assert(rq->iter.idx == rq->num_blocks);
372  }
373  
/*
 * Schedule reads of the next run of valid blocks from 'band' into the
 * mover's request.  Walks the band's valid-block bitmap from the band
 * iterator position, submits the read, records the P2L mappings via
 * move_advance_rq(), and switches the mover to WAIT once the request is
 * fully populated.
 */
static void
move_read(struct ftl_reloc *reloc, struct ftl_reloc_move *mv, struct ftl_band *band)
{
	struct ftl_rq *rq = mv->rq;
	uint64_t blocks = ftl_get_num_blocks_in_band(band->dev);
	uint64_t pos = band->md->iter.offset;
	uint64_t begin = ftl_bitmap_find_first_set(band->p2l_map.valid, pos, UINT64_MAX);
	uint64_t end, band_left, rq_left;

	if (spdk_likely(begin < blocks)) {
		if (begin > pos) {
			/* Skip over the invalid blocks between the iterator and
			 * the first valid one
			 */
			ftl_band_iter_advance(band, begin - pos);
		} else if (begin == pos) {
			/* Valid block at the position of iterator */
		} else {
			/* Inconsistent state */
			ftl_abort();
		}
	} else if (UINT64_MAX == begin) {
		/* No more valid LBAs in the band */
		band_left = ftl_band_user_blocks_left(band, pos);
		ftl_band_iter_advance(band, band_left);

		assert(ftl_band_filled(band, band->md->iter.offset));

		if (rq->iter.idx) {
			/* Request partially filled - pad it and complete it
			 * through the read callback so the pending entries
			 * proceed to the PIN stage
			 */
			move_rq_pad(rq, band);
			move_set_state(mv, FTL_RELOC_STATE_WAIT);
			rq->iter.qd++;
			rq->owner.cb(rq);
		}

		return;
	} else {
		/* Inconsistent state */
		ftl_abort();
	}

	rq_left = rq->num_blocks - rq->iter.idx;
	assert(rq_left > 0);

	/* Find next clear bit, but no further than max request count */
	end = ftl_bitmap_find_first_clear(band->p2l_map.valid, begin + 1, begin + rq_left);
	if (end != UINT64_MAX) {
		rq_left = end - begin;
	}

	/* Clamp the read to what the band still has left */
	band_left = ftl_band_user_blocks_left(band, band->md->iter.offset);
	rq->iter.count = spdk_min(rq_left, band_left);

	ftl_band_rq_read(band, rq);

	move_advance_rq(rq);

	/* Advance band iterator */
	ftl_band_iter_advance(band, rq->iter.count);

	/* If band is fully written pad rest of request */
	if (ftl_band_filled(band, band->md->iter.offset)) {
		move_rq_pad(rq, band);
	}

	if (rq->iter.idx == rq->num_blocks) {
		/*
		 * All request entries scheduled for reading,
		 * We can change state to waiting
		 */
		move_set_state(mv, FTL_RELOC_STATE_WAIT);
	}
}
444  
445  static void
446  move_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
447  {
448  	struct ftl_reloc_move *mv = pin_ctx->cb_ctx;
449  	struct ftl_rq *rq = mv->rq;
450  
451  	if (status) {
452  		rq->iter.status = status;
453  		pin_ctx->lba = FTL_LBA_INVALID;
454  	}
455  
456  	if (--rq->iter.remaining == 0) {
457  		if (rq->iter.status) {
458  			/* unpin and try again */
459  			ftl_rq_unpin(rq);
460  			move_set_state(mv, FTL_RELOC_STATE_PIN);
461  			return;
462  		}
463  
464  		move_set_state(mv, FTL_RELOC_STATE_WRITE);
465  	}
466  }
467  
468  static void
469  move_pin(struct ftl_reloc_move *mv)
470  {
471  	struct ftl_rq *rq = mv->rq;
472  	struct ftl_rq_entry *entry = rq->entries;
473  	uint64_t i;
474  
475  	move_set_state(mv, FTL_RELOC_STATE_WAIT);
476  
477  	rq->iter.remaining = rq->iter.count = rq->num_blocks;
478  	rq->iter.status = 0;
479  
480  	for (i = 0; i < rq->num_blocks; i++) {
481  		if (entry->lba != FTL_LBA_INVALID) {
482  			ftl_l2p_pin(rq->dev, entry->lba, 1, move_pin_cb, mv, &entry->l2p_pin_ctx);
483  		} else {
484  			ftl_l2p_pin_skip(rq->dev, move_pin_cb, mv, &entry->l2p_pin_ctx);
485  		}
486  		entry++;
487  	}
488  }
489  
490  static void
491  move_finish_write(struct ftl_rq *rq)
492  {
493  	uint64_t i;
494  	struct spdk_ftl_dev *dev = rq->dev;
495  	struct ftl_rq_entry *iter = rq->entries;
496  	ftl_addr addr = rq->io.addr;
497  	struct ftl_band *rq_band = rq->io.band;
498  	struct ftl_band *band;
499  
500  	for (i = 0; i < rq->num_blocks; ++i, ++iter) {
501  		band = iter->owner.priv;
502  
503  		if (band) {
504  			assert(band->owner.cnt > 0);
505  			band->owner.cnt--;
506  		}
507  		if (iter->lba != FTL_LBA_INVALID) {
508  			/* Update L2P table */
509  			ftl_l2p_update_base(dev, iter->lba, addr, iter->addr);
510  			ftl_l2p_unpin(dev, iter->lba, 1);
511  		}
512  		addr = ftl_band_next_addr(rq_band, addr, 1);
513  	}
514  }
515  
516  static void
517  move_write_cb(struct ftl_rq *rq)
518  {
519  	struct ftl_reloc_move *mv = rq->owner.priv;
520  
521  	assert(rq->iter.qd == 1);
522  	rq->iter.qd--;
523  
524  	if (spdk_likely(rq->success)) {
525  		move_finish_write(rq);
526  		move_set_state(mv, FTL_RELOC_STATE_READ);
527  	} else {
528  		/* Write failed, repeat write */
529  		move_set_state(mv, FTL_RELOC_STATE_WRITE);
530  	}
531  }
532  
/*
 * Submit the fully populated request to the GC writer; completion is
 * handled by move_write_cb().  NOTE(review): 'reloc' is unused here -
 * presumably kept for signature symmetry with the other state handlers.
 */
static void
move_write(struct ftl_reloc *reloc, struct ftl_reloc_move *mv)
{
	struct spdk_ftl_dev *dev = mv->dev;
	struct ftl_rq *rq = mv->rq;

	assert(rq->iter.idx == rq->num_blocks);

	/* Request contains data to be placed on a new location, submit it */
	ftl_writer_queue_rq(&dev->writer_gc, rq);
	rq->iter.qd++;

	move_set_state(mv, FTL_RELOC_STATE_WAIT);
}
547  
/*
 * Execute one step of the mover state machine.  Called from ftl_reloc()
 * with the head mover of each non-empty state queue.  WAIT and HALT are
 * no-ops: WAIT movers are driven by IO/pin callbacks, HALT movers by
 * ftl_reloc_resume().
 */
static void
move_run(struct ftl_reloc *reloc, struct ftl_reloc_move *mv)
{
	struct ftl_band *band;

	switch (mv->state) {
	case FTL_RELOC_STATE_READ: {
		if (spdk_unlikely(reloc->halt)) {
			move_set_state(mv, FTL_RELOC_STATE_HALT);
			break;
		}

		/* NULL means no band is currently available for reading */
		band = move_get_band(reloc);
		if (!band) {
			break;
		}

		move_read(reloc, mv, band);
	}
	break;

	case FTL_RELOC_STATE_PIN:
		move_pin(mv);
		ftl_add_io_activity(reloc->dev);
		break;

	case FTL_RELOC_STATE_WRITE:
		if (spdk_unlikely(reloc->halt)) {
			/* Halting before the write - release the L2P pins */
			ftl_rq_unpin(mv->rq);
			move_set_state(mv, FTL_RELOC_STATE_HALT);
			break;
		}

		ftl_add_io_activity(reloc->dev);
		move_write(reloc, mv);
		break;

	case FTL_RELOC_STATE_HALT:
	case FTL_RELOC_STATE_WAIT:
		break;

	default:
		assert(0);
		ftl_abort();
		break;
	}
}
595  
/*
 * A band on the done list still holds valid data (i.e. reads failed for
 * some blocks).  Remove it from the done list and force it back through
 * the CLOSING -> CLOSED transition so it becomes a relocation candidate
 * again (presumably re-queued by ftl_band_set_state - confirm in
 * ftl_band.c).
 */
static void
move_handle_band_error(struct ftl_band *band)
{
	struct ftl_reloc *reloc = band->dev->reloc;
	/*
	 * Handle band error, it's because an error occurred during reading,
	 * Add band to the close band list, will try reloc it in a moment
	 */
	TAILQ_REMOVE(&reloc->band_done, band, queue_entry);
	reloc->band_done_count--;

	band->md->state = FTL_BAND_STATE_CLOSING;
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSED);
}
610  
611  static void
612  move_release_bands(struct ftl_reloc *reloc)
613  {
614  	struct ftl_band *band;
615  
616  	if (TAILQ_EMPTY(&reloc->band_done)) {
617  		return;
618  	}
619  
620  	band = TAILQ_FIRST(&reloc->band_done);
621  
622  	if (band->owner.cnt || ftl_band_qd(band)) {
623  		/* Band still in use */
624  		return;
625  	}
626  
627  	if (ftl_band_empty(band)) {
628  		assert(ftl_band_filled(band, band->md->iter.offset));
629  		TAILQ_REMOVE(&reloc->band_done, band, queue_entry);
630  		reloc->band_done_count--;
631  		ftl_band_free(band);
632  	} else {
633  		move_handle_band_error(band);
634  	}
635  }
636  
637  bool
638  ftl_reloc_is_halted(const struct ftl_reloc *reloc)
639  {
640  	size_t i, count;
641  
642  	count = SPDK_COUNTOF(reloc->move_queue);
643  	for (i = 0; i < count; ++i) {
644  		if (i == FTL_RELOC_STATE_HALT) {
645  			continue;
646  		}
647  
648  		if (!TAILQ_EMPTY(&reloc->move_queue[i])) {
649  			return false;
650  		}
651  	}
652  
653  	return true;
654  }
655  
656  void
657  ftl_reloc(struct ftl_reloc *reloc)
658  {
659  	size_t i, count;
660  
661  	count = SPDK_COUNTOF(reloc->move_queue);
662  	for (i = 0; i < count; ++i) {
663  		if (TAILQ_EMPTY(&reloc->move_queue[i])) {
664  			continue;
665  		}
666  
667  		move_run(reloc, TAILQ_FIRST(&reloc->move_queue[i]));
668  	}
669  
670  	move_release_bands(reloc);
671  }
672