/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/crc32.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/ftl.h"

#include "ftl_band.h"
#include "ftl_io.h"
#include "ftl_core.h"
#include "ftl_reloc.h"
#include "ftl_debug.h"

/* TODO: define some signature for meta version */
#define FTL_MD_VER 1

struct __attribute__((packed)) ftl_md_hdr {
	/* Device instance */
	struct spdk_uuid	uuid;

	/* Meta version */
	uint8_t			ver;

	/* Sequence number */
	uint64_t		seq;

	/* CRC32 checksum */
	uint32_t		checksum;
};

/* End metadata layout stored on media (with all three being aligned to block size): */
/* - header */
/* - valid bitmap */
/* - LBA map */
struct __attribute__((packed)) ftl_tail_md {
	struct ftl_md_hdr	hdr;

	/* Max number of lbks */
	uint64_t		num_lbks;

	uint8_t			reserved[4059];
};
SPDK_STATIC_ASSERT(sizeof(struct ftl_tail_md) == FTL_BLOCK_SIZE, "Incorrect metadata size");
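/* The reserved padding sizes the structure to exactly one block: the packed */
/* header is 16 (uuid) + 1 (ver) + 8 (seq) + 4 (checksum) = 29 bytes, so */
/* 29 + 8 (num_lbks) + 4059 (reserved) = 4096 = FTL_BLOCK_SIZE, which the */
/* static assert above verifies. */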

struct __attribute__((packed)) ftl_head_md {
	struct ftl_md_hdr	hdr;

	/* Number of defrag cycles */
	uint64_t		wr_cnt;

	/* Number of surfaced LBAs */
	uint64_t		lba_cnt;

	/* Transfer size */
	uint32_t		xfer_size;
};

size_t
ftl_tail_md_hdr_num_lbks(void)
{
	return spdk_divide_round_up(sizeof(struct ftl_tail_md), FTL_BLOCK_SIZE);
}

size_t
ftl_vld_map_num_lbks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_vld_map_size(dev), FTL_BLOCK_SIZE);
}

size_t
ftl_lba_map_num_lbks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_num_band_lbks(dev) * sizeof(uint64_t), FTL_BLOCK_SIZE);
}

size_t
ftl_head_md_num_lbks(const struct spdk_ftl_dev *dev)
{
	return dev->xfer_size;
}

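/* Total size of the tail metadata in blocks: header + valid map + LBA map, */
/* rounded up to a multiple of the transfer size. E.g. with a 1-block header */
/* and, say, a 2-block valid map, a 64-block LBA map and xfer_size == 16, */
/* this returns spdk_divide_round_up(67, 16) * 16 = 80 blocks. */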
size_t
ftl_tail_md_num_lbks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_tail_md_hdr_num_lbks() +
				    ftl_vld_map_num_lbks(dev) +
				    ftl_lba_map_num_lbks(dev),
				    dev->xfer_size) * dev->xfer_size;
}

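/* Block offset within the band at which the tail metadata starts; the tail */
/* metadata always occupies the last ftl_tail_md_num_lbks() usable blocks. */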
static uint64_t
ftl_band_tail_md_offset(struct ftl_band *band)
{
	return ftl_band_num_usable_lbks(band) -
	       ftl_tail_md_num_lbks(band->dev);
}

int
ftl_band_full(struct ftl_band *band, size_t offset)
{
	return offset == ftl_band_tail_md_offset(band);
}

void
ftl_band_write_failed(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;

	band->high_prio = 1;
	band->tail_md_ppa = ftl_to_ppa(FTL_PPA_INVALID);

	if (!dev->df_band) {
		dev->df_band = band;
	}

	ftl_reloc_add(dev->reloc, band, 0, ftl_num_band_lbks(dev), 1);
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSED);
}

void
ftl_band_clear_md(struct ftl_band *band)
{
	spdk_bit_array_clear_mask(band->md.vld_map);
	memset(band->md.lba_map, 0, ftl_num_band_lbks(band->dev) * sizeof(uint64_t));
	band->md.num_vld = 0;
}

static void
ftl_band_free_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md *md = &band->md;

	assert(band->state == FTL_BAND_STATE_CLOSED ||
	       band->state == FTL_BAND_STATE_FREE);
	assert(md->ref_cnt == 0);
	assert(md->lba_map != NULL);
	assert(!band->high_prio);

	/* Verify that band's metadata is consistent with l2p */
	if (band->num_chunks) {
		assert(ftl_band_validate_md(band, band->md.lba_map) == true);
	}

	spdk_mempool_put(dev->lba_pool, md->lba_map);
	md->lba_map = NULL;
}

static void
_ftl_band_set_free(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_band *lband, *prev;

	/* Verify band's previous state */
	assert(band->state == FTL_BAND_STATE_CLOSED);

	if (band == dev->df_band) {
		dev->df_band = NULL;
	}

	/* Remove the band from the closed band list */
	LIST_REMOVE(band, list_entry);

	/* Keep the list sorted by band's write count */
	LIST_FOREACH(lband, &dev->free_bands, list_entry) {
		if (lband->md.wr_cnt > band->md.wr_cnt) {
			LIST_INSERT_BEFORE(lband, band, list_entry);
			break;
		}
		prev = lband;
	}

	if (!lband) {
		if (LIST_EMPTY(&dev->free_bands)) {
			LIST_INSERT_HEAD(&dev->free_bands, band, list_entry);
		} else {
			LIST_INSERT_AFTER(prev, band, list_entry);
		}
	}

#if defined(DEBUG)
	/* Verify the free band list is still sorted by write count */
	prev = NULL;
	LIST_FOREACH(lband, &dev->free_bands, list_entry) {
		if (prev) {
			assert(prev->md.wr_cnt <= lband->md.wr_cnt);
		}
		prev = lband;
	}
#endif
	dev->num_free++;
	ftl_apply_limits(dev);
}

static void
_ftl_band_set_preparing(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md *md = &band->md;

	/* Verify band's previous state */
	assert(band->state == FTL_BAND_STATE_FREE);
	/* Remove band from free list */
	LIST_REMOVE(band, list_entry);

	md->wr_cnt++;

	assert(dev->num_free > 0);
	dev->num_free--;

	ftl_apply_limits(dev);
}

static void
_ftl_band_set_closed(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_chunk *chunk;

	/* TODO: add this kind of check in band_set_state() */
	if (band->state == FTL_BAND_STATE_CLOSED) {
		return;
	}

	/* Set the state as free_md() checks for that */
	band->state = FTL_BAND_STATE_CLOSED;

	/* Free the md if there are no outstanding IOs */
	ftl_band_release_md(band);

	if (spdk_likely(band->num_chunks)) {
		LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
		CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) {
			chunk->state = FTL_CHUNK_STATE_CLOSED;
		}
	} else {
		LIST_REMOVE(band, list_entry);
	}
}

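/* Compute the CRC32C of the metadata, skipping the checksum field itself: */
/* the bytes before the field are hashed first, then the hash continues */
/* from the first byte after it through the end of the buffer. */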
static uint32_t
ftl_md_calc_crc(const struct ftl_md_hdr *hdr, size_t size)
{
	size_t checkoff = offsetof(struct ftl_md_hdr, checksum);
	size_t mdoff = checkoff + sizeof(hdr->checksum);
	uint32_t crc;

	crc = spdk_crc32c_update(hdr, checkoff, 0);
	return spdk_crc32c_update((const char *)hdr + mdoff, size - mdoff, crc);
}

static void
ftl_set_md_hdr(struct spdk_ftl_dev *dev, struct ftl_md_hdr *hdr,
	       struct ftl_md *md, size_t size)
{
	hdr->seq = md->seq;
	hdr->ver = FTL_MD_VER;
	hdr->uuid = dev->uuid;
	hdr->checksum = ftl_md_calc_crc(hdr, size);
}

static int
ftl_pack_head_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
{
	struct ftl_head_md *head = data;

	head->wr_cnt = md->wr_cnt;
	head->lba_cnt = dev->num_lbas;
	head->xfer_size = dev->xfer_size;
	ftl_set_md_hdr(dev, &head->hdr, md, sizeof(struct ftl_head_md));

	return FTL_MD_SUCCESS;
}

static int
ftl_pack_tail_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
{
	struct ftl_tail_md *tail = data;
	size_t map_size;
	void *vld_offset, *map_offset;

	map_size = ftl_num_band_lbks(dev) * sizeof(uint64_t);
	vld_offset = (char *)data + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
	map_offset = (char *)vld_offset + ftl_vld_map_num_lbks(dev) * FTL_BLOCK_SIZE;

	/* Clear out the buffer */
	memset(data, 0, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
	tail->num_lbks = ftl_num_band_lbks(dev);

	pthread_spin_lock(&md->lock);
	spdk_bit_array_store_mask(md->vld_map, vld_offset);
	pthread_spin_unlock(&md->lock);

	memcpy(map_offset, md->lba_map, map_size);
	ftl_set_md_hdr(dev, &tail->hdr, md, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);

	return FTL_MD_SUCCESS;
}

static int
ftl_md_hdr_vld(struct spdk_ftl_dev *dev, const struct ftl_md_hdr *hdr, size_t size)
{
	if (spdk_uuid_compare(&dev->uuid, &hdr->uuid) != 0) {
		return FTL_MD_NO_MD;
	}

	if (hdr->ver != FTL_MD_VER) {
		return FTL_MD_INVALID_VER;
	}

	if (ftl_md_calc_crc(hdr, size) != hdr->checksum) {
		return FTL_MD_INVALID_CRC;
	}

	return FTL_MD_SUCCESS;
}

static int
ftl_unpack_tail_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
{
	struct ftl_tail_md *tail = data;
	size_t map_size;
	void *vld_offset, *map_offset;
	int rc;

	map_size = ftl_num_band_lbks(dev) * sizeof(uint64_t);
	vld_offset = (char *)data + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
	map_offset = (char *)vld_offset + ftl_vld_map_num_lbks(dev) * FTL_BLOCK_SIZE;

	rc = ftl_md_hdr_vld(dev, &tail->hdr, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
	if (rc) {
		return rc;
	}

	if (tail->num_lbks != ftl_num_band_lbks(dev)) {
		return FTL_MD_INVALID_SIZE;
	}

	if (md->vld_map) {
		spdk_bit_array_load_mask(md->vld_map, vld_offset);
	}

	if (md->lba_map) {
		memcpy(md->lba_map, map_offset, map_size);
	}

	md->seq = tail->hdr.seq;
	return FTL_MD_SUCCESS;
}

static int
ftl_unpack_lba_map(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
{
	memcpy(md->lba_map, data, ftl_num_band_lbks(dev) * sizeof(uint64_t));
	return FTL_MD_SUCCESS;
}

static int
ftl_unpack_head_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
{
	struct ftl_head_md *head = data;
	int rc;

	rc = ftl_md_hdr_vld(dev, &head->hdr, sizeof(struct ftl_head_md));
	if (rc) {
		return rc;
	}

	md->seq = head->hdr.seq;
	md->wr_cnt = head->wr_cnt;

	if (dev->global_md.num_lbas == 0) {
		dev->global_md.num_lbas = head->lba_cnt;
	}

	if (dev->global_md.num_lbas != head->lba_cnt) {
		return FTL_MD_INVALID_SIZE;
	}

	if (dev->xfer_size != head->xfer_size) {
		return FTL_MD_INVALID_SIZE;
	}

	return FTL_MD_SUCCESS;
}

struct ftl_ppa
ftl_band_tail_md_ppa(struct ftl_band *band)
{
	struct ftl_ppa ppa;
	struct ftl_chunk *chunk;
	struct spdk_ftl_dev *dev = band->dev;
	size_t xfer_size = dev->xfer_size;
	size_t num_req = ftl_band_tail_md_offset(band) / xfer_size;
	size_t i;

	if (spdk_unlikely(!band->num_chunks)) {
		return ftl_to_ppa(FTL_PPA_INVALID);
	}

	/* Metadata should be aligned to xfer size */
	assert(ftl_band_tail_md_offset(band) % xfer_size == 0);

	chunk = CIRCLEQ_FIRST(&band->chunks);
	for (i = 0; i < num_req % band->num_chunks; ++i) {
		chunk = ftl_band_next_chunk(band, chunk);
	}

	ppa.lbk = (num_req / band->num_chunks) * xfer_size;
	ppa.chk = band->id;
	ppa.pu = chunk->punit->start_ppa.pu;
	ppa.grp = chunk->punit->start_ppa.grp;

	return ppa;
}

struct ftl_ppa
ftl_band_head_md_ppa(struct ftl_band *band)
{
	struct ftl_ppa ppa;

	if (spdk_unlikely(!band->num_chunks)) {
		return ftl_to_ppa(FTL_PPA_INVALID);
	}

	ppa = CIRCLEQ_FIRST(&band->chunks)->punit->start_ppa;
	ppa.chk = band->id;

	return ppa;
}

void
ftl_band_set_state(struct ftl_band *band, enum ftl_band_state state)
{
	switch (state) {
	case FTL_BAND_STATE_FREE:
		_ftl_band_set_free(band);
		break;

	case FTL_BAND_STATE_PREP:
		_ftl_band_set_preparing(band);
		break;

	case FTL_BAND_STATE_CLOSED:
		_ftl_band_set_closed(band);
		break;

	default:
		break;
	}

	band->state = state;
}

void
ftl_band_set_addr(struct ftl_band *band, uint64_t lba, struct ftl_ppa ppa)
{
	struct ftl_md *md = &band->md;
	uint64_t offset;

	assert(lba != FTL_LBA_INVALID);

	offset = ftl_band_lbkoff_from_ppa(band, ppa);
	pthread_spin_lock(&band->md.lock);

	md->num_vld++;
	md->lba_map[offset] = lba;
	spdk_bit_array_set(md->vld_map, offset);

	pthread_spin_unlock(&band->md.lock);
}

size_t
ftl_band_age(const struct ftl_band *band)
{
	return (size_t)(band->dev->seq - band->md.seq);
}

size_t
ftl_band_num_usable_lbks(const struct ftl_band *band)
{
	return band->num_chunks * ftl_dev_lbks_in_chunk(band->dev);
}

size_t
ftl_band_user_lbks(const struct ftl_band *band)
{
	return ftl_band_num_usable_lbks(band) -
	       ftl_head_md_num_lbks(band->dev) -
	       ftl_tail_md_num_lbks(band->dev);
}

struct ftl_band *
ftl_band_from_ppa(struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
{
	assert(ppa.chk < ftl_dev_num_bands(dev));
	return &dev->bands[ppa.chk];
}

struct ftl_chunk *
ftl_band_chunk_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
{
	struct spdk_ftl_dev *dev = band->dev;
	unsigned int punit;

	punit = ftl_ppa_flatten_punit(dev, ppa);
	assert(punit < ftl_dev_num_punits(dev));

	return &band->chunk_buf[punit];
}

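/* Flatten a PPA to a band-relative block offset: the band's chunks are */
/* treated as laid out back-to-back, so the offset is */
/* punit_index * lbks_in_chunk + ppa.lbk. ftl_band_ppa_from_lbkoff() below */
/* performs the inverse mapping. */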
uint64_t
ftl_band_lbkoff_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
{
	struct spdk_ftl_dev *dev = band->dev;
	unsigned int punit;

	punit = ftl_ppa_flatten_punit(dev, ppa);
	assert(ppa.chk == band->id);

	return punit * ftl_dev_lbks_in_chunk(dev) + ppa.lbk;
}

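/* Advance a PPA by num_lbks blocks along the band's write order. Data is */
/* striped across the band's chunks in xfer_size units: whole stripes move */
/* the in-chunk offset of every chunk forward at once, while the remaining */
/* transfers walk chunk by chunk, wrapping to the next stripe at the last */
/* chunk. E.g. with 4 chunks and xfer_size == 16, advancing by 80 blocks */
/* from the start of the first chunk lands at offset 16 of the second. */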
struct ftl_ppa
ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t num_lbks)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_chunk *chunk;
	unsigned int punit_num;
	size_t num_xfers, num_stripes;

	assert(ppa.chk == band->id);

	punit_num = ftl_ppa_flatten_punit(dev, ppa);
	chunk = &band->chunk_buf[punit_num];

	num_lbks += (ppa.lbk % dev->xfer_size);
	ppa.lbk  -= (ppa.lbk % dev->xfer_size);

#if defined(DEBUG)
	/* Check that the number of chunks has not been changed */
	struct ftl_chunk *_chunk;
	size_t _num_chunks = 0;
	CIRCLEQ_FOREACH(_chunk, &band->chunks, circleq) {
		if (spdk_likely(_chunk->state != FTL_CHUNK_STATE_BAD)) {
			_num_chunks++;
		}
	}
	assert(band->num_chunks == _num_chunks);
#endif
	num_stripes = (num_lbks / dev->xfer_size) / band->num_chunks;
	ppa.lbk  += num_stripes * dev->xfer_size;
	num_lbks -= num_stripes * dev->xfer_size * band->num_chunks;

	if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
		return ftl_to_ppa(FTL_PPA_INVALID);
	}

	num_xfers = num_lbks / dev->xfer_size;
	for (size_t i = 0; i < num_xfers; ++i) {
		/* When the last chunk is reached the lbk part of the address */
		/* needs to be increased by xfer_size */
		if (ftl_band_chunk_is_last(band, chunk)) {
			ppa.lbk += dev->xfer_size;
			if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
				return ftl_to_ppa(FTL_PPA_INVALID);
			}
		}

		chunk = ftl_band_next_operational_chunk(band, chunk);
		ppa.grp = chunk->start_ppa.grp;
		ppa.pu = chunk->start_ppa.pu;

		num_lbks -= dev->xfer_size;
	}

	if (num_lbks) {
		ppa.lbk += num_lbks;
		if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
			return ftl_to_ppa(FTL_PPA_INVALID);
		}
	}

	return ppa;
}

struct ftl_ppa
ftl_band_ppa_from_lbkoff(struct ftl_band *band, uint64_t lbkoff)
{
	struct ftl_ppa ppa = { .ppa = 0 };
	struct spdk_ftl_dev *dev = band->dev;
	uint64_t punit;

	punit = lbkoff / ftl_dev_lbks_in_chunk(dev) + dev->range.begin;

	ppa.lbk = lbkoff % ftl_dev_lbks_in_chunk(dev);
	ppa.chk = band->id;
	ppa.pu = punit / dev->geo.num_grp;
	ppa.grp = punit % dev->geo.num_grp;

	return ppa;
}

struct ftl_ppa
ftl_band_next_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t offset)
{
	uint64_t lbkoff = ftl_band_lbkoff_from_ppa(band, ppa);
	return ftl_band_ppa_from_lbkoff(band, lbkoff + offset);
}

void
ftl_band_acquire_md(struct ftl_band *band)
{
	assert(band->md.lba_map != NULL);
	band->md.ref_cnt++;
}

int
ftl_band_alloc_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md *md = &band->md;

	assert(md->ref_cnt == 0);
	assert(md->lba_map == NULL);

	md->lba_map = spdk_mempool_get(dev->lba_pool);
	if (!md->lba_map) {
		return -1;
	}

	ftl_band_acquire_md(band);
	return 0;
}

void
ftl_band_release_md(struct ftl_band *band)
{
	struct ftl_md *md = &band->md;

	assert(band->md.lba_map != NULL);
	assert(md->ref_cnt > 0);
	md->ref_cnt--;

	if (md->ref_cnt == 0) {
		ftl_band_free_md(band);
	}
}

static void
ftl_read_md_cb(void *arg, int status)
{
	struct ftl_md_io *md_io = arg;

	if (!status) {
		status = md_io->pack_fn(md_io->io.dev,
					md_io->md,
					md_io->buf);
	} else {
		status = FTL_MD_IO_FAILURE;
	}

	md_io->cb.fn(md_io->cb.ctx, status);
}

static struct ftl_md_io *
ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data, struct ftl_ppa ppa,
		    struct ftl_band *band, size_t lbk_cnt, size_t req_size, ftl_md_pack_fn fn,
		    const struct ftl_cb *cb)
{
	struct ftl_md_io *io;
	struct ftl_io_init_opts opts = {
		.dev		= dev,
		.io		= NULL,
		.rwb_batch	= NULL,
		.band		= band,
		.size		= sizeof(*io),
		.flags		= FTL_IO_MD | FTL_IO_PPA_MODE,
		.type		= FTL_IO_READ,
		.iov_cnt	= spdk_divide_round_up(lbk_cnt, req_size),
		.req_size	= req_size,
		.fn		= ftl_read_md_cb,
		.data		= data,
	};

	io = (struct ftl_md_io *)ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->io.ppa = ppa;
	io->md = md;
	io->buf = data;
	io->pack_fn = fn;
	io->cb = *cb;

	return io;
}

static struct ftl_io *
ftl_io_init_md_write(struct spdk_ftl_dev *dev, struct ftl_band *band,
		     void *data, size_t req_cnt, spdk_ftl_fn cb)
{
	struct ftl_io_init_opts opts = {
		.dev		= dev,
		.io		= NULL,
		.rwb_batch	= NULL,
		.band		= band,
		.size		= sizeof(struct ftl_io),
		.flags		= FTL_IO_MD | FTL_IO_PPA_MODE,
		.type		= FTL_IO_WRITE,
		.iov_cnt	= req_cnt,
		.req_size	= dev->xfer_size,
		.fn		= cb,
		.data		= data,
		.md		= NULL,
	};

	return ftl_io_init_internal(&opts);
}

static int
ftl_band_write_md(struct ftl_band *band, void *data, size_t lbk_cnt,
		  ftl_md_pack_fn md_fn, spdk_ftl_fn cb)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_io *io;

	io = ftl_io_init_md_write(dev, band, data,
				  spdk_divide_round_up(lbk_cnt, dev->xfer_size), cb);
	if (!io) {
		return -ENOMEM;
	}

	md_fn(dev, &band->md, data);

	return ftl_io_write(io);
}

void
ftl_band_md_clear(struct ftl_md *md)
{
	md->seq = 0;
	md->num_vld = 0;
	md->wr_cnt = 0;
	md->lba_map = NULL;
}

int
ftl_band_write_head_md(struct ftl_band *band, void *data, spdk_ftl_fn cb)
{
	return ftl_band_write_md(band, data, ftl_head_md_num_lbks(band->dev),
				 ftl_pack_head_md, cb);
}

int
ftl_band_write_tail_md(struct ftl_band *band, void *data, spdk_ftl_fn cb)
{
	return ftl_band_write_md(band, data, ftl_tail_md_num_lbks(band->dev),
				 ftl_pack_tail_md, cb);
}

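/* The LBA map lives past the tail metadata header and the valid map within */
/* the tail metadata region (see the layout comment above struct ftl_tail_md), */
/* so its PPA is tail_md_ppa advanced by those two sizes. */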
static struct ftl_ppa
ftl_band_lba_map_ppa(struct ftl_band *band)
{
	return ftl_band_next_xfer_ppa(band, band->tail_md_ppa,
				      ftl_tail_md_hdr_num_lbks() +
				      ftl_vld_map_num_lbks(band->dev));
}

static int
ftl_band_read_md(struct ftl_band *band, struct ftl_md *md, void *data, size_t lbk_cnt,
		 size_t req_size, struct ftl_ppa start_ppa, ftl_md_pack_fn unpack_fn,
		 const struct ftl_cb *cb)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md_io *io;

	if (spdk_unlikely(!band->num_chunks)) {
		return -ENOENT;
	}

	io = ftl_io_init_md_read(dev, md, data, start_ppa, band, lbk_cnt,
				 req_size, unpack_fn, cb);
	if (!io) {
		return -ENOMEM;
	}

	ftl_io_read((struct ftl_io *)io);
	return 0;
}

int
ftl_band_read_tail_md(struct ftl_band *band, struct ftl_md *md,
		      void *data, struct ftl_ppa ppa, const struct ftl_cb *cb)
{
	return ftl_band_read_md(band, md, data,
				ftl_tail_md_num_lbks(band->dev),
				band->dev->xfer_size,
				ppa,
				ftl_unpack_tail_md,
				cb);
}

int
ftl_band_read_lba_map(struct ftl_band *band, struct ftl_md *md,
		      void *data, const struct ftl_cb *cb)
{
	/* TODO: change this interface to allow reading parts of the LBA map instead of */
	/* reading whole metadata */
	return ftl_band_read_md(band, md, data,
				ftl_lba_map_num_lbks(band->dev),
				band->dev->xfer_size,
				ftl_band_lba_map_ppa(band),
				ftl_unpack_lba_map,
				cb);
}

int
ftl_band_read_head_md(struct ftl_band *band, struct ftl_md *md,
		      void *data, const struct ftl_cb *cb)
{
	return ftl_band_read_md(band, md, data,
				ftl_head_md_num_lbks(band->dev),
				band->dev->xfer_size,
				ftl_band_head_md_ppa(band),
				ftl_unpack_head_md,
				cb);
}

static void
ftl_band_remove_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
{
	CIRCLEQ_REMOVE(&band->chunks, chunk, circleq);
	band->num_chunks--;
}

static void
ftl_erase_fail(struct ftl_io *io, int status)
{
	struct ftl_chunk *chunk;
	char buf[128];

	SPDK_ERRLOG("Erase failed @ppa: %s, status: %d\n",
		    ftl_ppa2str(io->ppa, buf, sizeof(buf)), status);

	chunk = ftl_band_chunk_from_ppa(io->band, io->ppa);
	chunk->state = FTL_CHUNK_STATE_BAD;
	ftl_band_remove_chunk(io->band, chunk);
}

static void
ftl_band_erase_cb(void *ctx, int status)
{
	struct ftl_io *io = ctx;
	struct ftl_chunk *chunk;

	if (spdk_unlikely(status)) {
		ftl_erase_fail(io, status);
		return;
	}
	chunk = ftl_band_chunk_from_ppa(io->band, io->ppa);
	chunk->state = FTL_CHUNK_STATE_FREE;
}

int
ftl_band_erase(struct ftl_band *band)
{
	struct ftl_chunk *chunk;
	struct ftl_io *io;
	int rc = 0;

	assert(band->state == FTL_BAND_STATE_CLOSED ||
	       band->state == FTL_BAND_STATE_FREE);

	ftl_band_set_state(band, FTL_BAND_STATE_PREP);

	CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) {
		if (chunk->state == FTL_CHUNK_STATE_FREE) {
			continue;
		}

		io = ftl_io_erase_init(band, 1, ftl_band_erase_cb);
		if (!io) {
			rc = -ENOMEM;
			break;
		}

		io->ppa = chunk->start_ppa;
		rc = ftl_io_erase(io);
		if (rc) {
			assert(0);
			/* TODO: change band's state back to closed? */
			break;
		}
	}

	return rc;
}

int
ftl_band_write_prep(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;

	if (ftl_band_alloc_md(band)) {
		return -1;
	}

	band->md.seq = ++dev->seq;
	return 0;
}

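/* Return the chunk the write pointer moves to after the given chunk. When */
/* the chunk went bad (and was removed from band->chunks), find the closest */
/* operational chunk following its position, wrapping around to the first */
/* chunk when none follows. */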
struct ftl_chunk *
ftl_band_next_operational_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
{
	struct ftl_chunk *result = NULL;
	struct ftl_chunk *entry;

	if (spdk_unlikely(!band->num_chunks)) {
		return NULL;
	}

	/* Erasing band may fail after it was assigned to wptr. */
	/* In such a case chunk is no longer in band->chunks queue. */
	if (spdk_likely(chunk->state != FTL_CHUNK_STATE_BAD)) {
		result = ftl_band_next_chunk(band, chunk);
	} else {
		CIRCLEQ_FOREACH_REVERSE(entry, &band->chunks, circleq) {
			if (entry->pos > chunk->pos) {
				result = entry;
			} else {
				if (!result) {
					result = CIRCLEQ_FIRST(&band->chunks);
				}
				break;
			}
		}
	}

	return result;
}
974