xref: /spdk/lib/ftl/ftl_band.c (revision 552e21cce6cccbf833ed9109827e08337377d7ce)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/crc32.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/ftl.h"

#include "ftl_band.h"
#include "ftl_io.h"
#include "ftl_core.h"
#include "ftl_reloc.h"
#include "ftl_debug.h"

/* TODO: define some signature for meta version */
#define FTL_MD_VER 1

struct __attribute__((packed)) ftl_md_hdr {
	/* Device instance */
	struct spdk_uuid	uuid;

	/* Meta version */
	uint8_t			ver;

	/* Sequence number */
	uint64_t		seq;

	/* CRC32 checksum */
	uint32_t		checksum;
};

/* Tail metadata layout stored on the media (each of the three parts */
/* aligned to the block size): */
/* - header */
/* - valid bitmap */
/* - LBA map */
struct __attribute__((packed)) ftl_tail_md {
	struct ftl_md_hdr	hdr;

	/* Number of lbks in the band */
	uint64_t		num_lbks;

	uint8_t			reserved[4059];
};
SPDK_STATIC_ASSERT(sizeof(struct ftl_tail_md) == FTL_BLOCK_SIZE, "Incorrect metadata size");
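
/* A note on the sizing above, assuming the usual FTL_BLOCK_SIZE of 4096: the packed */
/* header takes 29 bytes (16 B uuid + 1 B ver + 8 B seq + 4 B checksum), num_lbks adds */
/* 8 more, and reserved[4059] pads the struct to exactly one block, which is what the */
/* static assert verifies. */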

struct __attribute__((packed)) ftl_head_md {
	struct ftl_md_hdr	hdr;

	/* Number of defrag cycles */
	uint64_t		wr_cnt;

	/* Number of LBAs exposed to the user */
	uint64_t		lba_cnt;

	/* Transfer size */
	uint32_t		xfer_size;
};

size_t
ftl_tail_md_hdr_num_lbks(void)
{
	return spdk_divide_round_up(sizeof(struct ftl_tail_md), FTL_BLOCK_SIZE);
}

size_t
ftl_vld_map_num_lbks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_vld_map_size(dev), FTL_BLOCK_SIZE);
}

size_t
ftl_lba_map_num_lbks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_num_band_lbks(dev) * sizeof(uint64_t), FTL_BLOCK_SIZE);
}

size_t
ftl_head_md_num_lbks(const struct spdk_ftl_dev *dev)
{
	return dev->xfer_size;
}

size_t
ftl_tail_md_num_lbks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_tail_md_hdr_num_lbks() +
				    ftl_vld_map_num_lbks(dev) +
				    ftl_lba_map_num_lbks(dev),
				    dev->xfer_size) * dev->xfer_size;
}
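
/* A worked example of the rounding above (hypothetical geometry, for illustration only): */
/* a band of 524288 lbks needs 524288 * 8 B / 4096 B = 1024 lbks for the LBA map and */
/* 524288 / 8 / 4096 = 16 lbks for the valid bitmap; with the single header lbk that is */
/* 1041 lbks, which an xfer_size of 16 rounds up to 1056 so that tail metadata writes */
/* stay transfer-aligned. */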

static uint64_t
ftl_band_tail_md_offset(struct ftl_band *band)
{
	return ftl_band_num_usable_lbks(band) -
	       ftl_tail_md_num_lbks(band->dev);
}
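
/* The tail metadata occupies the final ftl_tail_md_num_lbks() blocks of the band, so the */
/* offset above doubles as the high-water mark for user data: once the write pointer */
/* reaches it, only the tail metadata itself remains to be written. */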

int
ftl_band_full(struct ftl_band *band, size_t offset)
{
	return offset == ftl_band_tail_md_offset(band);
}

void
ftl_band_write_failed(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;

	band->high_prio = 1;
	band->tail_md_ppa = ftl_to_ppa(FTL_PPA_INVALID);

	if (!dev->df_band) {
		dev->df_band = band;
	}

	ftl_reloc_add(dev->reloc, band, 0, ftl_num_band_lbks(dev), 1);
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSED);
}

void
ftl_band_clear_md(struct ftl_band *band)
{
	spdk_bit_array_clear_mask(band->md.vld_map);
	memset(band->md.lba_map, 0, ftl_num_band_lbks(band->dev) * sizeof(uint64_t));
	band->md.num_vld = 0;
}

static void
ftl_band_free_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md *md = &band->md;

	assert(band->state == FTL_BAND_STATE_CLOSED ||
	       band->state == FTL_BAND_STATE_FREE);
	assert(md->ref_cnt == 0);
	assert(md->lba_map != NULL);
	assert(!band->high_prio);

	/* Verify that band's metadata is consistent with l2p */
	if (band->num_chunks) {
		assert(ftl_band_validate_md(band, band->md.lba_map) == true);
	}

	spdk_mempool_put(dev->lba_pool, md->lba_map);
	md->lba_map = NULL;
}

static void
_ftl_band_set_free(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_band *lband, *prev = NULL;

	/* Verify band's previous state */
	assert(band->state == FTL_BAND_STATE_CLOSED);

	if (band == dev->df_band) {
		dev->df_band = NULL;
	}

	/* Remove the band from the closed band list */
	LIST_REMOVE(band, list_entry);

	/* Keep the list sorted by band's write count */
	LIST_FOREACH(lband, &dev->free_bands, list_entry) {
		if (lband->md.wr_cnt > band->md.wr_cnt) {
			LIST_INSERT_BEFORE(lband, band, list_entry);
			break;
		}
		prev = lband;
	}

	if (!lband) {
		if (LIST_EMPTY(&dev->free_bands)) {
			LIST_INSERT_HEAD(&dev->free_bands, band, list_entry);
		} else {
			LIST_INSERT_AFTER(prev, band, list_entry);
		}
	}

#if defined(DEBUG)
	/* Verify that the list is still sorted; prev has to be advanced each */
	/* iteration, otherwise the assert below never executes */
	prev = NULL;
	LIST_FOREACH(lband, &dev->free_bands, list_entry) {
		if (prev) {
			assert(prev->md.wr_cnt <= lband->md.wr_cnt);
		}
		prev = lband;
	}
#endif
	dev->num_free++;
	ftl_apply_limits(dev);
}
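
/* Because the free list is kept sorted by ascending write count, whoever takes bands */
/* from the head of dev->free_bands gets the least-worn band first, a simple form of */
/* wear leveling. E.g. freeing a band with wr_cnt == 5 into a list holding counts */
/* {3, 7, 9} inserts it between 3 and 7. */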

static void
_ftl_band_set_opening(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md *md = &band->md;

	/* Verify band's previous state */
	assert(band->state == FTL_BAND_STATE_PREP);
	LIST_REMOVE(band, list_entry);

	md->wr_cnt++;

	assert(dev->num_free > 0);
	dev->num_free--;

	ftl_apply_limits(dev);
}

static void
_ftl_band_set_closed(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_chunk *chunk;

	/* TODO: add this kind of check in band_set_state() */
	if (band->state == FTL_BAND_STATE_CLOSED) {
		return;
	}

	/* Set the state first, since ftl_band_free_md() asserts on it */
	band->state = FTL_BAND_STATE_CLOSED;

	/* Free the md if there are no outstanding IOs */
	ftl_band_release_md(band);

	if (spdk_likely(band->num_chunks)) {
		LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
		CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) {
			chunk->state = FTL_CHUNK_STATE_CLOSED;
		}
	} else {
		LIST_REMOVE(band, list_entry);
	}
}
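
/* The CRC32C below covers the whole metadata buffer except the checksum field itself: */
/* the bytes before ftl_md_hdr.checksum are hashed first, then everything past it. */
/* ftl_set_md_hdr() consequently fills in the checksum last, once every other field */
/* it covers has been written. */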
static uint32_t
ftl_md_calc_crc(const struct ftl_md_hdr *hdr, size_t size)
{
	size_t checkoff = offsetof(struct ftl_md_hdr, checksum);
	size_t mdoff = checkoff + sizeof(hdr->checksum);
	uint32_t crc;

	crc = spdk_crc32c_update(hdr, checkoff, 0);
	return spdk_crc32c_update((const char *)hdr + mdoff, size - mdoff, crc);
}

static void
ftl_set_md_hdr(struct spdk_ftl_dev *dev, struct ftl_md_hdr *hdr,
	       struct ftl_md *md, size_t size)
{
	hdr->seq = md->seq;
	hdr->ver = FTL_MD_VER;
	hdr->uuid = dev->uuid;
	hdr->checksum = ftl_md_calc_crc(hdr, size);
}

static int
ftl_pack_head_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
{
	struct ftl_head_md *head = data;

	head->wr_cnt = md->wr_cnt;
	head->lba_cnt = dev->num_lbas;
	head->xfer_size = dev->xfer_size;
	ftl_set_md_hdr(dev, &head->hdr, md, sizeof(struct ftl_head_md));

	return FTL_MD_SUCCESS;
}

static int
ftl_pack_tail_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
{
	struct ftl_tail_md *tail = data;
	size_t map_size;
	void *vld_offset, *map_offset;

	map_size = ftl_num_band_lbks(dev) * sizeof(uint64_t);
	vld_offset = (char *)data + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
	map_offset = (char *)vld_offset + ftl_vld_map_num_lbks(dev) * FTL_BLOCK_SIZE;

	/* Clear out the buffer */
	memset(data, 0, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
	tail->num_lbks = ftl_num_band_lbks(dev);

	pthread_spin_lock(&md->lock);
	spdk_bit_array_store_mask(md->vld_map, vld_offset);
	pthread_spin_unlock(&md->lock);

	memcpy(map_offset, md->lba_map, map_size);
	ftl_set_md_hdr(dev, &tail->hdr, md, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);

	return FTL_MD_SUCCESS;
}
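
/* Resulting on-media layout of the tail metadata buffer (every section block-aligned): */
/*   blocks [0, ftl_tail_md_hdr_num_lbks())  - ftl_tail_md header */
/*   blocks [hdr, hdr + vld_map lbks)        - valid bitmap */
/*   blocks [.., .. + lba_map lbks)          - LBA map */
/* ftl_unpack_tail_md() below recomputes the same offsets when restoring a band. */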

static int
ftl_md_hdr_vld(struct spdk_ftl_dev *dev, const struct ftl_md_hdr *hdr, size_t size)
{
	if (spdk_uuid_compare(&dev->uuid, &hdr->uuid) != 0) {
		return FTL_MD_NO_MD;
	}

	if (hdr->ver != FTL_MD_VER) {
		return FTL_MD_INVALID_VER;
	}

	if (ftl_md_calc_crc(hdr, size) != hdr->checksum) {
		return FTL_MD_INVALID_CRC;
	}

	return FTL_MD_SUCCESS;
}

static int
ftl_unpack_tail_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
{
	struct ftl_tail_md *tail = data;
	size_t map_size;
	void *vld_offset, *map_offset;
	int rc;

	map_size = ftl_num_band_lbks(dev) * sizeof(uint64_t);
	vld_offset = (char *)data + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
	map_offset = (char *)vld_offset + ftl_vld_map_num_lbks(dev) * FTL_BLOCK_SIZE;

	rc = ftl_md_hdr_vld(dev, &tail->hdr, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
	if (rc) {
		return rc;
	}

	if (tail->num_lbks != ftl_num_band_lbks(dev)) {
		return FTL_MD_INVALID_SIZE;
	}

	if (md->vld_map) {
		spdk_bit_array_load_mask(md->vld_map, vld_offset);
	}

	if (md->lba_map) {
		memcpy(md->lba_map, map_offset, map_size);
	}

	md->seq = tail->hdr.seq;
	return FTL_MD_SUCCESS;
}

static int
ftl_unpack_lba_map(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
{
	memcpy(md->lba_map, data, ftl_num_band_lbks(dev) * sizeof(uint64_t));
	return FTL_MD_SUCCESS;
}

static int
ftl_unpack_head_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
{
	struct ftl_head_md *head = data;
	int rc;

	rc = ftl_md_hdr_vld(dev, &head->hdr, sizeof(struct ftl_head_md));
	if (rc) {
		return rc;
	}

	md->seq = head->hdr.seq;
	md->wr_cnt = head->wr_cnt;

	if (dev->global_md.num_lbas == 0) {
		dev->global_md.num_lbas = head->lba_cnt;
	}

	if (dev->global_md.num_lbas != head->lba_cnt) {
		return FTL_MD_INVALID_SIZE;
	}

	if (dev->xfer_size != head->xfer_size) {
		return FTL_MD_INVALID_SIZE;
	}

	return FTL_MD_SUCCESS;
}

struct ftl_ppa
ftl_band_tail_md_ppa(struct ftl_band *band)
{
	struct ftl_ppa ppa;
	struct ftl_chunk *chunk;
	struct spdk_ftl_dev *dev = band->dev;
	size_t xfer_size = dev->xfer_size;
	size_t num_req = ftl_band_tail_md_offset(band) / xfer_size;
	size_t i;

	if (spdk_unlikely(!band->num_chunks)) {
		return ftl_to_ppa(FTL_PPA_INVALID);
	}

	/* Metadata should be aligned to xfer size */
	assert(ftl_band_tail_md_offset(band) % xfer_size == 0);

	chunk = CIRCLEQ_FIRST(&band->chunks);
	for (i = 0; i < num_req % band->num_chunks; ++i) {
		chunk = ftl_band_next_chunk(band, chunk);
	}

	ppa.lbk = (num_req / band->num_chunks) * xfer_size;
	ppa.chk = band->id;
	ppa.pu = chunk->punit->start_ppa.pu;
	ppa.grp = chunk->punit->start_ppa.grp;

	return ppa;
}
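
/* Data is striped across the band's chunks in xfer_size units, round-robin. A worked */
/* example with hypothetical numbers: with xfer_size = 16, num_chunks = 4 and a tail MD */
/* offset of 800 lbks, num_req = 50, so the tail metadata starts on chunk 50 % 4 = 2 at */
/* lbk (50 / 4) * 16 = 192 within that chunk. */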

struct ftl_ppa
ftl_band_head_md_ppa(struct ftl_band *band)
{
	struct ftl_ppa ppa;

	if (spdk_unlikely(!band->num_chunks)) {
		return ftl_to_ppa(FTL_PPA_INVALID);
	}

	ppa = CIRCLEQ_FIRST(&band->chunks)->punit->start_ppa;
	ppa.chk = band->id;

	return ppa;
}

void
ftl_band_set_state(struct ftl_band *band, enum ftl_band_state state)
{
	switch (state) {
	case FTL_BAND_STATE_FREE:
		_ftl_band_set_free(band);
		break;

	case FTL_BAND_STATE_OPENING:
		_ftl_band_set_opening(band);
		break;

	case FTL_BAND_STATE_CLOSED:
		_ftl_band_set_closed(band);
		break;

	default:
		break;
	}

	band->state = state;
}

void
ftl_band_set_addr(struct ftl_band *band, uint64_t lba, struct ftl_ppa ppa)
{
	struct ftl_md *md = &band->md;
	uint64_t offset;

	assert(lba != FTL_LBA_INVALID);

	offset = ftl_band_lbkoff_from_ppa(band, ppa);
	pthread_spin_lock(&band->md.lock);

	md->num_vld++;
	md->lba_map[offset] = lba;
	spdk_bit_array_set(md->vld_map, offset);

	pthread_spin_unlock(&band->md.lock);
}

size_t
ftl_band_age(const struct ftl_band *band)
{
	return (size_t)(band->dev->seq - band->md.seq);
}

size_t
ftl_band_num_usable_lbks(const struct ftl_band *band)
{
	return band->num_chunks * ftl_dev_lbks_in_chunk(band->dev);
}

size_t
ftl_band_user_lbks(const struct ftl_band *band)
{
	return ftl_band_num_usable_lbks(band) -
	       ftl_head_md_num_lbks(band->dev) -
	       ftl_tail_md_num_lbks(band->dev);
}

struct ftl_band *
ftl_band_from_ppa(struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
{
	assert(ppa.chk < ftl_dev_num_bands(dev));
	return &dev->bands[ppa.chk];
}

struct ftl_chunk *
ftl_band_chunk_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
{
	struct spdk_ftl_dev *dev = band->dev;
	unsigned int punit;

	punit = ftl_ppa_flatten_punit(dev, ppa);
	assert(punit < ftl_dev_num_punits(dev));

	return &band->chunk_buf[punit];
}

uint64_t
ftl_band_lbkoff_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
{
	struct spdk_ftl_dev *dev = band->dev;
	unsigned int punit;

	punit = ftl_ppa_flatten_punit(dev, ppa);
	assert(ppa.chk == band->id);

	return punit * ftl_dev_lbks_in_chunk(dev) + ppa.lbk;
}

struct ftl_ppa
ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t num_lbks)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_chunk *chunk;
	unsigned int punit_num;
	size_t num_xfers, num_stripes;

	assert(ppa.chk == band->id);

	punit_num = ftl_ppa_flatten_punit(dev, ppa);
	chunk = &band->chunk_buf[punit_num];

	num_lbks += (ppa.lbk % dev->xfer_size);
	ppa.lbk  -= (ppa.lbk % dev->xfer_size);

#if defined(DEBUG)
	/* Check that the number of chunks has not been changed */
	struct ftl_chunk *_chunk;
	size_t _num_chunks = 0;
	CIRCLEQ_FOREACH(_chunk, &band->chunks, circleq) {
		if (spdk_likely(_chunk->state != FTL_CHUNK_STATE_BAD)) {
			_num_chunks++;
		}
	}
	assert(band->num_chunks == _num_chunks);
#endif
	num_stripes = (num_lbks / dev->xfer_size) / band->num_chunks;
	ppa.lbk  += num_stripes * dev->xfer_size;
	num_lbks -= num_stripes * dev->xfer_size * band->num_chunks;

	if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
		return ftl_to_ppa(FTL_PPA_INVALID);
	}

	num_xfers = num_lbks / dev->xfer_size;
	for (size_t i = 0; i < num_xfers; ++i) {
		/* When the last chunk is reached the lbk part of the address */
		/* needs to be increased by xfer_size */
		if (ftl_band_chunk_is_last(band, chunk)) {
			ppa.lbk += dev->xfer_size;
			if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
				return ftl_to_ppa(FTL_PPA_INVALID);
			}
		}

		chunk = ftl_band_next_operational_chunk(band, chunk);
		ppa.grp = chunk->start_ppa.grp;
		ppa.pu = chunk->start_ppa.pu;

		num_lbks -= dev->xfer_size;
	}

	if (num_lbks) {
		ppa.lbk += num_lbks;
		if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
			return ftl_to_ppa(FTL_PPA_INVALID);
		}
	}

	return ppa;
}
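
/* A worked example of the traversal above (hypothetical numbers): advancing by */
/* num_lbks = 144 with xfer_size = 16 and num_chunks = 4 first consumes two whole */
/* stripes (128 lbks), bumping ppa.lbk by 32, then walks the one remaining 16-lbk */
/* transfer to the next operational chunk, skipping any chunks that have gone bad. */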

struct ftl_ppa
ftl_band_ppa_from_lbkoff(struct ftl_band *band, uint64_t lbkoff)
{
	struct ftl_ppa ppa = { .ppa = 0 };
	struct spdk_ftl_dev *dev = band->dev;
	uint64_t punit;

	punit = lbkoff / ftl_dev_lbks_in_chunk(dev) + dev->range.begin;

	ppa.lbk = lbkoff % ftl_dev_lbks_in_chunk(dev);
	ppa.chk = band->id;
	ppa.pu = punit / dev->geo.num_grp;
	ppa.grp = punit % dev->geo.num_grp;

	return ppa;
}

struct ftl_ppa
ftl_band_next_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t offset)
{
	uint64_t lbkoff = ftl_band_lbkoff_from_ppa(band, ppa);
	return ftl_band_ppa_from_lbkoff(band, lbkoff + offset);
}

void
ftl_band_acquire_md(struct ftl_band *band)
{
	assert(band->md.lba_map != NULL);
	band->md.ref_cnt++;
}

int
ftl_band_alloc_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md *md = &band->md;

	assert(md->ref_cnt == 0);
	assert(md->lba_map == NULL);

	md->lba_map = spdk_mempool_get(dev->lba_pool);
	if (!md->lba_map) {
		return -1;
	}

	ftl_band_acquire_md(band);
	return 0;
}

void
ftl_band_release_md(struct ftl_band *band)
{
	struct ftl_md *md = &band->md;

	assert(band->md.lba_map != NULL);
	assert(md->ref_cnt > 0);
	md->ref_cnt--;

	if (md->ref_cnt == 0) {
		ftl_band_free_md(band);
	}
}
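
/* The LBA map buffer is reference counted: ftl_band_alloc_md() takes the buffer from */
/* dev->lba_pool with an initial reference, users that need it pinned take extra ones */
/* via ftl_band_acquire_md(), and the buffer goes back to the pool once the last */
/* ftl_band_release_md() drops the count to zero. */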

static void
ftl_read_md_cb(void *arg, int status)
{
	struct ftl_md_io *md_io = arg;

	if (!status) {
		status = md_io->pack_fn(md_io->io.dev,
					md_io->md,
					md_io->buf);
	} else {
		status = FTL_MD_IO_FAILURE;
	}

	md_io->cb.fn(md_io->cb.ctx, status);
}

static struct ftl_md_io *
ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data, struct ftl_ppa ppa,
		    struct ftl_band *band, size_t lbk_cnt, size_t req_size, ftl_md_pack_fn fn,
		    const struct ftl_cb *cb)
{
	struct ftl_md_io *io;
	struct ftl_io_init_opts opts = {
		.dev		= dev,
		.io		= NULL,
		.rwb_batch	= NULL,
		.band		= band,
		.size		= sizeof(*io),
		.flags		= FTL_IO_MD | FTL_IO_PPA_MODE,
		.type		= FTL_IO_READ,
		.iov_cnt	= spdk_divide_round_up(lbk_cnt, req_size),
		.req_size	= req_size,
		.fn		= ftl_read_md_cb,
		.data		= data,
	};

	io = (struct ftl_md_io *)ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->io.ppa = ppa;
	io->md = md;
	io->buf = data;
	io->pack_fn = fn;
	io->cb = *cb;

	return io;
}

static struct ftl_io *
ftl_io_init_md_write(struct spdk_ftl_dev *dev, struct ftl_band *band,
		     void *data, size_t req_cnt, spdk_ftl_fn cb)
{
	struct ftl_io_init_opts opts = {
		.dev		= dev,
		.io		= NULL,
		.rwb_batch	= NULL,
		.band		= band,
		.size		= sizeof(struct ftl_io),
		.flags		= FTL_IO_MD | FTL_IO_PPA_MODE,
		.type		= FTL_IO_WRITE,
		.iov_cnt	= req_cnt,
		.req_size	= dev->xfer_size,
		.fn		= cb,
		.data		= data,
		.md		= NULL,
	};

	return ftl_io_init_internal(&opts);
}

static int
ftl_band_write_md(struct ftl_band *band, void *data, size_t lbk_cnt,
		  ftl_md_pack_fn md_fn, spdk_ftl_fn cb)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_io *io;

	io = ftl_io_init_md_write(dev, band, data,
				  spdk_divide_round_up(lbk_cnt, dev->xfer_size), cb);
	if (!io) {
		return -ENOMEM;
	}

	md_fn(dev, &band->md, data);

	return ftl_io_write(io);
}

void
ftl_band_md_clear(struct ftl_md *md)
{
	md->seq = 0;
	md->num_vld = 0;
	md->wr_cnt = 0;
	md->lba_map = NULL;
}

int
ftl_band_write_head_md(struct ftl_band *band, void *data, spdk_ftl_fn cb)
{
	return ftl_band_write_md(band, data, ftl_head_md_num_lbks(band->dev),
				 ftl_pack_head_md, cb);
}

int
ftl_band_write_tail_md(struct ftl_band *band, void *data, spdk_ftl_fn cb)
{
	return ftl_band_write_md(band, data, ftl_tail_md_num_lbks(band->dev),
				 ftl_pack_tail_md, cb);
}

static struct ftl_ppa
ftl_band_lba_map_ppa(struct ftl_band *band)
{
	return ftl_band_next_xfer_ppa(band, band->tail_md_ppa,
				      ftl_tail_md_hdr_num_lbks() +
				      ftl_vld_map_num_lbks(band->dev));
}

static int
ftl_band_read_md(struct ftl_band *band, struct ftl_md *md, void *data, size_t lbk_cnt,
		 size_t req_size, struct ftl_ppa start_ppa, ftl_md_pack_fn unpack_fn,
		 const struct ftl_cb *cb)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md_io *io;

	if (spdk_unlikely(!band->num_chunks)) {
		return -ENOENT;
	}

	io = ftl_io_init_md_read(dev, md, data, start_ppa, band, lbk_cnt,
				 req_size, unpack_fn, cb);
	if (!io) {
		return -ENOMEM;
	}

	return ftl_io_read((struct ftl_io *)io);
}

int
ftl_band_read_tail_md(struct ftl_band *band, struct ftl_md *md,
		      void *data, struct ftl_ppa ppa, const struct ftl_cb *cb)
{
	return ftl_band_read_md(band, md, data,
				ftl_tail_md_num_lbks(band->dev),
				band->dev->xfer_size,
				ppa,
				ftl_unpack_tail_md,
				cb);
}

int
ftl_band_read_lba_map(struct ftl_band *band, struct ftl_md *md,
		      void *data, const struct ftl_cb *cb)
{
	/* TODO: change this interface to allow reading parts of the LBA map */
	/* instead of the whole metadata region */
	return ftl_band_read_md(band, md, data,
				ftl_lba_map_num_lbks(band->dev),
				band->dev->xfer_size,
				ftl_band_lba_map_ppa(band),
				ftl_unpack_lba_map,
				cb);
}

int
ftl_band_read_head_md(struct ftl_band *band, struct ftl_md *md,
		      void *data, const struct ftl_cb *cb)
{
	return ftl_band_read_md(band, md, data,
				ftl_head_md_num_lbks(band->dev),
				band->dev->xfer_size,
				ftl_band_head_md_ppa(band),
				ftl_unpack_head_md,
				cb);
}

static void
ftl_band_remove_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
{
	CIRCLEQ_REMOVE(&band->chunks, chunk, circleq);
	band->num_chunks--;
}

static void
ftl_erase_fail(struct ftl_io *io, int status)
{
	struct ftl_chunk *chunk;
	char buf[128];

	SPDK_ERRLOG("Erase failed @ppa: %s, status: %d\n",
		    ftl_ppa2str(io->ppa, buf, sizeof(buf)), status);

	chunk = ftl_band_chunk_from_ppa(io->band, io->ppa);
	chunk->state = FTL_CHUNK_STATE_BAD;
	ftl_band_remove_chunk(io->band, chunk);
}

static void
ftl_band_erase_cb(void *ctx, int status)
{
	struct ftl_io *io = ctx;
	struct ftl_chunk *chunk;

	if (spdk_unlikely(status)) {
		ftl_erase_fail(io, status);
		return;
	}
	chunk = ftl_band_chunk_from_ppa(io->band, io->ppa);
	chunk->state = FTL_CHUNK_STATE_FREE;
}

int
ftl_band_erase(struct ftl_band *band)
{
	struct ftl_chunk *chunk;
	struct ftl_io *io;
	int rc = 0;

	assert(band->state == FTL_BAND_STATE_CLOSED ||
	       band->state == FTL_BAND_STATE_FREE);

	ftl_band_set_state(band, FTL_BAND_STATE_PREP);

	CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) {
		if (chunk->state == FTL_CHUNK_STATE_FREE) {
			continue;
		}

		io = ftl_io_erase_init(band, 1, ftl_band_erase_cb);
		if (!io) {
			rc = -ENOMEM;
			break;
		}

		io->ppa = chunk->start_ppa;
		rc = ftl_io_erase(io);
		if (rc) {
			assert(0);
			/* TODO: change band's state back to closed? */
			break;
		}
	}

	return rc;
}

int
ftl_band_write_prep(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;

	if (ftl_band_alloc_md(band)) {
		return -1;
	}

	band->md.seq = ++dev->seq;
	return 0;
}

struct ftl_chunk *
ftl_band_next_operational_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
{
	struct ftl_chunk *result = NULL;
	struct ftl_chunk *entry;

	if (spdk_unlikely(!band->num_chunks)) {
		return NULL;
	}

	/* Erasing a band may fail after it has already been assigned to a wptr, */
	/* in which case the chunk is no longer on the band->chunks queue. */
	if (spdk_likely(chunk->state != FTL_CHUNK_STATE_BAD)) {
		result = ftl_band_next_chunk(band, chunk);
	} else {
		CIRCLEQ_FOREACH_REVERSE(entry, &band->chunks, circleq) {
			if (entry->pos > chunk->pos) {
				result = entry;
			} else {
				if (!result) {
					result = CIRCLEQ_FIRST(&band->chunks);
				}
				break;
			}
		}
	}

	return result;
}
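
/* On the happy path this simply returns the next chunk in the circular list. The */
/* reverse scan handles the case where the passed chunk itself went bad and was */
/* unlinked: its pos still records where it used to sit, so the walk finds the first */
/* surviving chunk positioned after it, wrapping to the head of the list if there is */
/* none. */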