/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/crc32.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/ftl.h"

#include "ftl_band.h"
#include "ftl_io.h"
#include "ftl_core.h"
#include "ftl_reloc.h"
#include "ftl_debug.h"

/* TODO: define some signature for meta version */
#define FTL_MD_VER 1

struct __attribute__((packed)) ftl_md_hdr {
	/* Device instance */
	struct spdk_uuid	uuid;

	/* Meta version */
	uint8_t			ver;

	/* Sequence number */
	uint64_t		seq;

	/* CRC32 checksum */
	uint32_t		checksum;
};
/* Tail metadata layout stored on media (with all three parts aligned to the block size): */
/* - header */
/* - valid bitmap */
/* - LBA map */
struct __attribute__((packed)) ftl_tail_md {
	struct ftl_md_hdr	hdr;

	/* Number of logical blocks (lbks) in the band */
	uint64_t		num_lbks;

	uint8_t			reserved[4059];
};
SPDK_STATIC_ASSERT(sizeof(struct ftl_tail_md) == FTL_BLOCK_SIZE, "Incorrect metadata size");

struct __attribute__((packed)) ftl_head_md {
	struct ftl_md_hdr	hdr;

	/* Number of write/erase cycles */
	uint64_t		wr_cnt;

	/* Number of LBAs exposed to the user */
	uint64_t		lba_cnt;

	/* Transfer size */
	uint32_t		xfer_size;
};

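/* Number of blocks occupied by the tail metadata header */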
size_t
ftl_tail_md_hdr_num_lbks(void)
{
	return spdk_divide_round_up(sizeof(struct ftl_tail_md), FTL_BLOCK_SIZE);
}

size_t
ftl_vld_map_num_lbks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_vld_map_size(dev), FTL_BLOCK_SIZE);
}

size_t
ftl_lba_map_num_lbks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_num_band_lbks(dev) * sizeof(uint64_t), FTL_BLOCK_SIZE);
}

size_t
ftl_head_md_num_lbks(const struct spdk_ftl_dev *dev)
{
	return dev->xfer_size;
}

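/* Total size of the tail metadata in blocks, rounded up to a multiple of the transfer size */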
size_t
ftl_tail_md_num_lbks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_tail_md_hdr_num_lbks() +
				    ftl_vld_map_num_lbks(dev) +
				    ftl_lba_map_num_lbks(dev),
				    dev->xfer_size) * dev->xfer_size;
}

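/* Offset (in blocks) within the band at which the tail metadata begins */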
static uint64_t
ftl_band_tail_md_offset(struct ftl_band *band)
{
	return ftl_band_num_usable_lbks(band) -
	       ftl_tail_md_num_lbks(band->dev);
}

int
ftl_band_full(struct ftl_band *band, size_t offset)
{
	return offset == ftl_band_tail_md_offset(band);
}

void
ftl_band_write_failed(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;

	band->high_prio = 1;
	band->tail_md_ppa = ftl_to_ppa(FTL_PPA_INVALID);

	if (!dev->df_band) {
		dev->df_band = band;
	}

	ftl_reloc_add(dev->reloc, band, 0, ftl_num_band_lbks(dev), 1);
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSED);
}

void
ftl_band_clear_md(struct ftl_band *band)
{
	spdk_bit_array_clear_mask(band->md.vld_map);
	memset(band->md.lba_map, 0, ftl_num_band_lbks(band->dev) * sizeof(uint64_t));
	band->md.num_vld = 0;
}

static void
ftl_band_free_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md *md = &band->md;

	assert(band->state == FTL_BAND_STATE_CLOSED ||
	       band->state == FTL_BAND_STATE_FREE);
	assert(md->ref_cnt == 0);
	assert(md->lba_map != NULL);
	assert(!band->high_prio);

	/* Verify that band's metadata is consistent with l2p */
	if (band->num_chunks) {
		assert(ftl_band_validate_md(band, band->md.lba_map) == true);
	}

	spdk_mempool_put(dev->lba_pool, md->lba_map);
	spdk_dma_free(md->dma_buf);
	md->lba_map = NULL;
	md->dma_buf = NULL;
}

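/* Move a closed band onto the free list, keeping the free list sorted by write count */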
static void
_ftl_band_set_free(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_band *lband, *prev;

	/* Verify band's previous state */
	assert(band->state == FTL_BAND_STATE_CLOSED);

	if (band == dev->df_band) {
		dev->df_band = NULL;
	}

	/* Remove the band from the closed band list */
	LIST_REMOVE(band, list_entry);

	/* Keep the list sorted by band's write count */
	LIST_FOREACH(lband, &dev->free_bands, list_entry) {
		if (lband->md.wr_cnt > band->md.wr_cnt) {
			LIST_INSERT_BEFORE(lband, band, list_entry);
			break;
		}
		prev = lband;
	}

	if (!lband) {
		if (LIST_EMPTY(&dev->free_bands)) {
			LIST_INSERT_HEAD(&dev->free_bands, band, list_entry);
		} else {
			LIST_INSERT_AFTER(prev, band, list_entry);
		}
	}

#if defined(DEBUG)
	/* Verify that the free list is still sorted by write count */
	prev = NULL;
	LIST_FOREACH(lband, &dev->free_bands, list_entry) {
		if (prev) {
			assert(prev->md.wr_cnt <= lband->md.wr_cnt);
		}
		prev = lband;
	}
#endif
	dev->num_free++;
	ftl_apply_limits(dev);
}

static void
_ftl_band_set_preparing(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md *md = &band->md;

	/* Verify band's previous state */
	assert(band->state == FTL_BAND_STATE_FREE);
	/* Remove band from free list */
	LIST_REMOVE(band, list_entry);

	md->wr_cnt++;

	assert(dev->num_free > 0);
	dev->num_free--;

	ftl_apply_limits(dev);
}

static void
_ftl_band_set_closed(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_chunk *chunk;

	/* TODO: add this kind of check in band_set_state() */
	if (band->state == FTL_BAND_STATE_CLOSED) {
		return;
	}

	/* Set the state as free_md() checks for that */
	band->state = FTL_BAND_STATE_CLOSED;

	/* Free the md if there are no outstanding IOs */
	ftl_band_release_md(band);

	if (spdk_likely(band->num_chunks)) {
		LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
		CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) {
			chunk->state = FTL_CHUNK_STATE_CLOSED;
		}
	} else {
		LIST_REMOVE(band, list_entry);
	}
}

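/* Calculate the metadata checksum, skipping over the checksum field itself */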
static uint32_t
ftl_md_calc_crc(const struct ftl_md_hdr *hdr, size_t size)
{
	size_t checkoff = offsetof(struct ftl_md_hdr, checksum);
	size_t mdoff = checkoff + sizeof(hdr->checksum);
	uint32_t crc;

	crc = spdk_crc32c_update(hdr, checkoff, 0);
	return spdk_crc32c_update((const char *)hdr + mdoff, size - mdoff, crc);
}

static void
ftl_set_md_hdr(struct spdk_ftl_dev *dev, struct ftl_md_hdr *hdr,
	       struct ftl_md *md, size_t size)
{
	hdr->seq = md->seq;
	hdr->ver = FTL_MD_VER;
	hdr->uuid = dev->uuid;
	hdr->checksum = ftl_md_calc_crc(hdr, size);
}

static int
ftl_pack_head_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
{
	struct ftl_head_md *head = data;

	head->wr_cnt = md->wr_cnt;
	head->lba_cnt = dev->num_lbas;
	head->xfer_size = dev->xfer_size;
	ftl_set_md_hdr(dev, &head->hdr, md, sizeof(struct ftl_head_md));

	return FTL_MD_SUCCESS;
}

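/* Serialize the band's metadata (header, valid map and LBA map) into the supplied buffer */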
static int
ftl_pack_tail_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
{
	struct ftl_tail_md *tail = data;
	size_t map_size;
	void *vld_offset, *map_offset;

	map_size = ftl_num_band_lbks(dev) * sizeof(uint64_t);
	vld_offset = (char *)data + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
	map_offset = (char *)vld_offset + ftl_vld_map_num_lbks(dev) * FTL_BLOCK_SIZE;

	/* Clear out the buffer */
	memset(data, 0, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
	tail->num_lbks = ftl_num_band_lbks(dev);

	pthread_spin_lock(&md->lock);
	spdk_bit_array_store_mask(md->vld_map, vld_offset);
	pthread_spin_unlock(&md->lock);

	memcpy(map_offset, md->lba_map, map_size);
	ftl_set_md_hdr(dev, &tail->hdr, md, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);

	return FTL_MD_SUCCESS;
}

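/* Validate a metadata header: the device UUID, metadata version and checksum must all match */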
static int
ftl_md_hdr_vld(struct spdk_ftl_dev *dev, const struct ftl_md_hdr *hdr, size_t size)
{
	if (spdk_uuid_compare(&dev->uuid, &hdr->uuid) != 0) {
		return FTL_MD_NO_MD;
	}

	if (hdr->ver != FTL_MD_VER) {
		return FTL_MD_INVALID_VER;
	}

	if (ftl_md_calc_crc(hdr, size) != hdr->checksum) {
		return FTL_MD_INVALID_CRC;
	}

	return FTL_MD_SUCCESS;
}

static int
ftl_unpack_tail_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
{
	struct ftl_tail_md *tail = data;
	size_t map_size;
	void *vld_offset, *map_offset;
	int rc;

	map_size = ftl_num_band_lbks(dev) * sizeof(uint64_t);
	vld_offset = (char *)data + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
	map_offset = (char *)vld_offset + ftl_vld_map_num_lbks(dev) * FTL_BLOCK_SIZE;

	rc = ftl_md_hdr_vld(dev, &tail->hdr, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
	if (rc) {
		return rc;
	}

	if (tail->num_lbks != ftl_num_band_lbks(dev)) {
		return FTL_MD_INVALID_SIZE;
	}

	if (md->vld_map) {
		spdk_bit_array_load_mask(md->vld_map, vld_offset);
	}

	if (md->lba_map) {
		memcpy(md->lba_map, map_offset, map_size);
	}

	md->seq = tail->hdr.seq;
	return FTL_MD_SUCCESS;
}

static int
ftl_unpack_lba_map(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
{
	memcpy(md->lba_map, data, ftl_num_band_lbks(dev) * sizeof(uint64_t));
	return FTL_MD_SUCCESS;
}

static int
ftl_unpack_head_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
{
	struct ftl_head_md *head = data;
	int rc;

	rc = ftl_md_hdr_vld(dev, &head->hdr, sizeof(struct ftl_head_md));
	if (rc) {
		return rc;
	}

	md->seq = head->hdr.seq;
	md->wr_cnt = head->wr_cnt;

	if (dev->global_md.num_lbas == 0) {
		dev->global_md.num_lbas = head->lba_cnt;
	}

	if (dev->global_md.num_lbas != head->lba_cnt) {
		return FTL_MD_INVALID_SIZE;
	}

	if (dev->xfer_size != head->xfer_size) {
		return FTL_MD_INVALID_SIZE;
	}

	return FTL_MD_SUCCESS;
}

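/* Physical address of the first tail metadata block. Data is striped across the band's */
/* chunks in xfer_size units, so both the chunk and the in-chunk offset are derived from */
/* the tail metadata offset. */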
struct ftl_ppa
ftl_band_tail_md_ppa(struct ftl_band *band)
{
	struct ftl_ppa ppa;
	struct ftl_chunk *chunk;
	struct spdk_ftl_dev *dev = band->dev;
	size_t xfer_size = dev->xfer_size;
	size_t num_req = ftl_band_tail_md_offset(band) / xfer_size;
	size_t i;

	if (spdk_unlikely(!band->num_chunks)) {
		return ftl_to_ppa(FTL_PPA_INVALID);
	}

	/* Metadata should be aligned to xfer size */
	assert(ftl_band_tail_md_offset(band) % xfer_size == 0);

	chunk = CIRCLEQ_FIRST(&band->chunks);
	for (i = 0; i < num_req % band->num_chunks; ++i) {
		chunk = ftl_band_next_chunk(band, chunk);
	}

	ppa.lbk = (num_req / band->num_chunks) * xfer_size;
	ppa.chk = band->id;
	ppa.pu = chunk->punit->start_ppa.pu;
	ppa.grp = chunk->punit->start_ppa.grp;

	return ppa;
}

struct ftl_ppa
ftl_band_head_md_ppa(struct ftl_band *band)
{
	struct ftl_ppa ppa;

	if (spdk_unlikely(!band->num_chunks)) {
		return ftl_to_ppa(FTL_PPA_INVALID);
	}

	ppa = CIRCLEQ_FIRST(&band->chunks)->punit->start_ppa;
	ppa.chk = band->id;

	return ppa;
}

void
ftl_band_set_state(struct ftl_band *band, enum ftl_band_state state)
{
	switch (state) {
	case FTL_BAND_STATE_FREE:
		_ftl_band_set_free(band);
		break;

	case FTL_BAND_STATE_PREP:
		_ftl_band_set_preparing(band);
		break;

	case FTL_BAND_STATE_CLOSED:
		_ftl_band_set_closed(band);
		break;

	default:
		break;
	}

	band->state = state;
}

void
ftl_band_set_addr(struct ftl_band *band, uint64_t lba, struct ftl_ppa ppa)
{
	struct ftl_md *md = &band->md;
	uint64_t offset;

	assert(lba != FTL_LBA_INVALID);

	offset = ftl_band_lbkoff_from_ppa(band, ppa);
	pthread_spin_lock(&band->md.lock);

	md->num_vld++;
	md->lba_map[offset] = lba;
	spdk_bit_array_set(md->vld_map, offset);

	pthread_spin_unlock(&band->md.lock);
}

size_t
ftl_band_age(const struct ftl_band *band)
{
	return (size_t)(band->dev->seq - band->md.seq);
}

size_t
ftl_band_num_usable_lbks(const struct ftl_band *band)
{
	return band->num_chunks * ftl_dev_lbks_in_chunk(band->dev);
}

size_t
ftl_band_user_lbks(const struct ftl_band *band)
{
	return ftl_band_num_usable_lbks(band) -
	       ftl_head_md_num_lbks(band->dev) -
	       ftl_tail_md_num_lbks(band->dev);
}

struct ftl_band *
ftl_band_from_ppa(struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
{
	assert(ppa.chk < ftl_dev_num_bands(dev));
	return &dev->bands[ppa.chk];
}

struct ftl_chunk *
ftl_band_chunk_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
{
	struct spdk_ftl_dev *dev = band->dev;
	unsigned int punit;

	punit = ftl_ppa_flatten_punit(dev, ppa);
	assert(punit < ftl_dev_num_punits(dev));

	return &band->chunk_buf[punit];
}

uint64_t
ftl_band_lbkoff_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
{
	struct spdk_ftl_dev *dev = band->dev;
	unsigned int punit;

	punit = ftl_ppa_flatten_punit(dev, ppa);
	assert(ppa.chk == band->id);

	return punit * ftl_dev_lbks_in_chunk(dev) + ppa.lbk;
}

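/* Advance ppa by num_lbks blocks, following the band's striping pattern (xfer_size blocks */
/* are written to a chunk before moving on to the next operational chunk) */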
struct ftl_ppa
ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t num_lbks)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_chunk *chunk;
	unsigned int punit_num;
	size_t num_xfers, num_stripes;

	assert(ppa.chk == band->id);

	punit_num = ftl_ppa_flatten_punit(dev, ppa);
	chunk = &band->chunk_buf[punit_num];

	num_lbks += (ppa.lbk % dev->xfer_size);
	ppa.lbk  -= (ppa.lbk % dev->xfer_size);

#if defined(DEBUG)
	/* Check that the number of chunks has not been changed */
	struct ftl_chunk *_chunk;
	size_t _num_chunks = 0;
	CIRCLEQ_FOREACH(_chunk, &band->chunks, circleq) {
		if (spdk_likely(_chunk->state != FTL_CHUNK_STATE_BAD)) {
			_num_chunks++;
		}
	}
	assert(band->num_chunks == _num_chunks);
#endif
	num_stripes = (num_lbks / dev->xfer_size) / band->num_chunks;
	ppa.lbk  += num_stripes * dev->xfer_size;
	num_lbks -= num_stripes * dev->xfer_size * band->num_chunks;

	if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
		return ftl_to_ppa(FTL_PPA_INVALID);
	}

	num_xfers = num_lbks / dev->xfer_size;
	for (size_t i = 0; i < num_xfers; ++i) {
		/* When the last chunk is reached the lbk part of the address */
		/* needs to be increased by xfer_size */
		if (ftl_band_chunk_is_last(band, chunk)) {
			ppa.lbk += dev->xfer_size;
			if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
				return ftl_to_ppa(FTL_PPA_INVALID);
			}
		}

		chunk = ftl_band_next_operational_chunk(band, chunk);
		ppa.grp = chunk->start_ppa.grp;
		ppa.pu = chunk->start_ppa.pu;

		num_lbks -= dev->xfer_size;
	}

	if (num_lbks) {
		ppa.lbk += num_lbks;
		if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
			return ftl_to_ppa(FTL_PPA_INVALID);
		}
	}

	return ppa;
}

struct ftl_ppa
ftl_band_ppa_from_lbkoff(struct ftl_band *band, uint64_t lbkoff)
{
	struct ftl_ppa ppa = { .ppa = 0 };
	struct spdk_ftl_dev *dev = band->dev;
	uint64_t punit;

	punit = lbkoff / ftl_dev_lbks_in_chunk(dev) + dev->range.begin;

	ppa.lbk = lbkoff % ftl_dev_lbks_in_chunk(dev);
	ppa.chk = band->id;
	ppa.pu = punit / dev->geo.num_grp;
	ppa.grp = punit % dev->geo.num_grp;

	return ppa;
}

struct ftl_ppa
ftl_band_next_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t offset)
{
	uint64_t lbkoff = ftl_band_lbkoff_from_ppa(band, ppa);
	return ftl_band_ppa_from_lbkoff(band, lbkoff + offset);
}

void
ftl_band_acquire_md(struct ftl_band *band)
{
	assert(band->md.lba_map != NULL);
	band->md.ref_cnt++;
}

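/* Allocate the band's LBA map from the device-wide pool and a DMA buffer large enough for */
/* the tail metadata, taking the initial reference on the metadata */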
int
ftl_band_alloc_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md *md = &band->md;

	assert(md->ref_cnt == 0);
	assert(md->lba_map == NULL);

	md->lba_map = spdk_mempool_get(dev->lba_pool);
	if (!md->lba_map) {
		return -1;
	}

	md->dma_buf = spdk_dma_zmalloc(ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE,
				       FTL_BLOCK_SIZE, NULL);
	if (!md->dma_buf) {
		spdk_mempool_put(dev->lba_pool, md->lba_map);
		return -1;
	}

	ftl_band_acquire_md(band);
	return 0;
}

void
ftl_band_release_md(struct ftl_band *band)
{
	struct ftl_md *md = &band->md;

	assert(band->md.lba_map != NULL);
	assert(md->ref_cnt > 0);
	md->ref_cnt--;

	if (md->ref_cnt == 0) {
		ftl_band_free_md(band);
	}
}

static void
ftl_read_md_cb(void *arg, int status)
{
	struct ftl_md_io *md_io = arg;

	if (!status) {
		status = md_io->pack_fn(md_io->io.dev,
					md_io->md,
					md_io->buf);
	} else {
		status = FTL_MD_IO_FAILURE;
	}

	md_io->cb.fn(md_io->cb.ctx, status);
}

static struct ftl_md_io *
ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data, struct ftl_ppa ppa,
		    struct ftl_band *band, size_t lbk_cnt, size_t req_size, ftl_md_pack_fn fn,
		    const struct ftl_cb *cb)
{
	struct ftl_md_io *io;
	struct ftl_io_init_opts opts = {
		.dev		= dev,
		.io		= NULL,
		.rwb_batch	= NULL,
		.band		= band,
		.size		= sizeof(*io),
		.flags		= FTL_IO_MD | FTL_IO_PPA_MODE,
		.type		= FTL_IO_READ,
		.iov_cnt	= spdk_divide_round_up(lbk_cnt, req_size),
		.req_size	= req_size,
		.fn		= ftl_read_md_cb,
		.data		= data,
	};

	io = (struct ftl_md_io *)ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->io.ppa = ppa;
	io->md = md;
	io->buf = data;
	io->pack_fn = fn;
	io->cb = *cb;

	return io;
}

static struct ftl_io *
ftl_io_init_md_write(struct spdk_ftl_dev *dev, struct ftl_band *band,
		     void *data, size_t req_cnt, spdk_ftl_fn cb)
{
	struct ftl_io_init_opts opts = {
		.dev		= dev,
		.io		= NULL,
		.rwb_batch	= NULL,
		.band		= band,
		.size		= sizeof(struct ftl_io),
		.flags		= FTL_IO_MD | FTL_IO_PPA_MODE,
		.type		= FTL_IO_WRITE,
		.iov_cnt	= req_cnt,
		.req_size	= dev->xfer_size,
		.fn		= cb,
		.data		= data,
		.md		= NULL,
	};

	return ftl_io_init_internal(&opts);
}

static int
ftl_band_write_md(struct ftl_band *band, void *data, size_t lbk_cnt,
		  ftl_md_pack_fn md_fn, spdk_ftl_fn cb)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_io *io;

	io = ftl_io_init_md_write(dev, band, data,
				  spdk_divide_round_up(lbk_cnt, dev->xfer_size), cb);
	if (!io) {
		return -ENOMEM;
	}

	md_fn(dev, &band->md, data);

	return ftl_io_write(io);
}

void
ftl_band_md_clear(struct ftl_md *md)
{
	md->seq = 0;
	md->num_vld = 0;
	md->wr_cnt = 0;
	md->lba_map = NULL;
}

int
ftl_band_write_head_md(struct ftl_band *band, void *data, spdk_ftl_fn cb)
{
	return ftl_band_write_md(band, data, ftl_head_md_num_lbks(band->dev),
				 ftl_pack_head_md, cb);
}

int
ftl_band_write_tail_md(struct ftl_band *band, void *data, spdk_ftl_fn cb)
{
	return ftl_band_write_md(band, data, ftl_tail_md_num_lbks(band->dev),
				 ftl_pack_tail_md, cb);
}

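/* Physical address at which the LBA map portion of the tail metadata starts (right after */
/* the metadata header and the valid map) */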
static struct ftl_ppa
ftl_band_lba_map_ppa(struct ftl_band *band)
{
	return ftl_band_next_xfer_ppa(band, band->tail_md_ppa,
				      ftl_tail_md_hdr_num_lbks() +
				      ftl_vld_map_num_lbks(band->dev));
}

static int
ftl_band_read_md(struct ftl_band *band, struct ftl_md *md, void *data, size_t lbk_cnt,
		 size_t req_size, struct ftl_ppa start_ppa, ftl_md_pack_fn unpack_fn,
		 const struct ftl_cb *cb)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md_io *io;

	if (spdk_unlikely(!band->num_chunks)) {
		return -ENOENT;
	}

	io = ftl_io_init_md_read(dev, md, data, start_ppa, band, lbk_cnt,
				 req_size, unpack_fn, cb);
	if (!io) {
		return -ENOMEM;
	}

	ftl_io_read((struct ftl_io *)io);
	return 0;
}

int
ftl_band_read_tail_md(struct ftl_band *band, struct ftl_md *md,
		      void *data, struct ftl_ppa ppa, const struct ftl_cb *cb)
{
	return ftl_band_read_md(band, md, data,
				ftl_tail_md_num_lbks(band->dev),
				band->dev->xfer_size,
				ppa,
				ftl_unpack_tail_md,
				cb);
}

int
ftl_band_read_lba_map(struct ftl_band *band, struct ftl_md *md,
		      void *data, const struct ftl_cb *cb)
{
	/* TODO: change this interface to allow reading parts of the LBA map instead of */
	/* reading whole metadata */
	return ftl_band_read_md(band, md, data,
				ftl_lba_map_num_lbks(band->dev),
				band->dev->xfer_size,
				ftl_band_lba_map_ppa(band),
				ftl_unpack_lba_map,
				cb);
}

int
ftl_band_read_head_md(struct ftl_band *band, struct ftl_md *md,
		      void *data, const struct ftl_cb *cb)
{
	return ftl_band_read_md(band, md, data,
				ftl_head_md_num_lbks(band->dev),
				band->dev->xfer_size,
				ftl_band_head_md_ppa(band),
				ftl_unpack_head_md,
				cb);
}

static void
ftl_band_remove_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
{
	CIRCLEQ_REMOVE(&band->chunks, chunk, circleq);
	band->num_chunks--;
}

static void
ftl_erase_fail(struct ftl_io *io, int status)
{
	struct ftl_chunk *chunk;
	char buf[128];

	SPDK_ERRLOG("Erase failed @ppa: %s, status: %d\n",
		    ftl_ppa2str(io->ppa, buf, sizeof(buf)), status);

	chunk = ftl_band_chunk_from_ppa(io->band, io->ppa);
	chunk->state = FTL_CHUNK_STATE_BAD;
	ftl_band_remove_chunk(io->band, chunk);
}

static void
ftl_band_erase_cb(void *ctx, int status)
{
	struct ftl_io *io = ctx;
	struct ftl_chunk *chunk;

	if (spdk_unlikely(status)) {
		ftl_erase_fail(io, status);
		return;
	}
	chunk = ftl_band_chunk_from_ppa(io->band, io->ppa);
	chunk->state = FTL_CHUNK_STATE_FREE;
}

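/* Issue an erase for every chunk of the band that isn't already free */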
int
ftl_band_erase(struct ftl_band *band)
{
	struct ftl_chunk *chunk;
	struct ftl_io *io;
	int rc = 0;

	assert(band->state == FTL_BAND_STATE_CLOSED ||
	       band->state == FTL_BAND_STATE_FREE);

	ftl_band_set_state(band, FTL_BAND_STATE_PREP);

	CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) {
		if (chunk->state == FTL_CHUNK_STATE_FREE) {
			continue;
		}

		io = ftl_io_erase_init(band, 1, ftl_band_erase_cb);
		if (!io) {
			rc = -ENOMEM;
			break;
		}

		io->ppa = chunk->start_ppa;
		rc = ftl_io_erase(io);
		if (rc) {
			assert(0);
			/* TODO: change band's state back to close? */
			break;
		}
	}

	return rc;
}

int
ftl_band_write_prep(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;

	if (ftl_band_alloc_md(band)) {
		return -1;
	}

	band->md.seq = ++dev->seq;
	return 0;
}

struct ftl_chunk *
ftl_band_next_operational_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
{
	struct ftl_chunk *result = NULL;
	struct ftl_chunk *entry;

	if (spdk_unlikely(!band->num_chunks)) {
		return NULL;
	}

	/* Erasing band may fail after it was assigned to wptr. */
	/* In such a case chunk is no longer in band->chunks queue. */
	if (spdk_likely(chunk->state != FTL_CHUNK_STATE_BAD)) {
		result = ftl_band_next_chunk(band, chunk);
	} else {
		CIRCLEQ_FOREACH_REVERSE(entry, &band->chunks, circleq) {
			if (entry->pos > chunk->pos) {
				result = entry;
			} else {
				if (!result) {
					result = CIRCLEQ_FIRST(&band->chunks);
				}
				break;
			}
		}
	}

	return result;
}