/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/likely.h"
#include "spdk/stdinc.h"
#include "spdk/nvme.h"
#include "spdk/io_channel.h"
#include "spdk/bdev_module.h"
#include "spdk_internal/log.h"
#include "spdk/ftl.h"

#include "ftl_core.h"
#include "ftl_band.h"
#include "ftl_io.h"
#include "ftl_anm.h"
#include "ftl_rwb.h"
#include "ftl_debug.h"
#include "ftl_reloc.h"

/* Max number of iovecs */
#define FTL_MAX_IOV 1024

struct ftl_wptr {
	/* Owner device */
	struct spdk_ftl_dev		*dev;

	/* Current PPA */
	struct ftl_ppa			ppa;

	/* Band currently being written to */
	struct ftl_band			*band;

	/* Current logical block's offset */
	uint64_t			offset;

	/* Current erase block */
	struct ftl_chunk		*chunk;

	/* Metadata DMA buffer */
	void				*md_buf;

	/* List link */
	LIST_ENTRY(ftl_wptr)		list_entry;
};

struct ftl_flush {
	/* Owner device */
	struct spdk_ftl_dev		*dev;

	/* Number of batches to wait for */
	size_t				num_req;

	/* Callback */
	struct ftl_cb			cb;

	/* Batch bitmap */
	struct spdk_bit_array		*bmap;

	/* List link */
	LIST_ENTRY(ftl_flush)		list_entry;
};

typedef int (*ftl_next_ppa_fn)(struct ftl_io *, struct ftl_ppa *, size_t, void *);
static void _ftl_read(void *);
static void _ftl_write(void *);

static int
ftl_rwb_flags_from_io(const struct ftl_io *io)
{
	int valid_flags = FTL_IO_INTERNAL | FTL_IO_WEAK | FTL_IO_PAD;
	return io->flags & valid_flags;
}

static int
ftl_rwb_entry_weak(const struct ftl_rwb_entry *entry)
{
	return entry->flags & FTL_IO_WEAK;
}

static void
ftl_wptr_free(struct ftl_wptr *wptr)
{
	if (!wptr) {
		return;
	}

	spdk_dma_free(wptr->md_buf);
	free(wptr);
}

static void
ftl_remove_wptr(struct ftl_wptr *wptr)
{
	LIST_REMOVE(wptr, list_entry);
	ftl_wptr_free(wptr);
}

static void
ftl_io_cmpl_cb(void *arg, const struct spdk_nvme_cpl *status)
{
	struct ftl_io *io = arg;

	if (spdk_nvme_cpl_is_error(status)) {
		ftl_io_process_error(io, status);
	}

	ftl_trace_completion(io->dev, io, FTL_TRACE_COMPLETION_DISK);

	if (!ftl_io_dec_req(io)) {
		ftl_io_complete(io);
	}
}

static void
ftl_halt_writes(struct spdk_ftl_dev *dev, struct ftl_band *band)
{
	struct ftl_wptr *wptr = NULL;

	LIST_FOREACH(wptr, &dev->wptr_list, list_entry) {
		if (wptr->band == band) {
			break;
		}
	}

	/* If the band already has the high_prio flag set, other writes must */
	/* have failed earlier, so it's already taken care of. */
	if (band->high_prio) {
		assert(wptr == NULL);
		return;
	}

	ftl_band_write_failed(band);
	ftl_remove_wptr(wptr);
}

static struct ftl_wptr *
ftl_wptr_from_band(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_wptr *wptr = NULL;

	LIST_FOREACH(wptr, &dev->wptr_list, list_entry) {
		if (wptr->band == band) {
			return wptr;
		}
	}

	return NULL;
}

static void
ftl_md_write_fail(struct ftl_io *io, int status)
{
	struct ftl_band *band = io->band;
	struct ftl_wptr *wptr;
	char buf[128];

	wptr = ftl_wptr_from_band(band);

	SPDK_ERRLOG("Metadata write failed @ppa: %s, status: %d\n",
		    ftl_ppa2str(wptr->ppa, buf, sizeof(buf)), status);

	ftl_halt_writes(io->dev, band);
}

static void
ftl_md_write_cb(void *arg, int status)
{
	struct ftl_io *io = arg;
	struct ftl_wptr *wptr;

	wptr = ftl_wptr_from_band(io->band);

	if (status) {
		ftl_md_write_fail(io, status);
		return;
	}

	ftl_band_set_next_state(io->band);
	if (io->band->state == FTL_BAND_STATE_CLOSED) {
		ftl_remove_wptr(wptr);
	}
}

static int
ftl_ppa_read_next_ppa(struct ftl_io *io, struct ftl_ppa *ppa,
		      size_t lbk, void *ctx)
{
	struct spdk_ftl_dev *dev = io->dev;
	size_t lbk_cnt, max_lbks;

	assert(ftl_io_mode_ppa(io));
	assert(io->iov_pos < io->iov_cnt);

	if (lbk == 0) {
		*ppa = io->ppa;
	} else {
		*ppa = ftl_band_next_xfer_ppa(io->band, io->ppa, lbk);
	}

	assert(!ftl_ppa_invalid(*ppa));

	/* Metadata has to be read in the way it's written (jumping across */
	/* the chunks in xfer_size increments) */
	if (io->flags & FTL_IO_MD) {
		max_lbks = dev->xfer_size - (ppa->lbk % dev->xfer_size);
		lbk_cnt = spdk_min(ftl_io_iovec_len_left(io), max_lbks);
		assert(ppa->lbk / dev->xfer_size == (ppa->lbk + lbk_cnt - 1) / dev->xfer_size);
	} else {
		lbk_cnt = ftl_io_iovec_len_left(io);
	}

	return lbk_cnt;
}
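
/* A worked example of the clamp above (illustrative numbers, not from the */
/* code): with xfer_size = 16, a metadata read starting at lbk 20 is */
/* limited to max_lbks = 16 - (20 % 16) = 12 lbks, so the request stops */
/* exactly at the next xfer_size boundary and follows the same */
/* chunk-hopping layout the metadata was written in. */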

static int
ftl_wptr_close_band(struct ftl_wptr *wptr)
{
	struct ftl_band *band = wptr->band;

	ftl_band_set_state(band, FTL_BAND_STATE_CLOSING);
	band->tail_md_ppa = wptr->ppa;

	return ftl_band_write_tail_md(band, wptr->md_buf, ftl_md_write_cb);
}

static int
ftl_wptr_open_band(struct ftl_wptr *wptr)
{
	struct ftl_band *band = wptr->band;

	assert(ftl_band_chunk_is_first(band, wptr->chunk));
	assert(band->md.num_vld == 0);

	ftl_band_clear_md(band);

	assert(band->state == FTL_BAND_STATE_PREP);
	ftl_band_set_state(band, FTL_BAND_STATE_OPENING);

	return ftl_band_write_head_md(band, wptr->md_buf, ftl_md_write_cb);
}

static int
ftl_submit_erase(struct ftl_io *io)
{
	struct spdk_ftl_dev *dev = io->dev;
	struct ftl_band *band = io->band;
	struct ftl_ppa ppa = io->ppa;
	struct ftl_chunk *chunk;
	uint64_t ppa_packed;
	int rc = 0;
	size_t i;

	for (i = 0; i < io->lbk_cnt; ++i) {
		if (i != 0) {
			chunk = ftl_band_next_chunk(band, ftl_band_chunk_from_ppa(band, ppa));
			assert(chunk->state == FTL_CHUNK_STATE_CLOSED ||
			       chunk->state == FTL_CHUNK_STATE_VACANT);
			ppa = chunk->start_ppa;
		}

		assert(ppa.lbk == 0);
		ppa_packed = ftl_ppa_addr_pack(dev, ppa);

		ftl_io_inc_req(io);

		ftl_trace_submission(dev, io, ppa, 1);
		rc = spdk_nvme_ocssd_ns_cmd_vector_reset(dev->ns, ftl_get_write_qpair(dev),
				&ppa_packed, 1, NULL, ftl_io_cmpl_cb, io);
		if (rc) {
			SPDK_ERRLOG("Vector reset failed with status: %d\n", rc);
			ftl_io_dec_req(io);
			break;
		}
	}

	if (ftl_io_done(io)) {
		ftl_io_complete(io);
	}

	return rc;
}

static void
_ftl_io_erase(void *ctx)
{
	ftl_io_erase((struct ftl_io *)ctx);
}

static bool
ftl_check_core_thread(const struct spdk_ftl_dev *dev)
{
	return dev->core_thread.thread == spdk_get_thread();
}

static bool
ftl_check_read_thread(const struct spdk_ftl_dev *dev)
{
	return dev->read_thread.thread == spdk_get_thread();
}

int
ftl_io_erase(struct ftl_io *io)
{
	struct spdk_ftl_dev *dev = io->dev;

	if (ftl_check_core_thread(dev)) {
		return ftl_submit_erase(io);
	}

	spdk_thread_send_msg(ftl_get_core_thread(dev), _ftl_io_erase, io);
	return 0;
}
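
/* Erase submission is pinned to the core thread, since that thread owns */
/* the write qpair used by ftl_submit_erase(). Callers on other threads */
/* are bounced over via spdk_thread_send_msg() and observe completion */
/* through the io's callback rather than the return value. */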

static struct ftl_band *
ftl_next_write_band(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band;

	band = LIST_FIRST(&dev->free_bands);
	if (!band) {
		return NULL;
	}
	assert(band->state == FTL_BAND_STATE_FREE);

	if (ftl_band_erase(band)) {
		/* TODO: handle erase failure */
		return NULL;
	}

	return band;
}

static struct ftl_band *
ftl_next_wptr_band(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band;

	if (!dev->next_band) {
		band = ftl_next_write_band(dev);
	} else {
		assert(dev->next_band->state == FTL_BAND_STATE_PREP);
		band = dev->next_band;
		dev->next_band = NULL;
	}

	return band;
}

static struct ftl_wptr *
ftl_wptr_init(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_wptr *wptr;

	wptr = calloc(1, sizeof(*wptr));
	if (!wptr) {
		return NULL;
	}

	wptr->md_buf = spdk_dma_zmalloc(ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE,
					FTL_BLOCK_SIZE, NULL);
	if (!wptr->md_buf) {
		ftl_wptr_free(wptr);
		return NULL;
	}

	wptr->dev = dev;
	wptr->band = band;
	wptr->chunk = CIRCLEQ_FIRST(&band->chunks);
	wptr->ppa = wptr->chunk->start_ppa;

	return wptr;
}

static int
ftl_add_wptr(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band;
	struct ftl_wptr *wptr;

	band = ftl_next_wptr_band(dev);
	if (!band) {
		return -1;
	}

	wptr = ftl_wptr_init(band);
	if (!wptr) {
		return -1;
	}

	if (ftl_band_write_prep(band)) {
		ftl_wptr_free(wptr);
		return -1;
	}

	LIST_INSERT_HEAD(&dev->wptr_list, wptr, list_entry);

	SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "wptr: band %u\n", band->id);
	ftl_trace_write_band(dev, band);
	return 0;
}

static void
ftl_wptr_advance(struct ftl_wptr *wptr, size_t xfer_size)
{
	struct ftl_band *band = wptr->band;
	struct spdk_ftl_dev *dev = wptr->dev;
	struct spdk_ftl_conf *conf = &dev->conf;
	size_t next_thld;

	wptr->offset += xfer_size;
	next_thld = (ftl_band_num_usable_lbks(band) * conf->band_thld) / 100;

	if (ftl_band_full(band, wptr->offset)) {
		ftl_band_set_state(band, FTL_BAND_STATE_FULL);
	}

	wptr->ppa = ftl_band_next_xfer_ppa(band, wptr->ppa, xfer_size);
	wptr->chunk = ftl_band_next_operational_chunk(band, wptr->chunk);

	assert(!ftl_ppa_invalid(wptr->ppa));

	SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "wptr: grp:%d, pu:%d chunk:%d, lbk:%u\n",
		      wptr->ppa.grp, wptr->ppa.pu, wptr->ppa.chk, wptr->ppa.lbk);

	if (wptr->offset >= next_thld && !dev->next_band) {
		dev->next_band = ftl_next_write_band(dev);
	}
}
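
/* A rough example of the threshold above (illustrative numbers): with */
/* 1000 usable lbks per band and band_thld = 75, next_thld = 750, so once */
/* the write pointer has covered 75% of the band the next band is picked */
/* and erased ahead of time, hiding the erase latency before this band */
/* fills up. */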

static int
ftl_wptr_ready(struct ftl_wptr *wptr)
{
	struct ftl_band *band = wptr->band;

	/* TODO: add handling of empty bands */

	if (spdk_unlikely(!ftl_chunk_is_writable(wptr->chunk))) {
		/* Erasing band may fail after it was assigned to wptr. */
		if (spdk_unlikely(wptr->chunk->state == FTL_CHUNK_STATE_BAD)) {
			ftl_wptr_advance(wptr, wptr->dev->xfer_size);
		}
		return 0;
	}

	/* If we're in the process of writing metadata, wait till it is */
	/* completed. */
	/* TODO: we should probably change bands once we're writing tail md */
	if (ftl_band_state_changing(band)) {
		return 0;
	}

	if (band->state == FTL_BAND_STATE_FULL) {
		if (ftl_wptr_close_band(wptr)) {
			/* TODO: need recovery here */
			assert(false);
		}
		return 0;
	}

	if (band->state != FTL_BAND_STATE_OPEN) {
		if (ftl_wptr_open_band(wptr)) {
			/* TODO: need recovery here */
			assert(false);
		}
		return 0;
	}

	return 1;
}

static const struct spdk_ftl_limit *
ftl_get_limit(const struct spdk_ftl_dev *dev, int type)
{
	assert(type < SPDK_FTL_LIMIT_MAX);
	return &dev->conf.defrag.limits[type];
}

static bool
ftl_cache_lba_valid(struct spdk_ftl_dev *dev, struct ftl_rwb_entry *entry)
{
	struct ftl_ppa ppa;

	/* If the LBA is invalid don't bother checking the md and l2p */
	if (spdk_unlikely(entry->lba == FTL_LBA_INVALID)) {
		return false;
	}

	ppa = ftl_l2p_get(dev, entry->lba);
	if (!(ftl_ppa_cached(ppa) && ppa.offset == entry->pos)) {
		return false;
	}

	return true;
}

static void
ftl_evict_cache_entry(struct spdk_ftl_dev *dev, struct ftl_rwb_entry *entry)
{
	pthread_spin_lock(&entry->lock);

	if (!ftl_rwb_entry_valid(entry)) {
		goto unlock;
	}

	/* If the l2p wasn't updated and still points at the entry, fill it with the */
	/* on-disk PPA and clear the cache status bit. Otherwise, skip the l2p update */
	/* and just clear the cache status. */
	if (!ftl_cache_lba_valid(dev, entry)) {
		goto clear;
	}

	ftl_l2p_set(dev, entry->lba, entry->ppa);
clear:
	ftl_rwb_entry_invalidate(entry);
unlock:
	pthread_spin_unlock(&entry->lock);
}

static struct ftl_rwb_entry *
ftl_acquire_entry(struct spdk_ftl_dev *dev, int flags)
{
	struct ftl_rwb_entry *entry;

	entry = ftl_rwb_acquire(dev->rwb, ftl_rwb_type_from_flags(flags));
	if (!entry) {
		return NULL;
	}

	ftl_evict_cache_entry(dev, entry);

	entry->flags = flags;
	return entry;
}

static void
ftl_rwb_pad(struct spdk_ftl_dev *dev, size_t size)
{
	struct ftl_rwb_entry *entry;
	int flags = FTL_IO_PAD | FTL_IO_INTERNAL;

	for (size_t i = 0; i < size; ++i) {
		entry = ftl_acquire_entry(dev, flags);
		if (!entry) {
			break;
		}

		entry->lba = FTL_LBA_INVALID;
		entry->ppa = ftl_to_ppa(FTL_PPA_INVALID);
		memset(entry->data, 0, FTL_BLOCK_SIZE);
		ftl_rwb_push(entry);
	}
}

static void
ftl_remove_free_bands(struct spdk_ftl_dev *dev)
{
	while (!LIST_EMPTY(&dev->free_bands)) {
		LIST_REMOVE(LIST_FIRST(&dev->free_bands), list_entry);
	}

	dev->next_band = NULL;
}

static void
ftl_process_shutdown(struct spdk_ftl_dev *dev)
{
	size_t size = ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_INTERNAL) +
		      ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_USER);

	if (size >= dev->xfer_size) {
		return;
	}

	/* If we reach this point we need to remove free bands */
	/* and pad current wptr band to the end */
	ftl_remove_free_bands(dev);

	/* Pad write buffer until band is full */
	ftl_rwb_pad(dev, dev->xfer_size - size);
}

static int
ftl_shutdown_complete(struct spdk_ftl_dev *dev)
{
	return !__atomic_load_n(&dev->num_inflight, __ATOMIC_SEQ_CST) &&
	       LIST_EMPTY(&dev->wptr_list);
}

void
ftl_apply_limits(struct spdk_ftl_dev *dev)
{
	const struct spdk_ftl_limit *limit;
	struct ftl_stats *stats = &dev->stats;
	size_t rwb_limit[FTL_RWB_TYPE_MAX];
	int i;

	ftl_rwb_get_limits(dev->rwb, rwb_limit);

	/* Clear existing limit */
	dev->limit = SPDK_FTL_LIMIT_MAX;

	for (i = SPDK_FTL_LIMIT_CRIT; i < SPDK_FTL_LIMIT_MAX; ++i) {
		limit = ftl_get_limit(dev, i);

		if (dev->num_free <= limit->thld) {
			rwb_limit[FTL_RWB_TYPE_USER] =
				(limit->limit * ftl_rwb_entry_cnt(dev->rwb)) / 100;
			stats->limits[i]++;
			dev->limit = i;
			goto apply;
		}
	}

	/* Clear the limits, since we don't need to apply them anymore */
	rwb_limit[FTL_RWB_TYPE_USER] = ftl_rwb_entry_cnt(dev->rwb);
apply:
	ftl_trace_limits(dev, rwb_limit, dev->num_free);
	ftl_rwb_set_limits(dev->rwb, rwb_limit);
}
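
/* An illustrative run of the loop above (made-up numbers): with a level */
/* configured as thld = 20 and limit = 50, once fewer than 20 free bands */
/* remain the user portion of the RWB is capped at 50% of its entries, */
/* while internal (e.g. relocation) writes keep their full share so that */
/* defrag can reclaim bands faster than user writes consume them. */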

static int
ftl_invalidate_addr_unlocked(struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
{
	struct ftl_band *band = ftl_band_from_ppa(dev, ppa);
	struct ftl_md *md = &band->md;
	uint64_t offset;

	offset = ftl_band_lbkoff_from_ppa(band, ppa);

	/* The bit might be already cleared if two writes are scheduled to the */
	/* same LBA at the same time */
	if (spdk_bit_array_get(md->vld_map, offset)) {
		assert(md->num_vld > 0);
		spdk_bit_array_clear(md->vld_map, offset);
		md->num_vld--;
		return 1;
	}

	return 0;
}

int
ftl_invalidate_addr(struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
{
	struct ftl_band *band;
	int rc;

	assert(!ftl_ppa_cached(ppa));
	band = ftl_band_from_ppa(dev, ppa);

	pthread_spin_lock(&band->md.lock);
	rc = ftl_invalidate_addr_unlocked(dev, ppa);
	pthread_spin_unlock(&band->md.lock);

	return rc;
}

static int
ftl_read_retry(int rc)
{
	return rc == -EAGAIN;
}

static int
ftl_read_canceled(int rc)
{
	return rc == 0;
}
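
/* The next_ppa callbacks (ftl_ppa_read_next_ppa above and */
/* ftl_lba_read_next_ppa below) share one return convention: a positive */
/* value is the number of lbks to read starting at *ppa, zero means the */
/* lbk was already satisfied (e.g. from the write buffer) and can be */
/* skipped, and -EAGAIN means the lookup raced with a write and has to be */
/* retried from scratch. */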

static void
ftl_submit_read(struct ftl_io *io, ftl_next_ppa_fn next_ppa,
		void *ctx)
{
	struct spdk_ftl_dev *dev = io->dev;
	struct ftl_ppa ppa;
	size_t lbk = 0;
	int rc = 0, lbk_cnt;

	while (lbk < io->lbk_cnt) {
		/* We might hit the cache here; if so, skip the read */
		lbk_cnt = rc = next_ppa(io, &ppa, lbk, ctx);

		/* We might need to retry the read from scratch (e.g. */
		/* because a write was under way and completed before */
		/* we could read it from the rwb) */
		if (ftl_read_retry(rc)) {
			continue;
		}

		/* We don't have to schedule the read, as it was read from cache */
		if (ftl_read_canceled(rc)) {
			ftl_io_update_iovec(io, 1);
			lbk++;
			continue;
		}

		assert(lbk_cnt > 0);

		ftl_trace_submission(dev, io, ppa, lbk_cnt);
		rc = spdk_nvme_ns_cmd_read(dev->ns, ftl_get_read_qpair(dev),
					   ftl_io_iovec_addr(io),
					   ftl_ppa_addr_pack(io->dev, ppa), lbk_cnt,
					   ftl_io_cmpl_cb, io, 0);

		if (rc) {
			io->status = rc;

			if (rc != -ENOMEM) {
				SPDK_ERRLOG("spdk_nvme_ns_cmd_read failed with status: %d\n", rc);
			}
			break;
		}

		ftl_io_update_iovec(io, lbk_cnt);
		ftl_io_inc_req(io);
		lbk += lbk_cnt;
	}

	/* If we didn't have to read anything from the device, */
	/* complete the request right away */
	if (ftl_io_done(io)) {
		ftl_io_complete(io);
	}
}

static int
ftl_ppa_cache_read(struct ftl_io *io, uint64_t lba,
		   struct ftl_ppa ppa, void *buf)
{
	struct ftl_rwb *rwb = io->dev->rwb;
	struct ftl_rwb_entry *entry;
	struct ftl_ppa nppa;
	int rc = 0;

	entry = ftl_rwb_entry_from_offset(rwb, ppa.offset);
	pthread_spin_lock(&entry->lock);

	nppa = ftl_l2p_get(io->dev, lba);
	if (ppa.ppa != nppa.ppa) {
		rc = -1;
		goto out;
	}

	memcpy(buf, entry->data, FTL_BLOCK_SIZE);
out:
	pthread_spin_unlock(&entry->lock);
	return rc;
}
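
/* Note the recheck above: the entry is located from the cached PPA */
/* without any lock held, so the L2P has to be re-read under the entry */
/* lock; if the mapping changed in the meantime the read is rejected and */
/* the caller retries the lookup (see the -EAGAIN path below). */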

static int
ftl_lba_read_next_ppa(struct ftl_io *io, struct ftl_ppa *ppa,
		      size_t lbk, void *ctx)
{
	struct spdk_ftl_dev *dev = io->dev;
	*ppa = ftl_l2p_get(dev, io->lba + lbk);

	(void) ctx;

	SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "Read ppa:%lx, lba:%lu\n", ppa->ppa, io->lba);

	/* If the PPA is invalid, skip it (the buffer should already be zero'ed) */
	if (ftl_ppa_invalid(*ppa)) {
		ftl_trace_completion(io->dev, io, FTL_TRACE_COMPLETION_INVALID);
		return 0;
	}

	if (ftl_ppa_cached(*ppa)) {
		if (!ftl_ppa_cache_read(io, io->lba + lbk, *ppa, ftl_io_iovec_addr(io))) {
			ftl_trace_completion(io->dev, io, FTL_TRACE_COMPLETION_CACHE);
			return 0;
		}

		/* If the state changed, we have to re-read the l2p */
		return -EAGAIN;
	}

	/* We want to read one lbk at a time */
	return 1;
}

static void
ftl_complete_flush(struct ftl_flush *flush)
{
	assert(flush->num_req == 0);
	LIST_REMOVE(flush, list_entry);

	flush->cb.fn(flush->cb.ctx, 0);

	spdk_bit_array_free(&flush->bmap);
	free(flush);
}

static void
ftl_process_flush(struct spdk_ftl_dev *dev, struct ftl_rwb_batch *batch)
{
	struct ftl_flush *flush, *tflush;
	size_t offset;

	LIST_FOREACH_SAFE(flush, &dev->flush_list, list_entry, tflush) {
		offset = ftl_rwb_batch_get_offset(batch);

		if (spdk_bit_array_get(flush->bmap, offset)) {
			spdk_bit_array_clear(flush->bmap, offset);
			if (!(--flush->num_req)) {
				ftl_complete_flush(flush);
			}
		}
	}
}

static void
ftl_write_fail(struct ftl_io *io, int status)
{
	struct ftl_rwb_batch *batch = io->rwb_batch;
	struct spdk_ftl_dev *dev = io->dev;
	struct ftl_rwb_entry *entry;
	struct ftl_band *band;
	char buf[128];

	entry = ftl_rwb_batch_first_entry(batch);

	band = ftl_band_from_ppa(io->dev, entry->ppa);
	SPDK_ERRLOG("Write failed @ppa: %s, status: %d\n",
		    ftl_ppa2str(entry->ppa, buf, sizeof(buf)), status);

	/* Close the band and halt the wptr and defrag */
	ftl_halt_writes(dev, band);

	ftl_rwb_foreach(entry, batch) {
		/* Invalidate meta set by process_writes() */
		ftl_invalidate_addr(dev, entry->ppa);
	}

	/* Reset the batch back to the RWB to resend it later */
	ftl_rwb_batch_revert(batch);
}

static void
ftl_write_cb(void *arg, int status)
{
	struct ftl_io *io = arg;
	struct spdk_ftl_dev *dev = io->dev;
	struct ftl_rwb_batch *batch = io->rwb_batch;
	struct ftl_rwb_entry *entry;

	if (status) {
		ftl_write_fail(io, status);
		return;
	}

	assert(io->lbk_cnt == dev->xfer_size);
	ftl_rwb_foreach(entry, batch) {
		if (!(io->flags & FTL_IO_MD) && !(entry->flags & FTL_IO_PAD)) {
			/* Verify that the LBA is set for user lbks */
			assert(entry->lba != FTL_LBA_INVALID);
		}

		SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "Write ppa:%lu, lba:%lu\n",
			      entry->ppa.ppa, entry->lba);
	}

	ftl_process_flush(dev, batch);
	ftl_rwb_batch_release(batch);
}

static void
ftl_update_rwb_stats(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry)
{
	if (!ftl_rwb_entry_internal(entry)) {
		dev->stats.write_user++;
	}
	dev->stats.write_total++;
}

static void
ftl_update_l2p(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry,
	       struct ftl_ppa ppa)
{
	struct ftl_ppa prev_ppa;
	struct ftl_rwb_entry *prev;
	struct ftl_band *band;
	int valid;

	prev_ppa = ftl_l2p_get(dev, entry->lba);
	if (ftl_ppa_invalid(prev_ppa)) {
		ftl_l2p_set(dev, entry->lba, ppa);
		return;
	}

	/* If the L2P's PPA is different from what we expected, we don't need to */
	/* do anything (someone's already overwritten our data). */
	if (ftl_rwb_entry_weak(entry) && !ftl_ppa_cmp(prev_ppa, entry->ppa)) {
		return;
	}

	if (ftl_ppa_cached(prev_ppa)) {
		assert(!ftl_rwb_entry_weak(entry));
		prev = ftl_rwb_entry_from_offset(dev->rwb, prev_ppa.offset);
		pthread_spin_lock(&prev->lock);

		/* Re-read the L2P under the lock to protect against updates */
		/* to this LBA from other threads */
		prev_ppa = ftl_l2p_get(dev, entry->lba);

		/* If the entry is no longer in cache, another write has been */
		/* scheduled in the meantime, so we have to invalidate its LBA */
		if (!ftl_ppa_cached(prev_ppa)) {
			ftl_invalidate_addr(dev, prev_ppa);
		}

		/* If previous entry is part of cache, remove and invalidate it */
		if (ftl_rwb_entry_valid(prev)) {
			ftl_invalidate_addr(dev, prev->ppa);
			ftl_rwb_entry_invalidate(prev);
		}

		ftl_l2p_set(dev, entry->lba, ppa);
		pthread_spin_unlock(&prev->lock);
		return;
	}

	/* Lock the band containing the previous PPA. This assures atomic changes to */
	/* the L2P as well as metadata. The valid bits in metadata are used to */
	/* check the validity of weak writes. */
	band = ftl_band_from_ppa(dev, prev_ppa);
	pthread_spin_lock(&band->md.lock);

	valid = ftl_invalidate_addr_unlocked(dev, prev_ppa);

	/* If the address has been invalidated already, we don't want to update */
	/* the L2P for weak writes, as it means the write is no longer valid. */
	if (!ftl_rwb_entry_weak(entry) || valid) {
		ftl_l2p_set(dev, entry->lba, ppa);
	}

	pthread_spin_unlock(&band->md.lock);
}
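
/* To summarize the cases above: a brand new LBA is simply mapped; a */
/* previous mapping still held in the write buffer is invalidated under */
/* the entry lock; and a previous on-disk mapping is invalidated under the */
/* band metadata lock, where weak (relocation) writes additionally back */
/* off if user data overwrote the LBA in the meantime. */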

static int
ftl_submit_write(struct ftl_wptr *wptr, struct ftl_io *io)
{
	struct spdk_ftl_dev	*dev = io->dev;
	struct iovec		*iov = ftl_io_iovec(io);
	int			rc = 0;
	size_t			i;

	for (i = 0; i < io->iov_cnt; ++i) {
		assert(iov[i].iov_len > 0);
		assert(iov[i].iov_len / PAGE_SIZE == dev->xfer_size);

		ftl_trace_submission(dev, io, wptr->ppa, iov[i].iov_len / PAGE_SIZE);
		rc = spdk_nvme_ns_cmd_write_with_md(dev->ns, ftl_get_write_qpair(dev),
						    iov[i].iov_base, ftl_io_get_md(io),
						    ftl_ppa_addr_pack(dev, wptr->ppa),
						    iov[i].iov_len / PAGE_SIZE,
						    ftl_io_cmpl_cb, io, 0, 0, 0);
		if (rc) {
			SPDK_ERRLOG("spdk_nvme_ns_cmd_write failed with status:%d, ppa:%lu\n",
				    rc, wptr->ppa.ppa);
			io->status = -EIO;
			break;
		}

		io->pos += iov[i].iov_len / PAGE_SIZE;
		ftl_io_inc_req(io);
		ftl_wptr_advance(wptr, iov[i].iov_len / PAGE_SIZE);
	}

	if (ftl_io_done(io)) {
		ftl_io_complete(io);
	}

	return rc;
}

static void
ftl_flush_pad_batch(struct spdk_ftl_dev *dev)
{
	struct ftl_rwb *rwb = dev->rwb;
	size_t size;

	size = ftl_rwb_num_acquired(rwb, FTL_RWB_TYPE_INTERNAL) +
	       ftl_rwb_num_acquired(rwb, FTL_RWB_TYPE_USER);

	/* There must be something in the RWB, otherwise the flush */
	/* wouldn't be waiting for anything */
	assert(size > 0);

	/* Only add padding when there are fewer than xfer_size */
	/* entries in the buffer. Otherwise we just have to wait */
	/* for the entries to become ready. */
	if (size < dev->xfer_size) {
		ftl_rwb_pad(dev, dev->xfer_size - (size % dev->xfer_size));
	}
}

static int
ftl_wptr_process_writes(struct ftl_wptr *wptr)
{
	struct spdk_ftl_dev	*dev = wptr->dev;
	struct ftl_rwb_batch	*batch;
	struct ftl_rwb_entry	*entry;
	struct ftl_io		*io;
	struct ftl_ppa		ppa, prev_ppa;

	/* Make sure the band is prepared for writing */
	if (!ftl_wptr_ready(wptr)) {
		return 0;
	}

	if (dev->halt) {
		ftl_process_shutdown(dev);
	}

	batch = ftl_rwb_pop(dev->rwb);
	if (!batch) {
		/* If there are queued flush requests we need to pad the RWB to */
		/* force out remaining entries */
		if (!LIST_EMPTY(&dev->flush_list)) {
			ftl_flush_pad_batch(dev);
		}

		return 0;
	}

	io = ftl_io_rwb_init(dev, wptr->band, batch, ftl_write_cb);
	if (!io) {
		goto error;
	}

	ppa = wptr->ppa;
	ftl_rwb_foreach(entry, batch) {
		entry->ppa = ppa;

		if (entry->lba != FTL_LBA_INVALID) {
			pthread_spin_lock(&entry->lock);
			prev_ppa = ftl_l2p_get(dev, entry->lba);

			/* If the l2p was updated in the meantime, don't update band's metadata */
			if (ftl_ppa_cached(prev_ppa) && prev_ppa.offset == entry->pos) {
				/* Setting entry's cache bit needs to be done after metadata */
				/* within the band is updated to make sure that writes */
				/* invalidating the entry clear the metadata as well */
				ftl_band_set_addr(wptr->band, entry->lba, entry->ppa);
				ftl_rwb_entry_set_valid(entry);
			}
			pthread_spin_unlock(&entry->lock);
		}

		ftl_trace_rwb_pop(dev, entry);
		ftl_update_rwb_stats(dev, entry);

		ppa = ftl_band_next_ppa(wptr->band, ppa, 1);
	}

	SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "Write ppa:%lx, %lx\n", wptr->ppa.ppa,
		      ftl_ppa_addr_pack(dev, wptr->ppa));

	if (ftl_submit_write(wptr, io)) {
		/* TODO: we need some recovery here */
		assert(0 && "Write submit failed");
		if (ftl_io_done(io)) {
			ftl_io_free(io);
		}
	}

	return dev->xfer_size;
error:
	ftl_rwb_batch_revert(batch);
	return 0;
}

static int
ftl_process_writes(struct spdk_ftl_dev *dev)
{
	struct ftl_wptr *wptr, *twptr;
	size_t num_active = 0;
	enum ftl_band_state state;

	LIST_FOREACH_SAFE(wptr, &dev->wptr_list, list_entry, twptr) {
		ftl_wptr_process_writes(wptr);
		state = wptr->band->state;

		if (state != FTL_BAND_STATE_FULL &&
		    state != FTL_BAND_STATE_CLOSING &&
		    state != FTL_BAND_STATE_CLOSED) {
			num_active++;
		}
	}

	if (num_active < 1) {
		ftl_add_wptr(dev);
	}

	return 0;
}

static void
ftl_rwb_entry_fill(struct ftl_rwb_entry *entry, struct ftl_io *io)
{
	struct ftl_band *band;

	memcpy(entry->data, ftl_io_iovec_addr(io), FTL_BLOCK_SIZE);

	if (ftl_rwb_entry_weak(entry)) {
		band = ftl_band_from_ppa(io->dev, io->ppa);
		entry->ppa = ftl_band_next_ppa(band, io->ppa, io->pos);
	}

	entry->trace = io->trace;

	if (entry->md) {
		memcpy(entry->md, &entry->lba, sizeof(io->lba));
	}
}

static int
ftl_rwb_fill(struct ftl_io *io)
{
	struct spdk_ftl_dev *dev = io->dev;
	struct ftl_rwb_entry *entry;
	struct ftl_ppa ppa = { .cached = 1 };
	int flags = ftl_rwb_flags_from_io(io);
	uint64_t lba;

	for (; io->pos < io->lbk_cnt; ++io->pos) {
		lba = ftl_io_current_lba(io);
		if (lba == FTL_LBA_INVALID) {
			ftl_io_update_iovec(io, 1);
			continue;
		}

		entry = ftl_acquire_entry(dev, flags);
		if (!entry) {
			return -EAGAIN;
		}

		entry->lba = lba;
		ftl_rwb_entry_fill(entry, io);

		ppa.offset = entry->pos;

		ftl_io_update_iovec(io, 1);
		ftl_update_l2p(dev, entry, ppa);

		/* Needs to be done after L2P is updated to avoid race with */
		/* write completion callback when it's processed faster than */
		/* L2P is set in update_l2p(). */
		ftl_rwb_push(entry);
		ftl_trace_rwb_fill(dev, io);
	}

	ftl_io_complete(io);
	return 0;
}
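
/* A user write therefore completes as soon as its data sits in the RWB */
/* and the L2P points at the cache entry; the media write happens later, */
/* when ftl_wptr_process_writes() pops a full batch and submits it at the */
/* write pointer's current PPA. */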

static bool
ftl_dev_needs_defrag(struct spdk_ftl_dev *dev)
{
	const struct spdk_ftl_limit *limit = ftl_get_limit(dev, SPDK_FTL_LIMIT_START);

	if (ftl_reloc_is_halted(dev->reloc)) {
		return false;
	}

	if (dev->df_band) {
		return false;
	}

	if (dev->num_free <= limit->thld) {
		return true;
	}

	return false;
}

static double
ftl_band_calc_merit(struct ftl_band *band, size_t *threshold_valid)
{
	size_t usable, valid, invalid;
	double vld_ratio;

	/* If the band doesn't have any usable lbks it's of no use */
	usable = ftl_band_num_usable_lbks(band);
	if (usable == 0) {
		return 0.0;
	}

	valid = threshold_valid ? (usable - *threshold_valid) : band->md.num_vld;
	invalid = usable - valid;

	/* Add one to avoid division by 0 */
	vld_ratio = (double)invalid / (double)(valid + 1);
	return vld_ratio * ftl_band_age(band);
}
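
/* An illustrative merit calculation (made-up numbers): a band with 1000 */
/* usable lbks, of which 250 are still valid, yields */
/* vld_ratio = 750 / 251, roughly 3.0, scaled by the band's age, so old */
/* bands full of stale data are reclaimed first while recently written */
/* bands are left alone. */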

static bool
ftl_band_needs_defrag(struct ftl_band *band, struct spdk_ftl_dev *dev)
{
	struct spdk_ftl_conf *conf = &dev->conf;
	size_t thld_vld;

	/* If we're in dire need of free bands, every band is worth defragging */
	if (ftl_current_limit(dev) == SPDK_FTL_LIMIT_CRIT) {
		return true;
	}

	thld_vld = (ftl_band_num_usable_lbks(band) * conf->defrag.invalid_thld) / 100;

	return band->merit > ftl_band_calc_merit(band, &thld_vld);
}

static struct ftl_band *
ftl_select_defrag_band(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band, *mband = NULL;
	double merit = 0;

	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
		assert(band->state == FTL_BAND_STATE_CLOSED);
		band->merit = ftl_band_calc_merit(band, NULL);
		if (band->merit > merit) {
			merit = band->merit;
			mband = band;
		}
	}

	if (mband && !ftl_band_needs_defrag(mband, dev)) {
		mband = NULL;
	}

	return mband;
}

static void
ftl_process_relocs(struct spdk_ftl_dev *dev)
{
	if (ftl_dev_needs_defrag(dev)) {
		dev->df_band = ftl_select_defrag_band(dev);
		if (dev->df_band) {
			ftl_reloc_add(dev->reloc, dev->df_band, 0, ftl_num_band_lbks(dev), 0);
		}
	}

	ftl_reloc(dev->reloc);
}

int
ftl_current_limit(const struct spdk_ftl_dev *dev)
{
	return dev->limit;
}

void
spdk_ftl_dev_get_attrs(const struct spdk_ftl_dev *dev, struct spdk_ftl_attrs *attrs)
{
	attrs->uuid = dev->uuid;
	attrs->lbk_cnt = dev->num_lbas;
	attrs->lbk_size = FTL_BLOCK_SIZE;
	attrs->range = dev->range;
}

static void
_ftl_io_write(void *ctx)
{
	ftl_io_write((struct ftl_io *)ctx);
}

int
ftl_io_write(struct ftl_io *io)
{
	struct spdk_ftl_dev *dev = io->dev;

	/* For normal IOs we just need to copy the data onto the rwb */
	if (!(io->flags & FTL_IO_MD)) {
		return ftl_rwb_fill(io);
	}

	/* Metadata has its own buffer, so it doesn't have to be copied. Just */
	/* send it to the core thread and schedule the write immediately. */
	if (ftl_check_core_thread(dev)) {
		return ftl_submit_write(ftl_wptr_from_band(io->band), io);
	}

	spdk_thread_send_msg(ftl_get_core_thread(dev), _ftl_io_write, io);

	return 0;
}

static int
_spdk_ftl_write(struct ftl_io *io)
{
	int rc;

	rc = ftl_io_write(io);
	if (rc == -EAGAIN) {
		spdk_thread_send_msg(spdk_io_channel_get_thread(io->ch),
				     _ftl_write, io);
		return 0;
	}

	if (rc) {
		ftl_io_free(io);
	}

	return rc;
}

static void
_ftl_write(void *ctx)
{
	_spdk_ftl_write(ctx);
}

int
spdk_ftl_write(struct spdk_ftl_dev *dev, struct spdk_io_channel *ch, uint64_t lba, size_t lba_cnt,
	       struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_arg)
{
	struct ftl_io *io;

	if (iov_cnt == 0 || iov_cnt > FTL_MAX_IOV) {
		return -EINVAL;
	}

	if (lba_cnt == 0) {
		return -EINVAL;
	}

	if (lba_cnt != ftl_iovec_num_lbks(iov, iov_cnt)) {
		return -EINVAL;
	}

	if (!dev->initialized) {
		return -EBUSY;
	}

	io = ftl_io_alloc(ch);
	if (!io) {
		return -ENOMEM;
	}

	ftl_io_user_init(dev, io, lba, lba_cnt, iov, iov_cnt, cb_fn, cb_arg, FTL_IO_WRITE);
	return _spdk_ftl_write(io);
}
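
/* A minimal usage sketch (hypothetical caller and buffer, error handling
 * omitted):
 *
 *	struct iovec iov = {
 *		.iov_base = buf,
 *		.iov_len = 8 * FTL_BLOCK_SIZE,
 *	};
 *
 *	rc = spdk_ftl_write(dev, ch, lba, 8, &iov, 1, write_cb, cb_arg);
 *
 * The lba count (8 here) has to match ftl_iovec_num_lbks(&iov, 1), or the
 * call is rejected with -EINVAL.
 */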

void
ftl_io_read(struct ftl_io *io)
{
	struct spdk_ftl_dev *dev = io->dev;
	ftl_next_ppa_fn	next_ppa;

	if (ftl_check_read_thread(dev)) {
		if (ftl_io_mode_ppa(io)) {
			next_ppa = ftl_ppa_read_next_ppa;
		} else {
			next_ppa = ftl_lba_read_next_ppa;
		}

		ftl_submit_read(io, next_ppa, NULL);
		return;
	}

	spdk_thread_send_msg(ftl_get_read_thread(dev), _ftl_read, io);
}

static void
_ftl_read(void *arg)
{
	ftl_io_read((struct ftl_io *)arg);
}

int
spdk_ftl_read(struct spdk_ftl_dev *dev, struct spdk_io_channel *ch, uint64_t lba, size_t lba_cnt,
	      struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_arg)
{
	struct ftl_io *io;

	if (iov_cnt == 0 || iov_cnt > FTL_MAX_IOV) {
		return -EINVAL;
	}

	if (lba_cnt == 0) {
		return -EINVAL;
	}

	if (lba_cnt != ftl_iovec_num_lbks(iov, iov_cnt)) {
		return -EINVAL;
	}

	if (!dev->initialized) {
		return -EBUSY;
	}

	io = ftl_io_alloc(ch);
	if (!io) {
		return -ENOMEM;
	}

	ftl_io_user_init(dev, io, lba, lba_cnt, iov, iov_cnt, cb_fn, cb_arg, FTL_IO_READ);
	ftl_io_read(io);
	return 0;
}

static struct ftl_flush *
ftl_flush_init(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg)
{
	struct ftl_flush *flush;
	struct ftl_rwb *rwb = dev->rwb;

	flush = calloc(1, sizeof(*flush));
	if (!flush) {
		return NULL;
	}

	flush->bmap = spdk_bit_array_create(ftl_rwb_num_batches(rwb));
	if (!flush->bmap) {
		goto error;
	}

	flush->dev = dev;
	flush->cb.fn = cb_fn;
	flush->cb.ctx = cb_arg;

	return flush;
error:
	free(flush);
	return NULL;
}

static void
_ftl_flush(void *ctx)
{
	struct ftl_flush *flush = ctx;
	struct spdk_ftl_dev *dev = flush->dev;
	struct ftl_rwb *rwb = dev->rwb;
	struct ftl_rwb_batch *batch;

	/* Attach flush object to all non-empty batches */
	ftl_rwb_foreach_batch(batch, rwb) {
		if (!ftl_rwb_batch_empty(batch)) {
			spdk_bit_array_set(flush->bmap, ftl_rwb_batch_get_offset(batch));
			flush->num_req++;
		}
	}

	LIST_INSERT_HEAD(&dev->flush_list, flush, list_entry);

	/* If the RWB was already empty, the flush can be completed right away */
	if (!flush->num_req) {
		ftl_complete_flush(flush);
	}
}
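
/* Flush bookkeeping: each non-empty batch sets its bit in flush->bmap and */
/* bumps num_req; as those batches are written out, ftl_process_flush() */
/* clears the bit and decrements num_req, and the user callback fires once */
/* the count reaches zero. */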

int
spdk_ftl_flush(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg)
{
	struct ftl_flush *flush;

	if (!dev->initialized) {
		return -EBUSY;
	}

	flush = ftl_flush_init(dev, cb_fn, cb_arg);
	if (!flush) {
		return -ENOMEM;
	}

	spdk_thread_send_msg(ftl_get_core_thread(dev), _ftl_flush, flush);
	return 0;
}

void
ftl_process_anm_event(struct ftl_anm_event *event)
{
	SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "Unconsumed ANM received for dev: %p...\n", event->dev);
	ftl_anm_event_complete(event);
}

int
ftl_task_read(void *ctx)
{
	struct ftl_thread *thread = ctx;
	struct spdk_ftl_dev *dev = thread->dev;
	struct spdk_nvme_qpair *qpair = ftl_get_read_qpair(dev);

	if (dev->halt) {
		if (ftl_shutdown_complete(dev)) {
			spdk_poller_unregister(&thread->poller);
			return 0;
		}
	}

	return spdk_nvme_qpair_process_completions(qpair, 0);
}

int
ftl_task_core(void *ctx)
{
	struct ftl_thread *thread = ctx;
	struct spdk_ftl_dev *dev = thread->dev;
	struct spdk_nvme_qpair *qpair = ftl_get_write_qpair(dev);

	if (dev->halt) {
		if (ftl_shutdown_complete(dev)) {
			spdk_poller_unregister(&thread->poller);
			return 0;
		}
	}

	ftl_process_writes(dev);
	spdk_nvme_qpair_process_completions(qpair, 0);
	ftl_process_relocs(dev);

	return 0;
}

SPDK_LOG_REGISTER_COMPONENT("ftl_core", SPDK_LOG_FTL_CORE)