Lines matching defs:dev (references to dev within function definitions in the SPDK FTL core)
33 struct spdk_ftl_dev *dev = io->dev;
35 ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_USER, bdev_io);
41 ftl_trace_completion(dev, io, FTL_TRACE_COMPLETION_DISK);
61 ftl_get_limit(const struct spdk_ftl_dev *dev, int type)
64 return dev->conf.limits[type];
68 ftl_shutdown_complete(struct spdk_ftl_dev *dev)
72 if (dev->num_inflight) {
76 if (!ftl_nv_cache_is_halted(&dev->nv_cache)) {
77 ftl_nv_cache_halt(&dev->nv_cache);
81 if (!ftl_writer_is_halted(&dev->writer_user)) {
82 ftl_writer_halt(&dev->writer_user);
86 if (!ftl_reloc_is_halted(dev->reloc)) {
87 ftl_reloc_halt(dev->reloc);
91 if (!ftl_writer_is_halted(&dev->writer_gc)) {
92 ftl_writer_halt(&dev->writer_gc);
96 if (!ftl_nv_cache_chunks_busy(&dev->nv_cache)) {
100 for (i = 0; i < ftl_get_num_bands(dev); ++i) {
101 if (dev->bands[i].queue_depth ||
102 dev->bands[i].md->state == FTL_BAND_STATE_CLOSING) {
107 if (!ftl_l2p_is_halted(dev)) {
108 ftl_l2p_halt(dev);
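
The ftl_shutdown_complete() matches above (lines 68-108) implement a staged halt: the function bails out at the first subsystem that is still busy, and the caller polls it again. A hedged reconstruction of that shape, using only names visible above (the return-false-after-each-halt structure is an assumption):

    static bool
    shutdown_complete_sketch(struct spdk_ftl_dev *dev)
    {
            if (dev->num_inflight) {
                    return false;   /* user IO still in flight */
            }
            if (!ftl_nv_cache_is_halted(&dev->nv_cache)) {
                    ftl_nv_cache_halt(&dev->nv_cache);
                    return false;   /* halt requested; re-check next poll */
            }
            /* ... writers, reloc, chunks, and bands gated the same way ... */
            if (!ftl_l2p_is_halted(dev)) {
                    ftl_l2p_halt(dev);      /* L2P goes down last, so earlier
                                             * stages can still update mappings */
                    return false;
            }
            return true;
    }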
116 ftl_apply_limits(struct spdk_ftl_dev *dev)
119 struct ftl_stats *stats = &dev->stats;
123 dev->limit = SPDK_FTL_LIMIT_MAX;
126 limit = ftl_get_limit(dev, i);
128 if (dev->num_free <= limit) {
130 dev->limit = i;
135 ftl_trace_limits(dev, dev->limit, dev->num_free);
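
ftl_apply_limits() (lines 116-135) scans the configured thresholds and stops at the first one the free-band count has crossed. A hedged sketch of the scan at lines 123-130; that index 0 is the most severe level is an assumption, not shown above:

    dev->limit = SPDK_FTL_LIMIT_MAX;        /* no limit active by default */
    for (i = 0; i < SPDK_FTL_LIMIT_MAX; ++i) {
            if (dev->num_free <= ftl_get_limit(dev, i)) {
                    dev->limit = i;         /* first crossed threshold wins */
                    break;
            }
    }

ftl_needs_reloc() (line 293) reuses one of these thresholds, SPDK_FTL_LIMIT_START, to decide when garbage collection should begin reclaiming bands.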
139 ftl_invalidate_addr(struct spdk_ftl_dev *dev, ftl_addr addr)
144 if (ftl_addr_in_nvc(dev, addr)) {
145 ftl_bitmap_clear(dev->valid_map, addr);
149 band = ftl_band_from_addr(dev, addr);
154 if (ftl_bitmap_get(dev->valid_map, addr)) {
156 ftl_bitmap_clear(dev->valid_map, addr);
177 struct spdk_ftl_dev *dev = io->dev;
182 *addr = ftl_l2p_get(dev, ftl_io_current_lba(io));
190 addr_cached = ftl_addr_in_nvc(dev, *addr);
193 next_addr = ftl_l2p_get(dev, ftl_io_get_lba(io, io->pos + i));
203 if (addr_cached != ftl_addr_in_nvc(dev, next_addr)) {
230 struct spdk_ftl_dev *dev = io->dev;
247 ftl_trace_submission(dev, io, addr, num_blocks);
249 if (ftl_addr_in_nvc(dev, addr)) {
252 rc = spdk_bdev_read_blocks(dev->base_bdev_desc, dev->base_ioch,
262 if (ftl_addr_in_nvc(dev, addr)) {
263 bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc);
264 ch = dev->nv_cache.cache_ioch;
266 bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
267 ch = dev->base_ioch;
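
Lines 249-267 steer a read by where the logical block currently lives: ftl_addr_in_nvc() selects the non-volatile cache descriptor/channel pair, otherwise the base-device pair is used. A hedged sketch of the dispatch (buffer setup and the translation from ftl_addr to a bdev block offset are elided):

    struct spdk_bdev_desc *desc;
    struct spdk_io_channel *ch;

    if (ftl_addr_in_nvc(dev, addr)) {
            desc = dev->nv_cache.bdev_desc;
            ch = dev->nv_cache.cache_ioch;
    } else {
            desc = dev->base_bdev_desc;
            ch = dev->base_ioch;
    }
    /* block_offset: translated from addr; buf/cb/cb_arg from the caller */
    rc = spdk_bdev_read_blocks(desc, ch, buf, block_offset, num_blocks, cb, cb_arg);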
291 ftl_needs_reloc(struct spdk_ftl_dev *dev)
293 size_t limit = ftl_get_limit(dev, SPDK_FTL_LIMIT_START);
295 if (dev->num_free <= limit) {
303 spdk_ftl_dev_get_attrs(const struct spdk_ftl_dev *dev, struct spdk_ftl_attrs *attrs,
306 attrs->num_blocks = dev->num_lbas;
308 attrs->optimum_io_size = dev->xfer_size;
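
spdk_ftl_dev_get_attrs() (line 303) fills a caller-provided attribute struct. A hedged usage sketch; the size third parameter and the exact field set are assumed from the public spdk/ftl.h header:

    #include "spdk/ftl.h"

    struct spdk_ftl_attrs attrs = {0};

    spdk_ftl_dev_get_attrs(dev, &attrs, sizeof(attrs));
    /* attrs.num_blocks is the device's logical capacity in blocks;
     * attrs.optimum_io_size is the transfer size the FTL writes at. */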
313 ftl_io_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
336 ftl_l2p_pin_skip(io->dev, ftl_io_pin_cb, io, &io->l2p_pin_ctx);
339 ftl_l2p_pin(io->dev, io->lba, io->num_blocks,
348 struct spdk_ftl_dev *dev = io->dev;
359 TAILQ_INSERT_TAIL(&dev->rd_sq, io, queue_entry);
362 TAILQ_INSERT_TAIL(&dev->wr_sq, io, queue_entry);
365 TAILQ_INSERT_TAIL(&dev->trim_sq, io, queue_entry);
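
Lines 359-365 show one submission queue per IO class, each drained separately by ftl_process_io_queue(). The surrounding dispatch is presumably a switch on the IO type; a hedged reconstruction (the FTL_IO_* enumerator names are assumed):

    switch (io->type) {             /* enumerator names assumed */
    case FTL_IO_READ:
            TAILQ_INSERT_TAIL(&dev->rd_sq, io, queue_entry);
            break;
    case FTL_IO_WRITE:
            TAILQ_INSERT_TAIL(&dev->wr_sq, io, queue_entry);
            break;
    case FTL_IO_TRIM:
            TAILQ_INSERT_TAIL(&dev->trim_sq, io, queue_entry);
            break;
    }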
374 queue_io(struct spdk_ftl_dev *dev, struct ftl_io *io)
388 spdk_ftl_writev(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
403 FTL_ERRLOG(dev, "Invalid IO vector to handle, device %s, LBA %"PRIu64"\n",
404 dev->conf.name, lba);
408 if (!dev->initialized) {
417 return queue_io(dev, io);
421 spdk_ftl_readv(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
435 FTL_ERRLOG(dev, "Invalid IO vector to handle, device %s, LBA %"PRIu64"\n",
436 dev->conf.name, lba);
440 if (!dev->initialized) {
449 return queue_io(dev, io);
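
spdk_ftl_writev() and spdk_ftl_readv() (lines 388 and 421) are the public IO entry points; both validate the vector, reject IO on an uninitialized device, and end in queue_io(). A hedged usage sketch; the parameter order and the spdk_ftl_fn callback shape are assumed from spdk/ftl.h, and the ftl_io storage size from spdk_ftl_io_size():

    static void
    io_done(void *cb_arg, int status)
    {
            /* status == 0 on success */
    }

    /* caller allocates: struct ftl_io *io = calloc(1, spdk_ftl_io_size()); */
    struct iovec iov = { .iov_base = buf, .iov_len = num_blocks * block_size };
    int rc = spdk_ftl_readv(dev, io, ioch, lba, num_blocks, &iov, 1, io_done, NULL);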
453 ftl_trim(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
463 return queue_io(dev, io);
467 spdk_ftl_unmap(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
471 uint64_t alignment = dev->layout.l2p.lbas_in_page;
481 if (lba + lba_cnt > dev->num_lbas) {
485 if (!dev->initialized) {
507 rc = ftl_trim(dev, io, ch, lba, lba_cnt, cb_fn, cb_arg);
509 rc = ftl_mngt_trim(dev, lba, lba_cnt, cb_fn, cb_arg);
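
spdk_ftl_unmap() (lines 467-509) bounds-checks the range against dev->num_lbas and then picks one of two paths; judging by the alignment computed at line 471, ranges aligned to the L2P page granularity take the queued ftl_trim() path and the rest fall back to ftl_mngt_trim(). A hedged sketch of that choice (the exact predicate is an assumption):

    uint64_t align = dev->layout.l2p.lbas_in_page;

    if ((lba % align) == 0 && (lba_cnt % align) == 0) {
            rc = ftl_trim(dev, io, ch, lba, lba_cnt, cb_fn, cb_arg);   /* queued path */
    } else {
            rc = ftl_mngt_trim(dev, lba, lba_cnt, cb_fn, cb_arg);      /* managed path */
    }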
537 ftl_process_io_channel(struct spdk_ftl_dev *dev, struct ftl_io_channel *ioch)
554 ftl_trim_log_clear(struct spdk_ftl_dev *dev)
556 struct ftl_trim_log *log = ftl_md_get_buffer(dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_LOG]);
564 io->dev->trim_qd--;
569 TAILQ_INSERT_HEAD(&io->dev->trim_sq, io, queue_entry);
590 struct spdk_ftl_dev *dev = io->dev;
591 struct ftl_md *trim_log = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_LOG];
594 cb, io, &dev->trim_md_io_entry_ctx);
601 struct spdk_ftl_dev *dev = io->dev;
606 ftl_trim_log_clear(dev);
614 struct spdk_ftl_dev *dev = io->dev;
615 struct ftl_md *trim_md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];
626 first = io->lba / dev->layout.l2p.lbas_in_page;
627 entries = io->num_blocks / dev->layout.l2p.lbas_in_page;
637 ftl_trim_md_cb, io, &dev->trim_md_io_entry_ctx);
641 ftl_set_trim_map(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t num_blocks, uint64_t seq_id)
644 uint64_t lbas_in_page = dev->layout.l2p.lbas_in_page;
645 struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];
655 ftl_bitmap_set(dev->trim_map, i);
660 log = ftl_md_get_buffer(dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_LOG]);
669 struct spdk_ftl_dev *dev = io->dev;
672 seq_id = ftl_nv_cache_acquire_trim_seq_id(&dev->nv_cache);
677 dev->trim_in_progress = true;
678 dev->trim_qd++;
680 dev->sb_shm->trim.start_lba = io->lba;
681 dev->sb_shm->trim.num_blocks = io->num_blocks;
682 dev->sb_shm->trim.seq_id = seq_id;
683 dev->sb_shm->trim.in_progress = true;
684 ftl_set_trim_map(dev, io->lba, io->num_blocks, seq_id);
686 dev->sb_shm->trim.in_progress = false;
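
Lines 680-686 record the trim in the shared-memory superblock with write-ahead ordering: the range, sequence id, and an in_progress flag go in before the trim map is touched, and the flag is cleared only afterwards, so a crash mid-update presumably leaves enough state to replay the trim on recovery. Annotated restatement:

    dev->sb_shm->trim.start_lba   = io->lba;
    dev->sb_shm->trim.num_blocks  = io->num_blocks;
    dev->sb_shm->trim.seq_id      = seq_id;
    dev->sb_shm->trim.in_progress = true;    /* crash after this: trim replayable */
    ftl_set_trim_map(dev, io->lba, io->num_blocks, seq_id);
    dev->sb_shm->trim.in_progress = false;   /* map update fully applied */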
693 ftl_process_io_queue(struct spdk_ftl_dev *dev)
701 if (!TAILQ_EMPTY(&dev->rd_sq)) {
702 io = TAILQ_FIRST(&dev->rd_sq);
703 TAILQ_REMOVE(&dev->rd_sq, io, queue_entry);
706 ftl_add_io_activity(dev);
709 while (!TAILQ_EMPTY(&dev->wr_sq) && !ftl_nv_cache_throttle(dev)) {
710 io = TAILQ_FIRST(&dev->wr_sq);
711 TAILQ_REMOVE(&dev->wr_sq, io, queue_entry);
714 TAILQ_INSERT_HEAD(&dev->wr_sq, io, queue_entry);
717 ftl_add_io_activity(dev);
720 if (!TAILQ_EMPTY(&dev->trim_sq) && dev->trim_qd == 0) {
721 io = TAILQ_FIRST(&dev->trim_sq);
722 TAILQ_REMOVE(&dev->trim_sq, io, queue_entry);
732 TAILQ_INSERT_HEAD(&dev->trim_sq, io, queue_entry);
734 ftl_add_io_activity(dev);
738 TAILQ_FOREACH(ioch, &dev->ioch_queue, entry) {
739 ftl_process_io_channel(dev, ioch);
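
A single ftl_process_io_queue() pass (lines 693-739) treats the three queues differently: one read per pass, writes drained until the NV cache throttles (with head re-insertion at line 714 when a write cannot progress), a new trim only when none is outstanding (trim_qd == 0), and finally a processing slot for every registered channel. A hedged sketch of the write drain:

    while (!TAILQ_EMPTY(&dev->wr_sq) && !ftl_nv_cache_throttle(dev)) {
            io = TAILQ_FIRST(&dev->wr_sq);
            TAILQ_REMOVE(&dev->wr_sq, io, queue_entry);
            if (/* submit could not make progress (check assumed) */ 0) {
                    TAILQ_INSERT_HEAD(&dev->wr_sq, io, queue_entry);
                    break;
            }
            ftl_add_io_activity(dev);
    }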
746 struct spdk_ftl_dev *dev = ctx;
747 uint64_t io_activity_total_old = dev->stats.io_activity_total;
749 if (dev->halt && ftl_shutdown_complete(dev)) {
750 spdk_poller_unregister(&dev->core_poller);
754 ftl_process_io_queue(dev);
755 ftl_writer_run(&dev->writer_user);
756 ftl_writer_run(&dev->writer_gc);
757 ftl_reloc(dev->reloc);
758 ftl_nv_cache_process(dev);
759 ftl_l2p_process(dev);
761 if (io_activity_total_old != dev->stats.io_activity_total) {
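
The core poller (lines 746-761) snapshots stats.io_activity_total before the pass and compares it afterwards, presumably returning SPDK_POLLER_BUSY only when something moved; this is the standard SPDK idiom for letting the reactor idle efficiently. A hedged sketch assembled from the matched lines:

    static int
    ftl_core_poller_sketch(void *ctx)
    {
            struct spdk_ftl_dev *dev = ctx;
            uint64_t before = dev->stats.io_activity_total;

            if (dev->halt && ftl_shutdown_complete(dev)) {
                    spdk_poller_unregister(&dev->core_poller);
                    return SPDK_POLLER_IDLE;
            }

            ftl_process_io_queue(dev);
            ftl_writer_run(&dev->writer_user);
            ftl_writer_run(&dev->writer_gc);
            ftl_reloc(dev->reloc);
            ftl_nv_cache_process(dev);
            ftl_l2p_process(dev);

            return before != dev->stats.io_activity_total ?
                   SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
    }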
769 ftl_band_get_next_free(struct spdk_ftl_dev *dev)
773 if (!TAILQ_EMPTY(&dev->free_bands)) {
774 band = TAILQ_FIRST(&dev->free_bands);
775 TAILQ_REMOVE(&dev->free_bands, band, queue_entry);
812 spdk_ftl_dev_set_fast_shutdown(struct spdk_ftl_dev *dev, bool fast_shutdown)
814 assert(dev);
815 dev->conf.fast_shutdown = fast_shutdown;
819 ftl_stats_bdev_io_completed(struct spdk_ftl_dev *dev, enum ftl_stats_type type,
822 struct ftl_stats_entry *stats_entry = &dev->stats.entries[type];
853 spdk_ftl_get_io_channel(struct spdk_ftl_dev *dev)
855 return spdk_get_io_channel(dev);
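
Line 855 shows that the dev pointer itself is the registered io_device handle, so spdk_ftl_get_io_channel() is a thin wrapper over spdk_get_io_channel(). Hedged usage:

    struct spdk_io_channel *ioch = spdk_ftl_get_io_channel(dev);
    /* ... submit IO on this thread via spdk_ftl_readv()/spdk_ftl_writev() ... */
    spdk_put_io_channel(ioch);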
859 ftl_stats_crc_error(struct spdk_ftl_dev *dev, enum ftl_stats_type type)
862 struct ftl_stats_entry *stats_entry = &dev->stats.entries[type];
869 struct spdk_ftl_dev *dev;
890 *stats_ctx->stats = stats_ctx->dev->stats;
898 spdk_ftl_get_stats(struct spdk_ftl_dev *dev, struct ftl_stats *stats, spdk_ftl_stats_fn cb_fn,
909 stats_ctx->dev = dev;
915 rc = spdk_thread_send_msg(dev->core_thread, _ftl_get_stats, stats_ctx);
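
spdk_ftl_get_stats() (lines 898-915) allocates a context and hops to the core thread, where the whole ftl_stats struct is copied by value (line 890) before the callback fires; the caller's buffer must therefore stay alive until the callback runs. A hedged usage sketch (the spdk_ftl_stats_fn callback shape is assumed):

    static void
    stats_done(struct ftl_stats *stats, void *cb_arg)
    {
            /* the snapshot in *stats is complete and consistent here */
    }

    static struct ftl_stats g_stats;        /* must outlive the call */

    int rc = spdk_ftl_get_stats(dev, &g_stats, stats_done, NULL);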