Lines matching defs:md

15 static void io_submit(struct ftl_md *md);
16 static void io_done(struct ftl_md *md);
19 has_mirror(struct ftl_md *md)
21 if (md->region) {
22 if (md->region->mirror_type != FTL_LAYOUT_REGION_TYPE_INVALID) {
23 return md->mirror_enabled;
31 ftl_md_get_mirror(struct ftl_md *md)
33 if (has_mirror(md)) {
34 return md->dev->layout.md[md->region->mirror_type];
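
The hits at lines 19–34 outline the mirror lookup: a metadata object is mirrored only when its region names a valid mirror slot, and the mirror itself is just another metadata object in the device layout table. A minimal sketch of this guard-then-lookup pattern, using simplified stand-in types rather than the real SPDK definitions:

#include <stdbool.h>
#include <stddef.h>

enum region_type {
	REGION_TYPE_SB,
	REGION_TYPE_SB_MIRROR,
	REGION_TYPE_MAX,
	REGION_TYPE_INVALID = REGION_TYPE_MAX
};

struct md;

struct region {
	enum region_type mirror_type;
};

struct dev {
	struct md *layout_md[REGION_TYPE_MAX];
};

struct md {
	struct dev *dev;
	struct region *region;
	bool mirror_enabled;
};

static bool
md_has_mirror(struct md *md)
{
	/* Mirrored only when a region is attached, it names a mirror slot,
	 * and mirroring was not turned off for this object. */
	return md->region &&
	       md->region->mirror_type != REGION_TYPE_INVALID &&
	       md->mirror_enabled;
}

static struct md *
md_get_mirror(struct md *md)
{
	/* The mirror lives in the layout table, indexed by mirror_type. */
	return md_has_mirror(md) ? md->dev->layout_md[md->region->mirror_type] : NULL;
}
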
47 xfer_size(struct ftl_md *md)
49 return ftl_md_xfer_blocks(md->dev) * FTL_BLOCK_SIZE;
53 ftl_md_create_spdk_buf(struct ftl_md *md, uint64_t vss_blksz)
55 md->shm_fd = -1;
56 md->vss_data = NULL;
57 md->data = spdk_zmalloc(md->data_blocks * (FTL_BLOCK_SIZE + vss_blksz), FTL_BLOCK_SIZE, NULL,
60 if (md->data && vss_blksz) {
61 md->vss_data = ((char *)md->data) + md->data_blocks * FTL_BLOCK_SIZE;
66 ftl_md_create_heap(struct ftl_md *md, uint64_t vss_blksz)
68 md->shm_fd = -1;
69 md->vss_data = NULL;
70 md->data = calloc(md->data_blocks, FTL_BLOCK_SIZE + vss_blksz);
72 if (md->data && vss_blksz) {
73 md->vss_data = ((char *)md->data) + md->data_blocks * FTL_BLOCK_SIZE;
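
Both allocators (lines 53–73) make one allocation of data_blocks * (FTL_BLOCK_SIZE + vss_blksz) bytes and carve it in two: the data blocks first, then one VSS record per block. A sketch of that layout arithmetic, with an illustrative block size standing in for FTL_BLOCK_SIZE:

#include <stdlib.h>

#define BLOCK_SIZE 4096ULL	/* illustrative; FTL_BLOCK_SIZE in the source */

struct md_buf {
	void *data;	/* data_blocks * BLOCK_SIZE bytes */
	void *vss_data;	/* data_blocks * vss_blksz bytes, directly after data */
};

static int
md_buf_create(struct md_buf *buf, size_t data_blocks, size_t vss_blksz)
{
	/* One zeroed allocation covers both areas. */
	buf->data = calloc(data_blocks, BLOCK_SIZE + vss_blksz);
	if (buf->data == NULL) {
		return -1;
	}
	/* The VSS area starts immediately after the last data block. */
	buf->vss_data = vss_blksz ? (char *)buf->data + data_blocks * BLOCK_SIZE : NULL;
	return 0;
}
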
78 ftl_md_destroy_spdk_buf(struct ftl_md *md)
80 if (md->data) {
81 spdk_free(md->data);
82 md->data = NULL;
83 md->vss_data = NULL;
88 ftl_md_destroy_heap(struct ftl_md *md)
90 if (md->data) {
91 free(md->data);
92 md->data = NULL;
93 md->vss_data = NULL;
104 ftl_md_setup_obj(struct ftl_md *md, int flags,
117 md->shm_mmap_flags = MAP_SHARED;
118 md->shm_open = ftl_wrapper_open;
119 md->shm_unlink = unlink;
122 spdk_uuid_fmt_lower(uuid_str, SPDK_UUID_STRING_LEN, &md->dev->conf.uuid) ||
123 snprintf(md->name, sizeof(md->name) / sizeof(md->name[0]),
125 md->name[0] = 0;
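
ftl_md_setup_obj (lines 104–125) formats the shared-memory object name from the device UUID with snprintf(), and on formatting failure leaves the name empty; later stages treat name[0] == 0 as a setup error. A sketch of that convention; the "ftl_" prefix and the buffer size here are assumptions, not the exact format string used in the source:

#include <stdio.h>

#define MD_NAME_LEN 128	/* assumed; the source sizes this via sizeof(md->name) */

static void
md_setup_name(char name[MD_NAME_LEN], const char *uuid_str, const char *region_name)
{
	int n = snprintf(name, MD_NAME_LEN, "/ftl_%s_%s", uuid_str, region_name);

	if (n < 0 || n >= MD_NAME_LEN) {
		/* Empty name doubles as the "setup failed" marker (line 125). */
		name[0] = 0;
	}
}
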
130 ftl_md_invalidate_shm(struct ftl_md *md)
132 if (md->dev->sb_shm && md->dev->sb_shm->shm_ready) {
133 md->dev->init_retry = true;
134 md->dev->sb_shm->shm_ready = false;
139 ftl_md_create_shm(struct ftl_md *md, uint64_t vss_blksz, int flags)
147 assert(md->shm_open && md->shm_unlink);
148 md->data = NULL;
149 md->vss_data = NULL;
150 md->shm_sz = 0;
153 if (md->name[0] == 0) {
160 if (md->shm_unlink(md->name) < 0 && errno != ENOENT) {
161 ftl_md_invalidate_shm(md);
168 md->shm_fd = md->shm_open(md->name, open_flags, open_mode);
169 if (md->shm_fd < 0 || fstat(md->shm_fd, &shm_stat) < 0) {
179 md->shm_sz = spdk_divide_round_up(md->data_blocks * FTL_BLOCK_SIZE, shm_stat.st_blksize);
182 vss_blk_offs = md->shm_sz;
185 md->shm_sz += spdk_divide_round_up(md->data_blocks * vss_blksz,
190 md->shm_sz *= shm_stat.st_blksize;
193 if ((shm_stat.st_size == 0 && (ftruncate(md->shm_fd, md->shm_sz) < 0 ||
195 || (shm_stat.st_size > 0 && (size_t)shm_stat.st_size != md->shm_sz)) {
200 shm_ptr = mmap(NULL, md->shm_sz, PROT_READ | PROT_WRITE, md->shm_mmap_flags,
201 md->shm_fd, 0);
206 md->data = shm_ptr;
208 md->vss_data = ((char *)shm_ptr) + vss_blk_offs * shm_stat.st_blksize;
212 if (mlock(md->data, md->shm_sz) < 0) {
216 if (spdk_mem_register(md->data, md->shm_sz)) {
219 md->mem_reg = true;
225 munlock(md->data, md->shm_sz);
228 munmap(md->data, md->shm_sz);
229 md->data = NULL;
230 md->vss_data = NULL;
231 md->shm_sz = 0;
234 if (md->shm_fd >= 0) {
235 close(md->shm_fd);
236 md->shm_unlink(md->name);
237 md->shm_fd = -1;
239 ftl_md_invalidate_shm(md);
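
ftl_md_create_shm (lines 139–239) performs the shared-memory setup in a fixed order: unlink any stale object, shm_open(), size with ftruncate(), mmap(), mlock() to pin the pages, then spdk_mem_register(); each failure unwinds whatever succeeded so far and invalidates the shm state. A condensed sketch of that order using plain POSIX calls only (the spdk_mem_register() step and the st_blksize rounding from lines 179–190 are omitted):

#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

static void *
md_shm_create(const char *name, size_t size, int *fd_out)
{
	void *p;
	int fd;

	/* Drop any stale object left over from a previous run. */
	shm_unlink(name);

	fd = shm_open(name, O_RDWR | O_CREAT, 0600);
	if (fd < 0) {
		return NULL;
	}
	if (ftruncate(fd, (off_t)size) < 0) {
		goto err_close;
	}
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		goto err_close;
	}
	/* Pin the pages so crash-recovery data cannot be paged out. */
	if (mlock(p, size) < 0) {
		munmap(p, size);
		goto err_close;
	}
	*fd_out = fd;
	return p;

err_close:
	close(fd);
	shm_unlink(name);
	return NULL;
}

ftl_md_destroy_shm (lines 243–274) tears the same state down in reverse: unregister, munlock, munmap, close, and finally unlink the named object.
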
243 ftl_md_destroy_shm(struct ftl_md *md, int flags)
245 if (!md->data) {
249 assert(md->shm_sz > 0);
250 if (md->mem_reg) {
251 spdk_mem_unregister(md->data, md->shm_sz);
252 md->mem_reg = false;
256 munlock(md->data, md->shm_sz);
259 munmap(md->data, md->shm_sz);
262 close(md->shm_fd);
264 md->data = NULL;
265 md->vss_data = NULL;
273 assert(md->name[0] != 0 && md->shm_unlink != NULL);
274 md->shm_unlink(md->name);
281 struct ftl_md *md;
283 md = calloc(1, sizeof(*md));
284 if (!md) {
287 md->dev = dev;
288 md->data_blocks = blocks;
289 md->mirror_enabled = true;
293 ftl_md_setup_obj(md, flags, name);
294 ftl_md_create_shm(md, vss_blksz, flags);
296 ftl_md_create_spdk_buf(md, vss_blksz);
299 ftl_md_create_heap(md, vss_blksz);
302 if (!md->data) {
303 free(md);
312 md->entry_vss_dma_buf = spdk_malloc(entry_vss_buf_size, FTL_BLOCK_SIZE,
315 if (!md->entry_vss_dma_buf) {
320 ftl_md_set_region(md, region);
323 return md;
325 ftl_md_destroy(md, ftl_md_destroy_region_flags(dev, region->type));
332 struct ftl_md md = { 0 };
339 md.dev = dev;
340 ftl_md_setup_obj(&md, flags, name);
342 return md.shm_unlink(md.name);
346 ftl_md_destroy(struct ftl_md *md, int flags)
348 if (!md) {
352 if (!md->is_mirror) {
353 ftl_md_free_buf(md, flags);
354 spdk_free(md->entry_vss_dma_buf);
356 free(md);
360 ftl_md_free_buf(struct ftl_md *md, int flags)
362 if (!md) {
366 if (md->shm_fd < 0) {
368 ftl_md_destroy_spdk_buf(md);
371 ftl_md_destroy_heap(md);
374 ftl_md_destroy_shm(md, flags);
379 ftl_md_get_buffer(struct ftl_md *md)
381 return md->data;
385 ftl_md_get_buffer_size(struct ftl_md *md)
387 return md->data_blocks * FTL_BLOCK_SIZE;
416 union ftl_md_vss *ftl_md_get_vss_buffer(struct ftl_md *md)
418 return md->vss_data;
422 io_cleanup(struct ftl_md *md)
424 spdk_dma_free(md->io.data);
425 md->io.data = NULL;
427 spdk_dma_free(md->io.md);
428 md->io.md = NULL;
434 struct ftl_md *md = arg;
436 md->cb(md->dev, md, -EINVAL);
437 io_cleanup(md);
456 struct ftl_md *md = arg;
458 ftl_stats_bdev_io_completed(md->dev, get_bdev_io_ftl_stats_type(md->dev, bdev_io), bdev_io);
461 if (md->io.op == FTL_MD_OP_RESTORE && has_mirror(md)) {
462 md->io.status = -EAGAIN;
464 md->io.status = -EIO;
470 if (md->io.op == FTL_MD_OP_RESTORE) {
471 memcpy(md->data + md->io.data_offset, md->io.data, size);
473 if (md->vss_data) {
474 uint64_t vss_offset = md->io.data_offset / FTL_BLOCK_SIZE;
476 memcpy(md->vss_data + vss_offset, md->io.md, blocks * FTL_MD_VSS_SZ);
480 md->io.address += blocks;
481 md->io.remaining -= blocks;
482 md->io.data_offset += size;
487 io_submit(md);
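
The completion callback (lines 456–487) advances a three-part cursor, device address in blocks, blocks remaining, and bytes into the pinned buffer, then resubmits until remaining reaches zero; the VSS position is derived from the same cursor at one record per block (lines 474–476). The cursor arithmetic in isolation, with an illustrative block size:

#include <stdint.h>

#define BLOCK_SIZE 4096ULL	/* illustrative; FTL_BLOCK_SIZE in the source */

struct io_cursor {
	uint64_t address;	/* next device LBA */
	uint64_t remaining;	/* blocks still to transfer */
	uint64_t data_offset;	/* bytes already copied to/from the pinned buffer */
};

static void
cursor_advance(struct io_cursor *c, uint64_t blocks)
{
	c->address += blocks;			/* line 480 */
	c->remaining -= blocks;			/* line 481 */
	c->data_offset += blocks * BLOCK_SIZE;	/* line 482; size == blocks * BLOCK_SIZE */
}
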
534 struct ftl_md *md = _md;
535 const struct ftl_layout_region *region = md->region;
539 blocks = spdk_min(md->io.remaining, ftl_md_xfer_blocks(md->dev));
541 switch (md->io.op) {
543 rc = read_blocks(md->dev, region->bdev_desc, region->ioch,
544 md->io.data, md->io.md,
545 md->io.address, blocks,
546 read_write_blocks_cb, md);
550 rc = write_blocks(md->dev, region->bdev_desc, region->ioch,
551 md->io.data, md->io.md,
552 md->io.address, blocks,
553 read_write_blocks_cb, md);
562 md->io.bdev_io_wait.bdev = bdev;
563 md->io.bdev_io_wait.cb_fn = read_write_blocks;
564 md->io.bdev_io_wait.cb_arg = md;
565 spdk_bdev_queue_io_wait(bdev, region->ioch, &md->io.bdev_io_wait);
573 io_submit(struct ftl_md *md)
575 if (!md->io.remaining || md->io.status) {
576 io_done(md);
580 if (md->io.op == FTL_MD_OP_PERSIST) {
581 uint64_t blocks = spdk_min(md->io.remaining, ftl_md_xfer_blocks(md->dev));
583 memcpy(md->io.data, md->data + md->io.data_offset, FTL_BLOCK_SIZE * blocks);
585 if (md->vss_data) {
586 uint64_t vss_offset = md->io.data_offset / FTL_BLOCK_SIZE;
588 assert(md->io.md);
589 memcpy(md->io.md, md->vss_data + vss_offset, FTL_MD_VSS_SZ * blocks);
593 read_write_blocks(md);
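
io_submit (lines 573–593) is the persist-side counterpart: before each transfer it stages one xfer-sized chunk from the pinned buffers into the DMA-capable bounce buffers (md->io.data and md->io.md), then issues the block I/O. A sketch of the staging step; pointers and sizes are simplified stand-ins for the md->io fields:

#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE 4096ULL	/* illustrative; FTL_BLOCK_SIZE in the source */
#define VSS_SZ 64ULL		/* illustrative; FTL_MD_VSS_SZ in the source */

static void
stage_persist_chunk(char *dma_data, char *dma_vss,
		    const char *data, const char *vss,
		    uint64_t data_offset, uint64_t blocks)
{
	/* Data chunk: the next 'blocks' blocks starting at data_offset. */
	memcpy(dma_data, data + data_offset, blocks * BLOCK_SIZE);
	if (vss != NULL) {
		/* One VSS record per block already transferred. */
		uint64_t vss_offset = (data_offset / BLOCK_SIZE) * VSS_SZ;

		memcpy(dma_vss, vss + vss_offset, blocks * VSS_SZ);
	}
}
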
597 io_can_start(struct ftl_md *md)
599 assert(NULL == md->io.data);
600 if (NULL != md->io.data) {
605 if (!md->region) {
610 if (md->region->current.blocks > md->data_blocks) {
612 FTL_ERRLOG(md->dev, "Blocks number mismatch between metadata object and"
621 io_prepare(struct ftl_md *md, enum ftl_md_ops op)
623 const struct ftl_layout_region *region = md->region;
627 data_size = xfer_size(md);
628 md->io.data = spdk_zmalloc(data_size, FTL_BLOCK_SIZE, NULL,
630 if (!md->io.data) {
634 if (md->vss_data || md->region->vss_blksz) {
635 meta_size = ftl_md_xfer_blocks(md->dev) * FTL_MD_VSS_SZ;
636 md->io.md = spdk_zmalloc(meta_size, FTL_BLOCK_SIZE, NULL,
638 if (!md->io.md) {
639 spdk_dma_free(md->io.data);
640 md->io.data = NULL;
645 md->io.address = region->current.offset;
646 md->io.remaining = region->current.blocks;
647 md->io.data_offset = 0;
648 md->io.status = 0;
649 md->io.op = op;
655 io_init(struct ftl_md *md, enum ftl_md_ops op)
657 if (io_can_start(md)) {
661 if (io_prepare(md, op)) {
669 persist_entry_lba(struct ftl_md *md, uint64_t start_entry)
671 return md->region->current.offset + start_entry * md->region->entry_size;
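
persist_entry_lba (lines 669–671) maps an entry index to a device address: the region's base offset plus the index scaled by the per-entry size in blocks. For example, with a region at offset 1024 and an entry_size of 1 block, entry 7 lands at LBA 1031. The same arithmetic as a standalone helper:

#include <stdint.h>

static uint64_t
entry_lba(uint64_t region_offset, uint64_t entry_size_blocks, uint64_t entry_idx)
{
	return region_offset + entry_idx * entry_size_blocks;
}
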
678 struct ftl_md *md = ctx->md;
680 ftl_stats_bdev_io_completed(md->dev, get_bdev_io_ftl_stats_type(md->dev, bdev_io), bdev_io);
697 ftl_md_persist_entry_write_blocks(struct ftl_md_io_entry_ctx *ctx, struct ftl_md *md,
702 rc = write_blocks(md->dev, md->region->bdev_desc, md->region->ioch,
704 persist_entry_lba(md, ctx->start_entry), md->region->entry_size * ctx->num_entries,
708 struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(md->region->bdev_desc);
712 spdk_bdev_queue_io_wait(bdev, md->region->ioch, &ctx->bdev_io_wait);
725 struct ftl_md *md_mirror = ftl_md_get_mirror(ctx->md);
734 struct ftl_md *md = ctx->md;
737 rc = ftl_md_persist_entry_write_blocks(ctx, md, ftl_md_persist_entry_primary);
739 if (!rc && has_mirror(md)) {
740 assert(md->region->entry_size == (ftl_md_get_mirror(md))->region->entry_size);
759 ftl_md_persist_entries(struct ftl_md *md, uint64_t start_entry, uint64_t num_entries, void *buffer,
763 if (spdk_unlikely(0 == md->region->entry_size)) {
767 if (spdk_unlikely(start_entry + num_entries > md->region->num_entries)) {
775 ctx->md = md;
779 ctx->vss_buffer = vss_buffer ? : md->entry_vss_dma_buf;
788 struct ftl_md *md = ctx->md;
790 ftl_stats_bdev_io_completed(md->dev, get_bdev_io_ftl_stats_type(md->dev, bdev_io), bdev_io);
795 if (has_mirror(md)) {
796 md->mirror_enabled = true;
799 ftl_md_read_entry(ftl_md_get_mirror(md), ctx->start_entry, ctx->buffer,
815 ftl_md_read_entry_read_blocks(struct ftl_md_io_entry_ctx *ctx, struct ftl_md *md,
820 rc = read_blocks(md->dev, md->region->bdev_desc, md->region->ioch,
822 persist_entry_lba(md, ctx->start_entry), md->region->entry_size,
827 struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(md->region->bdev_desc);
831 spdk_bdev_queue_io_wait(bdev, md->region->ioch, &ctx->bdev_io_wait);
843 ftl_md_read_entry_read_blocks(ctx, ctx->md, _ftl_md_read_entry);
847 ftl_md_read_entry(struct ftl_md *md, uint64_t start_entry, void *buffer, void *vss_buffer,
851 if (spdk_unlikely(0 == md->region->entry_size)) {
858 ctx->md = md;
873 persist_mirror_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
875 struct ftl_md *primary = md->owner.private;
892 ftl_md_persist(struct ftl_md *md)
894 if (has_mirror(md)) {
895 struct ftl_md *md_mirror = ftl_md_get_mirror(md);
897 md->mirror_enabled = true;
901 md_mirror->owner.private = md;
908 if (0 == io_init(md, FTL_MD_OP_PERSIST)) {
909 io_submit(md);
911 spdk_thread_send_msg(spdk_get_thread(), exception, md);
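
ftl_md_persist (lines 892–911) handles mirroring by chaining callbacks: when a mirror exists, the mirror is persisted first, carrying the primary through owner.private, and persist_mirror_cb (line 873) resumes the primary only after the mirror completes. A minimal sketch of that chaining with simplified stand-in types; the I/O completes inline here so the flow can be followed end to end:

#include <stdio.h>

struct md;
typedef void (*md_cb_fn)(struct md *md, int status);

struct md {
	const char *name;
	struct md *mirror;	/* NULL when not mirrored */
	void *owner_private;	/* carries the primary across the chain */
	md_cb_fn cb;		/* completion callback */
};

/* Stand-in for io_init() + io_submit(); completes inline for the sketch. */
static void
start_persist_io(struct md *md)
{
	printf("persist %s\n", md->name);
	md->cb(md, 0);
}

static void
persist_mirror_cb(struct md *mirror, int status)
{
	struct md *primary = mirror->owner_private;

	if (status != 0) {
		/* Mirror failed: stop and report through the primary's callback. */
		primary->cb(primary, status);
		return;
	}
	/* Mirror copy is durable; now persist the primary. */
	start_persist_io(primary);
}

static void
persist(struct md *md)
{
	if (md->mirror != NULL) {
		md->mirror->owner_private = md;
		md->mirror->cb = persist_mirror_cb;
		start_persist_io(md->mirror);	/* mirror first, then primary */
		return;
	}
	start_persist_io(md);
}

static void
user_cb(struct md *md, int status)
{
	printf("%s persisted, status %d\n", md->name, status);
}

int
main(void)
{
	struct md mirror = { .name = "mirror" };
	struct md primary = { .name = "primary", .mirror = &mirror, .cb = user_cb };

	persist(&primary);
	return 0;
}

Persisting the mirror before the primary means a crash between the two writes leaves at most one stale copy, which the restore path below can detect and repair.
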
916 restore_mirror_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
918 struct ftl_md *primary = md->owner.private;
939 restore_sync_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
941 struct ftl_md *primary = md->owner.private;
954 restore_done(struct ftl_md *md)
956 if (-EAGAIN == md->io.status) {
963 if (has_mirror(md)) {
964 struct ftl_md *md_mirror = ftl_md_get_mirror(md);
966 md->mirror_enabled = true;
970 md_mirror->owner.private = md;
978 } else if (0 == md->io.status && false == md->dev->sb->clean) {
979 if (has_mirror(md)) {
980 struct ftl_md *md_mirror = ftl_md_get_mirror(md);
985 md_mirror->owner.private = md;
993 return md->io.status;
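
restore_done (lines 954–993) implements the read-side fallback: a restore that finished with -EAGAIN (set at line 462 when a read on a mirrored region fails) is retried from the mirror, and a successful restore after an unclean shutdown additionally rewrites the mirror to resynchronize the copies. The fallback decision in isolation, again with stand-in types:

#include <errno.h>
#include <stdio.h>

struct md {
	const char *name;
	struct md *mirror;	/* NULL when not mirrored */
	int io_status;		/* result of the just-finished restore I/O */
};

/* Stand-in for re-running the restore on another metadata object. */
static void
start_restore_io(struct md *md)
{
	printf("restore from %s\n", md->name);
}

static int
restore_done(struct md *md)
{
	if (md->io_status == -EAGAIN) {
		if (md->mirror != NULL) {
			/* Primary copy unreadable: retry the whole restore from
			 * the mirror; -EAGAIN signals the retry is in flight. */
			start_restore_io(md->mirror);
			return -EAGAIN;
		}
		return -EIO;	/* no mirror to fall back to */
	}
	return md->io_status;
}
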
997 io_done(struct ftl_md *md)
1001 if (md->io.op == FTL_MD_OP_RESTORE) {
1002 status = restore_done(md);
1004 status = md->io.status;
1008 /* The MD instance may be destroyed in ctx of md->cb(), e.g. upon region upgrade. */
1010 io_cleanup(md);
1011 md->cb(md->dev, md, status);
1016 ftl_md_restore(struct ftl_md *md)
1018 if (0 == io_init(md, FTL_MD_OP_RESTORE)) {
1019 io_submit(md);
1021 spdk_thread_send_msg(spdk_get_thread(), exception, md);
1026 pattern_prepare(struct ftl_md *md,
1029 void *data = md->io.data;
1030 uint64_t data_size = xfer_size(md);
1034 if (md->io.md) {
1037 ftl_md_vss_buf_init(md->io.md, ftl_md_xfer_blocks(md->dev), vss_pattern);
1042 vss.version.md_version = md->region->current.version;
1043 ftl_md_vss_buf_init(md->io.md, ftl_md_xfer_blocks(md->dev), &vss);
1066 ftl_md_clear(struct ftl_md *md, int data_pattern, union ftl_md_vss *vss_pattern)
1068 if (has_mirror(md)) {
1069 struct ftl_md *md_mirror = ftl_md_get_mirror(md);
1071 md->mirror_enabled = true;
1075 md_mirror->owner.private = md;
1079 if (0 == io_init(md, FTL_MD_OP_CLEAR) && 0 == pattern_prepare(md, data_pattern, vss_pattern)) {
1083 spdk_thread_send_msg(spdk_get_thread(), exception, md);
1088 if (0 == io_init(md, FTL_MD_OP_CLEAR) && 0 == pattern_prepare(md, data_pattern, vss_pattern)) {
1089 io_submit(md);
1091 spdk_thread_send_msg(spdk_get_thread(), exception, md);
1096 ftl_md_get_region(struct ftl_md *md)
1098 return md->region;
1102 ftl_md_set_region(struct ftl_md *md,
1105 assert(region->current.blocks <= md->data_blocks);
1106 md->region = region;
1108 if (md->vss_data) {
1111 ftl_md_vss_buf_init(md->vss_data, md->data_blocks, &vss);
1113 assert(md->entry_vss_dma_buf);
1114 ftl_md_vss_buf_init(md->entry_vss_dma_buf, region->entry_size, &vss);
1118 if (has_mirror(md)) {
1119 md->mirror_enabled = true;