Lines matching defs:md (definitions and uses of md in SPDK's FTL non-volatile cache code)

33 struct ftl_md *md = nv_cache->md;
34 void *buffer = ftl_md_get_buffer(md);
35 uint64_t size = ftl_md_get_buffer_size(md);
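
These accessors (lines 33-35) are how the scrub path reaches the raw in-memory copy of an ftl_md region. A minimal sketch of the pattern, assuming SPDK's internal FTL headers; scrub_fill() is a hypothetical wrapper, not SPDK code:

    #include <string.h>

    /* Sketch only: uses ftl_md_get_buffer()/ftl_md_get_buffer_size() exactly
     * as at lines 33-35. */
    static void
    scrub_fill(struct ftl_nv_cache *nv_cache, int byte)
    {
        struct ftl_md *md = nv_cache->md;             /* chunk-md region      */
        void *buffer = ftl_md_get_buffer(md);         /* raw backing buffer   */
        uint64_t size = ftl_md_get_buffer_size(md);   /* buffer size in bytes */

        memset(buffer, byte, size);
    }
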
72 /* Map pool element holds the whole tail md */
128 nvc_scrub_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
130 struct nvc_scrub_ctx *scrub_ctx = md->owner.cb_ctx;
220 struct ftl_nv_cache_chunk_md *md;
226 nv_cache->md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
227 if (!nv_cache->md) {
263 md = ftl_md_get_buffer(nv_cache->md);
264 if (!md) {
271 for (i = 0; i < nv_cache->chunk_count; i++, chunk++, md++) {
273 chunk->md = md;
274 chunk->md->version = FTL_NVC_VERSION_CURRENT;
275 nvc_validate_md(nv_cache, md);
283 chunk->md->state = FTL_CHUNK_STATE_INACTIVE;
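
In the init path above (lines 263-283), the region's buffer is treated as a contiguous array of struct ftl_nv_cache_chunk_md and each chunk is pointed at its own element. A simplified, self-contained model of that wiring (struct fields trimmed, names illustrative; not the full SPDK definitions):

    #include <stddef.h>
    #include <stdint.h>

    struct chunk_md { uint64_t version; uint64_t state; };
    struct chunk    { struct chunk_md *md; };

    /* Mirror of the loop at lines 271-275: one md slot per chunk, stamped
     * with the current metadata version. */
    static void
    wire_chunk_md(struct chunk *chunks, struct chunk_md *buf, size_t count,
                  uint64_t version_current)
    {
        for (size_t i = 0; i < count; i++) {
            chunks[i].md = &buf[i];
            chunks[i].md->version = version_current;
        }
    }
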
385 assert(chunk->md->write_pointer + nv_cache->tail_md_chunk_blocks <=
387 return nv_cache->chunk_blocks - chunk->md->write_pointer -
394 return chunk->md->write_pointer == chunk->nv_cache->chunk_blocks;
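
Lines 385-394 fix the chunk geometry: the last tail_md_chunk_blocks blocks of every chunk are reserved for tail metadata, so usable free space and the full test follow directly. A sketch of the relations with illustrative helper names, not SPDK code:

    #include <stdbool.h>
    #include <stdint.h>

    /* Free space left in the data area (lines 385-387). */
    static uint64_t
    chunk_free_space(uint64_t chunk_blocks, uint64_t tail_md_blocks,
                     uint64_t write_pointer)
    {
        return chunk_blocks - write_pointer - tail_md_blocks;
    }

    /* A chunk is full when the write pointer reaches its end (line 394). */
    static bool
    chunk_is_full(uint64_t chunk_blocks, uint64_t write_pointer)
    {
        return write_pointer == chunk_blocks;
    }
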
416 if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
430 address = chunk->offset + chunk->md->write_pointer;
436 chunk->md->write_pointer += num_blocks;
451 chunk->md->blocks_skipped = free_space;
452 chunk->md->blocks_written += free_space;
453 chunk->md->write_pointer += free_space;
455 if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
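
Lines 451-455 (and the identical sequences at 2495-2497 and 2524-2527 later in the listing) close out a chunk's data area by booking the unused tail as skipped, so blocks_written can reach the tail-md offset and the chunk counts as full. A compact model of that accounting, with illustrative names:

    #include <assert.h>
    #include <stdint.h>

    struct cmd { uint64_t blocks_skipped, blocks_written, write_pointer; };

    /* Mark the remaining free space of a chunk as skipped, as at 451-453;
     * returns 1 once the data area is full (the check at line 455). */
    static int
    skip_free_space(struct cmd *md, uint64_t free_space, uint64_t tail_md_offset)
    {
        md->blocks_skipped  = free_space;
        md->blocks_written += free_space;
        md->write_pointer  += free_space;

        assert(md->write_pointer <= tail_md_offset);
        return md->blocks_written == tail_md_offset;
    }
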
468 union ftl_md_vss *metadata = io->md;
473 metadata->nv_cache.seq_id = chunk->md->seq_id;
487 chunk->md->blocks_written += advanced_blocks;
489 assert(chunk->md->blocks_written <= nv_cache->chunk_blocks);
491 if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
499 return chunk->md->blocks_written - chunk->md->blocks_skipped -
506 assert(chunk->md->blocks_written != 0);
508 if (chunk_user_blocks_written(chunk) == chunk->md->blocks_compacted) {
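
Line 499 defines the user-visible payload of a chunk: everything written minus skipped padding and the tail metadata. Compaction of a chunk is complete once blocks_compacted catches up with that payload (line 508). Spelled out with a hypothetical helper:

    #include <stdint.h>

    /* Line 499: user payload = all written blocks minus padding and tail md. */
    static uint64_t
    user_blocks_written(uint64_t blocks_written, uint64_t blocks_skipped,
                        uint64_t tail_md_blocks)
    {
        return blocks_written - blocks_skipped - tail_md_blocks;
    }

    /* Line 508: compaction finished <=> blocks_compacted == that payload. */
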
548 ftl_nv_cache_chunk_md_initialize(chunk->md);
590 chunk->md->state = FTL_CHUNK_STATE_FREE;
591 chunk->md->close_seq_id = 0;
607 struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
621 memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
626 ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md, NULL,
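
Lines 621-626 are the recurring persist idiom (it reappears at 1944-1948, 2001-2004, and 2346-2354): snapshot the volatile chunk metadata into a DMA-capable staging entry, then write that single entry back at the chunk's index in the NVC_MD region. The call at line 626 is truncated in the listing, so the trailing callback arguments below, and persist_cb itself, are assumptions:

    /* Hypothetical helper showing the idiom; assumes SPDK's internal types. */
    static void
    chunk_md_persist(struct spdk_ftl_dev *dev, struct ftl_nv_cache_chunk *chunk,
                     struct ftl_p2l_map *p2l_map,
                     const struct ftl_layout_region *region)
    {
        struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];

        /* stage a snapshot of the live metadata in DMA-able memory */
        memcpy(p2l_map->chunk_dma_md, chunk->md,
               region->entry_size * FTL_BLOCK_SIZE);

        /* persist exactly one entry at this chunk's slot; no VSS buffer */
        ftl_md_persist_entries(md, get_chunk_idx(chunk), 1,
                               p2l_map->chunk_dma_md, NULL,
                               persist_cb, chunk);
    }
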
654 *ptr = (double)chunk->md->blocks_compacted * FTL_BLOCK_SIZE / chunk->compaction_length_tsc;
670 chunk->md->blocks_compacted += num_blocks;
671 assert(chunk->md->blocks_compacted <= chunk_user_blocks_written(chunk));
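
Line 654 reports per-chunk compaction bandwidth as bytes moved per elapsed TSC tick, and lines 670-671 keep blocks_compacted bounded by the user payload defined above. The quantity at line 654, with the struct dereferences dropped:

    /* compaction bandwidth in bytes per TSC tick (line 654) */
    double bw = (double)blocks_compacted * FTL_BLOCK_SIZE / compaction_length_tsc;
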
827 assert(chunk->md->blocks_written != 0);
829 if (chunk_user_blocks_written(chunk) == chunk->md->read_pointer) {
895 assert(chunk->md->write_pointer);
925 assert(chunk->md->blocks_written >= chunk->md->blocks_skipped);
928 assert(blocks_written >= chunk->md->read_pointer);
929 blocks_to_read = blocks_written - chunk->md->read_pointer;
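
Lines 925-929 compute how much of a chunk the compactor still has to read. The local blocks_written is inferred from the asserts to be net of skipped padding; with that assumption, a sketch:

    #include <assert.h>
    #include <stdint.h>

    /* Remaining read work for compaction (lines 925-929). */
    static uint64_t
    chunk_blocks_to_read(uint64_t blocks_written, uint64_t blocks_skipped,
                         uint64_t read_pointer)
    {
        uint64_t net = blocks_written - blocks_skipped; /* guarded at 925 */

        assert(net >= read_pointer);                    /* as at 928 */
        return net - read_pointer;
    }
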
1003 start = ftl_addr_from_nvc_offset(dev, chunk->offset + chunk->md->read_pointer);
1007 chunk->md->read_pointer += to_read;
1015 chunk->md->read_pointer += skip;
1049 entry->lba = ftl_chunk_map_get_lba(chunk, chunk->md->read_pointer);
1052 chunk->md->read_pointer++;
1173 entry->seq_id = chunk->md->seq_id;
1472 chunk->md->seq_id = ftl_get_next_seq_id(dev);
1543 nvc_validate_md(nv_cache, chunk->md);
1545 if (chunk->md->read_pointer) {
1547 if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1557 chunk->md->read_pointer = chunk->md->blocks_compacted = 0;
1558 } else if (chunk->md->blocks_written == nv_cache->chunk_blocks) {
1560 } else if (0 == chunk->md->blocks_written) {
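
Lines 1543-1560 classify each chunk when its metadata is loaded: a chunk caught mid-compaction (nonzero read_pointer) must be fully written, and its compaction progress is discarded so compaction restarts from scratch. The decision table, as inferred from the visible branches:

    read_pointer != 0, blocks_written == chunk_blocks  ->  reset read_pointer and
                                                           blocks_compacted to 0
    read_pointer != 0, blocks_written != chunk_blocks  ->  inconsistent metadata
    read_pointer == 0, blocks_written == chunk_blocks  ->  fully written chunk
    read_pointer == 0, blocks_written == 0             ->  empty chunk

(The remaining case, a partially written chunk, is handled outside the matched lines.)
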
1583 return a_chunk->md->seq_id - b_chunk->md->seq_id;
1685 if (chunk->md->state == FTL_CHUNK_STATE_FREE) {
1687 chunk->md->state = FTL_CHUNK_STATE_INACTIVE;
1689 } else if (chunk->md->state == FTL_CHUNK_STATE_INACTIVE) {
1691 chunk->md->state = FTL_CHUNK_STATE_FREE;
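
Lines 1685-1691 flip chunks between the two idle states, FREE and INACTIVE, which suggests only idle chunks may be activated or deactivated. A minimal model with a simplified state enum (values illustrative, not the SPDK enum):

    enum state { ST_FREE, ST_OPEN, ST_CLOSED, ST_INACTIVE };

    /* Model of lines 1685-1691: toggle only the idle states. */
    static void
    toggle_activity(enum state *s)
    {
        if (*s == ST_FREE) {
            *s = ST_INACTIVE;     /* deactivate a free chunk */
        } else if (*s == ST_INACTIVE) {
            *s = ST_FREE;         /* reactivate an idle chunk */
        }                         /* OPEN/CLOSED: left alone */
    }
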
1700 nvc_validate_md(nv_cache, chunk->md);
1707 if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
1714 if (chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
1720 switch (chunk->md->state) {
1722 if (chunk->md->blocks_written || chunk->md->write_pointer) {
1736 if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1801 o_seq_id = spdk_max(o_seq_id, chunk->md->seq_id);
1802 c_seq_id = spdk_max(c_seq_id, chunk->md->close_seq_id);
1863 chunk->md->write_pointer += brq->num_blocks;
1914 chunk->md->state = FTL_CHUNK_STATE_OPEN;
1923 struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1937 assert(chunk->md->write_pointer == 0);
1938 assert(chunk->md->blocks_written == 0);
1944 memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1948 ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md,
1960 assert(chunk->md->write_pointer == chunk->nv_cache->chunk_blocks);
1963 chunk->md->p2l_map_checksum = chunk->p2l_map.chunk_dma_md->p2l_map_checksum;
1973 chunk->nv_cache->last_seq_id = chunk->md->close_seq_id;
1975 chunk->md->state = FTL_CHUNK_STATE_CLOSED;
1995 struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
2001 memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
2004 ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, chunk->p2l_map.chunk_dma_md,
2010 chunk->md->write_pointer -= brq->num_blocks;
2025 chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
2029 assert(chunk->md->write_pointer == chunk_tail_md_offset(chunk->nv_cache));
2030 brq->io.addr = chunk->offset + chunk->md->write_pointer;
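
Lines 2025-2030 seal the chunk: a close sequence number is taken and the tail-metadata request is aimed at the write pointer, which the assert at 2029 pins to chunk_tail_md_offset(). The implied per-chunk layout:

    chunk->offset                                  chunk->offset + chunk_blocks
      | user data ............. | skipped padding | tail md |
                                                  ^
                                write_pointer == chunk_tail_md_offset()
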
2146 if (seq_id && (chunk->md->close_seq_id <= seq_id)) {
2187 restore_chunk_state_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
2189 struct ftl_mngt_process *mngt = md->owner.cb_ctx;
2204 chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
2209 if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
2214 switch (chunk->md->state) {
2254 struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
2256 md->owner.cb_ctx = mngt;
2257 md->cb = restore_chunk_state_cb;
2258 ftl_md_restore(md);
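
Lines 2254-2258 show the asynchronous restore pattern for an ftl_md region: the caller parks its context in md->owner.cb_ctx, points md->cb at a completion handler (whose shape is visible at line 2187), and kicks off ftl_md_restore(). A sketch of both halves, assuming SPDK's internal headers; my_restore_cb is a hypothetical name:

    /* Completion handler, matching the signature at line 2187. */
    static void
    my_restore_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
    {
        struct ftl_mngt_process *mngt = md->owner.cb_ctx; /* recover context */
        /* ... inspect the restored entries, then continue or fail ... */
    }

    /* Kick off the restore, as at lines 2254-2258. */
    struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
    md->owner.cb_ctx = mngt;
    md->cb = my_restore_cb;
    ftl_md_restore(md);
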
2275 ctx->chunk->offset, ctx->chunk->md->seq_id);
2321 *chunk->md = *chunk->p2l_map.chunk_dma_md;
2324 chunk->md->seq_id);
2344 struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
2346 *chunk_md = *chunk->md;
2354 ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, chunk_md, NULL,
2455 * moved to free chunk list. Also need to wait for free md requests */
2475 if (chunk->md->state != FTL_CHUNK_STATE_OPEN) {
2481 ftl_nv_cache_chunk_md_initialize(chunk->md);
2495 chunk->md->blocks_skipped = free_space;
2496 chunk->md->blocks_written += free_space;
2497 chunk->md->write_pointer += free_space;
2510 if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
2511 return chunk->md->seq_id;
2521 seq_id = nv_cache->chunk_current->md->seq_id;
2524 chunk->md->blocks_skipped = free_space;
2525 chunk->md->blocks_written += free_space;
2526 chunk->md->write_pointer += free_space;
2527 if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
2541 double used = chunk->md->blocks_written + chunk->md->blocks_skipped;
2553 assert(chunk->md->state < SPDK_COUNTOF(names));
2554 if (chunk->md->state < SPDK_COUNTOF(names)) {
2555 return names[chunk->md->state];
2583 ftl_nv_cache_chunk_md_initialize(struct ftl_nv_cache_chunk_md *md)
2585 memset(md, 0, sizeof(*md));
2586 md->version = FTL_NVC_VERSION_CURRENT;
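
Lines 2583-2586 are the single reset path for chunk metadata: zero everything, then restamp the current on-media version. The reuse sites at lines 548 and 2481 funnel through it so stale counters cannot leak into a recycled chunk; typical use, with the explicit state write as at line 590:

    ftl_nv_cache_chunk_md_initialize(chunk->md);   /* zero + version stamp   */
    chunk->md->state = FTL_CHUNK_STATE_FREE;       /* explicit, as line 590  */
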