| /spdk/lib/ftl/ |
| H A D | ftl_layout.c |
    26   blocks2mib(uint64_t blocks)  in blocks2mib() argument
    30   result = blocks;  in blocks2mib()
    71   ftl_md_region_align_blocks(struct spdk_ftl_dev *dev, uint64_t blocks)  in ftl_md_region_align_blocks() argument
    76   result = spdk_divide_round_up(blocks, alignment);  in ftl_md_region_align_blocks()
    117  return region->current.blocks == 0 && region->current.offset == FTL_ADDR_INVALID;  in dump_region()
    128  assert(!(region->current.blocks % superblock_region_blocks(dev)));  in is_region_disabled()
    133  FTL_NOTICELOG(dev, " blocks: %.2f MiB\n",  in ftl_validate_regions()
    134  blocks2mib(region->current.blocks));  in ftl_validate_regions()
    166  uint64_t r1_end = r1->current.offset + r1->current.blocks - 1;  in ftl_validate_regions()
    168  uint64_t r2_end = r2->current.offset + r2->current.blocks  in ftl_validate_regions()
    178  uint64_t blocks;  get_num_user_lbas() local
    282  uint64_t blocks, chunk_count;  layout_setup_legacy_default_nvc() local
    429  uint64_t blocks;  layout_setup_default_nvc() local
    [all...]
| H A D | ftl_debug.c |
    203  write_user = dev->stats.entries[FTL_STATS_TYPE_CMP].write.blocks;  in ftl_dev_dump_stats()
    205  dev->stats.entries[FTL_STATS_TYPE_GC].write.blocks +  in ftl_dev_dump_stats()
    206  dev->stats.entries[FTL_STATS_TYPE_MD_BASE].write.blocks;  in ftl_dev_dump_stats()
|
| H A D | ftl_layout.h |
    92   /* Number of blocks in FTL_BLOCK_SIZE unit */
    93   uint64_t blocks;
    110  /* Number of blocks in FTL_BLOCK_SIZE unit of a single entry.
    199  * @brief Get number of blocks required to store an MD region
    204  * @retval Number of blocks required to store an MD region
    209  * @brief Get number of blocks for md_region aligned to a common value
    212  * @param blocks size of the MD region in blocks
    214  * @retval Aligned number of blocks required to store an MD region
    216  uint64_t ftl_md_region_align_blocks(struct spdk_ftl_dev *dev, uint64_t blocks);
    87   uint64_t blocks;  global() member
    [all...]
| /spdk/module/bdev/ocf/ |
| H A D | stats.c |
    20   return ocf_stats_collect_core(core, &stats->usage, &stats->reqs, &stats->blocks, &stats->errors);  in vbdev_ocf_stats_get()
    75   WJSON_STAT(w, stats, blocks, core_volume_rd, "4KiB blocks");  in vbdev_ocf_stats_write_json()
    76   WJSON_STAT(w, stats, blocks, core_volume_wr, "4KiB blocks");  in vbdev_ocf_stats_write_json()
    77   WJSON_STAT(w, stats, blocks, core_volume_total, "4KiB blocks");  in vbdev_ocf_stats_write_json()
    78   WJSON_STAT(w, stats, blocks, cache_volume_rd, "4KiB blocks");  in vbdev_ocf_stats_write_json()
    79   WJSON_STAT(w, stats, blocks, cache_volume_wr, "4KiB blocks");  in vbdev_ocf_stats_write_json()
    80   WJSON_STAT(w, stats, blocks, cache_volume_total, "4KiB blocks");  in vbdev_ocf_stats_write_json()
    81   WJSON_STAT(w, stats, blocks, volume_rd, "4KiB blocks");  in vbdev_ocf_stats_write_json()
    82   WJSON_STAT(w, stats, blocks, volume_wr, "4KiB blocks");  in vbdev_ocf_stats_write_json()
    83   WJSON_STAT(w, stats, blocks, volume_total, "4KiB blocks");  in vbdev_ocf_stats_write_json()
|
| H A D | stats.h | 15 struct ocf_stats_blocks blocks; member
|
| /spdk/test/unit/lib/bdev/raid/concat.c/ |
| H A D | concat_ut.c |
    277  struct raid_bdev *raid_bdev, uint64_t lba, uint64_t blocks, int16_t iotype)  in raid_io_initialize() argument
    297  raid_test_bdev_io_init(raid_io, raid_bdev, raid_ch, iotype, lba, blocks, iovs, iovcnt, md_buf);  in raid_io_initialize()
    306  uint64_t lba, blocks;  in submit_and_verify_rw() local
    310  blocks = 1;  in submit_and_verify_rw()
    320  raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_WRITE);  in submit_and_verify_rw()
    324  raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_READ);  in submit_and_verify_rw()
    328  raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);  in submit_and_verify_rw()
    332  raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);  in submit_and_verify_rw()
    344  CU_ASSERT(g_req_records.num_blocks[0] == blocks);  in submit_and_verify_rw()
    377  uint64_t lba, blocks;  in submit_and_verify_null_payload() local
    [all …]
|
| /spdk/doc/ |
| H A D | ssd_internals.md |
    19   blocks**. The size of an erase block is highly implementation specific, but
    28   drive is composed of a set of fixed size **logical blocks** which are usually
    29   512B or 4KiB in size. These blocks are entirely logical constructs of the
    36   blocks to account for wear (called **wear-leveling**) and spread them across
    44   One consequence of the flash translation layer is that logical blocks do not
    59   to all blocks prior to reading them when benchmarking!
    62   available erase blocks. In order to continue writing, the SSD must free some
    64   some number of erase blocks so that they can guarantee there are free erase
    65   blocks available for garbage collection. Garbage collection generally proceeds
    70   3. Moving valid logical blocks by reading them and writing them to a different erase block (i.e. th…
    [all …]
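The ssd_internals.md matches above explain that the flash translation layer decouples logical blocks from physical flash locations: rewriting an LBA places the data in a different erase block and leaves the old copy to be garbage collected. A toy sketch of that remapping idea is below; it is purely illustrative, and every name in it is invented rather than taken from SPDK.

```c
#include <stdint.h>

#define NUM_LBAS     1024u
#define ADDR_INVALID 0u /* physical address 0 is reserved here to mean "unmapped" */

/* Toy flash translation layer: one physical block address per logical block. */
static uint64_t l2p_table[NUM_LBAS]; /* zero-initialized, i.e. all LBAs start unmapped */
static uint64_t next_free_block = 1; /* append point inside the currently open erase block */

/* A (re)write of an LBA appends to a fresh physical block and remaps the LBA;
 * the previously mapped block becomes invalid and is reclaimed later by GC. */
static uint64_t
ftl_toy_write(uint64_t lba)
{
	l2p_table[lba] = next_free_block++;
	return l2p_table[lba];
}

/* Reads always go through the map, so callers never see the physical shuffling. */
static uint64_t
ftl_toy_read(uint64_t lba)
{
	return l2p_table[lba]; /* ADDR_INVALID (0) if the LBA was never written */
}
```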
|
| H A D | ftl.md |
    15   are contiguous and in range from 0 to the number of surfaced blocks (the number of spare blocks
    17   spare blocks account for zones going offline throughout the lifespan of the device as well as
    28   a band follow the same pattern - a batch of logical blocks is written to one zone, another batch
    99   Since a write to the same LBA invalidates its previous physical location, some of the blocks on a
    115  Valid blocks are marked with an asterisk '\*'.
    118  the appropriate blocks are marked as required to be moved. The `reloc` module takes a band that has
    119  some of such blocks marked, checks their validity and, if they're still valid, copies them.
    121  Choosing a band for garbage collection depends its validity ratio (proportion of valid blocks to all
    122  user blocks). The lower the ratio, the higher the chance the band will be chosen for gc.
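The ftl.md lines above describe how a band is picked for garbage collection by its validity ratio, the proportion of valid blocks to all user blocks, preferring the lowest ratio. A small sketch of that selection rule follows; the struct and helper names are hypothetical stand-ins for illustration, not the actual SPDK ftl_band API.

```c
#include <stddef.h>
#include <stdint.h>

/* Hypothetical, simplified view of a band, for illustration only. */
struct gc_band {
	uint64_t valid_blocks; /* blocks still referenced by the L2P table */
	uint64_t user_blocks;  /* all user-data blocks the band can hold */
};

/* Validity ratio: proportion of valid blocks to all user blocks. */
static double
gc_band_validity(const struct gc_band *band)
{
	return band->user_blocks ? (double)band->valid_blocks / band->user_blocks : 1.0;
}

/* Pick the band with the lowest validity ratio - it costs the least to relocate. */
static const struct gc_band *
gc_pick_band(const struct gc_band *bands, size_t count)
{
	const struct gc_band *best = NULL;
	double best_ratio = 2.0;

	for (size_t i = 0; i < count; i++) {
		double ratio = gc_band_validity(&bands[i]);

		if (ratio < best_ratio) {
			best_ratio = ratio;
			best = &bands[i];
		}
	}
	return best;
}
```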
|
| H A D | compression.md |
    5    The SPDK "reduce" block compression scheme is based on using SSDs for storing compressed blocks of
    6    storage and persistent memory for metadata. This metadata includes mappings of logical blocks
    7    requested by a user to the compressed blocks on SSD. The scheme described in this document
    14   This scheme only describes how compressed blocks are stored on an SSD and the metadata for tracking
    15   those compressed blocks. It relies on the higher-software module to perform the compression
    19   (Note that in some cases, blocks of storage may not be compressible, or cannot be compressed enough
    21   disk. The phrase "compressed blocks of storage" includes these uncompressed blocks.)
    27   lowest-numbered blocks available on the backing storage device. This will ensure that when this
    28   algorithm is used on a thin-provisioned backing storage device, blocks will not be allocated until
    47   much the chunk was able to be compressed. The blocks on disk associated with a chunk are stored in…
    [all …]
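The compression.md lines above outline the "reduce" bookkeeping: per-chunk metadata records how far the chunk compressed and which backing-device blocks hold it, and new blocks are taken lowest-numbered-first so a thin-provisioned backing device stays compact. A hedged illustration of such a chunk-map entry and allocator is below; the field names and the 16-block cap are invented for the example and are not the libreduce on-disk format.

```c
#include <stdint.h>

#define CHUNK_BACKING_BLOCKS_MAX 16 /* illustrative upper bound, not the real limit */

/*
 * One entry of a toy chunk map: where a logical chunk lives on the backing SSD
 * and how large its compressed form is. An incompressible chunk simply records
 * compressed_size equal to the full chunk size.
 */
struct toy_chunk_map_entry {
	uint32_t compressed_size;                         /* bytes after compression */
	uint32_t num_backing_blocks;                      /* how many entries below are in use */
	uint64_t backing_block[CHUNK_BACKING_BLOCKS_MAX]; /* block indexes on the backing device */
};

/* Lowest-numbered-first allocation keeps a thin-provisioned backing device compact. */
static uint64_t
toy_alloc_backing_block(uint64_t *free_bitmap, uint64_t num_blocks)
{
	for (uint64_t i = 0; i < num_blocks; i++) {
		if (free_bitmap[i / 64] & (1ull << (i % 64))) {
			free_bitmap[i / 64] &= ~(1ull << (i % 64)); /* mark allocated */
			return i;
		}
	}
	return UINT64_MAX; /* no free block available */
}
```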
|
| /spdk/scripts/ |
| H A D | fio-wrapper |
    62   local blocks nvme nvme_sub
    70   blocks+=("${nvme##*/}")
    76   local blocks
    77   blocks=(/sys/block/ublk*)
|
| H A D | setup.sh |
    235  local blocks block blockp dev mount holder
    239  blocks=($(get_block_dev_from_bdf "$bdf"))
    241  for block in "${blocks[@]}"; do
|
| /spdk/test/unit/lib/ftl/ftl_layout_upgrade/ |
| H A D | ftl_layout_upgrade_ut.c |
    89   DEFINE_STUB(ftl_md_create, struct ftl_md *, (struct spdk_ftl_dev *dev, uint64_t blocks,
    146  reg->current.blocks = TEST_REG_BLKS;  in test_setup()
    216  tracker, reg->type, reg->current.version, reg->current.blocks, TEST_REG_BLKS);  in test_l2p_upgrade()
    221  CU_ASSERT_EQUAL(reg->current.blocks, reg_props->blk_sz);  in test_l2p_upgrade()
    309  CU_ASSERT_EQUAL(reg->current.blocks, 0xc0ffee);  in test_l2p_upgrade()
    332  CU_ASSERT_NOT_EQUAL(region->current.blocks, 0x0514);  in l2p_upgrade_v0_to_v1()
    347  CU_ASSERT_NOT_EQUAL(region->current.blocks, 0x0514);  in l2p_upgrade_v1_to_v2()
    362  CU_ASSERT_EQUAL(region->current.blocks, 0xc0ffee);  in l2p_upgrade_v2_to_v3()
|
| /spdk/lib/ftl/utils/ |
| H A D | ftl_md.c |
    277  struct ftl_md *ftl_md_create(struct spdk_ftl_dev *dev, uint64_t blocks,  in ftl_md_create() argument
    288  md->data_blocks = blocks;  in ftl_md_create()
    467  uint64_t blocks = bdev_io->u.bdev.num_blocks;  in read_write_blocks_cb() local
    468  uint64_t size = blocks * FTL_BLOCK_SIZE;  in read_write_blocks_cb()
    476  memcpy(md->vss_data + vss_offset, md->io.md, blocks * FTL_MD_VSS_SZ);  in read_write_blocks_cb()
    480  md->io.address += blocks;  in read_write_blocks_cb()
    481  md->io.remaining -= blocks;  in read_write_blocks_cb()
    536  uint64_t blocks;  in read_write_blocks() local
    539  blocks = spdk_min(md->io.remaining, ftl_md_xfer_blocks(md->dev));  in read_write_blocks()
    545  md->io.address, blocks,  in read_write_blocks()
    581  uint64_t blocks = spdk_min(md->io.remaining, ftl_md_xfer_blocks(md->dev));  io_submit() local
    [all...]
| /spdk/test/setup/ |
| H A D | devices.sh |
    196  declare -a blocks=()
    205  blocks+=("${block##*/}")
    211  declare -r test_disk=${blocks[0]}
|
| /spdk/lib/ftl/upgrade/ |
| H A D | ftl_band_upgrade.c |
    94   if (lctx->reg->current.blocks != ctx->reg.current.blocks) {  in v2_upgrade_setup_ctx()
    98   ctx->md = ftl_md_create(dev, lctx->reg->current.blocks, 0, ctx->reg.name, FTL_MD_CREATE_HEAP,  in v2_upgrade_setup_ctx()
|
| H A D | ftl_chunk_upgrade.c |
    43   for (uint64_t i = 0; i < ctx->reg_v2.current.blocks; i++, md++) {  in v1_to_v2_upgrade_set()
    70   …ctx->md_v2 = ftl_md_create(dev, ctx->reg_v2.current.blocks, 0, ctx->reg_v2.name, FTL_MD_CREATE_HEA…  in v1_to_v2_upgrade_setup_ctx()
|
| H A D | ftl_sb_v5.c |
    369  reg->current.blocks = reg_next->blk_sz;  in ftl_superblock_v5_md_layout_upgrade_region()
    395  ftl_bug(reg->current.blocks != reg_next->blk_sz);  in ftl_superblock_v5_md_layout_upgrade_region()
    464  reg->current.blocks = reg_search_ctx->blk_sz;  in layout_apply_from_sb_blob()
    472  reg->current.blocks = reg_search_ctx->blk_sz;  in layout_apply_from_sb_blob()
    484  reg->current.blocks != reg_search_ctx->blk_sz) {  in layout_apply_from_sb_blob()
|
| H A D | ftl_trim_upgrade.c | 56 ctx->md = ftl_md_create(dev, ctx->reg.current.blocks, 0, ctx->reg.name, FTL_MD_CREATE_HEAP, in v0_to_v1_upgrade_setup_ctx()
|
| /spdk/ |
| H A D | .astylerc | 4 keep-one-line-blocks # Don't break blocks that are on one line
|
| /spdk/test/unit/lib/ftl/ftl_sb/ |
| H A D | ftl_sb_ut.c |
    89   DEFINE_STUB(ftl_md_create, struct ftl_md *, (struct spdk_ftl_dev *dev, uint64_t blocks,
    151  reg->current.blocks = TEST_REG_BLKS;  in test_setup()
    310  uint64_t blks_left = total_blocks - reg->current.offset - reg->current.blocks;  in test_superblock_v3_md_layout_add_free()
    320  reg->current.offset + reg->current.blocks, blks_left)) {  in test_superblock_v3_md_layout_add_free()
    358  reg->current.offset, reg->current.blocks)) {  in test_ftl_superblock_v3_md_layout_build()
    603  tracker, reg->type, reg->current.version, reg->current.blocks, TEST_REG_BLKS);  in test_sb_v5_md_layout()
    608  CU_ASSERT_EQUAL(reg->current.blocks, reg_props->blk_sz);  in test_sb_v5_md_layout()
    675  tbe->blk_offs = reg->current.offset + FTL_LAYOUT_REGION_TYPE_MAX * reg->current.blocks;  in test_sb_v5_md_layout()
    676  tbe->blk_sz = reg->current.blocks;  in test_sb_v5_md_layout()
    683  tbe->blk_offs = reg->current.offset + FTL_LAYOUT_REGION_TYPE_MAX * reg->current.blocks;  in test_sb_v5_md_layout()
    [all...]
| /spdk/lib/ftl/mngt/ |
| H A D | ftl_mngt_recovery.c |
    47   return 0 == ctx->l2p_snippet.region.current.blocks;  in recovery_iter_done()
    61   snippet->current.offset += snippet->current.blocks;  in recovery_iter_advance()
    62   …snippet->current.blocks = region->current.offset + region->current.blocks - snippet->current.offse…  in recovery_iter_advance()
    63   snippet->current.blocks = spdk_min(snippet->current.blocks, ctx->iter.block_limit);  in recovery_iter_advance()
    68   last_blocks = first_block + snippet->current.blocks;  in recovery_iter_advance()
    126  ctx->l2p_snippet.region.current.blocks = ctx->iter.block_limit;  in ftl_mngt_recovery_init()
    147  ctx->l2p_snippet.region.current.blocks = 0;  in ftl_mngt_recovery_init()
    341  assert(page_id < ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_L2P)->current.blocks);  in ftl_mngt_recovery_iteration_init_seq_ids()
    374  region->current.offset, region->current.offset + region->current.blocks,  in ftl_mngt_recovery_iteration_load_l2p()
|
| H A D | ftl_mngt_band.c |
    47   uint64_t i, blocks, md_blocks, md_bands;  in ftl_dev_init_bands() local
    50   blocks = spdk_bdev_get_num_blocks(spdk_bdev_desc_get_bdev(dev->base_bdev_desc));  in ftl_dev_init_bands()
    51   dev->num_bands = blocks / ftl_get_num_blocks_in_band(dev);  in ftl_dev_init_bands()
|
| /spdk/test/nvme/perf/ |
| H A D | common.sh |
    529  local blocks block_idx io_mechanism=libaio
    533  blocks=($(get_disks))
    542  for block_idx in "${!blocks[@]}"; do
    545  rpc_ref["filename"]=/dev/${blocks[block_idx]}
    547  rpc_ref["name"]=${blocks[block_idx]}
|
| /spdk/lib/ftl/base/ |
| H A D | ftl_base_bdev.c |
    34   * Current buffers are allocated in 1MiB sizes (256 blocks), so it can't be larger than that.  in md_region_create()
    35   * In the future, if the restriction is relaxed, the ftl_bitmap_buffer_alignment (64 blocks)  in md_region_create()
    39   "Unsupported write unit size (%lu), must be a power of 2 (in blocks). Can't be larger than 256 (1MiB)\n",  in md_region_create()
    120  region->current.blocks = reg_search_ctx->blk_sz;
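The md_region_create() comments above state the constraint directly: the write unit size must be a power of two in blocks and, with 1 MiB (256-block) buffers, cannot exceed 256 blocks. An equivalent standalone check, written here as an illustration rather than the actual SPDK validation code:

```c
#include <stdbool.h>
#include <stdint.h>

#define MAX_WRITE_UNIT_BLOCKS 256u /* 256 blocks of 4 KiB = the 1 MiB buffer limit */

/* A write unit size is acceptable if it is a non-zero power of two
 * and does not exceed the 1 MiB (256-block) buffer size. */
static bool
write_unit_size_is_valid(uint64_t blocks)
{
	bool power_of_two = blocks != 0 && (blocks & (blocks - 1)) == 0;

	return power_of_two && blocks <= MAX_WRITE_UNIT_BLOCKS;
}
```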
|
| /spdk/lib/scsi/ |
| H A D | scsi_bdev.c |
    128  uint32_t blocks, optimal_blocks;  in bdev_scsi_inquiry() local
    488  blocks = SPDK_WORK_ATS_BLOCK_SIZE / block_size;  in bdev_scsi_inquiry()
    490  if (blocks > 0xff) {  in bdev_scsi_inquiry()
    491  blocks = 0xff;  in bdev_scsi_inquiry()
    494  data[5] = (uint8_t)blocks;  in bdev_scsi_inquiry()
    506  blocks = SPDK_WORK_BLOCK_SIZE / block_size;  in bdev_scsi_inquiry()
    509  to_be32(&data[8], blocks);  in bdev_scsi_inquiry()
    511  to_be32(&data[12], blocks);  in bdev_scsi_inquiry()
    548  to_be64(&data[36], blocks);  in bdev_scsi_inquiry()
|