/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2023 Solidigm All Rights Reserved
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/bdev_module.h"
#include "spdk/ftl.h"

#include "ftl_nv_cache.h"
#include "ftl_internal.h"
#include "ftl_mngt_steps.h"
#include "ftl_core.h"
#include "utils/ftl_defs.h"
#include "utils/ftl_layout_tracker_bdev.h"

/* Minimum capacities (in GiB) accepted for the NV cache and base bdevs. */
#define MINIMUM_CACHE_SIZE_GIB 5
#define MINIMUM_BASE_SIZE_GIB 20

/* Dummy bdev module used to claim bdevs. */
static struct spdk_bdev_module g_ftl_bdev_module = {
	.name = "ftl_lib",
};

/*
 * Return the number of logical blocks in a single FTL band.
 *
 * Defaults to 1 GiB worth of FTL_BLOCK_SIZE blocks unless overridden at
 * build time via SPDK_FTL_ZONE_EMU_BLOCKS (zone-emulation builds).
 * The descriptor argument is currently unused — see TODO below.
 */
static inline uint64_t
ftl_calculate_num_blocks_in_band(struct spdk_bdev_desc *desc)
{
	/* TODO: this should be passed via input parameter */
#ifdef SPDK_FTL_ZONE_EMU_BLOCKS
	return SPDK_FTL_ZONE_EMU_BLOCKS;
#else
	return (1ULL << 30) / FTL_BLOCK_SIZE;
#endif
}

/*
 * Event callback for the base bdev descriptor.
 * Hot remove of the base device is not supported, hence the assert.
 */
static void
base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		assert(0);
		break;
	default:
		break;
	}
}

/*
 * Management step: open and validate the base (backing) bdev.
 *
 * Opens the bdev named by dev->conf.base_bdev, claims it for the FTL
 * module, validates block size / capacity / xfer size, resolves the base
 * device type, creates an IO channel and a layout tracker, and caches
 * frequently used geometry values on the dev structure.
 *
 * On any failure the step is failed via ftl_mngt_fail_step(); partially
 * acquired resources are left on dev and are torn down by the rollback
 * step ftl_mngt_close_base_bdev() (except for the claim, which is
 * explicitly dropped here so rollback doesn't try to release it twice).
 */
void
ftl_mngt_open_base_bdev(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	uint32_t block_size;
	uint64_t num_blocks;
	const char *bdev_name = dev->conf.base_bdev;
	struct spdk_bdev *bdev;

	if (spdk_bdev_open_ext(bdev_name, true, base_bdev_event_cb,
			       dev, &dev->base_bdev_desc)) {
		FTL_ERRLOG(dev, "Unable to open bdev: %s\n", bdev_name);
		goto error;
	}

	bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);

	if (spdk_bdev_module_claim_bdev(bdev, dev->base_bdev_desc, &g_ftl_bdev_module)) {
		/* clear the desc so that we don't try to release the claim on cleanup */
		spdk_bdev_close(dev->base_bdev_desc);
		dev->base_bdev_desc = NULL;
		FTL_ERRLOG(dev, "Unable to claim bdev %s\n", bdev_name);
		goto error;
	}

	/* FTL supports exactly one logical block size */
	block_size = spdk_bdev_get_block_size(bdev);
	if (block_size != FTL_BLOCK_SIZE) {
		FTL_ERRLOG(dev, "Unsupported block size (%"PRIu32")\n", block_size);
		goto error;
	}

	num_blocks = spdk_bdev_get_num_blocks(bdev);

	if (num_blocks * block_size < MINIMUM_BASE_SIZE_GIB * GiB) {
		FTL_ERRLOG(dev, "Bdev %s is too small, requires, at least %uGiB capacity\n",
			   spdk_bdev_get_name(bdev), MINIMUM_BASE_SIZE_GIB);
		goto error;
	}

	dev->base_ioch = spdk_bdev_get_io_channel(dev->base_bdev_desc);
	if (!dev->base_ioch) {
		FTL_ERRLOG(dev, "Failed to create base bdev IO channel\n");
		goto error;
	}

	/* Write unit size must be a power of two blocks */
	dev->xfer_size = ftl_get_write_unit_size(bdev);
	if (!spdk_u32_is_pow2(dev->xfer_size)) {
		FTL_ERRLOG(dev,
			   "Unsupported xfer_size (%"PRIu64") - only power of 2 blocks xfer_size is supported\n",
			   dev->xfer_size);
		goto error;
	}

	dev->base_type = ftl_base_device_get_type_by_bdev(dev, bdev);
	if (!dev->base_type) {
		FTL_ERRLOG(dev, "Failed to get base device type\n");
		goto error;
	}
	/* TODO: validate size when base device VSS usage gets added */
	dev->md_size = spdk_bdev_get_md_size(bdev);

	if (!dev->base_type->ops.md_layout_ops.region_create) {
		FTL_ERRLOG(dev, "Base device doesn't implement md_layout_ops\n");
		goto error;
	}

	/* Cache frequently used values */
	dev->num_blocks_in_band = ftl_calculate_num_blocks_in_band(dev->base_bdev_desc);
	dev->is_zoned = spdk_bdev_is_zoned(spdk_bdev_desc_get_bdev(dev->base_bdev_desc));

	if (dev->is_zoned) {
		/* TODO - current FTL code isn't fully compatible with ZNS drives */
		FTL_ERRLOG(dev, "Creating FTL on Zoned devices is not supported\n");
		goto error;
	}

	dev->base_layout_tracker = ftl_layout_tracker_bdev_init(spdk_bdev_get_num_blocks(bdev));
	if (!dev->base_layout_tracker) {
		FTL_ERRLOG(dev, "Failed to instantiate layout tracker for base device\n");
		goto error;
	}

	ftl_mngt_next_step(mngt);
	return;
error:
	ftl_mngt_fail_step(mngt);
}
135 void 136 ftl_mngt_close_base_bdev(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt) 137 { 138 if (dev->base_ioch) { 139 spdk_put_io_channel(dev->base_ioch); 140 dev->base_ioch = NULL; 141 } 142 143 if (dev->base_bdev_desc) { 144 struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc); 145 146 spdk_bdev_module_release_bdev(bdev); 147 spdk_bdev_close(dev->base_bdev_desc); 148 149 dev->base_bdev_desc = NULL; 150 } 151 152 if (dev->base_layout_tracker) { 153 ftl_layout_tracker_bdev_fini(dev->base_layout_tracker); 154 dev->base_layout_tracker = NULL; 155 } 156 157 ftl_mngt_next_step(mngt); 158 } 159 160 static void 161 nv_cache_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 162 { 163 switch (type) { 164 case SPDK_BDEV_EVENT_REMOVE: 165 assert(0); 166 break; 167 default: 168 break; 169 } 170 } 171 172 void 173 ftl_mngt_open_cache_bdev(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt) 174 { 175 struct spdk_bdev *bdev; 176 struct ftl_nv_cache *nv_cache = &dev->nv_cache; 177 const char *bdev_name = dev->conf.cache_bdev; 178 const struct ftl_md_layout_ops *md_ops; 179 180 if (spdk_bdev_open_ext(bdev_name, true, nv_cache_bdev_event_cb, dev, 181 &nv_cache->bdev_desc)) { 182 FTL_ERRLOG(dev, "Unable to open bdev: %s\n", bdev_name); 183 goto error; 184 } 185 186 bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc); 187 188 if (spdk_bdev_module_claim_bdev(bdev, nv_cache->bdev_desc, &g_ftl_bdev_module)) { 189 /* clear the desc so that we don't try to release the claim on cleanup */ 190 spdk_bdev_close(nv_cache->bdev_desc); 191 nv_cache->bdev_desc = NULL; 192 FTL_ERRLOG(dev, "Unable to claim bdev %s\n", bdev_name); 193 goto error; 194 } 195 196 FTL_NOTICELOG(dev, "Using %s as write buffer cache\n", spdk_bdev_get_name(bdev)); 197 198 if (spdk_bdev_get_block_size(bdev) != FTL_BLOCK_SIZE) { 199 FTL_ERRLOG(dev, "Unsupported block size (%d)\n", 200 spdk_bdev_get_block_size(bdev)); 201 goto error; 202 } 203 204 
nv_cache->cache_ioch = spdk_bdev_get_io_channel(nv_cache->bdev_desc); 205 if (!nv_cache->cache_ioch) { 206 FTL_ERRLOG(dev, "Failed to create cache IO channel for NV Cache\n"); 207 goto error; 208 } 209 210 if (bdev->blockcnt * bdev->blocklen < MINIMUM_CACHE_SIZE_GIB * GiB) { 211 FTL_ERRLOG(dev, "Bdev %s is too small, requires, at least %uGiB capacity\n", 212 spdk_bdev_get_name(bdev), MINIMUM_CACHE_SIZE_GIB); 213 goto error; 214 } 215 nv_cache->md_size = spdk_bdev_get_md_size(bdev); 216 217 nv_cache->nvc_type = ftl_nv_cache_device_get_type_by_bdev(dev, bdev); 218 if (!nv_cache->nvc_type) { 219 FTL_ERRLOG(dev, "Failed to get NV Cache device type\n"); 220 goto error; 221 } 222 nv_cache->md_size = sizeof(union ftl_md_vss); 223 224 md_ops = &nv_cache->nvc_type->ops.md_layout_ops; 225 if (!md_ops->region_create) { 226 FTL_ERRLOG(dev, "NV Cache device doesn't implement md_layout_ops\n"); 227 goto error; 228 } 229 230 dev->nvc_layout_tracker = ftl_layout_tracker_bdev_init(spdk_bdev_get_num_blocks(bdev)); 231 if (!dev->nvc_layout_tracker) { 232 FTL_ERRLOG(dev, "Failed to instantiate layout tracker for nvc device\n"); 233 goto error; 234 } 235 236 FTL_NOTICELOG(dev, "Using %s as NV Cache device\n", nv_cache->nvc_type->name); 237 ftl_mngt_next_step(mngt); 238 return; 239 error: 240 ftl_mngt_fail_step(mngt); 241 } 242 243 void 244 ftl_mngt_close_cache_bdev(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt) 245 { 246 if (dev->nv_cache.cache_ioch) { 247 spdk_put_io_channel(dev->nv_cache.cache_ioch); 248 dev->nv_cache.cache_ioch = NULL; 249 } 250 251 if (dev->nv_cache.bdev_desc) { 252 struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc); 253 254 spdk_bdev_module_release_bdev(bdev); 255 spdk_bdev_close(dev->nv_cache.bdev_desc); 256 257 dev->nv_cache.bdev_desc = NULL; 258 } 259 260 if (dev->nvc_layout_tracker) { 261 ftl_layout_tracker_bdev_fini(dev->nvc_layout_tracker); 262 dev->nvc_layout_tracker = NULL; 263 } 264 265 ftl_mngt_next_step(mngt); 266 } 267