/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */
#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"
#include "spdk/blob.h"
#include "spdk/string.h"	/* spdk_strcpy_pad() */

/*
 * This creates a bs_dev that does not depend on a bdev. Typical use without assertions looks like
 * the sketch below. (A compilable version of the same flow appears at the end of this file.)
 *
 *   struct spdk_bs_dev *dev;
 *   struct spdk_bs_opts bs_opts;
 *   struct spdk_blob_opts blob_opts;
 *   struct ut_esnap_opts esnap_opts;
 *   struct spdk_io_channel *bs_chan;
 *   struct spdk_blob *blob;
 *   bool destroyed = false;
 *
 *   Create the blobstore with external snapshot support.
 *   dev = init_dev();
 *   memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
 *   spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
 *   bs_opts.esnap_bs_dev_create = ut_esnap_create;
 *
 *   Create an esnap clone blob.
 *   ut_esnap_opts_init(512, 2048, "name", &destroyed, &esnap_opts);
 *   blob_opts.esnap_id = &esnap_opts;
 *   blob_opts.esnap_id_len = sizeof(esnap_opts);
 *   blob_opts.num_clusters = 4;
 *   blob = ut_blob_create_and_open(bs, &blob_opts);
 *
 *   Do stuff like you would with any other blob.
 *   bs_chan = spdk_bs_alloc_io_channel(bs);
 *   ...
 *
 *   You can check the value of destroyed to verify that spdk_blob_close() led to the
 *   destruction of the bs_dev created during spdk_bs_open_blob().
 *   spdk_blob_close(blob, blob_op_complete, NULL);
 *   poll_threads();
 *   CU_ASSERT(destroyed);
 */

static void
ut_memset4(void *dst, uint32_t pat, size_t len)
{
	uint32_t *vals = dst;

	assert((len % 4) == 0);
	for (size_t i = 0; i < (len / 4); i++) {
		vals[i] = pat;
	}
}

static void
ut_memset8(void *dst, uint64_t pat, size_t len)
{
	uint64_t *vals = dst;

	assert((len % 8) == 0);
	for (size_t i = 0; i < (len / 8); i++) {
		vals[i] = pat;
	}
}

#define UT_ESNAP_OPTS_MAGIC 0xbadf1ea5
struct ut_esnap_opts {
	/*
	 * This structure gets stored in an xattr. The magic number is used to give some assurance
	 * that we got the right thing before trying to use the other fields.
	 */
	uint32_t	magic;
	uint32_t	block_size;
	uint64_t	num_blocks;
	/*
	 * If non-NULL, the referenced address will be set to true when the device is fully
	 * destroyed. This address must remain valid for the life of the blob, even across
	 * blobstore reload.
	 */
	bool	*destroyed;
	char	name[32];
};
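
/*
 * Illustrative helper (not used by the tests): the sanity check implied by the
 * magic number above. Bytes that claim to be a ut_esnap_opts, such as the id
 * handed to an esnap_bs_dev_create callback, should carry UT_ESNAP_OPTS_MAGIC
 * and the expected length before the other fields are trusted.
 * ut_esnap_create() and ut_esnap_dev_alloc() below enforce the same thing with
 * CUnit assertions.
 */
static inline bool
ut_esnap_opts_looks_valid(const void *id, uint32_t id_len)
{
	const struct ut_esnap_opts *opts = id;

	return id != NULL && id_len == sizeof(*opts) && opts->magic == UT_ESNAP_OPTS_MAGIC;
}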

struct ut_esnap_dev {
	struct spdk_bs_dev	bs_dev;
	struct ut_esnap_opts	ut_opts;
	spdk_blob_id	blob_id;
	uint32_t	num_channels;
};

struct ut_esnap_channel {
	struct ut_esnap_dev	*dev;
	struct spdk_thread	*thread;
	uint64_t	blocks_read;
};

static void
ut_esnap_opts_init(uint32_t block_size, uint32_t num_blocks, const char *name, bool *destroyed,
		   struct ut_esnap_opts *opts)
{
	memset(opts, 0, sizeof(*opts));
	opts->magic = UT_ESNAP_OPTS_MAGIC;
	opts->block_size = block_size;
	opts->num_blocks = num_blocks;
	opts->destroyed = destroyed;
	spdk_strcpy_pad(opts->name, name, sizeof(opts->name) - 1, '\0');
}

static struct spdk_io_channel *
ut_esnap_create_channel(struct spdk_bs_dev *dev)
{
	struct spdk_io_channel *ch;

	ch = spdk_get_io_channel(dev);
	if (ch == NULL) {
		return NULL;
	}

	return ch;
}

static void
ut_esnap_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
{
	spdk_put_io_channel(channel);
}

/*
 * When reading, each block is filled with 64-bit values made up of the least significant 32 bits
 * of the blob ID and the lba.
 */
union ut_word {
	uint64_t	num;
	struct {
		uint32_t	blob_id;
		uint32_t	lba;
	} f;
};

static bool
ut_esnap_content_is_correct(void *buf, uint32_t buf_sz, uint32_t id,
			    uint32_t start_byte, uint32_t esnap_blksz)
{
	union ut_word *words = buf;
	uint32_t off, i, j, lba;

	j = 0;
	for (off = start_byte; off < start_byte + buf_sz; off += esnap_blksz) {
		lba = off / esnap_blksz;
		for (i = 0; i < esnap_blksz / sizeof(*words); i++) {
			if (words[j].f.blob_id != id || words[j].f.lba != lba) {
				return false;
			}
			j++;
		}
	}
	return true;
}

static void
ut_esnap_read(struct spdk_bs_dev *bs_dev, struct spdk_io_channel *channel, void *payload,
	      uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
{
	struct ut_esnap_dev *ut_dev = (struct ut_esnap_dev *)bs_dev;
	struct ut_esnap_channel *ut_ch = spdk_io_channel_get_ctx(channel);
	const uint32_t block_size = ut_dev->ut_opts.block_size;
	union ut_word word;
	uint64_t cur;

	/* The channel passed in must be associated with this bs_dev. */
	CU_ASSERT(&ut_ch->dev->bs_dev == bs_dev);
	CU_ASSERT(spdk_get_thread() == ut_ch->thread);

	SPDK_CU_ASSERT_FATAL(sizeof(word) == 8);
	SPDK_CU_ASSERT_FATAL(lba + lba_count <= UINT32_MAX);

	word.f.blob_id = ut_dev->blob_id & 0xffffffff;
	for (cur = 0; cur < lba_count; cur++) {
		word.f.lba = lba + cur;
		ut_memset8(payload + cur * block_size, word.num, block_size);
	}
	ut_ch->blocks_read += lba_count;

	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}
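
/*
 * Worked example of the fill pattern described in the comment above union
 * ut_word (an illustrative helper, not used by the tests): for a device whose
 * blob ID is 0x42, ut_esnap_read() fills every 8-byte word of LBA 7 with a
 * ut_word whose f.blob_id is 0x42 and whose f.lba is 7; on the little-endian
 * targets these tests run on that is the value 0x0000000700000042. The helper
 * below recomputes the expected word the same way ut_esnap_read() builds it,
 * so a test could compare individual words instead of whole blocks.
 */
static inline uint64_t
ut_esnap_expected_word(spdk_blob_id blob_id, uint32_t lba)
{
	union ut_word word;

	word.f.blob_id = blob_id & 0xffffffff;
	word.f.lba = lba;

	return word.num;
}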

static void
ut_esnap_readv(struct spdk_bs_dev *bs_dev, struct spdk_io_channel *channel,
	       struct iovec *iov, int iovcnt, uint64_t lba, uint32_t lba_count,
	       struct spdk_bs_dev_cb_args *cb_args)
{
	struct ut_esnap_channel *ut_ch = spdk_io_channel_get_ctx(channel);

	/* The channel passed in must be associated with this bs_dev. */
	CU_ASSERT(&ut_ch->dev->bs_dev == bs_dev);
	CU_ASSERT(spdk_get_thread() == ut_ch->thread);

	if (iovcnt != 1) {
		CU_ASSERT(false);
		cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, -ENOTSUP);
		return;
	}
	ut_esnap_read(bs_dev, channel, iov->iov_base, lba, lba_count, cb_args);
}

static void
ut_esnap_readv_ext(struct spdk_bs_dev *bs_dev, struct spdk_io_channel *channel,
		   struct iovec *iov, int iovcnt, uint64_t lba, uint32_t lba_count,
		   struct spdk_bs_dev_cb_args *cb_args, struct spdk_blob_ext_io_opts *io_opts)
{
	struct ut_esnap_channel *ut_ch = spdk_io_channel_get_ctx(channel);

	/* The channel passed in must be associated with this bs_dev. */
	CU_ASSERT(&ut_ch->dev->bs_dev == bs_dev);
	CU_ASSERT(spdk_get_thread() == ut_ch->thread);

	CU_ASSERT(false);
	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, -ENOTSUP);
}

static bool
ut_esnap_is_zeroes(struct spdk_bs_dev *dev, uint64_t lba, uint64_t lba_count)
{
	return false;
}

static bool
ut_esnap_is_range_valid(struct spdk_bs_dev *dev, uint64_t lba, uint64_t lba_count)
{
	struct ut_esnap_dev *ut_dev = (struct ut_esnap_dev *)dev;

	if (lba >= ut_dev->ut_opts.num_blocks) {
		return false;
	} else if (lba + lba_count > ut_dev->ut_opts.num_blocks) {
		/* The size of a bdev used for an esnap must currently be an exact multiple of the
		 * blobstore cluster size (see spdk_lvol_create_esnap_clone()), but if that ever
		 * changes this code needs to be updated to account for it. */
		SPDK_ERRLOG("Entire range must be within the bs_dev bounds for CoW.\n"
			    "lba(lba_count): %lu(%lu), num_blks: %lu\n",
			    lba, lba_count, ut_dev->ut_opts.num_blocks);
		assert(false);
		return false;
	}

	return true;
}

static int
ut_esnap_io_channel_create(void *io_device, void *ctx)
{
	struct ut_esnap_dev *ut_dev = io_device;
	struct ut_esnap_channel *ut_ch = ctx;

	ut_ch->dev = ut_dev;
	ut_ch->thread = spdk_get_thread();
	ut_ch->blocks_read = 0;

	ut_dev->num_channels++;

	return 0;
}

static void
ut_esnap_io_channel_destroy(void *io_device, void *ctx)
{
	struct ut_esnap_dev *ut_dev = io_device;
	struct ut_esnap_channel *ut_ch = ctx;

	CU_ASSERT(ut_ch->thread == spdk_get_thread());

	CU_ASSERT(ut_dev->num_channels > 0);
	ut_dev->num_channels--;
}

static void
ut_esnap_dev_free(void *io_device)
{
	struct ut_esnap_dev *ut_dev = io_device;

	if (ut_dev->ut_opts.destroyed != NULL) {
		*ut_dev->ut_opts.destroyed = true;
	}

	CU_ASSERT(ut_dev->num_channels == 0);

	ut_memset4(ut_dev, 0xdeadf1ea, sizeof(*ut_dev));
	free(ut_dev);
}

static void
ut_esnap_destroy(struct spdk_bs_dev *bs_dev)
{
	spdk_io_device_unregister(bs_dev, ut_esnap_dev_free);
}

static bool
ut_esnap_translate_lba(struct spdk_bs_dev *dev, uint64_t lba, uint64_t *base_lba)
{
	*base_lba = lba;
	return true;
}
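
/*
 * Illustrative helper (not referenced by the tests): the first
 * ut_esnap_create_channel() call on a given thread creates an io_channel and
 * bumps num_channels via ut_esnap_io_channel_create(); releasing the last
 * per-thread reference through ut_esnap_destroy_channel() drops it again, and
 * ut_esnap_dev_free() asserts that the count is back to zero. A test that
 * wants to check for leaked channels before destroying the device could use
 * something like this.
 */
static inline bool
ut_esnap_dev_has_channels(const struct spdk_bs_dev *bs_dev)
{
	const struct ut_esnap_dev *ut_dev = (const struct ut_esnap_dev *)bs_dev;

	return ut_dev->num_channels > 0;
}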

static struct spdk_bs_dev *
ut_esnap_dev_alloc(const struct ut_esnap_opts *opts)
{
	struct ut_esnap_dev *ut_dev;
	struct spdk_bs_dev *bs_dev;

	assert(opts->magic == UT_ESNAP_OPTS_MAGIC);

	ut_dev = calloc(1, sizeof(*ut_dev));
	if (ut_dev == NULL) {
		return NULL;
	}

	ut_dev->ut_opts = *opts;
	bs_dev = &ut_dev->bs_dev;

	bs_dev->blocklen = opts->block_size;
	bs_dev->blockcnt = opts->num_blocks;

	bs_dev->create_channel = ut_esnap_create_channel;
	bs_dev->destroy_channel = ut_esnap_destroy_channel;
	bs_dev->destroy = ut_esnap_destroy;
	bs_dev->read = ut_esnap_read;
	bs_dev->readv = ut_esnap_readv;
	bs_dev->readv_ext = ut_esnap_readv_ext;
	bs_dev->is_zeroes = ut_esnap_is_zeroes;
	bs_dev->is_range_valid = ut_esnap_is_range_valid;
	bs_dev->translate_lba = ut_esnap_translate_lba;

	spdk_io_device_register(ut_dev, ut_esnap_io_channel_create, ut_esnap_io_channel_destroy,
				sizeof(struct ut_esnap_channel), opts->name);

	return bs_dev;
}

static int
ut_esnap_create(void *bs_ctx, void *blob_ctx, struct spdk_blob *blob,
		const void *id, uint32_t id_len, struct spdk_bs_dev **bs_devp)
{
	struct spdk_bs_dev *bs_dev = NULL;

	/* With any blobstore that will use bs_ctx or blob_ctx, wrap this function in one that
	 * passes NULL as bs_ctx and blob_ctx. */
	CU_ASSERT(bs_ctx == NULL);
	CU_ASSERT(blob_ctx == NULL);

	SPDK_CU_ASSERT_FATAL(id != NULL);
	SPDK_CU_ASSERT_FATAL(sizeof(struct ut_esnap_opts) == id_len);

	bs_dev = ut_esnap_dev_alloc(id);
	SPDK_CU_ASSERT_FATAL(bs_dev != NULL);

	*bs_devp = bs_dev;
	return 0;
}

static int
ut_esnap_create_with_count(void *bs_ctx, void *blob_ctx, struct spdk_blob *blob,
			   const void *id, uint32_t id_len, struct spdk_bs_dev **bs_devp)
{
	uint32_t *bs_ctx_count = bs_ctx;
	uint32_t *blob_ctx_count = blob_ctx;

	SPDK_CU_ASSERT_FATAL(bs_ctx != NULL);

	(*bs_ctx_count)++;

	/*
	 * blob_ctx can be non-NULL when spdk_bs_open_blob_ext() is used. Opens that come via
	 * spdk_bs_load(), spdk_bs_open_blob(), and those that come via spdk_bs_open_blob_ext()
	 * with NULL opts->esnap_ctx will have blob_ctx == NULL.
	 */
	if (blob_ctx_count != NULL) {
		(*blob_ctx_count)++;
	}

	return ut_esnap_create(NULL, NULL, blob, id, id_len, bs_devp);
}

static struct ut_esnap_channel *
ut_esnap_get_io_channel(struct spdk_io_channel *ch, spdk_blob_id blob_id)
{
	struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(ch);
	struct blob_esnap_channel find = {};
	struct blob_esnap_channel *esnap_channel;

	find.blob_id = blob_id;
	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
	if (esnap_channel == NULL) {
		return NULL;
	}

	return spdk_io_channel_get_ctx(esnap_channel->channel);
}
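
/*
 * Compilable sketch of the flow described in the comment at the top of this
 * file. It is illustrative only and guarded with #if 0 so it is not built:
 * init_dev(), g_dev_buffer, DEV_BUFFER_SIZE, bs_op_with_handle_complete, g_bs,
 * g_bserrno, ut_blob_create_and_open(), blob_op_complete, and poll_threads()
 * are assumed to come from the blob unit-test harness that includes this file.
 */
#if 0
static void
ut_esnap_usage_sketch(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts blob_opts;
	struct ut_esnap_opts esnap_opts;
	struct spdk_io_channel *bs_chan;
	struct spdk_blob_store *bs;
	struct spdk_blob *blob;
	bool destroyed = false;

	/* Create the blobstore with external snapshot support. */
	dev = init_dev();
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create an esnap clone blob backed by a ut_esnap_dev. */
	ut_esnap_opts_init(512, 2048, "name", &destroyed, &esnap_opts);
	spdk_blob_opts_init(&blob_opts, sizeof(blob_opts));
	blob_opts.esnap_id = &esnap_opts;
	blob_opts.esnap_id_len = sizeof(esnap_opts);
	blob_opts.num_clusters = 4;
	blob = ut_blob_create_and_open(bs, &blob_opts);

	/* Do stuff like you would with any other blob. */
	bs_chan = spdk_bs_alloc_io_channel(bs);
	/* ... reads through bs_chan hit ut_esnap_read() for unallocated clusters ... */
	spdk_bs_free_io_channel(bs_chan);

	/* Closing the blob destroys the bs_dev that was created when it was opened. */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(destroyed);
}
#endif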