xref: /spdk/test/unit/lib/blob/blob_bdev.c/blob_bdev_ut.c (revision 2dc4a231ac65d10dd2e1a96684094bef1b7ebb95)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  */
4 
5 #include "spdk/stdinc.h"
6 
7 #include "spdk_internal/cunit.h"
8 #include "common/lib/ut_multithread.c"
9 
10 static void ut_put_io_channel(struct spdk_io_channel *ch);
11 
12 #define spdk_put_io_channel(ch) ut_put_io_channel(ch);
13 #include "blob/bdev/blob_bdev.c"
14 
/*
 * Stubs for bdev-layer functions that blob_bdev.c calls but whose behavior is
 * irrelevant to these tests. Every I/O submission stub reports success (0) and
 * spdk_bdev_io_type_supported() reports nothing supported (false).
 */
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), false);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB(spdk_bdev_queue_io_wait, int,
	    (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
	     struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB(spdk_bdev_read_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
	     void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_write_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
	     void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_readv_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, struct iovec *iov, int iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
	     void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, struct iovec *iov, int iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
	     void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_readv_blocks_ext, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, struct iovec *iov, int iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
	     void *cb_arg, struct spdk_bdev_ext_io_opts *opts), 0);
DEFINE_STUB(spdk_bdev_writev_blocks_ext, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, struct iovec *iov, int iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
	     void *cb_arg, struct spdk_bdev_ext_io_opts *opts), 0);
DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, uint64_t offset_blocks,
	     uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_unmap_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, uint64_t offset_blocks,
	     uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_copy_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, uint64_t dst_offset_blocks,
	     uint64_t src_offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
	     void *cb_arg), 0);
55 
/*
 * Minimal stand-in for the real struct spdk_bdev. Tracks just enough state for
 * these tests: geometry, how many descriptors are open, and who (if anyone)
 * holds a claim.
 */
struct spdk_bdev {
	char name[16];
	uint64_t blockcnt;
	uint32_t blocklen;
	uint32_t phys_blocklen;
	uint32_t open_cnt;			/* incremented/decremented by open/close below */
	enum spdk_bdev_claim_type claim_type;
	struct spdk_bdev_module *claim_module;	/* non-NULL while a claim is held */
	struct spdk_bdev_desc *claim_desc;	/* the descriptor that took the claim */
};
66 
/*
 * Minimal stand-in for the real struct spdk_bdev_desc: remembers the bdev,
 * read/write mode, any claim taken through it, and the opening thread
 * (spdk_bdev_close() asserts it is closed on the same thread).
 */
struct spdk_bdev_desc {
	struct spdk_bdev *bdev;
	bool write;
	enum spdk_bdev_claim_type claim_type;
	struct spdk_thread *thread;
};
73 
/* The single bdev that get_bdev() can find; each test points this at a stack bdev. */
struct spdk_bdev *g_bdev;

/* Module identity used when taking claims via spdk_bs_bdev_claim(). */
static struct spdk_bdev_module g_bdev_mod = {
	.name = "blob_bdev_ut"
};
79 
80 struct spdk_io_channel *
81 spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
82 {
83 	if (desc != NULL) {
84 		return (struct spdk_io_channel *)0x1;
85 	}
86 	return NULL;
87 }
88 
/*
 * Replacement for spdk_put_io_channel() (see the #define above the blob_bdev.c
 * include). The fake channel handed out by spdk_bdev_get_io_channel() has
 * nothing to release, so this is intentionally a no-op.
 */
static void
ut_put_io_channel(struct spdk_io_channel *ch)
{
}
93 
94 static struct spdk_bdev *
95 get_bdev(const char *bdev_name)
96 {
97 	if (g_bdev == NULL) {
98 		return NULL;
99 	}
100 
101 	if (strcmp(bdev_name, g_bdev->name) != 0) {
102 		return NULL;
103 	}
104 
105 	return g_bdev;
106 }
107 
108 int
109 spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
110 		   void *event_ctx, struct spdk_bdev_desc **_desc)
111 {
112 	struct spdk_bdev_desc *desc;
113 	struct spdk_bdev *bdev = get_bdev(bdev_name);
114 
115 	if (bdev == NULL) {
116 		return -ENODEV;
117 	}
118 
119 	if (write && bdev->claim_module != NULL) {
120 		return -EPERM;
121 	}
122 
123 	desc = calloc(1, sizeof(*desc));
124 	desc->bdev = g_bdev;
125 	desc->write = write;
126 	desc->thread = spdk_get_thread();
127 	*_desc = desc;
128 	bdev->open_cnt++;
129 
130 	return 0;
131 }
132 
133 void
134 spdk_bdev_close(struct spdk_bdev_desc *desc)
135 {
136 	struct spdk_bdev *bdev = desc->bdev;
137 
138 	CU_ASSERT(desc->thread == spdk_get_thread());
139 
140 	bdev->open_cnt--;
141 	if (bdev->claim_desc == desc) {
142 		bdev->claim_desc = NULL;
143 		bdev->claim_type = SPDK_BDEV_CLAIM_NONE;
144 		bdev->claim_module = NULL;
145 	}
146 	free(desc);
147 }
148 
149 struct spdk_bdev *
150 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
151 {
152 	return desc->bdev;
153 }
154 
155 uint64_t
156 spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
157 {
158 	return bdev->blockcnt;
159 }
160 
161 uint32_t
162 spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
163 {
164 	return bdev->blocklen;
165 }
166 
167 uint32_t
168 spdk_bdev_get_physical_block_size(const struct spdk_bdev *bdev)
169 {
170 	return bdev->phys_blocklen;
171 }
172 
173 /* This is a simple approximation: it does not support shared claims */
174 int
175 spdk_bdev_module_claim_bdev_desc(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
176 				 struct spdk_bdev_claim_opts *opts,
177 				 struct spdk_bdev_module *module)
178 {
179 	struct spdk_bdev *bdev = desc->bdev;
180 
181 	if (bdev->claim_module != NULL) {
182 		return -EPERM;
183 	}
184 
185 	bdev->claim_type = type;
186 	bdev->claim_module = module;
187 	bdev->claim_desc = desc;
188 
189 	desc->claim_type = type;
190 
191 	return 0;
192 }
193 
194 static void
195 init_bdev(struct spdk_bdev *bdev, const char *name, uint64_t num_blocks)
196 {
197 	memset(bdev, 0, sizeof(*bdev));
198 	snprintf(bdev->name, sizeof(bdev->name), "%s", name);
199 	bdev->blockcnt = num_blocks;
200 }
201 
/*
 * Basic lifecycle: spdk_bdev_create_bs_dev_ext() opens the bdev read-write
 * without taking a claim, and destroy() closes it again.
 */
static void
create_bs_dev(void)
{
	struct spdk_bdev bdev;
	struct spdk_bs_dev *bs_dev = NULL;
	struct blob_bdev *blob_bdev;
	int rc;

	init_bdev(&bdev, "bdev0", 16);
	g_bdev = &bdev;

	rc = spdk_bdev_create_bs_dev_ext("bdev0", NULL, NULL, &bs_dev);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(bs_dev != NULL);
	CU_ASSERT(bdev.open_cnt == 1);

	/* The bs_dev opened the bdev for writing and took no claim. */
	blob_bdev = (struct blob_bdev *)bs_dev;
	CU_ASSERT(blob_bdev->desc != NULL);
	CU_ASSERT(blob_bdev->desc->write);
	CU_ASSERT(blob_bdev->desc->bdev == g_bdev);
	CU_ASSERT(blob_bdev->desc->claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev.claim_type == SPDK_BDEV_CLAIM_NONE);

	/* Destroying the bs_dev closes the bdev. */
	bs_dev->destroy(bs_dev);
	CU_ASSERT(bdev.open_cnt == 0);
	g_bdev = NULL;
}
229 
/*
 * spdk_bdev_create_bs_dev() with write == false: validates opts size checking,
 * the missing-bdev error path, and that the resulting descriptor is read-only
 * with no claim taken.
 */
static void
create_bs_dev_ro(void)
{
	struct spdk_bdev bdev;
	struct spdk_bs_dev *bs_dev = NULL;
	struct blob_bdev *blob_bdev;
	struct spdk_bdev_bs_dev_opts opts = { 0 };
	int rc;

	/* opts with the wrong size returns -EINVAL */
	rc = spdk_bdev_create_bs_dev("nope", false, &opts, sizeof(opts) + 8, NULL, NULL, &bs_dev);
	CU_ASSERT(rc == -EINVAL);

	/* opts with the right size is OK, but can still fail if the device doesn't exist. */
	opts.opts_size = sizeof(opts);
	rc = spdk_bdev_create_bs_dev("nope", false, &opts, sizeof(opts), NULL, NULL, &bs_dev);
	CU_ASSERT(rc == -ENODEV);

	init_bdev(&bdev, "bdev0", 16);
	g_bdev = &bdev;

	/* The normal way to create a read-only device */
	rc = spdk_bdev_create_bs_dev("bdev0", false, NULL, 0, NULL, NULL, &bs_dev);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(bs_dev != NULL);
	CU_ASSERT(bdev.open_cnt == 1);

	/* The descriptor is read-only and no claim was taken. */
	blob_bdev = (struct blob_bdev *)bs_dev;
	CU_ASSERT(blob_bdev->desc != NULL);
	CU_ASSERT(!blob_bdev->desc->write);
	CU_ASSERT(blob_bdev->desc->bdev == g_bdev);
	CU_ASSERT(blob_bdev->desc->claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev.claim_type == SPDK_BDEV_CLAIM_NONE);

	bs_dev->destroy(bs_dev);
	CU_ASSERT(bdev.open_cnt == 0);
	g_bdev = NULL;
}
268 
/*
 * spdk_bdev_create_bs_dev() with write == true behaves like
 * spdk_bdev_create_bs_dev_ext(): read-write open, no claim.
 */
static void
create_bs_dev_rw(void)
{
	struct spdk_bdev bdev;
	struct spdk_bs_dev *bs_dev = NULL;
	struct blob_bdev *blob_bdev;
	int rc;

	init_bdev(&bdev, "bdev0", 16);
	g_bdev = &bdev;

	/* This is equivalent to spdk_bdev_create_bs_dev_ext() */
	rc = spdk_bdev_create_bs_dev("bdev0", true, NULL, 0, NULL, NULL, &bs_dev);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(bs_dev != NULL);
	CU_ASSERT(bdev.open_cnt == 1);

	/* The descriptor is writable and no claim was taken. */
	blob_bdev = (struct blob_bdev *)bs_dev;
	CU_ASSERT(blob_bdev->desc != NULL);
	CU_ASSERT(blob_bdev->desc->write);
	CU_ASSERT(blob_bdev->desc->bdev == g_bdev);
	CU_ASSERT(blob_bdev->desc->claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev.claim_type == SPDK_BDEV_CLAIM_NONE);

	bs_dev->destroy(bs_dev);
	CU_ASSERT(bdev.open_cnt == 0);
	g_bdev = NULL;
}
297 
/*
 * A read-write bs_dev takes a READ_MANY_WRITE_ONE claim, which blocks both a
 * second writer and a second claim; destroy() releases the claim.
 */
static void
claim_bs_dev(void)
{
	struct spdk_bdev bdev;
	struct spdk_bs_dev *bs_dev = NULL, *bs_dev2 = NULL;
	struct blob_bdev *blob_bdev;
	int rc;

	init_bdev(&bdev, "bdev0", 16);
	g_bdev = &bdev;

	rc = spdk_bdev_create_bs_dev_ext("bdev0", NULL, NULL, &bs_dev);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(bs_dev != NULL);

	/* Sanity check: writable descriptor, no claim yet. */
	blob_bdev = (struct blob_bdev *)bs_dev;
	CU_ASSERT(blob_bdev->desc->claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(blob_bdev->desc->write);

	/* Can get an exclusive write claim */
	rc = spdk_bs_bdev_claim(bs_dev, &g_bdev_mod);
	CU_ASSERT(rc == 0);
	CU_ASSERT(blob_bdev->desc->write);
	CU_ASSERT(bdev.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(bdev.claim_desc == blob_bdev->desc);

	/* Claim blocks a second writer without messing up the first one. */
	rc = spdk_bdev_create_bs_dev_ext("bdev0", NULL, NULL, &bs_dev2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(bdev.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(bdev.claim_desc == blob_bdev->desc);

	/* Claim blocks a second claim without messing up the first one. */
	rc = spdk_bs_bdev_claim(bs_dev, &g_bdev_mod);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(bdev.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(bdev.claim_desc == blob_bdev->desc);

	/* destroy() closes the bdev, which releases the claim. */
	bs_dev->destroy(bs_dev);
	CU_ASSERT(bdev.open_cnt == 0);
	CU_ASSERT(bdev.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev.claim_module == NULL);
	CU_ASSERT(bdev.claim_desc == NULL);
	g_bdev = NULL;
}
344 
/*
 * A read-only bs_dev takes a READ_MANY_WRITE_NONE claim: writers are blocked,
 * but additional readers are still allowed.
 */
static void
claim_bs_dev_ro(void)
{
	struct spdk_bdev bdev;
	struct spdk_bs_dev *bs_dev = NULL, *bs_dev2 = NULL;
	struct blob_bdev *blob_bdev;
	int rc;

	init_bdev(&bdev, "bdev0", 16);
	g_bdev = &bdev;

	rc = spdk_bdev_create_bs_dev("bdev0", false, NULL, 0, NULL, NULL, &bs_dev);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(bs_dev != NULL);

	/* Sanity check: read-only descriptor, no claim yet. */
	blob_bdev = (struct blob_bdev *)bs_dev;
	CU_ASSERT(blob_bdev->desc->claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(!blob_bdev->desc->write);

	/* Can get a shared reader claim */
	rc = spdk_bs_bdev_claim(bs_dev, &g_bdev_mod);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!blob_bdev->desc->write);
	CU_ASSERT(bdev.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	CU_ASSERT(bdev.claim_desc == blob_bdev->desc);

	/* Claim blocks a writer without messing up the claim. */
	rc = spdk_bdev_create_bs_dev_ext("bdev0", NULL, NULL, &bs_dev2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(bdev.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	CU_ASSERT(bdev.claim_desc == blob_bdev->desc);

	/* Another reader is just fine */
	rc = spdk_bdev_create_bs_dev("bdev0", false, NULL, 0, NULL, NULL, &bs_dev2);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(bs_dev2 != NULL);
	bs_dev2->destroy(bs_dev2);

	/* destroy() closes the bdev, which releases the claim. */
	bs_dev->destroy(bs_dev);
	CU_ASSERT(bdev.open_cnt == 0);
	CU_ASSERT(bdev.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev.claim_module == NULL);
	CU_ASSERT(bdev.claim_desc == NULL);
	g_bdev = NULL;
}
391 
/*
 * Verify that create_channel() and destroy_channel() increment and decrement the blob_bdev->refs,
 * both when the channels live on the descriptor's thread and when they live on another thread.
 */
static void
deferred_destroy_refs(void)
{
	struct spdk_bdev bdev;
	struct spdk_io_channel *ch1, *ch2;
	struct spdk_bs_dev *bs_dev = NULL;
	struct blob_bdev *blob_bdev;
	int rc;

	set_thread(0);
	init_bdev(&bdev, "bdev0", 16);
	g_bdev = &bdev;

	/* Open a blob_bdev, verify reference count is 1. */
	rc = spdk_bdev_create_bs_dev("bdev0", false, NULL, 0, NULL, NULL, &bs_dev);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(bs_dev != NULL);
	blob_bdev = (struct blob_bdev *)bs_dev;
	CU_ASSERT(blob_bdev->refs == 1);
	CU_ASSERT(blob_bdev->desc != NULL);

	/* Verify reference count increases with channels on the same thread. */
	ch1 = bs_dev->create_channel(bs_dev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	CU_ASSERT(blob_bdev->refs == 2);
	ch2 = bs_dev->create_channel(bs_dev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(blob_bdev->refs == 3);
	bs_dev->destroy_channel(bs_dev, ch1);
	CU_ASSERT(blob_bdev->refs == 2);
	bs_dev->destroy_channel(bs_dev, ch2);
	CU_ASSERT(blob_bdev->refs == 1);
	CU_ASSERT(blob_bdev->desc != NULL);

	/* Verify reference count increases with channels on different threads. */
	ch1 = bs_dev->create_channel(bs_dev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	CU_ASSERT(blob_bdev->refs == 2);
	set_thread(1);
	ch2 = bs_dev->create_channel(bs_dev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(blob_bdev->refs == 3);
	bs_dev->destroy_channel(bs_dev, ch1);
	CU_ASSERT(blob_bdev->refs == 2);
	bs_dev->destroy_channel(bs_dev, ch2);
	CU_ASSERT(blob_bdev->refs == 1);
	CU_ASSERT(blob_bdev->desc != NULL);

	/* destroy() must run on the thread that opened the descriptor. */
	set_thread(0);
	bs_dev->destroy(bs_dev);
	g_bdev = NULL;
}
447 
448 /*
449  * When a channel is open bs_dev->destroy() should not free bs_dev until after the last channel is
450  * closed. Further, destroy() prevents the creation of new channels.
451  */
452 static void
453 deferred_destroy_channels(void)
454 {
455 	struct spdk_bdev bdev;
456 	struct spdk_io_channel *ch1, *ch2;
457 	struct spdk_bs_dev *bs_dev = NULL;
458 	struct blob_bdev *blob_bdev;
459 	int rc;
460 
461 	set_thread(0);
462 	init_bdev(&bdev, "bdev0", 16);
463 
464 	/* Open bs_dev and sanity check */
465 	g_bdev = &bdev;
466 	rc = spdk_bdev_create_bs_dev("bdev0", false, NULL, 0, NULL, NULL, &bs_dev);
467 	CU_ASSERT(rc == 0);
468 	SPDK_CU_ASSERT_FATAL(bs_dev != NULL);
469 	CU_ASSERT(bdev.open_cnt == 1);
470 	blob_bdev = (struct blob_bdev *)bs_dev;
471 	CU_ASSERT(blob_bdev->refs == 1);
472 	CU_ASSERT(blob_bdev->desc != NULL);
473 
474 	/* Create a channel, destroy the bs_dev. It should not be freed yet. */
475 	ch1 = bs_dev->create_channel(bs_dev);
476 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
477 	CU_ASSERT(blob_bdev->refs == 2);
478 	bs_dev->destroy(bs_dev);
479 
480 	/* Destroy closes the bdev and prevents desc from being used for creating more channels. */
481 	CU_ASSERT(blob_bdev->desc == NULL);
482 	CU_ASSERT(bdev.open_cnt == 0);
483 	CU_ASSERT(blob_bdev->refs == 1);
484 	ch2 = bs_dev->create_channel(bs_dev);
485 	CU_ASSERT(ch2 == NULL)
486 	CU_ASSERT(blob_bdev->refs == 1);
487 	bs_dev->destroy_channel(bs_dev, ch1);
488 	g_bdev = NULL;
489 
490 	/* Now bs_dev should have been freed. Builds with asan will verify. */
491 }
492 
/*
 * Verify that deferred destroy copes well with the last channel destruction being on a thread other
 * than the thread used to obtain the bdev descriptor.
 */
static void
deferred_destroy_threads(void)
{
	struct spdk_bdev bdev;
	struct spdk_io_channel *ch1, *ch2;
	struct spdk_bs_dev *bs_dev = NULL;
	struct blob_bdev *blob_bdev;
	int rc;

	set_thread(0);
	init_bdev(&bdev, "bdev0", 16);
	g_bdev = &bdev;

	/* Open bs_dev and sanity check */
	rc = spdk_bdev_create_bs_dev("bdev0", false, NULL, 0, NULL, NULL, &bs_dev);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(bs_dev != NULL);
	CU_ASSERT(bdev.open_cnt == 1);
	blob_bdev = (struct blob_bdev *)bs_dev;
	CU_ASSERT(blob_bdev->refs == 1);
	CU_ASSERT(blob_bdev->desc != NULL);

	/* Create two channels, each on their own thread. */
	ch1 = bs_dev->create_channel(bs_dev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	CU_ASSERT(blob_bdev->refs == 2);
	CU_ASSERT(spdk_get_thread() == blob_bdev->desc->thread);
	set_thread(1);
	ch2 = bs_dev->create_channel(bs_dev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(blob_bdev->refs == 3);

	/* Destroy the bs_dev on thread 0, the channel on thread 0, then the channel on thread 1. */
	set_thread(0);
	bs_dev->destroy(bs_dev);
	CU_ASSERT(blob_bdev->desc == NULL);
	CU_ASSERT(bdev.open_cnt == 0);
	CU_ASSERT(blob_bdev->refs == 2);
	bs_dev->destroy_channel(bs_dev, ch1);
	CU_ASSERT(blob_bdev->refs == 1);
	/* The final put happens on thread 1, away from the descriptor's thread. */
	set_thread(1);
	bs_dev->destroy_channel(bs_dev, ch2);
	set_thread(0);
	g_bdev = NULL;

	/* Now bs_dev should have been freed. Builds with asan will verify. */
}
544 
545 int
546 main(int argc, char **argv)
547 {
548 	CU_pSuite	suite;
549 	unsigned int	num_failures;
550 
551 	CU_initialize_registry();
552 
553 	suite = CU_add_suite("blob_bdev", NULL, NULL);
554 
555 	CU_ADD_TEST(suite, create_bs_dev);
556 	CU_ADD_TEST(suite, create_bs_dev_ro);
557 	CU_ADD_TEST(suite, create_bs_dev_rw);
558 	CU_ADD_TEST(suite, claim_bs_dev);
559 	CU_ADD_TEST(suite, claim_bs_dev_ro);
560 	CU_ADD_TEST(suite, deferred_destroy_refs);
561 	CU_ADD_TEST(suite, deferred_destroy_channels);
562 	CU_ADD_TEST(suite, deferred_destroy_threads);
563 
564 	allocate_threads(2);
565 	set_thread(0);
566 
567 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
568 	CU_cleanup_registry();
569 
570 	free_threads();
571 
572 	return num_failures;
573 }
574