/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "lib/test_env.c"
#include "lib/ut_multithread.c"

/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev.c"

#define BDEV_UT_NUM_THREADS 3

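/* These tests never exercise the SCSI/NVMe error translation path, so stub it out. */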
DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

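/*
 * Test bdev backing the stub module.  io_target serves as the io_device handle
 *  that per-thread I/O channels are created against.
 */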
struct ut_bdev {
	struct spdk_bdev	bdev;
	int			io_target;
};

struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_cnt;
	uint32_t			avail_cnt;
};

struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;

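/* io_device create callback: initialize the per-channel outstanding I/O queue. */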
static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail_cnt reaches 0, the submit_request function will complete
	 *  the I/O with ENOMEM status.  Most tests do not want ENOMEM to occur,
	 *  so by default set this to a big value that won't get hit.  The ENOMEM
	 *  tests can then override this value with something much smaller to
	 *  induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}

static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev.io_target);
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

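/*
 * Queue each submitted I/O on the channel until a test completes it via
 *  stub_complete_io().  A reset first fails all I/O already outstanding on
 *  the channel; once avail_cnt is exhausted, new I/O complete with NOMEM.
 */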
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

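/*
 * Complete up to num_to_complete queued I/O on the current thread's channel
 *  with SUCCESS status; num_to_complete == 0 means complete everything.
 *  Returns the number of I/O actually completed.
 */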
static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(&g_bdev.io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}

	spdk_put_io_channel(_ch);
	return num_completed;
}

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
};

static int
module_init(void)
{
	return 0;
}

static void
module_fini(void)
{
}

SPDK_BDEV_MODULE_REGISTER(bdev_ut, module_init, module_fini, NULL, NULL, NULL)

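/* Register the io_device and the test bdev (1024 blocks of 4096 bytes each). */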
static void
register_bdev(void)
{
	g_bdev.bdev.name = "bdev_ut";
	g_bdev.bdev.fn_table = &fn_table;
	g_bdev.bdev.module = SPDK_GET_BDEV_MODULE(bdev_ut);
	g_bdev.bdev.blocklen = 4096;
	g_bdev.bdev.blockcnt = 1024;

	spdk_io_device_register(&g_bdev.io_target, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	spdk_bdev_register(&g_bdev.bdev);
}

static void
unregister_bdev(void)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&g_bdev.bdev, NULL, NULL);
	spdk_io_device_unregister(&g_bdev.io_target, NULL);
	memset(&g_bdev, 0, sizeof(g_bdev));
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

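/*
 * Common test setup: allocate BDEV_UT_NUM_THREADS test threads, initialize the
 *  bdev layer, then register and open the test bdev.
 */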
static void
setup_test(void)
{
	bool done = false;

	allocate_threads(BDEV_UT_NUM_THREADS);
	spdk_bdev_initialize(bdev_init_cb, &done);
	register_bdev();
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
}

static void
finish_cb(void *cb_arg)
{
	g_teardown_done = true;
}

static void
teardown_test(void)
{
	g_teardown_done = false;
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev();
	spdk_bdev_finish(finish_cb, NULL);
	poll_threads();
	CU_ASSERT(g_teardown_done == true);
	g_teardown_done = false;
	free_threads();
}

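/* Sanity check: get and release a single I/O channel on thread 0. */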
static void
basic(void)
{
	setup_test();

	set_thread(0);

	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	spdk_put_io_channel(g_ut_threads[0].ch);

	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 *  the deferred messages for the reset get a chance to
	 *  execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

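/*
 * Queue a second reset behind an in-progress one, then destroy the second
 *  reset's channel: the queued reset must be aborted (failed) while the
 *  original reset stays in progress and later completes successfully.
 */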
static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1, status2;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 *  reset on ch1 which will get queued since there is already a
	 *  reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  This will abort the queued reset.  Check that
	 *  the second reset was completed with failed status.  Also check
	 *  that bdev->reset_in_progress != NULL, since the original reset
	 *  has not been completed yet.  This ensures that the bdev code is
	 *  correctly noticing that the failed reset is *not* the one that
	 *  had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 *  status and that bdev->reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.reset_in_progress == NULL);

	teardown_test();
}

static void
io_during_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

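/*
 * Verify that I/O submitted while a reset is in progress are failed by the
 *  bdev layer, and that I/O submitted with no reset pending complete normally.
 */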
static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test the normal case - submit an I/O on each of two channels (with no resets)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_reset_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_reset_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
	 *  progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_reset_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_reset_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_reset_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress, so these read I/O should complete with failure.  Note that
	 *  we need to poll_threads() since I/O that complete inline have their completions
	 *  deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/* Complete the reset. */
	set_thread(0);
	stub_complete_io(0);

	/* Only poll thread 0.  We should not get a completion yet. */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 *  get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

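/* Count the number of entries on a bdev I/O tailq. */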
static uint32_t
bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
{
	struct spdk_bdev_io *io;
	uint32_t cnt = 0;

	TAILQ_FOREACH(io, tailq, link) {
		cnt++;
	}

	return cnt;
}

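/*
 * Exercise the bdev layer's NOMEM handling: I/O the module completes with
 *  NOMEM must be queued on nomem_io and retried only once enough completions
 *  bring the outstanding count down to nomem_threshold.
 */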
static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->nomem_io));

	/*
	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
	 *  the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->nomem_io));
	first_io = TAILQ_FIRST(&bdev_ch->nomem_io);

	/*
	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
	 *  the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&bdev_ch->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&bdev_ch->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&bdev_ch->nomem_io);
	CU_ASSERT(bdev_ch->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 *  list.
	 */
	stub_complete_io(1);
	CU_ASSERT(bdev_io_tailq_cnt(&bdev_ch->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
	 *  and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&bdev_ch->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&bdev_ch->nomem_io);

	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
	stub_complete_io(1);
	CU_ASSERT(bdev_io_tailq_cnt(&bdev_ch->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 *  were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	poll_threads();
	CU_ASSERT(rc == 0);
	/* This will complete the reset. */
	stub_complete_io(0);

	CU_ASSERT(bdev_io_tailq_cnt(&bdev_ch->nomem_io) == 0);
	CU_ASSERT(bdev_ch->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}