/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "common/lib/ut_multithread.c"

#include "ftl/ftl_io.c"
#include "ftl/ftl_init.c"
#include "ftl/ftl_core.c"
#include "ftl/ftl_band.c"

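/*
 * Stub out the trace and bdev dependencies pulled in by the FTL sources included
 * above. The stubs return fixed values (e.g. 1024-block zones, 4096B block size),
 * which is sufficient for the ftl_io paths exercised by these tests.
 */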
DEFINE_STUB(ftl_trace_alloc_id, uint64_t, (struct spdk_ftl_dev *dev), 0);
DEFINE_STUB_V(ftl_trace_completion, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
				     enum ftl_trace_completion type));
DEFINE_STUB(spdk_bdev_io_get_append_location, uint64_t, (struct spdk_bdev_io *bdev_io), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_get_optimal_open_zones, uint32_t, (const struct spdk_bdev *b), 1);
DEFINE_STUB(spdk_bdev_zone_appendv, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, uint64_t zone_id, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *b), 1024);
DEFINE_STUB(spdk_bdev_zone_management, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, uint64_t zone_id, enum spdk_bdev_zone_action action,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
				     struct ftl_addr addr, size_t addr_cnt));
DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, int limit, size_t num_free));
DEFINE_STUB(spdk_bdev_read_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		void *buf, uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_write_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		void *buf, uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_write_blocks_with_md, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, void *buf, void *md, uint64_t offset_blocks,
		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_writev_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);
DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 4096);
#if defined(FTL_META_DEBUG)
DEFINE_STUB(ftl_band_validate_md, bool, (struct ftl_band *band), true);
#endif
DEFINE_STUB_V(ftl_trace_wbuf_fill, (struct spdk_ftl_dev *dev, const struct ftl_io *io));

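/* Return a real SPDK I/O channel for the fake base bdev descriptor registered in setup_device() */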
struct spdk_io_channel *
spdk_bdev_get_io_channel(struct spdk_bdev_desc *bdev_desc)
{
	return spdk_get_io_channel(bdev_desc);
}

static int
channel_create_cb(void *io_device, void *ctx)
{
	return 0;
}

static void
channel_destroy_cb(void *io_device, void *ctx)
{}

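/*
 * Build a minimal spdk_ftl_dev for the tests: spawn num_threads test threads,
 * hand-craft the device-level I/O channel with a 4096-element ftl_md_io pool,
 * register a fake base bdev descriptor as an io_device and initialize the FTL
 * I/O channel state via ftl_dev_init_io_channel().
 */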
static struct spdk_ftl_dev *
setup_device(uint32_t num_threads, uint32_t xfer_size)
{
	struct spdk_ftl_dev *dev;
	struct _ftl_io_channel *_ioch;
	struct ftl_io_channel *ioch;
	int rc;

	allocate_threads(num_threads);
	set_thread(0);

	dev = calloc(1, sizeof(*dev));
	SPDK_CU_ASSERT_FATAL(dev != NULL);

	dev->core_thread = spdk_get_thread();
	dev->ioch = calloc(1, sizeof(*_ioch) + sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(dev->ioch != NULL);

	_ioch = (struct _ftl_io_channel *)(dev->ioch + 1);
	ioch = _ioch->ioch = calloc(1, sizeof(*ioch));
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	ioch->elem_size = sizeof(struct ftl_md_io);
	ioch->io_pool = spdk_mempool_create("io-pool", 4096, ioch->elem_size, 0, 0);

	SPDK_CU_ASSERT_FATAL(ioch->io_pool != NULL);

	dev->conf = g_default_conf;
	dev->xfer_size = xfer_size;
	dev->base_bdev_desc = (struct spdk_bdev_desc *)0xdeadbeef;
	spdk_io_device_register(dev->base_bdev_desc, channel_create_cb, channel_destroy_cb, 0, NULL);

	rc = ftl_dev_init_io_channel(dev);
	CU_ASSERT_EQUAL(rc, 0);

	return dev;
}

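/* Tear down everything created by setup_device() and release the test threads */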
static void
free_device(struct spdk_ftl_dev *dev)
{
	struct ftl_io_channel *ioch;

	ioch = ftl_io_channel_get_ctx(dev->ioch);
	spdk_mempool_free(ioch->io_pool);
	free(ioch);

	spdk_io_device_unregister(dev, NULL);
	spdk_io_device_unregister(dev->base_bdev_desc, NULL);
	free_threads();

	free(dev->ioch_array);
	free(dev->iov_buf);
	free(dev->ioch);
	free(dev);
}

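/* Helpers for allocating an ftl_io from the device's pool and wiring up its completion callback */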
static void
setup_io(struct ftl_io *io, struct spdk_ftl_dev *dev, ftl_io_fn cb, void *ctx)
{
	io->dev = dev;
	io->cb_fn = cb;
	io->cb_ctx = ctx;
}

static struct ftl_io *
alloc_io(struct spdk_ftl_dev *dev, ftl_io_fn cb, void *ctx)
{
	struct ftl_io *io;

	io = ftl_io_alloc(dev->ioch);
	SPDK_CU_ASSERT_FATAL(io != NULL);
	setup_io(io, dev, cb, ctx);

	return io;
}

static void
io_complete_cb(struct ftl_io *io, void *ctx, int status)
{
	*(int *)ctx = status;
}

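/*
 * Exercise the request counting logic: an ftl_io is only done once all of its
 * outstanding requests have completed, and ftl_io_complete() reports the stored
 * status and returns the structure to the I/O pool.
 */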
181 test_completion(void)
182 {
183 	struct spdk_ftl_dev *dev;
184 	struct ftl_io_channel *ioch;
185 	struct ftl_io *io;
186 	int req, status = 0;
187 	size_t pool_size;
188 
189 	dev = setup_device(1, 16);
190 	ioch = ftl_io_channel_get_ctx(dev->ioch);
191 	pool_size = spdk_mempool_count(ioch->io_pool);
192 
193 	io = alloc_io(dev, io_complete_cb, &status);
194 	io->status = -EIO;
195 
196 #define NUM_REQUESTS 16
197 	for (req = 0; req < NUM_REQUESTS; ++req) {
198 		ftl_io_inc_req(io);
199 		CU_ASSERT_FALSE(ftl_io_done(io));
200 	}
201 
202 	CU_ASSERT_EQUAL(io->req_cnt, NUM_REQUESTS);
203 
204 	for (req = 0; req < (NUM_REQUESTS - 1); ++req) {
205 		ftl_io_dec_req(io);
206 		CU_ASSERT_FALSE(ftl_io_done(io));
207 	}
208 
209 	CU_ASSERT_EQUAL(io->req_cnt, 1);
210 
211 	ftl_io_dec_req(io);
212 	CU_ASSERT_TRUE(ftl_io_done(io));
213 
214 	ftl_io_complete(io);
215 	CU_ASSERT_EQUAL(status, -EIO);
216 
217 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
218 
219 	free_device(dev);
220 }
221 
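/*
 * Allocate parent and child IOs and verify that freeing or completing a child
 * returns it to the pool without completing the parent, while completing the
 * parent releases the remaining resources.
 */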
static void
test_alloc_free(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
	struct ftl_io *parent, *child;
	int parent_status = -1;
	size_t pool_size;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	parent = alloc_io(dev, io_complete_cb, &parent_status);
	SPDK_CU_ASSERT_FATAL(parent != NULL);
	child = ftl_io_alloc_child(parent);
	SPDK_CU_ASSERT_FATAL(child != NULL);

	ftl_io_free(child);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - 1);

	child = ftl_io_alloc_child(parent);
	SPDK_CU_ASSERT_FATAL(child != NULL);
	ftl_io_complete(child);
	CU_ASSERT_EQUAL(parent_status, -1);
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	parent_status = -1;
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	SPDK_CU_ASSERT_FATAL(parent != NULL);
	child = ftl_io_alloc_child(parent);
	SPDK_CU_ASSERT_FATAL(child != NULL);

	ftl_io_free(child);
	CU_ASSERT_EQUAL(parent_status, -1);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - 1);
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}

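/*
 * Verify parent/child completion ordering: a parent with outstanding child IOs
 * is only considered done after all of its children (and its own requests) have
 * completed, regardless of whether the children or the parent finish first.
 */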
static void
test_child_requests(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
#define MAX_CHILDREN 16
	struct ftl_io *parent, *child[MAX_CHILDREN];
	int status[MAX_CHILDREN + 1], i;
	size_t pool_size;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	/* Verify correct behaviour when children finish first */
	parent = alloc_io(dev, io_complete_cb, &status[0]);
	parent->status = 0;

	ftl_io_inc_req(parent);
	status[0] = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		status[i + 1] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &status[i + 1]);
		child[i]->status = 0;

		ftl_io_inc_req(child[i]);
	}

	CU_ASSERT_FALSE(ftl_io_done(parent));
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);

	for (i = 0; i < MAX_CHILDREN; ++i) {
		CU_ASSERT_FALSE(ftl_io_done(child[i]));
		ftl_io_dec_req(child[i]);
		CU_ASSERT_TRUE(ftl_io_done(child[i]));
		CU_ASSERT_FALSE(ftl_io_done(parent));

		ftl_io_complete(child[i]);
		CU_ASSERT_FALSE(ftl_io_done(parent));
		CU_ASSERT_EQUAL(status[i + 1], 0);
	}

	CU_ASSERT_EQUAL(status[0], -1);

	ftl_io_dec_req(parent);
	CU_ASSERT_EQUAL(parent->req_cnt, 0);
	CU_ASSERT_TRUE(ftl_io_done(parent));

	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(status[0], 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* Verify correct behaviour when parent finishes first */
	parent = alloc_io(dev, io_complete_cb, &status[0]);
	parent->status = 0;

	ftl_io_inc_req(parent);
	status[0] = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		status[i + 1] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &status[i + 1]);
		child[i]->status = 0;

		ftl_io_inc_req(child[i]);
	}

	CU_ASSERT_FALSE(ftl_io_done(parent));
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);

	ftl_io_dec_req(parent);
	CU_ASSERT_TRUE(ftl_io_done(parent));
	CU_ASSERT_EQUAL(parent->req_cnt, 0);

	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(status[0], -1);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);

	for (i = 0; i < MAX_CHILDREN; ++i) {
		CU_ASSERT_FALSE(ftl_io_done(child[i]));
		ftl_io_dec_req(child[i]);
		CU_ASSERT_TRUE(ftl_io_done(child[i]));

		ftl_io_complete(child[i]);
		CU_ASSERT_EQUAL(status[i + 1], 0);
	}

	CU_ASSERT_EQUAL(status[0], 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}

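/*
 * Verify status propagation: a failing child's status is propagated to a parent
 * that would otherwise succeed (the first error reported wins), while a parent
 * that already carries an error keeps its own status.
 */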
static void
test_child_status(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
	struct ftl_io *parent, *child[2];
	int parent_status, child_status[2];
	size_t pool_size, i;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	/* Verify the first error is returned by the parent */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = 0;

	for (i = 0; i < 2; ++i) {
		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
	}

	child[0]->status = -3;
	child[1]->status = -4;

	ftl_io_complete(child[1]);
	ftl_io_complete(child[0]);
	ftl_io_complete(parent);

	CU_ASSERT_EQUAL(child_status[0], -3);
	CU_ASSERT_EQUAL(child_status[1], -4);
	CU_ASSERT_EQUAL(parent_status, -4);

	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* Verify parent's status is kept if children finish successfully */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = -1;

	for (i = 0; i < 2; ++i) {
		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
	}

	child[0]->status = 0;
	child[1]->status = 0;

	ftl_io_complete(parent);
	ftl_io_complete(child[1]);
	ftl_io_complete(child[0]);

	CU_ASSERT_EQUAL(child_status[0], 0);
	CU_ASSERT_EQUAL(child_status[1], 0);
	CU_ASSERT_EQUAL(parent_status, -1);

	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* Verify parent's status is kept if children fail too */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = -1;

	for (i = 0; i < 2; ++i) {
		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
	}

	child[0]->status = -3;
	child[1]->status = -4;

	ftl_io_complete(parent);
	ftl_io_complete(child[1]);
	ftl_io_complete(child[0]);

	CU_ASSERT_EQUAL(child_status[0], -3);
	CU_ASSERT_EQUAL(child_status[1], -4);
	CU_ASSERT_EQUAL(parent_status, -1);

	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}

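/*
 * Same parent/child checks across three generations (parent, children and
 * grandchildren): no IO in an upper generation reports completion before all of
 * its descendants have finished.
 */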
static void
test_multi_generation(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
#define MAX_GRAND_CHILDREN	32
	struct ftl_io *parent, *child[MAX_CHILDREN], *gchild[MAX_CHILDREN * MAX_GRAND_CHILDREN];
	int parent_status, child_status[MAX_CHILDREN], gchild_status[MAX_CHILDREN * MAX_GRAND_CHILDREN];
	size_t pool_size;
	int i, j;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	/* Verify correct behaviour when children finish first */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = 0;

	ftl_io_inc_req(parent);
	parent_status = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		child_status[i] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
		child[i]->status = 0;

		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = ftl_io_alloc_child(child[i]);
			SPDK_CU_ASSERT_FATAL(io != NULL);

			gchild[i * MAX_GRAND_CHILDREN + j] = io;
			gchild_status[i * MAX_GRAND_CHILDREN + j] = -1;
			setup_io(io, dev, io_complete_cb, &gchild_status[i * MAX_GRAND_CHILDREN + j]);
			io->status = 0;

			ftl_io_inc_req(io);
		}

		ftl_io_inc_req(child[i]);
	}

	for (i = 0; i < MAX_CHILDREN; ++i) {
		CU_ASSERT_FALSE(ftl_io_done(child[i]));
		ftl_io_dec_req(child[i]);
		CU_ASSERT_TRUE(ftl_io_done(child[i]));

		ftl_io_complete(child[i]);
		CU_ASSERT_FALSE(ftl_io_done(parent));
		CU_ASSERT_EQUAL(child_status[i], -1);

		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = gchild[i * MAX_GRAND_CHILDREN + j];

			CU_ASSERT_FALSE(ftl_io_done(io));
			ftl_io_dec_req(io);
			CU_ASSERT_TRUE(ftl_io_done(io));
			ftl_io_complete(io);
			CU_ASSERT_EQUAL(gchild_status[i * MAX_GRAND_CHILDREN + j], 0);
		}

		CU_ASSERT_EQUAL(child_status[i], 0);
	}

	ftl_io_dec_req(parent);
	CU_ASSERT_TRUE(ftl_io_done(parent));
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* Verify correct behaviour when parents finish first */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = 0;
	parent_status = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		child_status[i] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
		child[i]->status = 0;

		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = ftl_io_alloc_child(child[i]);
			SPDK_CU_ASSERT_FATAL(io != NULL);

			gchild[i * MAX_GRAND_CHILDREN + j] = io;
			gchild_status[i * MAX_GRAND_CHILDREN + j] = -1;
			setup_io(io, dev, io_complete_cb, &gchild_status[i * MAX_GRAND_CHILDREN + j]);
			io->status = 0;

			ftl_io_inc_req(io);
		}

		CU_ASSERT_TRUE(ftl_io_done(child[i]));
		ftl_io_complete(child[i]);
		CU_ASSERT_EQUAL(child_status[i], -1);
	}

	CU_ASSERT_TRUE(ftl_io_done(parent));
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, -1);

	for (i = 0; i < MAX_CHILDREN; ++i) {
		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = gchild[i * MAX_GRAND_CHILDREN + j];

			CU_ASSERT_FALSE(ftl_io_done(io));
			ftl_io_dec_req(io);
			CU_ASSERT_TRUE(ftl_io_done(io));
			ftl_io_complete(io);
			CU_ASSERT_EQUAL(gchild_status[i * MAX_GRAND_CHILDREN + j], 0);
		}

		CU_ASSERT_EQUAL(child_status[i], 0);
	}

	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}

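/*
 * Verify I/O channel creation: channels can be created up to
 * conf.max_io_channels, creation beyond the limit fails, and released channel
 * indices are reused by subsequently created channels.
 */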
static void
test_io_channel_create(void)
{
	struct spdk_ftl_dev *dev;
	struct spdk_io_channel *ioch, **ioch_array;
	struct ftl_io_channel *ftl_ioch;
	uint32_t ioch_idx;

	dev = setup_device(g_default_conf.max_io_channels + 1, 16);

	ioch = spdk_get_io_channel(dev);
	CU_ASSERT(ioch != NULL);
	CU_ASSERT_EQUAL(dev->num_io_channels, 1);
	spdk_put_io_channel(ioch);
	poll_threads();
	CU_ASSERT_EQUAL(dev->num_io_channels, 0);

	ioch_array = calloc(dev->conf.max_io_channels, sizeof(*ioch_array));
	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);

	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch != NULL);
		poll_threads();

		ftl_ioch = ftl_io_channel_get_ctx(ioch);
		CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
	}

	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);
	set_thread(dev->conf.max_io_channels);
	ioch = spdk_get_io_channel(dev);
	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);
	CU_ASSERT_EQUAL(ioch, NULL);

	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx += 2) {
		set_thread(ioch_idx);
		spdk_put_io_channel(ioch_array[ioch_idx]);
		ioch_array[ioch_idx] = NULL;
		poll_threads();
	}

	poll_threads();
	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels / 2);

	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
		set_thread(ioch_idx);

		if (ioch_array[ioch_idx] == NULL) {
			ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
			SPDK_CU_ASSERT_FATAL(ioch != NULL);
			poll_threads();

			ftl_ioch = ftl_io_channel_get_ctx(ioch);
			CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
		}
	}

	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
		set_thread(ioch_idx);
		spdk_put_io_channel(ioch_array[ioch_idx]);
	}

	poll_threads();
	CU_ASSERT_EQUAL(dev->num_io_channels, 0);

	free(ioch_array);
	free_device(dev);
}

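/*
 * Verify write buffer entry accounting per I/O channel: the whole buffer can be
 * drained for either internal or user IOs, user IOs are additionally capped by
 * qdepth_limit (internal IOs are not), and released entries can be re-acquired.
 */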
static void
test_acquire_entry(void)
{
	struct spdk_ftl_dev *dev;
	struct spdk_io_channel *ioch, **ioch_array;
	struct ftl_io_channel *ftl_ioch;
	struct ftl_wbuf_entry *entry, **entries;
	uint32_t num_entries, num_io_channels = 2;
	uint32_t ioch_idx, entry_idx, tmp_idx;

	dev = setup_device(num_io_channels, 16);

	num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
	entries = calloc(num_entries * num_io_channels, sizeof(*entries));
	SPDK_CU_ASSERT_FATAL(entries != NULL);
	ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);

	/* Acquire whole buffer of internal entries */
	entry_idx = 0;
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
		poll_threads();

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}

		entry = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
		CU_ASSERT(entry == NULL);
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
			entries[ioch_idx * num_entries + tmp_idx] = NULL;
		}

		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	/* Do the same for user entries */
	entry_idx = 0;
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
		poll_threads();

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}

		entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entry == NULL);
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
			entries[ioch_idx * num_entries + tmp_idx] = NULL;
		}

		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	/* Verify limits */
	entry_idx = 0;
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
		poll_threads();

		ftl_ioch->qdepth_limit = num_entries / 2;
		for (tmp_idx = 0; tmp_idx < num_entries / 2; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}

		entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entry == NULL);

		for (; tmp_idx < num_entries; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
			entries[ioch_idx * num_entries + tmp_idx] = NULL;
		}

		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	/* Verify acquire/release */
	set_thread(0);
	ioch = spdk_get_io_channel(dev);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	ftl_ioch = ftl_io_channel_get_ctx(ioch);
	poll_threads();

	for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
		entries[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entries[entry_idx] != NULL);
	}

	entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
	CU_ASSERT(entry == NULL);

	for (entry_idx = 0; entry_idx < num_entries / 2; ++entry_idx) {
		ftl_release_wbuf_entry(entries[entry_idx]);
		entries[entry_idx] = NULL;
	}

	for (; entry_idx < num_entries; ++entry_idx) {
		entries[entry_idx - num_entries / 2] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entries[entry_idx - num_entries / 2] != NULL);
	}

	for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
		ftl_release_wbuf_entry(entries[entry_idx]);
		entries[entry_idx] = NULL;
	}

	spdk_put_io_channel(ioch);
	poll_threads();

	free(ioch_array);
	free(entries);
	free_device(dev);
}

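/*
 * Verify batch construction from the per-channel submit queues: channels are
 * drained in round-robin order, a single batch may combine entries from multiple
 * channels, and pending batches are handed out before new ones are built.
 */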
static void
test_submit_batch(void)
{
	struct spdk_ftl_dev *dev;
	struct spdk_io_channel **_ioch_array;
	struct ftl_io_channel **ioch_array;
	struct ftl_wbuf_entry *entry;
	struct ftl_batch *batch, *batch2;
	uint32_t num_io_channels = 16;
	uint32_t ioch_idx, tmp_idx, entry_idx;
	uint64_t ioch_bitmap;
	size_t num_entries;

	dev = setup_device(num_io_channels, num_io_channels);

	_ioch_array = calloc(num_io_channels, sizeof(*_ioch_array));
	SPDK_CU_ASSERT_FATAL(_ioch_array != NULL);
	ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		_ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(_ioch_array[ioch_idx] != NULL);
		ioch_array[ioch_idx] = ftl_io_channel_get_ctx(_ioch_array[ioch_idx]);
		poll_threads();
	}

	/* Make sure the IO channels are not starved and entries are popped in RR fashion */
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		for (entry_idx = 0; entry_idx < dev->xfer_size; ++entry_idx) {
			entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
			SPDK_CU_ASSERT_FATAL(entry != NULL);

			num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
							(void **)&entry, 1, NULL);
			CU_ASSERT(num_entries == 1);
		}
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		for (tmp_idx = 0; tmp_idx < ioch_idx; ++tmp_idx) {
			set_thread(tmp_idx);

			while (spdk_ring_count(ioch_array[tmp_idx]->submit_queue) < dev->xfer_size) {
				entry = ftl_acquire_wbuf_entry(ioch_array[tmp_idx], 0);
				SPDK_CU_ASSERT_FATAL(entry != NULL);

				num_entries = spdk_ring_enqueue(ioch_array[tmp_idx]->submit_queue,
								(void **)&entry, 1, NULL);
				CU_ASSERT(num_entries == 1);
			}
		}

		set_thread(ioch_idx);

		batch = ftl_get_next_batch(dev);
		SPDK_CU_ASSERT_FATAL(batch != NULL);

		TAILQ_FOREACH(entry, &batch->entries, tailq) {
			CU_ASSERT(entry->ioch == ioch_array[ioch_idx]);
		}

		ftl_release_batch(dev, batch);

		CU_ASSERT(spdk_ring_count(ioch_array[ioch_idx]->free_queue) ==
			  ioch_array[ioch_idx]->num_entries);
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels - 1; ++ioch_idx) {
		batch = ftl_get_next_batch(dev);
		SPDK_CU_ASSERT_FATAL(batch != NULL);
		ftl_release_batch(dev, batch);
	}

	/* Make sure the batch can be built from entries from any IO channel */
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
		SPDK_CU_ASSERT_FATAL(entry != NULL);

		num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
						(void **)&entry, 1, NULL);
		CU_ASSERT(num_entries == 1);
	}

	batch = ftl_get_next_batch(dev);
	SPDK_CU_ASSERT_FATAL(batch != NULL);

	ioch_bitmap = 0;
	TAILQ_FOREACH(entry, &batch->entries, tailq) {
		ioch_bitmap |= 1 << entry->ioch->index;
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		CU_ASSERT((ioch_bitmap & (1 << ioch_array[ioch_idx]->index)) != 0);
	}
	ftl_release_batch(dev, batch);

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		CU_ASSERT(spdk_ring_count(ioch_array[ioch_idx]->free_queue) ==
			  ioch_array[ioch_idx]->num_entries);
	}

	/* Make sure pending batches are prioritized */
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		while (spdk_ring_count(ioch_array[ioch_idx]->submit_queue) < dev->xfer_size) {
			entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
			SPDK_CU_ASSERT_FATAL(entry != NULL);
			num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
							(void **)&entry, 1, NULL);
			CU_ASSERT(num_entries == 1);
		}
	}

	batch = ftl_get_next_batch(dev);
	SPDK_CU_ASSERT_FATAL(batch != NULL);

	TAILQ_INSERT_TAIL(&dev->pending_batches, batch, tailq);
	batch2 = ftl_get_next_batch(dev);
	SPDK_CU_ASSERT_FATAL(batch2 != NULL);

	CU_ASSERT(TAILQ_EMPTY(&dev->pending_batches));
	CU_ASSERT(batch == batch2);

	batch = ftl_get_next_batch(dev);
	SPDK_CU_ASSERT_FATAL(batch != NULL);

	ftl_release_batch(dev, batch);
	ftl_release_batch(dev, batch2);

	for (ioch_idx = 2; ioch_idx < num_io_channels; ++ioch_idx) {
		batch = ftl_get_next_batch(dev);
		SPDK_CU_ASSERT_FATAL(batch != NULL);
		ftl_release_batch(dev, batch);
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		spdk_put_io_channel(_ioch_array[ioch_idx]);
	}
	poll_threads();

	free(_ioch_array);
	free(ioch_array);
	free_device(dev);
}

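/*
 * Verify the write buffer entry <-> cache address mapping: the cache offset
 * encodes the channel index in the low ioch_shift bits and the entry index above
 * them, and the mapping remains valid after some channels have been released.
 */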
static void
test_entry_address(void)
{
	struct spdk_ftl_dev *dev;
	struct spdk_io_channel **ioch_array;
	struct ftl_io_channel *ftl_ioch;
	struct ftl_wbuf_entry **entry_array;
	struct ftl_addr addr;
	uint32_t num_entries, num_io_channels = 7;
	uint32_t ioch_idx, entry_idx;

	dev = setup_device(num_io_channels, num_io_channels);
	ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);

	num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
	entry_array = calloc(num_entries, sizeof(*entry_array));
	SPDK_CU_ASSERT_FATAL(entry_array != NULL);

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		poll_threads();
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			entry_array[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			SPDK_CU_ASSERT_FATAL(entry_array[entry_idx] != NULL);

			addr = ftl_get_addr_from_entry(entry_array[entry_idx]);
			CU_ASSERT(addr.cached == 1);
			CU_ASSERT((addr.cache_offset >> dev->ioch_shift) == entry_idx);
			CU_ASSERT((addr.cache_offset & ((1 << dev->ioch_shift) - 1)) == ioch_idx);
			CU_ASSERT(entry_array[entry_idx] == ftl_get_entry_from_addr(dev, addr));
		}

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			ftl_release_wbuf_entry(entry_array[entry_idx]);
		}
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ioch_idx += 2) {
		set_thread(ioch_idx);
		spdk_put_io_channel(ioch_array[ioch_idx]);
		ioch_array[ioch_idx] = NULL;
	}
	poll_threads();

	for (ioch_idx = 1; ioch_idx < num_io_channels; ioch_idx += 2) {
		set_thread(ioch_idx);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			entry_array[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			SPDK_CU_ASSERT_FATAL(entry_array[entry_idx] != NULL);

			addr = ftl_get_addr_from_entry(entry_array[entry_idx]);
			CU_ASSERT(addr.cached == 1);
			CU_ASSERT(entry_array[entry_idx] == ftl_get_entry_from_addr(dev, addr));
		}

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			ftl_release_wbuf_entry(entry_array[entry_idx]);
		}
	}

	for (ioch_idx = 1; ioch_idx < num_io_channels; ioch_idx += 2) {
		set_thread(ioch_idx);
		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	free(entry_array);
	free(ioch_array);
	free_device(dev);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("ftl_io_suite", NULL, NULL);

	CU_ADD_TEST(suite, test_completion);
	CU_ADD_TEST(suite, test_alloc_free);
	CU_ADD_TEST(suite, test_child_requests);
	CU_ADD_TEST(suite, test_child_status);
	CU_ADD_TEST(suite, test_multi_generation);
	CU_ADD_TEST(suite, test_io_channel_create);
	CU_ADD_TEST(suite, test_acquire_entry);
	CU_ADD_TEST(suite, test_submit_batch);
	CU_ADD_TEST(suite, test_entry_address);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}