/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "common/lib/ut_multithread.c"

#include "ftl/ftl_io.c"
#include "ftl/ftl_init.c"
#include "ftl/ftl_core.c"
#include "ftl/ftl_band.c"

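/*
 * Stubs for the bdev and tracing APIs pulled in by the FTL sources included
 * above.  They only provide the return values these tests rely on (e.g. a
 * zone size of 1024 blocks and a 4096-byte block size); no real I/O is done.
 */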
DEFINE_STUB(spdk_bdev_io_get_append_location, uint64_t, (struct spdk_bdev_io *bdev_io), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_get_optimal_open_zones, uint32_t, (const struct spdk_bdev *b), 1);
DEFINE_STUB(spdk_bdev_zone_appendv, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, uint64_t zone_id, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *b), 1024);
DEFINE_STUB(spdk_bdev_zone_management, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, uint64_t zone_id, enum spdk_bdev_zone_action action,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(spdk_bdev_read_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		void *buf, uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_write_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		void *buf, uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_write_blocks_with_md, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, void *buf, void *md, uint64_t offset_blocks,
		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_writev_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);
DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 4096);
#if defined(FTL_META_DEBUG)
DEFINE_STUB(ftl_band_validate_md, bool, (struct ftl_band *band), true);
#endif
#if defined(DEBUG)
DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
				     struct ftl_addr addr, size_t addr_cnt));
DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, int limit, size_t num_free));
DEFINE_STUB(ftl_trace_alloc_id, uint64_t, (struct spdk_ftl_dev *dev), 0);
DEFINE_STUB_V(ftl_trace_completion, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
				     enum ftl_trace_completion type));
DEFINE_STUB_V(ftl_trace_wbuf_fill, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
#endif

struct spdk_io_channel *
spdk_bdev_get_io_channel(struct spdk_bdev_desc *bdev_desc)
{
	return spdk_get_io_channel(bdev_desc);
}

static int
channel_create_cb(void *io_device, void *ctx)
{
	return 0;
}

static void
channel_destroy_cb(void *io_device, void *ctx)
{}

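/*
 * Build a minimal spdk_ftl_dev for the tests: a given number of unit-test
 * threads, a core thread, a fake base bdev descriptor, an io_pool for ftl_io
 * allocations and the requested transfer size.
 */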
static struct spdk_ftl_dev *
setup_device(uint32_t num_threads, uint32_t xfer_size)
{
	struct spdk_ftl_dev *dev;
	struct _ftl_io_channel *_ioch;
	struct ftl_io_channel *ioch;
	int rc;

	allocate_threads(num_threads);
	set_thread(0);

	dev = calloc(1, sizeof(*dev));
	SPDK_CU_ASSERT_FATAL(dev != NULL);

	dev->core_thread = spdk_get_thread();
	dev->ioch = calloc(1, sizeof(*_ioch) + sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(dev->ioch != NULL);

	_ioch = (struct _ftl_io_channel *)(dev->ioch + 1);
	ioch = _ioch->ioch = calloc(1, sizeof(*ioch));
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	ioch->elem_size = sizeof(struct ftl_md_io);
	ioch->io_pool = spdk_mempool_create("io-pool", 4096, ioch->elem_size, 0, 0);

	SPDK_CU_ASSERT_FATAL(ioch->io_pool != NULL);

	dev->conf = g_default_conf;
	dev->xfer_size = xfer_size;
	dev->base_bdev_desc = (struct spdk_bdev_desc *)0xdeadbeef;
	spdk_io_device_register(dev->base_bdev_desc, channel_create_cb, channel_destroy_cb, 0, NULL);

	rc = ftl_dev_init_io_channel(dev);
	CU_ASSERT_EQUAL(rc, 0);

	return dev;
}

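/* Tear down everything created by setup_device() and release the test threads. */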
static void
free_device(struct spdk_ftl_dev *dev)
{
	struct ftl_io_channel *ioch;

	ioch = ftl_io_channel_get_ctx(dev->ioch);
	spdk_mempool_free(ioch->io_pool);
	free(ioch);

	spdk_io_device_unregister(dev, NULL);
	spdk_io_device_unregister(dev->base_bdev_desc, NULL);
	free_threads();

	free(dev->ioch_array);
	free(dev->iov_buf);
	free(dev->ioch);
	free(dev);
}

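/*
 * Helpers for the tests below: allocate an ftl_io from the device's io_pool
 * and wire it up with a completion callback that stores the completion status
 * in the provided context.
 */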
static void
setup_io(struct ftl_io *io, struct spdk_ftl_dev *dev, ftl_io_fn cb, void *ctx)
{
	io->dev = dev;
	io->cb_fn = cb;
	io->cb_ctx = ctx;
}

static struct ftl_io *
alloc_io(struct spdk_ftl_dev *dev, ftl_io_fn cb, void *ctx)
{
	struct ftl_io *io;

	io = ftl_io_alloc(dev->ioch);
	SPDK_CU_ASSERT_FATAL(io != NULL);
	setup_io(io, dev, cb, ctx);

	return io;
}

static void
io_complete_cb(struct ftl_io *io, void *ctx, int status)
{
	*(int *)ctx = status;
}

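/*
 * Verify request counting: an ftl_io only reports done once every outstanding
 * request has been decremented, and completing it invokes the callback with
 * the stored status and returns the ftl_io to the io_pool.
 */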
static void
test_completion(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
	struct ftl_io *io;
	int req, status = 0;
	size_t pool_size;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	io = alloc_io(dev, io_complete_cb, &status);
	io->status = -EIO;

#define NUM_REQUESTS 16
	for (req = 0; req < NUM_REQUESTS; ++req) {
		ftl_io_inc_req(io);
		CU_ASSERT_FALSE(ftl_io_done(io));
	}

	CU_ASSERT_EQUAL(io->req_cnt, NUM_REQUESTS);

	for (req = 0; req < (NUM_REQUESTS - 1); ++req) {
		ftl_io_dec_req(io);
		CU_ASSERT_FALSE(ftl_io_done(io));
	}

	CU_ASSERT_EQUAL(io->req_cnt, 1);

	ftl_io_dec_req(io);
	CU_ASSERT_TRUE(ftl_io_done(io));

	ftl_io_complete(io);
	CU_ASSERT_EQUAL(status, -EIO);

	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}

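/*
 * Verify parent/child allocation and freeing: releasing or completing a child
 * does not complete the parent, and the io_pool is back to its original size
 * once the parent completes.
 */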
static void
test_alloc_free(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
	struct ftl_io *parent, *child;
	int parent_status = -1;
	size_t pool_size;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	parent = alloc_io(dev, io_complete_cb, &parent_status);
	SPDK_CU_ASSERT_FATAL(parent != NULL);
	child = ftl_io_alloc_child(parent);
	SPDK_CU_ASSERT_FATAL(child != NULL);

	ftl_io_free(child);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - 1);

	child = ftl_io_alloc_child(parent);
	SPDK_CU_ASSERT_FATAL(child != NULL);
	ftl_io_complete(child);
	CU_ASSERT_EQUAL(parent_status, -1);
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	parent_status = -1;
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	SPDK_CU_ASSERT_FATAL(parent != NULL);
	child = ftl_io_alloc_child(parent);
	SPDK_CU_ASSERT_FATAL(child != NULL);

	ftl_io_free(child);
	CU_ASSERT_EQUAL(parent_status, -1);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - 1);
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}

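/*
 * Verify that a parent with outstanding children is only reported done after
 * all of its children complete, regardless of whether the children or the
 * parent finish their own requests first.
 */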
static void
test_child_requests(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
#define MAX_CHILDREN 16
	struct ftl_io *parent, *child[MAX_CHILDREN];
	int status[MAX_CHILDREN + 1], i;
	size_t pool_size;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	/* Verify correct behaviour when children finish first */
	parent = alloc_io(dev, io_complete_cb, &status[0]);
	parent->status = 0;

	ftl_io_inc_req(parent);
	status[0] = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		status[i + 1] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &status[i + 1]);
		child[i]->status = 0;

		ftl_io_inc_req(child[i]);
	}

	CU_ASSERT_FALSE(ftl_io_done(parent));
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);

	for (i = 0; i < MAX_CHILDREN; ++i) {
		CU_ASSERT_FALSE(ftl_io_done(child[i]));
		ftl_io_dec_req(child[i]);
		CU_ASSERT_TRUE(ftl_io_done(child[i]));
		CU_ASSERT_FALSE(ftl_io_done(parent));

		ftl_io_complete(child[i]);
		CU_ASSERT_FALSE(ftl_io_done(parent));
		CU_ASSERT_EQUAL(status[i + 1], 0);
	}

	CU_ASSERT_EQUAL(status[0], -1);

	ftl_io_dec_req(parent);
	CU_ASSERT_EQUAL(parent->req_cnt, 0);
	CU_ASSERT_TRUE(ftl_io_done(parent));

	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(status[0], 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* Verify correct behaviour when parent finishes first */
	parent = alloc_io(dev, io_complete_cb, &status[0]);
	parent->status = 0;

	ftl_io_inc_req(parent);
	status[0] = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		status[i + 1] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &status[i + 1]);
		child[i]->status = 0;

		ftl_io_inc_req(child[i]);
	}

	CU_ASSERT_FALSE(ftl_io_done(parent));
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);

	ftl_io_dec_req(parent);
	CU_ASSERT_TRUE(ftl_io_done(parent));
	CU_ASSERT_EQUAL(parent->req_cnt, 0);

	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(status[0], -1);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);

	for (i = 0; i < MAX_CHILDREN; ++i) {
		CU_ASSERT_FALSE(ftl_io_done(child[i]));
		ftl_io_dec_req(child[i]);
		CU_ASSERT_TRUE(ftl_io_done(child[i]));

		ftl_io_complete(child[i]);
		CU_ASSERT_EQUAL(status[i + 1], 0);
	}

	CU_ASSERT_EQUAL(status[0], 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}

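/*
 * Verify status propagation: a parent with status 0 inherits the status of
 * the first failing child to complete, while a parent that already carries a
 * non-zero status keeps it whether its children succeed or fail.
 */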
static void
test_child_status(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
	struct ftl_io *parent, *child[2];
	int parent_status, child_status[2];
	size_t pool_size, i;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	/* Verify the first child error to complete is returned by the parent */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = 0;

	for (i = 0; i < 2; ++i) {
		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
	}

	child[0]->status = -3;
	child[1]->status = -4;

	ftl_io_complete(child[1]);
	ftl_io_complete(child[0]);
	ftl_io_complete(parent);

	CU_ASSERT_EQUAL(child_status[0], -3);
	CU_ASSERT_EQUAL(child_status[1], -4);
	CU_ASSERT_EQUAL(parent_status, -4);

	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* Verify parent's status is kept if children finish successfully */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = -1;

	for (i = 0; i < 2; ++i) {
		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
	}

	child[0]->status = 0;
	child[1]->status = 0;

	ftl_io_complete(parent);
	ftl_io_complete(child[1]);
	ftl_io_complete(child[0]);

	CU_ASSERT_EQUAL(child_status[0], 0);
	CU_ASSERT_EQUAL(child_status[1], 0);
	CU_ASSERT_EQUAL(parent_status, -1);

	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* Verify parent's status is kept if children fail too */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = -1;

	for (i = 0; i < 2; ++i) {
		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
	}

	child[0]->status = -3;
	child[1]->status = -4;

	ftl_io_complete(parent);
	ftl_io_complete(child[1]);
	ftl_io_complete(child[0]);

	CU_ASSERT_EQUAL(child_status[0], -3);
	CU_ASSERT_EQUAL(child_status[1], -4);
	CU_ASSERT_EQUAL(parent_status, -1);

	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}

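/*
 * Same idea as test_child_requests, but with a parent/child/grandchild
 * hierarchy: completion only propagates upwards once every descendant has
 * finished, in either completion order.
 */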
static void
test_multi_generation(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
#define MAX_GRAND_CHILDREN	32
	struct ftl_io *parent, *child[MAX_CHILDREN], *gchild[MAX_CHILDREN * MAX_GRAND_CHILDREN];
	int parent_status, child_status[MAX_CHILDREN], gchild_status[MAX_CHILDREN * MAX_GRAND_CHILDREN];
	size_t pool_size;
	int i, j;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	/* Verify correct behaviour when children finish first */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = 0;

	ftl_io_inc_req(parent);
	parent_status = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		child_status[i] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
		child[i]->status = 0;

		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = ftl_io_alloc_child(child[i]);
			SPDK_CU_ASSERT_FATAL(io != NULL);

			gchild[i * MAX_GRAND_CHILDREN + j] = io;
			gchild_status[i * MAX_GRAND_CHILDREN + j] = -1;
			setup_io(io, dev, io_complete_cb, &gchild_status[i * MAX_GRAND_CHILDREN + j]);
			io->status = 0;

			ftl_io_inc_req(io);
		}

		ftl_io_inc_req(child[i]);
	}

	for (i = 0; i < MAX_CHILDREN; ++i) {
		CU_ASSERT_FALSE(ftl_io_done(child[i]));
		ftl_io_dec_req(child[i]);
		CU_ASSERT_TRUE(ftl_io_done(child[i]));

		ftl_io_complete(child[i]);
		CU_ASSERT_FALSE(ftl_io_done(parent));
		CU_ASSERT_EQUAL(child_status[i], -1);

		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = gchild[i * MAX_GRAND_CHILDREN + j];

			CU_ASSERT_FALSE(ftl_io_done(io));
			ftl_io_dec_req(io);
			CU_ASSERT_TRUE(ftl_io_done(io));
			ftl_io_complete(io);
			CU_ASSERT_EQUAL(gchild_status[i * MAX_GRAND_CHILDREN + j], 0);
		}

		CU_ASSERT_EQUAL(child_status[i], 0);
	}

	ftl_io_dec_req(parent);
	CU_ASSERT_TRUE(ftl_io_done(parent));
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* Verify correct behaviour when parents finish first */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = 0;
	parent_status = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		child_status[i] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
		child[i]->status = 0;

		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = ftl_io_alloc_child(child[i]);
			SPDK_CU_ASSERT_FATAL(io != NULL);

			gchild[i * MAX_GRAND_CHILDREN + j] = io;
			gchild_status[i * MAX_GRAND_CHILDREN + j] = -1;
			setup_io(io, dev, io_complete_cb, &gchild_status[i * MAX_GRAND_CHILDREN + j]);
			io->status = 0;

			ftl_io_inc_req(io);
		}

		CU_ASSERT_TRUE(ftl_io_done(child[i]));
		ftl_io_complete(child[i]);
		CU_ASSERT_EQUAL(child_status[i], -1);
	}

	CU_ASSERT_TRUE(ftl_io_done(parent));
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, -1);

	for (i = 0; i < MAX_CHILDREN; ++i) {
		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = gchild[i * MAX_GRAND_CHILDREN + j];

			CU_ASSERT_FALSE(ftl_io_done(io));
			ftl_io_dec_req(io);
			CU_ASSERT_TRUE(ftl_io_done(io));
			ftl_io_complete(io);
			CU_ASSERT_EQUAL(gchild_status[i * MAX_GRAND_CHILDREN + j], 0);
		}

		CU_ASSERT_EQUAL(child_status[i], 0);
	}

	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}

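/*
 * Verify IO channel creation: up to max_io_channels channels can be created,
 * requests beyond that limit fail, and channel indices are reused after a
 * channel is released.
 */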
static void
test_io_channel_create(void)
{
	struct spdk_ftl_dev *dev;
	struct spdk_io_channel *ioch, **ioch_array;
	struct ftl_io_channel *ftl_ioch;
	uint32_t ioch_idx;

	dev = setup_device(g_default_conf.max_io_channels + 1, 16);

	ioch = spdk_get_io_channel(dev);
	CU_ASSERT(ioch != NULL);
	CU_ASSERT_EQUAL(dev->num_io_channels, 1);
	spdk_put_io_channel(ioch);
	poll_threads();
	CU_ASSERT_EQUAL(dev->num_io_channels, 0);

	ioch_array = calloc(dev->conf.max_io_channels, sizeof(*ioch_array));
	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);

	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch != NULL);
		poll_threads();

		ftl_ioch = ftl_io_channel_get_ctx(ioch);
		CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
	}

	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);
	set_thread(dev->conf.max_io_channels);
	ioch = spdk_get_io_channel(dev);
	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);
	CU_ASSERT_EQUAL(ioch, NULL);

	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx += 2) {
		set_thread(ioch_idx);
		spdk_put_io_channel(ioch_array[ioch_idx]);
		ioch_array[ioch_idx] = NULL;
		poll_threads();
	}

	poll_threads();
	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels / 2);

	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
		set_thread(ioch_idx);

		if (ioch_array[ioch_idx] == NULL) {
			ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
			SPDK_CU_ASSERT_FATAL(ioch != NULL);
			poll_threads();

			ftl_ioch = ftl_io_channel_get_ctx(ioch);
			CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
		}
	}

	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
		set_thread(ioch_idx);
		spdk_put_io_channel(ioch_array[ioch_idx]);
	}

	poll_threads();
	CU_ASSERT_EQUAL(dev->num_io_channels, 0);

	free(ioch_array);
	free_device(dev);
}

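/*
 * Verify write buffer entry accounting per IO channel: internal requests may
 * consume the whole buffer, user requests are capped by qdepth_limit, and
 * released entries can be acquired again.
 */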
static void
test_acquire_entry(void)
{
	struct spdk_ftl_dev *dev;
	struct spdk_io_channel *ioch, **ioch_array;
	struct ftl_io_channel *ftl_ioch;
	struct ftl_wbuf_entry *entry, **entries;
	uint32_t num_entries, num_io_channels = 2;
	uint32_t ioch_idx, entry_idx, tmp_idx;

	dev = setup_device(num_io_channels, 16);

	num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
	entries = calloc(num_entries * num_io_channels, sizeof(*entries));
	SPDK_CU_ASSERT_FATAL(entries != NULL);
	ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);

	/* Acquire the whole buffer as internal entries */
	entry_idx = 0;
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
		poll_threads();

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}

		entry = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
		CU_ASSERT(entry == NULL);
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
			entries[ioch_idx * num_entries + tmp_idx] = NULL;
		}

		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	/* Do the same for user entries */
	entry_idx = 0;
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
		poll_threads();

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}

		entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entry == NULL);
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
			entries[ioch_idx * num_entries + tmp_idx] = NULL;
		}

		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	/* Verify limits */
	entry_idx = 0;
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
		poll_threads();

		ftl_ioch->qdepth_limit = num_entries / 2;
		for (tmp_idx = 0; tmp_idx < num_entries / 2; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}

		entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entry == NULL);

		for (; tmp_idx < num_entries; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
			entries[ioch_idx * num_entries + tmp_idx] = NULL;
		}

		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	/* Verify acquire/release */
	set_thread(0);
	ioch = spdk_get_io_channel(dev);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	ftl_ioch = ftl_io_channel_get_ctx(ioch);
	poll_threads();

	for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
		entries[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entries[entry_idx] != NULL);
	}

	entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
	CU_ASSERT(entry == NULL);

	for (entry_idx = 0; entry_idx < num_entries / 2; ++entry_idx) {
		ftl_release_wbuf_entry(entries[entry_idx]);
		entries[entry_idx] = NULL;
	}

	for (; entry_idx < num_entries; ++entry_idx) {
		entries[entry_idx - num_entries / 2] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entries[entry_idx - num_entries / 2] != NULL);
	}

	for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
		ftl_release_wbuf_entry(entries[entry_idx]);
		entries[entry_idx] = NULL;
	}

	spdk_put_io_channel(ioch);
	poll_threads();

	free(ioch_array);
	free(entries);
	free_device(dev);
}

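/*
 * Verify batch assembly from the per-channel submit queues: channels are
 * drained in round-robin fashion, a single batch may mix entries from
 * multiple channels, and batches on dev->pending_batches are picked first.
 */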
static void
test_submit_batch(void)
{
	struct spdk_ftl_dev *dev;
	struct spdk_io_channel **_ioch_array;
	struct ftl_io_channel **ioch_array;
	struct ftl_wbuf_entry *entry;
	struct ftl_batch *batch, *batch2;
	uint32_t num_io_channels = 16;
	uint32_t ioch_idx, tmp_idx, entry_idx;
	uint64_t ioch_bitmap;
	size_t num_entries;

	dev = setup_device(num_io_channels, num_io_channels);

	_ioch_array = calloc(num_io_channels, sizeof(*_ioch_array));
	SPDK_CU_ASSERT_FATAL(_ioch_array != NULL);
	ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		_ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(_ioch_array[ioch_idx] != NULL);
		ioch_array[ioch_idx] = ftl_io_channel_get_ctx(_ioch_array[ioch_idx]);
		poll_threads();
	}

	/* Make sure the IO channels are not starved and entries are popped in RR fashion */
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		for (entry_idx = 0; entry_idx < dev->xfer_size; ++entry_idx) {
			entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
			SPDK_CU_ASSERT_FATAL(entry != NULL);

			num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
							(void **)&entry, 1, NULL);
			CU_ASSERT(num_entries == 1);
		}
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		for (tmp_idx = 0; tmp_idx < ioch_idx; ++tmp_idx) {
			set_thread(tmp_idx);

			while (spdk_ring_count(ioch_array[tmp_idx]->submit_queue) < dev->xfer_size) {
				entry = ftl_acquire_wbuf_entry(ioch_array[tmp_idx], 0);
				SPDK_CU_ASSERT_FATAL(entry != NULL);

				num_entries = spdk_ring_enqueue(ioch_array[tmp_idx]->submit_queue,
								(void **)&entry, 1, NULL);
				CU_ASSERT(num_entries == 1);
			}
		}

		set_thread(ioch_idx);

		batch = ftl_get_next_batch(dev);
		SPDK_CU_ASSERT_FATAL(batch != NULL);

		TAILQ_FOREACH(entry, &batch->entries, tailq) {
			CU_ASSERT(entry->ioch == ioch_array[ioch_idx]);
		}

		ftl_release_batch(dev, batch);

		CU_ASSERT(spdk_ring_count(ioch_array[ioch_idx]->free_queue) ==
			  ioch_array[ioch_idx]->num_entries);
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels - 1; ++ioch_idx) {
		batch = ftl_get_next_batch(dev);
		SPDK_CU_ASSERT_FATAL(batch != NULL);
		ftl_release_batch(dev, batch);
	}

	/* Make sure the batch can be built from entries from any IO channel */
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
		SPDK_CU_ASSERT_FATAL(entry != NULL);

		num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
						(void **)&entry, 1, NULL);
		CU_ASSERT(num_entries == 1);
	}

	batch = ftl_get_next_batch(dev);
	SPDK_CU_ASSERT_FATAL(batch != NULL);

	ioch_bitmap = 0;
	TAILQ_FOREACH(entry, &batch->entries, tailq) {
		ioch_bitmap |= 1 << entry->ioch->index;
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		CU_ASSERT((ioch_bitmap & (1 << ioch_array[ioch_idx]->index)) != 0);
	}
	ftl_release_batch(dev, batch);

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		CU_ASSERT(spdk_ring_count(ioch_array[ioch_idx]->free_queue) ==
			  ioch_array[ioch_idx]->num_entries);
	}

	/* Make sure pending batches are prioritized */
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		while (spdk_ring_count(ioch_array[ioch_idx]->submit_queue) < dev->xfer_size) {
			entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
			SPDK_CU_ASSERT_FATAL(entry != NULL);
			num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
							(void **)&entry, 1, NULL);
			CU_ASSERT(num_entries == 1);
		}
	}

	batch = ftl_get_next_batch(dev);
	SPDK_CU_ASSERT_FATAL(batch != NULL);

	TAILQ_INSERT_TAIL(&dev->pending_batches, batch, tailq);
	batch2 = ftl_get_next_batch(dev);
	SPDK_CU_ASSERT_FATAL(batch2 != NULL);

	CU_ASSERT(TAILQ_EMPTY(&dev->pending_batches));
	CU_ASSERT(batch == batch2);

	batch = ftl_get_next_batch(dev);
	SPDK_CU_ASSERT_FATAL(batch != NULL);

	ftl_release_batch(dev, batch);
	ftl_release_batch(dev, batch2);

	for (ioch_idx = 2; ioch_idx < num_io_channels; ++ioch_idx) {
		batch = ftl_get_next_batch(dev);
		SPDK_CU_ASSERT_FATAL(batch != NULL);
		ftl_release_batch(dev, batch);
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		spdk_put_io_channel(_ioch_array[ioch_idx]);
	}
	poll_threads();

	free(_ioch_array);
	free(ioch_array);
	free_device(dev);
}

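/*
 * Verify the mapping between write buffer entries and cached ftl_addr values:
 * the cache offset encodes both the entry index and the IO channel index, and
 * the translation holds after channels are released and recreated.
 */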
static void
test_entry_address(void)
{
	struct spdk_ftl_dev *dev;
	struct spdk_io_channel **ioch_array;
	struct ftl_io_channel *ftl_ioch;
	struct ftl_wbuf_entry **entry_array;
	struct ftl_addr addr;
	uint32_t num_entries, num_io_channels = 7;
	uint32_t ioch_idx, entry_idx;

	dev = setup_device(num_io_channels, num_io_channels);
	ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);

	num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
	entry_array = calloc(num_entries, sizeof(*entry_array));
	SPDK_CU_ASSERT_FATAL(entry_array != NULL);

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		poll_threads();
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			entry_array[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			SPDK_CU_ASSERT_FATAL(entry_array[entry_idx] != NULL);

			addr = ftl_get_addr_from_entry(entry_array[entry_idx]);
			CU_ASSERT(addr.cached == 1);
			CU_ASSERT((addr.cache_offset >> dev->ioch_shift) == entry_idx);
			CU_ASSERT((addr.cache_offset & ((1 << dev->ioch_shift) - 1)) == ioch_idx);
			CU_ASSERT(entry_array[entry_idx] == ftl_get_entry_from_addr(dev, addr));
		}

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			ftl_release_wbuf_entry(entry_array[entry_idx]);
		}
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ioch_idx += 2) {
		set_thread(ioch_idx);
		spdk_put_io_channel(ioch_array[ioch_idx]);
		ioch_array[ioch_idx] = NULL;
	}
	poll_threads();

	for (ioch_idx = 1; ioch_idx < num_io_channels; ioch_idx += 2) {
		set_thread(ioch_idx);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			entry_array[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			SPDK_CU_ASSERT_FATAL(entry_array[entry_idx] != NULL);

			addr = ftl_get_addr_from_entry(entry_array[entry_idx]);
			CU_ASSERT(addr.cached == 1);
			CU_ASSERT(entry_array[entry_idx] == ftl_get_entry_from_addr(dev, addr));
		}

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			ftl_release_wbuf_entry(entry_array[entry_idx]);
		}
	}

	for (ioch_idx = 1; ioch_idx < num_io_channels; ioch_idx += 2) {
		set_thread(ioch_idx);
		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	free(entry_array);
	free(ioch_array);
	free_device(dev);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("ftl_io_suite", NULL, NULL);

	CU_ADD_TEST(suite, test_completion);
	CU_ADD_TEST(suite, test_alloc_free);
	CU_ADD_TEST(suite, test_child_requests);
	CU_ADD_TEST(suite, test_child_status);
	CU_ADD_TEST(suite, test_multi_generation);
	CU_ADD_TEST(suite, test_io_channel_create);
	CU_ADD_TEST(suite, test_acquire_entry);
	CU_ADD_TEST(suite, test_submit_batch);
	CU_ADD_TEST(suite, test_entry_address);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}