/* xref: /spdk/test/unit/lib/ftl/ftl_io.c/ftl_io_ut.c (revision 0098e636761237b77c12c30c2408263a5d2260cc) */
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 #include "thread/thread_internal.h"
8 
9 #include "spdk_cunit.h"
10 #include "common/lib/ut_multithread.c"
11 
12 #include "ftl/ftl_io.c"
13 #include "ftl/ftl_init.c"
14 #include "ftl/ftl_core.c"
15 #include "ftl/ftl_band.c"
16 
/*
 * Stubs for the bdev, mempool, relocation, restore and trace dependencies
 * pulled in by #including the FTL sources above.  Only the ftl_io logic is
 * exercised by this suite, so the stubs return benign defaults
 * (e.g. 4096-byte blocks, 1024-block zones/devices, successful submissions).
 */
DEFINE_STUB(spdk_bdev_io_get_append_location, uint64_t, (struct spdk_bdev_io *bdev_io), 0);
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_get_optimal_open_zones, uint32_t, (const struct spdk_bdev *b), 1);
DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
DEFINE_STUB(spdk_bdev_is_md_separate, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_zone_appendv, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, uint64_t zone_id, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *b), 1024);
DEFINE_STUB(spdk_bdev_zone_management, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, uint64_t zone_id, enum spdk_bdev_zone_action action,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");
DEFINE_STUB(spdk_bdev_get_write_unit_size, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), true);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_bdev_module *module), 0);
DEFINE_STUB(spdk_bdev_open_ext, int,
	    (const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
	     void *event_ctx, struct spdk_bdev_desc **desc), 0);
DEFINE_STUB(spdk_bdev_read_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		void *buf, uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_write_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		void *buf, uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_write_blocks_with_md, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, void *buf, void *md, uint64_t offset_blocks,
		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_writev_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);
DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 4096);
DEFINE_STUB(spdk_bdev_get_media_events, size_t,
	    (struct spdk_bdev_desc *bdev_desc, struct spdk_bdev_media_event *events,
	     size_t max_events), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_get_zone_info, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t zone_id, size_t num_zones, struct spdk_bdev_zone_info *info,
	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_mempool_create_ctor, struct spdk_mempool *,
	    (const char *name, size_t count, size_t ele_size, size_t cache_size,
	     int socket_id, spdk_mempool_obj_cb_t *obj_init, void *obj_init_arg), NULL);
DEFINE_STUB(spdk_mempool_obj_iter, uint32_t,
	    (struct spdk_mempool *mp, spdk_mempool_obj_cb_t obj_cb, void *obj_cb_arg), 0);
/* FTL relocation/restore internals are not under test in this suite */
DEFINE_STUB(ftl_reloc, bool, (struct ftl_reloc *reloc), false);
DEFINE_STUB_V(ftl_reloc_add, (struct ftl_reloc *reloc, struct ftl_band *band, size_t offset,
			      size_t num_blocks, int prio, bool defrag));
DEFINE_STUB_V(ftl_reloc_free, (struct ftl_reloc *reloc));
DEFINE_STUB_V(ftl_reloc_halt, (struct ftl_reloc *reloc));
DEFINE_STUB(ftl_reloc_init, struct ftl_reloc *, (struct spdk_ftl_dev *dev), NULL);
DEFINE_STUB(ftl_reloc_is_defrag_active, bool, (const struct ftl_reloc *reloc), false);
DEFINE_STUB(ftl_reloc_is_halted, bool, (const struct ftl_reloc *reloc), false);
DEFINE_STUB_V(ftl_reloc_resume, (struct ftl_reloc *reloc));
DEFINE_STUB(ftl_restore_device, int,
	    (struct ftl_restore *restore, ftl_restore_fn cb, void *cb_arg), 0);
DEFINE_STUB(ftl_restore_md, int,
	    (struct spdk_ftl_dev *dev, ftl_restore_fn cb, void *cb_arg), 0);
DEFINE_STUB_V(ftl_restore_nv_cache,
	      (struct ftl_restore *restore, ftl_restore_fn cb, void *cb_arg));

/* Trace/debug hooks only exist in the corresponding debug builds */
#if defined(FTL_META_DEBUG)
DEFINE_STUB(ftl_band_validate_md, bool, (struct ftl_band *band), true);
#endif
#if defined(DEBUG)
DEFINE_STUB_V(ftl_trace_defrag_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band));
DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
				     struct ftl_addr addr, size_t addr_cnt));
DEFINE_STUB_V(ftl_trace_lba_io_init, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, int limit, size_t num_free));
DEFINE_STUB(ftl_trace_alloc_id, uint64_t, (struct spdk_ftl_dev *dev), 0);
DEFINE_STUB_V(ftl_trace_completion, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
				     enum ftl_trace_completion type));
DEFINE_STUB_V(ftl_trace_wbuf_fill, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
DEFINE_STUB_V(ftl_trace_wbuf_pop, (struct spdk_ftl_dev *dev, const struct ftl_wbuf_entry *entry));
DEFINE_STUB_V(ftl_trace_write_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band));
#endif
#if defined(FTL_META_DEBUG)
DEFINE_STUB_V(ftl_dev_dump_bands, (struct spdk_ftl_dev *dev));
#endif
#if defined(FTL_DUMP_STATS)
DEFINE_STUB_V(ftl_dev_dump_stats, (const struct spdk_ftl_dev *dev));
#endif

/* Persistent-memory stubs for builds with PMDK enabled */
#ifdef SPDK_CONFIG_PMDK
DEFINE_STUB(pmem_map_file, void *,
	    (const char *path, size_t len, int flags, mode_t mode,
	     size_t *mapped_lenp, int *is_pmemp), NULL);
DEFINE_STUB(pmem_unmap, int, (void *addr, size_t len), 0);
DEFINE_STUB(pmem_memset_persist, void *, (void *pmemdest, int c, size_t len), NULL);
#endif
124 
/* Test override: the bdev descriptor doubles as the io_device handle here. */
struct spdk_io_channel *
spdk_bdev_get_io_channel(struct spdk_bdev_desc *bdev_desc)
{
	struct spdk_io_channel *ch = spdk_get_io_channel(bdev_desc);

	return ch;
}
130 
/* No-op channel constructor for the fake base bdev io_device; always succeeds. */
static int
channel_create_cb(void *io_device, void *ctx)
{
	(void)io_device;
	(void)ctx;

	return 0;
}
136 
/* No-op channel destructor matching channel_create_cb. */
static void
channel_destroy_cb(void *io_device, void *ctx)
{
	(void)io_device;
	(void)ctx;
}
140 
/*
 * Allocate and minimally initialize an spdk_ftl_dev for the tests: spawn
 * num_threads test threads, hand-build the device-level IO channel (an
 * spdk_io_channel header immediately followed by its _ftl_io_channel
 * context, mimicking the layout spdk_get_io_channel() produces), create
 * the ftl_io mempool, and register a fake base bdev descriptor as an
 * io_device so per-thread channels can be created on it.
 */
static struct spdk_ftl_dev *
setup_device(uint32_t num_threads, uint32_t xfer_size)
{
	struct spdk_ftl_dev *dev;
	struct _ftl_io_channel *_ioch;
	struct ftl_io_channel *ioch;
	int rc;

	allocate_threads(num_threads);
	set_thread(0);

	dev = calloc(1, sizeof(*dev));
	SPDK_CU_ASSERT_FATAL(dev != NULL);

	dev->core_thread = spdk_get_thread();
	/* Channel header plus its context buffer in one allocation */
	dev->ioch = calloc(1, sizeof(*_ioch) + sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(dev->ioch != NULL);

	/* The _ftl_io_channel context lives right past the spdk_io_channel header */
	_ioch = (struct _ftl_io_channel *)(dev->ioch + 1);
	ioch = _ioch->ioch = calloc(1, sizeof(*ioch));
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	ioch->elem_size = sizeof(struct ftl_md_io);
	ioch->io_pool = spdk_mempool_create("io-pool", 4096, ioch->elem_size, 0, 0);

	SPDK_CU_ASSERT_FATAL(ioch->io_pool != NULL);

	dev->conf = g_default_conf;
	dev->xfer_size = xfer_size;
	/* Placeholder pointer only used as the io_device registration key */
	dev->base_bdev_desc = (struct spdk_bdev_desc *)0xdeadbeef;
	spdk_io_device_register(dev->base_bdev_desc, channel_create_cb, channel_destroy_cb, 0, NULL);

	rc = ftl_dev_init_io_channel(dev);
	CU_ASSERT_EQUAL(rc, 0);

	return dev;
}
178 
/*
 * Tear down a device built by setup_device(): free the ftl_io mempool and
 * channel context, unregister both io_devices, release the test threads,
 * then free the remaining device buffers.
 */
static void
free_device(struct spdk_ftl_dev *dev)
{
	struct ftl_io_channel *ioch;

	ioch = ftl_io_channel_get_ctx(dev->ioch);
	spdk_mempool_free(ioch->io_pool);
	free(ioch);

	spdk_io_device_unregister(dev, NULL);
	spdk_io_device_unregister(dev->base_bdev_desc, NULL);
	/* NOTE(review): relies on free_threads() to drain the async unregisters
	 * before dev is freed below — confirm against ut_multithread.c */
	free_threads();

	free(dev->ioch_array);
	free(dev->iov_buf);
	free(dev->ioch);
	free(dev);
}
197 
198 static void
199 setup_io(struct ftl_io *io, struct spdk_ftl_dev *dev, ftl_io_fn cb, void *ctx)
200 {
201 	io->dev = dev;
202 	io->cb_fn = cb;
203 	io->cb_ctx = ctx;
204 }
205 
206 static struct ftl_io *
207 alloc_io(struct spdk_ftl_dev *dev, ftl_io_fn cb, void *ctx)
208 {
209 	struct ftl_io *io;
210 
211 	io = ftl_io_alloc(dev->ioch);
212 	SPDK_CU_ASSERT_FATAL(io != NULL);
213 	setup_io(io, dev, cb, ctx);
214 
215 	return io;
216 }
217 
/* Completion callback: record the IO's status in the int pointed to by ctx. */
static void
io_complete_cb(struct ftl_io *io, void *ctx, int status)
{
	int *result = ctx;

	(void)io;
	*result = status;
}
223 
224 static void
225 test_completion(void)
226 {
227 	struct spdk_ftl_dev *dev;
228 	struct ftl_io_channel *ioch;
229 	struct ftl_io *io;
230 	int req, status = 0;
231 	size_t pool_size;
232 
233 	dev = setup_device(1, 16);
234 	ioch = ftl_io_channel_get_ctx(dev->ioch);
235 	pool_size = spdk_mempool_count(ioch->io_pool);
236 
237 	io = alloc_io(dev, io_complete_cb, &status);
238 	io->status = -EIO;
239 
240 #define NUM_REQUESTS 16
241 	for (req = 0; req < NUM_REQUESTS; ++req) {
242 		ftl_io_inc_req(io);
243 		CU_ASSERT_FALSE(ftl_io_done(io));
244 	}
245 
246 	CU_ASSERT_EQUAL(io->req_cnt, NUM_REQUESTS);
247 
248 	for (req = 0; req < (NUM_REQUESTS - 1); ++req) {
249 		ftl_io_dec_req(io);
250 		CU_ASSERT_FALSE(ftl_io_done(io));
251 	}
252 
253 	CU_ASSERT_EQUAL(io->req_cnt, 1);
254 
255 	ftl_io_dec_req(io);
256 	CU_ASSERT_TRUE(ftl_io_done(io));
257 
258 	ftl_io_complete(io);
259 	CU_ASSERT_EQUAL(status, -EIO);
260 
261 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
262 
263 	free_device(dev);
264 }
265 
/*
 * Verify parent/child ftl_io lifetime: freeing or completing a child
 * returns exactly one IO to the pool, and the parent's callback fires only
 * once the parent itself completes.
 */
static void
test_alloc_free(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
	struct ftl_io *parent, *child;
	int parent_status = -1;
	size_t pool_size;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	parent = alloc_io(dev, io_complete_cb, &parent_status);
	SPDK_CU_ASSERT_FATAL(parent != NULL);
	child = ftl_io_alloc_child(parent);
	SPDK_CU_ASSERT_FATAL(child != NULL);

	/* Freeing the child returns it to the pool; only the parent remains out */
	ftl_io_free(child);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - 1);

	/* Completing a child must not complete the parent */
	child = ftl_io_alloc_child(parent);
	SPDK_CU_ASSERT_FATAL(child != NULL);
	ftl_io_complete(child);
	CU_ASSERT_EQUAL(parent_status, -1);
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* ftl_io_free() on a child must not trigger the parent's callback either */
	parent_status = -1;
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	SPDK_CU_ASSERT_FATAL(parent != NULL);
	child = ftl_io_alloc_child(parent);
	SPDK_CU_ASSERT_FATAL(child != NULL);

	ftl_io_free(child);
	CU_ASSERT_EQUAL(parent_status, -1);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - 1);
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}
310 
/*
 * Verify parent/child completion ordering with outstanding requests:
 * the parent's callback must only fire after both its own requests are
 * drained and every child has completed, regardless of whether the
 * children or the parent finish first.
 */
static void
test_child_requests(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
#define MAX_CHILDREN 16
	struct ftl_io *parent, *child[MAX_CHILDREN];
	/* status[0] tracks the parent, status[1..] the children */
	int status[MAX_CHILDREN + 1], i;
	size_t pool_size;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	/* Verify correct behaviour when children finish first */
	parent = alloc_io(dev, io_complete_cb, &status[0]);
	parent->status = 0;

	ftl_io_inc_req(parent);
	status[0] = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		status[i + 1] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &status[i + 1]);
		child[i]->status = 0;

		ftl_io_inc_req(child[i]);
	}

	CU_ASSERT_FALSE(ftl_io_done(parent));
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);

	for (i = 0; i < MAX_CHILDREN; ++i) {
		CU_ASSERT_FALSE(ftl_io_done(child[i]));
		ftl_io_dec_req(child[i]);
		CU_ASSERT_TRUE(ftl_io_done(child[i]));
		CU_ASSERT_FALSE(ftl_io_done(parent));

		ftl_io_complete(child[i]);
		CU_ASSERT_FALSE(ftl_io_done(parent));
		CU_ASSERT_EQUAL(status[i + 1], 0);
	}

	CU_ASSERT_EQUAL(status[0], -1);

	ftl_io_dec_req(parent);
	CU_ASSERT_EQUAL(parent->req_cnt, 0);
	CU_ASSERT_TRUE(ftl_io_done(parent));

	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(status[0], 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);


	/* Verify correct behaviour when parent finishes first */
	parent = alloc_io(dev, io_complete_cb, &status[0]);
	parent->status = 0;

	ftl_io_inc_req(parent);
	status[0] = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		status[i + 1] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &status[i + 1]);
		child[i]->status = 0;

		ftl_io_inc_req(child[i]);
	}

	CU_ASSERT_FALSE(ftl_io_done(parent));
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);

	ftl_io_dec_req(parent);
	CU_ASSERT_TRUE(ftl_io_done(parent));
	CU_ASSERT_EQUAL(parent->req_cnt, 0);

	/* Parent completion is deferred until all children have completed */
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(status[0], -1);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);

	for (i = 0; i < MAX_CHILDREN; ++i) {
		CU_ASSERT_FALSE(ftl_io_done(child[i]));
		ftl_io_dec_req(child[i]);
		CU_ASSERT_TRUE(ftl_io_done(child[i]));

		ftl_io_complete(child[i]);
		CU_ASSERT_EQUAL(status[i + 1], 0);
	}

	CU_ASSERT_EQUAL(status[0], 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}
411 
/*
 * Verify status propagation from children to parent: a child's error
 * overwrites a successful parent status (last completed child wins here),
 * while a parent's own pre-set status is preserved whether children
 * succeed or fail after the parent has completed.
 */
static void
test_child_status(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
	struct ftl_io *parent, *child[2];
	int parent_status, child_status[2];
	size_t pool_size, i;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	/* Verify the first error is returned by the parent */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = 0;

	for (i = 0; i < 2; ++i) {
		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
	}

	child[0]->status = -3;
	child[1]->status = -4;

	/* Children complete in reverse order; parent last */
	ftl_io_complete(child[1]);
	ftl_io_complete(child[0]);
	ftl_io_complete(parent);

	CU_ASSERT_EQUAL(child_status[0], -3);
	CU_ASSERT_EQUAL(child_status[1], -4);
	CU_ASSERT_EQUAL(parent_status, -4);

	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* Verify parent's status is kept if children finish successfully */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = -1;

	for (i = 0; i < 2; ++i) {
		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
	}

	child[0]->status = 0;
	child[1]->status = 0;

	ftl_io_complete(parent);
	ftl_io_complete(child[1]);
	ftl_io_complete(child[0]);

	CU_ASSERT_EQUAL(child_status[0], 0);
	CU_ASSERT_EQUAL(child_status[1], 0);
	CU_ASSERT_EQUAL(parent_status, -1);

	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* Verify parent's status is kept if children fail too */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = -1;

	for (i = 0; i < 2; ++i) {
		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
	}

	child[0]->status = -3;
	child[1]->status = -4;

	ftl_io_complete(parent);
	ftl_io_complete(child[1]);
	ftl_io_complete(child[0]);

	CU_ASSERT_EQUAL(child_status[0], -3);
	CU_ASSERT_EQUAL(child_status[1], -4);
	CU_ASSERT_EQUAL(parent_status, -1);

	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}
496 
/*
 * Verify completion across three generations (parent -> children ->
 * grandchildren): each IO completes only after its own requests drain and
 * all of its descendants have completed, in both the children-first and
 * parent-first orderings.
 */
static void
test_multi_generation(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
#define MAX_GRAND_CHILDREN	32
	struct ftl_io *parent, *child[MAX_CHILDREN], *gchild[MAX_CHILDREN * MAX_GRAND_CHILDREN];
	int parent_status, child_status[MAX_CHILDREN], gchild_status[MAX_CHILDREN * MAX_GRAND_CHILDREN];
	size_t pool_size;
	int i, j;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	/* Verify correct behaviour when children finish first */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = 0;

	ftl_io_inc_req(parent);
	parent_status = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		child_status[i] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
		child[i]->status = 0;


		/* Each child gets its own set of grandchildren with pending requests */
		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = ftl_io_alloc_child(child[i]);
			SPDK_CU_ASSERT_FATAL(io != NULL);

			gchild[i * MAX_GRAND_CHILDREN + j] = io;
			gchild_status[i * MAX_GRAND_CHILDREN + j] = -1;
			setup_io(io, dev, io_complete_cb, &gchild_status[i * MAX_GRAND_CHILDREN + j]);
			io->status = 0;

			ftl_io_inc_req(io);
		}

		ftl_io_inc_req(child[i]);
	}

	for (i = 0; i < MAX_CHILDREN; ++i) {
		CU_ASSERT_FALSE(ftl_io_done(child[i]));
		ftl_io_dec_req(child[i]);
		CU_ASSERT_TRUE(ftl_io_done(child[i]));

		/* Child callback is held back until its grandchildren complete */
		ftl_io_complete(child[i]);
		CU_ASSERT_FALSE(ftl_io_done(parent));
		CU_ASSERT_EQUAL(child_status[i], -1);

		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = gchild[i * MAX_GRAND_CHILDREN + j];

			CU_ASSERT_FALSE(ftl_io_done(io));
			ftl_io_dec_req(io);
			CU_ASSERT_TRUE(ftl_io_done(io));
			ftl_io_complete(io);
			CU_ASSERT_EQUAL(gchild_status[i * MAX_GRAND_CHILDREN + j], 0);
		}

		CU_ASSERT_EQUAL(child_status[i], 0);
	}

	ftl_io_dec_req(parent);
	CU_ASSERT_TRUE(ftl_io_done(parent));
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* Verify correct behaviour when parents finish first */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = 0;
	parent_status = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		child_status[i] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
		child[i]->status = 0;

		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = ftl_io_alloc_child(child[i]);
			SPDK_CU_ASSERT_FATAL(io != NULL);

			gchild[i * MAX_GRAND_CHILDREN + j] = io;
			gchild_status[i * MAX_GRAND_CHILDREN + j] = -1;
			setup_io(io, dev, io_complete_cb, &gchild_status[i * MAX_GRAND_CHILDREN + j]);
			io->status = 0;

			ftl_io_inc_req(io);
		}

		CU_ASSERT_TRUE(ftl_io_done(child[i]));
		ftl_io_complete(child[i]);
		CU_ASSERT_EQUAL(child_status[i], -1);
	}

	CU_ASSERT_TRUE(ftl_io_done(parent));
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, -1);

	for (i = 0; i < MAX_CHILDREN; ++i) {
		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = gchild[i * MAX_GRAND_CHILDREN + j];

			CU_ASSERT_FALSE(ftl_io_done(io));
			ftl_io_dec_req(io);
			CU_ASSERT_TRUE(ftl_io_done(io));
			ftl_io_complete(io);
			CU_ASSERT_EQUAL(gchild_status[i * MAX_GRAND_CHILDREN + j], 0);
		}

		CU_ASSERT_EQUAL(child_status[i], 0);
	}

	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}
624 
625 static void
626 test_io_channel_create(void)
627 {
628 	struct spdk_ftl_dev *dev;
629 	struct spdk_io_channel *ioch, **ioch_array;
630 	struct ftl_io_channel *ftl_ioch;
631 	uint32_t ioch_idx;
632 
633 	dev = setup_device(g_default_conf.max_io_channels + 1, 16);
634 
635 	ioch = spdk_get_io_channel(dev);
636 	CU_ASSERT(ioch != NULL);
637 	CU_ASSERT_EQUAL(dev->num_io_channels, 1);
638 	spdk_put_io_channel(ioch);
639 	poll_threads();
640 	CU_ASSERT_EQUAL(dev->num_io_channels, 0);
641 
642 	ioch_array = calloc(dev->conf.max_io_channels, sizeof(*ioch_array));
643 	SPDK_CU_ASSERT_FATAL(ioch != NULL);
644 
645 	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ++ioch_idx) {
646 		set_thread(ioch_idx);
647 		ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
648 		SPDK_CU_ASSERT_FATAL(ioch != NULL);
649 		poll_threads();
650 
651 		ftl_ioch = ftl_io_channel_get_ctx(ioch);
652 		CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
653 	}
654 
655 	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);
656 	set_thread(dev->conf.max_io_channels);
657 	ioch = spdk_get_io_channel(dev);
658 	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);
659 	CU_ASSERT_EQUAL(ioch, NULL);
660 
661 	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx += 2) {
662 		set_thread(ioch_idx);
663 		spdk_put_io_channel(ioch_array[ioch_idx]);
664 		ioch_array[ioch_idx] = NULL;
665 		poll_threads();
666 	}
667 
668 	poll_threads();
669 	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels / 2);
670 
671 	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
672 		set_thread(ioch_idx);
673 
674 		if (ioch_array[ioch_idx] == NULL) {
675 			ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
676 			SPDK_CU_ASSERT_FATAL(ioch != NULL);
677 			poll_threads();
678 
679 			ftl_ioch = ftl_io_channel_get_ctx(ioch);
680 			CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
681 		}
682 	}
683 
684 	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
685 		set_thread(ioch_idx);
686 		spdk_put_io_channel(ioch_array[ioch_idx]);
687 	}
688 
689 	poll_threads();
690 	CU_ASSERT_EQUAL(dev->num_io_channels, 0);
691 
692 	free(ioch_array);
693 	free_device(dev);
694 }
695 
/*
 * Verify write buffer entry accounting per IO channel: the full buffer can
 * be acquired with internal entries, user entries respect the channel's
 * qdepth limit while internal ones do not, and released entries can be
 * re-acquired.
 */
static void
test_acquire_entry(void)
{
	struct spdk_ftl_dev *dev;
	struct spdk_io_channel *ioch, **ioch_array;
	struct ftl_io_channel *ftl_ioch;
	struct ftl_wbuf_entry *entry, **entries;
	uint32_t num_entries, num_io_channels = 2;
	uint32_t ioch_idx, entry_idx, tmp_idx;

	dev = setup_device(num_io_channels, 16);

	/* num_entries is the per-channel write buffer capacity in blocks */
	num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
	entries = calloc(num_entries * num_io_channels, sizeof(*entries));
	SPDK_CU_ASSERT_FATAL(entries != NULL);
	ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);

	/* Acquire whole buffer of internal entries */
	entry_idx = 0;
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
		poll_threads();

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}

		/* Buffer exhausted: the next acquisition must fail */
		entry = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
		CU_ASSERT(entry == NULL);
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
			entries[ioch_idx * num_entries + tmp_idx] = NULL;
		}

		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	/* Do the same for user entries */
	entry_idx = 0;
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
		poll_threads();

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}

		entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entry == NULL);
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
			entries[ioch_idx * num_entries + tmp_idx] = NULL;
		}

		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	/* Verify limits */
	entry_idx = 0;
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
		poll_threads();

		/* User entries stop at the qdepth limit ... */
		ftl_ioch->qdepth_limit = num_entries / 2;
		for (tmp_idx = 0; tmp_idx < num_entries / 2; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}

		entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entry == NULL);

		/* ... but internal entries may still use the remaining buffer */
		for (; tmp_idx < num_entries; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
			entries[ioch_idx * num_entries + tmp_idx] = NULL;
		}

		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	/* Verify acquire/release */
	set_thread(0);
	ioch = spdk_get_io_channel(dev);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	ftl_ioch = ftl_io_channel_get_ctx(ioch);
	poll_threads();

	for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
		entries[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entries[entry_idx] != NULL);
	}

	entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
	CU_ASSERT(entry == NULL);

	/* Releasing half of the entries makes that many acquirable again */
	for (entry_idx = 0; entry_idx < num_entries / 2; ++entry_idx) {
		ftl_release_wbuf_entry(entries[entry_idx]);
		entries[entry_idx] = NULL;
	}

	for (; entry_idx < num_entries; ++entry_idx) {
		entries[entry_idx - num_entries / 2] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entries[entry_idx - num_entries / 2] != NULL);
	}

	for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
		ftl_release_wbuf_entry(entries[entry_idx]);
		entries[entry_idx] = NULL;
	}

	spdk_put_io_channel(ioch);
	poll_threads();

	free(ioch_array);
	free(entries);
	free_device(dev);
}
847 
848 static void
849 test_submit_batch(void)
850 {
851 	struct spdk_ftl_dev *dev;
852 	struct spdk_io_channel **_ioch_array;
853 	struct ftl_io_channel **ioch_array;
854 	struct ftl_wbuf_entry *entry;
855 	struct ftl_batch *batch, *batch2;
856 	uint32_t num_io_channels = 16;
857 	uint32_t ioch_idx, tmp_idx, entry_idx;
858 	uint64_t ioch_bitmap;
859 	size_t num_entries;
860 
861 	dev = setup_device(num_io_channels, num_io_channels);
862 
863 	_ioch_array = calloc(num_io_channels, sizeof(*_ioch_array));
864 	SPDK_CU_ASSERT_FATAL(_ioch_array != NULL);
865 	ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
866 	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);
867 
868 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
869 		set_thread(ioch_idx);
870 		_ioch_array[ioch_idx] = spdk_get_io_channel(dev);
871 		SPDK_CU_ASSERT_FATAL(_ioch_array[ioch_idx] != NULL);
872 		ioch_array[ioch_idx] = ftl_io_channel_get_ctx(_ioch_array[ioch_idx]);
873 		poll_threads();
874 	}
875 
876 	/* Make sure the IO channels are not starved and entries are popped in RR fashion */
877 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
878 		set_thread(ioch_idx);
879 
880 		for (entry_idx = 0; entry_idx < dev->xfer_size; ++entry_idx) {
881 			entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
882 			SPDK_CU_ASSERT_FATAL(entry != NULL);
883 
884 			num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
885 							(void **)&entry, 1, NULL);
886 			CU_ASSERT(num_entries == 1);
887 		}
888 	}
889 
890 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
891 		for (tmp_idx = 0; tmp_idx < ioch_idx; ++tmp_idx) {
892 			set_thread(tmp_idx);
893 
894 			while (spdk_ring_count(ioch_array[tmp_idx]->submit_queue) < dev->xfer_size) {
895 				entry = ftl_acquire_wbuf_entry(ioch_array[tmp_idx], 0);
896 				SPDK_CU_ASSERT_FATAL(entry != NULL);
897 
898 				num_entries = spdk_ring_enqueue(ioch_array[tmp_idx]->submit_queue,
899 								(void **)&entry, 1, NULL);
900 				CU_ASSERT(num_entries == 1);
901 			}
902 		}
903 
904 		set_thread(ioch_idx);
905 
906 		batch = ftl_get_next_batch(dev);
907 		SPDK_CU_ASSERT_FATAL(batch != NULL);
908 
909 		TAILQ_FOREACH(entry, &batch->entries, tailq) {
910 			CU_ASSERT(entry->ioch == ioch_array[ioch_idx]);
911 		}
912 
913 		ftl_release_batch(dev, batch);
914 
915 		CU_ASSERT(spdk_ring_count(ioch_array[ioch_idx]->free_queue) ==
916 			  ioch_array[ioch_idx]->num_entries);
917 	}
918 
919 	for (ioch_idx = 0; ioch_idx < num_io_channels - 1; ++ioch_idx) {
920 		batch = ftl_get_next_batch(dev);
921 		SPDK_CU_ASSERT_FATAL(batch != NULL);
922 		ftl_release_batch(dev, batch);
923 	}
924 
925 	/* Make sure the batch can be built from entries from any IO channel */
926 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
927 		set_thread(ioch_idx);
928 		entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
929 		SPDK_CU_ASSERT_FATAL(entry != NULL);
930 
931 		num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
932 						(void **)&entry, 1, NULL);
933 		CU_ASSERT(num_entries == 1);
934 	}
935 
936 	batch = ftl_get_next_batch(dev);
937 	SPDK_CU_ASSERT_FATAL(batch != NULL);
938 
939 	ioch_bitmap = 0;
940 	TAILQ_FOREACH(entry, &batch->entries, tailq) {
941 		ioch_bitmap |= 1 << entry->ioch->index;
942 	}
943 
944 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
945 		CU_ASSERT((ioch_bitmap & (1 << ioch_array[ioch_idx]->index)) != 0);
946 	}
947 	ftl_release_batch(dev, batch);
948 
949 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
950 		CU_ASSERT(spdk_ring_count(ioch_array[ioch_idx]->free_queue) ==
951 			  ioch_array[ioch_idx]->num_entries);
952 	}
953 
954 	/* Make sure pending batches are prioritized */
955 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
956 		set_thread(ioch_idx);
957 
958 		while (spdk_ring_count(ioch_array[ioch_idx]->submit_queue) < dev->xfer_size) {
959 			entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
960 			SPDK_CU_ASSERT_FATAL(entry != NULL);
961 			num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
962 							(void **)&entry, 1, NULL);
963 			CU_ASSERT(num_entries == 1);
964 		}
965 	}
966 
967 	batch = ftl_get_next_batch(dev);
968 	SPDK_CU_ASSERT_FATAL(batch != NULL);
969 
970 	TAILQ_INSERT_TAIL(&dev->pending_batches, batch, tailq);
971 	batch2 = ftl_get_next_batch(dev);
972 	SPDK_CU_ASSERT_FATAL(batch2 != NULL);
973 
974 	CU_ASSERT(TAILQ_EMPTY(&dev->pending_batches));
975 	CU_ASSERT(batch == batch2);
976 
977 	batch = ftl_get_next_batch(dev);
978 	SPDK_CU_ASSERT_FATAL(batch != NULL);
979 
980 	ftl_release_batch(dev, batch);
981 	ftl_release_batch(dev, batch2);
982 
983 	for (ioch_idx = 2; ioch_idx < num_io_channels; ++ioch_idx) {
984 		batch = ftl_get_next_batch(dev);
985 		SPDK_CU_ASSERT_FATAL(batch != NULL);
986 		ftl_release_batch(dev, batch);
987 	}
988 
989 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
990 		set_thread(ioch_idx);
991 		spdk_put_io_channel(_ioch_array[ioch_idx]);
992 	}
993 	poll_threads();
994 
995 	free(_ioch_array);
996 	free(ioch_array);
997 	free_device(dev);
998 }
999 
1000 static void
1001 test_entry_address(void)
1002 {
1003 	struct spdk_ftl_dev *dev;
1004 	struct spdk_io_channel **ioch_array;
1005 	struct ftl_io_channel *ftl_ioch;
1006 	struct ftl_wbuf_entry **entry_array;
1007 	struct ftl_addr addr;
1008 	uint32_t num_entries, num_io_channels = 7;
1009 	uint32_t ioch_idx, entry_idx;
1010 
1011 	dev = setup_device(num_io_channels, num_io_channels);
1012 	ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
1013 	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);
1014 
1015 	num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
1016 	entry_array = calloc(num_entries, sizeof(*entry_array));
1017 	SPDK_CU_ASSERT_FATAL(entry_array != NULL);
1018 
1019 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
1020 		set_thread(ioch_idx);
1021 		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
1022 		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
1023 		poll_threads();
1024 	}
1025 
1026 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
1027 		set_thread(ioch_idx);
1028 		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
1029 
1030 		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
1031 			entry_array[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
1032 			SPDK_CU_ASSERT_FATAL(entry_array[entry_idx] != NULL);
1033 
1034 			addr = ftl_get_addr_from_entry(entry_array[entry_idx]);
1035 			CU_ASSERT(addr.cached == 1);
1036 			CU_ASSERT((addr.cache_offset >> dev->ioch_shift) == entry_idx);
1037 			CU_ASSERT((addr.cache_offset & ((1 << dev->ioch_shift) - 1)) == ioch_idx);
1038 			CU_ASSERT(entry_array[entry_idx] == ftl_get_entry_from_addr(dev, addr));
1039 		}
1040 
1041 		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
1042 			ftl_release_wbuf_entry(entry_array[entry_idx]);
1043 		}
1044 	}
1045 
1046 	for (ioch_idx = 0; ioch_idx < num_io_channels; ioch_idx += 2) {
1047 		set_thread(ioch_idx);
1048 		spdk_put_io_channel(ioch_array[ioch_idx]);
1049 		ioch_array[ioch_idx] = NULL;
1050 	}
1051 	poll_threads();
1052 
1053 	for (ioch_idx = 1; ioch_idx < num_io_channels; ioch_idx += 2) {
1054 		set_thread(ioch_idx);
1055 		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
1056 
1057 		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
1058 			entry_array[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
1059 			SPDK_CU_ASSERT_FATAL(entry_array[entry_idx] != NULL);
1060 
1061 			addr = ftl_get_addr_from_entry(entry_array[entry_idx]);
1062 			CU_ASSERT(addr.cached == 1);
1063 			CU_ASSERT(entry_array[entry_idx] == ftl_get_entry_from_addr(dev, addr));
1064 		}
1065 
1066 		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
1067 			ftl_release_wbuf_entry(entry_array[entry_idx]);
1068 		}
1069 	}
1070 
1071 	for (ioch_idx = 1; ioch_idx < num_io_channels; ioch_idx += 2) {
1072 		set_thread(ioch_idx);
1073 		spdk_put_io_channel(ioch_array[ioch_idx]);
1074 	}
1075 	poll_threads();
1076 
1077 	free(entry_array);
1078 	free(ioch_array);
1079 	free_device(dev);
1080 }
1081 
1082 int
1083 main(int argc, char **argv)
1084 {
1085 	CU_pSuite suite;
1086 	unsigned int num_failures;
1087 
1088 	CU_set_error_action(CUEA_ABORT);
1089 	CU_initialize_registry();
1090 
1091 	suite = CU_add_suite("ftl_io_suite", NULL, NULL);
1092 
1093 
1094 	CU_ADD_TEST(suite, test_completion);
1095 	CU_ADD_TEST(suite, test_alloc_free);
1096 	CU_ADD_TEST(suite, test_child_requests);
1097 	CU_ADD_TEST(suite, test_child_status);
1098 	CU_ADD_TEST(suite, test_multi_generation);
1099 	CU_ADD_TEST(suite, test_io_channel_create);
1100 	CU_ADD_TEST(suite, test_acquire_entry);
1101 	CU_ADD_TEST(suite, test_submit_batch);
1102 	CU_ADD_TEST(suite, test_entry_address);
1103 
1104 	CU_basic_set_mode(CU_BRM_VERBOSE);
1105 	CU_basic_run_tests();
1106 	num_failures = CU_get_number_of_failures();
1107 	CU_cleanup_registry();
1108 
1109 	return num_failures;
1110 }
1111