xref: /spdk/test/unit/lib/ftl/ftl_io.c/ftl_io_ut.c (revision 2f5c602574a98ede645991abe279a96e19c50196)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 #include "thread/thread_internal.h"
36 
37 #include "spdk_cunit.h"
38 #include "common/lib/ut_multithread.c"
39 
40 #include "ftl/ftl_io.c"
41 #include "ftl/ftl_init.c"
42 #include "ftl/ftl_core.c"
43 #include "ftl/ftl_band.c"
44 
/*
 * Stubs for external dependencies. The test compiles ftl_io.c, ftl_init.c,
 * ftl_core.c and ftl_band.c directly (see includes above), so every symbol
 * those files reference from the bdev/mempool/reloc/restore/trace layers must
 * be resolved here. DEFINE_STUB provides a function returning the given
 * constant; DEFINE_STUB_V provides a void no-op.
 */

/* bdev layer: fake a non-zoned bdev with 4096-byte blocks, 1024 blocks,
 * no separate metadata, named "test"; all I/O submission stubs succeed. */
DEFINE_STUB(spdk_bdev_io_get_append_location, uint64_t, (struct spdk_bdev_io *bdev_io), 0);
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_get_optimal_open_zones, uint32_t, (const struct spdk_bdev *b), 1);
DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
DEFINE_STUB(spdk_bdev_is_md_separate, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_zone_appendv, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, uint64_t zone_id, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *b), 1024);
DEFINE_STUB(spdk_bdev_zone_management, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, uint64_t zone_id, enum spdk_bdev_zone_action action,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");
DEFINE_STUB(spdk_bdev_get_write_unit_size, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), true);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_bdev_module *module), 0);
DEFINE_STUB(spdk_bdev_open_ext, int,
	    (const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
	     void *event_ctx, struct spdk_bdev_desc **desc), 0);
DEFINE_STUB(spdk_bdev_read_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		void *buf, uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_write_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		void *buf, uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_write_blocks_with_md, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, void *buf, void *md, uint64_t offset_blocks,
		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_writev_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);
DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 4096);
DEFINE_STUB(spdk_bdev_get_media_events, size_t,
	    (struct spdk_bdev_desc *bdev_desc, struct spdk_bdev_media_event *events,
	     size_t max_events), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_get_zone_info, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t zone_id, size_t num_zones, struct spdk_bdev_zone_info *info,
	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
/* mempool helpers referenced by the compiled-in FTL sources */
DEFINE_STUB(spdk_mempool_create_ctor, struct spdk_mempool *,
	    (const char *name, size_t count, size_t ele_size, size_t cache_size,
	     int socket_id, spdk_mempool_obj_cb_t *obj_init, void *obj_init_arg), NULL);
DEFINE_STUB(spdk_mempool_obj_iter, uint32_t,
	    (struct spdk_mempool *mp, spdk_mempool_obj_cb_t obj_cb, void *obj_cb_arg), 0);
/* relocation subsystem: inert (never active, never halted) */
DEFINE_STUB(ftl_reloc, bool, (struct ftl_reloc *reloc), false);
DEFINE_STUB_V(ftl_reloc_add, (struct ftl_reloc *reloc, struct ftl_band *band, size_t offset,
			      size_t num_blocks, int prio, bool defrag));
DEFINE_STUB_V(ftl_reloc_free, (struct ftl_reloc *reloc));
DEFINE_STUB_V(ftl_reloc_halt, (struct ftl_reloc *reloc));
DEFINE_STUB(ftl_reloc_init, struct ftl_reloc *, (struct spdk_ftl_dev *dev), NULL);
DEFINE_STUB(ftl_reloc_is_defrag_active, bool, (const struct ftl_reloc *reloc), false);
DEFINE_STUB(ftl_reloc_is_halted, bool, (const struct ftl_reloc *reloc), false);
DEFINE_STUB_V(ftl_reloc_resume, (struct ftl_reloc *reloc));
/* restore subsystem: all restore entry points report success */
DEFINE_STUB(ftl_restore_device, int,
	    (struct ftl_restore *restore, ftl_restore_fn cb, void *cb_arg), 0);
DEFINE_STUB(ftl_restore_md, int,
	    (struct spdk_ftl_dev *dev, ftl_restore_fn cb, void *cb_arg), 0);
DEFINE_STUB_V(ftl_restore_nv_cache,
	      (struct ftl_restore *restore, ftl_restore_fn cb, void *cb_arg));

/* debug-only symbols: only needed when the corresponding build flags are set */
#if defined(FTL_META_DEBUG)
DEFINE_STUB(ftl_band_validate_md, bool, (struct ftl_band *band), true);
#endif
#if defined(DEBUG)
DEFINE_STUB_V(ftl_trace_defrag_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band));
DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
				     struct ftl_addr addr, size_t addr_cnt));
DEFINE_STUB_V(ftl_trace_lba_io_init, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, int limit, size_t num_free));
DEFINE_STUB(ftl_trace_alloc_id, uint64_t, (struct spdk_ftl_dev *dev), 0);
DEFINE_STUB_V(ftl_trace_completion, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
				     enum ftl_trace_completion type));
DEFINE_STUB_V(ftl_trace_wbuf_fill, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
DEFINE_STUB_V(ftl_trace_wbuf_pop, (struct spdk_ftl_dev *dev, const struct ftl_wbuf_entry *entry));
DEFINE_STUB_V(ftl_trace_write_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band));
#endif
#if defined(FTL_META_DEBUG)
DEFINE_STUB_V(ftl_dev_dump_bands, (struct spdk_ftl_dev *dev));
#endif
#if defined(FTL_DUMP_STATS)
DEFINE_STUB_V(ftl_dev_dump_stats, (const struct spdk_ftl_dev *dev));
#endif

/* libpmem stubs, only referenced when SPDK is built with PMDK support */
#ifdef SPDK_CONFIG_PMDK
DEFINE_STUB(pmem_map_file, void *,
	    (const char *path, size_t len, int flags, mode_t mode,
	     size_t *mapped_lenp, int *is_pmemp), NULL);
DEFINE_STUB(pmem_unmap, int, (void *addr, size_t len), 0);
DEFINE_STUB(pmem_memset_persist, void *, (void *pmemdest, int c, size_t len), NULL);
#endif
152 
/*
 * Test override of spdk_bdev_get_io_channel(): setup_device() registers the
 * bdev descriptor pointer itself as an io_device, so the descriptor can be
 * forwarded directly to spdk_get_io_channel().
 */
struct spdk_io_channel *
spdk_bdev_get_io_channel(struct spdk_bdev_desc *bdev_desc)
{
	return spdk_get_io_channel(bdev_desc);
}
158 
/* io_device create callback for the fake base bdev; no per-channel state
 * is needed, so creation always succeeds. */
static int
channel_create_cb(void *io_device, void *ctx)
{
	(void)io_device;
	(void)ctx;

	return 0;
}
164 
/* io_device destroy callback for the fake base bdev; nothing to tear down. */
static void
channel_destroy_cb(void *io_device, void *ctx)
{
	(void)io_device;
	(void)ctx;
}
168 
/*
 * Build a minimal spdk_ftl_dev sufficient for the ftl_io tests: allocate the
 * device, hand-craft its global I/O channel (dev->ioch) with a backing
 * io_pool, register the fake base bdev descriptor as an io_device and run
 * ftl_dev_init_io_channel().
 *
 * num_threads - number of ut_multithread threads to allocate
 * xfer_size   - transfer size (in blocks) stored in dev->xfer_size
 *
 * Returns the initialized device; asserts fatally on any allocation failure.
 * Must be paired with free_device().
 */
static struct spdk_ftl_dev *
setup_device(uint32_t num_threads, uint32_t xfer_size)
{
	struct spdk_ftl_dev *dev;
	struct _ftl_io_channel *_ioch;
	struct ftl_io_channel *ioch;
	int rc;

	allocate_threads(num_threads);
	set_thread(0);

	dev = calloc(1, sizeof(*dev));
	SPDK_CU_ASSERT_FATAL(dev != NULL);

	dev->core_thread = spdk_get_thread();
	/* One allocation laid out as: spdk_io_channel header followed by the
	 * _ftl_io_channel context, mimicking a real channel's memory layout. */
	dev->ioch = calloc(1, sizeof(*_ioch) + sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(dev->ioch != NULL);

	/* The context lives immediately past the channel header. */
	_ioch = (struct _ftl_io_channel *)(dev->ioch + 1);
	ioch = _ioch->ioch = calloc(1, sizeof(*ioch));
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	/* ftl_md_io is the largest io type allocated from this pool */
	ioch->elem_size = sizeof(struct ftl_md_io);
	ioch->io_pool = spdk_mempool_create("io-pool", 4096, ioch->elem_size, 0, 0);

	SPDK_CU_ASSERT_FATAL(ioch->io_pool != NULL);

	dev->conf = g_default_conf;
	dev->xfer_size = xfer_size;
	/* Any non-NULL value works; the descriptor is only used as an io_device key. */
	dev->base_bdev_desc = (struct spdk_bdev_desc *)0xdeadbeef;
	spdk_io_device_register(dev->base_bdev_desc, channel_create_cb, channel_destroy_cb, 0, NULL);

	rc = ftl_dev_init_io_channel(dev);
	CU_ASSERT_EQUAL(rc, 0);

	return dev;
}
206 
/*
 * Tear down a device built by setup_device(): free the io_pool and channel
 * context, unregister both io_devices, release the test threads and finally
 * the device memory itself.
 */
static void
free_device(struct spdk_ftl_dev *dev)
{
	struct ftl_io_channel *ioch;

	ioch = ftl_io_channel_get_ctx(dev->ioch);
	spdk_mempool_free(ioch->io_pool);
	free(ioch);

	/* NOTE(review): dev is unregistered as an io_device here — presumably it
	 * was registered inside ftl_dev_init_io_channel(); confirm against
	 * ftl_init.c if this pairing ever changes. */
	spdk_io_device_unregister(dev, NULL);
	spdk_io_device_unregister(dev->base_bdev_desc, NULL);
	free_threads();

	free(dev->ioch_array);
	free(dev->iov_buf);
	free(dev->ioch);
	free(dev);
}
225 
226 static void
227 setup_io(struct ftl_io *io, struct spdk_ftl_dev *dev, ftl_io_fn cb, void *ctx)
228 {
229 	io->dev = dev;
230 	io->cb_fn = cb;
231 	io->cb_ctx = ctx;
232 }
233 
234 static struct ftl_io *
235 alloc_io(struct spdk_ftl_dev *dev, ftl_io_fn cb, void *ctx)
236 {
237 	struct ftl_io *io;
238 
239 	io = ftl_io_alloc(dev->ioch);
240 	SPDK_CU_ASSERT_FATAL(io != NULL);
241 	setup_io(io, dev, cb, ctx);
242 
243 	return io;
244 }
245 
/* Completion callback used by all tests: records the io's final status in
 * the int that ctx points at, so the test can assert on it. */
static void
io_complete_cb(struct ftl_io *io, void *ctx, int status)
{
	int *result = ctx;

	(void)io;
	*result = status;
}
251 
/*
 * Verify per-io request accounting: ftl_io_done() must stay false while any
 * request (ftl_io_inc_req) is outstanding, become true only after the last
 * ftl_io_dec_req, and ftl_io_complete() must invoke the callback with the
 * io's status and return the io to the pool.
 */
static void
test_completion(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
	struct ftl_io *io;
	int req, status = 0;
	size_t pool_size;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	io = alloc_io(dev, io_complete_cb, &status);
	io->status = -EIO;

#define NUM_REQUESTS 16
	/* Queue NUM_REQUESTS outstanding requests; io must not report done */
	for (req = 0; req < NUM_REQUESTS; ++req) {
		ftl_io_inc_req(io);
		CU_ASSERT_FALSE(ftl_io_done(io));
	}

	CU_ASSERT_EQUAL(io->req_cnt, NUM_REQUESTS);

	/* Complete all but the last request; still not done */
	for (req = 0; req < (NUM_REQUESTS - 1); ++req) {
		ftl_io_dec_req(io);
		CU_ASSERT_FALSE(ftl_io_done(io));
	}

	CU_ASSERT_EQUAL(io->req_cnt, 1);

	/* The final dec_req flips the io to done */
	ftl_io_dec_req(io);
	CU_ASSERT_TRUE(ftl_io_done(io));

	/* Completion must deliver the status set above to the callback */
	ftl_io_complete(io);
	CU_ASSERT_EQUAL(status, -EIO);

	/* ... and return the io to the pool */
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}
293 
/*
 * Verify allocation/release of parent and child ios: freeing or completing a
 * child must not fire the parent's callback, and the parent only completes
 * (callback fires, pool count restored) via ftl_io_complete(parent).
 */
static void
test_alloc_free(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
	struct ftl_io *parent, *child;
	int parent_status = -1;
	size_t pool_size;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	parent = alloc_io(dev, io_complete_cb, &parent_status);
	SPDK_CU_ASSERT_FATAL(parent != NULL);
	child = ftl_io_alloc_child(parent);
	SPDK_CU_ASSERT_FATAL(child != NULL);

	/* Freeing the child returns it to the pool; only the parent remains out */
	ftl_io_free(child);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - 1);

	/* Completing a child must not complete the parent by itself */
	child = ftl_io_alloc_child(parent);
	SPDK_CU_ASSERT_FATAL(child != NULL);
	ftl_io_complete(child);
	CU_ASSERT_EQUAL(parent_status, -1);
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* Freeing (not completing) a child must not affect the parent's status */
	parent_status = -1;
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	SPDK_CU_ASSERT_FATAL(parent != NULL);
	child = ftl_io_alloc_child(parent);
	SPDK_CU_ASSERT_FATAL(child != NULL);

	ftl_io_free(child);
	CU_ASSERT_EQUAL(parent_status, -1);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - 1);
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}
338 
/*
 * Verify parent/child completion ordering with outstanding requests: the
 * parent's callback must only fire after both the parent and every child
 * have completed, regardless of whether children or the parent finish first.
 */
static void
test_child_requests(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
#define MAX_CHILDREN 16
	struct ftl_io *parent, *child[MAX_CHILDREN];
	/* status[0] tracks the parent; status[1..MAX_CHILDREN] track children */
	int status[MAX_CHILDREN + 1], i;
	size_t pool_size;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	/* Verify correct behaviour when children finish first */
	parent = alloc_io(dev, io_complete_cb, &status[0]);
	parent->status = 0;

	ftl_io_inc_req(parent);
	status[0] = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		status[i + 1] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &status[i + 1]);
		child[i]->status = 0;

		ftl_io_inc_req(child[i]);
	}

	CU_ASSERT_FALSE(ftl_io_done(parent));
	/* parent + MAX_CHILDREN children are allocated from the pool */
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);

	for (i = 0; i < MAX_CHILDREN; ++i) {
		CU_ASSERT_FALSE(ftl_io_done(child[i]));
		ftl_io_dec_req(child[i]);
		CU_ASSERT_TRUE(ftl_io_done(child[i]));
		CU_ASSERT_FALSE(ftl_io_done(parent));

		/* each child's callback fires, but the parent stays pending */
		ftl_io_complete(child[i]);
		CU_ASSERT_FALSE(ftl_io_done(parent));
		CU_ASSERT_EQUAL(status[i + 1], 0);
	}

	CU_ASSERT_EQUAL(status[0], -1);

	ftl_io_dec_req(parent);
	CU_ASSERT_EQUAL(parent->req_cnt, 0);
	CU_ASSERT_TRUE(ftl_io_done(parent));

	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(status[0], 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);


	/* Verify correct behaviour when parent finishes first */
	parent = alloc_io(dev, io_complete_cb, &status[0]);
	parent->status = 0;

	ftl_io_inc_req(parent);
	status[0] = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		status[i + 1] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &status[i + 1]);
		child[i]->status = 0;

		ftl_io_inc_req(child[i]);
	}

	CU_ASSERT_FALSE(ftl_io_done(parent));
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);

	ftl_io_dec_req(parent);
	CU_ASSERT_TRUE(ftl_io_done(parent));
	CU_ASSERT_EQUAL(parent->req_cnt, 0);

	/* completing the parent while children are pending must not fire its
	 * callback nor release any ios */
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(status[0], -1);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);

	for (i = 0; i < MAX_CHILDREN; ++i) {
		CU_ASSERT_FALSE(ftl_io_done(child[i]));
		ftl_io_dec_req(child[i]);
		CU_ASSERT_TRUE(ftl_io_done(child[i]));

		ftl_io_complete(child[i]);
		CU_ASSERT_EQUAL(status[i + 1], 0);
	}

	/* the last child's completion finally completes the parent */
	CU_ASSERT_EQUAL(status[0], 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}
439 
/*
 * Verify status propagation between parent and children: a failing child
 * overrides a successful parent's status (the first failure to complete
 * wins), while a parent's own non-zero status is never overwritten.
 */
static void
test_child_status(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
	struct ftl_io *parent, *child[2];
	int parent_status, child_status[2];
	size_t pool_size, i;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	/* Verify the first error is returned by the parent */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = 0;

	for (i = 0; i < 2; ++i) {
		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
	}

	child[0]->status = -3;
	child[1]->status = -4;

	/* child[1] completes first, so its status (-4) reaches the parent */
	ftl_io_complete(child[1]);
	ftl_io_complete(child[0]);
	ftl_io_complete(parent);

	CU_ASSERT_EQUAL(child_status[0], -3);
	CU_ASSERT_EQUAL(child_status[1], -4);
	CU_ASSERT_EQUAL(parent_status, -4);

	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* Verify parent's status is kept if children finish successfully */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = -1;

	for (i = 0; i < 2; ++i) {
		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
	}

	child[0]->status = 0;
	child[1]->status = 0;

	ftl_io_complete(parent);
	ftl_io_complete(child[1]);
	ftl_io_complete(child[0]);

	CU_ASSERT_EQUAL(child_status[0], 0);
	CU_ASSERT_EQUAL(child_status[1], 0);
	CU_ASSERT_EQUAL(parent_status, -1);

	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* Verify parent's status is kept if children fail too */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = -1;

	for (i = 0; i < 2; ++i) {
		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
	}

	child[0]->status = -3;
	child[1]->status = -4;

	ftl_io_complete(parent);
	ftl_io_complete(child[1]);
	ftl_io_complete(child[0]);

	CU_ASSERT_EQUAL(child_status[0], -3);
	CU_ASSERT_EQUAL(child_status[1], -4);
	CU_ASSERT_EQUAL(parent_status, -1);

	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}
524 
/*
 * Verify completion across three io generations (parent -> children ->
 * grandchildren): an io only completes once all of its descendants have
 * completed, no matter in which order the generations finish.
 */
static void
test_multi_generation(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_io_channel *ioch;
#define MAX_GRAND_CHILDREN	32
	struct ftl_io *parent, *child[MAX_CHILDREN], *gchild[MAX_CHILDREN * MAX_GRAND_CHILDREN];
	int parent_status, child_status[MAX_CHILDREN], gchild_status[MAX_CHILDREN * MAX_GRAND_CHILDREN];
	size_t pool_size;
	int i, j;

	dev = setup_device(1, 16);
	ioch = ftl_io_channel_get_ctx(dev->ioch);
	pool_size = spdk_mempool_count(ioch->io_pool);

	/* Verify correct behaviour when children finish first */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = 0;

	ftl_io_inc_req(parent);
	parent_status = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		child_status[i] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
		child[i]->status = 0;


		/* each child gets MAX_GRAND_CHILDREN children of its own */
		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = ftl_io_alloc_child(child[i]);
			SPDK_CU_ASSERT_FATAL(io != NULL);

			gchild[i * MAX_GRAND_CHILDREN + j] = io;
			gchild_status[i * MAX_GRAND_CHILDREN + j] = -1;
			setup_io(io, dev, io_complete_cb, &gchild_status[i * MAX_GRAND_CHILDREN + j]);
			io->status = 0;

			ftl_io_inc_req(io);
		}

		ftl_io_inc_req(child[i]);
	}

	for (i = 0; i < MAX_CHILDREN; ++i) {
		CU_ASSERT_FALSE(ftl_io_done(child[i]));
		ftl_io_dec_req(child[i]);
		CU_ASSERT_TRUE(ftl_io_done(child[i]));

		/* child callback must wait for its grandchildren to complete */
		ftl_io_complete(child[i]);
		CU_ASSERT_FALSE(ftl_io_done(parent));
		CU_ASSERT_EQUAL(child_status[i], -1);

		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = gchild[i * MAX_GRAND_CHILDREN + j];

			CU_ASSERT_FALSE(ftl_io_done(io));
			ftl_io_dec_req(io);
			CU_ASSERT_TRUE(ftl_io_done(io));
			ftl_io_complete(io);
			CU_ASSERT_EQUAL(gchild_status[i * MAX_GRAND_CHILDREN + j], 0);
		}

		/* once all grandchildren completed, the child's callback fired */
		CU_ASSERT_EQUAL(child_status[i], 0);
	}

	ftl_io_dec_req(parent);
	CU_ASSERT_TRUE(ftl_io_done(parent));
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	/* Verify correct behaviour when parents finish first */
	parent = alloc_io(dev, io_complete_cb, &parent_status);
	parent->status = 0;
	parent_status = -1;

	for (i = 0; i < MAX_CHILDREN; ++i) {
		child_status[i] = -1;

		child[i] = ftl_io_alloc_child(parent);
		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
		child[i]->status = 0;

		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = ftl_io_alloc_child(child[i]);
			SPDK_CU_ASSERT_FATAL(io != NULL);

			gchild[i * MAX_GRAND_CHILDREN + j] = io;
			gchild_status[i * MAX_GRAND_CHILDREN + j] = -1;
			setup_io(io, dev, io_complete_cb, &gchild_status[i * MAX_GRAND_CHILDREN + j]);
			io->status = 0;

			ftl_io_inc_req(io);
		}

		/* child has no outstanding requests of its own, but its callback
		 * must still wait for the pending grandchildren */
		CU_ASSERT_TRUE(ftl_io_done(child[i]));
		ftl_io_complete(child[i]);
		CU_ASSERT_EQUAL(child_status[i], -1);
	}

	CU_ASSERT_TRUE(ftl_io_done(parent));
	ftl_io_complete(parent);
	CU_ASSERT_EQUAL(parent_status, -1);

	for (i = 0; i < MAX_CHILDREN; ++i) {
		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
			struct ftl_io *io = gchild[i * MAX_GRAND_CHILDREN + j];

			CU_ASSERT_FALSE(ftl_io_done(io));
			ftl_io_dec_req(io);
			CU_ASSERT_TRUE(ftl_io_done(io));
			ftl_io_complete(io);
			CU_ASSERT_EQUAL(gchild_status[i * MAX_GRAND_CHILDREN + j], 0);
		}

		CU_ASSERT_EQUAL(child_status[i], 0);
	}

	/* the final grandchild completion ripples up to the parent */
	CU_ASSERT_EQUAL(parent_status, 0);
	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);

	free_device(dev);
}
652 
653 static void
654 test_io_channel_create(void)
655 {
656 	struct spdk_ftl_dev *dev;
657 	struct spdk_io_channel *ioch, **ioch_array;
658 	struct ftl_io_channel *ftl_ioch;
659 	uint32_t ioch_idx;
660 
661 	dev = setup_device(g_default_conf.max_io_channels + 1, 16);
662 
663 	ioch = spdk_get_io_channel(dev);
664 	CU_ASSERT(ioch != NULL);
665 	CU_ASSERT_EQUAL(dev->num_io_channels, 1);
666 	spdk_put_io_channel(ioch);
667 	poll_threads();
668 	CU_ASSERT_EQUAL(dev->num_io_channels, 0);
669 
670 	ioch_array = calloc(dev->conf.max_io_channels, sizeof(*ioch_array));
671 	SPDK_CU_ASSERT_FATAL(ioch != NULL);
672 
673 	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ++ioch_idx) {
674 		set_thread(ioch_idx);
675 		ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
676 		SPDK_CU_ASSERT_FATAL(ioch != NULL);
677 		poll_threads();
678 
679 		ftl_ioch = ftl_io_channel_get_ctx(ioch);
680 		CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
681 	}
682 
683 	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);
684 	set_thread(dev->conf.max_io_channels);
685 	ioch = spdk_get_io_channel(dev);
686 	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);
687 	CU_ASSERT_EQUAL(ioch, NULL);
688 
689 	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx += 2) {
690 		set_thread(ioch_idx);
691 		spdk_put_io_channel(ioch_array[ioch_idx]);
692 		ioch_array[ioch_idx] = NULL;
693 		poll_threads();
694 	}
695 
696 	poll_threads();
697 	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels / 2);
698 
699 	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
700 		set_thread(ioch_idx);
701 
702 		if (ioch_array[ioch_idx] == NULL) {
703 			ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
704 			SPDK_CU_ASSERT_FATAL(ioch != NULL);
705 			poll_threads();
706 
707 			ftl_ioch = ftl_io_channel_get_ctx(ioch);
708 			CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
709 		}
710 	}
711 
712 	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
713 		set_thread(ioch_idx);
714 		spdk_put_io_channel(ioch_array[ioch_idx]);
715 	}
716 
717 	poll_threads();
718 	CU_ASSERT_EQUAL(dev->num_io_channels, 0);
719 
720 	free(ioch_array);
721 	free_device(dev);
722 }
723 
/*
 * Verify write-buffer entry acquisition per IO channel:
 *  - internal (FTL_IO_INTERNAL) entries can consume the entire buffer,
 *  - user entries behave the same without a qdepth limit,
 *  - with qdepth_limit set, user acquisition stops at the limit while
 *    internal acquisition may still drain the remaining entries,
 *  - released entries can be re-acquired.
 */
static void
test_acquire_entry(void)
{
	struct spdk_ftl_dev *dev;
	struct spdk_io_channel *ioch, **ioch_array;
	struct ftl_io_channel *ftl_ioch;
	struct ftl_wbuf_entry *entry, **entries;
	uint32_t num_entries, num_io_channels = 2;
	uint32_t ioch_idx, entry_idx, tmp_idx;

	dev = setup_device(num_io_channels, 16);

	/* per-channel write buffer capacity, in entries */
	num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
	entries = calloc(num_entries * num_io_channels, sizeof(*entries));
	SPDK_CU_ASSERT_FATAL(entries != NULL);
	ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);

	/* Acquire whole buffer of internal entries */
	entry_idx = 0;
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
		poll_threads();

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}

		/* buffer exhausted: the next acquisition must fail */
		entry = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
		CU_ASSERT(entry == NULL);
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
			entries[ioch_idx * num_entries + tmp_idx] = NULL;
		}

		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	/* Do the same for user entries */
	entry_idx = 0;
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
		poll_threads();

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}

		entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entry == NULL);
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
			entries[ioch_idx * num_entries + tmp_idx] = NULL;
		}

		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	/* Verify limits */
	entry_idx = 0;
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
		poll_threads();

		/* user acquisition is capped at qdepth_limit ... */
		ftl_ioch->qdepth_limit = num_entries / 2;
		for (tmp_idx = 0; tmp_idx < num_entries / 2; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}

		entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entry == NULL);

		/* ... but internal entries may still use the remaining buffer */
		for (; tmp_idx < num_entries; ++tmp_idx) {
			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
			CU_ASSERT(entries[entry_idx - 1] != NULL);
		}
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);

		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
			ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
			entries[ioch_idx * num_entries + tmp_idx] = NULL;
		}

		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	/* Verify acquire/release */
	set_thread(0);
	ioch = spdk_get_io_channel(dev);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	ftl_ioch = ftl_io_channel_get_ctx(ioch);
	poll_threads();

	for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
		entries[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entries[entry_idx] != NULL);
	}

	entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
	CU_ASSERT(entry == NULL);

	/* releasing half of the entries makes them acquirable again */
	for (entry_idx = 0; entry_idx < num_entries / 2; ++entry_idx) {
		ftl_release_wbuf_entry(entries[entry_idx]);
		entries[entry_idx] = NULL;
	}

	for (; entry_idx < num_entries; ++entry_idx) {
		entries[entry_idx - num_entries / 2] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
		CU_ASSERT(entries[entry_idx - num_entries / 2] != NULL);
	}

	for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
		ftl_release_wbuf_entry(entries[entry_idx]);
		entries[entry_idx] = NULL;
	}

	spdk_put_io_channel(ioch);
	poll_threads();

	free(ioch_array);
	free(entries);
	free_device(dev);
}
875 
876 static void
877 test_submit_batch(void)
878 {
879 	struct spdk_ftl_dev *dev;
880 	struct spdk_io_channel **_ioch_array;
881 	struct ftl_io_channel **ioch_array;
882 	struct ftl_wbuf_entry *entry;
883 	struct ftl_batch *batch, *batch2;
884 	uint32_t num_io_channels = 16;
885 	uint32_t ioch_idx, tmp_idx, entry_idx;
886 	uint64_t ioch_bitmap;
887 	size_t num_entries;
888 
889 	dev = setup_device(num_io_channels, num_io_channels);
890 
891 	_ioch_array = calloc(num_io_channels, sizeof(*_ioch_array));
892 	SPDK_CU_ASSERT_FATAL(_ioch_array != NULL);
893 	ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
894 	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);
895 
896 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
897 		set_thread(ioch_idx);
898 		_ioch_array[ioch_idx] = spdk_get_io_channel(dev);
899 		SPDK_CU_ASSERT_FATAL(_ioch_array[ioch_idx] != NULL);
900 		ioch_array[ioch_idx] = ftl_io_channel_get_ctx(_ioch_array[ioch_idx]);
901 		poll_threads();
902 	}
903 
904 	/* Make sure the IO channels are not starved and entries are popped in RR fashion */
905 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
906 		set_thread(ioch_idx);
907 
908 		for (entry_idx = 0; entry_idx < dev->xfer_size; ++entry_idx) {
909 			entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
910 			SPDK_CU_ASSERT_FATAL(entry != NULL);
911 
912 			num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
913 							(void **)&entry, 1, NULL);
914 			CU_ASSERT(num_entries == 1);
915 		}
916 	}
917 
918 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
919 		for (tmp_idx = 0; tmp_idx < ioch_idx; ++tmp_idx) {
920 			set_thread(tmp_idx);
921 
922 			while (spdk_ring_count(ioch_array[tmp_idx]->submit_queue) < dev->xfer_size) {
923 				entry = ftl_acquire_wbuf_entry(ioch_array[tmp_idx], 0);
924 				SPDK_CU_ASSERT_FATAL(entry != NULL);
925 
926 				num_entries = spdk_ring_enqueue(ioch_array[tmp_idx]->submit_queue,
927 								(void **)&entry, 1, NULL);
928 				CU_ASSERT(num_entries == 1);
929 			}
930 		}
931 
932 		set_thread(ioch_idx);
933 
934 		batch = ftl_get_next_batch(dev);
935 		SPDK_CU_ASSERT_FATAL(batch != NULL);
936 
937 		TAILQ_FOREACH(entry, &batch->entries, tailq) {
938 			CU_ASSERT(entry->ioch == ioch_array[ioch_idx]);
939 		}
940 
941 		ftl_release_batch(dev, batch);
942 
943 		CU_ASSERT(spdk_ring_count(ioch_array[ioch_idx]->free_queue) ==
944 			  ioch_array[ioch_idx]->num_entries);
945 	}
946 
947 	for (ioch_idx = 0; ioch_idx < num_io_channels - 1; ++ioch_idx) {
948 		batch = ftl_get_next_batch(dev);
949 		SPDK_CU_ASSERT_FATAL(batch != NULL);
950 		ftl_release_batch(dev, batch);
951 	}
952 
953 	/* Make sure the batch can be built from entries from any IO channel */
954 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
955 		set_thread(ioch_idx);
956 		entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
957 		SPDK_CU_ASSERT_FATAL(entry != NULL);
958 
959 		num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
960 						(void **)&entry, 1, NULL);
961 		CU_ASSERT(num_entries == 1);
962 	}
963 
964 	batch = ftl_get_next_batch(dev);
965 	SPDK_CU_ASSERT_FATAL(batch != NULL);
966 
967 	ioch_bitmap = 0;
968 	TAILQ_FOREACH(entry, &batch->entries, tailq) {
969 		ioch_bitmap |= 1 << entry->ioch->index;
970 	}
971 
972 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
973 		CU_ASSERT((ioch_bitmap & (1 << ioch_array[ioch_idx]->index)) != 0);
974 	}
975 	ftl_release_batch(dev, batch);
976 
977 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
978 		CU_ASSERT(spdk_ring_count(ioch_array[ioch_idx]->free_queue) ==
979 			  ioch_array[ioch_idx]->num_entries);
980 	}
981 
982 	/* Make sure pending batches are prioritized */
983 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
984 		set_thread(ioch_idx);
985 
986 		while (spdk_ring_count(ioch_array[ioch_idx]->submit_queue) < dev->xfer_size) {
987 			entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
988 			SPDK_CU_ASSERT_FATAL(entry != NULL);
989 			num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
990 							(void **)&entry, 1, NULL);
991 			CU_ASSERT(num_entries == 1);
992 		}
993 	}
994 
995 	batch = ftl_get_next_batch(dev);
996 	SPDK_CU_ASSERT_FATAL(batch != NULL);
997 
998 	TAILQ_INSERT_TAIL(&dev->pending_batches, batch, tailq);
999 	batch2 = ftl_get_next_batch(dev);
1000 	SPDK_CU_ASSERT_FATAL(batch2 != NULL);
1001 
1002 	CU_ASSERT(TAILQ_EMPTY(&dev->pending_batches));
1003 	CU_ASSERT(batch == batch2);
1004 
1005 	batch = ftl_get_next_batch(dev);
1006 	SPDK_CU_ASSERT_FATAL(batch != NULL);
1007 
1008 	ftl_release_batch(dev, batch);
1009 	ftl_release_batch(dev, batch2);
1010 
1011 	for (ioch_idx = 2; ioch_idx < num_io_channels; ++ioch_idx) {
1012 		batch = ftl_get_next_batch(dev);
1013 		SPDK_CU_ASSERT_FATAL(batch != NULL);
1014 		ftl_release_batch(dev, batch);
1015 	}
1016 
1017 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
1018 		set_thread(ioch_idx);
1019 		spdk_put_io_channel(_ioch_array[ioch_idx]);
1020 	}
1021 	poll_threads();
1022 
1023 	free(_ioch_array);
1024 	free(ioch_array);
1025 	free_device(dev);
1026 }
1027 
/*
 * Verify translation between write buffer entries and their cache addresses:
 * ftl_get_addr_from_entry() must encode the entry and its IO channel into the
 * address, and ftl_get_entry_from_addr() must invert that mapping -- both
 * while all channels are open and after some of them have been released.
 */
static void
test_entry_address(void)
{
	struct spdk_ftl_dev *dev;
	struct spdk_io_channel **ioch_array;
	struct ftl_io_channel *ftl_ioch;
	struct ftl_wbuf_entry **entry_array;
	struct ftl_addr addr;
	uint32_t num_entries, num_io_channels = 7;
	uint32_t ioch_idx, entry_idx;

	dev = setup_device(num_io_channels, num_io_channels);
	ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);

	/* Write buffer capacity expressed in FTL_BLOCK_SIZE-sized entries
	 * (presumably the per-channel entry count here -- matches the number
	 * of entries acquired per channel below) */
	num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
	entry_array = calloc(num_entries, sizeof(*entry_array));
	SPDK_CU_ASSERT_FATAL(entry_array != NULL);

	/* Open one IO channel per thread */
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		poll_threads();
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			entry_array[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			SPDK_CU_ASSERT_FATAL(entry_array[entry_idx] != NULL);

			/* The cache offset packs the channel index into the low
			 * ioch_shift bits and the entry index into the bits above,
			 * and ftl_get_entry_from_addr() must round-trip it */
			addr = ftl_get_addr_from_entry(entry_array[entry_idx]);
			CU_ASSERT(addr.cached == 1);
			CU_ASSERT((addr.cache_offset >> dev->ioch_shift) == entry_idx);
			CU_ASSERT((addr.cache_offset & ((1 << dev->ioch_shift) - 1)) == ioch_idx);
			CU_ASSERT(entry_array[entry_idx] == ftl_get_entry_from_addr(dev, addr));
		}

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			ftl_release_wbuf_entry(entry_array[entry_idx]);
		}
	}

	/* Close every even-numbered channel, leaving gaps in the channel
	 * index space */
	for (ioch_idx = 0; ioch_idx < num_io_channels; ioch_idx += 2) {
		set_thread(ioch_idx);
		spdk_put_io_channel(ioch_array[ioch_idx]);
		ioch_array[ioch_idx] = NULL;
	}
	poll_threads();

	/* The remaining (odd) channels must still round-trip entry <-> address.
	 * Note the exact index/channel bit layout is not re-checked here, only
	 * that the mapping stays invertible after channels were released. */
	for (ioch_idx = 1; ioch_idx < num_io_channels; ioch_idx += 2) {
		set_thread(ioch_idx);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			entry_array[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			SPDK_CU_ASSERT_FATAL(entry_array[entry_idx] != NULL);

			addr = ftl_get_addr_from_entry(entry_array[entry_idx]);
			CU_ASSERT(addr.cached == 1);
			CU_ASSERT(entry_array[entry_idx] == ftl_get_entry_from_addr(dev, addr));
		}

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			ftl_release_wbuf_entry(entry_array[entry_idx]);
		}
	}

	/* Close the remaining channels and clean up */
	for (ioch_idx = 1; ioch_idx < num_io_channels; ioch_idx += 2) {
		set_thread(ioch_idx);
		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	free(entry_array);
	free(ioch_array);
	free_device(dev);
}
1109 
1110 int
1111 main(int argc, char **argv)
1112 {
1113 	CU_pSuite suite;
1114 	unsigned int num_failures;
1115 
1116 	CU_set_error_action(CUEA_ABORT);
1117 	CU_initialize_registry();
1118 
1119 	suite = CU_add_suite("ftl_io_suite", NULL, NULL);
1120 
1121 
1122 	CU_ADD_TEST(suite, test_completion);
1123 	CU_ADD_TEST(suite, test_alloc_free);
1124 	CU_ADD_TEST(suite, test_child_requests);
1125 	CU_ADD_TEST(suite, test_child_status);
1126 	CU_ADD_TEST(suite, test_multi_generation);
1127 	CU_ADD_TEST(suite, test_io_channel_create);
1128 	CU_ADD_TEST(suite, test_acquire_entry);
1129 	CU_ADD_TEST(suite, test_submit_batch);
1130 	CU_ADD_TEST(suite, test_entry_address);
1131 
1132 	CU_basic_set_mode(CU_BRM_VERBOSE);
1133 	CU_basic_run_tests();
1134 	num_failures = CU_get_number_of_failures();
1135 	CU_cleanup_registry();
1136 
1137 	return num_failures;
1138 }
1139