xref: /spdk/test/unit/lib/ftl/ftl_io.c/ftl_io_ut.c (revision 8bb0ded3e55c182cea67af1f6790f8de5f38c05f)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk_cunit.h"
37 #include "common/lib/ut_multithread.c"
38 
39 #include "ftl/ftl_io.c"
40 #include "ftl/ftl_init.c"
41 #include "ftl/ftl_core.c"
42 #include "ftl/ftl_band.c"
43 
/*
 * Stubs for the external dependencies (bdev layer, mempool helpers and the
 * FTL reloc/restore modules) referenced by the FTL .c files included above.
 * DEFINE_STUB(name, ret, args, val) fakes a function returning a fixed value;
 * DEFINE_STUB_V fakes a void function.
 */
44 DEFINE_STUB(spdk_bdev_io_get_append_location, uint64_t, (struct spdk_bdev_io *bdev_io), 0);
45 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
46 DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
47 DEFINE_STUB(spdk_bdev_get_optimal_open_zones, uint32_t, (const struct spdk_bdev *b), 1);
48 DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
49 DEFINE_STUB(spdk_bdev_is_md_separate, bool, (const struct spdk_bdev *bdev), false);
50 DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
51 DEFINE_STUB(spdk_bdev_zone_appendv, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
52 		struct iovec *iov, int iovcnt, uint64_t zone_id, uint64_t num_blocks,
53 		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
54 DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *b), 1024);
55 DEFINE_STUB(spdk_bdev_zone_management, int, (struct spdk_bdev_desc *desc,
56 		struct spdk_io_channel *ch, uint64_t zone_id, enum spdk_bdev_zone_action action,
57 		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
58 DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
59 DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
60 DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type,
61 	    (const struct spdk_bdev *bdev), 0);
62 DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");
63 DEFINE_STUB(spdk_bdev_get_write_unit_size, uint32_t,
64 	    (const struct spdk_bdev *bdev), 0);
65 DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
66 		enum spdk_bdev_io_type io_type), true);
67 DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
68 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
69 	     struct spdk_bdev_module *module), 0);
70 DEFINE_STUB(spdk_bdev_open_ext, int,
71 	    (const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
72 	     void *event_ctx, struct spdk_bdev_desc **desc), 0);
73 DEFINE_STUB(spdk_bdev_read_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
74 		void *buf, uint64_t offset_blocks, uint64_t num_blocks,
75 		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
76 DEFINE_STUB(spdk_bdev_write_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
77 		void *buf, uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
78 		void *cb_arg), 0);
79 DEFINE_STUB(spdk_bdev_write_blocks_with_md, int, (struct spdk_bdev_desc *desc,
80 		struct spdk_io_channel *ch, void *buf, void *md, uint64_t offset_blocks,
81 		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
82 DEFINE_STUB(spdk_bdev_writev_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
83 		struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
84 		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
85 DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);
86 DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), 0);
87 DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 4096);
88 DEFINE_STUB(spdk_bdev_get_media_events, size_t,
89 	    (struct spdk_bdev_desc *bdev_desc, struct spdk_bdev_media_event *events,
90 	     size_t max_events), 0);
91 DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
92 DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
93 	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
94 	     uint64_t offset_blocks, uint64_t num_blocks,
95 	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
96 DEFINE_STUB(spdk_bdev_get_zone_info, int,
97 	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
98 	     uint64_t zone_id, size_t num_zones, struct spdk_bdev_zone_info *info,
99 	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
100 DEFINE_STUB(spdk_mempool_create_ctor, struct spdk_mempool *,
101 	    (const char *name, size_t count, size_t ele_size, size_t cache_size,
102 	     int socket_id, spdk_mempool_obj_cb_t *obj_init, void *obj_init_arg), NULL);
103 DEFINE_STUB(spdk_mempool_obj_iter, uint32_t,
104 	    (struct spdk_mempool *mp, spdk_mempool_obj_cb_t obj_cb, void *obj_cb_arg), 0);
/* FTL relocation/defrag module stubs -- relocation is out of scope for these tests */
105 DEFINE_STUB(ftl_reloc, bool, (struct ftl_reloc *reloc), false);
106 DEFINE_STUB_V(ftl_reloc_add, (struct ftl_reloc *reloc, struct ftl_band *band, size_t offset,
107 			      size_t num_blocks, int prio, bool defrag));
108 DEFINE_STUB_V(ftl_reloc_free, (struct ftl_reloc *reloc));
109 DEFINE_STUB_V(ftl_reloc_halt, (struct ftl_reloc *reloc));
110 DEFINE_STUB(ftl_reloc_init, struct ftl_reloc *, (struct spdk_ftl_dev *dev), NULL);
111 DEFINE_STUB(ftl_reloc_is_defrag_active, bool, (const struct ftl_reloc *reloc), false);
112 DEFINE_STUB(ftl_reloc_is_halted, bool, (const struct ftl_reloc *reloc), false);
113 DEFINE_STUB_V(ftl_reloc_resume, (struct ftl_reloc *reloc));
/* FTL restore-path stubs -- device restore is not exercised here */
114 DEFINE_STUB(ftl_restore_device, int,
115 	    (struct ftl_restore *restore, ftl_restore_fn cb, void *cb_arg), 0);
116 DEFINE_STUB(ftl_restore_md, int,
117 	    (struct spdk_ftl_dev *dev, ftl_restore_fn cb, void *cb_arg), 0);
118 DEFINE_STUB_V(ftl_restore_nv_cache,
119 	      (struct ftl_restore *restore, ftl_restore_fn cb, void *cb_arg));
120 
/*
 * Stubs that only exist under specific build configurations; they must be
 * guarded by the same preprocessor conditions as the real implementations,
 * otherwise the link would fail (or conflict) depending on the build flags.
 */
121 #if defined(FTL_META_DEBUG)
122 DEFINE_STUB(ftl_band_validate_md, bool, (struct ftl_band *band), true);
123 #endif
124 #if defined(DEBUG)
125 DEFINE_STUB_V(ftl_trace_defrag_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band));
126 DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
127 				     struct ftl_addr addr, size_t addr_cnt));
128 DEFINE_STUB_V(ftl_trace_lba_io_init, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
129 DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, int limit, size_t num_free));
130 DEFINE_STUB(ftl_trace_alloc_id, uint64_t, (struct spdk_ftl_dev *dev), 0);
131 DEFINE_STUB_V(ftl_trace_completion, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
132 				     enum ftl_trace_completion type));
133 DEFINE_STUB_V(ftl_trace_wbuf_fill, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
134 DEFINE_STUB_V(ftl_trace_wbuf_pop, (struct spdk_ftl_dev *dev, const struct ftl_wbuf_entry *entry));
135 DEFINE_STUB_V(ftl_trace_write_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band));
136 #endif
137 #if defined(FTL_META_DEBUG)
138 DEFINE_STUB_V(ftl_dev_dump_bands, (struct spdk_ftl_dev *dev));
139 #endif
140 #if defined(FTL_DUMP_STATS)
141 DEFINE_STUB_V(ftl_dev_dump_stats, (const struct spdk_ftl_dev *dev));
142 #endif
143 
/* Persistent-memory stubs, only needed when SPDK is built with PMDK support */
144 #ifdef SPDK_CONFIG_PMDK
145 DEFINE_STUB(pmem_map_file, void *,
146 	    (const char *path, size_t len, int flags, mode_t mode,
147 	     size_t *mapped_lenp, int *is_pmemp), NULL);
148 DEFINE_STUB(pmem_unmap, int, (void *addr, size_t len), 0);
149 DEFINE_STUB(pmem_memset_persist, void *, (void *pmemdest, int c, size_t len), NULL);
150 #endif
151 
/*
 * Test override of the real bdev channel getter: the descriptor itself was
 * registered as an io_device in setup_device(), so its channel is fetched
 * straight from the SPDK thread library.
 */
struct spdk_io_channel *
spdk_bdev_get_io_channel(struct spdk_bdev_desc *bdev_desc)
{
	struct spdk_io_channel *ch;

	ch = spdk_get_io_channel(bdev_desc);

	return ch;
}
157 
/* io_device create callback: there is no per-channel context to set up. */
static int
channel_create_cb(void *io_device, void *ctx)
{
	(void)io_device;
	(void)ctx;

	return 0;
}
163 
/* io_device destroy callback: nothing was allocated in create, so no-op. */
static void
channel_destroy_cb(void *io_device, void *ctx)
{
	(void)io_device;
	(void)ctx;
}
167 
/*
 * Build a minimal spdk_ftl_dev for the tests: allocates the threads,
 * hand-crafts the device's internal IO channel (with an io_pool sized for
 * ftl_md_io entries), registers the fake base bdev descriptor as an
 * io_device and initializes the FTL IO channel machinery.
 *
 * num_threads - number of UT threads to allocate (thread 0 becomes the core
 *               thread); xfer_size - transfer size stored in dev->xfer_size.
 * Returns the device; all failures are fatal CUnit asserts.
 */
168 static struct spdk_ftl_dev *
169 setup_device(uint32_t num_threads, uint32_t xfer_size)
170 {
171 	struct spdk_ftl_dev *dev;
172 	struct _ftl_io_channel *_ioch;
173 	struct ftl_io_channel *ioch;
174 	int rc;
175 
176 	allocate_threads(num_threads);
177 	set_thread(0);
178 
179 	dev = calloc(1, sizeof(*dev));
180 	SPDK_CU_ASSERT_FATAL(dev != NULL);
181 
182 	dev->core_thread = spdk_get_thread();
	/* Single allocation: the _ftl_io_channel ctx lives right behind the
	 * spdk_io_channel header, mirroring how SPDK lays out real channels.
	 */
183 	dev->ioch = calloc(1, sizeof(*_ioch) + sizeof(struct spdk_io_channel));
184 	SPDK_CU_ASSERT_FATAL(dev->ioch != NULL);
185 
186 	_ioch = (struct _ftl_io_channel *)(dev->ioch + 1);
187 	ioch = _ioch->ioch = calloc(1, sizeof(*ioch));
188 	SPDK_CU_ASSERT_FATAL(ioch != NULL);
189 
190 	ioch->elem_size = sizeof(struct ftl_md_io);
191 	ioch->io_pool = spdk_mempool_create("io-pool", 4096, ioch->elem_size, 0, 0);
192 
193 	SPDK_CU_ASSERT_FATAL(ioch->io_pool != NULL);
194 
195 	dev->conf = g_default_conf;
196 	dev->xfer_size = xfer_size;
	/* Fake, never-dereferenced descriptor; it only serves as an io_device key */
197 	dev->base_bdev_desc = (struct spdk_bdev_desc *)0xdeadbeef;
198 	spdk_io_device_register(dev->base_bdev_desc, channel_create_cb, channel_destroy_cb, 0, NULL);
199 
200 	rc = ftl_dev_init_io_channel(dev);
201 	CU_ASSERT_EQUAL(rc, 0);
202 
203 	return dev;
204 }
205 
/*
 * Tear down everything created by setup_device(): the io_pool and channel
 * context, both io_device registrations (the device itself and the fake
 * base bdev descriptor), the UT threads, and finally the device memory.
 * Ordering matters: unregistration happens before free_threads() so the
 * thread library can process it, and dev is freed last.
 */
206 static void
207 free_device(struct spdk_ftl_dev *dev)
208 {
209 	struct ftl_io_channel *ioch;
210 
211 	ioch = ftl_io_channel_get_ctx(dev->ioch);
212 	spdk_mempool_free(ioch->io_pool);
213 	free(ioch);
214 
215 	spdk_io_device_unregister(dev, NULL);
216 	spdk_io_device_unregister(dev->base_bdev_desc, NULL);
217 	free_threads();
218 
219 	free(dev->ioch_array);
220 	free(dev->iov_buf);
221 	free(dev->ioch);
222 	free(dev);
223 }
224 
225 static void
226 setup_io(struct ftl_io *io, struct spdk_ftl_dev *dev, ftl_io_fn cb, void *ctx)
227 {
228 	io->dev = dev;
229 	io->cb_fn = cb;
230 	io->cb_ctx = ctx;
231 }
232 
233 static struct ftl_io *
234 alloc_io(struct spdk_ftl_dev *dev, ftl_io_fn cb, void *ctx)
235 {
236 	struct ftl_io *io;
237 
238 	io = ftl_io_alloc(dev->ioch);
239 	SPDK_CU_ASSERT_FATAL(io != NULL);
240 	setup_io(io, dev, cb, ctx);
241 
242 	return io;
243 }
244 
/* Completion callback: stores the IO's status into the int pointed to by ctx. */
static void
io_complete_cb(struct ftl_io *io, void *ctx, int status)
{
	int *result = ctx;

	*result = status;
}
250 
/*
 * Verify the IO request-count lifecycle: an IO is not "done" while it has
 * outstanding requests, becomes done when the count drops to zero, reports
 * its stored status (-EIO) through the completion callback, and is returned
 * to the io_pool afterwards.
 */
251 static void
252 test_completion(void)
253 {
254 	struct spdk_ftl_dev *dev;
255 	struct ftl_io_channel *ioch;
256 	struct ftl_io *io;
257 	int req, status = 0;
258 	size_t pool_size;
259 
260 	dev = setup_device(1, 16);
261 	ioch = ftl_io_channel_get_ctx(dev->ioch);
262 	pool_size = spdk_mempool_count(ioch->io_pool);
263 
264 	io = alloc_io(dev, io_complete_cb, &status);
265 	io->status = -EIO;
266 
267 #define NUM_REQUESTS 16
	/* While any request is outstanding, the IO must not report done */
268 	for (req = 0; req < NUM_REQUESTS; ++req) {
269 		ftl_io_inc_req(io);
270 		CU_ASSERT_FALSE(ftl_io_done(io));
271 	}
272 
273 	CU_ASSERT_EQUAL(io->req_cnt, NUM_REQUESTS);
274 
275 	for (req = 0; req < (NUM_REQUESTS - 1); ++req) {
276 		ftl_io_dec_req(io);
277 		CU_ASSERT_FALSE(ftl_io_done(io));
278 	}
279 
280 	CU_ASSERT_EQUAL(io->req_cnt, 1);
281 
	/* The final dec flips the IO to done */
282 	ftl_io_dec_req(io);
283 	CU_ASSERT_TRUE(ftl_io_done(io));
284 
	/* Completion invokes io_complete_cb, which copies io->status into status */
285 	ftl_io_complete(io);
286 	CU_ASSERT_EQUAL(status, -EIO);
287 
	/* The IO must have been returned to the pool */
288 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
289 
290 	free_device(dev);
291 }
292 
/*
 * Verify allocation/freeing of parent/child IOs: a child can be released
 * either via ftl_io_free() or ftl_io_complete(), and the parent's completion
 * callback only fires once the parent itself completes. The io_pool count is
 * checked throughout to catch leaks or double frees.
 */
293 static void
294 test_alloc_free(void)
295 {
296 	struct spdk_ftl_dev *dev;
297 	struct ftl_io_channel *ioch;
298 	struct ftl_io *parent, *child;
299 	int parent_status = -1;
300 	size_t pool_size;
301 
302 	dev = setup_device(1, 16);
303 	ioch = ftl_io_channel_get_ctx(dev->ioch);
304 	pool_size = spdk_mempool_count(ioch->io_pool);
305 
306 	parent = alloc_io(dev, io_complete_cb, &parent_status);
307 	SPDK_CU_ASSERT_FATAL(parent != NULL);
308 	child = ftl_io_alloc_child(parent);
309 	SPDK_CU_ASSERT_FATAL(child != NULL);
310 
	/* Freeing the child puts it back in the pool; only the parent stays out */
311 	ftl_io_free(child);
312 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - 1);
313 
314 	child = ftl_io_alloc_child(parent);
315 	SPDK_CU_ASSERT_FATAL(child != NULL);
	/* Child completion alone must not trigger the parent's callback */
316 	ftl_io_complete(child);
317 	CU_ASSERT_EQUAL(parent_status, -1);
318 	ftl_io_complete(parent);
319 	CU_ASSERT_EQUAL(parent_status, 0);
320 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
321 
	/* Same sequence, but releasing the child with ftl_io_free() instead */
322 	parent_status = -1;
323 	parent = alloc_io(dev, io_complete_cb, &parent_status);
324 	SPDK_CU_ASSERT_FATAL(parent != NULL);
325 	child = ftl_io_alloc_child(parent);
326 	SPDK_CU_ASSERT_FATAL(child != NULL);
327 
328 	ftl_io_free(child);
329 	CU_ASSERT_EQUAL(parent_status, -1);
330 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - 1);
331 	ftl_io_complete(parent);
332 	CU_ASSERT_EQUAL(parent_status, 0);
333 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
334 
335 	free_device(dev);
336 }
337 
/*
 * Verify parent/child request accounting: a parent with outstanding child
 * IOs is not considered complete until every child has completed, regardless
 * of whether the children or the parent finish their own requests first.
 * Pool counts confirm all IOs are eventually returned.
 */
338 static void
339 test_child_requests(void)
340 {
341 	struct spdk_ftl_dev *dev;
342 	struct ftl_io_channel *ioch;
343 #define MAX_CHILDREN 16
344 	struct ftl_io *parent, *child[MAX_CHILDREN];
345 	int status[MAX_CHILDREN + 1], i;
346 	size_t pool_size;
347 
348 	dev = setup_device(1, 16);
349 	ioch = ftl_io_channel_get_ctx(dev->ioch);
350 	pool_size = spdk_mempool_count(ioch->io_pool);
351 
352 	/* Verify correct behaviour when children finish first */
	/* status[0] is the parent's slot; status[1..] belong to the children */
353 	parent = alloc_io(dev, io_complete_cb, &status[0]);
354 	parent->status = 0;
355 
356 	ftl_io_inc_req(parent);
357 	status[0] = -1;
358 
359 	for (i = 0; i < MAX_CHILDREN; ++i) {
360 		status[i + 1] = -1;
361 
362 		child[i] = ftl_io_alloc_child(parent);
363 		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
364 		setup_io(child[i], dev, io_complete_cb, &status[i + 1]);
365 		child[i]->status = 0;
366 
367 		ftl_io_inc_req(child[i]);
368 	}
369 
370 	CU_ASSERT_FALSE(ftl_io_done(parent));
371 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);
372 
	/* Completing every child must not complete the parent: it still has
	 * an outstanding request of its own.
	 */
373 	for (i = 0; i < MAX_CHILDREN; ++i) {
374 		CU_ASSERT_FALSE(ftl_io_done(child[i]));
375 		ftl_io_dec_req(child[i]);
376 		CU_ASSERT_TRUE(ftl_io_done(child[i]));
377 		CU_ASSERT_FALSE(ftl_io_done(parent));
378 
379 		ftl_io_complete(child[i]);
380 		CU_ASSERT_FALSE(ftl_io_done(parent));
381 		CU_ASSERT_EQUAL(status[i + 1], 0);
382 	}
383 
384 	CU_ASSERT_EQUAL(status[0], -1);
385 
386 	ftl_io_dec_req(parent);
387 	CU_ASSERT_EQUAL(parent->req_cnt, 0);
388 	CU_ASSERT_TRUE(ftl_io_done(parent));
389 
390 	ftl_io_complete(parent);
391 	CU_ASSERT_EQUAL(status[0], 0);
392 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
393 
394 
395 	/* Verify correct behaviour when parent finishes first */
396 	parent = alloc_io(dev, io_complete_cb, &status[0]);
397 	parent->status = 0;
398 
399 	ftl_io_inc_req(parent);
400 	status[0] = -1;
401 
402 	for (i = 0; i < MAX_CHILDREN; ++i) {
403 		status[i + 1] = -1;
404 
405 		child[i] = ftl_io_alloc_child(parent);
406 		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
407 		setup_io(child[i], dev, io_complete_cb, &status[i + 1]);
408 		child[i]->status = 0;
409 
410 		ftl_io_inc_req(child[i]);
411 	}
412 
413 	CU_ASSERT_FALSE(ftl_io_done(parent));
414 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);
415 
416 	ftl_io_dec_req(parent);
417 	CU_ASSERT_TRUE(ftl_io_done(parent));
418 	CU_ASSERT_EQUAL(parent->req_cnt, 0);
419 
	/* Parent completion is deferred (callback not fired, IOs not freed)
	 * while children are still outstanding.
	 */
420 	ftl_io_complete(parent);
421 	CU_ASSERT_EQUAL(status[0], -1);
422 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size - MAX_CHILDREN - 1);
423 
424 	for (i = 0; i < MAX_CHILDREN; ++i) {
425 		CU_ASSERT_FALSE(ftl_io_done(child[i]));
426 		ftl_io_dec_req(child[i]);
427 		CU_ASSERT_TRUE(ftl_io_done(child[i]));
428 
429 		ftl_io_complete(child[i]);
430 		CU_ASSERT_EQUAL(status[i + 1], 0);
431 	}
432 
	/* Last child's completion finally triggers the parent's callback */
433 	CU_ASSERT_EQUAL(status[0], 0);
434 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
435 
436 	free_device(dev);
437 }
438 
/*
 * Verify status propagation from children to parent: a failing child's
 * status overrides a successful parent's status, but an explicitly failed
 * parent keeps its own status regardless of the children's outcome.
 */
439 static void
440 test_child_status(void)
441 {
442 	struct spdk_ftl_dev *dev;
443 	struct ftl_io_channel *ioch;
444 	struct ftl_io *parent, *child[2];
445 	int parent_status, child_status[2];
446 	size_t pool_size, i;
447 
448 	dev = setup_device(1, 16);
449 	ioch = ftl_io_channel_get_ctx(dev->ioch);
450 	pool_size = spdk_mempool_count(ioch->io_pool);
451 
452 	/* Verify the first error is returned by the parent */
453 	parent = alloc_io(dev, io_complete_cb, &parent_status);
454 	parent->status = 0;
455 
456 	for (i = 0; i < 2; ++i) {
457 		child[i] = ftl_io_alloc_child(parent);
458 		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
459 		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
460 	}
461 
462 	child[0]->status = -3;
463 	child[1]->status = -4;
464 
	/* child[1] completes first, so its status (-4) is the one the parent
	 * picks up, as asserted below.
	 */
465 	ftl_io_complete(child[1]);
466 	ftl_io_complete(child[0]);
467 	ftl_io_complete(parent);
468 
469 	CU_ASSERT_EQUAL(child_status[0], -3);
470 	CU_ASSERT_EQUAL(child_status[1], -4);
471 	CU_ASSERT_EQUAL(parent_status, -4);
472 
473 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
474 
475 	/* Verify parent's status is kept if children finish successfully */
476 	parent = alloc_io(dev, io_complete_cb, &parent_status);
477 	parent->status = -1;
478 
479 	for (i = 0; i < 2; ++i) {
480 		child[i] = ftl_io_alloc_child(parent);
481 		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
482 		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
483 	}
484 
485 	child[0]->status = 0;
486 	child[1]->status = 0;
487 
488 	ftl_io_complete(parent);
489 	ftl_io_complete(child[1]);
490 	ftl_io_complete(child[0]);
491 
492 	CU_ASSERT_EQUAL(child_status[0], 0);
493 	CU_ASSERT_EQUAL(child_status[1], 0);
494 	CU_ASSERT_EQUAL(parent_status, -1);
495 
496 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
497 
498 	/* Verify parent's status is kept if children fail too */
499 	parent = alloc_io(dev, io_complete_cb, &parent_status);
500 	parent->status = -1;
501 
502 	for (i = 0; i < 2; ++i) {
503 		child[i] = ftl_io_alloc_child(parent);
504 		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
505 		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
506 	}
507 
508 	child[0]->status = -3;
509 	child[1]->status = -4;
510 
511 	ftl_io_complete(parent);
512 	ftl_io_complete(child[1]);
513 	ftl_io_complete(child[0]);
514 
515 	CU_ASSERT_EQUAL(child_status[0], -3);
516 	CU_ASSERT_EQUAL(child_status[1], -4);
517 	CU_ASSERT_EQUAL(parent_status, -1);
518 
519 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
520 
521 	free_device(dev);
522 }
523 
/*
 * Verify a three-level IO hierarchy (parent -> children -> grandchildren):
 * completion must cascade correctly no matter whether the leaves complete
 * before or after their ancestors, and every IO must return to the pool.
 */
524 static void
525 test_multi_generation(void)
526 {
527 	struct spdk_ftl_dev *dev;
528 	struct ftl_io_channel *ioch;
529 #define MAX_GRAND_CHILDREN	32
530 	struct ftl_io *parent, *child[MAX_CHILDREN], *gchild[MAX_CHILDREN * MAX_GRAND_CHILDREN];
531 	int parent_status, child_status[MAX_CHILDREN], gchild_status[MAX_CHILDREN * MAX_GRAND_CHILDREN];
532 	size_t pool_size;
533 	int i, j;
534 
535 	dev = setup_device(1, 16);
536 	ioch = ftl_io_channel_get_ctx(dev->ioch);
537 	pool_size = spdk_mempool_count(ioch->io_pool);
538 
539 	/* Verify correct behaviour when children finish first */
540 	parent = alloc_io(dev, io_complete_cb, &parent_status);
541 	parent->status = 0;
542 
543 	ftl_io_inc_req(parent);
544 	parent_status = -1;
545 
546 	for (i = 0; i < MAX_CHILDREN; ++i) {
547 		child_status[i] = -1;
548 
549 		child[i] = ftl_io_alloc_child(parent);
550 		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
551 		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
552 		child[i]->status = 0;
553 
554 
		/* Grandchildren are stored/indexed flat: i * MAX_GRAND_CHILDREN + j */
555 		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
556 			struct ftl_io *io = ftl_io_alloc_child(child[i]);
557 			SPDK_CU_ASSERT_FATAL(io != NULL);
558 
559 			gchild[i * MAX_GRAND_CHILDREN + j] = io;
560 			gchild_status[i * MAX_GRAND_CHILDREN + j] = -1;
561 			setup_io(io, dev, io_complete_cb, &gchild_status[i * MAX_GRAND_CHILDREN + j]);
562 			io->status = 0;
563 
564 			ftl_io_inc_req(io);
565 		}
566 
567 		ftl_io_inc_req(child[i]);
568 	}
569 
570 	for (i = 0; i < MAX_CHILDREN; ++i) {
571 		CU_ASSERT_FALSE(ftl_io_done(child[i]));
572 		ftl_io_dec_req(child[i]);
573 		CU_ASSERT_TRUE(ftl_io_done(child[i]));
574 
		/* Child callback must be deferred until its grandchildren complete */
575 		ftl_io_complete(child[i]);
576 		CU_ASSERT_FALSE(ftl_io_done(parent));
577 		CU_ASSERT_EQUAL(child_status[i], -1);
578 
579 		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
580 			struct ftl_io *io = gchild[i * MAX_GRAND_CHILDREN + j];
581 
582 			CU_ASSERT_FALSE(ftl_io_done(io));
583 			ftl_io_dec_req(io);
584 			CU_ASSERT_TRUE(ftl_io_done(io));
585 			ftl_io_complete(io);
586 			CU_ASSERT_EQUAL(gchild_status[i * MAX_GRAND_CHILDREN + j], 0);
587 		}
588 
589 		CU_ASSERT_EQUAL(child_status[i], 0);
590 	}
591 
592 	ftl_io_dec_req(parent);
593 	CU_ASSERT_TRUE(ftl_io_done(parent));
594 	ftl_io_complete(parent);
595 	CU_ASSERT_EQUAL(parent_status, 0);
596 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
597 
598 	/* Verify correct behaviour when parents finish first */
599 	parent = alloc_io(dev, io_complete_cb, &parent_status);
600 	parent->status = 0;
601 	parent_status = -1;
602 
603 	for (i = 0; i < MAX_CHILDREN; ++i) {
604 		child_status[i] = -1;
605 
606 		child[i] = ftl_io_alloc_child(parent);
607 		SPDK_CU_ASSERT_FATAL(child[i] != NULL);
608 		setup_io(child[i], dev, io_complete_cb, &child_status[i]);
609 		child[i]->status = 0;
610 
611 		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
612 			struct ftl_io *io = ftl_io_alloc_child(child[i]);
613 			SPDK_CU_ASSERT_FATAL(io != NULL);
614 
615 			gchild[i * MAX_GRAND_CHILDREN + j] = io;
616 			gchild_status[i * MAX_GRAND_CHILDREN + j] = -1;
617 			setup_io(io, dev, io_complete_cb, &gchild_status[i * MAX_GRAND_CHILDREN + j]);
618 			io->status = 0;
619 
620 			ftl_io_inc_req(io);
621 		}
622 
		/* Children have no requests of their own here, so they are "done",
		 * but their callbacks still wait for the grandchildren.
		 */
623 		CU_ASSERT_TRUE(ftl_io_done(child[i]));
624 		ftl_io_complete(child[i]);
625 		CU_ASSERT_EQUAL(child_status[i], -1);
626 	}
627 
628 	CU_ASSERT_TRUE(ftl_io_done(parent));
629 	ftl_io_complete(parent);
630 	CU_ASSERT_EQUAL(parent_status, -1);
631 
632 	for (i = 0; i < MAX_CHILDREN; ++i) {
633 		for (j = 0; j < MAX_GRAND_CHILDREN; ++j) {
634 			struct ftl_io *io = gchild[i * MAX_GRAND_CHILDREN + j];
635 
636 			CU_ASSERT_FALSE(ftl_io_done(io));
637 			ftl_io_dec_req(io);
638 			CU_ASSERT_TRUE(ftl_io_done(io));
639 			ftl_io_complete(io);
640 			CU_ASSERT_EQUAL(gchild_status[i * MAX_GRAND_CHILDREN + j], 0);
641 		}
642 
643 		CU_ASSERT_EQUAL(child_status[i], 0);
644 	}
645 
646 	CU_ASSERT_EQUAL(parent_status, 0);
647 	CU_ASSERT_EQUAL(spdk_mempool_count(ioch->io_pool), pool_size);
648 
649 	free_device(dev);
650 }
651 
652 static void
653 test_io_channel_create(void)
654 {
655 	struct spdk_ftl_dev *dev;
656 	struct spdk_io_channel *ioch, **ioch_array;
657 	struct ftl_io_channel *ftl_ioch;
658 	uint32_t ioch_idx;
659 
660 	dev = setup_device(g_default_conf.max_io_channels + 1, 16);
661 
662 	ioch = spdk_get_io_channel(dev);
663 	CU_ASSERT(ioch != NULL);
664 	CU_ASSERT_EQUAL(dev->num_io_channels, 1);
665 	spdk_put_io_channel(ioch);
666 	poll_threads();
667 	CU_ASSERT_EQUAL(dev->num_io_channels, 0);
668 
669 	ioch_array = calloc(dev->conf.max_io_channels, sizeof(*ioch_array));
670 	SPDK_CU_ASSERT_FATAL(ioch != NULL);
671 
672 	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ++ioch_idx) {
673 		set_thread(ioch_idx);
674 		ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
675 		SPDK_CU_ASSERT_FATAL(ioch != NULL);
676 		poll_threads();
677 
678 		ftl_ioch = ftl_io_channel_get_ctx(ioch);
679 		CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
680 	}
681 
682 	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);
683 	set_thread(dev->conf.max_io_channels);
684 	ioch = spdk_get_io_channel(dev);
685 	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);
686 	CU_ASSERT_EQUAL(ioch, NULL);
687 
688 	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx += 2) {
689 		set_thread(ioch_idx);
690 		spdk_put_io_channel(ioch_array[ioch_idx]);
691 		ioch_array[ioch_idx] = NULL;
692 		poll_threads();
693 	}
694 
695 	poll_threads();
696 	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels / 2);
697 
698 	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
699 		set_thread(ioch_idx);
700 
701 		if (ioch_array[ioch_idx] == NULL) {
702 			ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
703 			SPDK_CU_ASSERT_FATAL(ioch != NULL);
704 			poll_threads();
705 
706 			ftl_ioch = ftl_io_channel_get_ctx(ioch);
707 			CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
708 		}
709 	}
710 
711 	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
712 		set_thread(ioch_idx);
713 		spdk_put_io_channel(ioch_array[ioch_idx]);
714 	}
715 
716 	poll_threads();
717 	CU_ASSERT_EQUAL(dev->num_io_channels, 0);
718 
719 	free(ioch_array);
720 	free_device(dev);
721 }
722 
/*
 * Verify write-buffer entry acquisition per IO channel: the full buffer can
 * be drained (internal and user entries alike), acquisition fails once the
 * buffer is empty, the user qdepth_limit caps user acquisitions while
 * FTL_IO_INTERNAL bypasses the limit, and released entries can be reacquired.
 */
723 static void
724 test_acquire_entry(void)
725 {
726 	struct spdk_ftl_dev *dev;
727 	struct spdk_io_channel *ioch, **ioch_array;
728 	struct ftl_io_channel *ftl_ioch;
729 	struct ftl_wbuf_entry *entry, **entries;
730 	uint32_t num_entries, num_io_channels = 2;
731 	uint32_t ioch_idx, entry_idx, tmp_idx;
732 
733 	dev = setup_device(num_io_channels, 16);
734 
	/* Per-channel write buffer capacity, in blocks */
735 	num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
736 	entries = calloc(num_entries * num_io_channels, sizeof(*entries));
737 	SPDK_CU_ASSERT_FATAL(entries != NULL);
738 	ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
739 	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);
740 
741 	/* Acquire whole buffer of internal entries */
742 	entry_idx = 0;
743 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
744 		set_thread(ioch_idx);
745 		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
746 		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
747 		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
748 		poll_threads();
749 
750 		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
751 			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
752 			CU_ASSERT(entries[entry_idx - 1] != NULL);
753 		}
754 
		/* Buffer exhausted -- the next acquisition must fail */
755 		entry = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
756 		CU_ASSERT(entry == NULL);
757 	}
758 
759 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
760 		set_thread(ioch_idx);
761 
762 		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
763 			ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
764 			entries[ioch_idx * num_entries + tmp_idx] = NULL;
765 		}
766 
767 		spdk_put_io_channel(ioch_array[ioch_idx]);
768 	}
769 	poll_threads();
770 
771 	/* Do the same for user entries */
772 	entry_idx = 0;
773 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
774 		set_thread(ioch_idx);
775 		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
776 		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
777 		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
778 		poll_threads();
779 
780 		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
781 			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
782 			CU_ASSERT(entries[entry_idx - 1] != NULL);
783 		}
784 
785 		entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
786 		CU_ASSERT(entry == NULL);
787 	}
788 
789 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
790 		set_thread(ioch_idx);
791 
792 		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
793 			ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
794 			entries[ioch_idx * num_entries + tmp_idx] = NULL;
795 		}
796 
797 		spdk_put_io_channel(ioch_array[ioch_idx]);
798 	}
799 	poll_threads();
800 
801 	/* Verify limits */
802 	entry_idx = 0;
803 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
804 		set_thread(ioch_idx);
805 		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
806 		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
807 		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);
808 		poll_threads();
809 
		/* User IO is capped at half the buffer ... */
810 		ftl_ioch->qdepth_limit = num_entries / 2;
811 		for (tmp_idx = 0; tmp_idx < num_entries / 2; ++tmp_idx) {
812 			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
813 			CU_ASSERT(entries[entry_idx - 1] != NULL);
814 		}
815 
816 		entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
817 		CU_ASSERT(entry == NULL);
818 
		/* ... but internal IO can still use the remaining entries */
819 		for (; tmp_idx < num_entries; ++tmp_idx) {
820 			entries[entry_idx++] = ftl_acquire_wbuf_entry(ftl_ioch, FTL_IO_INTERNAL);
821 			CU_ASSERT(entries[entry_idx - 1] != NULL);
822 		}
823 	}
824 
825 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
826 		set_thread(ioch_idx);
827 
828 		for (tmp_idx = 0; tmp_idx < num_entries; ++tmp_idx) {
829 			ftl_release_wbuf_entry(entries[ioch_idx * num_entries + tmp_idx]);
830 			entries[ioch_idx * num_entries + tmp_idx] = NULL;
831 		}
832 
833 		spdk_put_io_channel(ioch_array[ioch_idx]);
834 	}
835 	poll_threads();
836 
837 	/* Verify acquire/release */
838 	set_thread(0);
839 	ioch = spdk_get_io_channel(dev);
840 	SPDK_CU_ASSERT_FATAL(ioch != NULL);
841 	ftl_ioch = ftl_io_channel_get_ctx(ioch);
842 	poll_threads();
843 
844 	for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
845 		entries[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
846 		CU_ASSERT(entries[entry_idx] != NULL);
847 	}
848 
849 	entry = ftl_acquire_wbuf_entry(ftl_ioch, 0);
850 	CU_ASSERT(entry == NULL);
851 
	/* Release half the entries and reacquire them */
852 	for (entry_idx = 0; entry_idx < num_entries / 2; ++entry_idx) {
853 		ftl_release_wbuf_entry(entries[entry_idx]);
854 		entries[entry_idx] = NULL;
855 	}
856 
857 	for (; entry_idx < num_entries; ++entry_idx) {
858 		entries[entry_idx - num_entries / 2] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
859 		CU_ASSERT(entries[entry_idx - num_entries / 2] != NULL);
860 	}
861 
862 	for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
863 		ftl_release_wbuf_entry(entries[entry_idx]);
864 		entries[entry_idx] = NULL;
865 	}
866 
867 	spdk_put_io_channel(ioch);
868 	poll_threads();
869 
870 	free(ioch_array);
871 	free(entries);
872 	free_device(dev);
873 }
874 
875 static void
876 test_submit_batch(void)
877 {
878 	struct spdk_ftl_dev *dev;
879 	struct spdk_io_channel **_ioch_array;
880 	struct ftl_io_channel **ioch_array;
881 	struct ftl_wbuf_entry *entry;
882 	struct ftl_batch *batch, *batch2;
883 	uint32_t num_io_channels = 16;
884 	uint32_t ioch_idx, tmp_idx, entry_idx;
885 	uint64_t ioch_bitmap;
886 	size_t num_entries;
887 
888 	dev = setup_device(num_io_channels, num_io_channels);
889 
890 	_ioch_array = calloc(num_io_channels, sizeof(*_ioch_array));
891 	SPDK_CU_ASSERT_FATAL(_ioch_array != NULL);
892 	ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
893 	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);
894 
895 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
896 		set_thread(ioch_idx);
897 		_ioch_array[ioch_idx] = spdk_get_io_channel(dev);
898 		SPDK_CU_ASSERT_FATAL(_ioch_array[ioch_idx] != NULL);
899 		ioch_array[ioch_idx] = ftl_io_channel_get_ctx(_ioch_array[ioch_idx]);
900 		poll_threads();
901 	}
902 
903 	/* Make sure the IO channels are not starved and entries are popped in RR fashion */
904 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
905 		set_thread(ioch_idx);
906 
907 		for (entry_idx = 0; entry_idx < dev->xfer_size; ++entry_idx) {
908 			entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
909 			SPDK_CU_ASSERT_FATAL(entry != NULL);
910 
911 			num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
912 							(void **)&entry, 1, NULL);
913 			CU_ASSERT(num_entries == 1);
914 		}
915 	}
916 
917 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
918 		for (tmp_idx = 0; tmp_idx < ioch_idx; ++tmp_idx) {
919 			set_thread(tmp_idx);
920 
921 			while (spdk_ring_count(ioch_array[tmp_idx]->submit_queue) < dev->xfer_size) {
922 				entry = ftl_acquire_wbuf_entry(ioch_array[tmp_idx], 0);
923 				SPDK_CU_ASSERT_FATAL(entry != NULL);
924 
925 				num_entries = spdk_ring_enqueue(ioch_array[tmp_idx]->submit_queue,
926 								(void **)&entry, 1, NULL);
927 				CU_ASSERT(num_entries == 1);
928 			}
929 		}
930 
931 		set_thread(ioch_idx);
932 
933 		batch = ftl_get_next_batch(dev);
934 		SPDK_CU_ASSERT_FATAL(batch != NULL);
935 
936 		TAILQ_FOREACH(entry, &batch->entries, tailq) {
937 			CU_ASSERT(entry->ioch == ioch_array[ioch_idx]);
938 		}
939 
940 		ftl_release_batch(dev, batch);
941 
942 		CU_ASSERT(spdk_ring_count(ioch_array[ioch_idx]->free_queue) ==
943 			  ioch_array[ioch_idx]->num_entries);
944 	}
945 
946 	for (ioch_idx = 0; ioch_idx < num_io_channels - 1; ++ioch_idx) {
947 		batch = ftl_get_next_batch(dev);
948 		SPDK_CU_ASSERT_FATAL(batch != NULL);
949 		ftl_release_batch(dev, batch);
950 	}
951 
952 	/* Make sure the batch can be built from entries from any IO channel */
953 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
954 		set_thread(ioch_idx);
955 		entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
956 		SPDK_CU_ASSERT_FATAL(entry != NULL);
957 
958 		num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
959 						(void **)&entry, 1, NULL);
960 		CU_ASSERT(num_entries == 1);
961 	}
962 
963 	batch = ftl_get_next_batch(dev);
964 	SPDK_CU_ASSERT_FATAL(batch != NULL);
965 
966 	ioch_bitmap = 0;
967 	TAILQ_FOREACH(entry, &batch->entries, tailq) {
968 		ioch_bitmap |= 1 << entry->ioch->index;
969 	}
970 
971 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
972 		CU_ASSERT((ioch_bitmap & (1 << ioch_array[ioch_idx]->index)) != 0);
973 	}
974 	ftl_release_batch(dev, batch);
975 
976 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
977 		CU_ASSERT(spdk_ring_count(ioch_array[ioch_idx]->free_queue) ==
978 			  ioch_array[ioch_idx]->num_entries);
979 	}
980 
981 	/* Make sure pending batches are prioritized */
982 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
983 		set_thread(ioch_idx);
984 
985 		while (spdk_ring_count(ioch_array[ioch_idx]->submit_queue) < dev->xfer_size) {
986 			entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
987 			SPDK_CU_ASSERT_FATAL(entry != NULL);
988 			num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
989 							(void **)&entry, 1, NULL);
990 			CU_ASSERT(num_entries == 1);
991 		}
992 	}
993 
994 	batch = ftl_get_next_batch(dev);
995 	SPDK_CU_ASSERT_FATAL(batch != NULL);
996 
997 	TAILQ_INSERT_TAIL(&dev->pending_batches, batch, tailq);
998 	batch2 = ftl_get_next_batch(dev);
999 	SPDK_CU_ASSERT_FATAL(batch2 != NULL);
1000 
1001 	CU_ASSERT(TAILQ_EMPTY(&dev->pending_batches));
1002 	CU_ASSERT(batch == batch2);
1003 
1004 	batch = ftl_get_next_batch(dev);
1005 	SPDK_CU_ASSERT_FATAL(batch != NULL);
1006 
1007 	ftl_release_batch(dev, batch);
1008 	ftl_release_batch(dev, batch2);
1009 
1010 	for (ioch_idx = 2; ioch_idx < num_io_channels; ++ioch_idx) {
1011 		batch = ftl_get_next_batch(dev);
1012 		SPDK_CU_ASSERT_FATAL(batch != NULL);
1013 		ftl_release_batch(dev, batch);
1014 	}
1015 
1016 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
1017 		set_thread(ioch_idx);
1018 		spdk_put_io_channel(_ioch_array[ioch_idx]);
1019 	}
1020 	poll_threads();
1021 
1022 	free(_ioch_array);
1023 	free(ioch_array);
1024 	free_device(dev);
1025 }
1026 
static void
test_entry_address(void)
{
	struct spdk_ftl_dev *dev;
	struct spdk_io_channel **ioch_array;
	struct ftl_io_channel *ftl_ioch;
	struct ftl_wbuf_entry **entry_array;
	struct ftl_addr addr;
	uint32_t num_entries, num_io_channels = 7;
	uint32_t ioch_idx, entry_idx;

	/* Verifies the entry <-> cache address round trip: an acquired write
	 * buffer entry maps to a cached ftl_addr encoding (entry index, channel
	 * index) and ftl_get_entry_from_addr() recovers the same entry, even
	 * after some channels have been destroyed.
	 */
	dev = setup_device(num_io_channels, num_io_channels);
	ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);

	/* One entry per write buffer block */
	num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
	entry_array = calloc(num_entries, sizeof(*entry_array));
	SPDK_CU_ASSERT_FATAL(entry_array != NULL);

	/* Create one IO channel per thread */
	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch_array[ioch_idx] != NULL);
		poll_threads();
	}

	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			entry_array[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			SPDK_CU_ASSERT_FATAL(entry_array[entry_idx] != NULL);

			/* cache_offset packs the entry index in the high bits and
			 * the channel index in the low ioch_shift bits
			 */
			addr = ftl_get_addr_from_entry(entry_array[entry_idx]);
			CU_ASSERT(addr.cached == 1);
			CU_ASSERT((addr.cache_offset >> dev->ioch_shift) == entry_idx);
			CU_ASSERT((addr.cache_offset & ((1 << dev->ioch_shift) - 1)) == ioch_idx);
			CU_ASSERT(entry_array[entry_idx] == ftl_get_entry_from_addr(dev, addr));
		}

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			ftl_release_wbuf_entry(entry_array[entry_idx]);
		}
	}

	/* Tear down every even-indexed channel to leave gaps in the channel array */
	for (ioch_idx = 0; ioch_idx < num_io_channels; ioch_idx += 2) {
		set_thread(ioch_idx);
		spdk_put_io_channel(ioch_array[ioch_idx]);
		ioch_array[ioch_idx] = NULL;
	}
	poll_threads();

	/* Surviving (odd-indexed) channels must still round-trip correctly */
	for (ioch_idx = 1; ioch_idx < num_io_channels; ioch_idx += 2) {
		set_thread(ioch_idx);
		ftl_ioch = ftl_io_channel_get_ctx(ioch_array[ioch_idx]);

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			entry_array[entry_idx] = ftl_acquire_wbuf_entry(ftl_ioch, 0);
			SPDK_CU_ASSERT_FATAL(entry_array[entry_idx] != NULL);

			addr = ftl_get_addr_from_entry(entry_array[entry_idx]);
			CU_ASSERT(addr.cached == 1);
			CU_ASSERT(entry_array[entry_idx] == ftl_get_entry_from_addr(dev, addr));
		}

		for (entry_idx = 0; entry_idx < num_entries; ++entry_idx) {
			ftl_release_wbuf_entry(entry_array[entry_idx]);
		}
	}

	/* Release the remaining (odd-indexed) channels */
	for (ioch_idx = 1; ioch_idx < num_io_channels; ioch_idx += 2) {
		set_thread(ioch_idx);
		spdk_put_io_channel(ioch_array[ioch_idx]);
	}
	poll_threads();

	free(entry_array);
	free(ioch_array);
	free_device(dev);
}
1108 
1109 int
1110 main(int argc, char **argv)
1111 {
1112 	CU_pSuite suite;
1113 	unsigned int num_failures;
1114 
1115 	CU_set_error_action(CUEA_ABORT);
1116 	CU_initialize_registry();
1117 
1118 	suite = CU_add_suite("ftl_io_suite", NULL, NULL);
1119 
1120 
1121 	CU_ADD_TEST(suite, test_completion);
1122 	CU_ADD_TEST(suite, test_alloc_free);
1123 	CU_ADD_TEST(suite, test_child_requests);
1124 	CU_ADD_TEST(suite, test_child_status);
1125 	CU_ADD_TEST(suite, test_multi_generation);
1126 	CU_ADD_TEST(suite, test_io_channel_create);
1127 	CU_ADD_TEST(suite, test_acquire_entry);
1128 	CU_ADD_TEST(suite, test_submit_batch);
1129 	CU_ADD_TEST(suite, test_entry_address);
1130 
1131 	CU_basic_set_mode(CU_BRM_VERBOSE);
1132 	CU_basic_run_tests();
1133 	num_failures = CU_get_number_of_failures();
1134 	CU_cleanup_registry();
1135 
1136 	return num_failures;
1137 }
1138