1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2019 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 #include "spdk_internal/cunit.h"
9 #include "spdk/env.h"
10 #include "spdk_internal/mock.h"
11 #include "thread/thread_internal.h"
12 #include "common/lib/test_env.c"
13 #include "bdev/zone_block/vbdev_zone_block.c"
14 #include "bdev/zone_block/vbdev_zone_block_rpc.c"
15 
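/* Default geometry of the fake base bdev: 2^40 blocks of 4096 bytes. Only the counts are used; no buffer of that size is ever allocated. */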
16 #define BLOCK_CNT ((uint64_t)1024ul * 1024ul * 1024ul * 1024ul)
17 #define BLOCK_SIZE 4096
18 
19 /* Globals */
20 uint64_t g_block_cnt;
21 struct io_output *g_io_output = NULL;
22 uint32_t g_max_io_size;
23 uint32_t g_io_output_index;
24 uint32_t g_io_comp_status;
25 uint8_t g_rpc_err;
26 uint8_t g_json_decode_obj_construct;
27 static TAILQ_HEAD(, spdk_bdev) g_bdev_list = TAILQ_HEAD_INITIALIZER(g_bdev_list);
28 void *g_rpc_req = NULL;
29 static struct spdk_thread *g_thread;
30 
31 struct io_output {
32 	struct spdk_bdev_desc       *desc;
33 	struct spdk_io_channel      *ch;
34 	uint64_t                    offset_blocks;
35 	uint64_t                    num_blocks;
36 	spdk_bdev_io_completion_cb  cb;
37 	void                        *cb_arg;
38 	enum spdk_bdev_io_type      iotype;
39 };
40 
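/* No-op stubs for module registration, JSON writing/decoding and RPC plumbing that the zone_block code calls but whose behavior these tests do not need. */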
41 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
42 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
43 DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, void *out), 0);
44 DEFINE_STUB(spdk_json_decode_uint64, int, (const struct spdk_json_val *val, void *out), 0);
45 DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
46 DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0);
47 DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0);
48 DEFINE_STUB(spdk_json_write_named_string, int, (struct spdk_json_write_ctx *w,
49 		const char *name, const char *val), 0);
50 DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
51 		enum spdk_bdev_io_type io_type), true);
52 DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0);
53 DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w,
54 		const char *name), 0);
55 DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0);
56 DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func,
57 		uint32_t state_mask));
58 DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request,
59 					struct spdk_json_write_ctx *w));
60 DEFINE_STUB_V(spdk_jsonrpc_send_bool_response, (struct spdk_jsonrpc_request *request,
61 		bool value));
62 DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
63 	    (void *)0);
64 
65 static void
66 set_test_opts(void)
67 {
68 	g_max_io_size = 1024;
69 }
70 
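/* Allocate the g_io_output tracking array (g_max_io_size entries) and record the block count to use for the fake base bdev. */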
71 static void
72 init_test_globals(uint64_t block_cnt)
73 {
74 	g_io_output = calloc(g_max_io_size, sizeof(struct io_output));
75 	SPDK_CU_ASSERT_FATAL(g_io_output != NULL);
76 	g_io_output_index = 0;
77 	g_block_cnt = block_cnt;
78 }
79 
80 static void
81 free_test_globals(void)
82 {
83 	free(g_io_output);
84 	g_io_output = NULL;
85 }
86 
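/* Test I/Os are allocated with calloc(), so freeing them is a plain free(). */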
87 void
88 spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
89 {
90 	free(bdev_io);
91 }
92 
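/* Stubbed open: look the bdev up by name in the test list and hand the bdev pointer itself back as the descriptor. */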
93 int
94 spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
95 		   void *event_ctx, struct spdk_bdev_desc **_desc)
96 {
97 	struct spdk_bdev *bdev;
98 
99 	TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
100 		if (strcmp(bdev_name, bdev->name) == 0) {
101 			*_desc = (void *)bdev;
102 			return 0;
103 		}
104 	}
105 
106 	return -ENODEV;
107 }
108 
109 struct spdk_bdev *
110 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
111 {
112 	return (void *)desc;
113 }
114 
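/* Registration stubs keep bdevs on a local list so name lookups work and unregister can invoke the vbdev's destruct callback. */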
115 int
116 spdk_bdev_register(struct spdk_bdev *bdev)
117 {
118 	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(bdev->name));
119 	TAILQ_INSERT_TAIL(&g_bdev_list, bdev, internal.link);
120 
121 	return 0;
122 }
123 
124 void
125 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
126 {
127 	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(bdev->name), bdev);
128 	TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
129 
130 	bdev->fn_table->destruct(bdev->ctxt);
131 
132 	if (cb_fn) {
133 		cb_fn(cb_arg, 0);
134 	}
135 }
136 
137 int
138 spdk_bdev_unregister_by_name(const char *bdev_name, struct spdk_bdev_module *module,
139 			     spdk_bdev_unregister_cb cb_fn, void *cb_arg)
140 {
141 	struct spdk_bdev *bdev;
142 
143 	CU_ASSERT(module == &bdev_zoned_if);
144 
145 	bdev = spdk_bdev_get_by_name(bdev_name);
146 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
147 
148 	spdk_bdev_unregister(bdev, cb_fn, cb_arg);
149 
150 	return 0;
151 }
152 
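/* Config-dump hook: verify the numeric fields written to JSON against the construct request stored in g_rpc_req. */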
153 int
154 spdk_json_write_named_uint64(struct spdk_json_write_ctx *w, const char *name, uint64_t val)
155 {
156 	struct rpc_construct_zone_block *req = g_rpc_req;
157 	if (strcmp(name, "zone_capacity") == 0) {
158 		CU_ASSERT(req->zone_capacity == val);
159 	} else if (strcmp(name, "optimal_open_zones") == 0) {
160 		CU_ASSERT(req->optimal_open_zones == val);
161 	}
162 
163 	return 0;
164 }
165 
166 const char *
167 spdk_bdev_get_name(const struct spdk_bdev *bdev)
168 {
169 	return bdev->name;
170 }
171 
172 bool
173 spdk_bdev_is_zoned(const struct spdk_bdev *bdev)
174 {
175 	return bdev->zoned;
176 }
177 
178 int
179 spdk_json_write_string(struct spdk_json_write_ctx *w, const char *val)
180 {
181 	return 0;
182 }
183 
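/* Minimal claim bookkeeping: a single v1 module pointer guarded by claim_type, enough to catch double claims of the base bdev. */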
184 int
185 spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
186 			    struct spdk_bdev_module *module)
187 {
188 	if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
189 		CU_ASSERT(bdev->internal.claim.v1.module != NULL);
190 		return -1;
191 	}
192 	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
193 	bdev->internal.claim_type = SPDK_BDEV_CLAIM_EXCL_WRITE;
194 	bdev->internal.claim.v1.module = module;
195 	return 0;
196 }
197 
198 void
199 spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
200 {
201 	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
202 	CU_ASSERT(bdev->internal.claim.v1.module != NULL);
203 	bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
204 	bdev->internal.claim.v1.module = NULL;
205 }
206 
207 void
208 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
209 {
210 	g_io_comp_status = ((status == SPDK_BDEV_IO_STATUS_SUCCESS) ? true : false);
211 }
212 
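/* Fake JSON decode: deep-copy the prepared construct or delete request (selected by g_json_decode_obj_construct) into the decoder output. */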
213 int
214 spdk_json_decode_object(const struct spdk_json_val *values,
215 			const struct spdk_json_object_decoder *decoders, size_t num_decoders,
216 			void *out)
217 {
218 	struct rpc_construct_zone_block *construct, *_construct;
219 	struct rpc_delete_zone_block *delete, *_delete;
220 
221 	if (g_json_decode_obj_construct) {
222 		construct = g_rpc_req;
223 		_construct = out;
224 
225 		_construct->name = strdup(construct->name);
226 		SPDK_CU_ASSERT_FATAL(_construct->name != NULL);
227 		_construct->base_bdev = strdup(construct->base_bdev);
228 		SPDK_CU_ASSERT_FATAL(_construct->base_bdev != NULL);
229 		_construct->zone_capacity = construct->zone_capacity;
230 		_construct->optimal_open_zones = construct->optimal_open_zones;
231 	} else {
232 		delete = g_rpc_req;
233 		_delete = out;
234 
235 		_delete->name = strdup(delete->name);
236 		SPDK_CU_ASSERT_FATAL(_delete->name != NULL);
237 	}
238 
239 	return 0;
240 }
241 
242 struct spdk_json_write_ctx *
243 spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request)
244 {
245 	return (void *)1;
246 }
247 
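/* Create the fake base bdev "Nvme0n1" with the test block size and count and put it on the test bdev list. */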
248 static struct spdk_bdev *
249 create_nvme_bdev(void)
250 {
251 	struct spdk_bdev *base_bdev;
252 	char *name = "Nvme0n1";
253 	base_bdev = calloc(1, sizeof(struct spdk_bdev));
254 	SPDK_CU_ASSERT_FATAL(base_bdev != NULL);
255 	base_bdev->name = strdup(name);
256 	SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL);
257 	base_bdev->blocklen = BLOCK_SIZE;
258 	base_bdev->blockcnt = g_block_cnt;
259 	base_bdev->write_unit_size = 1;
260 	TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link);
261 
262 	return base_bdev;
263 }
264 
265 static void
266 base_bdevs_cleanup(void)
267 {
268 	struct spdk_bdev *bdev;
269 	struct spdk_bdev *bdev_next;
270 
271 	if (!TAILQ_EMPTY(&g_bdev_list)) {
272 		TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) {
273 			free(bdev->name);
274 			TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
275 			free(bdev);
276 		}
277 	}
278 }
279 
280 struct spdk_bdev *
281 spdk_bdev_get_by_name(const char *bdev_name)
282 {
283 	struct spdk_bdev *bdev;
284 
285 	if (!TAILQ_EMPTY(&g_bdev_list)) {
286 		TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
287 			if (strcmp(bdev_name, bdev->name) == 0) {
288 				return bdev;
289 			}
290 		}
291 	}
292 
293 	return NULL;
294 }
295 
296 void
297 spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request,
298 				 int error_code, const char *msg)
299 {
300 	g_rpc_err = 1;
301 }
302 
303 void
304 spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request,
305 				     int error_code, const char *fmt, ...)
306 {
307 	g_rpc_err = 1;
308 }
309 
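/* Record one child I/O submitted to the base bdev so tests can inspect what the vbdev issued. */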
310 static void
311 set_io_output(struct io_output *output,
312 	      struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
313 	      uint64_t offset_blocks, uint64_t num_blocks,
314 	      spdk_bdev_io_completion_cb cb, void *cb_arg,
315 	      enum spdk_bdev_io_type iotype)
316 {
317 	output->desc = desc;
318 	output->ch = ch;
319 	output->offset_blocks = offset_blocks;
320 	output->num_blocks = num_blocks;
321 	output->cb = cb;
322 	output->cb_arg = cb_arg;
323 	output->iotype = iotype;
324 }
325 
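/* Base bdev I/O stubs: log the request with set_io_output(), allocate a child spdk_bdev_io and complete it immediately with success. */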
326 int
327 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
328 		       uint64_t offset_blocks, uint64_t num_blocks,
329 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
330 {
331 	struct io_output *output = &g_io_output[g_io_output_index];
332 	struct spdk_bdev_io *child_io;
333 
334 	set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
335 		      SPDK_BDEV_IO_TYPE_UNMAP);
336 	g_io_output_index++;
337 
338 	child_io = calloc(1, sizeof(struct spdk_bdev_io));
339 	SPDK_CU_ASSERT_FATAL(child_io != NULL);
340 	cb(child_io, true, cb_arg);
341 
342 	return 0;
343 }
344 
345 int
346 spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
347 				struct iovec *iov, int iovcnt, void *md,
348 				uint64_t offset_blocks, uint64_t num_blocks,
349 				spdk_bdev_io_completion_cb cb, void *cb_arg)
350 {
351 	struct io_output *output = &g_io_output[g_io_output_index];
352 	struct spdk_bdev_io *child_io;
353 
354 	SPDK_CU_ASSERT_FATAL(g_io_output_index < g_max_io_size);
355 
356 	set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
357 		      SPDK_BDEV_IO_TYPE_WRITE);
358 	g_io_output_index++;
359 
360 	child_io = calloc(1, sizeof(struct spdk_bdev_io));
361 	SPDK_CU_ASSERT_FATAL(child_io != NULL);
362 	child_io->internal.desc = desc;
363 	child_io->type = SPDK_BDEV_IO_TYPE_WRITE;
364 	child_io->u.bdev.iovs = iov;
365 	child_io->u.bdev.iovcnt = iovcnt;
366 	child_io->u.bdev.md_buf = md;
367 	child_io->u.bdev.num_blocks = num_blocks;
368 	child_io->u.bdev.offset_blocks = offset_blocks;
369 	cb(child_io, true, cb_arg);
370 
371 	return 0;
372 }
373 
374 
375 int
376 spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
377 			struct iovec *iov, int iovcnt,
378 			uint64_t offset_blocks, uint64_t num_blocks,
379 			spdk_bdev_io_completion_cb cb, void *cb_arg)
380 {
381 
382 	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks,
383 					       cb, cb_arg);
384 }
385 
386 int
387 spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
388 			       struct iovec *iov, int iovcnt, void *md,
389 			       uint64_t offset_blocks, uint64_t num_blocks,
390 			       spdk_bdev_io_completion_cb cb, void *cb_arg)
391 {
392 	struct io_output *output = &g_io_output[g_io_output_index];
393 	struct spdk_bdev_io *child_io;
394 
395 	SPDK_CU_ASSERT_FATAL(g_io_output_index < g_max_io_size);
396 	set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
397 		      SPDK_BDEV_IO_TYPE_READ);
398 	g_io_output_index++;
399 
400 	child_io = calloc(1, sizeof(struct spdk_bdev_io));
401 	SPDK_CU_ASSERT_FATAL(child_io != NULL);
402 	cb(child_io, true, cb_arg);
403 
404 	return 0;
405 }
406 
407 int
408 spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
409 		       struct iovec *iov, int iovcnt,
410 		       uint64_t offset_blocks, uint64_t num_blocks,
411 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
412 {
413 
414 	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks,
415 					      cb, cb_arg);
416 }
417 
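/* Assert whether a zone_block config entry with the given vbdev name exists. */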
418 static void
419 verify_config_present(const char *name, bool presence)
420 {
421 	struct bdev_zone_block_config *cfg;
422 	bool cfg_found;
423 
424 	cfg_found = false;
425 
426 	TAILQ_FOREACH(cfg, &g_bdev_configs, link) {
427 		if (cfg->vbdev_name != NULL) {
428 			if (strcmp(name, cfg->vbdev_name) == 0) {
429 				cfg_found = true;
430 				break;
431 			}
432 		}
433 	}
434 
435 	if (presence == true) {
436 		CU_ASSERT(cfg_found == true);
437 	} else {
438 		CU_ASSERT(cfg_found == false);
439 	}
440 }
441 
442 static void
443 verify_bdev_present(const char *name, bool presence)
444 {
445 	struct bdev_zone_block *bdev;
446 	bool bdev_found = false;
447 
448 	TAILQ_FOREACH(bdev, &g_bdev_nodes, link) {
449 		if (strcmp(bdev->bdev.name, name) == 0) {
450 			bdev_found = true;
451 			break;
452 		}
453 	}
454 	if (presence == true) {
455 		CU_ASSERT(bdev_found == true);
456 	} else {
457 		CU_ASSERT(bdev_found == false);
458 	}
459 }
460 
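/* Build a construct RPC request in g_rpc_req; optionally create the base bdev it references first. */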
461 static void
462 initialize_create_req(const char *vbdev_name, const char *base_name,
463 		      uint64_t zone_capacity, uint64_t optimal_open_zones, bool create_base_bdev)
464 {
465 	struct rpc_construct_zone_block *r;
466 
467 	r = g_rpc_req = calloc(1, sizeof(struct rpc_construct_zone_block));
468 	SPDK_CU_ASSERT_FATAL(r != NULL);
469 
470 	r->name = strdup(vbdev_name);
471 	SPDK_CU_ASSERT_FATAL(r->name != NULL);
472 	r->base_bdev = strdup(base_name);
473 	SPDK_CU_ASSERT_FATAL(r->base_bdev != NULL);
474 	r->zone_capacity = zone_capacity;
475 	r->optimal_open_zones = optimal_open_zones;
476 
477 	if (create_base_bdev == true) {
478 		create_nvme_bdev();
479 	}
480 	g_rpc_err = 0;
481 	g_json_decode_obj_construct = 1;
482 }
483 
484 static void
485 free_create_req(void)
486 {
487 	struct rpc_construct_zone_block *r = g_rpc_req;
488 
489 	free(r->name);
490 	free(r->base_bdev);
491 	free(r);
492 	g_rpc_req = NULL;
493 }
494 
495 static void
496 initialize_delete_req(const char *vbdev_name)
497 {
498 	struct rpc_delete_zone_block *r;
499 
500 	r = g_rpc_req = calloc(1, sizeof(struct rpc_delete_zone_block));
501 	SPDK_CU_ASSERT_FATAL(r != NULL);
502 	r->name = strdup(vbdev_name);
503 	SPDK_CU_ASSERT_FATAL(r->name != NULL);
504 
505 	g_rpc_err = 0;
506 	g_json_decode_obj_construct = 0;
507 }
508 
509 static void
510 free_delete_req(void)
511 {
512 	struct rpc_delete_zone_block *r = g_rpc_req;
513 
514 	free(r->name);
515 	free(r);
516 	g_rpc_req = NULL;
517 }
518 
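/* Check the stored config against the construct request; an optimal_open_zones of 0 is expected to be clamped to 1. */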
519 static void
520 verify_zone_config(bool presence)
521 {
522 	struct rpc_construct_zone_block *r = g_rpc_req;
523 	struct bdev_zone_block_config *cfg = NULL;
524 
525 	TAILQ_FOREACH(cfg, &g_bdev_configs, link) {
526 		if (strcmp(r->name, cfg->vbdev_name) == 0) {
527 			if (presence == false) {
528 				break;
529 			}
530 			CU_ASSERT(strcmp(r->base_bdev, cfg->bdev_name) == 0);
531 			CU_ASSERT(r->zone_capacity == cfg->zone_capacity);
532 			CU_ASSERT(spdk_max(r->optimal_open_zones, 1) == cfg->optimal_open_zones);
533 			break;
534 		}
535 	}
536 
537 	if (presence) {
538 		CU_ASSERT(cfg != NULL);
539 	} else {
540 		CU_ASSERT(cfg == NULL);
541 	}
542 }
543 
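/* Check the registered vbdev: zone count, zone size (the requested capacity rounded up to a power of two), and every zone initially FULL. */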
544 static void
545 verify_zone_bdev(bool presence)
546 {
547 	struct rpc_construct_zone_block *r = g_rpc_req;
548 	struct block_zone *zone;
549 	struct bdev_zone_block *bdev;
550 	bool bdev_found = false;
551 	uint32_t i;
552 	uint64_t expected_num_zones;
553 	uint64_t expected_optimal_open_zones;
554 
555 	TAILQ_FOREACH(bdev, &g_bdev_nodes, link) {
556 		if (strcmp(bdev->bdev.name, r->name) == 0) {
557 			bdev_found = true;
558 			if (presence == false) {
559 				break;
560 			}
561 
562 			expected_optimal_open_zones = spdk_max(r->optimal_open_zones, 1);
563 			expected_num_zones = g_block_cnt / spdk_align64pow2(r->zone_capacity) / expected_optimal_open_zones;
564 			expected_num_zones *= expected_optimal_open_zones;
565 
566 			CU_ASSERT(bdev->num_zones == expected_num_zones);
567 			CU_ASSERT(bdev->bdev.zoned == true);
568 			CU_ASSERT(bdev->bdev.blockcnt == expected_num_zones * spdk_align64pow2(r->zone_capacity));
569 			CU_ASSERT(bdev->bdev.blocklen == BLOCK_SIZE);
570 			CU_ASSERT(bdev->bdev.ctxt == bdev);
571 			CU_ASSERT(bdev->bdev.fn_table == &zone_block_fn_table);
572 			CU_ASSERT(bdev->bdev.module == &bdev_zoned_if);
573 			CU_ASSERT(bdev->bdev.write_unit_size == 1);
574 			CU_ASSERT(bdev->bdev.zone_size == spdk_align64pow2(r->zone_capacity));
575 			CU_ASSERT(bdev->bdev.optimal_open_zones == expected_optimal_open_zones);
576 			CU_ASSERT(bdev->bdev.max_open_zones == 0);
577 
578 			for (i = 0; i < bdev->num_zones; i++) {
579 				zone = &bdev->zones[i];
580 				CU_ASSERT(zone->zone_info.state == SPDK_BDEV_ZONE_STATE_FULL);
581 				CU_ASSERT(zone->zone_info.capacity == r->zone_capacity);
582 			}
583 			break;
584 		}
585 	}
586 
587 	if (presence == true) {
588 		CU_ASSERT(bdev_found == true);
589 	} else {
590 		CU_ASSERT(bdev_found == false);
591 	}
592 }
593 
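/* Drive the create RPC end to end and verify that config and bdev presence match the expected outcome. */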
594 static void
595 send_create_vbdev(char *vdev_name, char *name, uint64_t zone_capacity, uint64_t optimal_open_zones,
596 		  bool create_bdev, bool success)
597 {
598 	initialize_create_req(vdev_name, name, zone_capacity, optimal_open_zones, create_bdev);
599 	rpc_zone_block_create(NULL, NULL);
600 	CU_ASSERT(g_rpc_err != success);
601 	verify_zone_config(success);
602 	verify_zone_bdev(success);
603 	free_create_req();
604 }
605 
606 static void
607 send_delete_vbdev(char *name, bool success)
608 {
609 	initialize_delete_req(name);
610 	rpc_zone_block_delete(NULL, NULL);
611 	verify_config_present(name, false);
612 	verify_bdev_present(name, false);
613 	CU_ASSERT(g_rpc_err != success);
614 	free_delete_req();
615 }
616 
617 static void
618 test_cleanup(void)
619 {
620 	CU_ASSERT(spdk_thread_is_idle(g_thread));
621 	zone_block_finish();
622 	base_bdevs_cleanup();
623 	free_test_globals();
624 }
625 
626 static void
627 test_zone_block_create(void)
628 {
629 	struct spdk_bdev *bdev;
630 	char *name = "Nvme0n1";
631 	size_t num_zones = 16;
632 	size_t zone_capacity = BLOCK_CNT / num_zones;
633 
634 	init_test_globals(BLOCK_CNT);
635 	CU_ASSERT(zone_block_init() == 0);
636 
637 	/* Create zoned virtual device before nvme device */
638 	verify_config_present("zone_dev1", false);
639 	verify_bdev_present("zone_dev1", false);
640 	initialize_create_req("zone_dev1", name, zone_capacity, 1, false);
641 	rpc_zone_block_create(NULL, NULL);
642 	CU_ASSERT(g_rpc_err == 0);
643 	verify_zone_config(true);
644 	verify_zone_bdev(false);
645 	bdev = create_nvme_bdev();
646 	zone_block_examine(bdev);
647 	verify_zone_bdev(true);
648 	free_create_req();
649 
650 	/* Delete bdev */
651 	send_delete_vbdev("zone_dev1", true);
652 
653 	/* Create zoned virtual device and verify its correctness */
654 	verify_config_present("zone_dev1", false);
655 	send_create_vbdev("zone_dev1", name, zone_capacity, 1, false, true);
656 	send_delete_vbdev("zone_dev1", true);
657 
658 	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
659 	test_cleanup();
660 }
661 
662 static void
663 test_zone_block_create_invalid(void)
664 {
665 	char *name = "Nvme0n1";
666 	size_t num_zones = 8;
667 	size_t zone_capacity = BLOCK_CNT / num_zones;
668 
669 	init_test_globals(BLOCK_CNT);
670 	CU_ASSERT(zone_block_init() == 0);
671 
672 	/* Create zoned virtual device and verify its correctness */
673 	verify_config_present("zone_dev1", false);
674 	verify_bdev_present("zone_dev1", false);
675 	send_create_vbdev("zone_dev1", name, zone_capacity, 1, true, true);
676 
677 	/* Try to create another zoned virtual device on the same bdev */
678 	send_create_vbdev("zone_dev2", name, zone_capacity, 1, false, false);
679 
680 	/* Try to create zoned virtual device on the zoned bdev */
681 	send_create_vbdev("zone_dev2", "zone_dev1", zone_capacity, 1, false, false);
682 
683 	/* Unclaim the base bdev */
684 	send_delete_vbdev("zone_dev1", true);
685 
686 	/* Try to create zoned virtual device with 0 zone capacity */
687 	send_create_vbdev("zone_dev1", name, 0, 1, false, false);
688 
689 	/* Try to create zoned virtual device with 0 optimal number of zones */
690 	send_create_vbdev("zone_dev1", name, zone_capacity, 0, false, false);
691 
692 	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
693 	test_cleanup();
694 }
695 
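/* Prepare a GET_ZONE_INFO bdev_io with a buffer sized for num_zones zone-info entries. */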
696 static void
697 bdev_io_zone_info_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev,
698 			     uint64_t zone_id, uint32_t num_zones)
699 {
700 	bdev_io->bdev = bdev;
701 	bdev_io->type = SPDK_BDEV_IO_TYPE_GET_ZONE_INFO;
702 
703 	bdev_io->u.zone_mgmt.zone_id = zone_id;
704 
705 	bdev_io->u.zone_mgmt.num_zones = num_zones;
706 	if (num_zones) {
707 		bdev_io->u.zone_mgmt.buf = calloc(num_zones, sizeof(struct spdk_bdev_zone_info));
708 		SPDK_CU_ASSERT_FATAL(bdev_io->u.zone_mgmt.buf != NULL);
709 	}
710 }
711 
712 static void
713 bdev_io_zone_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev,
714 			uint64_t zone_id, uint32_t num_zones, uint8_t zone_action)
715 {
716 	bdev_io->bdev = bdev;
717 	bdev_io->type = SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT;
718 
719 	bdev_io->u.zone_mgmt.zone_action = zone_action;
720 	bdev_io->u.zone_mgmt.zone_id = zone_id;
721 }
722 
723 static void
724 bdev_io_zone_cleanup(struct spdk_bdev_io *bdev_io)
725 {
726 	free(bdev_io->u.zone_mgmt.buf);
727 	free(bdev_io);
728 }
729 
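/* Prepare a data-path bdev_io; reads and writes get a single iovec backing buffer, unmap and flush carry no data. */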
730 static void
731 bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev,
732 		   uint64_t lba, uint64_t blocks, int16_t iotype)
733 {
734 	bdev_io->bdev = bdev;
735 	bdev_io->u.bdev.offset_blocks = lba;
736 	bdev_io->u.bdev.num_blocks = blocks;
737 	bdev_io->type = iotype;
738 
739 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) {
740 		return;
741 	}
742 
743 	bdev_io->u.bdev.iovcnt = 1;
744 	bdev_io->u.bdev.iovs = &bdev_io->iov;
745 	bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * BLOCK_SIZE);
746 	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL);
747 	bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * BLOCK_SIZE;
748 }
749 
750 static void
751 bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
752 {
753 	free(bdev_io->iov.iov_base);
754 	free(bdev_io);
755 }
756 
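/* Create a vbdev carved into num_zones zones and return its bdev_zone_block node for direct inspection. */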
757 static struct bdev_zone_block *
758 create_and_get_vbdev(char *vdev_name, char *name, uint64_t num_zones, uint64_t optimal_open_zones,
759 		     bool create_bdev)
760 {
761 	size_t zone_size = g_block_cnt / num_zones;
762 	struct bdev_zone_block *bdev = NULL;
763 
764 	send_create_vbdev(vdev_name, name, zone_size, optimal_open_zones, create_bdev, true);
765 
766 	TAILQ_FOREACH(bdev, &g_bdev_nodes, link) {
767 		if (strcmp(bdev->bdev.name, vdev_name) == 0) {
768 			break;
769 		}
770 	}
771 
772 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
773 	return bdev;
774 }
775 
776 static void
777 test_supported_io_types(void)
778 {
779 	struct bdev_zone_block *bdev;
780 	char *name = "Nvme0n1";
781 	uint32_t num_zones = 8;
782 
783 	init_test_globals(BLOCK_CNT);
784 	CU_ASSERT(zone_block_init() == 0);
785 
786 	/* Create zone dev */
787 	bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
788 
789 	CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT) == true);
790 	CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZONE_APPEND) == true);
791 	CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_READ) == true);
792 	CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE) == true);
793 
794 	CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN) == false);
795 	CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO) == false);
796 	CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO_MD) == false);
797 	CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP) == false);
798 	CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH) == false);
799 	CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_RESET) == false);
800 	CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) == false);
801 	CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY) == false);
802 
803 	send_delete_vbdev("zone_dev1", true);
804 	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
805 	test_cleanup();
806 }
807 
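/* Submit GET_ZONE_INFO for a single zone and, on success, verify zone id, write pointer, capacity and state. */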
808 static void
809 send_zone_info(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
810 	       uint64_t wp,
811 	       enum spdk_bdev_zone_state state, uint32_t output_index, bool success)
812 {
813 	struct spdk_bdev_io *bdev_io;
814 	struct spdk_bdev_zone_info *info;
815 
816 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io));
817 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
818 	bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, zone_id, 1);
819 	memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
820 	g_io_output_index = output_index;
821 
822 	g_io_comp_status = !success;
823 	zone_block_submit_request(ch, bdev_io);
824 	CU_ASSERT(g_io_comp_status == success);
825 
826 	if (success) {
827 		info = (struct spdk_bdev_zone_info *)bdev_io->u.zone_mgmt.buf;
828 		CU_ASSERT(info->zone_id == zone_id);
829 		CU_ASSERT(info->capacity == bdev->zone_capacity);
830 		CU_ASSERT(info->write_pointer == wp);
831 		CU_ASSERT(info->state == state);
832 	}
833 
834 	bdev_io_zone_cleanup(bdev_io);
835 }
836 
837 static void
838 test_get_zone_info(void)
839 {
840 	struct spdk_io_channel *ch;
841 	struct bdev_zone_block *bdev;
842 	struct spdk_bdev_io *bdev_io;
843 	char *name = "Nvme0n1";
844 	uint32_t num_zones = 8, i;
845 	struct spdk_bdev_zone_info *info;
846 
847 	init_test_globals(BLOCK_CNT);
848 	CU_ASSERT(zone_block_init() == 0);
849 
850 	/* Create zone dev */
851 	bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
852 
853 	ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
854 	SPDK_CU_ASSERT_FATAL(ch != NULL);
855 
856 	/* Get info about each zone */
857 	for (i = 0; i < num_zones; i++) {
858 		send_zone_info(bdev, ch, i * bdev->bdev.zone_size,
859 			       i * bdev->bdev.zone_size + bdev->zone_capacity, SPDK_BDEV_ZONE_STATE_FULL, 0, true);
860 	}
861 
862 	/* Send info asking for 0 zones */
863 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io));
864 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
865 	bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, 0, 0);
866 	memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
867 	g_io_output_index = 0;
868 	zone_block_submit_request(ch, bdev_io);
869 	CU_ASSERT(g_io_comp_status);
870 	bdev_io_zone_cleanup(bdev_io);
871 
872 	/* Send info asking for all zones */
873 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io));
874 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
875 	bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, 0, num_zones);
876 	memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
877 	g_io_output_index = 0;
878 	zone_block_submit_request(ch, bdev_io);
879 	CU_ASSERT(g_io_comp_status);
880 
881 	for (i = 0; i < num_zones; i++) {
882 		info = &(((struct spdk_bdev_zone_info *)bdev_io->u.zone_mgmt.buf)[i]);
883 		CU_ASSERT(info->zone_id == i * bdev->bdev.zone_size);
884 		CU_ASSERT(info->capacity == bdev->zone_capacity);
885 		CU_ASSERT(info->write_pointer == i * bdev->bdev.zone_size + bdev->zone_capacity);
886 		CU_ASSERT(info->state == SPDK_BDEV_ZONE_STATE_FULL);
887 	}
888 	bdev_io_zone_cleanup(bdev_io);
889 
890 	/* Send info asking for too many zones */
891 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io));
892 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
893 	bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, 0, num_zones + 1);
894 	memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
895 	g_io_output_index = 0;
896 	zone_block_submit_request(ch, bdev_io);
897 	CU_ASSERT(!g_io_comp_status);
898 	bdev_io_zone_cleanup(bdev_io);
899 
900 	/* Send info with misaligned start LBA */
901 	send_zone_info(bdev, ch, 1, 0, SPDK_BDEV_ZONE_STATE_FULL, 0, false);
902 
903 	/* Send info with too high LBA */
904 	send_zone_info(bdev, ch, num_zones * bdev->bdev.zone_size, 0, SPDK_BDEV_ZONE_STATE_FULL, 0,
905 		       false);
906 
907 	/* Delete zone dev */
908 	send_delete_vbdev("zone_dev1", true);
909 
910 	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
911 	free(ch);
912 
913 	test_cleanup();
914 }
915 
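/* Submit a ZONE_MANAGEMENT action and check only the completion status; reset/open/close/finish wrappers follow. */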
916 static void
917 send_zone_management(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
918 		     uint32_t output_index, enum spdk_bdev_zone_action action, bool success)
919 {
920 	struct spdk_bdev_io *bdev_io;
921 
922 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io));
923 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
924 	bdev_io_zone_initialize(bdev_io, &bdev->bdev, zone_id, 1, action);
925 	memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
926 	g_io_output_index = output_index;
927 
928 	g_io_comp_status = !success;
929 	zone_block_submit_request(ch, bdev_io);
930 
931 	CU_ASSERT(g_io_comp_status == success);
932 	bdev_io_zone_cleanup(bdev_io);
933 }
934 
935 static void
936 send_reset_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
937 		uint32_t output_index, bool success)
938 {
939 	send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_RESET, success);
940 }
941 
942 static void
943 send_open_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
944 	       uint32_t output_index, bool success)
945 {
946 	send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_OPEN, success);
947 }
948 
949 static void
950 send_close_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
951 		uint32_t output_index, bool success)
952 {
953 	send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_CLOSE, success);
954 }
955 
956 static void
957 send_finish_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id,
958 		 uint32_t output_index, bool success)
959 {
960 	send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_FINISH, success);
961 }
962 
963 static void
964 test_reset_zone(void)
965 {
966 	struct spdk_io_channel *ch;
967 	struct bdev_zone_block *bdev;
968 	char *name = "Nvme0n1";
969 	uint32_t num_zones = 16;
970 	uint64_t zone_id;
971 	uint32_t output_index = 0;
972 
973 	init_test_globals(BLOCK_CNT);
974 	CU_ASSERT(zone_block_init() == 0);
975 
976 	/* Create zone dev */
977 	bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
978 
979 	ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
980 	SPDK_CU_ASSERT_FATAL(ch != NULL);
981 
982 	/* Send reset to zone 0 */
983 	zone_id = 0;
984 	send_reset_zone(bdev, ch, zone_id, output_index, true);
985 	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
986 
987 	/* Send reset to last zone */
988 	zone_id = (num_zones - 1) * bdev->bdev.zone_size;
989 	send_reset_zone(bdev, ch, zone_id, output_index, true);
990 	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
991 
992 	/* Send reset with misaligned LBA */
993 	zone_id = 1;
994 	send_reset_zone(bdev, ch, zone_id, output_index, false);
995 
996 	/* Send reset to non-existing zone */
997 	zone_id = num_zones * bdev->bdev.zone_size;
998 	send_reset_zone(bdev, ch, zone_id, output_index, false);
999 
1000 	/* Send reset to already reset zone */
1001 	zone_id = 0;
1002 	send_reset_zone(bdev, ch, zone_id, output_index, true);
1003 	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
1004 
1005 	/* Delete zone dev */
1006 	send_delete_vbdev("zone_dev1", true);
1007 
1008 	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
1009 	free(ch);
1010 
1011 	test_cleanup();
1012 }
1013 
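/* Data-path helpers: submit WRITE, READ or ZONE_APPEND through the vbdev; the append helper also checks the returned write pointer. */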
1014 static void
1015 send_write_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t lba,
1016 		uint64_t blocks, uint32_t output_index, bool success)
1017 {
1018 	struct spdk_bdev_io *bdev_io;
1019 
1020 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io));
1021 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1022 	bdev_io_initialize(bdev_io, &bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_WRITE);
1023 	memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
1024 	g_io_output_index = output_index;
1025 
1026 	g_io_comp_status = !success;
1027 	zone_block_submit_request(ch, bdev_io);
1028 
1029 	CU_ASSERT(g_io_comp_status == success);
1030 	bdev_io_cleanup(bdev_io);
1031 }
1032 
1033 static void
1034 send_read_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t lba,
1035 	       uint64_t blocks, uint32_t output_index, bool success)
1036 {
1037 	struct spdk_bdev_io *bdev_io;
1038 
1039 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io));
1040 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1041 	bdev_io_initialize(bdev_io, &bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_READ);
1042 	memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
1043 	g_io_output_index = output_index;
1044 
1045 	g_io_comp_status = !success;
1046 	zone_block_submit_request(ch, bdev_io);
1047 
1048 	CU_ASSERT(g_io_comp_status == success);
1049 	bdev_io_cleanup(bdev_io);
1050 }
1051 
1052 static void
1053 send_append_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t lba,
1054 		 uint64_t blocks, uint32_t output_index, bool success, uint64_t wp)
1055 {
1056 	struct spdk_bdev_io *bdev_io;
1057 
1058 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io));
1059 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1060 	bdev_io_initialize(bdev_io, &bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_ZONE_APPEND);
1061 	memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
1062 	g_io_output_index = output_index;
1063 
1064 	g_io_comp_status = !success;
1065 	zone_block_submit_request(ch, bdev_io);
1066 
1067 	CU_ASSERT(g_io_comp_status == success);
1068 	if (success) {
1069 		CU_ASSERT(bdev_io->u.bdev.offset_blocks == wp);
1070 	}
1071 	bdev_io_cleanup(bdev_io);
1072 }
1073 
1074 static void
1075 test_open_zone(void)
1076 {
1077 	struct spdk_io_channel *ch;
1078 	struct bdev_zone_block *bdev;
1079 	char *name = "Nvme0n1";
1080 	uint32_t num_zones = 16;
1081 	uint64_t zone_id;
1082 	uint32_t output_index = 0, i;
1083 
1084 	init_test_globals(BLOCK_CNT);
1085 	CU_ASSERT(zone_block_init() == 0);
1086 
1087 	/* Create zone dev */
1088 	bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
1089 
1090 	ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
1091 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1092 
1093 	/* Try to open full zone */
1094 	zone_id = 0;
1095 	send_open_zone(bdev, ch, zone_id, output_index, false);
1096 
1097 	/* Open all zones */
1098 	for (i = 0; i < num_zones; i++) {
1099 		zone_id = i * bdev->bdev.zone_size;
1100 		send_reset_zone(bdev, ch, zone_id, output_index, true);
1101 		send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
1102 	}
1103 	for (i = 0; i < num_zones; i++) {
1104 		zone_id = i * bdev->bdev.zone_size;
1105 		send_open_zone(bdev, ch, zone_id, output_index, true);
1106 		send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
1107 	}
1108 
1109 	/* Reset one of the zones and open it again */
1110 	zone_id = 0;
1111 	send_reset_zone(bdev, ch, zone_id, output_index, true);
1112 	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
1113 	send_open_zone(bdev, ch, zone_id, output_index, true);
1114 	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
1115 
1116 	/* Send open with misaligned LBA */
1117 	zone_id = 0;
1118 	send_reset_zone(bdev, ch, zone_id, output_index, true);
1119 	zone_id = 1;
1120 	send_open_zone(bdev, ch, zone_id, output_index, false);
1121 
1122 	/* Send open to non-existing zone */
1123 	zone_id = num_zones * bdev->bdev.zone_size;
1124 	send_open_zone(bdev, ch, zone_id, output_index, false);
1125 
1126 	/* Send open to already opened zone */
1127 	zone_id = bdev->bdev.zone_size;
1128 	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
1129 	send_open_zone(bdev, ch, zone_id, output_index, true);
1130 	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
1131 
1132 	/* Delete zone dev */
1133 	send_delete_vbdev("zone_dev1", true);
1134 
1135 	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
1136 	free(ch);
1137 
1138 	test_cleanup();
1139 }
1140 
1141 static void
1142 test_zone_write(void)
1143 {
1144 	struct spdk_io_channel *ch;
1145 	struct bdev_zone_block *bdev;
1146 	char *name = "Nvme0n1";
1147 	uint32_t num_zones = 20;
1148 	uint64_t zone_id, lba, block_len;
1149 	uint32_t output_index = 0, i;
1150 
1151 	init_test_globals(20 * 1024ul);
1152 	CU_ASSERT(zone_block_init() == 0);
1153 
1154 	/* Create zone dev */
1155 	bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
1156 
1157 	ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
1158 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1159 
1160 	/* Write to full zone */
1161 	lba = 0;
1162 	send_write_zone(bdev, ch, lba, 1, output_index, false);
1163 
1164 	/* Write out of device range */
1165 	lba = g_block_cnt;
1166 	send_write_zone(bdev, ch, lba, 1, output_index, false);
1167 
1168 	/* Write 1 sector to zone 0 */
1169 	lba = 0;
1170 	send_reset_zone(bdev, ch, lba, output_index, true);
1171 	send_write_zone(bdev, ch, lba, 1, output_index, true);
1172 	send_zone_info(bdev, ch, lba, 1, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
1173 
1174 	/* Write to another zone */
1175 	lba = bdev->bdev.zone_size;
1176 	send_reset_zone(bdev, ch, lba, output_index, true);
1177 	send_write_zone(bdev, ch, lba, 5, output_index, true);
1178 	send_zone_info(bdev, ch, lba, lba + 5, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
1179 
1180 	/* Fill zone 0 and verify zone state change */
1181 	block_len = 15;
1182 	send_write_zone(bdev, ch, 1, block_len, output_index, true);
1183 	block_len = 16;
1184 	for (i = block_len; i < bdev->bdev.zone_size; i += block_len) {
1185 		send_write_zone(bdev, ch, i, block_len, output_index, true);
1186 	}
1187 	send_zone_info(bdev, ch, 0, bdev->bdev.zone_size, SPDK_BDEV_ZONE_STATE_FULL, output_index,
1188 		       true);
1189 
1190 	/* Write to wrong write pointer */
1191 	lba = bdev->bdev.zone_size;
1192 	send_write_zone(bdev, ch, lba + 7, 1, output_index, false);
1193 	/* Write to already written sectors */
1194 	send_write_zone(bdev, ch, lba, 1, output_index, false);
1195 
1196 	/* Write to two zones at once */
1197 	for (i = 0; i < num_zones; i++) {
1198 		zone_id = i * bdev->bdev.zone_size;
1199 		send_reset_zone(bdev, ch, zone_id, output_index, true);
1200 		send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
1201 	}
1202 	block_len = 16;
1203 	for (i = 0; i < bdev->bdev.zone_size - block_len; i += block_len) {
1204 		send_write_zone(bdev, ch, i, block_len, output_index, true);
1205 	}
1206 	send_write_zone(bdev, ch, bdev->bdev.zone_size - block_len, 32, output_index, false);
1207 
1208 	/* Delete zone dev */
1209 	send_delete_vbdev("zone_dev1", true);
1210 
1211 	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
1212 	free(ch);
1213 
1214 	test_cleanup();
1215 }
1216 
1217 static void
1218 test_zone_read(void)
1219 {
1220 	struct spdk_io_channel *ch;
1221 	struct bdev_zone_block *bdev;
1222 	char *name = "Nvme0n1";
1223 	uint32_t num_zones = 20;
1224 	uint64_t lba, block_len;
1225 	uint32_t output_index = 0;
1226 
1227 	init_test_globals(20 * 1024ul);
1228 	CU_ASSERT(zone_block_init() == 0);
1229 
1230 	/* Create zone dev */
1231 	bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
1232 
1233 	ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
1234 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1235 
1236 	/* Read out of device range */
1237 	block_len = 16;
1238 	lba = g_block_cnt - block_len / 2;
1239 	send_read_zone(bdev, ch, lba, block_len, output_index, false);
1240 
1241 	block_len = 1;
1242 	lba = g_block_cnt;
1243 	send_read_zone(bdev, ch, lba, block_len, output_index, false);
1244 
1245 	/* Read from full zone */
1246 	lba = 0;
1247 	send_read_zone(bdev, ch, lba, 1, output_index, true);
1248 
1249 	/* Read from empty zone */
1250 	send_reset_zone(bdev, ch, lba, output_index, true);
1251 	send_read_zone(bdev, ch, lba, 1, output_index, true);
1252 
1253 	/* Read written sectors from open zone */
1254 	send_write_zone(bdev, ch, lba, 1, output_index, true);
1255 	send_read_zone(bdev, ch, lba, 1, output_index, true);
1256 
1257 	/* Read partially written sectors from open zone */
1258 	send_read_zone(bdev, ch, lba, 2, output_index, true);
1259 
1260 	/* Read unwritten sectors from open zone */
1261 	lba = 2;
1262 	send_read_zone(bdev, ch, lba, 1, output_index, true);
1263 
1264 	/* Read from two zones at once */
1265 	block_len = 16;
1266 	lba = bdev->bdev.zone_size - block_len / 2;
1267 	send_read_zone(bdev, ch, lba, block_len, output_index, false);
1268 
1269 	/* Delete zone dev */
1270 	send_delete_vbdev("zone_dev1", true);
1271 
1272 	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
1273 	free(ch);
1274 	test_cleanup();
1275 }
1276 
1277 static void
1278 test_close_zone(void)
1279 {
1280 	struct spdk_io_channel *ch;
1281 	struct bdev_zone_block *bdev;
1282 	char *name = "Nvme0n1";
1283 	uint32_t num_zones = 20;
1284 	uint64_t zone_id;
1285 	uint32_t output_index = 0;
1286 
1287 	init_test_globals(20 * 1024ul);
1288 	CU_ASSERT(zone_block_init() == 0);
1289 
1290 	/* Create zone dev */
1291 	bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
1292 
1293 	ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
1294 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1295 
1296 	/* Try to close a full zone */
1297 	zone_id = 0;
1298 	send_close_zone(bdev, ch, zone_id, output_index, false);
1299 
1300 	/* Try to close an empty zone */
1301 	send_reset_zone(bdev, ch, zone_id, output_index, true);
1302 	send_close_zone(bdev, ch, zone_id, output_index, false);
1303 
1304 	/* Close an open zone */
1305 	send_open_zone(bdev, ch, zone_id, output_index, true);
1306 	send_close_zone(bdev, ch, zone_id, output_index, true);
1307 	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_CLOSED, output_index, true);
1308 
1309 	/* Close a closed zone */
1310 	send_close_zone(bdev, ch, zone_id, output_index, true);
1311 	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_CLOSED, output_index, true);
1312 
1313 	/* Send close to last zone */
1314 	zone_id = (num_zones - 1) * bdev->bdev.zone_size;
1315 	send_reset_zone(bdev, ch, zone_id, output_index, true);
1316 	send_open_zone(bdev, ch, zone_id, output_index, true);
1317 	send_close_zone(bdev, ch, zone_id, output_index, true);
1318 	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_CLOSED, output_index, true);
1319 
1320 	/* Send close with misaligned LBA */
1321 	zone_id = 1;
1322 	send_close_zone(bdev, ch, zone_id, output_index, false);
1323 
1324 	/* Send close to non-existing zone */
1325 	zone_id = num_zones * bdev->bdev.zone_size;
1326 	send_close_zone(bdev, ch, zone_id, output_index, false);
1327 
1328 	/* Delete zone dev */
1329 	send_delete_vbdev("zone_dev1", true);
1330 
1331 	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
1332 	free(ch);
1333 	test_cleanup();
1334 }
1335 
1336 static void
1337 test_finish_zone(void)
1338 {
1339 	struct spdk_io_channel *ch;
1340 	struct bdev_zone_block *bdev;
1341 	char *name = "Nvme0n1";
1342 	uint32_t num_zones = 20;
1343 	uint64_t zone_id, wp;
1344 	uint32_t output_index = 0;
1345 
1346 	init_test_globals(20 * 1024ul);
1347 	CU_ASSERT(zone_block_init() == 0);
1348 
1349 	/* Create zone dev */
1350 	bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
1351 
1352 	ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
1353 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1354 
1355 	/* Reset an unused zone */
1356 	send_reset_zone(bdev, ch, bdev->bdev.zone_size, output_index, true);
1357 
1358 	/* Finish a full zone */
1359 	zone_id = 0;
1360 	wp = bdev->bdev.zone_size;
1361 	send_finish_zone(bdev, ch, zone_id, output_index, true);
1362 	send_zone_info(bdev, ch, zone_id, wp, SPDK_BDEV_ZONE_STATE_FULL, output_index, true);
1363 
1364 	/* Finish an empty zone */
1365 	send_reset_zone(bdev, ch, zone_id, output_index, true);
1366 	send_finish_zone(bdev, ch, zone_id, output_index, true);
1367 	send_zone_info(bdev, ch, zone_id, wp, SPDK_BDEV_ZONE_STATE_FULL, output_index, true);
1368 
1369 	/* Finish an open zone */
1370 	send_reset_zone(bdev, ch, zone_id, output_index, true);
1371 	send_write_zone(bdev, ch, zone_id, 1, output_index, true);
1372 	send_finish_zone(bdev, ch, zone_id, output_index, true);
1373 	send_zone_info(bdev, ch, zone_id, wp, SPDK_BDEV_ZONE_STATE_FULL, output_index, true);
1374 
1375 	/* Send finish with misaligned LBA */
1376 	zone_id = 1;
1377 	send_finish_zone(bdev, ch, zone_id, output_index, false);
1378 
1379 	/* Send finish to non-existing zone */
1380 	zone_id = num_zones * bdev->bdev.zone_size;
1381 	send_finish_zone(bdev, ch, zone_id, output_index, false);
1382 
1383 	/* Make sure unused zone wasn't written to */
1384 	zone_id = bdev->bdev.zone_size;
1385 	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
1386 
1387 	/* Delete zone dev */
1388 	send_delete_vbdev("zone_dev1", true);
1389 
1390 	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
1391 	free(ch);
1392 
1393 	test_cleanup();
1394 }
1395 
1396 static void
1397 test_append_zone(void)
1398 {
1399 	struct spdk_io_channel *ch;
1400 	struct bdev_zone_block *bdev;
1401 	char *name = "Nvme0n1";
1402 	uint32_t num_zones = 20;
1403 	uint64_t zone_id, block_len, i;
1404 	uint32_t output_index = 0;
1405 
1406 	init_test_globals(20 * 1024ul);
1407 	CU_ASSERT(zone_block_init() == 0);
1408 
1409 	/* Create zone dev */
1410 	bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
1411 
1412 	ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
1413 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1414 
1415 	/* Append to full zone */
1416 	zone_id = 0;
1417 	send_append_zone(bdev, ch, zone_id, 1, output_index, false, 0);
1418 
1419 	/* Append out of device range */
1420 	zone_id = g_block_cnt;
1421 	send_append_zone(bdev, ch, zone_id, 1, output_index, false, 0);
1422 
1423 	/* Append 1 sector to zone 0 */
1424 	zone_id = 0;
1425 	send_reset_zone(bdev, ch, zone_id, output_index, true);
1426 	send_append_zone(bdev, ch, zone_id, 1, output_index, true, zone_id);
1427 	send_zone_info(bdev, ch, zone_id, 1, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
1428 
1429 	/* Append to another zone */
1430 	zone_id = bdev->bdev.zone_size;
1431 	send_reset_zone(bdev, ch, zone_id, output_index, true);
1432 	send_append_zone(bdev, ch, zone_id, 5, output_index, true, zone_id);
1433 	send_zone_info(bdev, ch, zone_id, zone_id + 5, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
1434 
1435 	/* Fill zone 0 and verify zone state change */
1436 	zone_id = 0;
1437 	block_len = 15;
1438 	send_append_zone(bdev, ch, zone_id, block_len, output_index, true, 1);
1439 	block_len++;
1440 	for (i = block_len; i < bdev->zone_capacity; i += block_len) {
1441 		send_append_zone(bdev, ch, zone_id, block_len, output_index, true, i);
1442 	}
1443 	send_zone_info(bdev, ch, zone_id, bdev->bdev.zone_size, SPDK_BDEV_ZONE_STATE_FULL, output_index,
1444 		       true);
1445 
1446 	/* Append to two zones at once */
1447 	for (i = 0; i < num_zones; i++) {
1448 		zone_id = i * bdev->bdev.zone_size;
1449 		send_reset_zone(bdev, ch, zone_id, output_index, true);
1450 		send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
1451 	}
1452 
1453 	zone_id = 0;
1454 	block_len = 16;
1455 	for (i = 0; i < bdev->zone_capacity - block_len; i += block_len) {
1456 		send_append_zone(bdev, ch, zone_id, block_len, output_index, true, zone_id + i);
1457 	}
1458 	send_append_zone(bdev, ch, zone_id, 32, output_index, false, 0);
1459 	/* Delete zone dev */
1460 	send_delete_vbdev("zone_dev1", true);
1461 
1462 	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
1463 	free(ch);
1464 
1465 	test_cleanup();
1466 }
1467 
1468 int
1469 main(int argc, char **argv)
1470 {
1471 	CU_pSuite       suite = NULL;
1472 	unsigned int    num_failures;
1473 
1474 	CU_initialize_registry();
1475 
1476 	suite = CU_add_suite("zone_block", NULL, NULL);
1477 
1478 	CU_ADD_TEST(suite, test_zone_block_create);
1479 	CU_ADD_TEST(suite, test_zone_block_create_invalid);
1480 	CU_ADD_TEST(suite, test_get_zone_info);
1481 	CU_ADD_TEST(suite, test_supported_io_types);
1482 	CU_ADD_TEST(suite, test_reset_zone);
1483 	CU_ADD_TEST(suite, test_open_zone);
1484 	CU_ADD_TEST(suite, test_zone_write);
1485 	CU_ADD_TEST(suite, test_zone_read);
1486 	CU_ADD_TEST(suite, test_close_zone);
1487 	CU_ADD_TEST(suite, test_finish_zone);
1488 	CU_ADD_TEST(suite, test_append_zone);
1489 
1490 	g_thread = spdk_thread_create("test", NULL);
1491 	spdk_set_thread(g_thread);
1492 
1493 	set_test_opts();
1494 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
1495 
1496 	spdk_thread_exit(g_thread);
1497 	while (!spdk_thread_is_exited(g_thread)) {
1498 		spdk_thread_poll(g_thread, 0, 0);
1499 	}
1500 	spdk_thread_destroy(g_thread);
1501 
1502 	CU_cleanup_registry();
1503 
1504 	return num_failures;
1505 }
1506