xref: /spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c (revision 26cac6bf15cd3e90bf6221c5c53672ec2befca30)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2016 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "common/lib/ut_multithread.c"
9 #include "spdk_internal/cunit.h"
10 #include "spdk/nvmf.h"
11 #include "spdk_internal/mock.h"
12 
13 #include "spdk/bdev_module.h"
14 #include "nvmf/subsystem.c"
15 #include "nvmf/transport.c"
16 
17 SPDK_LOG_REGISTER_COMPONENT(nvmf)
18 
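/*
 * Stub out the external symbols referenced by subsystem.c and transport.c;
 * the tests below only need fixed return values from these dependencies.
 */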
19 DEFINE_STUB(spdk_bdev_module_claim_bdev,
20 	    int,
21 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
22 	     struct spdk_bdev_module *module), 0);
23 
24 DEFINE_STUB_V(spdk_bdev_module_release_bdev,
25 	      (struct spdk_bdev *bdev));
26 
27 DEFINE_STUB(spdk_bdev_get_block_size, uint32_t,
28 	    (const struct spdk_bdev *bdev), 512);
29 
30 DEFINE_STUB(spdk_bdev_get_md_size, uint32_t,
31 	    (const struct spdk_bdev *bdev), 0);
32 
33 DEFINE_STUB(spdk_bdev_is_md_interleaved, bool,
34 	    (const struct spdk_bdev *bdev), false);
35 
36 DEFINE_STUB(spdk_bdev_io_type_supported, bool,
37 	    (struct spdk_bdev *bdev,
38 	     enum spdk_bdev_io_type io_type), false);
39 
40 DEFINE_STUB_V(nvmf_update_discovery_log,
41 	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn));
42 
43 DEFINE_STUB(spdk_nvmf_qpair_disconnect,
44 	    int,
45 	    (struct spdk_nvmf_qpair *qpair,
46 	     nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
47 
48 DEFINE_STUB(spdk_nvmf_request_complete,
49 	    int,
50 	    (struct spdk_nvmf_request *req), 0);
51 
52 DEFINE_STUB(nvmf_ctrlr_async_event_ana_change_notice,
53 	    int,
54 	    (struct spdk_nvmf_ctrlr *ctrlr), 0);
55 
56 DEFINE_STUB(spdk_nvme_transport_id_trtype_str,
57 	    const char *,
58 	    (enum spdk_nvme_transport_type trtype), NULL);
59 
60 DEFINE_STUB(spdk_bdev_is_zoned, bool,
61 	    (const struct spdk_bdev *bdev), false);
62 
63 DEFINE_STUB(spdk_bdev_get_max_zone_append_size, uint32_t,
64 	    (const struct spdk_bdev *bdev), 0);
65 
66 DEFINE_STUB(spdk_mempool_lookup, struct spdk_mempool *,
67 	    (const char *name), NULL);
68 
69 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *,
70 	    (enum spdk_nvmf_adrfam adrfam), NULL);
71 
72 DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int,
73 	    (struct spdk_nvmf_qpair *qpair,
74 	     struct spdk_nvme_transport_id *trid), 0);
75 
76 static struct spdk_nvmf_transport g_transport = {};
77 
78 struct spdk_nvmf_subsystem *
79 spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
80 {
81 	return NULL;
82 }
83 
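/* Return the UT transport for any name other than "RDMA" (strncmp() is non-zero on mismatch). */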
84 struct spdk_nvmf_transport *
85 spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
86 {
87 	if (strncmp(transport_name, SPDK_NVME_TRANSPORT_NAME_RDMA, SPDK_NVMF_TRSTRING_MAX_LEN)) {
88 		return &g_transport;
89 	}
90 
91 	return NULL;
92 }
93 
94 int
95 nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
96 				 struct spdk_nvmf_subsystem *subsystem)
97 {
98 	return 0;
99 }
100 
101 int
102 nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
103 			      struct spdk_nvmf_subsystem *subsystem,
104 			      spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
105 {
106 	return 0;
107 }
108 
109 void
110 nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
111 				 struct spdk_nvmf_subsystem *subsystem,
112 				 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
113 {
114 }
115 
116 void
117 nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
118 				struct spdk_nvmf_subsystem *subsystem,
119 				uint32_t nsid,
120 				spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
121 {
122 }
123 
124 void
125 nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
126 				 struct spdk_nvmf_subsystem *subsystem,
127 				 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
128 {
129 }
130 
131 int
132 spdk_nvme_transport_id_parse_trtype(enum spdk_nvme_transport_type *trtype, const char *str)
133 {
134 	if (trtype == NULL || str == NULL) {
135 		return -EINVAL;
136 	}
137 
138 	if (strcasecmp(str, "PCIe") == 0) {
139 		*trtype = SPDK_NVME_TRANSPORT_PCIE;
140 	} else if (strcasecmp(str, "RDMA") == 0) {
141 		*trtype = SPDK_NVME_TRANSPORT_RDMA;
142 	} else {
143 		return -ENOENT;
144 	}
145 	return 0;
146 }
147 
148 int
149 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
150 			       const struct spdk_nvme_transport_id *trid2)
151 {
152 	return 0;
153 }
154 
155 int32_t
156 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
157 {
158 	return -1;
159 }
160 
161 int32_t
162 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
163 {
164 	return -1;
165 }
166 
167 int
168 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
169 {
170 	return -1;
171 }
172 
173 void
174 nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
175 {
176 }
177 
178 static struct spdk_nvmf_ctrlr *g_ns_changed_ctrlr = NULL;
179 static uint32_t g_ns_changed_nsid = 0;
180 void
181 nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
182 {
183 	g_ns_changed_ctrlr = ctrlr;
184 	g_ns_changed_nsid = nsid;
185 }
186 
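/* Minimal bdev mock: two named bdevs that the spdk_bdev_open_ext() stub below can open by name. */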
187 static struct spdk_bdev g_bdevs[] = {
188 	{ .name = "bdev1" },
189 	{ .name = "bdev2" },
190 };
191 
192 struct spdk_bdev_desc {
193 	struct spdk_bdev	*bdev;
194 };
195 
196 int
197 spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
198 		   void *event_ctx, struct spdk_bdev_desc **_desc)
199 {
200 	struct spdk_bdev_desc *desc;
201 	size_t i;
202 
203 	for (i = 0; i < sizeof(g_bdevs) / sizeof(g_bdevs[0]); i++) {
204 		if (strcmp(bdev_name, g_bdevs[i].name) == 0) {
205 
206 			desc = calloc(1, sizeof(*desc));
207 			SPDK_CU_ASSERT_FATAL(desc != NULL);
208 
209 			desc->bdev = &g_bdevs[i];
210 			*_desc = desc;
211 			return 0;
212 		}
213 	}
214 
215 	return -EINVAL;
216 }
217 
218 void
219 spdk_bdev_close(struct spdk_bdev_desc *desc)
220 {
221 	free(desc);
222 }
223 
224 struct spdk_bdev *
225 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
226 {
227 	return desc->bdev;
228 }
229 
230 const char *
231 spdk_bdev_get_name(const struct spdk_bdev *bdev)
232 {
233 	return "test";
234 }
235 
236 const struct spdk_uuid *
237 spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
238 {
239 	return &bdev->uuid;
240 }
241 
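/*
 * Verify spdk_nvmf_subsystem_add_ns_ext(): a specific NSID can be requested,
 * an NSID that is already in use or the broadcast value 0xFFFFFFFF is rejected,
 * and the namespace can be removed again.
 */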
242 static void
243 test_spdk_nvmf_subsystem_add_ns(void)
244 {
245 	struct spdk_nvmf_tgt tgt = {};
246 	struct spdk_nvmf_subsystem subsystem = {
247 		.max_nsid = 1024,
248 		.ns = NULL,
249 		.tgt = &tgt,
250 	};
251 	struct spdk_nvmf_ns_opts ns_opts;
252 	uint32_t nsid;
253 	int rc;
254 
255 	subsystem.ns = calloc(subsystem.max_nsid, sizeof(struct spdk_nvmf_subsystem_ns *));
256 	SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);
257 	subsystem.ana_group = calloc(subsystem.max_nsid, sizeof(uint32_t));
258 	SPDK_CU_ASSERT_FATAL(subsystem.ana_group != NULL);
259 
260 	tgt.max_subsystems = 1024;
261 	RB_INIT(&tgt.subsystems);
262 
263 	/* Request a specific NSID */
264 	spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
265 	ns_opts.nsid = 5;
266 	nsid = spdk_nvmf_subsystem_add_ns_ext(&subsystem, "bdev2", &ns_opts, sizeof(ns_opts), NULL);
267 	CU_ASSERT(nsid == 5);
268 	CU_ASSERT(subsystem.max_nsid == 1024);
269 	SPDK_CU_ASSERT_FATAL(subsystem.ns[nsid - 1] != NULL);
270 	CU_ASSERT(subsystem.ns[nsid - 1]->bdev == &g_bdevs[1]);
271 
272 	/* Request an NSID that is already in use */
273 	spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
274 	ns_opts.nsid = 5;
275 	nsid = spdk_nvmf_subsystem_add_ns_ext(&subsystem, "bdev2", &ns_opts, sizeof(ns_opts), NULL);
276 	CU_ASSERT(nsid == 0);
277 	CU_ASSERT(subsystem.max_nsid == 1024);
278 
279 	/* Request 0xFFFFFFFF (invalid NSID, reserved for broadcast) */
280 	spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
281 	ns_opts.nsid = 0xFFFFFFFF;
282 	nsid = spdk_nvmf_subsystem_add_ns_ext(&subsystem, "bdev2", &ns_opts, sizeof(ns_opts), NULL);
283 	CU_ASSERT(nsid == 0);
284 	CU_ASSERT(subsystem.max_nsid == 1024);
285 
286 	rc = spdk_nvmf_subsystem_remove_ns(&subsystem, 5);
287 	CU_ASSERT(rc == 0);
288 
289 	free(subsystem.ns);
290 	free(subsystem.ana_group);
291 }
292 
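/*
 * Exercise NQN validation in spdk_nvmf_subsystem_create(): reverse-domain and
 * UUID-based names are accepted, while malformed domain labels, overlong names,
 * and invalid UTF-8 are rejected.
 */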
293 static void
294 nvmf_test_create_subsystem(void)
295 {
296 	struct spdk_nvmf_tgt tgt = {};
297 	char nqn[256];
298 	struct spdk_nvmf_subsystem *subsystem;
299 	int rc;
300 
301 	tgt.max_subsystems = 1024;
302 	tgt.subsystem_ids = spdk_bit_array_create(tgt.max_subsystems);
303 	RB_INIT(&tgt.subsystems);
304 
305 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
306 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
307 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
308 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
309 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
310 	CU_ASSERT(rc == 0);
311 
312 	/* Valid name with complex reverse domain */
313 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk-full--rev-domain.name:subsystem1");
314 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
315 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
316 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
317 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
318 	CU_ASSERT(rc == 0);
319 
320 	/* Valid name discovery controller */
321 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
322 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
323 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
324 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
325 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
326 	CU_ASSERT(rc == 0);
327 
328 	/* Invalid name, no user supplied string */
329 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
330 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
331 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
332 
333 	/* Valid name, only contains top-level domain name */
334 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
335 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
336 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
337 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
338 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
339 	CU_ASSERT(rc == 0);
340 
341 	/* Invalid name, domain label > 63 characters */
342 	snprintf(nqn, sizeof(nqn),
343 		 "nqn.2016-06.io.abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz:sub");
344 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
345 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
346 
347 	/* Invalid name, domain label starts with digit */
348 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.3spdk:sub");
349 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
350 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
351 
352 	/* Invalid name, domain label starts with - */
353 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.-spdk:subsystem1");
354 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
355 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
356 
357 	/* Invalid name, domain label ends with - */
358 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk-:subsystem1");
359 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
360 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
361 
362 	/* Invalid name, domain label with multiple consecutive periods */
363 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io..spdk:subsystem1");
364 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
365 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
366 
367 	/* Longest valid name */
368 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
369 	memset(nqn + strlen(nqn), 'a', 223 - strlen(nqn));
370 	nqn[223] = '\0';
371 	CU_ASSERT(strlen(nqn) == 223);
372 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
373 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
374 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
375 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
376 	CU_ASSERT(rc == 0);
377 
378 	/* Invalid name, too long */
379 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
380 	memset(nqn + strlen(nqn), 'a', 224 - strlen(nqn));
381 	nqn[224] = '\0';
382 	CU_ASSERT(strlen(nqn) == 224);
383 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
384 	CU_ASSERT(subsystem == NULL);
385 
386 	/* Valid name using uuid format */
387 	snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:ff9b6406-0fc8-4779-80ca-4dca14bda0d2");
388 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
389 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
390 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
391 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
392 	CU_ASSERT(rc == 0);
393 
394 	/* Invalid name user string contains an invalid utf-8 character */
395 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:\xFFsubsystem1");
396 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
397 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
398 
399 	/* Valid name with non-ascii but valid utf-8 characters */
400 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:\xe1\x8a\x88subsystem1\xca\x80");
401 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
402 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
403 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
404 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
405 	CU_ASSERT(rc == 0);
406 
407 	/* Invalid uuid (too long) */
408 	snprintf(nqn, sizeof(nqn),
409 		 "nqn.2014-08.org.nvmexpress:uuid:ff9b6406-0fc8-4779-80ca-4dca14bda0d2aaaa");
410 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
411 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
412 
413 	/* Invalid uuid (dashes placed incorrectly) */
414 	snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:ff9b64-060fc8-4779-80ca-4dca14bda0d2");
415 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
416 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
417 
418 	/* Invalid uuid (invalid characters in uuid) */
419 	snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:ff9hg406-0fc8-4779-80ca-4dca14bda0d2");
420 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
421 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
422 
423 	spdk_bit_array_free(&tgt.subsystem_ids);
424 }
425 
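/* Serial numbers must be printable ASCII and at most 20 characters long. */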
426 static void
427 test_spdk_nvmf_subsystem_set_sn(void)
428 {
429 	struct spdk_nvmf_subsystem subsystem = {};
430 
431 	/* Basic valid serial number */
432 	CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "abcd xyz") == 0);
433 	CU_ASSERT(strcmp(subsystem.sn, "abcd xyz") == 0);
434 
435 	/* Exactly 20 characters (valid) */
436 	CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "12345678901234567890") == 0);
437 	CU_ASSERT(strcmp(subsystem.sn, "12345678901234567890") == 0);
438 
439 	/* 21 characters (too long, invalid) */
440 	CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "123456789012345678901") < 0);
441 
442 	/* Non-ASCII characters (invalid) */
443 	CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "abcd\txyz") < 0);
444 }
445 
446 /*
447  * Reservation Unit Test Configuration
448  *       --------             --------    --------
449  *      | Host A |           | Host B |  | Host C |
450  *       --------             --------    --------
451  *      /        \               |           |
452  *  --------   --------       -------     -------
453  * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
454  *  --------   --------       -------     -------
455  *    \           \              /           /
456  *     \           \            /           /
457  *      \           \          /           /
458  *      --------------------------------------
459  *     |            NAMESPACE 1               |
460  *      --------------------------------------
461  */
462 static struct spdk_nvmf_subsystem g_subsystem;
463 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
464 static struct spdk_nvmf_ns g_ns;
465 static struct spdk_bdev g_bdev;
466 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
467 
468 void
469 nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr)
470 {
471 }
472 
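/*
 * Build the topology from the diagram above: one shared namespace and four
 * controllers, where g_ctrlr1_A and g_ctrlr2_A share Host A's Host Identifier.
 */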
473 static void
474 ut_reservation_init(void)
475 {
476 
477 	TAILQ_INIT(&g_subsystem.ctrlrs);
478 
479 	memset(&g_ns, 0, sizeof(g_ns));
480 	TAILQ_INIT(&g_ns.registrants);
481 	g_ns.subsystem = &g_subsystem;
482 	g_ns.ptpl_file = NULL;
483 	g_ns.ptpl_activated = false;
484 	spdk_uuid_generate(&g_bdev.uuid);
485 	g_ns.bdev = &g_bdev;
486 
487 	/* Host A has two controllers */
488 	spdk_uuid_generate(&g_ctrlr1_A.hostid);
489 	TAILQ_INIT(&g_ctrlr1_A.log_head);
490 	g_ctrlr1_A.subsys = &g_subsystem;
491 	g_ctrlr1_A.num_avail_log_pages = 0;
492 	TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr1_A, link);
493 	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
494 	TAILQ_INIT(&g_ctrlr2_A.log_head);
495 	g_ctrlr2_A.subsys = &g_subsystem;
496 	g_ctrlr2_A.num_avail_log_pages = 0;
497 	TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr2_A, link);
498 
499 	/* Host B has 1 controller */
500 	spdk_uuid_generate(&g_ctrlr_B.hostid);
501 	TAILQ_INIT(&g_ctrlr_B.log_head);
502 	g_ctrlr_B.subsys = &g_subsystem;
503 	g_ctrlr_B.num_avail_log_pages = 0;
504 	TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr_B, link);
505 
506 	/* Host C has 1 controller */
507 	spdk_uuid_generate(&g_ctrlr_C.hostid);
508 	TAILQ_INIT(&g_ctrlr_C.log_head);
509 	g_ctrlr_C.subsys = &g_subsystem;
510 	g_ctrlr_C.num_avail_log_pages = 0;
511 	TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr_C, link);
512 }
513 
514 static void
515 ut_reservation_deinit(void)
516 {
517 	struct spdk_nvmf_registrant *reg, *tmp;
518 	struct spdk_nvmf_reservation_log *log, *log_tmp;
519 	struct spdk_nvmf_ctrlr *ctrlr, *ctrlr_tmp;
520 
521 	TAILQ_FOREACH_SAFE(reg, &g_ns.registrants, link, tmp) {
522 		TAILQ_REMOVE(&g_ns.registrants, reg, link);
523 		free(reg);
524 	}
525 	TAILQ_FOREACH_SAFE(log, &g_ctrlr1_A.log_head, link, log_tmp) {
526 		TAILQ_REMOVE(&g_ctrlr1_A.log_head, log, link);
527 		free(log);
528 	}
529 	g_ctrlr1_A.num_avail_log_pages = 0;
530 	TAILQ_FOREACH_SAFE(log, &g_ctrlr2_A.log_head, link, log_tmp) {
531 		TAILQ_REMOVE(&g_ctrlr2_A.log_head, log, link);
532 		free(log);
533 	}
534 	g_ctrlr2_A.num_avail_log_pages = 0;
535 	TAILQ_FOREACH_SAFE(log, &g_ctrlr_B.log_head, link, log_tmp) {
536 		TAILQ_REMOVE(&g_ctrlr_B.log_head, log, link);
537 		free(log);
538 	}
539 	g_ctrlr_B.num_avail_log_pages = 0;
540 	TAILQ_FOREACH_SAFE(log, &g_ctrlr_C.log_head, link, log_tmp) {
541 		TAILQ_REMOVE(&g_ctrlr_C.log_head, log, link);
542 		free(log);
543 	}
544 	g_ctrlr_C.num_avail_log_pages = 0;
545 
546 	TAILQ_FOREACH_SAFE(ctrlr, &g_subsystem.ctrlrs, link, ctrlr_tmp) {
547 		TAILQ_REMOVE(&g_subsystem.ctrlrs, ctrlr, link);
548 	}
549 }
550 
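/* Allocate a fake request with one data buffer plus command and response messages. */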
551 static struct spdk_nvmf_request *
552 ut_reservation_build_req(uint32_t length)
553 {
554 	struct spdk_nvmf_request *req;
555 
556 	req = calloc(1, sizeof(*req));
557 	assert(req != NULL);
558 
559 	spdk_iov_one(req->iov, &req->iovcnt, calloc(1, length), length);
560 	assert(req->iov[0].iov_base != NULL);
561 	req->data = req->iov[0].iov_base;
562 	req->length = length;
563 
564 	req->cmd = (union nvmf_h2c_msg *)calloc(1, sizeof(union nvmf_h2c_msg));
565 	assert(req->cmd != NULL);
566 
567 	req->rsp = (union nvmf_c2h_msg *)calloc(1, sizeof(union nvmf_c2h_msg));
568 	assert(req->rsp != NULL);
569 
570 	return req;
571 }
572 
573 static void
574 ut_reservation_free_req(struct spdk_nvmf_request *req)
575 {
576 	free(req->cmd);
577 	free(req->rsp);
578 	free(req->iov[0].iov_base);
579 	free(req);
580 }
581 
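/*
 * Helpers that fill in the command dwords and data buffer for Reservation
 * Register, Acquire, and Release requests.
 */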
582 static void
583 ut_reservation_build_register_request(struct spdk_nvmf_request *req,
584 				      uint8_t rrega, uint8_t iekey,
585 				      uint8_t cptpl, uint64_t crkey,
586 				      uint64_t nrkey)
587 {
588 	struct spdk_nvme_reservation_register_data key;
589 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
590 
591 	key.crkey = crkey;
592 	key.nrkey = nrkey;
593 	cmd->cdw10 = 0;
594 	cmd->cdw10_bits.resv_register.rrega = rrega;
595 	cmd->cdw10_bits.resv_register.iekey = iekey;
596 	cmd->cdw10_bits.resv_register.cptpl = cptpl;
597 	memcpy(req->iov[0].iov_base, &key, sizeof(key));
598 }
599 
600 static void
601 ut_reservation_build_acquire_request(struct spdk_nvmf_request *req,
602 				     uint8_t racqa, uint8_t iekey,
603 				     uint8_t rtype, uint64_t crkey,
604 				     uint64_t prkey)
605 {
606 	struct spdk_nvme_reservation_acquire_data key;
607 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
608 
609 	key.crkey = crkey;
610 	key.prkey = prkey;
611 	cmd->cdw10 = 0;
612 	cmd->cdw10_bits.resv_acquire.racqa = racqa;
613 	cmd->cdw10_bits.resv_acquire.iekey = iekey;
614 	cmd->cdw10_bits.resv_acquire.rtype = rtype;
615 	memcpy(req->iov[0].iov_base, &key, sizeof(key));
616 }
617 
618 static void
619 ut_reservation_build_release_request(struct spdk_nvmf_request *req,
620 				     uint8_t rrela, uint8_t iekey,
621 				     uint8_t rtype, uint64_t crkey)
622 {
623 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
624 
625 	cmd->cdw10 = 0;
626 	cmd->cdw10_bits.resv_release.rrela = rrela;
627 	cmd->cdw10_bits.resv_release.iekey = iekey;
628 	cmd->cdw10_bits.resv_release.rtype = rtype;
629 	memcpy(req->iov[0].iov_base, &crkey, sizeof(crkey));
630 }
631 
632 /*
633  * Construct four registrants for other test cases.
634  *
635  * g_ctrlr1_A register with key 0xa1.
636  * g_ctrlr2_A register with key 0xa1.
637  * g_ctrlr_B register with key 0xb1.
638  * g_ctrlr_C register with key 0xc1.
639  * */
640 static void
641 ut_reservation_build_registrants(void)
642 {
643 	struct spdk_nvmf_request *req;
644 	struct spdk_nvme_cpl *rsp;
645 	struct spdk_nvmf_registrant *reg;
646 	uint32_t gen;
647 
648 	req = ut_reservation_build_req(16);
649 	rsp = &req->rsp->nvme_cpl;
650 	SPDK_CU_ASSERT_FATAL(req != NULL);
651 	gen = g_ns.gen;
652 
653 	/* TEST CASE: g_ctrlr1_A register with a new key */
654 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
655 					      0, 0, 0, 0xa1);
656 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
657 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
658 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
659 	SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa1);
660 	SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 1);
661 
662 	/* TEST CASE: g_ctrlr2_A registers with a different key. Because it shares
663 	 * a Host Identifier with g_ctrlr1_A, the registered key must stay the same.
664 	 */
665 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
666 					      0, 0, 0, 0xa2);
667 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr2_A, req);
668 	/* Reservation conflict for other key than 0xa1 */
669 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
670 
671 	/* g_ctrlr_B register with a new key */
672 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
673 					      0, 0, 0, 0xb1);
674 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
675 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
676 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
677 	SPDK_CU_ASSERT_FATAL(reg->rkey == 0xb1);
678 	SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 2);
679 
680 	/* g_ctrlr_C register with a new key */
681 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
682 					      0, 0, 0, 0xc1);
683 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_C, req);
684 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
685 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
686 	SPDK_CU_ASSERT_FATAL(reg->rkey == 0xc1);
687 	SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 3);
688 
689 	ut_reservation_free_req(req);
690 }
691 
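/*
 * Register, replace, and unregister keys, including the IEKEY (ignore existing
 * key) variants, and verify that unregistering the reservation holder also
 * releases the reservation.
 */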
692 static void
693 test_reservation_register(void)
694 {
695 	struct spdk_nvmf_request *req;
696 	struct spdk_nvme_cpl *rsp;
697 	struct spdk_nvmf_registrant *reg;
698 	uint32_t gen;
699 
700 	ut_reservation_init();
701 
702 	req = ut_reservation_build_req(16);
703 	rsp = &req->rsp->nvme_cpl;
704 	SPDK_CU_ASSERT_FATAL(req != NULL);
705 
706 	ut_reservation_build_registrants();
707 
708 	/* TEST CASE: Replace g_ctrlr1_A with a new key */
709 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REPLACE_KEY,
710 					      0, 0, 0xa1, 0xa11);
711 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
712 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
713 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
714 	SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa11);
715 
716 	/* TEST CASE: Host A (g_ctrlr1_A) acquires a reservation of
717 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE
718 	 */
719 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
720 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xa11, 0x0);
721 	gen = g_ns.gen;
722 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
723 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
724 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
725 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
726 	SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0xa11);
727 	SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
728 	SPDK_CU_ASSERT_FATAL(g_ns.gen == gen);
729 
730 	/* TEST CASE: g_ctrlr_C unregisters with IEKEY enabled */
731 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
732 					      1, 0, 0, 0);
733 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_C, req);
734 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
735 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
736 	SPDK_CU_ASSERT_FATAL(reg == NULL);
737 
738 	/* TEST CASE: g_ctrlr_B unregisters with the correct key */
739 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
740 					      0, 0, 0xb1, 0);
741 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
742 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
743 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
744 	SPDK_CU_ASSERT_FATAL(reg == NULL);
745 
746 	/* TEST CASE: No registrant now; g_ctrlr_B replaces its key with IEKEY disabled (fails) */
747 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REPLACE_KEY,
748 					      0, 0, 0, 0xb1);
749 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
750 	SPDK_CU_ASSERT_FATAL(rsp->status.sc != SPDK_NVME_SC_SUCCESS);
751 
752 	/* TEST CASE: No registrant now; g_ctrlr_B replaces its key with IEKEY enabled */
753 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REPLACE_KEY,
754 					      1, 0, 0, 0xb1);
755 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
756 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
757 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
758 	SPDK_CU_ASSERT_FATAL(reg != NULL);
759 
760 	/* TEST CASE: g_ctrlr_B replaces its key with IEKEY enabled and a wrong crkey (crkey is ignored) */
761 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REPLACE_KEY,
762 					      1, 0, 0xff, 0xb2);
763 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
764 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
765 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
766 	SPDK_CU_ASSERT_FATAL(reg != NULL);
767 	SPDK_CU_ASSERT_FATAL(reg->rkey == 0xb2);
768 
769 	/* TEST CASE: g_ctrlr1_A unregister with correct key,
770 	 * reservation should be removed as well.
771 	 */
772 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
773 					      0, 0, 0xa11, 0);
774 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
775 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
776 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
777 	SPDK_CU_ASSERT_FATAL(reg == NULL);
778 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
779 	SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0);
780 	SPDK_CU_ASSERT_FATAL(g_ns.holder == NULL);
781 
782 	ut_reservation_free_req(req);
783 	ut_reservation_deinit();
784 }
785 
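/*
 * Persist Through Power Loss (PTPL): registering with CPTPL set requires a
 * configuration file, and enabling or disabling PTPL is reflected in the file
 * named by g_ns.ptpl_file.
 */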
786 static void
787 test_reservation_register_with_ptpl(void)
788 {
789 	struct spdk_nvmf_request *req;
790 	struct spdk_nvme_cpl *rsp;
791 	struct spdk_nvmf_registrant *reg;
792 	bool update_sgroup = false;
793 	int rc;
794 	struct spdk_nvmf_reservation_info info;
795 
796 	ut_reservation_init();
797 
798 	req = ut_reservation_build_req(16);
799 	rsp = &req->rsp->nvme_cpl;
800 	SPDK_CU_ASSERT_FATAL(req != NULL);
801 
802 	/* TEST CASE: No persistent file, so registering with PTPL enabled fails */
803 	g_ns.ptpl_file = NULL;
804 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
805 					      SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
806 	update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
807 	SPDK_CU_ASSERT_FATAL(update_sgroup == false);
808 	SPDK_CU_ASSERT_FATAL(rsp->status.sc != SPDK_NVME_SC_SUCCESS);
809 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
810 	SPDK_CU_ASSERT_FATAL(reg == NULL);
811 
812 	/* TEST CASE: Enable PTPL */
813 	g_ns.ptpl_file = "/tmp/Ns1PR.cfg";
814 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
815 					      SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
816 	update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
817 	SPDK_CU_ASSERT_FATAL(update_sgroup == true);
818 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
819 	SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == true);
820 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
821 	SPDK_CU_ASSERT_FATAL(reg != NULL);
822 	SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &reg->hostid));
823 	/* Load reservation information from configuration file */
824 	memset(&info, 0, sizeof(info));
825 	rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
826 	SPDK_CU_ASSERT_FATAL(rc == 0);
827 	SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
828 
829 	/* TEST CASE: Disable PTPL */
830 	rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
831 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
832 					      SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON, 0, 0xa1);
833 	update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
834 	SPDK_CU_ASSERT_FATAL(update_sgroup == true);
835 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
836 	SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == false);
837 	rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
838 	SPDK_CU_ASSERT_FATAL(rc < 0);
839 	unlink(g_ns.ptpl_file);
840 
841 	ut_reservation_free_req(req);
842 	ut_reservation_deinit();
843 }
844 
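/*
 * Acquire a reservation and preempt it twice; each preempt unregisters the
 * victim registrants and bumps the namespace generation counter.
 */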
845 static void
846 test_reservation_acquire_preempt_1(void)
847 {
848 	struct spdk_nvmf_request *req;
849 	struct spdk_nvme_cpl *rsp;
850 	struct spdk_nvmf_registrant *reg;
851 	uint32_t gen;
852 
853 	ut_reservation_init();
854 
855 	req = ut_reservation_build_req(16);
856 	rsp = &req->rsp->nvme_cpl;
857 	SPDK_CU_ASSERT_FATAL(req != NULL);
858 
859 	ut_reservation_build_registrants();
860 
861 	gen = g_ns.gen;
862 	/* ACQUIRE: Host A (g_ctrlr1_A) acquires a reservation of
863 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY.
864 	 */
865 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
866 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1, 0x0);
867 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
868 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
869 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
870 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
871 	SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0xa1);
872 	SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
873 	SPDK_CU_ASSERT_FATAL(g_ns.gen == gen);
874 
875 	/* TEST CASE: g_ctrlr1_A holds the reservation; g_ctrlr_B preempts g_ctrlr1_A,
876 	 * so the g_ctrlr1_A registrant is unregistered.
877 	 */
878 	gen = g_ns.gen;
879 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
880 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xb1, 0xa1);
881 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
882 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
883 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
884 	SPDK_CU_ASSERT_FATAL(reg == NULL);
885 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
886 	SPDK_CU_ASSERT_FATAL(reg != NULL);
887 	SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
888 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
889 	SPDK_CU_ASSERT_FATAL(reg != NULL);
890 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
891 	SPDK_CU_ASSERT_FATAL(g_ns.gen > gen);
892 
893 	/* TEST CASE: g_ctrlr_B holds the reservation; g_ctrlr_C preempts g_ctrlr_B
894 	 * with a valid key and PRKEY set to 0, so all registrants other than the host that
895 	 * issued the command are unregistered.
896 	 */
897 	gen = g_ns.gen;
898 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
899 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xc1, 0x0);
900 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_C, req);
901 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
902 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr2_A.hostid);
903 	SPDK_CU_ASSERT_FATAL(reg == NULL);
904 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
905 	SPDK_CU_ASSERT_FATAL(reg == NULL);
906 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
907 	SPDK_CU_ASSERT_FATAL(reg != NULL);
908 	SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
909 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
910 	SPDK_CU_ASSERT_FATAL(g_ns.gen > gen);
911 
912 	ut_reservation_free_req(req);
913 	ut_reservation_deinit();
914 }
915 
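/*
 * With PTPL enabled, register/acquire/release updates are persisted and can be
 * read back via nvmf_ns_load_reservation().
 */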
916 static void
917 test_reservation_acquire_release_with_ptpl(void)
918 {
919 	struct spdk_nvmf_request *req;
920 	struct spdk_nvme_cpl *rsp;
921 	struct spdk_nvmf_registrant *reg;
922 	bool update_sgroup = false;
923 	struct spdk_uuid holder_uuid;
924 	int rc;
925 	struct spdk_nvmf_reservation_info info;
926 
927 	ut_reservation_init();
928 
929 	req = ut_reservation_build_req(16);
930 	rsp = &req->rsp->nvme_cpl;
931 	SPDK_CU_ASSERT_FATAL(req != NULL);
932 
933 	/* TEST CASE: Enable PTPL */
934 	g_ns.ptpl_file = "/tmp/Ns1PR.cfg";
935 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
936 					      SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
937 	update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
938 	SPDK_CU_ASSERT_FATAL(update_sgroup == true);
939 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
940 	SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == true);
941 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
942 	SPDK_CU_ASSERT_FATAL(reg != NULL);
943 	SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &reg->hostid));
944 	/* Load reservation information from configuration file */
945 	memset(&info, 0, sizeof(info));
946 	rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
947 	SPDK_CU_ASSERT_FATAL(rc == 0);
948 	SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
949 
950 	/* TEST CASE: Acquire the reservation */
951 	rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
952 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
953 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1, 0x0);
954 	update_sgroup = nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
955 	SPDK_CU_ASSERT_FATAL(update_sgroup == true);
956 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
957 	memset(&info, 0, sizeof(info));
958 	rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
959 	SPDK_CU_ASSERT_FATAL(rc == 0);
960 	SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
961 	SPDK_CU_ASSERT_FATAL(info.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
962 	SPDK_CU_ASSERT_FATAL(info.crkey == 0xa1);
963 	spdk_uuid_parse(&holder_uuid, info.holder_uuid);
964 	SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &holder_uuid));
965 
966 	/* TEST CASE: Release the reservation */
967 	rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
968 	ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
969 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1);
970 	update_sgroup = nvmf_ns_reservation_release(&g_ns, &g_ctrlr1_A, req);
971 	SPDK_CU_ASSERT_FATAL(update_sgroup == true);
972 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
973 	memset(&info, 0, sizeof(info));
974 	rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
975 	SPDK_CU_ASSERT_FATAL(rc == 0);
976 	SPDK_CU_ASSERT_FATAL(info.rtype == 0);
977 	SPDK_CU_ASSERT_FATAL(info.crkey == 0);
978 	SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
979 	unlink(g_ns.ptpl_file);
980 
981 	ut_reservation_free_req(req);
982 	ut_reservation_deinit();
983 }
984 
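/*
 * Release and Clear: with an all-registrants reservation any registrant may
 * release it, and Clear removes every registrant.
 */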
985 static void
986 test_reservation_release(void)
987 {
988 	struct spdk_nvmf_request *req;
989 	struct spdk_nvme_cpl *rsp;
990 	struct spdk_nvmf_registrant *reg;
991 
992 	ut_reservation_init();
993 
994 	req = ut_reservation_build_req(16);
995 	rsp = &req->rsp->nvme_cpl;
996 	SPDK_CU_ASSERT_FATAL(req != NULL);
997 
998 	ut_reservation_build_registrants();
999 
1000 	/* ACQUIRE: Host A (g_ctrlr1_A) acquires a reservation of
1001 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS
1002 	 */
1003 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
1004 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xa1, 0x0);
1005 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
1006 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1007 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
1008 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1009 	SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
1010 
1011 	/* Test Case: Host B releases the reservation */
1012 	ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
1013 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xb1);
1014 	nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
1015 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1016 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
1017 	SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0);
1018 	SPDK_CU_ASSERT_FATAL(g_ns.holder == NULL);
1019 
1020 	/* Test Case: Host C clears all registrants */
1021 	ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_CLEAR, 0,
1022 					     0, 0xc1);
1023 	nvmf_ns_reservation_release(&g_ns, &g_ctrlr_C, req);
1024 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1025 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
1026 	SPDK_CU_ASSERT_FATAL(reg == NULL);
1027 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr2_A.hostid);
1028 	SPDK_CU_ASSERT_FATAL(reg == NULL);
1029 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
1030 	SPDK_CU_ASSERT_FATAL(reg == NULL);
1031 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
1032 	SPDK_CU_ASSERT_FATAL(reg == NULL);
1033 
1034 	ut_reservation_free_req(req);
1035 	ut_reservation_deinit();
1036 }
1037 
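/*
 * Count reservation notifications per controller instead of queueing real log
 * pages; the notification tests below assert on num_avail_log_pages.
 */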
1038 void
1039 nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
1040 				  struct spdk_nvmf_ns *ns,
1041 				  enum spdk_nvme_reservation_notification_log_page_type type)
1042 {
1043 	ctrlr->num_avail_log_pages++;
1044 }
1045 
1046 static void
1047 test_reservation_unregister_notification(void)
1048 {
1049 	struct spdk_nvmf_request *req;
1050 	struct spdk_nvme_cpl *rsp;
1051 
1052 	ut_reservation_init();
1053 
1054 	req = ut_reservation_build_req(16);
1055 	SPDK_CU_ASSERT_FATAL(req != NULL);
1056 	rsp = &req->rsp->nvme_cpl;
1057 
1058 	ut_reservation_build_registrants();
1059 
1060 	/* ACQUIRE: Host B (g_ctrlr_B) acquires a reservation of
1061 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
1062 	 */
1063 	rsp->status.sc = 0xff;
1064 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
1065 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
1066 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
1067 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1068 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1069 
1070 	/* Test Case: g_ctrlr_B holds the reservation and then unregisters its registration.
1071 	 * A reservation released notification is sent to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C, but only for the
1072 	 * SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY
1073 	 * reservation types.
1074 	 */
1075 	rsp->status.sc = 0xff;
1076 	g_ctrlr1_A.num_avail_log_pages = 0;
1077 	g_ctrlr2_A.num_avail_log_pages = 0;
1078 	g_ctrlr_B.num_avail_log_pages = 5;
1079 	g_ctrlr_C.num_avail_log_pages = 0;
1080 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
1081 					      0, 0, 0xb1, 0);
1082 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
1083 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1084 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
1085 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
1086 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
1087 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
1088 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages);
1089 
1090 	ut_reservation_free_req(req);
1091 	ut_reservation_deinit();
1092 }
1093 
1094 static void
1095 test_reservation_release_notification(void)
1096 {
1097 	struct spdk_nvmf_request *req;
1098 	struct spdk_nvme_cpl *rsp;
1099 
1100 	ut_reservation_init();
1101 
1102 	req = ut_reservation_build_req(16);
1103 	SPDK_CU_ASSERT_FATAL(req != NULL);
1104 	rsp = &req->rsp->nvme_cpl;
1105 
1106 	ut_reservation_build_registrants();
1107 
1108 	/* ACQUIRE: Host B (g_ctrlr_B) acquires a reservation of
1109 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
1110 	 */
1111 	rsp->status.sc = 0xff;
1112 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
1113 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
1114 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
1115 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1116 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1117 
1118 	/* Test Case: g_ctrlr_B holds the reservation and then releases it.
1119 	 * A reservation released notification is sent to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C.
1120 	 */
1121 	rsp->status.sc = 0xff;
1122 	g_ctrlr1_A.num_avail_log_pages = 0;
1123 	g_ctrlr2_A.num_avail_log_pages = 0;
1124 	g_ctrlr_B.num_avail_log_pages = 5;
1125 	g_ctrlr_C.num_avail_log_pages = 0;
1126 	ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
1127 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1);
1128 	nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
1129 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1130 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
1131 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
1132 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
1133 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
1134 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages);
1135 
1136 	ut_reservation_free_req(req);
1137 	ut_reservation_deinit();
1138 }
1139 
1140 static void
1141 test_reservation_release_notification_write_exclusive(void)
1142 {
1143 	struct spdk_nvmf_request *req;
1144 	struct spdk_nvme_cpl *rsp;
1145 
1146 	ut_reservation_init();
1147 
1148 	req = ut_reservation_build_req(16);
1149 	SPDK_CU_ASSERT_FATAL(req != NULL);
1150 	rsp = &req->rsp->nvme_cpl;
1151 
1152 	ut_reservation_build_registrants();
1153 
1154 	/* ACQUIRE: Host B (g_ctrlr_B) acquires a reservation of
1155 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE
1156 	 */
1157 	rsp->status.sc = 0xff;
1158 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
1159 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xb1, 0x0);
1160 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
1161 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1162 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
1163 
1164 	/* Test Case: g_ctrlr_B holds the reservation and then releases it.
1165 	 * Because the reservation type is SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
1166 	 * no reservation notification occurs.
1167 	 */
1168 	rsp->status.sc = 0xff;
1169 	g_ctrlr1_A.num_avail_log_pages = 5;
1170 	g_ctrlr2_A.num_avail_log_pages = 5;
1171 	g_ctrlr_B.num_avail_log_pages = 5;
1172 	g_ctrlr_C.num_avail_log_pages = 5;
1173 	ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
1174 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xb1);
1175 	nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
1176 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1177 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
1178 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr1_A.num_avail_log_pages);
1179 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr2_A.num_avail_log_pages);
1180 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
1181 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_C.num_avail_log_pages);
1182 
1183 	ut_reservation_free_req(req);
1184 	ut_reservation_deinit();
1185 }
1186 
1187 static void
1188 test_reservation_clear_notification(void)
1189 {
1190 	struct spdk_nvmf_request *req;
1191 	struct spdk_nvme_cpl *rsp;
1192 
1193 	ut_reservation_init();
1194 
1195 	req = ut_reservation_build_req(16);
1196 	SPDK_CU_ASSERT_FATAL(req != NULL);
1197 	rsp = &req->rsp->nvme_cpl;
1198 
1199 	ut_reservation_build_registrants();
1200 
1201 	/* ACQUIRE: Host B (g_ctrlr_B) acquires a reservation of
1202 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
1203 	 */
1204 	rsp->status.sc = 0xff;
1205 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
1206 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
1207 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
1208 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1209 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1210 
1211 	/* Test Case: g_ctrlr_B holds the reservation and then clears it.
1212 	 * A reservation preempted notification is sent to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C.
1213 	 */
1214 	rsp->status.sc = 0xff;
1215 	g_ctrlr1_A.num_avail_log_pages = 0;
1216 	g_ctrlr2_A.num_avail_log_pages = 0;
1217 	g_ctrlr_B.num_avail_log_pages = 5;
1218 	g_ctrlr_C.num_avail_log_pages = 0;
1219 	ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_CLEAR, 0,
1220 					     0, 0xb1);
1221 	nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
1222 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1223 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
1224 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
1225 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
1226 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
1227 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages);
1228 
1229 	ut_reservation_free_req(req);
1230 	ut_reservation_deinit();
1231 }
1232 
1233 static void
1234 test_reservation_preempt_notification(void)
1235 {
1236 	struct spdk_nvmf_request *req;
1237 	struct spdk_nvme_cpl *rsp;
1238 
1239 	ut_reservation_init();
1240 
1241 	req = ut_reservation_build_req(16);
1242 	SPDK_CU_ASSERT_FATAL(req != NULL);
1243 	rsp = &req->rsp->nvme_cpl;
1244 
1245 	ut_reservation_build_registrants();
1246 
1247 	/* ACQUIRE: Host B (g_ctrlr_B) acquires a reservation of
1248 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
1249 	 */
1250 	rsp->status.sc = 0xff;
1251 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
1252 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
1253 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
1254 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1255 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1256 
1257 	/* Test Case: g_ctrlr_B holds the reservation; g_ctrlr_C preempts g_ctrlr_B,
1258 	 * so the g_ctrlr_B registrant is unregistered and the reservation is preempted.
1259 	 * A registration preempted notification is sent to g_ctrlr_B.
1260 	 * A reservation preempted notification is sent to g_ctrlr1_A/g_ctrlr2_A.
1261 	 */
1262 	rsp->status.sc = 0xff;
1263 	g_ctrlr1_A.num_avail_log_pages = 0;
1264 	g_ctrlr2_A.num_avail_log_pages = 0;
1265 	g_ctrlr_B.num_avail_log_pages = 0;
1266 	g_ctrlr_C.num_avail_log_pages = 5;
1267 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
1268 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xc1, 0xb1);
1269 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_C, req);
1270 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1271 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1272 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
1273 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
1274 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_B.num_avail_log_pages);
1275 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_C.num_avail_log_pages);
1276 
1277 	ut_reservation_free_req(req);
1278 	ut_reservation_deinit();
1279 }
1280 
1281 static int
1282 nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
1283 {
1284 	return 0;
1285 }
1286 
1287 static void
1288 nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf)
1289 {
1290 }
1291 
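/*
 * Bdev resize and remove events pause the subsystem, report the namespace
 * change to each controller, and then return the subsystem to ACTIVE.
 */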
1292 static void
1293 test_spdk_nvmf_ns_event(void)
1294 {
1295 	struct spdk_nvmf_tgt tgt = {};
1296 	struct spdk_nvmf_subsystem subsystem = {
1297 		.max_nsid = 1024,
1298 		.ns = NULL,
1299 		.tgt = &tgt,
1300 	};
1301 	struct spdk_nvmf_ctrlr ctrlr = {
1302 		.subsys = &subsystem
1303 	};
1304 	struct spdk_nvmf_ns_opts ns_opts;
1305 	uint32_t nsid;
1306 	struct spdk_bdev *bdev;
1307 
1308 	subsystem.ns = calloc(subsystem.max_nsid, sizeof(struct spdk_nvmf_subsystem_ns *));
1309 	SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);
1310 	subsystem.ana_group = calloc(subsystem.max_nsid, sizeof(uint32_t));
1311 	SPDK_CU_ASSERT_FATAL(subsystem.ana_group != NULL);
1312 
1313 	tgt.max_subsystems = 1024;
1314 	tgt.subsystem_ids = spdk_bit_array_create(tgt.max_subsystems);
1315 	RB_INIT(&tgt.subsystems);
1316 
1317 	spdk_io_device_register(&tgt,
1318 				nvmf_tgt_create_poll_group,
1319 				nvmf_tgt_destroy_poll_group,
1320 				sizeof(struct spdk_nvmf_poll_group),
1321 				NULL);
1322 
1323 	/* Add one namespace */
1324 	spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
1325 	nsid = spdk_nvmf_subsystem_add_ns_ext(&subsystem, "bdev1", &ns_opts, sizeof(ns_opts), NULL);
1326 	CU_ASSERT(nsid == 1);
1327 	CU_ASSERT(NULL != subsystem.ns[0]);
1328 	CU_ASSERT(subsystem.ns[nsid - 1]->bdev == &g_bdevs[nsid - 1]);
1329 
1330 	bdev = subsystem.ns[nsid - 1]->bdev;
1331 
1332 	/* Add one controller */
1333 	TAILQ_INIT(&subsystem.ctrlrs);
1334 	TAILQ_INSERT_TAIL(&subsystem.ctrlrs, &ctrlr, link);
1335 
1336 	/* Namespace resize event */
1337 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1338 	g_ns_changed_nsid = 0xFFFFFFFF;
1339 	g_ns_changed_ctrlr = NULL;
1340 	nvmf_ns_event(SPDK_BDEV_EVENT_RESIZE, bdev, subsystem.ns[0]);
1341 	CU_ASSERT(SPDK_NVMF_SUBSYSTEM_PAUSING == subsystem.state);
1342 
1343 	poll_threads();
1344 	CU_ASSERT(1 == g_ns_changed_nsid);
1345 	CU_ASSERT(&ctrlr == g_ns_changed_ctrlr);
1346 	CU_ASSERT(SPDK_NVMF_SUBSYSTEM_ACTIVE == subsystem.state);
1347 
1348 	/* Namespace remove event */
1349 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1350 	g_ns_changed_nsid = 0xFFFFFFFF;
1351 	g_ns_changed_ctrlr = NULL;
1352 	nvmf_ns_event(SPDK_BDEV_EVENT_REMOVE, bdev, subsystem.ns[0]);
1353 	CU_ASSERT(SPDK_NVMF_SUBSYSTEM_PAUSING == subsystem.state);
1354 	CU_ASSERT(0xFFFFFFFF == g_ns_changed_nsid);
1355 	CU_ASSERT(NULL == g_ns_changed_ctrlr);
1356 
1357 	poll_threads();
1358 	CU_ASSERT(1 == g_ns_changed_nsid);
1359 	CU_ASSERT(&ctrlr == g_ns_changed_ctrlr);
1360 	CU_ASSERT(NULL == subsystem.ns[0]);
1361 	CU_ASSERT(SPDK_NVMF_SUBSYSTEM_ACTIVE == subsystem.state);
1362 
1363 	spdk_io_device_unregister(&tgt, NULL);
1364 
1365 	poll_threads();
1366 
1367 	free(subsystem.ns);
1368 	free(subsystem.ana_group);
1369 	spdk_bit_array_free(&tgt.subsystem_ids);
1370 }
1371 
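/* Adding or removing a registrant bumps the namespace generation counter. */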
1372 static void
1373 test_nvmf_ns_reservation_add_remove_registrant(void)
1374 {
1375 	struct spdk_nvmf_ns ns = {};
1376 	struct spdk_nvmf_ctrlr ctrlr = {};
1377 	struct spdk_nvmf_registrant *reg = NULL;
1378 	int rc;
1379 
1380 	TAILQ_INIT(&ns.registrants);
1381 	spdk_uuid_generate(&ctrlr.hostid);
1382 
1383 	rc = nvmf_ns_reservation_add_registrant(&ns, &ctrlr, 0xa11);
1384 	CU_ASSERT(rc == 0);
1385 	reg = TAILQ_FIRST(&ns.registrants);
1386 	SPDK_CU_ASSERT_FATAL(reg != NULL);
1387 	CU_ASSERT(ns.gen == 1);
1388 	CU_ASSERT(reg->rkey == 0xa11);
1389 	CU_ASSERT(!spdk_uuid_compare(&reg->hostid, &ctrlr.hostid));
1390 
1391 	nvmf_ns_reservation_remove_registrant(&ns, reg);
1392 	CU_ASSERT(TAILQ_EMPTY(&ns.registrants));
1393 	CU_ASSERT(ns.gen == 2);
1394 }
1395 
1396 static void
1397 test_nvmf_subsystem_destroy_cb(void *cb_arg)
1398 {
1399 }
1400 
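/* A new controller gets the first free cntlid and can be looked up and removed again. */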
1401 static void
1402 test_nvmf_subsystem_add_ctrlr(void)
1403 {
1404 	int rc;
1405 	struct spdk_nvmf_ctrlr ctrlr = {};
1406 	struct spdk_nvmf_tgt tgt = {};
1407 	char nqn[256] = "nqn.2016-06.io.spdk:subsystem1";
1408 	struct spdk_nvmf_subsystem *subsystem = NULL;
1409 
1410 	tgt.max_subsystems = 1024;
1411 	tgt.subsystem_ids = spdk_bit_array_create(tgt.max_subsystems);
1412 	RB_INIT(&tgt.subsystems);
1413 
1414 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
1415 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
1416 	ctrlr.subsys = subsystem;
1417 
1418 	ctrlr.dynamic_ctrlr = true;
1419 	rc = nvmf_subsystem_add_ctrlr(subsystem, &ctrlr);
1420 	CU_ASSERT(rc == 0);
1421 	CU_ASSERT(!TAILQ_EMPTY(&subsystem->ctrlrs));
1422 	CU_ASSERT(ctrlr.cntlid == 1);
1423 	CU_ASSERT(nvmf_subsystem_get_ctrlr(subsystem, 1) == &ctrlr);
1424 
1425 	nvmf_subsystem_remove_ctrlr(subsystem, &ctrlr);
1426 	CU_ASSERT(TAILQ_EMPTY(&subsystem->ctrlrs));
1427 	rc = spdk_nvmf_subsystem_destroy(subsystem, test_nvmf_subsystem_destroy_cb, NULL);
1428 	CU_ASSERT(rc == 0);
1429 	spdk_bit_array_free(&tgt.subsystem_ids);
1430 }
1431 
1432 static void
1433 _add_transport_cb(void *arg, int status)
1434 {
1435 	CU_ASSERT(status == 0);
1436 }
1437 
1438 static int
1439 transport_subsystem_add_host_err(struct spdk_nvmf_transport *transport,
1440 				 const struct spdk_nvmf_subsystem *subsystem,
1441 				 const char *hostnqn,
1442 				 const struct spdk_json_val *transport_specific)
1443 {
1444 	return -1;
1445 }
1446 
1447 void
1448 spdk_nvmf_tgt_add_transport(struct spdk_nvmf_tgt *tgt,
1449 			    struct spdk_nvmf_transport *transport,
1450 			    spdk_nvmf_tgt_add_transport_done_fn cb_fn,
1451 			    void *cb_arg)
1452 {
1453 	TAILQ_INSERT_TAIL(&tgt->transports, transport, link);
1454 }
1455 
1456 static struct spdk_nvmf_transport *
1457 transport_create(struct spdk_nvmf_transport_opts *opts)
1458 {
1459 	return &g_transport;
1460 }
1461 
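/*
 * Allowed-host management: adding a host NQN is idempotent, removing a missing
 * NQN returns -ENOENT, and a failing transport subsystem_add_host callback
 * leaves the host list unchanged.
 */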
1462 static void
1463 test_spdk_nvmf_subsystem_add_host(void)
1464 {
1465 	struct spdk_nvmf_tgt tgt = {};
1466 	struct spdk_nvmf_subsystem *subsystem = NULL;
1467 	int rc;
1468 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
1469 	const char subsystemnqn[] = "nqn.2016-06.io.spdk:subsystem1";
1470 	struct spdk_nvmf_transport_opts opts = {.opts_size = 1};
1471 	const struct spdk_nvmf_transport_ops test_ops = {
1472 		.name = "transport_ut",
1473 		.create = transport_create,
1474 		.subsystem_add_host = transport_subsystem_add_host_err,
1475 	};
1476 	struct spdk_nvmf_transport *transport;
1477 
1478 	tgt.max_subsystems = 1024;
1479 	tgt.subsystem_ids = spdk_bit_array_create(tgt.max_subsystems);
1480 	RB_INIT(&tgt.subsystems);
1481 
1482 	subsystem = spdk_nvmf_subsystem_create(&tgt, subsystemnqn, SPDK_NVMF_SUBTYPE_NVME, 0);
1483 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
1484 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, subsystemnqn);
1485 
1486 	rc = spdk_nvmf_subsystem_add_host(subsystem, hostnqn, NULL);
1487 	CU_ASSERT(rc == 0);
1488 	CU_ASSERT(!TAILQ_EMPTY(&subsystem->hosts));
1489 
1490 	/* Add an existing NQN; calling this again for a previously added NQN is allowed. */
1491 	rc = spdk_nvmf_subsystem_add_host(subsystem, hostnqn, NULL);
1492 	CU_ASSERT(rc == 0);
1493 
1494 	rc = spdk_nvmf_subsystem_remove_host(subsystem, hostnqn);
1495 	CU_ASSERT(rc == 0);
1496 	CU_ASSERT(TAILQ_EMPTY(&subsystem->hosts));
1497 
1498 	/* Removing an NQN that is not in the host list fails */
1499 	rc = spdk_nvmf_subsystem_remove_host(subsystem, hostnqn);
1500 	CU_ASSERT(rc == -ENOENT);
1501 
1502 	/* Ensure hostnqn list remains empty after transport callback fails */
1503 	spdk_nvmf_transport_register(&test_ops);
1504 	transport = spdk_nvmf_transport_create("transport_ut", &opts);
1505 	SPDK_CU_ASSERT_FATAL(transport != NULL);
1506 
1507 	TAILQ_INIT(&tgt.transports);
1508 	spdk_nvmf_tgt_add_transport(&tgt, transport, _add_transport_cb, NULL);
1509 
1510 	rc = spdk_nvmf_subsystem_add_host(subsystem, hostnqn, NULL);
1511 	CU_ASSERT(rc != 0);
1512 	CU_ASSERT(TAILQ_EMPTY(&subsystem->hosts));
1513 
1514 	spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
1515 	spdk_bit_array_free(&tgt.subsystem_ids);
1516 }
1517 
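/*
 * Issue a Reservation Report against a namespace with two registrants. With
 * the EDS bit set the payload is a reservation status data structure followed
 * by one extended registered-controller data entry per registrant, so the test
 * checks the generation, type and registrant count as well as both entries'
 * keys and host IDs, then covers the EDS-cleared and invalid-transfer-length
 * error paths.
 */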
1518 static void
1519 test_nvmf_ns_reservation_report(void)
1520 {
1521 	struct spdk_nvmf_ns ns = {};
1522 	struct spdk_nvmf_ctrlr ctrlr = {};
1523 	struct spdk_nvmf_request req = {};
1524 	union nvmf_h2c_msg cmd = {};
1525 	union nvmf_c2h_msg rsp = {};
1526 	struct spdk_nvme_registered_ctrlr_extended_data *ctrlr_data;
1527 	struct spdk_nvme_reservation_status_extended_data *status_data;
1528 	struct spdk_nvmf_registrant *reg;
1529 	void *data;
1530 
1531 	data = calloc(1, sizeof(*status_data) + sizeof(*ctrlr_data) * 2);
1532 	reg = calloc(2, sizeof(struct spdk_nvmf_registrant));
1533 	SPDK_CU_ASSERT_FATAL(data != NULL && reg != NULL);
1534 
1535 	req.length = sizeof(*status_data) + sizeof(*ctrlr_data) * 2;
1536 	spdk_iov_one(req.iov, &req.iovcnt, data, req.length);
1537 	req.data = req.iov[0].iov_base;
1538 
1539 	req.cmd = &cmd;
1540 	req.rsp = &rsp;
1541 	ns.gen = 1;
1542 	ns.rtype = SPDK_NVME_RESERVE_WRITE_EXCLUSIVE;
1543 	ns.ptpl_activated = true;
1544 	cmd.nvme_cmd.cdw11_bits.resv_report.eds = true;
1545 	cmd.nvme_cmd.cdw10 = 100;
1546 	reg[0].rkey = 0xa;
1547 	reg[1].rkey = 0xb;
1548 	spdk_uuid_generate(&reg[0].hostid);
1549 	spdk_uuid_generate(&reg[1].hostid);
1550 	TAILQ_INIT(&ns.registrants);
1551 	TAILQ_INSERT_TAIL(&ns.registrants, &reg[0], link);
1552 	TAILQ_INSERT_TAIL(&ns.registrants, &reg[1], link);
1553 
1554 	nvmf_ns_reservation_report(&ns, &ctrlr, &req);
1555 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1556 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1557 	/* Get ctrlr data and status data pointers */
1558 	ctrlr_data = (void *)((char *)req.iov[0].iov_base + sizeof(*status_data));
1559 	status_data = (void *)req.iov[0].iov_base;
1560 	SPDK_CU_ASSERT_FATAL(status_data != NULL && ctrlr_data != NULL);
1561 	CU_ASSERT(status_data->data.gen == 1);
1562 	CU_ASSERT(status_data->data.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
1563 	CU_ASSERT(status_data->data.ptpls == true);
1564 	CU_ASSERT(status_data->data.regctl == 2);
1565 	CU_ASSERT(ctrlr_data->cntlid == 0xffff);
1566 	CU_ASSERT(ctrlr_data->rcsts.status == false);
1567 	CU_ASSERT(ctrlr_data->rkey == 0xa);
1568 	CU_ASSERT(!spdk_uuid_compare((struct spdk_uuid *)ctrlr_data->hostid, &reg[0].hostid));
1569 	/* Check second ctrlr data */
1570 	ctrlr_data++;
1571 	CU_ASSERT(ctrlr_data->cntlid == 0xffff);
1572 	CU_ASSERT(ctrlr_data->rcsts.status == false);
1573 	CU_ASSERT(ctrlr_data->rkey == 0xb);
1574 	CU_ASSERT(!spdk_uuid_compare((struct spdk_uuid *)ctrlr_data->hostid, &reg[1].hostid));
1575 
1576 	/* EDS cleared: NVMe-oF requires the extended controller data structure */
1577 	spdk_iov_memset(req.iov, req.iovcnt, 0);
1578 	memset(req.rsp, 0, sizeof(*req.rsp));
1579 	cmd.nvme_cmd.cdw11_bits.resv_report.eds = false;
1580 
1581 	nvmf_ns_reservation_report(&ns, &ctrlr, &req);
1582 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT);
1583 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1584 
1585 	/* Transfer length invalid */
1586 	spdk_iov_memset(req.iov, req.iovcnt, 0);
1587 	memset(req.rsp, 0, sizeof(*req.rsp));
1588 	cmd.nvme_cmd.cdw11_bits.resv_report.eds = true;
1589 	cmd.nvme_cmd.cdw10 = 0;
1590 
1591 	nvmf_ns_reservation_report(&ns, &ctrlr, &req);
1592 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
1593 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1594 
1595 	free(req.iov[0].iov_base);
1596 	free(reg);
1597 }
1598 
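/*
 * Run nvmf_valid_nqn() against the discovery NQN, a UUID-based NQN and a
 * reverse-domain NQN (all valid), then against an NQN that is too short, a
 * truncated UUID NQN and a malformed domain (all rejected).
 */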
1599 static void
1600 test_nvmf_valid_nqn(void)
1601 {
1602 	bool rc;
1603 	char uuid[SPDK_NVMF_UUID_STRING_LEN + 1] = {};
1604 	char nqn[SPDK_NVMF_NQN_MAX_LEN + 1] = {};
1605 	struct spdk_uuid s_uuid = {};
1606 
1607 	spdk_uuid_generate(&s_uuid);
1608 	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &s_uuid);
1609 
1610 	/* discovery nqn */
1611 	snprintf(nqn, sizeof(nqn), "%s", SPDK_NVMF_DISCOVERY_NQN);
1612 
1613 	rc = nvmf_valid_nqn(nqn);
1614 	CU_ASSERT(rc == true);
1615 
1616 	/* nqn with uuid */
1617 	memset(nqn, 0xff, sizeof(nqn));
1618 	snprintf(nqn, sizeof(nqn), "%s%s", SPDK_NVMF_NQN_UUID_PRE, uuid);
1619 
1620 	rc = nvmf_valid_nqn(nqn);
1621 	CU_ASSERT(rc == true);
1622 
1623 	/* NQN with a valid reverse-domain name */
1624 	memset(nqn, 0xff, sizeof(nqn));
1625 	snprintf(nqn, sizeof(nqn), "%s", "nqn.2016-06.io.spdk:cnode1");
1626 
1627 	rc = nvmf_valid_nqn(nqn);
1628 	CU_ASSERT(rc == true);
1629 
1630 	/* Invalid nqn length */
1631 	memset(nqn, 0xff, sizeof(nqn));
1632 	snprintf(nqn, sizeof(nqn), "%s", "nqn.");
1633 
1634 	rc = nvmf_valid_nqn(nqn);
1635 	CU_ASSERT(rc == false);
1636 
1637 	/* Copy uuid to the nqn string, but omit the last character to make it invalid */
1638 	memset(nqn, 0, SPDK_NVMF_NQN_MAX_LEN + 1);
1639 	snprintf(nqn, sizeof(nqn), "%s", SPDK_NVMF_NQN_UUID_PRE);
1640 	memcpy(&nqn[SPDK_NVMF_NQN_UUID_PRE_LEN], uuid, SPDK_NVMF_UUID_STRING_LEN - 1);
1641 
1642 	rc = nvmf_valid_nqn(nqn);
1643 	CU_ASSERT(rc == false);
1644 
1645 	/* Invalid domain */
1646 	memset(nqn, 0xff, SPDK_NVMF_NQN_MAX_LEN + 1);
1647 	snprintf(nqn, sizeof(nqn), "%s", "nqn.2016-06.io...spdk:cnode1");
1648 
1649 	rc = nvmf_valid_nqn(nqn);
1650 	CU_ASSERT(rc == false);
1651 }
1652 
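/*
 * Restore a persisted (ptpl) reservation onto a namespace: the restore fails
 * when info->crkey matches no registrant, succeeds and makes the matching
 * registrant the holder when it does, and fails when the bdev UUID no longer
 * matches the one recorded in the reservation info.
 */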
1653 static void
1654 test_nvmf_ns_reservation_restore(void)
1655 {
1656 	struct spdk_nvmf_ns ns = {};
1657 	struct spdk_nvmf_reservation_info info = {};
1658 	struct spdk_bdev bdev = {};
1659 	struct spdk_uuid s_uuid = {};
1660 	struct spdk_nvmf_registrant *reg0, *reg1;
1661 	char uuid[SPDK_UUID_STRING_LEN] = {};
1662 	int rc;
1663 
1664 	ns.bdev = &bdev;
1665 	TAILQ_INIT(&ns.registrants);
1666 	info.ptpl_activated = true;
1667 	info.num_regs = 2;
1668 	info.rtype = SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS;
1669 	info.registrants[0].rkey = 0xb;
1670 	info.registrants[1].rkey = 0xc;
1671 
1672 	/* Generate and prepare UUIDs; make sure the bdev and info UUIDs match */
1673 	spdk_uuid_generate(&s_uuid);
1674 	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &s_uuid);
1675 	snprintf(info.holder_uuid, SPDK_UUID_STRING_LEN, "%s", uuid);
1676 	snprintf(info.bdev_uuid, SPDK_UUID_STRING_LEN, "%s", uuid);
1677 	snprintf(info.registrants[0].host_uuid, SPDK_UUID_STRING_LEN, "%s", uuid);
1678 	spdk_uuid_copy(&bdev.uuid, &s_uuid);
1679 	spdk_uuid_generate(&s_uuid);
1680 	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &s_uuid);
1681 	snprintf(info.registrants[1].host_uuid, SPDK_UUID_STRING_LEN, "%s", uuid);
1682 
1683 	/* info->crkey does not match any registrant's rkey */
1684 	info.crkey = 0xa;
1685 
1686 	rc = nvmf_ns_reservation_restore(&ns, &info);
1687 	CU_ASSERT(rc == -EINVAL);
1688 
1689 	/* info->crkey matches a registrant's rkey */
1690 	info.crkey = 0xb;
1691 
1692 	rc = nvmf_ns_reservation_restore(&ns, &info);
1693 	CU_ASSERT(rc == 0);
1694 	CU_ASSERT(ns.crkey == 0xb);
1695 	CU_ASSERT(ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1696 	CU_ASSERT(ns.ptpl_activated == true);
1697 	/* Check both registrants' rkeys */
1698 	reg0 = TAILQ_FIRST(&ns.registrants);
1699 	reg1 = TAILQ_NEXT(reg0, link);
1700 	CU_ASSERT(ns.holder == reg0);
1701 	CU_ASSERT(reg0->rkey == 0xb);
1702 	CU_ASSERT(reg1->rkey == 0xc);
1703 
1704 	rc = nvmf_ns_reservation_clear_all_registrants(&ns);
1705 	CU_ASSERT(rc == 2);
1706 	CU_ASSERT(TAILQ_EMPTY(&ns.registrants));
1707 
1708 	/* Existing bdev UUID differs from the one saved in the reservation info */
1709 	spdk_uuid_generate(&s_uuid);
1710 	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &s_uuid);
1711 	snprintf(info.bdev_uuid, SPDK_UUID_STRING_LEN, "%s", uuid);
1712 	spdk_uuid_generate(&s_uuid);
1713 	spdk_uuid_copy(&bdev.uuid, &s_uuid);
1714 
1715 	rc = nvmf_ns_reservation_restore(&ns, &info);
1716 	CU_ASSERT(rc == -EINVAL);
1717 }
1718 
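/*
 * Walk a subsystem through its asynchronous state machine:
 * INACTIVE -> ACTIVE -> PAUSED -> INACTIVE. A stop requested while the pause
 * is still in progress must be rejected with -EBUSY. The discovery subsystem
 * is started and stopped alongside the NVMe subsystem.
 */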
1719 static void
1720 test_nvmf_subsystem_state_change(void)
1721 {
1722 	struct spdk_nvmf_tgt tgt = {};
1723 	struct spdk_nvmf_subsystem *subsystem, *discovery_subsystem;
1724 	int rc;
1725 
1726 	tgt.max_subsystems = 1024;
1727 	tgt.subsystem_ids = spdk_bit_array_create(tgt.max_subsystems);
1728 	RB_INIT(&tgt.subsystems);
1729 
1730 	discovery_subsystem = spdk_nvmf_subsystem_create(&tgt, SPDK_NVMF_DISCOVERY_NQN,
1731 			      SPDK_NVMF_SUBTYPE_DISCOVERY, 0);
1732 	SPDK_CU_ASSERT_FATAL(discovery_subsystem != NULL);
1733 	subsystem = spdk_nvmf_subsystem_create(&tgt, "nqn.2016-06.io.spdk:subsystem1",
1734 					       SPDK_NVMF_SUBTYPE_NVME, 0);
1735 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
1736 
1737 	spdk_io_device_register(&tgt,
1738 				nvmf_tgt_create_poll_group,
1739 				nvmf_tgt_destroy_poll_group,
1740 				sizeof(struct spdk_nvmf_poll_group),
1741 				NULL);
1742 
1743 	rc = spdk_nvmf_subsystem_start(discovery_subsystem, NULL, NULL);
1744 	CU_ASSERT(rc == 0);
1745 	poll_threads();
1746 	CU_ASSERT(discovery_subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVE);
1747 	rc = spdk_nvmf_subsystem_start(subsystem, NULL, NULL);
1748 	CU_ASSERT(rc == 0);
1749 	poll_threads();
1750 	CU_ASSERT(subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVE);
1751 
1752 	rc = spdk_nvmf_subsystem_pause(subsystem, SPDK_NVME_GLOBAL_NS_TAG, NULL, NULL);
1753 	CU_ASSERT(rc == 0);
1754 	rc = spdk_nvmf_subsystem_stop(subsystem, NULL, NULL);
1755 	CU_ASSERT(rc == -EBUSY);
1756 	poll_threads();
1757 	CU_ASSERT(subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED);
1758 
1759 	rc = spdk_nvmf_subsystem_stop(discovery_subsystem, NULL, NULL);
1760 	CU_ASSERT(rc == 0);
1761 	poll_threads();
1762 	CU_ASSERT(discovery_subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE);
1763 	rc = spdk_nvmf_subsystem_stop(subsystem, NULL, NULL);
1764 	CU_ASSERT(rc == 0);
1765 	poll_threads();
1766 	CU_ASSERT(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE);
1767 
1768 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
1769 	CU_ASSERT(rc == 0);
1770 	rc = spdk_nvmf_subsystem_destroy(discovery_subsystem, NULL, NULL);
1771 	CU_ASSERT(rc == 0);
1772 
1773 	spdk_io_device_unregister(&tgt, NULL);
1774 	poll_threads();
1775 
1776 	spdk_bit_array_free(&tgt.subsystem_ids);
1777 }
1778 
1779 int
1780 main(int argc, char **argv)
1781 {
1782 	CU_pSuite	suite = NULL;
1783 	unsigned int	num_failures;
1784 
1785 	CU_initialize_registry();
1786 
1787 	suite = CU_add_suite("nvmf", NULL, NULL);
1788 
1789 	CU_ADD_TEST(suite, nvmf_test_create_subsystem);
1790 	CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_add_ns);
1791 	CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_set_sn);
1792 	CU_ADD_TEST(suite, test_reservation_register);
1793 	CU_ADD_TEST(suite, test_reservation_register_with_ptpl);
1794 	CU_ADD_TEST(suite, test_reservation_acquire_preempt_1);
1795 	CU_ADD_TEST(suite, test_reservation_acquire_release_with_ptpl);
1796 	CU_ADD_TEST(suite, test_reservation_release);
1797 	CU_ADD_TEST(suite, test_reservation_unregister_notification);
1798 	CU_ADD_TEST(suite, test_reservation_release_notification);
1799 	CU_ADD_TEST(suite, test_reservation_release_notification_write_exclusive);
1800 	CU_ADD_TEST(suite, test_reservation_clear_notification);
1801 	CU_ADD_TEST(suite, test_reservation_preempt_notification);
1802 	CU_ADD_TEST(suite, test_spdk_nvmf_ns_event);
1803 	CU_ADD_TEST(suite, test_nvmf_ns_reservation_add_remove_registrant);
1804 	CU_ADD_TEST(suite, test_nvmf_subsystem_add_ctrlr);
1805 	CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_add_host);
1806 	CU_ADD_TEST(suite, test_nvmf_ns_reservation_report);
1807 	CU_ADD_TEST(suite, test_nvmf_valid_nqn);
1808 	CU_ADD_TEST(suite, test_nvmf_ns_reservation_restore);
1809 	CU_ADD_TEST(suite, test_nvmf_subsystem_state_change);
1810 
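	/*
	 * The state-change tests rely on poll_threads() to complete the
	 * asynchronous subsystem transitions, so a single unit-test thread is
	 * sufficient.
	 */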
1811 	allocate_threads(1);
1812 	set_thread(0);
1813 
1814 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
1815 	CU_cleanup_registry();
1816 
1817 	free_threads();
1818 
1819 	return num_failures;
1820 }
1821