1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2016 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "common/lib/ut_multithread.c"
9 #include "spdk_internal/cunit.h"
10 #include "spdk/nvmf.h"
11 #include "spdk_internal/mock.h"
12 
13 #include "spdk/bdev_module.h"
14 #include "nvmf/subsystem.c"
15 #include "nvmf/transport.c"
16 
17 SPDK_LOG_REGISTER_COMPONENT(nvmf)
18 
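/*
 * The stubs below satisfy link-time dependencies pulled in by compiling
 * nvmf/subsystem.c and nvmf/transport.c into this unit test; they return
 * fixed values so the tests can focus on subsystem and namespace logic.
 */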
19 DEFINE_STUB(spdk_bdev_module_claim_bdev,
20 	    int,
21 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
22 	     struct spdk_bdev_module *module), 0);
23 
24 DEFINE_STUB_V(spdk_bdev_module_release_bdev,
25 	      (struct spdk_bdev *bdev));
26 
27 DEFINE_STUB(spdk_bdev_get_block_size, uint32_t,
28 	    (const struct spdk_bdev *bdev), 512);
29 
30 DEFINE_STUB(spdk_bdev_get_md_size, uint32_t,
31 	    (const struct spdk_bdev *bdev), 0);
32 
33 DEFINE_STUB(spdk_bdev_is_md_interleaved, bool,
34 	    (const struct spdk_bdev *bdev), false);
35 
36 DEFINE_STUB(spdk_bdev_io_type_supported, bool,
37 	    (struct spdk_bdev *bdev,
38 	     enum spdk_bdev_io_type io_type), false);
39 
40 DEFINE_STUB_V(nvmf_update_discovery_log,
41 	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn));
42 DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair), 0);
43 
44 DEFINE_STUB(spdk_nvmf_request_complete,
45 	    int,
46 	    (struct spdk_nvmf_request *req), 0);
47 
48 DEFINE_STUB(nvmf_ctrlr_async_event_ana_change_notice,
49 	    int,
50 	    (struct spdk_nvmf_ctrlr *ctrlr), 0);
51 
52 DEFINE_STUB(spdk_nvme_transport_id_trtype_str,
53 	    const char *,
54 	    (enum spdk_nvme_transport_type trtype), NULL);
55 
56 DEFINE_STUB(spdk_bdev_is_zoned, bool,
57 	    (const struct spdk_bdev *bdev), false);
58 
59 DEFINE_STUB(spdk_bdev_get_max_zone_append_size, uint32_t,
60 	    (const struct spdk_bdev *bdev), 0);
61 
62 DEFINE_STUB(spdk_mempool_lookup, struct spdk_mempool *,
63 	    (const char *name), NULL);
64 
65 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *,
66 	    (enum spdk_nvmf_adrfam adrfam), NULL);
67 
68 DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int,
69 	    (struct spdk_nvmf_qpair *qpair,
70 	     struct spdk_nvme_transport_id *trid), 0);
71 DEFINE_STUB(spdk_key_dup, struct spdk_key *, (struct spdk_key *k), NULL);
72 DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
73 DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
74 DEFINE_STUB(nvmf_auth_is_supported, bool, (void), false);
75 
76 static struct spdk_nvmf_transport g_transport = {};
77 
78 struct spdk_nvmf_subsystem *
79 spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
80 {
81 	return NULL;
82 }
83 
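/*
 * Any transport name other than RDMA returns the dummy g_transport;
 * requesting RDMA returns NULL (strncmp() yields 0 on a match).
 */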
84 struct spdk_nvmf_transport *
85 spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
86 {
87 	if (strncmp(transport_name, SPDK_NVME_TRANSPORT_NAME_RDMA, SPDK_NVMF_TRSTRING_MAX_LEN)) {
88 		return &g_transport;
89 	}
90 
91 	return NULL;
92 }
93 
94 int
95 nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
96 				 struct spdk_nvmf_subsystem *subsystem)
97 {
98 	return 0;
99 }
100 
101 int
102 nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
103 			      struct spdk_nvmf_subsystem *subsystem,
104 			      spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
105 {
106 	return 0;
107 }
108 
109 void
110 nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
111 				 struct spdk_nvmf_subsystem *subsystem,
112 				 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
113 {
114 }
115 
116 void
117 nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
118 				struct spdk_nvmf_subsystem *subsystem,
119 				uint32_t nsid,
120 				spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
121 {
122 }
123 
124 void
125 nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
126 				 struct spdk_nvmf_subsystem *subsystem,
127 				 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
128 {
129 }
130 
131 int
132 spdk_nvme_transport_id_parse_trtype(enum spdk_nvme_transport_type *trtype, const char *str)
133 {
134 	if (trtype == NULL || str == NULL) {
135 		return -EINVAL;
136 	}
137 
138 	if (strcasecmp(str, "PCIe") == 0) {
139 		*trtype = SPDK_NVME_TRANSPORT_PCIE;
140 	} else if (strcasecmp(str, "RDMA") == 0) {
141 		*trtype = SPDK_NVME_TRANSPORT_RDMA;
142 	} else {
143 		return -ENOENT;
144 	}
145 	return 0;
146 }
147 
148 int
149 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
150 			       const struct spdk_nvme_transport_id *trid2)
151 {
152 	return 0;
153 }
154 
155 int32_t
156 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
157 {
158 	return -1;
159 }
160 
161 int32_t
162 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
163 {
164 	return -1;
165 }
166 
167 int
168 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
169 {
170 	return -1;
171 }
172 
173 void
174 nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
175 {
176 }
177 
178 static struct spdk_nvmf_ctrlr *g_ns_changed_ctrlr = NULL;
179 static uint32_t g_ns_changed_nsid = 0;
180 void
181 nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
182 {
183 	g_ns_changed_ctrlr = ctrlr;
184 	g_ns_changed_nsid = nsid;
185 }
186 
187 
188 static struct spdk_nvmf_ctrlr *g_async_event_ctrlr = NULL;
189 int
190 nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr)
191 {
192 	g_async_event_ctrlr = ctrlr;
193 	return 0;
194 }
195 
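/*
 * Fake bdevs used by the namespace tests. bdev3 sets CTRATT bit 19 (FDPS),
 * so it is treated as Flexible Data Placement capable by the FDP test below.
 */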
196 static struct spdk_bdev g_bdevs[] = {
197 	{ .name = "bdev1" },
198 	{ .name = "bdev2" },
199 	{ .name = "bdev3", .ctratt.raw = 0x80000 },
200 };
201 
202 struct spdk_bdev_desc {
203 	struct spdk_bdev	*bdev;
204 };
205 
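/* Minimal spdk_bdev_open_ext() stub: look up one of the fake g_bdevs by name. */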
206 int
207 spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
208 		   void *event_ctx, struct spdk_bdev_desc **_desc)
209 {
210 	struct spdk_bdev_desc *desc;
211 	size_t i;
212 
213 	for (i = 0; i < sizeof(g_bdevs) / sizeof(g_bdevs[0]); i++) {
214 		if (strcmp(bdev_name, g_bdevs[i].name) == 0) {
215 
216 			desc = calloc(1, sizeof(*desc));
217 			SPDK_CU_ASSERT_FATAL(desc != NULL);
218 
219 			desc->bdev = &g_bdevs[i];
220 			*_desc = desc;
221 			return 0;
222 		}
223 	}
224 
225 	return -EINVAL;
226 }
227 
228 void
229 spdk_bdev_close(struct spdk_bdev_desc *desc)
230 {
231 	free(desc);
232 }
233 
234 struct spdk_bdev *
235 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
236 {
237 	return desc->bdev;
238 }
239 
240 const char *
241 spdk_bdev_get_name(const struct spdk_bdev *bdev)
242 {
243 	return "test";
244 }
245 
246 const struct spdk_uuid *
247 spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
248 {
249 	return &bdev->uuid;
250 }
251 
252 union spdk_bdev_nvme_ctratt
spdk_bdev_get_nvme_ctratt(struct spdk_bdev *bdev)
253 {
254 	return bdev->ctratt;
255 }
256 
257 static void
258 test_spdk_nvmf_subsystem_add_ns(void)
259 {
260 	struct spdk_nvmf_tgt tgt = {};
261 	struct spdk_nvmf_subsystem subsystem = {
262 		.max_nsid = 1024,
263 		.ns = NULL,
264 		.tgt = &tgt,
265 	};
266 	struct spdk_nvmf_ns_opts ns_opts;
267 	uint32_t nsid;
268 	int rc;
269 
270 	subsystem.ns = calloc(subsystem.max_nsid, sizeof(struct spdk_nvmf_subsystem_ns *));
271 	SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);
272 	subsystem.ana_group = calloc(subsystem.max_nsid, sizeof(uint32_t));
273 	SPDK_CU_ASSERT_FATAL(subsystem.ana_group != NULL);
274 
275 	tgt.max_subsystems = 1024;
276 	RB_INIT(&tgt.subsystems);
277 
278 	/* Request a specific NSID */
279 	spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
280 	ns_opts.nsid = 5;
281 	nsid = spdk_nvmf_subsystem_add_ns_ext(&subsystem, "bdev2", &ns_opts, sizeof(ns_opts), NULL);
282 	CU_ASSERT(nsid == 5);
283 	CU_ASSERT(subsystem.max_nsid == 1024);
284 	SPDK_CU_ASSERT_FATAL(subsystem.ns[nsid - 1] != NULL);
285 	CU_ASSERT(subsystem.ns[nsid - 1]->bdev == &g_bdevs[1]);
286 
287 	/* Request an NSID that is already in use */
288 	spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
289 	ns_opts.nsid = 5;
290 	nsid = spdk_nvmf_subsystem_add_ns_ext(&subsystem, "bdev2", &ns_opts, sizeof(ns_opts), NULL);
291 	CU_ASSERT(nsid == 0);
292 	CU_ASSERT(subsystem.max_nsid == 1024);
293 
294 	/* Request 0xFFFFFFFF (invalid NSID, reserved for broadcast) */
295 	spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
296 	ns_opts.nsid = 0xFFFFFFFF;
297 	nsid = spdk_nvmf_subsystem_add_ns_ext(&subsystem, "bdev2", &ns_opts, sizeof(ns_opts), NULL);
298 	CU_ASSERT(nsid == 0);
299 	CU_ASSERT(subsystem.max_nsid == 1024);
300 
301 	rc = spdk_nvmf_subsystem_remove_ns(&subsystem, 5);
302 	CU_ASSERT(rc == 0);
303 
304 	free(subsystem.ns);
305 	free(subsystem.ana_group);
306 }
307 
308 static void
309 test_spdk_nvmf_subsystem_add_fdp_ns(void)
310 {
311 	struct spdk_nvmf_tgt tgt = {};
312 	struct spdk_nvmf_subsystem subsystem = {
313 		.max_nsid = 1024,
314 		.ns = NULL,
315 		.tgt = &tgt,
316 	};
317 	struct spdk_nvmf_ns_opts ns_opts;
318 	uint32_t nsid;
319 	int rc;
320 
321 	subsystem.ns = calloc(subsystem.max_nsid, sizeof(struct spdk_nvmf_subsystem_ns *));
322 	SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);
323 	subsystem.ana_group = calloc(subsystem.max_nsid, sizeof(uint32_t));
324 	SPDK_CU_ASSERT_FATAL(subsystem.ana_group != NULL);
325 
326 	tgt.max_subsystems = 1024;
327 	RB_INIT(&tgt.subsystems);
328 
329 	CU_ASSERT(subsystem.fdp_supported == false);
330 
331 	/* Add an FDP-capable namespace to the subsystem */
332 	spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
333 	ns_opts.nsid = 3;
334 	nsid = spdk_nvmf_subsystem_add_ns_ext(&subsystem, "bdev3", &ns_opts, sizeof(ns_opts), NULL);
335 	CU_ASSERT(nsid == 3);
336 	CU_ASSERT(subsystem.max_nsid == 1024);
337 	SPDK_CU_ASSERT_FATAL(subsystem.ns[nsid - 1] != NULL);
338 	CU_ASSERT(subsystem.ns[nsid - 1]->bdev == &g_bdevs[2]);
339 	CU_ASSERT(subsystem.fdp_supported == true);
340 
341 	/* Try to add a non-FDP-capable namespace to the subsystem */
342 	spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
343 	ns_opts.nsid = 5;
344 	nsid = spdk_nvmf_subsystem_add_ns_ext(&subsystem, "bdev2", &ns_opts, sizeof(ns_opts), NULL);
345 	CU_ASSERT(nsid == 0);
346 	CU_ASSERT(subsystem.max_nsid == 1024);
347 	CU_ASSERT(subsystem.fdp_supported == true);
348 
349 	/* Remove last FDP namespace from the subsystem */
350 	rc = spdk_nvmf_subsystem_remove_ns(&subsystem, 3);
351 	CU_ASSERT(rc == 0);
352 	CU_ASSERT(subsystem.fdp_supported == false);
353 
354 	free(subsystem.ns);
355 	free(subsystem.ana_group);
356 }
357 
358 static void
359 nvmf_test_create_subsystem(void)
360 {
361 	struct spdk_nvmf_tgt tgt = {};
362 	char nqn[256];
363 	struct spdk_nvmf_subsystem *subsystem;
364 	int rc;
365 
366 	tgt.max_subsystems = 1024;
367 	tgt.subsystem_ids = spdk_bit_array_create(tgt.max_subsystems);
368 	RB_INIT(&tgt.subsystems);
369 
370 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
371 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
372 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
373 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
374 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
375 	CU_ASSERT(rc == 0);
376 
377 	/* Valid name with complex reverse domain */
378 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk-full--rev-domain.name:subsystem1");
379 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
380 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
381 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
382 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
383 	CU_ASSERT(rc == 0);
384 
385 	/* Valid name, same NQN reused after the previous subsystem was destroyed */
386 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
387 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
388 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
389 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
390 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
391 	CU_ASSERT(rc == 0);
392 
393 	/* Invalid name, no user supplied string */
394 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
395 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
396 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
397 
398 	/* Valid name, only contains top-level domain name */
399 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
400 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
401 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
402 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
403 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
404 	CU_ASSERT(rc == 0);
405 
406 	/* Invalid name, domain label > 63 characters */
407 	snprintf(nqn, sizeof(nqn),
408 		 "nqn.2016-06.io.abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz:sub");
409 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
410 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
411 
412 	/* Invalid name, domain label starts with digit */
413 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.3spdk:sub");
414 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
415 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
416 
417 	/* Invalid name, domain label starts with - */
418 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.-spdk:subsystem1");
419 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
420 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
421 
422 	/* Invalid name, domain label ends with - */
423 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk-:subsystem1");
424 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
425 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
426 
427 	/* Invalid name, domain label with multiple consecutive periods */
428 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io..spdk:subsystem1");
429 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
430 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
431 
432 	/* Longest valid name */
433 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
434 	memset(nqn + strlen(nqn), 'a', 223 - strlen(nqn));
435 	nqn[223] = '\0';
436 	CU_ASSERT(strlen(nqn) == 223);
437 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
438 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
439 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
440 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
441 	CU_ASSERT(rc == 0);
442 
443 	/* Invalid name, too long */
444 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
445 	memset(nqn + strlen(nqn), 'a', 224 - strlen(nqn));
446 	nqn[224] = '\0';
447 	CU_ASSERT(strlen(nqn) == 224);
448 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
449 	CU_ASSERT(subsystem == NULL);
450 
451 	/* Valid name using uuid format */
452 	snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:ff9b6406-0fc8-4779-80ca-4dca14bda0d2");
453 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
454 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
455 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
456 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
457 	CU_ASSERT(rc == 0);
458 
459 	/* Invalid name, user string contains an invalid UTF-8 character */
460 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:\xFFsubsystem1");
461 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
462 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
463 
464 	/* Valid name with non-ASCII but valid UTF-8 characters */
465 	snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:\xe1\x8a\x88subsystem1\xca\x80");
466 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
467 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
468 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
469 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
470 	CU_ASSERT(rc == 0);
471 
472 	/* Invalid uuid (too long) */
473 	snprintf(nqn, sizeof(nqn),
474 		 "nqn.2014-08.org.nvmexpress:uuid:ff9b6406-0fc8-4779-80ca-4dca14bda0d2aaaa");
475 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
476 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
477 
478 	/* Invalid uuid (dashes placed incorrectly) */
479 	snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:ff9b64-060fc8-4779-80ca-4dca14bda0d2");
480 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
481 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
482 
483 	/* Invalid uuid (invalid characters in uuid) */
484 	snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:ff9hg406-0fc8-4779-80ca-4dca14bda0d2");
485 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
486 	SPDK_CU_ASSERT_FATAL(subsystem == NULL);
487 
488 	spdk_bit_array_free(&tgt.subsystem_ids);
489 }
490 
491 static void
492 test_spdk_nvmf_subsystem_set_sn(void)
493 {
494 	struct spdk_nvmf_subsystem subsystem = {};
495 
496 	/* Basic valid serial number */
497 	CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "abcd xyz") == 0);
498 	CU_ASSERT(strcmp(subsystem.sn, "abcd xyz") == 0);
499 
500 	/* Exactly 20 characters (valid) */
501 	CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "12345678901234567890") == 0);
502 	CU_ASSERT(strcmp(subsystem.sn, "12345678901234567890") == 0);
503 
504 	/* 21 characters (too long, invalid) */
505 	CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "123456789012345678901") < 0);
506 
507 	/* Non-ASCII characters (invalid) */
508 	CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "abcd\txyz") < 0);
509 }
510 
511 static void
512 test_spdk_nvmf_ns_visible(void)
513 {
514 	struct spdk_nvmf_subsystem subsystem = {};
515 	struct spdk_nvmf_ns ns1 = {
516 		.nsid = 1,
517 		.anagrpid = 1,
518 		.always_visible = false
519 	};
520 	struct spdk_nvmf_ns ns2 = {
521 		.nsid = 2,
522 		.anagrpid = 2,
523 		.always_visible = false
524 	};
525 	struct spdk_nvmf_ns *ns3;
526 	struct spdk_nvmf_ctrlr ctrlrA = {
527 		.subsys = &subsystem
528 	};
529 	struct spdk_nvmf_ctrlr ctrlrB = {
530 		.subsys = &subsystem
531 	};
532 	struct spdk_thread *thread;
533 	struct spdk_nvmf_tgt tgt = {};
534 	uint32_t nsid;
535 	int rc;
536 
537 	thread = spdk_get_thread();
538 	SPDK_CU_ASSERT_FATAL(thread != NULL);
539 	ctrlrA.thread = thread;
540 	ctrlrB.thread = thread;
541 
542 	subsystem.max_nsid = 1024;
543 	subsystem.ns = calloc(subsystem.max_nsid, sizeof(struct spdk_nvmf_ns *));
544 	SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);
545 	subsystem.ana_group = calloc(subsystem.max_nsid, sizeof(uint32_t));
546 	SPDK_CU_ASSERT_FATAL(subsystem.ana_group != NULL);
547 	TAILQ_INIT(&tgt.transports);
548 	subsystem.tgt = &tgt;
549 
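	/* Populate namespace slots at index nsid - 1: ns1 serves NSID 2, ns2 NSID 3, ns3 NSID 4. */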
550 	subsystem.ns[1] = &ns1;
551 	subsystem.ns[2] = &ns2;
552 	ns3 = calloc(1, sizeof(*ns3));
553 	SPDK_CU_ASSERT_FATAL(ns3 != NULL);
554 	ns3->nsid = 3;
555 	ns3->anagrpid = 3;
556 	subsystem.ana_group[ns3->anagrpid - 1] = 1;
557 	subsystem.ns[3] = ns3;
558 
559 	snprintf(ctrlrA.hostnqn, sizeof(ctrlrA.hostnqn), "nqn.2016-06.io.spdk:host1");
560 	ctrlrA.visible_ns = spdk_bit_array_create(subsystem.max_nsid);
561 	SPDK_CU_ASSERT_FATAL(ctrlrA.visible_ns != NULL);
562 	snprintf(ctrlrB.hostnqn, sizeof(ctrlrB.hostnqn), "nqn.2016-06.io.spdk:host2");
563 	ctrlrB.visible_ns = spdk_bit_array_create(subsystem.max_nsid);
564 	SPDK_CU_ASSERT_FATAL(ctrlrB.visible_ns != NULL);
565 
566 	/* Add two controllers ctrlrA and ctrlrB */
567 	TAILQ_INIT(&subsystem.ctrlrs);
568 	TAILQ_INSERT_TAIL(&subsystem.ctrlrs, &ctrlrA, link);
569 	TAILQ_INSERT_TAIL(&subsystem.ctrlrs, &ctrlrB, link);
570 
571 	/* Invalid host nqn */
572 	nsid = 1;
573 	rc = spdk_nvmf_ns_add_host(&subsystem, nsid, "", 0);
574 	CU_ASSERT(rc == -EINVAL);
575 	rc = spdk_nvmf_ns_add_host(&subsystem, nsid, NULL, 0);
576 	CU_ASSERT(rc == -EINVAL);
577 	rc = spdk_nvmf_ns_remove_host(&subsystem, nsid, NULL, 0);
578 	CU_ASSERT(rc == -EINVAL);
579 
580 	/* Invalid nsid */
581 	nsid = 0;
582 	rc = spdk_nvmf_ns_add_host(&subsystem, nsid, ctrlrA.hostnqn, 0);
583 	CU_ASSERT(rc == -EINVAL);
584 	rc = spdk_nvmf_ns_remove_host(&subsystem, nsid, ctrlrA.hostnqn, 0);
585 	CU_ASSERT(rc == -EINVAL);
586 
587 	/* Unallocated ns */
588 	nsid = 1;
589 	rc = spdk_nvmf_ns_add_host(&subsystem, nsid, ctrlrA.hostnqn, 0);
590 	CU_ASSERT(rc == -ENOENT);
591 	rc = spdk_nvmf_ns_remove_host(&subsystem, nsid, ctrlrA.hostnqn, 0);
592 	CU_ASSERT(rc == -ENOENT);
593 
594 	/* While always_visible (visible to any host) is active, individual host control is not allowed */
595 	ns1.always_visible = true;
596 	nsid = 2;
597 	rc = spdk_nvmf_ns_add_host(&subsystem, nsid, ctrlrA.hostnqn, 0);
598 	CU_ASSERT(rc == -EPERM);
599 	rc = spdk_nvmf_ns_remove_host(&subsystem, nsid, ctrlrA.hostnqn, 0);
600 	CU_ASSERT(rc == -EPERM);
601 	ns1.always_visible = false;
602 
603 	/* Attach ctrlrA to namespace 2 hot + cold */
604 	nsid = 2;
605 	g_async_event_ctrlr = NULL;
606 	g_ns_changed_ctrlr = NULL;
607 	g_ns_changed_nsid = 0;
608 	rc = spdk_nvmf_ns_add_host(&subsystem, nsid, ctrlrA.hostnqn, 0);
609 	CU_ASSERT(rc == 0);
610 	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) != NULL);
611 	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrB.hostnqn) == NULL);
612 	CU_ASSERT(nvmf_ns_find_host(&ns2, ctrlrA.hostnqn) == NULL);
613 	CU_ASSERT(nvmf_ns_find_host(&ns2, ctrlrB.hostnqn) == NULL);
614 	CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
615 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
616 	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid));
617 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid));
618 	/* check last ns_changed */
619 	CU_ASSERT(g_ns_changed_ctrlr == &ctrlrA);
620 	CU_ASSERT(g_ns_changed_nsid == nsid);
621 	/* check async_event */
622 	poll_threads();
623 	CU_ASSERT(g_async_event_ctrlr == &ctrlrA);
624 
625 	/* Attach ctrlrA to namespace 2 again => should not create any ns change/async event */
626 	g_async_event_ctrlr = NULL;
627 	g_ns_changed_ctrlr = NULL;
628 	g_ns_changed_nsid = 0;
629 	rc = spdk_nvmf_ns_add_host(&subsystem, nsid, ctrlrA.hostnqn, 0);
630 	CU_ASSERT(rc == 0);
631 	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) != NULL);
632 	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrB.hostnqn) == NULL);
633 	CU_ASSERT(nvmf_ns_find_host(&ns2, ctrlrA.hostnqn) == NULL);
634 	CU_ASSERT(nvmf_ns_find_host(&ns2, ctrlrB.hostnqn) == NULL);
635 	CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
636 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
637 	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid));
638 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid));
639 	/* check last ns_changed */
640 	CU_ASSERT(g_ns_changed_ctrlr == NULL);
641 	CU_ASSERT(g_ns_changed_nsid == 0);
642 	/* check async_event */
643 	poll_threads();
644 	CU_ASSERT(g_async_event_ctrlr == NULL);
645 
646 	/* Detach ctrlrA from namespace 2 hot + cold */
647 	g_async_event_ctrlr = NULL;
648 	g_ns_changed_ctrlr = NULL;
649 	g_ns_changed_nsid = 0;
650 	rc = spdk_nvmf_ns_remove_host(&subsystem, nsid, ctrlrA.hostnqn, 0);
651 	CU_ASSERT(rc == 0);
652 	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
653 	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrB.hostnqn) == NULL);
654 	CU_ASSERT(nvmf_ns_find_host(&ns2, ctrlrA.hostnqn) == NULL);
655 	CU_ASSERT(nvmf_ns_find_host(&ns2, ctrlrB.hostnqn) == NULL);
656 	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
657 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
658 	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid));
659 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid));
660 	/* check last ns_changed */
661 	CU_ASSERT(g_ns_changed_ctrlr == &ctrlrA);
662 	CU_ASSERT(g_ns_changed_nsid == nsid);
663 	/* check async_event */
664 	poll_threads();
665 	CU_ASSERT(g_async_event_ctrlr == &ctrlrA);
666 
667 	/* Detach ctrlrA from namespace 2 again hot + cold */
668 	g_async_event_ctrlr = NULL;
669 	g_ns_changed_ctrlr = NULL;
670 	g_ns_changed_nsid = 0;
671 	rc = spdk_nvmf_ns_remove_host(&subsystem, nsid, ctrlrA.hostnqn, 0);
672 	CU_ASSERT(rc == 0);
673 	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
674 	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrB.hostnqn) == NULL);
675 	CU_ASSERT(nvmf_ns_find_host(&ns2, ctrlrA.hostnqn) == NULL);
676 	CU_ASSERT(nvmf_ns_find_host(&ns2, ctrlrB.hostnqn) == NULL);
677 	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
678 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
679 	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid));
680 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid));
681 	/* check last ns_changed */
682 	CU_ASSERT(g_ns_changed_ctrlr == NULL);
683 	CU_ASSERT(g_ns_changed_nsid == 0);
684 	/* check async_event */
685 	poll_threads();
686 	CU_ASSERT(g_async_event_ctrlr == NULL);
687 
688 	/* Attach ctrlrA to namespace 4 hot + cold => remove ns */
689 	nsid = 4;
690 	g_async_event_ctrlr = NULL;
691 	g_ns_changed_ctrlr = NULL;
692 	g_ns_changed_nsid = 0;
693 	rc = spdk_nvmf_ns_add_host(&subsystem, nsid, ctrlrA.hostnqn, 0);
694 	CU_ASSERT(rc == 0);
695 	CU_ASSERT(nvmf_ns_find_host(ns3, ctrlrA.hostnqn) != NULL);
696 	CU_ASSERT(nvmf_ns_find_host(ns3, ctrlrB.hostnqn) == NULL);
697 	CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
698 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
699 	/* check last ns_changed */
700 	CU_ASSERT(g_ns_changed_ctrlr == &ctrlrA);
701 	CU_ASSERT(g_ns_changed_nsid == nsid);
702 	/* check async_event */
703 	poll_threads();
704 	CU_ASSERT(g_async_event_ctrlr == &ctrlrA);
705 
706 	g_async_event_ctrlr = NULL;
707 	g_ns_changed_ctrlr = NULL;
708 	g_ns_changed_nsid = 0;
709 	rc = spdk_nvmf_subsystem_remove_ns(&subsystem, nsid);
710 	CU_ASSERT(rc == 0);
711 	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
712 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
713 	/* check last ns_changed */
714 	CU_ASSERT(g_ns_changed_ctrlr == &ctrlrA);
715 	CU_ASSERT(g_ns_changed_nsid == nsid);
716 
717 	spdk_bit_array_free(&ctrlrA.visible_ns);
718 	spdk_bit_array_free(&ctrlrB.visible_ns);
719 	free(subsystem.ana_group);
720 	free(subsystem.ns);
721 }
722 
723 /*
724  * Reservation Unit Test Configuration
725  *       --------             --------    --------
726  *      | Host A |           | Host B |  | Host C |
727  *       --------             --------    --------
728  *      /        \               |           |
729  *  --------   --------       -------     -------
730  * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
731  *  --------   --------       -------     -------
732  *    \           \              /           /
733  *     \           \            /           /
734  *      \           \          /           /
735  *      --------------------------------------
736  *     |            NAMESPACE 1               |
737  *      --------------------------------------
738  */
739 static struct spdk_nvmf_subsystem g_subsystem;
740 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
741 static struct spdk_nvmf_ns g_ns;
742 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
743 
744 void
745 nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr)
746 {
747 }
748 
749 static void
750 ut_reservation_init(void)
751 {
752 
753 	TAILQ_INIT(&g_subsystem.ctrlrs);
754 
755 	memset(&g_ns, 0, sizeof(g_ns));
756 	TAILQ_INIT(&g_ns.registrants);
757 	g_ns.subsystem = &g_subsystem;
758 	g_ns.ptpl_file = NULL;
759 	g_ns.ptpl_activated = false;
760 	spdk_uuid_generate(&g_bdevs[0].uuid);
761 	g_ns.bdev = &g_bdevs[0];
762 
763 	/* Host A has two controllers */
764 	spdk_uuid_generate(&g_ctrlr1_A.hostid);
765 	TAILQ_INIT(&g_ctrlr1_A.log_head);
766 	g_ctrlr1_A.subsys = &g_subsystem;
767 	g_ctrlr1_A.num_avail_log_pages = 0;
768 	TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr1_A, link);
769 	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
770 	TAILQ_INIT(&g_ctrlr2_A.log_head);
771 	g_ctrlr2_A.subsys = &g_subsystem;
772 	g_ctrlr2_A.num_avail_log_pages = 0;
773 	TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr2_A, link);
774 
775 	/* Host B has 1 controller */
776 	spdk_uuid_generate(&g_ctrlr_B.hostid);
777 	TAILQ_INIT(&g_ctrlr_B.log_head);
778 	g_ctrlr_B.subsys = &g_subsystem;
779 	g_ctrlr_B.num_avail_log_pages = 0;
780 	TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr_B, link);
781 
782 	/* Host C has 1 controller */
783 	spdk_uuid_generate(&g_ctrlr_C.hostid);
784 	TAILQ_INIT(&g_ctrlr_C.log_head);
785 	g_ctrlr_C.subsys = &g_subsystem;
786 	g_ctrlr_C.num_avail_log_pages = 0;
787 	TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr_C, link);
788 }
789 
790 static void
791 ut_reservation_deinit(void)
792 {
793 	struct spdk_nvmf_registrant *reg, *tmp;
794 	struct spdk_nvmf_reservation_log *log, *log_tmp;
795 	struct spdk_nvmf_ctrlr *ctrlr, *ctrlr_tmp;
796 
797 	TAILQ_FOREACH_SAFE(reg, &g_ns.registrants, link, tmp) {
798 		TAILQ_REMOVE(&g_ns.registrants, reg, link);
799 		free(reg);
800 	}
801 	TAILQ_FOREACH_SAFE(log, &g_ctrlr1_A.log_head, link, log_tmp) {
802 		TAILQ_REMOVE(&g_ctrlr1_A.log_head, log, link);
803 		free(log);
804 	}
805 	g_ctrlr1_A.num_avail_log_pages = 0;
806 	TAILQ_FOREACH_SAFE(log, &g_ctrlr2_A.log_head, link, log_tmp) {
807 		TAILQ_REMOVE(&g_ctrlr2_A.log_head, log, link);
808 		free(log);
809 	}
810 	g_ctrlr2_A.num_avail_log_pages = 0;
811 	TAILQ_FOREACH_SAFE(log, &g_ctrlr_B.log_head, link, log_tmp) {
812 		TAILQ_REMOVE(&g_ctrlr_B.log_head, log, link);
813 		free(log);
814 	}
815 	g_ctrlr_B.num_avail_log_pages = 0;
816 	TAILQ_FOREACH_SAFE(log, &g_ctrlr_C.log_head, link, log_tmp) {
817 		TAILQ_REMOVE(&g_ctrlr_C.log_head, log, link);
818 		free(log);
819 	}
820 	g_ctrlr_C.num_avail_log_pages = 0;
821 
822 	TAILQ_FOREACH_SAFE(ctrlr, &g_subsystem.ctrlrs, link, ctrlr_tmp) {
823 		TAILQ_REMOVE(&g_subsystem.ctrlrs, ctrlr, link);
824 	}
825 }
826 
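/*
 * Build a request with a single data buffer of the given length plus
 * separately allocated command and response capsules.
 */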
827 static struct spdk_nvmf_request *
828 ut_reservation_build_req(uint32_t length)
829 {
830 	struct spdk_nvmf_request *req;
831 
832 	req = calloc(1, sizeof(*req));
833 	assert(req != NULL);
834 
835 	SPDK_IOV_ONE(req->iov, &req->iovcnt, calloc(1, length), length);
836 	assert(req->iov[0].iov_base != NULL);
837 	req->length = length;
838 
839 	req->cmd = (union nvmf_h2c_msg *)calloc(1, sizeof(union nvmf_h2c_msg));
840 	assert(req->cmd != NULL);
841 
842 	req->rsp = (union nvmf_c2h_msg *)calloc(1, sizeof(union nvmf_c2h_msg));
843 	assert(req->rsp != NULL);
844 
845 	return req;
846 }
847 
848 static void
849 ut_reservation_free_req(struct spdk_nvmf_request *req)
850 {
851 	free(req->cmd);
852 	free(req->rsp);
853 	free(req->iov[0].iov_base);
854 	free(req);
855 }
856 
857 static void
858 ut_reservation_build_register_request(struct spdk_nvmf_request *req,
859 				      uint8_t rrega, uint8_t iekey,
860 				      uint8_t cptpl, uint64_t crkey,
861 				      uint64_t nrkey)
862 {
863 	struct spdk_nvme_reservation_register_data key;
864 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
865 
866 	key.crkey = crkey;
867 	key.nrkey = nrkey;
868 	cmd->cdw10 = 0;
869 	cmd->cdw10_bits.resv_register.rrega = rrega;
870 	cmd->cdw10_bits.resv_register.iekey = iekey;
871 	cmd->cdw10_bits.resv_register.cptpl = cptpl;
872 	memcpy(req->iov[0].iov_base, &key, sizeof(key));
873 }
874 
875 static void
876 ut_reservation_build_acquire_request(struct spdk_nvmf_request *req,
877 				     uint8_t racqa, uint8_t iekey,
878 				     uint8_t rtype, uint64_t crkey,
879 				     uint64_t prkey)
880 {
881 	struct spdk_nvme_reservation_acquire_data key;
882 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
883 
884 	key.crkey = crkey;
885 	key.prkey = prkey;
886 	cmd->cdw10 = 0;
887 	cmd->cdw10_bits.resv_acquire.racqa = racqa;
888 	cmd->cdw10_bits.resv_acquire.iekey = iekey;
889 	cmd->cdw10_bits.resv_acquire.rtype = rtype;
890 	memcpy(req->iov[0].iov_base, &key, sizeof(key));
891 }
892 
893 static void
894 ut_reservation_build_release_request(struct spdk_nvmf_request *req,
895 				     uint8_t rrela, uint8_t iekey,
896 				     uint8_t rtype, uint64_t crkey)
897 {
898 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
899 
900 	cmd->cdw10 = 0;
901 	cmd->cdw10_bits.resv_release.rrela = rrela;
902 	cmd->cdw10_bits.resv_release.iekey = iekey;
903 	cmd->cdw10_bits.resv_release.rtype = rtype;
904 	memcpy(req->iov[0].iov_base, &crkey, sizeof(crkey));
905 }
906 
907 /*
908  * Construct four registrants for other test cases.
909  *
910  * g_ctrlr1_A registers with key 0xa1.
911  * g_ctrlr2_A shares that registration (key 0xa1) because it has the same Host Identifier.
912  * g_ctrlr_B registers with key 0xb1.
913  * g_ctrlr_C registers with key 0xc1.
914  */
915 static void
916 ut_reservation_build_registrants(void)
917 {
918 	struct spdk_nvmf_request *req;
919 	struct spdk_nvme_cpl *rsp;
920 	struct spdk_nvmf_registrant *reg;
921 	uint32_t gen;
922 
923 	req = ut_reservation_build_req(16);
924 	rsp = &req->rsp->nvme_cpl;
925 	SPDK_CU_ASSERT_FATAL(req != NULL);
926 	gen = g_ns.gen;
927 
928 	/* TEST CASE: g_ctrlr1_A register with a new key */
929 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
930 					      0, 0, 0, 0xa1);
931 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
932 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
933 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
934 	SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa1);
935 	SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 1);
936 
937 	/* TEST CASE: g_ctrlr2_A attempts to register a new key. Because it has the same
938 	 * Host Identifier as g_ctrlr1_A, it already shares g_ctrlr1_A's registration (0xa1).
939 	 */
940 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
941 					      0, 0, 0, 0xa2);
942 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr2_A, req);
943 	/* Registering any key other than 0xa1 results in a reservation conflict */
944 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
945 
946 	/* g_ctrlr_B registers with a new key */
947 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
948 					      0, 0, 0, 0xb1);
949 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
950 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
951 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
952 	SPDK_CU_ASSERT_FATAL(reg->rkey == 0xb1);
953 	SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 2);
954 
955 	/* g_ctrlr_C registers with a new key */
956 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
957 					      0, 0, 0, 0xc1);
958 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_C, req);
959 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
960 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
961 	SPDK_CU_ASSERT_FATAL(reg->rkey == 0xc1);
962 	SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 3);
963 
964 	ut_reservation_free_req(req);
965 }
966 
967 static void
968 test_reservation_register(void)
969 {
970 	struct spdk_nvmf_request *req;
971 	struct spdk_nvme_cpl *rsp;
972 	struct spdk_nvmf_registrant *reg;
973 	uint32_t gen;
974 
975 	ut_reservation_init();
976 
977 	req = ut_reservation_build_req(16);
978 	rsp = &req->rsp->nvme_cpl;
979 	SPDK_CU_ASSERT_FATAL(req != NULL);
980 
981 	ut_reservation_build_registrants();
982 
983 	/* TEST CASE: Replace g_ctrlr1_A with a new key */
984 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REPLACE_KEY,
985 					      0, 0, 0xa1, 0xa11);
986 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
987 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
988 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
989 	SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa11);
990 
991 	/* TEST CASE: Host A with g_ctrlr1_A gets a reservation of
992 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE
993 	 */
994 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
995 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xa11, 0x0);
996 	gen = g_ns.gen;
997 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
998 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
999 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
1000 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
1001 	SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0xa11);
1002 	SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
1003 	SPDK_CU_ASSERT_FATAL(g_ns.gen == gen);
1004 
1005 	/* TEST CASE: g_ctrlr_C unregister with IEKEY enabled */
1006 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
1007 					      1, 0, 0, 0);
1008 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_C, req);
1009 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1010 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
1011 	SPDK_CU_ASSERT_FATAL(reg == NULL);
1012 
1013 	/* TEST CASE: g_ctrlr_B unregisters with the correct key */
1014 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
1015 					      0, 0, 0xb1, 0);
1016 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
1017 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1018 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
1019 	SPDK_CU_ASSERT_FATAL(reg == NULL);
1020 
1021 	/* TEST CASE: No registrant now; g_ctrlr_B replaces its key with IEKEY disabled */
1022 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REPLACE_KEY,
1023 					      0, 0, 0, 0xb1);
1024 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
1025 	SPDK_CU_ASSERT_FATAL(rsp->status.sc != SPDK_NVME_SC_SUCCESS);
1026 
1027 	/* TEST CASE: No registrant now; g_ctrlr_B replaces its key with IEKEY enabled */
1028 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REPLACE_KEY,
1029 					      1, 0, 0, 0xb1);
1030 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
1031 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1032 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
1033 	SPDK_CU_ASSERT_FATAL(reg != NULL);
1034 
1035 	/* TEST CASE: g_ctrlr_B replaces its key with IEKEY enabled and a wrong crkey */
1036 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REPLACE_KEY,
1037 					      1, 0, 0xff, 0xb2);
1038 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
1039 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1040 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
1041 	SPDK_CU_ASSERT_FATAL(reg != NULL);
1042 	SPDK_CU_ASSERT_FATAL(reg->rkey == 0xb2);
1043 
1044 	/* TEST CASE: g_ctrlr1_A unregisters with the correct key;
1045 	 * the reservation should be removed as well.
1046 	 */
1047 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
1048 					      0, 0, 0xa11, 0);
1049 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
1050 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1051 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
1052 	SPDK_CU_ASSERT_FATAL(reg == NULL);
1053 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
1054 	SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0);
1055 	SPDK_CU_ASSERT_FATAL(g_ns.holder == NULL);
1056 
1057 	ut_reservation_free_req(req);
1058 	ut_reservation_deinit();
1059 }
1060 
1061 static void
1062 test_reservation_register_with_ptpl(void)
1063 {
1064 	struct spdk_nvmf_request *req;
1065 	struct spdk_nvme_cpl *rsp;
1066 	struct spdk_nvmf_registrant *reg;
1067 	bool update_sgroup = false;
1068 	int rc;
1069 	struct spdk_nvmf_reservation_info info;
1070 
1071 	ut_reservation_init();
1072 
1073 	req = ut_reservation_build_req(16);
1074 	rsp = &req->rsp->nvme_cpl;
1075 	SPDK_CU_ASSERT_FATAL(req != NULL);
1076 
1077 	/* TEST CASE: No persistent file; registering with PTPL enabled will fail */
1078 	g_ns.ptpl_file = NULL;
1079 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
1080 					      SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
1081 	update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
1082 	SPDK_CU_ASSERT_FATAL(update_sgroup == false);
1083 	SPDK_CU_ASSERT_FATAL(rsp->status.sc != SPDK_NVME_SC_SUCCESS);
1084 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
1085 	SPDK_CU_ASSERT_FATAL(reg == NULL);
1086 
1087 	/* TEST CASE: Enable PTPL */
1088 	g_ns.ptpl_file = "/tmp/Ns1PR.cfg";
1089 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
1090 					      SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
1091 	update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
1092 	SPDK_CU_ASSERT_FATAL(update_sgroup == true);
1093 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1094 	SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == true);
1095 	rc = nvmf_ns_update_reservation_info(&g_ns);
1096 	SPDK_CU_ASSERT_FATAL(rc == 0);
1097 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
1098 	SPDK_CU_ASSERT_FATAL(reg != NULL);
1099 	SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &reg->hostid));
1100 	/* Load reservation information from configuration file */
1101 	memset(&info, 0, sizeof(info));
1102 	rc = nvmf_ns_reservation_load(&g_ns, &info);
1103 	SPDK_CU_ASSERT_FATAL(rc == 0);
1104 	SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
1105 
1106 	/* TEST CASE: Disable PTPL */
1107 	rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
1108 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
1109 					      SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON, 0, 0xa1);
1110 	update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
1111 	SPDK_CU_ASSERT_FATAL(update_sgroup == true);
1112 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1113 	SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == false);
1114 	rc = nvmf_ns_update_reservation_info(&g_ns);
1115 	SPDK_CU_ASSERT_FATAL(rc == 0);
1116 	rc = nvmf_ns_reservation_load(&g_ns, &info);
1117 	SPDK_CU_ASSERT_FATAL(rc < 0);
1118 	unlink(g_ns.ptpl_file);
1119 
1120 	ut_reservation_free_req(req);
1121 	ut_reservation_deinit();
1122 }
1123 
1124 static void
1125 test_reservation_acquire_preempt_1(void)
1126 {
1127 	struct spdk_nvmf_request *req;
1128 	struct spdk_nvme_cpl *rsp;
1129 	struct spdk_nvmf_registrant *reg;
1130 	uint32_t gen;
1131 
1132 	ut_reservation_init();
1133 
1134 	req = ut_reservation_build_req(16);
1135 	rsp = &req->rsp->nvme_cpl;
1136 	SPDK_CU_ASSERT_FATAL(req != NULL);
1137 
1138 	ut_reservation_build_registrants();
1139 
1140 	gen = g_ns.gen;
1141 	/* ACQUIRE: Host A with g_ctrlr1_A acquires a reservation of
1142 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY.
1143 	 */
1144 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
1145 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1, 0x0);
1146 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
1147 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1148 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
1149 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1150 	SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0xa1);
1151 	SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
1152 	SPDK_CU_ASSERT_FATAL(g_ns.gen == gen);
1153 
1154 	/* TEST CASE: g_ctrlr1_A holds the reservation, g_ctrlr_B preempts g_ctrlr1_A,
1155 	 * and g_ctrlr1_A's registrant is unregistered.
1156 	 */
1157 	gen = g_ns.gen;
1158 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
1159 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xb1, 0xa1);
1160 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
1161 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1162 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
1163 	SPDK_CU_ASSERT_FATAL(reg == NULL);
1164 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
1165 	SPDK_CU_ASSERT_FATAL(reg != NULL);
1166 	SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
1167 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
1168 	SPDK_CU_ASSERT_FATAL(reg != NULL);
1169 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1170 	SPDK_CU_ASSERT_FATAL(g_ns.gen > gen);
1171 
1172 	/* TEST CASE: g_ctrlr_B holds the reservation, g_ctrlr_C preempts g_ctrlr_B
1173 	 * with a valid key and PRKEY set to 0; all registrants other than the host that
1174 	 * issued the command are unregistered.
1175 	 */
1176 	gen = g_ns.gen;
1177 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
1178 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xc1, 0x0);
1179 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_C, req);
1180 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1181 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr2_A.hostid);
1182 	SPDK_CU_ASSERT_FATAL(reg == NULL);
1183 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
1184 	SPDK_CU_ASSERT_FATAL(reg == NULL);
1185 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
1186 	SPDK_CU_ASSERT_FATAL(reg != NULL);
1187 	SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
1188 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1189 	SPDK_CU_ASSERT_FATAL(g_ns.gen > gen);
1190 
1191 	ut_reservation_free_req(req);
1192 	ut_reservation_deinit();
1193 }
1194 
1195 static void
1196 test_reservation_acquire_release_with_ptpl(void)
1197 {
1198 	struct spdk_nvmf_request *req;
1199 	struct spdk_nvme_cpl *rsp;
1200 	struct spdk_nvmf_registrant *reg;
1201 	bool update_sgroup = false;
1202 	struct spdk_uuid holder_uuid;
1203 	int rc;
1204 	struct spdk_nvmf_reservation_info info;
1205 
1206 	ut_reservation_init();
1207 
1208 	req = ut_reservation_build_req(16);
1209 	rsp = &req->rsp->nvme_cpl;
1210 	SPDK_CU_ASSERT_FATAL(req != NULL);
1211 
1212 	/* TEST CASE: Enable PTPL */
1213 	g_ns.ptpl_file = "/tmp/Ns1PR.cfg";
1214 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
1215 					      SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
1216 	update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
1217 	SPDK_CU_ASSERT_FATAL(update_sgroup == true);
1218 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1219 	SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == true);
1220 	rc = nvmf_ns_update_reservation_info(&g_ns);
1221 	SPDK_CU_ASSERT_FATAL(rc == 0);
1222 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
1223 	SPDK_CU_ASSERT_FATAL(reg != NULL);
1224 	SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &reg->hostid));
1225 	/* Load reservation information from configuration file */
1226 	memset(&info, 0, sizeof(info));
1227 	rc = nvmf_ns_reservation_load(&g_ns, &info);
1228 	SPDK_CU_ASSERT_FATAL(rc == 0);
1229 	SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
1230 
1231 	/* TEST CASE: Acquire the reservation */
1232 	rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
1233 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
1234 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1, 0x0);
1235 	update_sgroup = nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
1236 	SPDK_CU_ASSERT_FATAL(update_sgroup == true);
1237 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1238 	rc = nvmf_ns_update_reservation_info(&g_ns);
1239 	SPDK_CU_ASSERT_FATAL(rc == 0);
1240 	memset(&info, 0, sizeof(info));
1241 	rc = nvmf_ns_reservation_load(&g_ns, &info);
1242 	SPDK_CU_ASSERT_FATAL(rc == 0);
1243 	SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
1244 	SPDK_CU_ASSERT_FATAL(info.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1245 	SPDK_CU_ASSERT_FATAL(info.crkey == 0xa1);
1246 	spdk_uuid_parse(&holder_uuid, info.holder_uuid);
1247 	SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &holder_uuid));
1248 
1249 	/* TEST CASE: Release the reservation */
1250 	rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
1251 	ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
1252 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1);
1253 	update_sgroup = nvmf_ns_reservation_release(&g_ns, &g_ctrlr1_A, req);
1254 	SPDK_CU_ASSERT_FATAL(update_sgroup == true);
1255 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1256 	rc = nvmf_ns_update_reservation_info(&g_ns);
1257 	SPDK_CU_ASSERT_FATAL(rc == 0);
1258 	memset(&info, 0, sizeof(info));
1259 	rc = nvmf_ns_reservation_load(&g_ns, &info);
1260 	SPDK_CU_ASSERT_FATAL(rc == 0);
1261 	SPDK_CU_ASSERT_FATAL(info.rtype == 0);
1262 	SPDK_CU_ASSERT_FATAL(info.crkey == 0);
1263 	SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
1264 	unlink(g_ns.ptpl_file);
1265 
1266 	ut_reservation_free_req(req);
1267 	ut_reservation_deinit();
1268 }
1269 
1270 static void
1271 test_reservation_release(void)
1272 {
1273 	struct spdk_nvmf_request *req;
1274 	struct spdk_nvme_cpl *rsp;
1275 	struct spdk_nvmf_registrant *reg;
1276 
1277 	ut_reservation_init();
1278 
1279 	req = ut_reservation_build_req(16);
1280 	rsp = &req->rsp->nvme_cpl;
1281 	SPDK_CU_ASSERT_FATAL(req != NULL);
1282 
1283 	ut_reservation_build_registrants();
1284 
1285 	/* ACQUIRE: Host A with g_ctrlr1_A acquires a reservation of
1286 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS
1287 	 */
1288 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
1289 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xa1, 0x0);
1290 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
1291 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1292 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
1293 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1294 	SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
1295 
1296 	/* Test Case: Host B releases the reservation */
1297 	ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
1298 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xb1);
1299 	nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
1300 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1301 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
1302 	SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0);
1303 	SPDK_CU_ASSERT_FATAL(g_ns.holder == NULL);
1304 
1305 	/* Test Case: Host C clears the registrants */
1306 	ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_CLEAR, 0,
1307 					     0, 0xc1);
1308 	nvmf_ns_reservation_release(&g_ns, &g_ctrlr_C, req);
1309 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1310 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
1311 	SPDK_CU_ASSERT_FATAL(reg == NULL);
1312 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr2_A.hostid);
1313 	SPDK_CU_ASSERT_FATAL(reg == NULL);
1314 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
1315 	SPDK_CU_ASSERT_FATAL(reg == NULL);
1316 	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
1317 	SPDK_CU_ASSERT_FATAL(reg == NULL);
1318 
1319 	ut_reservation_free_req(req);
1320 	ut_reservation_deinit();
1321 }
1322 
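/*
 * Stubbed notification log: only count pages so the tests can check which
 * controllers received reservation notifications via num_avail_log_pages.
 */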
1323 void
1324 nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
1325 				  struct spdk_nvmf_ns *ns,
1326 				  enum spdk_nvme_reservation_notification_log_page_type type)
1327 {
1328 	ctrlr->num_avail_log_pages++;
1329 }
1330 
1331 static void
1332 test_reservation_unregister_notification(void)
1333 {
1334 	struct spdk_nvmf_request *req;
1335 	struct spdk_nvme_cpl *rsp;
1336 
1337 	ut_reservation_init();
1338 
1339 	req = ut_reservation_build_req(16);
1340 	SPDK_CU_ASSERT_FATAL(req != NULL);
1341 	rsp = &req->rsp->nvme_cpl;
1342 
1343 	ut_reservation_build_registrants();
1344 
1345 	/* ACQUIRE: Host B with g_ctrlr_B acquires a reservation of
1346 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
1347 	 */
1348 	rsp->status.sc = 0xff;
1349 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
1350 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
1351 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
1352 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1353 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1354 
1355 	/* Test Case: g_ctrlr_B holds the reservation and unregisters its registration.
1356 	 * A reservation released notification is sent to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C only for the
1357 	 * SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY
1358 	 * reservation types.
1359 	 */
1360 	rsp->status.sc = 0xff;
1361 	g_ctrlr1_A.num_avail_log_pages = 0;
1362 	g_ctrlr2_A.num_avail_log_pages = 0;
1363 	g_ctrlr_B.num_avail_log_pages = 5;
1364 	g_ctrlr_C.num_avail_log_pages = 0;
1365 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
1366 					      0, 0, 0xb1, 0);
1367 	nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
1368 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1369 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
1370 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
1371 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
1372 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
1373 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages);
1374 
1375 	ut_reservation_free_req(req);
1376 	ut_reservation_deinit();
1377 }
1378 
1379 static void
1380 test_reservation_release_notification(void)
1381 {
1382 	struct spdk_nvmf_request *req;
1383 	struct spdk_nvme_cpl *rsp;
1384 
1385 	ut_reservation_init();
1386 
1387 	req = ut_reservation_build_req(16);
1388 	SPDK_CU_ASSERT_FATAL(req != NULL);
1389 	rsp = &req->rsp->nvme_cpl;
1390 
1391 	ut_reservation_build_registrants();
1392 
1393 	/* ACQUIRE: Host B with g_ctrlr_B acquires a reservation of
1394 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
1395 	 */
1396 	rsp->status.sc = 0xff;
1397 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
1398 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
1399 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
1400 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1401 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1402 
1403 	/* Test Case: g_ctrlr_B holds the reservation and releases it.
1404 	 * A reservation released notification is sent to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C.
1405 	 */
1406 	rsp->status.sc = 0xff;
1407 	g_ctrlr1_A.num_avail_log_pages = 0;
1408 	g_ctrlr2_A.num_avail_log_pages = 0;
1409 	g_ctrlr_B.num_avail_log_pages = 5;
1410 	g_ctrlr_C.num_avail_log_pages = 0;
1411 	ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
1412 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1);
1413 	nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
1414 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1415 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
1416 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
1417 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
1418 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
1419 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages);
1420 
1421 	ut_reservation_free_req(req);
1422 	ut_reservation_deinit();
1423 }
1424 
1425 static void
1426 test_reservation_release_notification_write_exclusive(void)
1427 {
1428 	struct spdk_nvmf_request *req;
1429 	struct spdk_nvme_cpl *rsp;
1430 
1431 	ut_reservation_init();
1432 
1433 	req = ut_reservation_build_req(16);
1434 	SPDK_CU_ASSERT_FATAL(req != NULL);
1435 	rsp = &req->rsp->nvme_cpl;
1436 
1437 	ut_reservation_build_registrants();
1438 
1439 	/* ACQUIRE: Host B with g_ctrlr_B acquires a reservation of
1440 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE
1441 	 */
1442 	rsp->status.sc = 0xff;
1443 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
1444 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xb1, 0x0);
1445 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
1446 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1447 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
1448 
1449 	/* Test Case: g_ctrlr_B holds the reservation and releases it.
1450 	 * Because the reservation type is SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
1451 	 * no reservation notification occurs.
1452 	 */
1453 	rsp->status.sc = 0xff;
1454 	g_ctrlr1_A.num_avail_log_pages = 5;
1455 	g_ctrlr2_A.num_avail_log_pages = 5;
1456 	g_ctrlr_B.num_avail_log_pages = 5;
1457 	g_ctrlr_C.num_avail_log_pages = 5;
1458 	ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
1459 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xb1);
1460 	nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
1461 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1462 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
1463 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr1_A.num_avail_log_pages);
1464 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr2_A.num_avail_log_pages);
1465 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
1466 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_C.num_avail_log_pages);
1467 
1468 	ut_reservation_free_req(req);
1469 	ut_reservation_deinit();
1470 }
1471 
1472 static void
1473 test_reservation_clear_notification(void)
1474 {
1475 	struct spdk_nvmf_request *req;
1476 	struct spdk_nvme_cpl *rsp;
1477 
1478 	ut_reservation_init();
1479 
1480 	req = ut_reservation_build_req(16);
1481 	SPDK_CU_ASSERT_FATAL(req != NULL);
1482 	rsp = &req->rsp->nvme_cpl;
1483 
1484 	ut_reservation_build_registrants();
1485 
1486 	/* ACQUIRE: Host B (g_ctrlr_B) acquires a reservation of
1487 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY.
1488 	 */
1489 	rsp->status.sc = 0xff;
1490 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
1491 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
1492 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
1493 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1494 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1495 
1496 	/* Test case: g_ctrlr_B holds the reservation and clears it.
1497 	 * A Reservation Preempted notification is sent to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C.
1498 	 */
1499 	rsp->status.sc = 0xff;
1500 	g_ctrlr1_A.num_avail_log_pages = 0;
1501 	g_ctrlr2_A.num_avail_log_pages = 0;
1502 	g_ctrlr_B.num_avail_log_pages = 5;
1503 	g_ctrlr_C.num_avail_log_pages = 0;
1504 	ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_CLEAR, 0,
1505 					     0, 0xb1);
1506 	nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
1507 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1508 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
1509 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
1510 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
1511 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
1512 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages);
1513 
1514 	ut_reservation_free_req(req);
1515 	ut_reservation_deinit();
1516 }
1517 
1518 static void
1519 test_reservation_preempt_notification(void)
1520 {
1521 	struct spdk_nvmf_request *req;
1522 	struct spdk_nvme_cpl *rsp;
1523 
1524 	ut_reservation_init();
1525 
1526 	req = ut_reservation_build_req(16);
1527 	SPDK_CU_ASSERT_FATAL(req != NULL);
1528 	rsp = &req->rsp->nvme_cpl;
1529 
1530 	ut_reservation_build_registrants();
1531 
1532 	/* ACQUIRE: Host B (g_ctrlr_B) acquires a reservation of
1533 	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY.
1534 	 */
1535 	rsp->status.sc = 0xff;
1536 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
1537 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
1538 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
1539 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1540 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1541 
1542 	/* Test case: g_ctrlr_B holds the reservation and g_ctrlr_C preempts it.
1543 	 * g_ctrlr_B's registrant is unregistered and the reservation is preempted.
1544 	 * A Registration Preempted notification is sent to g_ctrlr_B.
1545 	 * A Reservation Preempted notification is sent to g_ctrlr1_A/g_ctrlr2_A.
1546 	 */
1547 	rsp->status.sc = 0xff;
1548 	g_ctrlr1_A.num_avail_log_pages = 0;
1549 	g_ctrlr2_A.num_avail_log_pages = 0;
1550 	g_ctrlr_B.num_avail_log_pages = 0;
1551 	g_ctrlr_C.num_avail_log_pages = 5;
1552 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
1553 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xc1, 0xb1);
1554 	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_C, req);
1555 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
1556 	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1557 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
1558 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
1559 	SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_B.num_avail_log_pages);
1560 	SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_C.num_avail_log_pages);
1561 
1562 	ut_reservation_free_req(req);
1563 	ut_reservation_deinit();
1564 }
1565 
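/*
 * Minimal poll group create/destroy callbacks for spdk_io_device_register();
 * the tests below only need the target registered as an io_device, not
 * functional poll groups.
 */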
1566 static int
1567 nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
1568 {
1569 	return 0;
1570 }
1571 
1572 static void
1573 nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf)
1574 {
1575 }
1576 
1577 static void
1578 test_spdk_nvmf_ns_event(void)
1579 {
1580 	struct spdk_nvmf_tgt tgt = {};
1581 	struct spdk_nvmf_subsystem subsystem = {
1582 		.max_nsid = 1024,
1583 		.ns = NULL,
1584 		.tgt = &tgt,
1585 		.state_changes = TAILQ_HEAD_INITIALIZER(subsystem.state_changes),
1586 	};
1587 	struct spdk_nvmf_ctrlr ctrlr = {
1588 		.subsys = &subsystem
1589 	};
1590 	struct spdk_nvmf_ns_opts ns_opts;
1591 	uint32_t nsid;
1592 	struct spdk_bdev *bdev;
1593 	struct spdk_thread *thread;
1594 
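	/* Make NSID 1 visible to this controller (visible_ns is indexed by nsid - 1) */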
1595 	ctrlr.visible_ns = spdk_bit_array_create(1);
1596 	spdk_bit_array_set(ctrlr.visible_ns, 0);
1597 
1598 	thread = spdk_get_thread();
1599 	SPDK_CU_ASSERT_FATAL(thread != NULL);
1600 
1601 	subsystem.ns = calloc(subsystem.max_nsid, sizeof(struct spdk_nvmf_subsystem_ns *));
1602 	SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);
1603 	subsystem.ana_group = calloc(subsystem.max_nsid, sizeof(uint32_t));
1604 	SPDK_CU_ASSERT_FATAL(subsystem.ana_group != NULL);
1605 
1606 	tgt.max_subsystems = 1024;
1607 	tgt.subsystem_ids = spdk_bit_array_create(tgt.max_subsystems);
1608 	RB_INIT(&tgt.subsystems);
1609 
1610 	spdk_io_device_register(&tgt,
1611 				nvmf_tgt_create_poll_group,
1612 				nvmf_tgt_destroy_poll_group,
1613 				sizeof(struct spdk_nvmf_poll_group),
1614 				NULL);
1615 
1616 	/* Add one namespace */
1617 	spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
1618 	nsid = spdk_nvmf_subsystem_add_ns_ext(&subsystem, "bdev1", &ns_opts, sizeof(ns_opts), NULL);
1619 	CU_ASSERT(nsid == 1);
1620 	CU_ASSERT(NULL != subsystem.ns[0]);
1621 	CU_ASSERT(subsystem.ns[nsid - 1]->bdev == &g_bdevs[nsid - 1]);
1622 
1623 	bdev = subsystem.ns[nsid - 1]->bdev;
1624 
1625 	/* Add one controller */
1626 	TAILQ_INIT(&subsystem.ctrlrs);
1627 	TAILQ_INSERT_TAIL(&subsystem.ctrlrs, &ctrlr, link);
1628 
1629 	/* Namespace resize event */
1630 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1631 	g_ns_changed_nsid = 0xFFFFFFFF;
1632 	g_ns_changed_ctrlr = NULL;
1633 	nvmf_ns_event(SPDK_BDEV_EVENT_RESIZE, bdev, subsystem.ns[0]);
1634 	CU_ASSERT(SPDK_NVMF_SUBSYSTEM_PAUSING == subsystem.state);
1635 
1636 	poll_threads();
1637 	CU_ASSERT(1 == g_ns_changed_nsid);
1638 	CU_ASSERT(&ctrlr == g_ns_changed_ctrlr);
1639 	CU_ASSERT(SPDK_NVMF_SUBSYSTEM_ACTIVE == subsystem.state);
1640 
1641 	/* Namespace remove event */
1642 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1643 	g_ns_changed_nsid = 0xFFFFFFFF;
1644 	g_ns_changed_ctrlr = NULL;
1645 	nvmf_ns_event(SPDK_BDEV_EVENT_REMOVE, bdev, subsystem.ns[0]);
1646 	CU_ASSERT(SPDK_NVMF_SUBSYSTEM_PAUSING == subsystem.state);
1647 	CU_ASSERT(0xFFFFFFFF == g_ns_changed_nsid);
1648 	CU_ASSERT(NULL == g_ns_changed_ctrlr);
1649 
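	/* The removal completes asynchronously: once the pause finishes, the
	 * namespace is freed and the controller is notified of the change.
	 */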
1650 	poll_threads();
1651 	CU_ASSERT(1 == g_ns_changed_nsid);
1652 	CU_ASSERT(&ctrlr == g_ns_changed_ctrlr);
1653 	CU_ASSERT(NULL == subsystem.ns[0]);
1654 	CU_ASSERT(SPDK_NVMF_SUBSYSTEM_ACTIVE == subsystem.state);
1655 
1656 	spdk_io_device_unregister(&tgt, NULL);
1657 
1658 	poll_threads();
1659 
1660 	free(subsystem.ns);
1661 	free(subsystem.ana_group);
1662 	spdk_bit_array_free(&ctrlr.visible_ns);
1663 	spdk_bit_array_free(&tgt.subsystem_ids);
1664 }
1665 
1666 static void
1667 test_nvmf_ns_reservation_add_remove_registrant(void)
1668 {
1669 	struct spdk_nvmf_ns ns = {};
1670 	struct spdk_nvmf_ctrlr ctrlr = {};
1671 	struct spdk_nvmf_registrant *reg = NULL;
1672 	int rc;
1673 
1674 	TAILQ_INIT(&ns.registrants);
1675 	spdk_uuid_generate(&ctrlr.hostid);
1676 
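	/* Each successful registrant add/remove bumps the namespace generation counter (ns.gen) */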
1677 	rc = nvmf_ns_reservation_add_registrant(&ns, &ctrlr, 0xa11);
1678 	CU_ASSERT(rc == 0);
1679 	reg = TAILQ_FIRST(&ns.registrants);
1680 	SPDK_CU_ASSERT_FATAL(reg != NULL);
1681 	CU_ASSERT(ns.gen == 1);
1682 	CU_ASSERT(reg->rkey == 0xa11);
1683 	CU_ASSERT(!spdk_uuid_compare(&reg->hostid, &ctrlr.hostid));
1684 
1685 	nvmf_ns_reservation_remove_registrant(&ns, reg);
1686 	CU_ASSERT(TAILQ_EMPTY(&ns.registrants));
1687 	CU_ASSERT(ns.gen == 2);
1688 }
1689 
1690 static void
1691 test_nvmf_subsystem_destroy_cb(void *cb_arg)
1692 {
1693 }
1694 
1695 static void
1696 test_nvmf_subsystem_add_ctrlr(void)
1697 {
1698 	int rc;
1699 	struct spdk_nvmf_ctrlr ctrlr = {};
1700 	struct spdk_nvmf_tgt tgt = {};
1701 	char nqn[256] = "nqn.2016-06.io.spdk:subsystem1";
1702 	struct spdk_nvmf_subsystem *subsystem = NULL;
1703 
1704 	tgt.max_subsystems = 1024;
1705 	tgt.subsystem_ids = spdk_bit_array_create(tgt.max_subsystems);
1706 	RB_INIT(&tgt.subsystems);
1707 
1708 	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
1709 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
1710 	ctrlr.subsys = subsystem;
1711 
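	/* Dynamic controller: the subsystem allocates the cntlid (1 for the first controller) */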
1712 	ctrlr.dynamic_ctrlr = true;
1713 	rc = nvmf_subsystem_add_ctrlr(subsystem, &ctrlr);
1714 	CU_ASSERT(rc == 0);
1715 	CU_ASSERT(!TAILQ_EMPTY(&subsystem->ctrlrs));
1716 	CU_ASSERT(ctrlr.cntlid == 1);
1717 	CU_ASSERT(nvmf_subsystem_get_ctrlr(subsystem, 1) == &ctrlr);
1718 
1719 	nvmf_subsystem_remove_ctrlr(subsystem, &ctrlr);
1720 	CU_ASSERT(TAILQ_EMPTY(&subsystem->ctrlrs));
1721 	rc = spdk_nvmf_subsystem_destroy(subsystem, test_nvmf_subsystem_destroy_cb, NULL);
1722 	CU_ASSERT(rc == 0);
1723 	spdk_bit_array_free(&tgt.subsystem_ids);
1724 }
1725 
1726 static void
1727 _add_transport_cb(void *arg, int status)
1728 {
1729 	CU_ASSERT(status == 0);
1730 }
1731 
1732 static int
1733 transport_subsystem_add_host_err(struct spdk_nvmf_transport *transport,
1734 				 const struct spdk_nvmf_subsystem *subsystem,
1735 				 const char *hostnqn,
1736 				 const struct spdk_json_val *transport_specific)
1737 {
1738 	return -1;
1739 }
1740 
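/*
 * Stub spdk_nvmf_tgt_add_transport(): just link the transport so that the
 * add-host path below can walk tgt->transports and hit
 * transport_subsystem_add_host_err().
 */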
1741 void
1742 spdk_nvmf_tgt_add_transport(struct spdk_nvmf_tgt *tgt,
1743 			    struct spdk_nvmf_transport *transport,
1744 			    spdk_nvmf_tgt_add_transport_done_fn cb_fn,
1745 			    void *cb_arg)
1746 {
1747 	TAILQ_INSERT_TAIL(&tgt->transports, transport, link);
1748 }
1749 
1750 static struct spdk_nvmf_transport *
1751 transport_create(struct spdk_nvmf_transport_opts *opts)
1752 {
1753 	return &g_transport;
1754 }
1755 
1756 static void
1757 test_spdk_nvmf_subsystem_add_host(void)
1758 {
1759 	struct spdk_nvmf_tgt tgt = {};
1760 	struct spdk_nvmf_subsystem *subsystem = NULL;
1761 	int rc;
1762 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
1763 	const char subsystemnqn[] = "nqn.2016-06.io.spdk:subsystem1";
1764 	struct spdk_nvmf_transport_opts opts = {
1765 		.opts_size = sizeof(struct spdk_nvmf_transport_opts),
1766 		.io_unit_size = 8192
1767 	};
1768 	const struct spdk_nvmf_transport_ops test_ops = {
1769 		.name = "transport_ut",
1770 		.create = transport_create,
1771 		.subsystem_add_host = transport_subsystem_add_host_err,
1772 	};
1773 	struct spdk_nvmf_transport *transport;
1774 
1775 	tgt.max_subsystems = 1024;
1776 	tgt.subsystem_ids = spdk_bit_array_create(tgt.max_subsystems);
1777 	RB_INIT(&tgt.subsystems);
1778 
1779 	subsystem = spdk_nvmf_subsystem_create(&tgt, subsystemnqn, SPDK_NVMF_SUBTYPE_NVME, 0);
1780 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
1781 	CU_ASSERT_STRING_EQUAL(subsystem->subnqn, subsystemnqn);
1782 
1783 	rc = spdk_nvmf_subsystem_add_host(subsystem, hostnqn, NULL);
1784 	CU_ASSERT(rc == 0);
1785 	CU_ASSERT(!TAILQ_EMPTY(&subsystem->hosts));
1786 
1787 	/* Adding a host NQN that is already on the list is rejected with -EINVAL. */
1788 	rc = spdk_nvmf_subsystem_add_host(subsystem, hostnqn, NULL);
1789 	CU_ASSERT(rc == -EINVAL);
1790 
1791 	rc = spdk_nvmf_subsystem_remove_host(subsystem, hostnqn);
1792 	CU_ASSERT(rc == 0);
1793 	CU_ASSERT(TAILQ_EMPTY(&subsystem->hosts));
1794 
1795 	/* Removing a host NQN that is not on the list fails with -ENOENT. */
1796 	rc = spdk_nvmf_subsystem_remove_host(subsystem, hostnqn);
1797 	CU_ASSERT(rc == -ENOENT);
1798 
1799 	/* Ensure hostnqn list remains empty after transport callback fails */
1800 	spdk_nvmf_transport_register(&test_ops);
1801 	transport = spdk_nvmf_transport_create("transport_ut", &opts);
1802 	SPDK_CU_ASSERT_FATAL(transport != NULL);
1803 
1804 	TAILQ_INIT(&tgt.transports);
1805 	spdk_nvmf_tgt_add_transport(&tgt, transport, _add_transport_cb, NULL);
1806 
1807 	rc = spdk_nvmf_subsystem_add_host(subsystem, hostnqn, NULL);
1808 	CU_ASSERT(rc != 0);
1809 	CU_ASSERT(TAILQ_EMPTY(&subsystem->hosts));
1810 
1811 	spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
1812 	spdk_bit_array_free(&tgt.subsystem_ids);
1813 }
1814 
1815 static void
1816 test_nvmf_ns_reservation_report(void)
1817 {
1818 	struct spdk_nvmf_ns ns = {};
1819 	struct spdk_nvmf_ctrlr ctrlr = {};
1820 	struct spdk_nvmf_request req = {};
1821 	union nvmf_h2c_msg cmd = {};
1822 	union nvmf_c2h_msg rsp = {};
1823 	struct spdk_nvme_registered_ctrlr_extended_data *ctrlr_data;
1824 	struct spdk_nvme_reservation_status_extended_data *status_data;
1825 	struct spdk_nvmf_registrant *reg;
1826 	void *data;
1827 
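	/* Report buffer: one extended reservation status header followed by two
	 * extended registered controller data entries.
	 */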
1828 	data = calloc(1, sizeof(*status_data) + sizeof(*ctrlr_data) * 2);
1829 	reg = calloc(2, sizeof(struct spdk_nvmf_registrant));
1830 	SPDK_CU_ASSERT_FATAL(data != NULL && reg != NULL);
1831 
1832 	req.length = sizeof(*status_data) + sizeof(*ctrlr_data) * 2;
1833 	SPDK_IOV_ONE(req.iov, &req.iovcnt, data, req.length);
1834 
1835 	req.cmd = &cmd;
1836 	req.rsp = &rsp;
1837 	ns.gen = 1;
1838 	ns.rtype = SPDK_NVME_RESERVE_WRITE_EXCLUSIVE;
1839 	ns.ptpl_activated = true;
1840 	cmd.nvme_cmd.cdw11_bits.resv_report.eds = true;
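	/* CDW10 carries NUMD, a 0's based dword count; 100 leaves ample room for the report */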
1841 	cmd.nvme_cmd.cdw10 = 100;
1842 	reg[0].rkey = 0xa;
1843 	reg[1].rkey = 0xb;
1844 	spdk_uuid_generate(&reg[0].hostid);
1845 	spdk_uuid_generate(&reg[1].hostid);
1846 	TAILQ_INIT(&ns.registrants);
1847 	TAILQ_INSERT_TAIL(&ns.registrants, &reg[0], link);
1848 	TAILQ_INSERT_TAIL(&ns.registrants, &reg[1], link);
1849 
1850 	nvmf_ns_reservation_report(&ns, &ctrlr, &req);
1851 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1852 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1853 	/* Get ctrlr data and status data pointers */
1854 	ctrlr_data = (void *)((char *)req.iov[0].iov_base + sizeof(*status_data));
1855 	status_data = (void *)req.iov[0].iov_base;
1856 	SPDK_CU_ASSERT_FATAL(status_data != NULL && ctrlr_data != NULL);
1857 	CU_ASSERT(status_data->data.gen == 1);
1858 	CU_ASSERT(status_data->data.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
1859 	CU_ASSERT(status_data->data.ptpls == true);
1860 	CU_ASSERT(status_data->data.regctl == 2);
1861 	CU_ASSERT(ctrlr_data->cntlid == 0xffff);
1862 	CU_ASSERT(ctrlr_data->rcsts.status == false);
1863 	CU_ASSERT(ctrlr_data->rkey == 0xa);
1864 	CU_ASSERT(!spdk_uuid_compare((struct spdk_uuid *)ctrlr_data->hostid, &reg[0].hostid));
1865 	/* Check second ctrlr data */
1866 	ctrlr_data++;
1867 	CU_ASSERT(ctrlr_data->cntlid == 0xffff);
1868 	CU_ASSERT(ctrlr_data->rcsts.status == false);
1869 	CU_ASSERT(ctrlr_data->rkey == 0xb);
1870 	CU_ASSERT(!spdk_uuid_compare((struct spdk_uuid *)ctrlr_data->hostid, &reg[1].hostid));
1871 
1872 	/* EDS cleared: requesting the non-extended data structure fails with Host Identifier Inconsistent Format */
1873 	spdk_iov_memset(req.iov, req.iovcnt, 0);
1874 	memset(req.rsp, 0, sizeof(*req.rsp));
1875 	cmd.nvme_cmd.cdw11_bits.resv_report.eds = false;
1876 
1877 	nvmf_ns_reservation_report(&ns, &ctrlr, &req);
1878 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT);
1879 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1880 
1881 	/* Transfer length invalid */
1882 	spdk_iov_memset(req.iov, req.iovcnt, 0);
1883 	memset(req.rsp, 0, sizeof(*req.rsp));
1884 	cmd.nvme_cmd.cdw11_bits.resv_report.eds = true;
1885 	cmd.nvme_cmd.cdw10 = 0;
1886 
1887 	nvmf_ns_reservation_report(&ns, &ctrlr, &req);
1888 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
1889 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1890 
1891 	free(req.iov[0].iov_base);
1892 	free(reg);
1893 }
1894 
1895 static void
1896 test_nvmf_nqn_is_valid(void)
1897 {
1898 	bool rc;
1899 	char uuid[SPDK_NVMF_UUID_STRING_LEN + 1] = {};
1900 	char nqn[SPDK_NVMF_NQN_MAX_LEN + 1] = {};
1901 	struct spdk_uuid s_uuid = {};
1902 
1903 	spdk_uuid_generate(&s_uuid);
1904 	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &s_uuid);
1905 
1906 	/* discovery nqn */
1907 	snprintf(nqn, sizeof(nqn), "%s", SPDK_NVMF_DISCOVERY_NQN);
1908 
1909 	rc = nvmf_nqn_is_valid(nqn);
1910 	CU_ASSERT(rc == true);
1911 
1912 	/* nqn with uuid */
1913 	memset(nqn, 0xff, sizeof(nqn));
1914 	snprintf(nqn, sizeof(nqn), "%s%s", SPDK_NVMF_NQN_UUID_PRE, uuid);
1915 
1916 	rc = nvmf_nqn_is_valid(nqn);
1917 	CU_ASSERT(rc == true);
1918 
1919 	/* NQN with a valid reverse-domain name */
1920 	memset(nqn, 0xff, sizeof(nqn));
1921 	snprintf(nqn, sizeof(nqn), "%s", "nqn.2016-06.io.spdk:cnode1");
1922 
1923 	rc = nvmf_nqn_is_valid(nqn);
1924 	CU_ASSERT(rc == true);
1925 
1926 	/* Invalid nqn length */
1927 	memset(nqn, 0xff, sizeof(nqn));
1928 	snprintf(nqn, sizeof(nqn), "%s", "nqn.");
1929 
1930 	rc = nvmf_nqn_is_valid(nqn);
1931 	CU_ASSERT(rc == false);
1932 
1933 	/* Copy uuid to the nqn string, but omit the last character to make it invalid */
1934 	memset(nqn, 0, SPDK_NVMF_NQN_MAX_LEN + 1);
1935 	snprintf(nqn, sizeof(nqn), "%s", SPDK_NVMF_NQN_UUID_PRE);
1936 	memcpy(&nqn[SPDK_NVMF_NQN_UUID_PRE_LEN], uuid, SPDK_NVMF_UUID_STRING_LEN - 1);
1937 
1938 	rc = nvmf_nqn_is_valid(nqn);
1939 	CU_ASSERT(rc == false);
1940 
1941 	/* Invalid domain */
1942 	memset(nqn, 0xff, SPDK_NVMF_NQN_MAX_LEN + 1);
1943 	snprintf(nqn, sizeof(nqn), "%s", "nqn.2016-06.io...spdk:cnode1");
1944 
1945 	rc = nvmf_nqn_is_valid(nqn);
1946 	CU_ASSERT(rc == false);
1947 }
1948 
1949 static void
1950 test_nvmf_ns_reservation_restore(void)
1951 {
1952 	struct spdk_nvmf_ns ns = {};
1953 	struct spdk_nvmf_reservation_info info = {};
1954 	struct spdk_bdev bdev = {};
1955 	struct spdk_uuid s_uuid = {};
1956 	struct spdk_nvmf_registrant *reg0, *reg1;
1957 	char uuid[SPDK_UUID_STRING_LEN] = {};
1958 	int rc;
1959 
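	/* Persisted state to restore: two registrants (keys 0xb and 0xc) and a
	 * Write Exclusive - All Registrants reservation with PTPL activated.
	 */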
1960 	ns.bdev = &bdev;
1961 	TAILQ_INIT(&ns.registrants);
1962 	info.ptpl_activated = true;
1963 	info.num_regs = 2;
1964 	info.rtype = SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS;
1965 	info.registrants[0].rkey = 0xb;
1966 	info.registrants[1].rkey = 0xc;
1967 
1968 	/* Generate and prepare UUIDs; the bdev UUID must match info.bdev_uuid for the restore to succeed */
1969 	spdk_uuid_generate(&s_uuid);
1970 	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &s_uuid);
1971 	snprintf(info.holder_uuid, SPDK_UUID_STRING_LEN, "%s", uuid);
1972 	snprintf(info.bdev_uuid, SPDK_UUID_STRING_LEN, "%s", uuid);
1973 	snprintf(info.registrants[0].host_uuid, SPDK_UUID_STRING_LEN, "%s", uuid);
1974 	spdk_uuid_copy(&bdev.uuid, &s_uuid);
1975 	spdk_uuid_generate(&s_uuid);
1976 	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &s_uuid);
1977 	snprintf(info.registrants[1].host_uuid, SPDK_UUID_STRING_LEN, "%s", uuid);
1978 
1979 	/* info->crkey does not match any registrant key */
1980 	info.crkey = 0xa;
1981 
1982 	rc = nvmf_ns_reservation_restore(&ns, &info);
1983 	CU_ASSERT(rc == -EINVAL);
1984 
1985 	/* info->crkey matches a registrant key */
1986 	info.crkey = 0xb;
1987 
1988 	rc = nvmf_ns_reservation_restore(&ns, &info);
1989 	CU_ASSERT(rc == 0);
1990 	CU_ASSERT(ns.crkey == 0xb);
1991 	CU_ASSERT(ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1992 	CU_ASSERT(ns.ptpl_activated == true);
1993 	/* Check both registrants' rkeys */
1994 	reg0 = TAILQ_FIRST(&ns.registrants);
1995 	reg1 = TAILQ_NEXT(reg0, link);
1996 	CU_ASSERT(ns.holder == reg0);
1997 	CU_ASSERT(reg0->rkey == 0xb);
1998 	CU_ASSERT(reg1->rkey == 0xc);
1999 
2000 	rc = nvmf_ns_reservation_clear_all_registrants(&ns);
2001 	CU_ASSERT(rc == 2);
2002 	CU_ASSERT(TAILQ_EMPTY(&ns.registrants));
2003 
2004 	/* Existing bdev UUID differs from the one recorded in the configuration */
2005 	spdk_uuid_generate(&s_uuid);
2006 	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &s_uuid);
2007 	snprintf(info.bdev_uuid, SPDK_UUID_STRING_LEN, "%s", uuid);
2008 	spdk_uuid_generate(&s_uuid);
2009 	spdk_uuid_copy(&bdev.uuid, &s_uuid);
2010 
2011 	rc = nvmf_ns_reservation_restore(&ns, &info);
2012 	CU_ASSERT(rc == -EINVAL);
2013 
2014 	/* Check restore without reservation */
2015 	spdk_uuid_fmt_lower(info.bdev_uuid, sizeof(info.bdev_uuid), &bdev.uuid);
2016 	info.rtype = 0;
2017 	info.crkey = 0;
2018 	memset(info.holder_uuid, 0, SPDK_UUID_STRING_LEN);
2019 
2020 	rc = nvmf_ns_reservation_restore(&ns, &info);
2021 	CU_ASSERT(rc == 0);
2022 	CU_ASSERT(ns.crkey == 0);
2023 	CU_ASSERT(ns.rtype == 0);
2024 	CU_ASSERT(ns.ptpl_activated == true);
2025 	CU_ASSERT(ns.holder == NULL);
2026 	reg0 = TAILQ_FIRST(&ns.registrants);
2027 	reg1 = TAILQ_NEXT(reg0, link);
2028 	CU_ASSERT(reg0->rkey == 0xb);
2029 	CU_ASSERT(reg1->rkey == 0xc);
2030 
2031 	rc = nvmf_ns_reservation_clear_all_registrants(&ns);
2032 	CU_ASSERT(rc == 2);
2033 	CU_ASSERT(TAILQ_EMPTY(&ns.registrants));
2034 }
2035 
2036 static void
2037 ut_nvmf_subsystem_paused(struct spdk_nvmf_subsystem *subsystem, void *ctx, int status)
2038 {
2039 	CU_ASSERT_EQUAL(status, 0);
2040 	CU_ASSERT_EQUAL(subsystem->state, SPDK_NVMF_SUBSYSTEM_PAUSED);
2041 }
2042 
2043 static void
2044 test_nvmf_subsystem_state_change(void)
2045 {
2046 	struct spdk_nvmf_tgt tgt = {};
2047 	struct spdk_nvmf_subsystem *subsystem, *discovery_subsystem;
2048 	int rc;
2049 
2050 	tgt.max_subsystems = 1024;
2051 	tgt.subsystem_ids = spdk_bit_array_create(tgt.max_subsystems);
2052 	RB_INIT(&tgt.subsystems);
2053 
2054 	discovery_subsystem = spdk_nvmf_subsystem_create(&tgt, SPDK_NVMF_DISCOVERY_NQN,
2055 			      SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT, 0);
2056 	SPDK_CU_ASSERT_FATAL(discovery_subsystem != NULL);
2057 	subsystem = spdk_nvmf_subsystem_create(&tgt, "nqn.2016-06.io.spdk:subsystem1",
2058 					       SPDK_NVMF_SUBTYPE_NVME, 0);
2059 	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
2060 
2061 	spdk_io_device_register(&tgt,
2062 				nvmf_tgt_create_poll_group,
2063 				nvmf_tgt_destroy_poll_group,
2064 				sizeof(struct spdk_nvmf_poll_group),
2065 				NULL);
2066 
2067 	rc = spdk_nvmf_subsystem_start(discovery_subsystem, NULL, NULL);
2068 	CU_ASSERT(rc == 0);
2069 	poll_threads();
2070 	CU_ASSERT(discovery_subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVE);
2071 	rc = spdk_nvmf_subsystem_start(subsystem, NULL, NULL);
2072 	CU_ASSERT(rc == 0);
2073 	poll_threads();
2074 	CU_ASSERT(subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVE);
2075 
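	/* Queue a stop while the pause is still in flight; state changes are
	 * serialized, so the pause callback still observes SPDK_NVMF_SUBSYSTEM_PAUSED
	 * before the stop completes.
	 */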
2076 	rc = spdk_nvmf_subsystem_pause(subsystem, SPDK_NVME_GLOBAL_NS_TAG,
2077 				       ut_nvmf_subsystem_paused, NULL);
2078 	CU_ASSERT(rc == 0);
2079 	rc = spdk_nvmf_subsystem_stop(subsystem, NULL, NULL);
2080 	CU_ASSERT(rc == 0);
2081 	poll_threads();
2082 	CU_ASSERT(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE);
2083 
2084 	rc = spdk_nvmf_subsystem_stop(discovery_subsystem, NULL, NULL);
2085 	CU_ASSERT(rc == 0);
2086 	poll_threads();
2087 	CU_ASSERT(discovery_subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE);
2088 	rc = spdk_nvmf_subsystem_stop(subsystem, NULL, NULL);
2089 	CU_ASSERT(rc == 0);
2090 	poll_threads();
2091 	CU_ASSERT(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE);
2092 
2093 	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
2094 	CU_ASSERT(rc == 0);
2095 	rc = spdk_nvmf_subsystem_destroy(discovery_subsystem, NULL, NULL);
2096 	CU_ASSERT(rc == 0);
2097 
2098 	spdk_io_device_unregister(&tgt, NULL);
2099 	poll_threads();
2100 
2101 	spdk_bit_array_free(&tgt.subsystem_ids);
2102 }
2103 
2104 static bool
2105 ut_is_ptpl_capable(const struct spdk_nvmf_ns *ns)
2106 {
2107 	return true;
2108 }
2109 
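/* In-memory stand-in for reservation persistence: update() stores the info here and load() returns it */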
2110 static struct spdk_nvmf_reservation_info g_resv_info;
2111 
2112 static int
2113 ut_update_reservation(const struct spdk_nvmf_ns *ns, const struct spdk_nvmf_reservation_info *info)
2114 {
2115 	g_resv_info = *info;
2116 
2117 	return 0;
2118 }
2119 
2120 static int
2121 ut_load_reservation(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info)
2122 {
2123 	*info = g_resv_info;
2124 
2125 	return 0;
2126 }
2127 
2128 static void
2129 test_nvmf_reservation_custom_ops(void)
2130 {
2131 	struct spdk_nvmf_ns_reservation_ops ops = {
2132 		.is_ptpl_capable = ut_is_ptpl_capable,
2133 		.update = ut_update_reservation,
2134 		.load = ut_load_reservation,
2135 	};
2136 	struct spdk_nvmf_request *req;
2137 	struct spdk_nvme_cpl *rsp;
2138 	struct spdk_nvmf_registrant *reg;
2139 	bool update_sgroup = false;
2140 	struct spdk_nvmf_tgt tgt = {};
2141 	struct spdk_nvmf_subsystem subsystem = {
2142 		.max_nsid = 4,
2143 		.tgt = &tgt,
2144 	};
2145 	uint32_t nsid;
2146 	struct spdk_nvmf_ns *ns;
2147 	int rc;
2148 
2149 	subsystem.ns = calloc(subsystem.max_nsid, sizeof(struct spdk_nvmf_subsystem_ns *));
2150 	SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);
2151 	subsystem.ana_group = calloc(subsystem.max_nsid, sizeof(uint32_t));
2152 	SPDK_CU_ASSERT_FATAL(subsystem.ana_group != NULL);
2153 
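	/* Route reservation persistence through the custom ops above instead of the default backend */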
2154 	spdk_nvmf_set_custom_ns_reservation_ops(&ops);
2155 
2156 	ut_reservation_init();
2157 
2158 	req = ut_reservation_build_req(16);
2159 	SPDK_CU_ASSERT_FATAL(req != NULL);
2160 	rsp = &req->rsp->nvme_cpl;
2161 
2162 	/* Add a registrant and activate ptpl */
2163 	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
2164 					      SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
2165 	update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
2166 	SPDK_CU_ASSERT_FATAL(update_sgroup == true);
2167 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
2168 	SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == true);
2169 	rc = nvmf_ns_update_reservation_info(&g_ns);
2170 	SPDK_CU_ASSERT_FATAL(rc == 0);
2171 
2172 	/* Acquire a reservation */
2173 	rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
2174 	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
2175 					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1, 0x0);
2176 	update_sgroup = nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
2177 	SPDK_CU_ASSERT_FATAL(update_sgroup == true);
2178 	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
2179 	rc = nvmf_ns_update_reservation_info(&g_ns);
2180 	SPDK_CU_ASSERT_FATAL(rc == 0);
2181 
2182 	/* Add the same bdev as a namespace of a different subsystem.
2183 	 * The reservation information should be restored via the custom load op. */
2184 	nsid = spdk_nvmf_subsystem_add_ns_ext(&subsystem, g_ns.bdev->name, NULL, 0, NULL);
2185 	CU_ASSERT(nsid == 1);
2186 
2187 	ns = _nvmf_subsystem_get_ns(&subsystem, nsid);
2188 	SPDK_CU_ASSERT_FATAL(ns != NULL);
2189 	CU_ASSERT(ns->crkey == 0xa1);
2190 	CU_ASSERT(ns->rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
2191 	CU_ASSERT(ns->ptpl_activated == true);
2192 
2193 	reg = nvmf_ns_reservation_get_registrant(ns, &g_ctrlr1_A.hostid);
2194 	SPDK_CU_ASSERT_FATAL(reg != NULL);
2195 	SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &reg->hostid));
2196 	CU_ASSERT(reg == ns->holder);
2197 
2198 	rc = spdk_nvmf_subsystem_remove_ns(&subsystem, nsid);
2199 	CU_ASSERT(rc == 0);
2200 
2201 	free(subsystem.ns);
2202 	free(subsystem.ana_group);
2203 	ut_reservation_free_req(req);
2204 	ut_reservation_deinit();
2205 }
2206 
2207 int
2208 main(int argc, char **argv)
2209 {
2210 	CU_pSuite	suite = NULL;
2211 	unsigned int	num_failures;
2212 
2213 	CU_initialize_registry();
2214 
2215 	suite = CU_add_suite("nvmf", NULL, NULL);
2216 
2217 	CU_ADD_TEST(suite, nvmf_test_create_subsystem);
2218 	CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_add_ns);
2219 	CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_add_fdp_ns);
2220 	CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_set_sn);
2221 	CU_ADD_TEST(suite, test_spdk_nvmf_ns_visible);
2222 	CU_ADD_TEST(suite, test_reservation_register);
2223 	CU_ADD_TEST(suite, test_reservation_register_with_ptpl);
2224 	CU_ADD_TEST(suite, test_reservation_acquire_preempt_1);
2225 	CU_ADD_TEST(suite, test_reservation_acquire_release_with_ptpl);
2226 	CU_ADD_TEST(suite, test_reservation_release);
2227 	CU_ADD_TEST(suite, test_reservation_unregister_notification);
2228 	CU_ADD_TEST(suite, test_reservation_release_notification);
2229 	CU_ADD_TEST(suite, test_reservation_release_notification_write_exclusive);
2230 	CU_ADD_TEST(suite, test_reservation_clear_notification);
2231 	CU_ADD_TEST(suite, test_reservation_preempt_notification);
2232 	CU_ADD_TEST(suite, test_spdk_nvmf_ns_event);
2233 	CU_ADD_TEST(suite, test_nvmf_ns_reservation_add_remove_registrant);
2234 	CU_ADD_TEST(suite, test_nvmf_subsystem_add_ctrlr);
2235 	CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_add_host);
2236 	CU_ADD_TEST(suite, test_nvmf_ns_reservation_report);
2237 	CU_ADD_TEST(suite, test_nvmf_nqn_is_valid);
2238 	CU_ADD_TEST(suite, test_nvmf_ns_reservation_restore);
2239 	CU_ADD_TEST(suite, test_nvmf_subsystem_state_change);
2240 	CU_ADD_TEST(suite, test_nvmf_reservation_custom_ops);
2241 
2242 	allocate_threads(1);
2243 	set_thread(0);
2244 
2245 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
2246 	CU_cleanup_registry();
2247 
2248 	free_threads();
2249 
2250 	return num_failures;
2251 }
2252