xref: /spdk/lib/nvmf/subsystem.c (revision 6cebe9d06b14ad173e45d2b9be49b04f64b5fba3)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation. All rights reserved.
5  *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "nvmf_internal.h"
37 #include "transport.h"
38 
39 #include "spdk/likely.h"
40 #include "spdk/string.h"
41 #include "spdk/trace.h"
42 #include "spdk/nvmf_spec.h"
43 #include "spdk/uuid.h"
44 #include "spdk/json.h"
45 #include "spdk/file.h"
46 
47 #define __SPDK_BDEV_MODULE_ONLY
48 #include "spdk/bdev_module.h"
49 #include "spdk/log.h"
50 #include "spdk_internal/utf.h"
51 
52 #define MODEL_NUMBER_DEFAULT "SPDK bdev Controller"
53 #define NVMF_SUBSYSTEM_DEFAULT_NAMESPACES 32
54 
/*
 * States for parsing valid domains in NQNs according to RFC 1034.
 * A reverse domain is a dot-separated sequence of labels; these states
 * track what the next character of the current label is allowed to be.
 */
enum spdk_nvmf_nqn_domain_states {
	/* First character of a domain must be a letter */
	SPDK_NVMF_DOMAIN_ACCEPT_LETTER = 0,

	/* Subsequent characters can be any of letter, digit, or hyphen */
	SPDK_NVMF_DOMAIN_ACCEPT_LDH = 1,

	/* A domain label must end with either a letter or digit */
	SPDK_NVMF_DOMAIN_ACCEPT_ANY = 2
};
68 
/* Returns true if the buffer contains only printable ASCII characters
 * (0x20 through 0x7E inclusive), as the NVMe spec requires for ASCII
 * data fields. An empty buffer is considered valid.
 */
static bool
nvmf_valid_ascii_string(const void *buf, size_t size)
{
	const uint8_t *p = buf;
	const uint8_t *end = p + size;

	while (p < end) {
		if (*p < 0x20 || *p > 0x7E) {
			return false;
		}
		p++;
	}

	return true;
}
84 
85 static bool
86 nvmf_valid_nqn(const char *nqn)
87 {
88 	size_t len;
89 	struct spdk_uuid uuid_value;
90 	uint32_t i;
91 	int bytes_consumed;
92 	uint32_t domain_label_length;
93 	char *reverse_domain_end;
94 	uint32_t reverse_domain_end_index;
95 	enum spdk_nvmf_nqn_domain_states domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER;
96 
97 	/* Check for length requirements */
98 	len = strlen(nqn);
99 	if (len > SPDK_NVMF_NQN_MAX_LEN) {
100 		SPDK_ERRLOG("Invalid NQN \"%s\": length %zu > max %d\n", nqn, len, SPDK_NVMF_NQN_MAX_LEN);
101 		return false;
102 	}
103 
104 	/* The nqn must be at least as long as SPDK_NVMF_NQN_MIN_LEN to contain the necessary prefix. */
105 	if (len < SPDK_NVMF_NQN_MIN_LEN) {
106 		SPDK_ERRLOG("Invalid NQN \"%s\": length %zu < min %d\n", nqn, len, SPDK_NVMF_NQN_MIN_LEN);
107 		return false;
108 	}
109 
110 	/* Check for discovery controller nqn */
111 	if (!strcmp(nqn, SPDK_NVMF_DISCOVERY_NQN)) {
112 		return true;
113 	}
114 
115 	/* Check for equality with the generic nqn structure of the form "nqn.2014-08.org.nvmexpress:uuid:11111111-2222-3333-4444-555555555555" */
116 	if (!strncmp(nqn, SPDK_NVMF_NQN_UUID_PRE, SPDK_NVMF_NQN_UUID_PRE_LEN)) {
117 		if (len != SPDK_NVMF_NQN_UUID_PRE_LEN + SPDK_NVMF_UUID_STRING_LEN) {
118 			SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not the correct length\n", nqn);
119 			return false;
120 		}
121 
122 		if (spdk_uuid_parse(&uuid_value, &nqn[SPDK_NVMF_NQN_UUID_PRE_LEN])) {
123 			SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not formatted correctly\n", nqn);
124 			return false;
125 		}
126 		return true;
127 	}
128 
129 	/* If the nqn does not match the uuid structure, the next several checks validate the form "nqn.yyyy-mm.reverse.domain:user-string" */
130 
131 	if (strncmp(nqn, "nqn.", 4) != 0) {
132 		SPDK_ERRLOG("Invalid NQN \"%s\": NQN must begin with \"nqn.\".\n", nqn);
133 		return false;
134 	}
135 
136 	/* Check for yyyy-mm. */
137 	if (!(isdigit(nqn[4]) && isdigit(nqn[5]) && isdigit(nqn[6]) && isdigit(nqn[7]) &&
138 	      nqn[8] == '-' && isdigit(nqn[9]) && isdigit(nqn[10]) && nqn[11] == '.')) {
139 		SPDK_ERRLOG("Invalid date code in NQN \"%s\"\n", nqn);
140 		return false;
141 	}
142 
143 	reverse_domain_end = strchr(nqn, ':');
144 	if (reverse_domain_end != NULL && (reverse_domain_end_index = reverse_domain_end - nqn) < len - 1) {
145 	} else {
146 		SPDK_ERRLOG("Invalid NQN \"%s\". NQN must contain user specified name with a ':' as a prefix.\n",
147 			    nqn);
148 		return false;
149 	}
150 
151 	/* Check for valid reverse domain */
152 	domain_label_length = 0;
153 	for (i = 12; i < reverse_domain_end_index; i++) {
154 		if (domain_label_length > SPDK_DOMAIN_LABEL_MAX_LEN) {
155 			SPDK_ERRLOG("Invalid domain name in NQN \"%s\". At least one Label is too long.\n", nqn);
156 			return false;
157 		}
158 
159 		switch (domain_state) {
160 
161 		case SPDK_NVMF_DOMAIN_ACCEPT_LETTER: {
162 			if (isalpha(nqn[i])) {
163 				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY;
164 				domain_label_length++;
165 				break;
166 			} else {
167 				SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must start with a letter.\n", nqn);
168 				return false;
169 			}
170 		}
171 
172 		case SPDK_NVMF_DOMAIN_ACCEPT_LDH: {
173 			if (isalpha(nqn[i]) || isdigit(nqn[i])) {
174 				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY;
175 				domain_label_length++;
176 				break;
177 			} else if (nqn[i] == '-') {
178 				if (i == reverse_domain_end_index - 1) {
179 					SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n",
180 						    nqn);
181 					return false;
182 				}
183 				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH;
184 				domain_label_length++;
185 				break;
186 			} else if (nqn[i] == '.') {
187 				SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n",
188 					    nqn);
189 				return false;
190 			} else {
191 				SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only [a-z,A-Z,0-9,'-','.'].\n",
192 					    nqn);
193 				return false;
194 			}
195 		}
196 
197 		case SPDK_NVMF_DOMAIN_ACCEPT_ANY: {
198 			if (isalpha(nqn[i]) || isdigit(nqn[i])) {
199 				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY;
200 				domain_label_length++;
201 				break;
202 			} else if (nqn[i] == '-') {
203 				if (i == reverse_domain_end_index - 1) {
204 					SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n",
205 						    nqn);
206 					return false;
207 				}
208 				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH;
209 				domain_label_length++;
210 				break;
211 			} else if (nqn[i] == '.') {
212 				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER;
213 				domain_label_length = 0;
214 				break;
215 			} else {
216 				SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only [a-z,A-Z,0-9,'-','.'].\n",
217 					    nqn);
218 				return false;
219 			}
220 		}
221 		}
222 	}
223 
224 	i = reverse_domain_end_index + 1;
225 	while (i < len) {
226 		bytes_consumed = utf8_valid(&nqn[i], &nqn[len]);
227 		if (bytes_consumed <= 0) {
228 			SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only valid utf-8.\n", nqn);
229 			return false;
230 		}
231 
232 		i += bytes_consumed;
233 	}
234 	return true;
235 }
236 
237 static void subsystem_state_change_on_pg(struct spdk_io_channel_iter *i);
238 
/*
 * Create a subsystem named 'nqn' and register it in the target's subsystem
 * array. num_ns caps the namespace array (0 selects the default for
 * non-discovery subsystems; discovery subsystems must pass 0). Returns the
 * new subsystem, or NULL on duplicate/invalid NQN, a full target, or
 * allocation failure.
 */
struct spdk_nvmf_subsystem *
spdk_nvmf_subsystem_create(struct spdk_nvmf_tgt *tgt,
			   const char *nqn,
			   enum spdk_nvmf_subtype type,
			   uint32_t num_ns)
{
	struct spdk_nvmf_subsystem	*subsystem;
	uint32_t			sid;

	if (spdk_nvmf_tgt_find_subsystem(tgt, nqn)) {
		SPDK_ERRLOG("Subsystem NQN '%s' already exists\n", nqn);
		return NULL;
	}

	if (!nvmf_valid_nqn(nqn)) {
		return NULL;
	}

	if (type == SPDK_NVMF_SUBTYPE_DISCOVERY) {
		if (num_ns != 0) {
			SPDK_ERRLOG("Discovery subsystem cannot have namespaces.\n");
			return NULL;
		}
	} else if (num_ns == 0) {
		num_ns = NVMF_SUBSYSTEM_DEFAULT_NAMESPACES;
	}

	/* Find a free subsystem id (sid) */
	for (sid = 0; sid < tgt->max_subsystems; sid++) {
		if (tgt->subsystems[sid] == NULL) {
			break;
		}
	}
	if (sid >= tgt->max_subsystems) {
		/* No free slot: the target already holds max_subsystems. */
		return NULL;
	}

	subsystem = calloc(1, sizeof(struct spdk_nvmf_subsystem));
	if (subsystem == NULL) {
		return NULL;
	}

	subsystem->thread = spdk_get_thread();
	subsystem->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
	subsystem->tgt = tgt;
	subsystem->id = sid;
	subsystem->subtype = type;
	subsystem->max_nsid = num_ns;
	subsystem->next_cntlid = 0;
	snprintf(subsystem->subnqn, sizeof(subsystem->subnqn), "%s", nqn);
	pthread_mutex_init(&subsystem->mutex, NULL);
	TAILQ_INIT(&subsystem->listeners);
	TAILQ_INIT(&subsystem->hosts);
	TAILQ_INIT(&subsystem->ctrlrs);

	if (num_ns != 0) {
		subsystem->ns = calloc(num_ns, sizeof(struct spdk_nvmf_ns *));
		if (subsystem->ns == NULL) {
			SPDK_ERRLOG("Namespace memory allocation failed\n");
			pthread_mutex_destroy(&subsystem->mutex);
			free(subsystem);
			return NULL;
		}
	}

	/* Default serial number: all '0' characters, NUL-terminated. */
	memset(subsystem->sn, '0', sizeof(subsystem->sn) - 1);
	subsystem->sn[sizeof(subsystem->sn) - 1] = '\0';

	snprintf(subsystem->mn, sizeof(subsystem->mn), "%s",
		 MODEL_NUMBER_DEFAULT);

	/* Publish the subsystem, then refresh the discovery log to include it. */
	tgt->subsystems[sid] = subsystem;
	nvmf_update_discovery_log(tgt, NULL);

	return subsystem;
}
315 
/* Must hold subsystem->mutex while calling this function.
 * Unlinks 'host' from the subsystem's allowed-hosts list and frees it. */
static void
nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_host *host)
{
	TAILQ_REMOVE(&subsystem->hosts, host, link);
	free(host);
}
323 
324 static void
325 _nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem,
326 				struct spdk_nvmf_subsystem_listener *listener,
327 				bool stop)
328 {
329 	struct spdk_nvmf_transport *transport;
330 
331 	if (stop) {
332 		transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, listener->trid->trstring);
333 		if (transport != NULL) {
334 			spdk_nvmf_transport_stop_listen(transport, listener->trid);
335 		}
336 	}
337 
338 	TAILQ_REMOVE(&subsystem->listeners, listener, link);
339 	free(listener);
340 }
341 
/*
 * Tear down a subsystem: remove listeners, allowed hosts, controllers and
 * namespaces, unregister it from the target, and free it. The subsystem
 * must already be INACTIVE (asserted). NULL is a no-op.
 */
void
spdk_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_host		*host, *host_tmp;
	struct spdk_nvmf_ctrlr		*ctrlr, *ctrlr_tmp;
	struct spdk_nvmf_ns		*ns;

	if (!subsystem) {
		return;
	}

	assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE);

	SPDK_DEBUGLOG(nvmf, "subsystem is %p\n", subsystem);

	/* stop=false: leave any transport-wide listeners running. */
	nvmf_subsystem_remove_all_listeners(subsystem, false);

	pthread_mutex_lock(&subsystem->mutex);

	TAILQ_FOREACH_SAFE(host, &subsystem->hosts, link, host_tmp) {
		nvmf_subsystem_remove_host(subsystem, host);
	}

	pthread_mutex_unlock(&subsystem->mutex);

	TAILQ_FOREACH_SAFE(ctrlr, &subsystem->ctrlrs, link, ctrlr_tmp) {
		nvmf_ctrlr_destruct(ctrlr);
	}

	/* Capture the next namespace before removing the current one. */
	ns = spdk_nvmf_subsystem_get_first_ns(subsystem);
	while (ns != NULL) {
		struct spdk_nvmf_ns *next_ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns);

		spdk_nvmf_subsystem_remove_ns(subsystem, ns->opts.nsid);
		ns = next_ns;
	}

	free(subsystem->ns);

	/* Free the subsystem's slot and drop it from the discovery log. */
	subsystem->tgt->subsystems[subsystem->id] = NULL;
	nvmf_update_discovery_log(subsystem->tgt, NULL);

	pthread_mutex_destroy(&subsystem->mutex);

	free(subsystem);
}
388 
389 
390 /* we have to use the typedef in the function declaration to appease astyle. */
391 typedef enum spdk_nvmf_subsystem_state spdk_nvmf_subsystem_state_t;
392 
393 static spdk_nvmf_subsystem_state_t
394 nvmf_subsystem_get_intermediate_state(enum spdk_nvmf_subsystem_state current_state,
395 				      enum spdk_nvmf_subsystem_state requested_state)
396 {
397 	switch (requested_state) {
398 	case SPDK_NVMF_SUBSYSTEM_INACTIVE:
399 		return SPDK_NVMF_SUBSYSTEM_DEACTIVATING;
400 	case SPDK_NVMF_SUBSYSTEM_ACTIVE:
401 		if (current_state == SPDK_NVMF_SUBSYSTEM_PAUSED) {
402 			return SPDK_NVMF_SUBSYSTEM_RESUMING;
403 		} else {
404 			return SPDK_NVMF_SUBSYSTEM_ACTIVATING;
405 		}
406 	case SPDK_NVMF_SUBSYSTEM_PAUSED:
407 		return SPDK_NVMF_SUBSYSTEM_PAUSING;
408 	default:
409 		assert(false);
410 		return SPDK_NVMF_SUBSYSTEM_NUM_STATES;
411 	}
412 }
413 
/*
 * Atomically transition subsystem->state to 'state'. The switch encodes the
 * primary legal predecessor for each target state; a failed compare-exchange
 * is retried once for transitions that have a second legal predecessor.
 * Returns 0 on success, non-zero if the transition was illegal.
 */
static int
nvmf_subsystem_set_state(struct spdk_nvmf_subsystem *subsystem,
			 enum spdk_nvmf_subsystem_state state)
{
	enum spdk_nvmf_subsystem_state actual_old_state, expected_old_state;
	bool exchanged;

	switch (state) {
	case SPDK_NVMF_SUBSYSTEM_INACTIVE:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_DEACTIVATING;
		break;
	case SPDK_NVMF_SUBSYSTEM_ACTIVATING:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
		break;
	case SPDK_NVMF_SUBSYSTEM_ACTIVE:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING;
		break;
	case SPDK_NVMF_SUBSYSTEM_PAUSING:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
		break;
	case SPDK_NVMF_SUBSYSTEM_PAUSED:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSING;
		break;
	case SPDK_NVMF_SUBSYSTEM_RESUMING:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSED;
		break;
	case SPDK_NVMF_SUBSYSTEM_DEACTIVATING:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
		break;
	default:
		assert(false);
		return -1;
	}

	actual_old_state = expected_old_state;
	exchanged = __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false,
						__ATOMIC_RELAXED, __ATOMIC_RELAXED);
	if (spdk_unlikely(exchanged == false)) {
		/* The CAS failed and actual_old_state now holds the real current
		 * state. Some transitions have a second valid predecessor; pick
		 * it and retry once. */
		if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING &&
		    state == SPDK_NVMF_SUBSYSTEM_ACTIVE) {
			expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING;
		}
		/* This is for the case when activating the subsystem fails. */
		if (actual_old_state == SPDK_NVMF_SUBSYSTEM_ACTIVATING &&
		    state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING) {
			expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING;
		}
		/* This is for the case when resuming the subsystem fails. */
		if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING &&
		    state == SPDK_NVMF_SUBSYSTEM_PAUSING) {
			expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING;
		}
		actual_old_state = expected_old_state;
		__atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false,
					    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	}
	/* Zero exactly when the exchange (or the retry) succeeded. */
	assert(actual_old_state == expected_old_state);
	return actual_old_state - expected_old_state;
}
473 
/* Context tracking one in-flight subsystem state transition. */
struct subsystem_state_change_ctx {
	struct spdk_nvmf_subsystem		*subsystem;
	/* Namespace id forwarded to the pause path (see spdk_nvmf_subsystem_pause). */
	uint16_t				nsid;

	/* State held before the transition began; used to revert on failure. */
	enum spdk_nvmf_subsystem_state		original_state;
	/* Stable state the caller asked for. */
	enum spdk_nvmf_subsystem_state		requested_state;

	/* Optional user completion callback and its argument. */
	spdk_nvmf_subsystem_state_change_done	cb_fn;
	void					*cb_arg;
};
484 
/*
 * Channel-iteration completion used after a failed state change: the poll
 * groups have been walked a second time to restore the previous state, and
 * ctx->requested_state now holds the original state to return to. Always
 * reports failure (-1) to the user callback.
 */
static void
subsystem_state_change_revert_done(struct spdk_io_channel_iter *i, int status)
{
	struct subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	/* Nothing to be done here if the state setting fails, we are just screwed. */
	if (nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state)) {
		SPDK_ERRLOG("Unable to revert the subsystem state after operation failure.\n");
	}

	ctx->subsystem->changing_state = false;
	if (ctx->cb_fn) {
		/* return a failure here. This function only exists in an error path. */
		ctx->cb_fn(ctx->subsystem, ctx->cb_arg, -1);
	}
	free(ctx);
}
502 
/*
 * Completion for the per-poll-group state-change walk. On success, commit
 * the requested state. On any failure, enter the transient state leading
 * back to the original state and walk the poll groups again to revert them
 * (finished by subsystem_state_change_revert_done).
 */
static void
subsystem_state_change_done(struct spdk_io_channel_iter *i, int status)
{
	struct subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	enum spdk_nvmf_subsystem_state intermediate_state;

	if (status == 0) {
		status = nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state);
		if (status) {
			status = -1;
		}
	}

	if (status) {
		/* Arguments are reversed on purpose: we are now heading from
		 * the requested state back toward the original state. */
		intermediate_state = nvmf_subsystem_get_intermediate_state(ctx->requested_state,
				     ctx->original_state);
		assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES);

		if (nvmf_subsystem_set_state(ctx->subsystem, intermediate_state)) {
			goto out;
		}
		ctx->requested_state = ctx->original_state;
		spdk_for_each_channel(ctx->subsystem->tgt,
				      subsystem_state_change_on_pg,
				      ctx,
				      subsystem_state_change_revert_done);
		return;
	}

out:
	ctx->subsystem->changing_state = false;
	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status);
	}
	free(ctx);
}
539 
/* Completion hook passed to the poll-group operations: forwards the status
 * to the channel iterator so the walk advances to the next poll group. */
static void
subsystem_state_change_continue(void *ctx, int status)
{
	spdk_for_each_channel_continue((struct spdk_io_channel_iter *)ctx, status);
}
546 
/*
 * Per-poll-group step of a subsystem state change: apply the operation
 * matching the requested stable state to this poll group. The continue
 * callback advances the channel iteration when the operation completes.
 */
static void
subsystem_state_change_on_pg(struct spdk_io_channel_iter *i)
{
	struct subsystem_state_change_ctx *ctx;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_poll_group *group;

	ctx = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	switch (ctx->requested_state) {
	case SPDK_NVMF_SUBSYSTEM_INACTIVE:
		nvmf_poll_group_remove_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
		break;
	case SPDK_NVMF_SUBSYSTEM_ACTIVE:
		/* The transient state distinguishes a first activation from a
		 * resume after pause. */
		if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVATING) {
			nvmf_poll_group_add_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
		} else if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_RESUMING) {
			nvmf_poll_group_resume_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
		}
		break;
	case SPDK_NVMF_SUBSYSTEM_PAUSED:
		nvmf_poll_group_pause_subsystem(group, ctx->subsystem, ctx->nsid, subsystem_state_change_continue,
						i);
		break;
	default:
		assert(false);
		break;
	}
}
578 
/*
 * Drive the subsystem toward requested_state. Claims the changing_state
 * flag (-EBUSY if another change is in flight), moves to the transient
 * intermediate state, then walks every poll group; subsystem_state_change_done
 * commits or reverts once the walk completes. cb_fn (optional) receives the
 * final status. Returns 0 if the change was started (or already satisfied).
 */
static int
nvmf_subsystem_state_change(struct spdk_nvmf_subsystem *subsystem,
			    uint32_t nsid,
			    enum spdk_nvmf_subsystem_state requested_state,
			    spdk_nvmf_subsystem_state_change_done cb_fn,
			    void *cb_arg)
{
	struct subsystem_state_change_ctx *ctx;
	enum spdk_nvmf_subsystem_state intermediate_state;
	int rc;

	/* Atomically claim exclusive right to change the state. */
	if (__sync_val_compare_and_swap(&subsystem->changing_state, false, true)) {
		return -EBUSY;
	}

	/* If we are already in the requested state, just call the callback immediately. */
	if (subsystem->state == requested_state) {
		subsystem->changing_state = false;
		if (cb_fn) {
			cb_fn(subsystem, cb_arg, 0);
		}
		return 0;
	}

	intermediate_state = nvmf_subsystem_get_intermediate_state(subsystem->state, requested_state);
	assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		subsystem->changing_state = false;
		return -ENOMEM;
	}

	/* Remember where we started so a failure can be rolled back. */
	ctx->original_state = subsystem->state;
	rc = nvmf_subsystem_set_state(subsystem, intermediate_state);
	if (rc) {
		free(ctx);
		subsystem->changing_state = false;
		return rc;
	}

	ctx->subsystem = subsystem;
	ctx->nsid = nsid;
	ctx->requested_state = requested_state;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(subsystem->tgt,
			      subsystem_state_change_on_pg,
			      ctx,
			      subsystem_state_change_done);

	return 0;
}
633 
/* Asynchronously transition the subsystem to ACTIVE; cb_fn reports status. */
int
spdk_nvmf_subsystem_start(struct spdk_nvmf_subsystem *subsystem,
			  spdk_nvmf_subsystem_state_change_done cb_fn,
			  void *cb_arg)
{
	return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg);
}
641 
/* Asynchronously transition the subsystem to INACTIVE; cb_fn reports status. */
int
spdk_nvmf_subsystem_stop(struct spdk_nvmf_subsystem *subsystem,
			 spdk_nvmf_subsystem_state_change_done cb_fn,
			 void *cb_arg)
{
	return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_INACTIVE, cb_fn, cb_arg);
}
649 
/* Asynchronously transition the subsystem to PAUSED; nsid is forwarded to
 * the per-poll-group pause operation. cb_fn reports status. */
int
spdk_nvmf_subsystem_pause(struct spdk_nvmf_subsystem *subsystem,
			  uint32_t nsid,
			  spdk_nvmf_subsystem_state_change_done cb_fn,
			  void *cb_arg)
{
	return nvmf_subsystem_state_change(subsystem, nsid, SPDK_NVMF_SUBSYSTEM_PAUSED, cb_fn, cb_arg);
}
658 
/* Asynchronously return a paused subsystem to ACTIVE; cb_fn reports status. */
int
spdk_nvmf_subsystem_resume(struct spdk_nvmf_subsystem *subsystem,
			   spdk_nvmf_subsystem_state_change_done cb_fn,
			   void *cb_arg)
{
	return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg);
}
666 
667 struct spdk_nvmf_subsystem *
668 spdk_nvmf_subsystem_get_first(struct spdk_nvmf_tgt *tgt)
669 {
670 	struct spdk_nvmf_subsystem	*subsystem;
671 	uint32_t sid;
672 
673 	for (sid = 0; sid < tgt->max_subsystems; sid++) {
674 		subsystem = tgt->subsystems[sid];
675 		if (subsystem) {
676 			return subsystem;
677 		}
678 	}
679 
680 	return NULL;
681 }
682 
683 struct spdk_nvmf_subsystem *
684 spdk_nvmf_subsystem_get_next(struct spdk_nvmf_subsystem *subsystem)
685 {
686 	uint32_t sid;
687 	struct spdk_nvmf_tgt *tgt;
688 
689 	if (!subsystem) {
690 		return NULL;
691 	}
692 
693 	tgt = subsystem->tgt;
694 
695 	for (sid = subsystem->id + 1; sid < tgt->max_subsystems; sid++) {
696 		subsystem = tgt->subsystems[sid];
697 		if (subsystem) {
698 			return subsystem;
699 		}
700 	}
701 
702 	return NULL;
703 }
704 
705 /* Must hold subsystem->mutex while calling this function */
706 static struct spdk_nvmf_host *
707 nvmf_subsystem_find_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
708 {
709 	struct spdk_nvmf_host *host = NULL;
710 
711 	TAILQ_FOREACH(host, &subsystem->hosts, link) {
712 		if (strcmp(hostnqn, host->nqn) == 0) {
713 			return host;
714 		}
715 	}
716 
717 	return NULL;
718 }
719 
720 int
721 spdk_nvmf_subsystem_add_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
722 {
723 	struct spdk_nvmf_host *host;
724 
725 	if (!nvmf_valid_nqn(hostnqn)) {
726 		return -EINVAL;
727 	}
728 
729 	pthread_mutex_lock(&subsystem->mutex);
730 
731 	if (nvmf_subsystem_find_host(subsystem, hostnqn)) {
732 		/* This subsystem already allows the specified host. */
733 		pthread_mutex_unlock(&subsystem->mutex);
734 		return 0;
735 	}
736 
737 	host = calloc(1, sizeof(*host));
738 	if (!host) {
739 		pthread_mutex_unlock(&subsystem->mutex);
740 		return -ENOMEM;
741 	}
742 
743 	snprintf(host->nqn, sizeof(host->nqn), "%s", hostnqn);
744 
745 	TAILQ_INSERT_HEAD(&subsystem->hosts, host, link);
746 
747 	nvmf_update_discovery_log(subsystem->tgt, hostnqn);
748 
749 	pthread_mutex_unlock(&subsystem->mutex);
750 
751 	return 0;
752 }
753 
754 int
755 spdk_nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
756 {
757 	struct spdk_nvmf_host *host;
758 
759 	pthread_mutex_lock(&subsystem->mutex);
760 
761 	host = nvmf_subsystem_find_host(subsystem, hostnqn);
762 	if (host == NULL) {
763 		pthread_mutex_unlock(&subsystem->mutex);
764 		return -ENOENT;
765 	}
766 
767 	nvmf_subsystem_remove_host(subsystem, host);
768 	pthread_mutex_unlock(&subsystem->mutex);
769 
770 	return 0;
771 }
772 
/* Context for disconnecting every qpair owned by one host NQN. */
struct nvmf_subsystem_disconnect_host_ctx {
	struct spdk_nvmf_subsystem		*subsystem;
	/* Heap-allocated copy of the host NQN; freed in the fini callback. */
	char					*hostnqn;
	spdk_nvmf_tgt_subsystem_listen_done_fn	cb_fn;
	void					*cb_arg;
};
779 
/* Final callback once every poll group has processed the disconnect:
 * report the status to the user and release the context. */
static void
nvmf_subsystem_disconnect_host_fini(struct spdk_io_channel_iter *i, int status)
{
	struct nvmf_subsystem_disconnect_host_ctx *ctx;

	ctx = spdk_io_channel_iter_get_ctx(i);

	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->cb_arg, status);
	}
	free(ctx->hostnqn);
	free(ctx);
}
793 
794 static void
795 nvmf_subsystem_disconnect_qpairs_by_host(struct spdk_io_channel_iter *i)
796 {
797 	struct nvmf_subsystem_disconnect_host_ctx *ctx;
798 	struct spdk_nvmf_poll_group *group;
799 	struct spdk_io_channel *ch;
800 	struct spdk_nvmf_qpair *qpair, *tmp_qpair;
801 	struct spdk_nvmf_ctrlr *ctrlr;
802 
803 	ctx = spdk_io_channel_iter_get_ctx(i);
804 	ch = spdk_io_channel_iter_get_channel(i);
805 	group = spdk_io_channel_get_ctx(ch);
806 
807 	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) {
808 		ctrlr = qpair->ctrlr;
809 
810 		if (ctrlr == NULL || ctrlr->subsys != ctx->subsystem) {
811 			continue;
812 		}
813 
814 		if (strncmp(ctrlr->hostnqn, ctx->hostnqn, sizeof(ctrlr->hostnqn)) == 0) {
815 			/* Right now this does not wait for the queue pairs to actually disconnect. */
816 			spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
817 		}
818 	}
819 	spdk_for_each_channel_continue(i, 0);
820 }
821 
822 int
823 spdk_nvmf_subsystem_disconnect_host(struct spdk_nvmf_subsystem *subsystem,
824 				    const char *hostnqn,
825 				    spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
826 				    void *cb_arg)
827 {
828 	struct nvmf_subsystem_disconnect_host_ctx *ctx;
829 
830 	ctx = calloc(1, sizeof(struct nvmf_subsystem_disconnect_host_ctx));
831 	if (ctx == NULL) {
832 		return -ENOMEM;
833 	}
834 
835 	ctx->subsystem = subsystem;
836 	ctx->hostnqn = strdup(hostnqn);
837 	ctx->cb_fn = cb_fn;
838 	ctx->cb_arg = cb_arg;
839 
840 	spdk_for_each_channel(subsystem->tgt, nvmf_subsystem_disconnect_qpairs_by_host, ctx,
841 			      nvmf_subsystem_disconnect_host_fini);
842 
843 	return 0;
844 }
845 
/*
 * Set whether any host may connect regardless of the allowed-hosts list.
 * The discovery log is refreshed since subsystem visibility may change.
 * Always returns 0.
 */
int
spdk_nvmf_subsystem_set_allow_any_host(struct spdk_nvmf_subsystem *subsystem, bool allow_any_host)
{
	pthread_mutex_lock(&subsystem->mutex);
	subsystem->flags.allow_any_host = allow_any_host;
	nvmf_update_discovery_log(subsystem->tgt, NULL);
	pthread_mutex_unlock(&subsystem->mutex);

	return 0;
}
856 
/* Read the allow_any_host flag under the subsystem mutex. */
bool
spdk_nvmf_subsystem_get_allow_any_host(const struct spdk_nvmf_subsystem *subsystem)
{
	bool allow_any_host;
	struct spdk_nvmf_subsystem *sub;

	/* Technically, taking the mutex modifies data in the subsystem. But the const
	 * is still important to convey that this doesn't mutate any other data. Cast
	 * it away to work around this. */
	sub = (struct spdk_nvmf_subsystem *)subsystem;

	pthread_mutex_lock(&sub->mutex);
	allow_any_host = sub->flags.allow_any_host;
	pthread_mutex_unlock(&sub->mutex);

	return allow_any_host;
}
874 
875 bool
876 spdk_nvmf_subsystem_host_allowed(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
877 {
878 	bool allowed;
879 
880 	if (!hostnqn) {
881 		return false;
882 	}
883 
884 	pthread_mutex_lock(&subsystem->mutex);
885 
886 	if (subsystem->flags.allow_any_host) {
887 		pthread_mutex_unlock(&subsystem->mutex);
888 		return true;
889 	}
890 
891 	allowed =  nvmf_subsystem_find_host(subsystem, hostnqn) != NULL;
892 	pthread_mutex_unlock(&subsystem->mutex);
893 
894 	return allowed;
895 }
896 
/* Return the first entry of the allowed-hosts list, or NULL if empty. */
struct spdk_nvmf_host *
spdk_nvmf_subsystem_get_first_host(struct spdk_nvmf_subsystem *subsystem)
{
	return TAILQ_FIRST(&subsystem->hosts);
}
902 
903 
/* Return the allowed-host entry following prev_host, or NULL at the end.
 * prev_host must be a valid list entry (not NULL). */
struct spdk_nvmf_host *
spdk_nvmf_subsystem_get_next_host(struct spdk_nvmf_subsystem *subsystem,
				  struct spdk_nvmf_host *prev_host)
{
	return TAILQ_NEXT(prev_host, link);
}
910 
/* Return the host's NQN string; the storage is owned by the host object. */
const char *
spdk_nvmf_host_get_nqn(const struct spdk_nvmf_host *host)
{
	return host->nqn;
}
916 
917 struct spdk_nvmf_subsystem_listener *
918 nvmf_subsystem_find_listener(struct spdk_nvmf_subsystem *subsystem,
919 			     const struct spdk_nvme_transport_id *trid)
920 {
921 	struct spdk_nvmf_subsystem_listener *listener;
922 
923 	TAILQ_FOREACH(listener, &subsystem->listeners, link) {
924 		if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) {
925 			return listener;
926 		}
927 	}
928 
929 	return NULL;
930 }
931 
932 /**
933  * Function to be called once the target is listening.
934  *
935  * \param ctx Context argument passed to this function.
936  * \param status 0 if it completed successfully, or negative errno if it failed.
937  */
938 static void
939 _nvmf_subsystem_add_listener_done(void *ctx, int status)
940 {
941 	struct spdk_nvmf_subsystem_listener *listener = ctx;
942 
943 	if (status) {
944 		listener->cb_fn(listener->cb_arg, status);
945 		free(listener);
946 		return;
947 	}
948 
949 	TAILQ_INSERT_HEAD(&listener->subsystem->listeners, listener, link);
950 	nvmf_update_discovery_log(listener->subsystem->tgt, NULL);
951 	listener->cb_fn(listener->cb_arg, status);
952 }
953 
/*
 * Associate an existing transport listener (trid) with this subsystem so
 * hosts connecting on it may reach the subsystem. The subsystem must be
 * INACTIVE or PAUSED. Asynchronous: cb_fn is always invoked with 0 on
 * success (including an already-present listener) or a negative errno.
 */
void
spdk_nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem,
				 struct spdk_nvme_transport_id *trid,
				 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
				 void *cb_arg)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_subsystem_listener *listener;
	struct spdk_nvmf_listener *tr_listener;
	int rc = 0;

	assert(cb_fn != NULL);

	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		cb_fn(cb_arg, -EAGAIN);
		return;
	}

	if (nvmf_subsystem_find_listener(subsystem, trid)) {
		/* Listener already exists in this subsystem */
		cb_fn(cb_arg, 0);
		return;
	}

	transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, trid->trstring);
	if (!transport) {
		SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n",
			    trid->trstring);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	/* The transport-level listener for this trid must already exist. */
	tr_listener = nvmf_transport_find_listener(transport, trid);
	if (!tr_listener) {
		SPDK_ERRLOG("Cannot find transport listener for %s\n", trid->traddr);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	listener = calloc(1, sizeof(*listener));
	if (!listener) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	listener->trid = &tr_listener->trid;
	listener->transport = transport;
	listener->cb_fn = cb_fn;
	listener->cb_arg = cb_arg;
	listener->subsystem = subsystem;
	listener->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Give the transport a chance to accept or reject the association. */
	if (transport->ops->listen_associate != NULL) {
		rc = transport->ops->listen_associate(transport, subsystem, trid);
	}

	/* Completion path: publishes the listener on rc == 0, frees it otherwise. */
	_nvmf_subsystem_add_listener_done(listener, rc);
}
1013 
1014 int
1015 spdk_nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem,
1016 				    const struct spdk_nvme_transport_id *trid)
1017 {
1018 	struct spdk_nvmf_subsystem_listener *listener;
1019 
1020 	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
1021 	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
1022 		return -EAGAIN;
1023 	}
1024 
1025 	listener = nvmf_subsystem_find_listener(subsystem, trid);
1026 	if (listener == NULL) {
1027 		return -ENOENT;
1028 	}
1029 
1030 	_nvmf_subsystem_remove_listener(subsystem, listener, false);
1031 
1032 	return 0;
1033 }
1034 
void
nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem,
				    bool stop)
{
	/* Detach every listener from this subsystem; the stop flag is passed
	 * through to _nvmf_subsystem_remove_listener (defined earlier in this
	 * file — presumably it also stops the transport listener when true;
	 * confirm against that definition). */
	struct spdk_nvmf_subsystem_listener *listener, *listener_tmp;

	/* _SAFE variant because each iteration unlinks the current element. */
	TAILQ_FOREACH_SAFE(listener, &subsystem->listeners, link, listener_tmp) {
		_nvmf_subsystem_remove_listener(subsystem, listener, stop);
	}
}
1045 
1046 bool
1047 spdk_nvmf_subsystem_listener_allowed(struct spdk_nvmf_subsystem *subsystem,
1048 				     const struct spdk_nvme_transport_id *trid)
1049 {
1050 	struct spdk_nvmf_subsystem_listener *listener;
1051 
1052 	if (!strcmp(subsystem->subnqn, SPDK_NVMF_DISCOVERY_NQN)) {
1053 		return true;
1054 	}
1055 
1056 	TAILQ_FOREACH(listener, &subsystem->listeners, link) {
1057 		if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) {
1058 			return true;
1059 		}
1060 	}
1061 
1062 	return false;
1063 }
1064 
1065 struct spdk_nvmf_subsystem_listener *
1066 spdk_nvmf_subsystem_get_first_listener(struct spdk_nvmf_subsystem *subsystem)
1067 {
1068 	return TAILQ_FIRST(&subsystem->listeners);
1069 }
1070 
1071 struct spdk_nvmf_subsystem_listener *
1072 spdk_nvmf_subsystem_get_next_listener(struct spdk_nvmf_subsystem *subsystem,
1073 				      struct spdk_nvmf_subsystem_listener *prev_listener)
1074 {
1075 	return TAILQ_NEXT(prev_listener, link);
1076 }
1077 
1078 const struct spdk_nvme_transport_id *
1079 spdk_nvmf_subsystem_listener_get_trid(struct spdk_nvmf_subsystem_listener *listener)
1080 {
1081 	return listener->trid;
1082 }
1083 
void
spdk_nvmf_subsystem_allow_any_listener(struct spdk_nvmf_subsystem *subsystem,
				       bool allow_any_listener)
{
	/* When set, spdk_nvmf_subsystem_listener_allowed()'s per-subsystem
	 * listener check can be bypassed by callers that consult this flag. */
	subsystem->flags.allow_any_listener = allow_any_listener;
}
1090 
1091 bool
1092 spdk_nvmf_subsytem_any_listener_allowed(struct spdk_nvmf_subsystem *subsystem)
1093 {
1094 	return subsystem->flags.allow_any_listener;
1095 }
1096 
1097 
/* Context passed through spdk_for_each_channel() while propagating a
 * namespace change to every poll group. Freed in subsystem_update_ns_done. */
struct subsystem_update_ns_ctx {
	struct spdk_nvmf_subsystem *subsystem;

	/* Invoked (if non-NULL) with the final status once every poll group
	 * has been updated. */
	spdk_nvmf_subsystem_state_change_done cb_fn;
	void *cb_arg;
};
1104 
1105 static void
1106 subsystem_update_ns_done(struct spdk_io_channel_iter *i, int status)
1107 {
1108 	struct subsystem_update_ns_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
1109 
1110 	if (ctx->cb_fn) {
1111 		ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status);
1112 	}
1113 	free(ctx);
1114 }
1115 
1116 static void
1117 subsystem_update_ns_on_pg(struct spdk_io_channel_iter *i)
1118 {
1119 	int rc;
1120 	struct subsystem_update_ns_ctx *ctx;
1121 	struct spdk_nvmf_poll_group *group;
1122 	struct spdk_nvmf_subsystem *subsystem;
1123 
1124 	ctx = spdk_io_channel_iter_get_ctx(i);
1125 	group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i));
1126 	subsystem = ctx->subsystem;
1127 
1128 	rc = nvmf_poll_group_update_subsystem(group, subsystem);
1129 	spdk_for_each_channel_continue(i, rc);
1130 }
1131 
1132 static int
1133 nvmf_subsystem_update_ns(struct spdk_nvmf_subsystem *subsystem, spdk_channel_for_each_cpl cpl,
1134 			 void *ctx)
1135 {
1136 	spdk_for_each_channel(subsystem->tgt,
1137 			      subsystem_update_ns_on_pg,
1138 			      ctx,
1139 			      cpl);
1140 
1141 	return 0;
1142 }
1143 
1144 static void
1145 nvmf_subsystem_ns_changed(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
1146 {
1147 	struct spdk_nvmf_ctrlr *ctrlr;
1148 
1149 	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
1150 		nvmf_ctrlr_ns_changed(ctrlr, nsid);
1151 	}
1152 }
1153 
1154 static uint32_t
1155 nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns);
1156 
int
spdk_nvmf_subsystem_remove_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	/* Remove namespace nsid from the subsystem, releasing its bdev claim
	 * and reservation state, then notify transports and controllers.
	 * Returns 0 on success, -1 on any invalid argument or state. */
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_ns *ns;

	/* Removal is only legal while the subsystem is quiesced; a caller in
	 * any other state is a programming error. */
	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		assert(false);
		return -1;
	}

	/* NSIDs are 1-based; 0 and values past max_nsid are invalid. */
	if (nsid == 0 || nsid > subsystem->max_nsid) {
		return -1;
	}

	ns = subsystem->ns[nsid - 1];
	if (!ns) {
		return -1;
	}

	subsystem->ns[nsid - 1] = NULL;

	/* Tear down in order: PTPL file path, registrants, bdev claim,
	 * bdev descriptor, then the namespace object itself. */
	free(ns->ptpl_file);
	nvmf_ns_reservation_clear_all_registrants(ns);
	spdk_bdev_module_release_bdev(ns->bdev);
	spdk_bdev_close(ns->desc);
	free(ns);

	/* Let each transport drop any per-namespace resources it holds. */
	for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
	     transport = spdk_nvmf_transport_get_next(transport)) {
		if (transport->ops->subsystem_remove_ns) {
			transport->ops->subsystem_remove_ns(transport, subsystem, nsid);
		}
	}

	nvmf_subsystem_ns_changed(subsystem, nsid);

	return 0;
}
1197 
/* Context for asynchronous namespace hot-remove / resize handling; carries
 * the target subsystem and NSID across the pause -> change -> resume steps,
 * plus the pause-complete callback to retry with on -EBUSY. */
struct subsystem_ns_change_ctx {
	struct spdk_nvmf_subsystem		*subsystem;
	spdk_nvmf_subsystem_state_change_done	cb_fn;
	uint32_t				nsid;
};
1203 
1204 static void
1205 _nvmf_ns_hot_remove(struct spdk_nvmf_subsystem *subsystem,
1206 		    void *cb_arg, int status)
1207 {
1208 	struct subsystem_ns_change_ctx *ctx = cb_arg;
1209 	int rc;
1210 
1211 	rc = spdk_nvmf_subsystem_remove_ns(subsystem, ctx->nsid);
1212 	if (rc != 0) {
1213 		SPDK_ERRLOG("Failed to make changes to NVME-oF subsystem with id: %u\n", subsystem->id);
1214 	}
1215 
1216 	spdk_nvmf_subsystem_resume(subsystem, NULL, NULL);
1217 
1218 	free(ctx);
1219 }
1220 
1221 static void
1222 nvmf_ns_change_msg(void *ns_ctx)
1223 {
1224 	struct subsystem_ns_change_ctx *ctx = ns_ctx;
1225 	int rc;
1226 
1227 	rc = spdk_nvmf_subsystem_pause(ctx->subsystem, ctx->nsid, ctx->cb_fn, ctx);
1228 	if (rc) {
1229 		if (rc == -EBUSY) {
1230 			/* Try again, this is not a permanent situation. */
1231 			spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ctx);
1232 		} else {
1233 			free(ctx);
1234 			SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n");
1235 		}
1236 	}
1237 }
1238 
static void
nvmf_ns_hot_remove(void *remove_ctx)
{
	/* bdev hot-remove handler (invoked from nvmf_ns_event): pause the
	 * subsystem, quiescing I/O to the namespace, and remove it from
	 * _nvmf_ns_hot_remove() once the pause completes. */
	struct spdk_nvmf_ns *ns = remove_ctx;
	struct subsystem_ns_change_ctx *ns_ctx;
	int rc;

	/* We have to allocate a new context because this op
	 * is asynchronous and we could lose the ns in the middle.
	 */
	ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx));
	if (!ns_ctx) {
		SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n");
		return;
	}

	/* Copy everything we need out of ns — it may be freed before the
	 * pause completes. cb_fn is stored for the -EBUSY retry path. */
	ns_ctx->subsystem = ns->subsystem;
	ns_ctx->nsid = ns->opts.nsid;
	ns_ctx->cb_fn = _nvmf_ns_hot_remove;

	rc = spdk_nvmf_subsystem_pause(ns->subsystem, ns_ctx->nsid, _nvmf_ns_hot_remove, ns_ctx);
	if (rc) {
		if (rc == -EBUSY) {
			/* Try again, this is not a permanent situation. */
			spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx);
		} else {
			SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n");
			free(ns_ctx);
		}
	}
}
1270 
1271 static void
1272 _nvmf_ns_resize(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
1273 {
1274 	struct subsystem_ns_change_ctx *ctx = cb_arg;
1275 
1276 	nvmf_subsystem_ns_changed(subsystem, ctx->nsid);
1277 	spdk_nvmf_subsystem_resume(subsystem, NULL, NULL);
1278 
1279 	free(ctx);
1280 }
1281 
1282 static void
1283 nvmf_ns_resize(void *event_ctx)
1284 {
1285 	struct spdk_nvmf_ns *ns = event_ctx;
1286 	struct subsystem_ns_change_ctx *ns_ctx;
1287 	int rc;
1288 
1289 	/* We have to allocate a new context because this op
1290 	 * is asynchronous and we could lose the ns in the middle.
1291 	 */
1292 	ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx));
1293 	if (!ns_ctx) {
1294 		SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n");
1295 		return;
1296 	}
1297 
1298 	ns_ctx->subsystem = ns->subsystem;
1299 	ns_ctx->nsid = ns->opts.nsid;
1300 	ns_ctx->cb_fn = _nvmf_ns_resize;
1301 
1302 	/* Specify 0 for the nsid here, because we do not need to pause the namespace.
1303 	 * Namespaces can only be resized bigger, so there is no need to quiesce I/O.
1304 	 */
1305 	rc = spdk_nvmf_subsystem_pause(ns->subsystem, 0, _nvmf_ns_resize, ns_ctx);
1306 	if (rc) {
1307 		if (rc == -EBUSY) {
1308 			/* Try again, this is not a permanent situation. */
1309 			spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx);
1310 		} else {
1311 			SPDK_ERRLOG("Unable to pause subsystem to process namespace resize!\n");
1312 			free(ns_ctx);
1313 		}
1314 	}
1315 }
1316 
1317 static void
1318 nvmf_ns_event(enum spdk_bdev_event_type type,
1319 	      struct spdk_bdev *bdev,
1320 	      void *event_ctx)
1321 {
1322 	SPDK_DEBUGLOG(nvmf, "Bdev event: type %d, name %s, subsystem_id %d, ns_id %d\n",
1323 		      type,
1324 		      spdk_bdev_get_name(bdev),
1325 		      ((struct spdk_nvmf_ns *)event_ctx)->subsystem->id,
1326 		      ((struct spdk_nvmf_ns *)event_ctx)->nsid);
1327 
1328 	switch (type) {
1329 	case SPDK_BDEV_EVENT_REMOVE:
1330 		nvmf_ns_hot_remove(event_ctx);
1331 		break;
1332 	case SPDK_BDEV_EVENT_RESIZE:
1333 		nvmf_ns_resize(event_ctx);
1334 		break;
1335 	default:
1336 		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
1337 		break;
1338 	}
1339 }
1340 
void
spdk_nvmf_ns_opts_get_defaults(struct spdk_nvmf_ns_opts *opts, size_t opts_size)
{
	/* Initialize namespace options to defaults. opts_size allows callers
	 * built against an older, smaller struct to be handled safely — only
	 * the bytes they know about are written.
	 * All current fields are set to 0 by default. */
	memset(opts, 0, opts_size);
}
1347 
/* Dummy bdev module used to claim bdevs via spdk_bdev_module_claim_bdev()
 * when a namespace is added (see spdk_nvmf_subsystem_add_ns_ext). */
static struct spdk_bdev_module ns_bdev_module = {
	.name	= "NVMe-oF Target",
};
1352 
1353 static int
1354 nvmf_ns_load_reservation(const char *file, struct spdk_nvmf_reservation_info *info);
1355 static int
1356 nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info);
1357 
uint32_t
spdk_nvmf_subsystem_add_ns_ext(struct spdk_nvmf_subsystem *subsystem, const char *bdev_name,
			       const struct spdk_nvmf_ns_opts *user_opts, size_t opts_size,
			       const char *ptpl_file)
{
	/* Add the named bdev to the subsystem as a namespace, optionally
	 * restoring persisted reservation state from ptpl_file. Returns the
	 * assigned NSID on success, or 0 on any failure. The subsystem must
	 * be inactive or paused. */
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_ns_opts opts;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_reservation_info info = {0};
	int rc;

	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		return 0;
	}

	/* Start from defaults, then overlay whatever portion of the options
	 * struct the caller's build knows about. */
	spdk_nvmf_ns_opts_get_defaults(&opts, sizeof(opts));
	if (user_opts) {
		memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size));
	}

	if (opts.nsid == SPDK_NVME_GLOBAL_NS_TAG) {
		SPDK_ERRLOG("Invalid NSID %" PRIu32 "\n", opts.nsid);
		return 0;
	}

	if (opts.nsid == 0) {
		/*
		 * NSID not specified - find a free index.
		 *
		 * If no free slots are found, opts.nsid will be subsystem->max_nsid + 1, which will
		 * expand max_nsid if possible.
		 */
		for (opts.nsid = 1; opts.nsid <= subsystem->max_nsid; opts.nsid++) {
			if (_nvmf_subsystem_get_ns(subsystem, opts.nsid) == NULL) {
				break;
			}
		}
	}

	if (_nvmf_subsystem_get_ns(subsystem, opts.nsid)) {
		SPDK_ERRLOG("Requested NSID %" PRIu32 " already in use\n", opts.nsid);
		return 0;
	}

	if (opts.nsid > subsystem->max_nsid) {
		SPDK_ERRLOG("NSID greater than maximum not allowed\n");
		return 0;
	}

	ns = calloc(1, sizeof(*ns));
	if (ns == NULL) {
		SPDK_ERRLOG("Namespace allocation failed\n");
		return 0;
	}

	/* Open the bdev writable; nvmf_ns_event handles remove/resize. */
	rc = spdk_bdev_open_ext(bdev_name, true, nvmf_ns_event, ns, &ns->desc);
	if (rc != 0) {
		SPDK_ERRLOG("Subsystem %s: bdev %s cannot be opened, error=%d\n",
			    subsystem->subnqn, bdev_name, rc);
		free(ns);
		return 0;
	}

	ns->bdev = spdk_bdev_desc_get_bdev(ns->desc);

	/* Separate (non-interleaved) metadata is not supported. */
	if (spdk_bdev_get_md_size(ns->bdev) != 0 && !spdk_bdev_is_md_interleaved(ns->bdev)) {
		SPDK_ERRLOG("Can't attach bdev with separate metadata.\n");
		spdk_bdev_close(ns->desc);
		free(ns);
		return 0;
	}

	/* Claim the bdev so no other module takes ownership of it. */
	rc = spdk_bdev_module_claim_bdev(ns->bdev, ns->desc, &ns_bdev_module);
	if (rc != 0) {
		spdk_bdev_close(ns->desc);
		free(ns);
		return 0;
	}

	/* Default the namespace UUID to the bdev's when none was supplied. */
	if (spdk_mem_all_zero(&opts.uuid, sizeof(opts.uuid))) {
		opts.uuid = *spdk_bdev_get_uuid(ns->bdev);
	}

	ns->opts = opts;
	ns->subsystem = subsystem;
	subsystem->ns[opts.nsid - 1] = ns;
	ns->nsid = opts.nsid;
	TAILQ_INIT(&ns->registrants);

	if (ptpl_file) {
		/* A missing/unreadable file is not fatal — restore only runs
		 * when the load succeeds. */
		rc = nvmf_ns_load_reservation(ptpl_file, &info);
		if (!rc) {
			rc = nvmf_ns_reservation_restore(ns, &info);
			if (rc) {
				SPDK_ERRLOG("Subsystem restore reservation failed\n");
				/* NOTE(review): registrants partially inserted by a
				 * failed restore are not cleared here before free(ns)
				 * — looks like a leak; confirm. */
				subsystem->ns[opts.nsid - 1] = NULL;
				spdk_bdev_module_release_bdev(ns->bdev);
				spdk_bdev_close(ns->desc);
				free(ns);
				return 0;
			}
		}
		/* NOTE(review): strdup() result is unchecked; on allocation
		 * failure the PTPL path is silently lost — confirm acceptable. */
		ns->ptpl_file = strdup(ptpl_file);
	}

	/* Offer the namespace to every transport; any veto unwinds the add. */
	for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
	     transport = spdk_nvmf_transport_get_next(transport)) {
		if (transport->ops->subsystem_add_ns) {
			rc = transport->ops->subsystem_add_ns(transport, subsystem, ns);
			if (rc) {
				SPDK_ERRLOG("Namespace attachment is not allowed by %s transport\n", transport->ops->name);
				free(ns->ptpl_file);
				nvmf_ns_reservation_clear_all_registrants(ns);
				subsystem->ns[opts.nsid - 1] = NULL;
				spdk_bdev_module_release_bdev(ns->bdev);
				spdk_bdev_close(ns->desc);
				free(ns);
				return 0;
			}
		}
	}

	SPDK_DEBUGLOG(nvmf, "Subsystem %s: bdev %s assigned nsid %" PRIu32 "\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem),
		      bdev_name,
		      opts.nsid);

	nvmf_subsystem_ns_changed(subsystem, opts.nsid);

	return opts.nsid;
}
1490 
1491 static uint32_t
1492 nvmf_subsystem_get_next_allocated_nsid(struct spdk_nvmf_subsystem *subsystem,
1493 				       uint32_t prev_nsid)
1494 {
1495 	uint32_t nsid;
1496 
1497 	if (prev_nsid >= subsystem->max_nsid) {
1498 		return 0;
1499 	}
1500 
1501 	for (nsid = prev_nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
1502 		if (subsystem->ns[nsid - 1]) {
1503 			return nsid;
1504 		}
1505 	}
1506 
1507 	return 0;
1508 }
1509 
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	/* Lowest-numbered allocated namespace, or NULL when there is none. */
	return _nvmf_subsystem_get_ns(subsystem,
				      nvmf_subsystem_get_next_allocated_nsid(subsystem, 0));
}
1518 
1519 struct spdk_nvmf_ns *
1520 spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
1521 				struct spdk_nvmf_ns *prev_ns)
1522 {
1523 	uint32_t next_nsid;
1524 
1525 	next_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, prev_ns->opts.nsid);
1526 	return _nvmf_subsystem_get_ns(subsystem, next_nsid);
1527 }
1528 
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	/* Public wrapper around the internal NSID lookup. */
	struct spdk_nvmf_ns *ns;

	ns = _nvmf_subsystem_get_ns(subsystem, nsid);
	return ns;
}
1534 
1535 uint32_t
1536 spdk_nvmf_ns_get_id(const struct spdk_nvmf_ns *ns)
1537 {
1538 	return ns->opts.nsid;
1539 }
1540 
1541 struct spdk_bdev *
1542 spdk_nvmf_ns_get_bdev(struct spdk_nvmf_ns *ns)
1543 {
1544 	return ns->bdev;
1545 }
1546 
1547 void
1548 spdk_nvmf_ns_get_opts(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_ns_opts *opts,
1549 		      size_t opts_size)
1550 {
1551 	memset(opts, 0, opts_size);
1552 	memcpy(opts, &ns->opts, spdk_min(sizeof(ns->opts), opts_size));
1553 }
1554 
1555 const char *
1556 spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
1557 {
1558 	return subsystem->sn;
1559 }
1560 
1561 int
1562 spdk_nvmf_subsystem_set_sn(struct spdk_nvmf_subsystem *subsystem, const char *sn)
1563 {
1564 	size_t len, max_len;
1565 
1566 	max_len = sizeof(subsystem->sn) - 1;
1567 	len = strlen(sn);
1568 	if (len > max_len) {
1569 		SPDK_DEBUGLOG(nvmf, "Invalid sn \"%s\": length %zu > max %zu\n",
1570 			      sn, len, max_len);
1571 		return -1;
1572 	}
1573 
1574 	if (!nvmf_valid_ascii_string(sn, len)) {
1575 		SPDK_DEBUGLOG(nvmf, "Non-ASCII sn\n");
1576 		SPDK_LOGDUMP(nvmf, "sn", sn, len);
1577 		return -1;
1578 	}
1579 
1580 	snprintf(subsystem->sn, sizeof(subsystem->sn), "%s", sn);
1581 
1582 	return 0;
1583 }
1584 
1585 const char *
1586 spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
1587 {
1588 	return subsystem->mn;
1589 }
1590 
1591 int
1592 spdk_nvmf_subsystem_set_mn(struct spdk_nvmf_subsystem *subsystem, const char *mn)
1593 {
1594 	size_t len, max_len;
1595 
1596 	if (mn == NULL) {
1597 		mn = MODEL_NUMBER_DEFAULT;
1598 	}
1599 	max_len = sizeof(subsystem->mn) - 1;
1600 	len = strlen(mn);
1601 	if (len > max_len) {
1602 		SPDK_DEBUGLOG(nvmf, "Invalid mn \"%s\": length %zu > max %zu\n",
1603 			      mn, len, max_len);
1604 		return -1;
1605 	}
1606 
1607 	if (!nvmf_valid_ascii_string(mn, len)) {
1608 		SPDK_DEBUGLOG(nvmf, "Non-ASCII mn\n");
1609 		SPDK_LOGDUMP(nvmf, "mn", mn, len);
1610 		return -1;
1611 	}
1612 
1613 	snprintf(subsystem->mn, sizeof(subsystem->mn), "%s", mn);
1614 
1615 	return 0;
1616 }
1617 
1618 const char *
1619 spdk_nvmf_subsystem_get_nqn(const struct spdk_nvmf_subsystem *subsystem)
1620 {
1621 	return subsystem->subnqn;
1622 }
1623 
1624 enum spdk_nvmf_subtype spdk_nvmf_subsystem_get_type(struct spdk_nvmf_subsystem *subsystem)
1625 {
1626 	return subsystem->subtype;
1627 }
1628 
1629 uint32_t
1630 spdk_nvmf_subsystem_get_max_nsid(struct spdk_nvmf_subsystem *subsystem)
1631 {
1632 	return subsystem->max_nsid;
1633 }
1634 
static uint16_t
nvmf_subsystem_gen_cntlid(struct spdk_nvmf_subsystem *subsystem)
{
	/* Allocate the next unused controller ID, scanning round-robin from
	 * subsystem->next_cntlid. Valid CNTLIDs are 1..0xFFEF; returns 0xFFFF
	 * when every usable value is taken. */
	int count;

	/*
	 * In the worst case, we might have to try all CNTLID values between 1 and 0xFFF0 - 1
	 * before we find one that is unused (or find that all values are in use).
	 */
	for (count = 0; count < 0xFFF0 - 1; count++) {
		subsystem->next_cntlid++;
		if (subsystem->next_cntlid >= 0xFFF0) {
			/* The spec reserves cntlid values in the range FFF0h to FFFFh. */
			subsystem->next_cntlid = 1;
		}

		/* Check if a controller with this cntlid currently exists. */
		if (nvmf_subsystem_get_ctrlr(subsystem, subsystem->next_cntlid) == NULL) {
			/* Found unused cntlid */
			return subsystem->next_cntlid;
		}
	}

	/* All valid cntlid values are in use. */
	return 0xFFFF;
}
1661 
1662 int
1663 nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr)
1664 {
1665 	ctrlr->cntlid = nvmf_subsystem_gen_cntlid(subsystem);
1666 	if (ctrlr->cntlid == 0xFFFF) {
1667 		/* Unable to get a cntlid */
1668 		SPDK_ERRLOG("Reached max simultaneous ctrlrs\n");
1669 		return -EBUSY;
1670 	}
1671 
1672 	TAILQ_INSERT_TAIL(&subsystem->ctrlrs, ctrlr, link);
1673 
1674 	return 0;
1675 }
1676 
1677 void
1678 nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
1679 			    struct spdk_nvmf_ctrlr *ctrlr)
1680 {
1681 	assert(subsystem == ctrlr->subsys);
1682 	TAILQ_REMOVE(&subsystem->ctrlrs, ctrlr, link);
1683 }
1684 
1685 struct spdk_nvmf_ctrlr *
1686 nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid)
1687 {
1688 	struct spdk_nvmf_ctrlr *ctrlr;
1689 
1690 	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
1691 		if (ctrlr->cntlid == cntlid) {
1692 			return ctrlr;
1693 		}
1694 	}
1695 
1696 	return NULL;
1697 }
1698 
1699 uint32_t
1700 spdk_nvmf_subsystem_get_max_namespaces(const struct spdk_nvmf_subsystem *subsystem)
1701 {
1702 	return subsystem->max_nsid;
1703 }
1704 
/* Deserialized form of one registrant entry from the PTPL JSON file. */
struct _nvmf_ns_registrant {
	uint64_t		rkey;		/* registration key */
	char			*host_uuid;	/* heap-allocated; freed by the loader */
};

/* Fixed-capacity array of registrants decoded from the "registrants" array. */
struct _nvmf_ns_registrants {
	size_t				num_regs;
	struct _nvmf_ns_registrant	reg[SPDK_NVMF_MAX_NUM_REGISTRANTS];
};

/* Top-level deserialized PTPL reservation record; string members are
 * heap-allocated by the JSON decoder and freed by nvmf_ns_load_reservation. */
struct _nvmf_ns_reservation {
	bool					ptpl_activated;
	enum spdk_nvme_reservation_type		rtype;
	uint64_t				crkey;
	char					*bdev_uuid;
	char					*holder_uuid;
	struct _nvmf_ns_registrants		regs;
};
1723 
/* JSON field decoders for a single persisted registrant object; both
 * fields are required. */
static const struct spdk_json_object_decoder nvmf_ns_pr_reg_decoders[] = {
	{"rkey", offsetof(struct _nvmf_ns_registrant, rkey), spdk_json_decode_uint64},
	{"host_uuid", offsetof(struct _nvmf_ns_registrant, host_uuid), spdk_json_decode_string},
};
1728 
1729 static int
1730 nvmf_decode_ns_pr_reg(const struct spdk_json_val *val, void *out)
1731 {
1732 	struct _nvmf_ns_registrant *reg = out;
1733 
1734 	return spdk_json_decode_object(val, nvmf_ns_pr_reg_decoders,
1735 				       SPDK_COUNTOF(nvmf_ns_pr_reg_decoders), reg);
1736 }
1737 
1738 static int
1739 nvmf_decode_ns_pr_regs(const struct spdk_json_val *val, void *out)
1740 {
1741 	struct _nvmf_ns_registrants *regs = out;
1742 
1743 	return spdk_json_decode_array(val, nvmf_decode_ns_pr_reg, regs->reg,
1744 				      SPDK_NVMF_MAX_NUM_REGISTRANTS, &regs->num_regs,
1745 				      sizeof(struct _nvmf_ns_registrant));
1746 }
1747 
/* JSON field decoders for the top-level PTPL record. The trailing `true`
 * marks a field optional; bdev_uuid and registrants are required. */
static const struct spdk_json_object_decoder nvmf_ns_pr_decoders[] = {
	{"ptpl", offsetof(struct _nvmf_ns_reservation, ptpl_activated), spdk_json_decode_bool, true},
	{"rtype", offsetof(struct _nvmf_ns_reservation, rtype), spdk_json_decode_uint32, true},
	{"crkey", offsetof(struct _nvmf_ns_reservation, crkey), spdk_json_decode_uint64, true},
	{"bdev_uuid", offsetof(struct _nvmf_ns_reservation, bdev_uuid), spdk_json_decode_string},
	{"holder_uuid", offsetof(struct _nvmf_ns_reservation, holder_uuid), spdk_json_decode_string, true},
	{"registrants", offsetof(struct _nvmf_ns_reservation, regs), nvmf_decode_ns_pr_regs},
};
1756 
1757 static int
1758 nvmf_ns_load_reservation(const char *file, struct spdk_nvmf_reservation_info *info)
1759 {
1760 	FILE *fd;
1761 	size_t json_size;
1762 	ssize_t values_cnt, rc;
1763 	void *json = NULL, *end;
1764 	struct spdk_json_val *values = NULL;
1765 	struct _nvmf_ns_reservation res = {};
1766 	uint32_t i;
1767 
1768 	fd = fopen(file, "r");
1769 	/* It's not an error if the file does not exist */
1770 	if (!fd) {
1771 		SPDK_NOTICELOG("File %s does not exist\n", file);
1772 		return -ENOENT;
1773 	}
1774 
1775 	/* Load all persist file contents into a local buffer */
1776 	json = spdk_posix_file_load(fd, &json_size);
1777 	fclose(fd);
1778 	if (!json) {
1779 		SPDK_ERRLOG("Load persit file %s failed\n", file);
1780 		return -ENOMEM;
1781 	}
1782 
1783 	rc = spdk_json_parse(json, json_size, NULL, 0, &end, 0);
1784 	if (rc < 0) {
1785 		SPDK_NOTICELOG("Parsing JSON configuration failed (%zd)\n", rc);
1786 		goto exit;
1787 	}
1788 
1789 	values_cnt = rc;
1790 	values = calloc(values_cnt, sizeof(struct spdk_json_val));
1791 	if (values == NULL) {
1792 		goto exit;
1793 	}
1794 
1795 	rc = spdk_json_parse(json, json_size, values, values_cnt, &end, 0);
1796 	if (rc != values_cnt) {
1797 		SPDK_ERRLOG("Parsing JSON configuration failed (%zd)\n", rc);
1798 		goto exit;
1799 	}
1800 
1801 	/* Decode json */
1802 	if (spdk_json_decode_object(values, nvmf_ns_pr_decoders,
1803 				    SPDK_COUNTOF(nvmf_ns_pr_decoders),
1804 				    &res)) {
1805 		SPDK_ERRLOG("Invalid objects in the persist file %s\n", file);
1806 		rc = -EINVAL;
1807 		goto exit;
1808 	}
1809 
1810 	if (res.regs.num_regs > SPDK_NVMF_MAX_NUM_REGISTRANTS) {
1811 		SPDK_ERRLOG("Can only support up to %u registrants\n", SPDK_NVMF_MAX_NUM_REGISTRANTS);
1812 		rc = -ERANGE;
1813 		goto exit;
1814 	}
1815 
1816 	rc = 0;
1817 	info->ptpl_activated = res.ptpl_activated;
1818 	info->rtype = res.rtype;
1819 	info->crkey = res.crkey;
1820 	snprintf(info->bdev_uuid, sizeof(info->bdev_uuid), "%s", res.bdev_uuid);
1821 	snprintf(info->holder_uuid, sizeof(info->holder_uuid), "%s", res.holder_uuid);
1822 	info->num_regs = res.regs.num_regs;
1823 	for (i = 0; i < res.regs.num_regs; i++) {
1824 		info->registrants[i].rkey = res.regs.reg[i].rkey;
1825 		snprintf(info->registrants[i].host_uuid, sizeof(info->registrants[i].host_uuid), "%s",
1826 			 res.regs.reg[i].host_uuid);
1827 	}
1828 
1829 exit:
1830 	free(json);
1831 	free(values);
1832 	free(res.bdev_uuid);
1833 	free(res.holder_uuid);
1834 	for (i = 0; i < res.regs.num_regs; i++) {
1835 		free(res.regs.reg[i].host_uuid);
1836 	}
1837 
1838 	return rc;
1839 }
1840 
1841 static bool
1842 nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns);
1843 
static int
nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info)
{
	/* Rebuild the namespace's in-memory reservation state (registrants,
	 * holder, rtype, crkey) from a loaded PTPL record. Returns 0 on
	 * success or a negative errno. */
	uint32_t i;
	struct spdk_nvmf_registrant *reg, *holder = NULL;
	struct spdk_uuid bdev_uuid, holder_uuid;

	SPDK_DEBUGLOG(nvmf, "NSID %u, PTPL %u, Number of registrants %u\n",
		      ns->nsid, info->ptpl_activated, info->num_regs);

	/* it's not an error */
	if (!info->ptpl_activated || !info->num_regs) {
		return 0;
	}

	/* The persisted record must describe this namespace's bdev. */
	spdk_uuid_parse(&bdev_uuid, info->bdev_uuid);
	if (spdk_uuid_compare(&bdev_uuid, spdk_bdev_get_uuid(ns->bdev))) {
		SPDK_ERRLOG("Existing bdev UUID is not same with configuration file\n");
		return -EINVAL;
	}

	ns->crkey = info->crkey;
	ns->rtype = info->rtype;
	ns->ptpl_activated = info->ptpl_activated;
	spdk_uuid_parse(&holder_uuid, info->holder_uuid);

	SPDK_DEBUGLOG(nvmf, "Bdev UUID %s\n", info->bdev_uuid);
	if (info->rtype) {
		SPDK_DEBUGLOG(nvmf, "Holder UUID %s, RTYPE %u, RKEY 0x%"PRIx64"\n",
			      info->holder_uuid, info->rtype, info->crkey);
	}

	/* Re-create each registrant and remember which one is the holder. */
	for (i = 0; i < info->num_regs; i++) {
		reg = calloc(1, sizeof(*reg));
		if (!reg) {
			/* NOTE(review): registrants already inserted above stay
			 * on ns->registrants; the caller's failure path does not
			 * appear to clear them — confirm whether this leaks. */
			return -ENOMEM;
		}
		spdk_uuid_parse(&reg->hostid, info->registrants[i].host_uuid);
		reg->rkey = info->registrants[i].rkey;
		TAILQ_INSERT_TAIL(&ns->registrants, reg, link);
		if (!spdk_uuid_compare(&holder_uuid, &reg->hostid)) {
			holder = reg;
		}
		SPDK_DEBUGLOG(nvmf, "Registrant RKEY 0x%"PRIx64", Host UUID %s\n",
			      info->registrants[i].rkey, info->registrants[i].host_uuid);
	}

	/* In all-registrants mode any registrant can stand in as holder. */
	if (nvmf_ns_reservation_all_registrants_type(ns)) {
		ns->holder = TAILQ_FIRST(&ns->registrants);
	} else {
		ns->holder = holder;
	}

	return 0;
}
1899 
static int
nvmf_ns_json_write_cb(void *cb_ctx, const void *data, size_t size)
{
	/* spdk_json_write_begin() flush callback: persist the encoded JSON to
	 * the file whose path is carried in cb_ctx. Returns 0 on success,
	 * -ENOENT when the file cannot be opened, -1 on a write failure. */
	char *file = cb_ctx;
	size_t rc;
	FILE *fd;

	fd = fopen(file, "w");
	if (!fd) {
		SPDK_ERRLOG("Can't open file %s for write\n", file);
		return -ENOENT;
	}
	rc = fwrite(data, 1, size, fd);
	/* Fix: fclose() flushes buffered data and was previously unchecked —
	 * a flush failure would have been reported as success, leaving a
	 * truncated reservation file on disk. */
	if (fclose(fd) != 0) {
		SPDK_ERRLOG("Can't close file %s after write\n", file);
		return -1;
	}

	return rc == size ? 0 : -1;
}
1917 
static int
nvmf_ns_reservation_update(const char *file, struct spdk_nvmf_reservation_info *info)
{
	/* Serialize *info as JSON into the PTPL file via nvmf_ns_json_write_cb.
	 * Returns the result of spdk_json_write_end() (0 on success), or
	 * -ENOMEM if the writer cannot be created. */
	struct spdk_json_write_ctx *w;
	uint32_t i;
	int rc = 0;

	w = spdk_json_write_begin(nvmf_ns_json_write_cb, (void *)file, 0);
	if (w == NULL) {
		return -ENOMEM;
	}
	/* clear the configuration file: when PTPL is not activated, skip all
	 * output so the file is rewritten without reservation content. */
	if (!info->ptpl_activated) {
		goto exit;
	}

	spdk_json_write_object_begin(w);
	spdk_json_write_named_bool(w, "ptpl", info->ptpl_activated);
	spdk_json_write_named_uint32(w, "rtype", info->rtype);
	spdk_json_write_named_uint64(w, "crkey", info->crkey);
	spdk_json_write_named_string(w, "bdev_uuid", info->bdev_uuid);
	spdk_json_write_named_string(w, "holder_uuid", info->holder_uuid);

	/* One {rkey, host_uuid} object per registrant. */
	spdk_json_write_named_array_begin(w, "registrants");
	for (i = 0; i < info->num_regs; i++) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_uint64(w, "rkey", info->registrants[i].rkey);
		spdk_json_write_named_string(w, "host_uuid", info->registrants[i].host_uuid);
		spdk_json_write_object_end(w);
	}
	spdk_json_write_array_end(w);
	spdk_json_write_object_end(w);

exit:
	rc = spdk_json_write_end(w);
	return rc;
}
1955 
static int
nvmf_ns_update_reservation_info(struct spdk_nvmf_ns *ns)
{
	/* Snapshot the namespace's current reservation state (rtype, keys,
	 * holder, registrants) and persist it to ns->ptpl_file. Returns 0
	 * when there is nothing to persist (no bdev or no PTPL file), else
	 * the result of nvmf_ns_reservation_update(). */
	struct spdk_nvmf_reservation_info info;
	struct spdk_nvmf_registrant *reg, *tmp;
	uint32_t i = 0;

	assert(ns != NULL);

	if (!ns->bdev || !ns->ptpl_file) {
		return 0;
	}

	memset(&info, 0, sizeof(info));
	spdk_uuid_fmt_lower(info.bdev_uuid, sizeof(info.bdev_uuid), spdk_bdev_get_uuid(ns->bdev));

	if (ns->rtype) {
		info.rtype = ns->rtype;
		info.crkey = ns->crkey;
		/* All-registrants reservations have no single holder to record. */
		if (!nvmf_ns_reservation_all_registrants_type(ns)) {
			assert(ns->holder != NULL);
			spdk_uuid_fmt_lower(info.holder_uuid, sizeof(info.holder_uuid), &ns->holder->hostid);
		}
	}

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		spdk_uuid_fmt_lower(info.registrants[i].host_uuid, sizeof(info.registrants[i].host_uuid),
				    &reg->hostid);
		info.registrants[i++].rkey = reg->rkey;
	}

	info.num_regs = i;
	info.ptpl_activated = ns->ptpl_activated;

	return nvmf_ns_reservation_update(ns->ptpl_file, &info);
}
1992 
1993 static struct spdk_nvmf_registrant *
1994 nvmf_ns_reservation_get_registrant(struct spdk_nvmf_ns *ns,
1995 				   struct spdk_uuid *uuid)
1996 {
1997 	struct spdk_nvmf_registrant *reg, *tmp;
1998 
1999 	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
2000 		if (!spdk_uuid_compare(&reg->hostid, uuid)) {
2001 			return reg;
2002 		}
2003 	}
2004 
2005 	return NULL;
2006 }
2007 
2008 /* Generate reservation notice log to registered HostID controllers */
2009 static void
2010 nvmf_subsystem_gen_ctrlr_notification(struct spdk_nvmf_subsystem *subsystem,
2011 				      struct spdk_nvmf_ns *ns,
2012 				      struct spdk_uuid *hostid_list,
2013 				      uint32_t num_hostid,
2014 				      enum spdk_nvme_reservation_notification_log_page_type type)
2015 {
2016 	struct spdk_nvmf_ctrlr *ctrlr;
2017 	uint32_t i;
2018 
2019 	for (i = 0; i < num_hostid; i++) {
2020 		TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
2021 			if (!spdk_uuid_compare(&ctrlr->hostid, &hostid_list[i])) {
2022 				nvmf_ctrlr_reservation_notice_log(ctrlr, ns, type);
2023 			}
2024 		}
2025 	}
2026 }
2027 
2028 /* Get all registrants' hostid other than the controller who issued the command */
2029 static uint32_t
2030 nvmf_ns_reservation_get_all_other_hostid(struct spdk_nvmf_ns *ns,
2031 		struct spdk_uuid *hostid_list,
2032 		uint32_t max_num_hostid,
2033 		struct spdk_uuid *current_hostid)
2034 {
2035 	struct spdk_nvmf_registrant *reg, *tmp;
2036 	uint32_t num_hostid = 0;
2037 
2038 	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
2039 		if (spdk_uuid_compare(&reg->hostid, current_hostid)) {
2040 			if (num_hostid == max_num_hostid) {
2041 				assert(false);
2042 				return max_num_hostid;
2043 			}
2044 			hostid_list[num_hostid++] = reg->hostid;
2045 		}
2046 	}
2047 
2048 	return num_hostid;
2049 }
2050 
2051 /* Calculate the unregistered HostID list according to list
2052  * prior to execute preempt command and list after executing
2053  * preempt command.
2054  */
2055 static uint32_t
2056 nvmf_ns_reservation_get_unregistered_hostid(struct spdk_uuid *old_hostid_list,
2057 		uint32_t old_num_hostid,
2058 		struct spdk_uuid *remaining_hostid_list,
2059 		uint32_t remaining_num_hostid)
2060 {
2061 	struct spdk_uuid temp_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
2062 	uint32_t i, j, num_hostid = 0;
2063 	bool found;
2064 
2065 	if (!remaining_num_hostid) {
2066 		return old_num_hostid;
2067 	}
2068 
2069 	for (i = 0; i < old_num_hostid; i++) {
2070 		found = false;
2071 		for (j = 0; j < remaining_num_hostid; j++) {
2072 			if (!spdk_uuid_compare(&old_hostid_list[i], &remaining_hostid_list[j])) {
2073 				found = true;
2074 				break;
2075 			}
2076 		}
2077 		if (!found) {
2078 			spdk_uuid_copy(&temp_hostid_list[num_hostid++], &old_hostid_list[i]);
2079 		}
2080 	}
2081 
2082 	if (num_hostid) {
2083 		memcpy(old_hostid_list, temp_hostid_list, sizeof(struct spdk_uuid) * num_hostid);
2084 	}
2085 
2086 	return num_hostid;
2087 }
2088 
2089 /* current reservation type is all registrants or not */
2090 static bool
2091 nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns)
2092 {
2093 	return (ns->rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS ||
2094 		ns->rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
2095 }
2096 
2097 /* current registrant is reservation holder or not */
2098 static bool
2099 nvmf_ns_reservation_registrant_is_holder(struct spdk_nvmf_ns *ns,
2100 		struct spdk_nvmf_registrant *reg)
2101 {
2102 	if (!reg) {
2103 		return false;
2104 	}
2105 
2106 	if (nvmf_ns_reservation_all_registrants_type(ns)) {
2107 		return true;
2108 	}
2109 
2110 	return (ns->holder == reg);
2111 }
2112 
2113 static int
2114 nvmf_ns_reservation_add_registrant(struct spdk_nvmf_ns *ns,
2115 				   struct spdk_nvmf_ctrlr *ctrlr,
2116 				   uint64_t nrkey)
2117 {
2118 	struct spdk_nvmf_registrant *reg;
2119 
2120 	reg = calloc(1, sizeof(*reg));
2121 	if (!reg) {
2122 		return -ENOMEM;
2123 	}
2124 
2125 	reg->rkey = nrkey;
2126 	/* set hostid for the registrant */
2127 	spdk_uuid_copy(&reg->hostid, &ctrlr->hostid);
2128 	TAILQ_INSERT_TAIL(&ns->registrants, reg, link);
2129 	ns->gen++;
2130 
2131 	return 0;
2132 }
2133 
2134 static void
2135 nvmf_ns_reservation_release_reservation(struct spdk_nvmf_ns *ns)
2136 {
2137 	ns->rtype = 0;
2138 	ns->crkey = 0;
2139 	ns->holder = NULL;
2140 }
2141 
2142 /* release the reservation if the last registrant was removed */
2143 static void
2144 nvmf_ns_reservation_check_release_on_remove_registrant(struct spdk_nvmf_ns *ns,
2145 		struct spdk_nvmf_registrant *reg)
2146 {
2147 	struct spdk_nvmf_registrant *next_reg;
2148 
2149 	/* no reservation holder */
2150 	if (!ns->holder) {
2151 		assert(ns->rtype == 0);
2152 		return;
2153 	}
2154 
2155 	next_reg = TAILQ_FIRST(&ns->registrants);
2156 	if (next_reg && nvmf_ns_reservation_all_registrants_type(ns)) {
2157 		/* the next valid registrant is the new holder now */
2158 		ns->holder = next_reg;
2159 	} else if (nvmf_ns_reservation_registrant_is_holder(ns, reg)) {
2160 		/* release the reservation */
2161 		nvmf_ns_reservation_release_reservation(ns);
2162 	}
2163 }
2164 
2165 static void
2166 nvmf_ns_reservation_remove_registrant(struct spdk_nvmf_ns *ns,
2167 				      struct spdk_nvmf_registrant *reg)
2168 {
2169 	TAILQ_REMOVE(&ns->registrants, reg, link);
2170 	nvmf_ns_reservation_check_release_on_remove_registrant(ns, reg);
2171 	free(reg);
2172 	ns->gen++;
2173 	return;
2174 }
2175 
2176 static uint32_t
2177 nvmf_ns_reservation_remove_registrants_by_key(struct spdk_nvmf_ns *ns,
2178 		uint64_t rkey)
2179 {
2180 	struct spdk_nvmf_registrant *reg, *tmp;
2181 	uint32_t count = 0;
2182 
2183 	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
2184 		if (reg->rkey == rkey) {
2185 			nvmf_ns_reservation_remove_registrant(ns, reg);
2186 			count++;
2187 		}
2188 	}
2189 	return count;
2190 }
2191 
2192 static uint32_t
2193 nvmf_ns_reservation_remove_all_other_registrants(struct spdk_nvmf_ns *ns,
2194 		struct spdk_nvmf_registrant *reg)
2195 {
2196 	struct spdk_nvmf_registrant *reg_tmp, *reg_tmp2;
2197 	uint32_t count = 0;
2198 
2199 	TAILQ_FOREACH_SAFE(reg_tmp, &ns->registrants, link, reg_tmp2) {
2200 		if (reg_tmp != reg) {
2201 			nvmf_ns_reservation_remove_registrant(ns, reg_tmp);
2202 			count++;
2203 		}
2204 	}
2205 	return count;
2206 }
2207 
2208 static uint32_t
2209 nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns)
2210 {
2211 	struct spdk_nvmf_registrant *reg, *reg_tmp;
2212 	uint32_t count = 0;
2213 
2214 	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, reg_tmp) {
2215 		nvmf_ns_reservation_remove_registrant(ns, reg);
2216 		count++;
2217 	}
2218 	return count;
2219 }
2220 
2221 static void
2222 nvmf_ns_reservation_acquire_reservation(struct spdk_nvmf_ns *ns, uint64_t rkey,
2223 					enum spdk_nvme_reservation_type rtype,
2224 					struct spdk_nvmf_registrant *holder)
2225 {
2226 	ns->rtype = rtype;
2227 	ns->crkey = rkey;
2228 	assert(ns->holder == NULL);
2229 	ns->holder = holder;
2230 }
2231 
2232 static bool
2233 nvmf_ns_reservation_register(struct spdk_nvmf_ns *ns,
2234 			     struct spdk_nvmf_ctrlr *ctrlr,
2235 			     struct spdk_nvmf_request *req)
2236 {
2237 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
2238 	uint8_t rrega, iekey, cptpl, rtype;
2239 	struct spdk_nvme_reservation_register_data key;
2240 	struct spdk_nvmf_registrant *reg;
2241 	uint8_t status = SPDK_NVME_SC_SUCCESS;
2242 	bool update_sgroup = false;
2243 	struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
2244 	uint32_t num_hostid = 0;
2245 	int rc;
2246 
2247 	rrega = cmd->cdw10_bits.resv_register.rrega;
2248 	iekey = cmd->cdw10_bits.resv_register.iekey;
2249 	cptpl = cmd->cdw10_bits.resv_register.cptpl;
2250 
2251 	if (req->data && req->length >= sizeof(key)) {
2252 		memcpy(&key, req->data, sizeof(key));
2253 	} else {
2254 		SPDK_ERRLOG("No key provided. Failing request.\n");
2255 		status = SPDK_NVME_SC_INVALID_FIELD;
2256 		goto exit;
2257 	}
2258 
2259 	SPDK_DEBUGLOG(nvmf, "REGISTER: RREGA %u, IEKEY %u, CPTPL %u, "
2260 		      "NRKEY 0x%"PRIx64", NRKEY 0x%"PRIx64"\n",
2261 		      rrega, iekey, cptpl, key.crkey, key.nrkey);
2262 
2263 	if (cptpl == SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON) {
2264 		/* Ture to OFF state, and need to be updated in the configuration file */
2265 		if (ns->ptpl_activated) {
2266 			ns->ptpl_activated = 0;
2267 			update_sgroup = true;
2268 		}
2269 	} else if (cptpl == SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS) {
2270 		if (ns->ptpl_file == NULL) {
2271 			status = SPDK_NVME_SC_INVALID_FIELD;
2272 			goto exit;
2273 		} else if (ns->ptpl_activated == 0) {
2274 			ns->ptpl_activated = 1;
2275 			update_sgroup = true;
2276 		}
2277 	}
2278 
2279 	/* current Host Identifier has registrant or not */
2280 	reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid);
2281 
2282 	switch (rrega) {
2283 	case SPDK_NVME_RESERVE_REGISTER_KEY:
2284 		if (!reg) {
2285 			/* register new controller */
2286 			if (key.nrkey == 0) {
2287 				SPDK_ERRLOG("Can't register zeroed new key\n");
2288 				status = SPDK_NVME_SC_INVALID_FIELD;
2289 				goto exit;
2290 			}
2291 			rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey);
2292 			if (rc < 0) {
2293 				status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2294 				goto exit;
2295 			}
2296 			update_sgroup = true;
2297 		} else {
2298 			/* register with same key is not an error */
2299 			if (reg->rkey != key.nrkey) {
2300 				SPDK_ERRLOG("The same host already register a "
2301 					    "key with 0x%"PRIx64"\n",
2302 					    reg->rkey);
2303 				status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2304 				goto exit;
2305 			}
2306 		}
2307 		break;
2308 	case SPDK_NVME_RESERVE_UNREGISTER_KEY:
2309 		if (!reg || (!iekey && reg->rkey != key.crkey)) {
2310 			SPDK_ERRLOG("No registrant or current key doesn't match "
2311 				    "with existing registrant key\n");
2312 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2313 			goto exit;
2314 		}
2315 
2316 		rtype = ns->rtype;
2317 		num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
2318 				SPDK_NVMF_MAX_NUM_REGISTRANTS,
2319 				&ctrlr->hostid);
2320 
2321 		nvmf_ns_reservation_remove_registrant(ns, reg);
2322 
2323 		if (!ns->rtype && num_hostid && (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY ||
2324 						 rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY)) {
2325 			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
2326 							      hostid_list,
2327 							      num_hostid,
2328 							      SPDK_NVME_RESERVATION_RELEASED);
2329 		}
2330 		update_sgroup = true;
2331 		break;
2332 	case SPDK_NVME_RESERVE_REPLACE_KEY:
2333 		if (!reg || (!iekey && reg->rkey != key.crkey)) {
2334 			SPDK_ERRLOG("No registrant or current key doesn't match "
2335 				    "with existing registrant key\n");
2336 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2337 			goto exit;
2338 		}
2339 		if (key.nrkey == 0) {
2340 			SPDK_ERRLOG("Can't register zeroed new key\n");
2341 			status = SPDK_NVME_SC_INVALID_FIELD;
2342 			goto exit;
2343 		}
2344 		reg->rkey = key.nrkey;
2345 		update_sgroup = true;
2346 		break;
2347 	default:
2348 		status = SPDK_NVME_SC_INVALID_FIELD;
2349 		goto exit;
2350 	}
2351 
2352 exit:
2353 	if (update_sgroup) {
2354 		rc = nvmf_ns_update_reservation_info(ns);
2355 		if (rc != 0) {
2356 			status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2357 		}
2358 	}
2359 	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2360 	req->rsp->nvme_cpl.status.sc = status;
2361 	return update_sgroup;
2362 }
2363 
/* Handle a Reservation Acquire command (RACQA = acquire / preempt /
 * preempt-and-abort).  Writes the NVMe completion status into req and returns
 * true when the reservation state changed and the poll groups must be updated.
 */
static bool
nvmf_ns_reservation_acquire(struct spdk_nvmf_ns *ns,
			    struct spdk_nvmf_ctrlr *ctrlr,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	uint8_t racqa, iekey, rtype;
	struct spdk_nvme_reservation_acquire_data key;
	struct spdk_nvmf_registrant *reg;
	bool all_regs = false;
	uint32_t count = 0;
	bool update_sgroup = true;
	/* other hosts registered before the command executed */
	struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint32_t num_hostid = 0;
	/* other hosts still registered after the command executed */
	struct spdk_uuid new_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint32_t new_num_hostid = 0;
	bool reservation_released = false;
	uint8_t status = SPDK_NVME_SC_SUCCESS;

	racqa = cmd->cdw10_bits.resv_acquire.racqa;
	iekey = cmd->cdw10_bits.resv_acquire.iekey;
	rtype = cmd->cdw10_bits.resv_acquire.rtype;

	/* the payload carries the current and preempt reservation keys */
	if (req->data && req->length >= sizeof(key)) {
		memcpy(&key, req->data, sizeof(key));
	} else {
		SPDK_ERRLOG("No key provided. Failing request.\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		goto exit;
	}

	/* NOTE(review): the "NRKEY" label below prints key.crkey — the label
	 * looks mislabeled (should read CRKEY); log text left as-is here.
	 */
	SPDK_DEBUGLOG(nvmf, "ACQUIRE: RACQA %u, IEKEY %u, RTYPE %u, "
		      "NRKEY 0x%"PRIx64", PRKEY 0x%"PRIx64"\n",
		      racqa, iekey, rtype, key.crkey, key.prkey);

	/* IEKEY must be cleared and RTYPE must be a defined reservation type */
	if (iekey || rtype > SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) {
		SPDK_ERRLOG("Ignore existing key field set to 1\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		goto exit;
	}

	reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid);
	/* must be registrant and CRKEY must match */
	if (!reg || reg->rkey != key.crkey) {
		SPDK_ERRLOG("No registrant or current key doesn't match "
			    "with existing registrant key\n");
		status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		update_sgroup = false;
		goto exit;
	}

	all_regs = nvmf_ns_reservation_all_registrants_type(ns);

	switch (racqa) {
	case SPDK_NVME_RESERVE_ACQUIRE:
		/* it's not an error for the holder to acquire same reservation type again */
		if (nvmf_ns_reservation_registrant_is_holder(ns, reg) && ns->rtype == rtype) {
			/* do nothing */
			update_sgroup = false;
		} else if (ns->holder == NULL) {
			/* first time to acquire the reservation */
			nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg);
		} else {
			SPDK_ERRLOG("Invalid rtype or current registrant is not holder\n");
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
			update_sgroup = false;
			goto exit;
		}
		break;
	case SPDK_NVME_RESERVE_PREEMPT:
		/* no reservation holder */
		if (!ns->holder) {
			/* unregister with PRKEY */
			nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
			break;
		}
		/* snapshot the other hosts before any registrant is removed,
		 * so preempt notifications can be computed at exit
		 */
		num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
				SPDK_NVMF_MAX_NUM_REGISTRANTS,
				&ctrlr->hostid);

		/* only 1 reservation holder and reservation key is valid */
		if (!all_regs) {
			/* preempt itself */
			if (nvmf_ns_reservation_registrant_is_holder(ns, reg) &&
			    ns->crkey == key.prkey) {
				/* holder keeps the reservation, only the type changes */
				ns->rtype = rtype;
				reservation_released = true;
				break;
			}

			if (ns->crkey == key.prkey) {
				/* preempt the current holder and take over */
				nvmf_ns_reservation_remove_registrant(ns, ns->holder);
				nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg);
				reservation_released = true;
			} else if (key.prkey != 0) {
				/* PRKEY names non-holder registrants: just unregister them */
				nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
			} else {
				/* PRKEY is zero */
				SPDK_ERRLOG("Current PRKEY is zero\n");
				status = SPDK_NVME_SC_RESERVATION_CONFLICT;
				update_sgroup = false;
				goto exit;
			}
		} else {
			/* release all other registrants except for the current one */
			if (key.prkey == 0) {
				nvmf_ns_reservation_remove_all_other_registrants(ns, reg);
				assert(ns->holder == reg);
			} else {
				count = nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
				if (count == 0) {
					SPDK_ERRLOG("PRKEY doesn't match any registrant\n");
					status = SPDK_NVME_SC_RESERVATION_CONFLICT;
					update_sgroup = false;
					goto exit;
				}
			}
		}
		break;
	default:
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		break;
	}

exit:
	if (update_sgroup && racqa == SPDK_NVME_RESERVE_PREEMPT) {
		new_num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, new_hostid_list,
				 SPDK_NVMF_MAX_NUM_REGISTRANTS,
				 &ctrlr->hostid);
		/* Preempt notification occurs on the unregistered controllers
		 * other than the controller who issued the command.
		 */
		num_hostid = nvmf_ns_reservation_get_unregistered_hostid(hostid_list,
				num_hostid,
				new_hostid_list,
				new_num_hostid);
		if (num_hostid) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_REGISTRATION_PREEMPTED);

		}
		/* Reservation released notification occurs on the
		 * controllers which are the remaining registrants other than
		 * the controller who issued the command.
		 */
		if (reservation_released && new_num_hostid) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      new_hostid_list,
							      new_num_hostid,
							      SPDK_NVME_RESERVATION_RELEASED);

		}
	}
	/* persist the new state when Persist Through Power Loss is active */
	if (update_sgroup && ns->ptpl_activated) {
		if (nvmf_ns_update_reservation_info(ns)) {
			status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		}
	}
	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->rsp->nvme_cpl.status.sc = status;
	return update_sgroup;
}
2530 
/* Handle a Reservation Release command (RRELA = release / clear).  Writes the
 * NVMe completion status into req and returns true when the reservation state
 * changed and the poll groups must be updated.
 */
static bool
nvmf_ns_reservation_release(struct spdk_nvmf_ns *ns,
			    struct spdk_nvmf_ctrlr *ctrlr,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	uint8_t rrela, iekey, rtype;
	struct spdk_nvmf_registrant *reg;
	uint64_t crkey;
	uint8_t status = SPDK_NVME_SC_SUCCESS;
	bool update_sgroup = true;
	struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint32_t num_hostid = 0;

	rrela = cmd->cdw10_bits.resv_release.rrela;
	iekey = cmd->cdw10_bits.resv_release.iekey;
	rtype = cmd->cdw10_bits.resv_release.rtype;

	/* the payload carries only the Current Reservation Key */
	if (req->data && req->length >= sizeof(crkey)) {
		memcpy(&crkey, req->data, sizeof(crkey));
	} else {
		SPDK_ERRLOG("No key provided. Failing request.\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		goto exit;
	}

	SPDK_DEBUGLOG(nvmf, "RELEASE: RRELA %u, IEKEY %u, RTYPE %u, "
		      "CRKEY 0x%"PRIx64"\n",  rrela, iekey, rtype, crkey);

	/* ignore-existing-key is rejected for release */
	if (iekey) {
		SPDK_ERRLOG("Ignore existing key field set to 1\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		goto exit;
	}

	/* the issuing host must be a registrant and CRKEY must match its key */
	reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid);
	if (!reg || reg->rkey != crkey) {
		SPDK_ERRLOG("No registrant or current key doesn't match "
			    "with existing registrant key\n");
		status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		update_sgroup = false;
		goto exit;
	}

	/* other hosts that may need a reservation notification */
	num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
			SPDK_NVMF_MAX_NUM_REGISTRANTS,
			&ctrlr->hostid);

	switch (rrela) {
	case SPDK_NVME_RESERVE_RELEASE:
		if (!ns->holder) {
			SPDK_DEBUGLOG(nvmf, "RELEASE: no holder\n");
			update_sgroup = false;
			goto exit;
		}
		/* the release RTYPE must match the reservation's type */
		if (ns->rtype != rtype) {
			SPDK_ERRLOG("Type doesn't match\n");
			status = SPDK_NVME_SC_INVALID_FIELD;
			update_sgroup = false;
			goto exit;
		}
		if (!nvmf_ns_reservation_registrant_is_holder(ns, reg)) {
			/* not the reservation holder, this isn't an error */
			update_sgroup = false;
			goto exit;
		}

		/* capture the type before it is cleared by the release */
		rtype = ns->rtype;
		nvmf_ns_reservation_release_reservation(ns);

		/* write-exclusive / exclusive-access types generate no
		 * release notification; all other types notify other hosts
		 */
		if (num_hostid && rtype != SPDK_NVME_RESERVE_WRITE_EXCLUSIVE &&
		    rtype != SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_RESERVATION_RELEASED);
		}
		break;
	case SPDK_NVME_RESERVE_CLEAR:
		/* clear drops every registration and the reservation itself */
		nvmf_ns_reservation_clear_all_registrants(ns);
		if (num_hostid) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_RESERVATION_PREEMPTED);
		}
		break;
	default:
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		goto exit;
	}

exit:
	/* persist the new state when Persist Through Power Loss is active */
	if (update_sgroup && ns->ptpl_activated) {
		if (nvmf_ns_update_reservation_info(ns)) {
			status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		}
	}
	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->rsp->nvme_cpl.status.sc = status;
	return update_sgroup;
}
2635 
2636 static void
2637 nvmf_ns_reservation_report(struct spdk_nvmf_ns *ns,
2638 			   struct spdk_nvmf_ctrlr *ctrlr,
2639 			   struct spdk_nvmf_request *req)
2640 {
2641 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
2642 	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
2643 	struct spdk_nvmf_ctrlr *ctrlr_tmp;
2644 	struct spdk_nvmf_registrant *reg, *tmp;
2645 	struct spdk_nvme_reservation_status_extended_data *status_data;
2646 	struct spdk_nvme_registered_ctrlr_extended_data *ctrlr_data;
2647 	uint8_t *payload;
2648 	uint32_t len, count = 0;
2649 	uint32_t regctl = 0;
2650 	uint8_t status = SPDK_NVME_SC_SUCCESS;
2651 
2652 	if (req->data == NULL) {
2653 		SPDK_ERRLOG("No data transfer specified for request. "
2654 			    " Unable to transfer back response.\n");
2655 		status = SPDK_NVME_SC_INVALID_FIELD;
2656 		goto exit;
2657 	}
2658 
2659 	if (!cmd->cdw11_bits.resv_report.eds) {
2660 		SPDK_ERRLOG("NVMeoF uses extended controller data structure, "
2661 			    "please set EDS bit in cdw11 and try again\n");
2662 		status = SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT;
2663 		goto exit;
2664 	}
2665 
2666 	/* Get number of registerd controllers, one Host may have more than
2667 	 * one controller based on different ports.
2668 	 */
2669 	TAILQ_FOREACH(ctrlr_tmp, &subsystem->ctrlrs, link) {
2670 		reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr_tmp->hostid);
2671 		if (reg) {
2672 			regctl++;
2673 		}
2674 	}
2675 
2676 	len = sizeof(*status_data) + sizeof(*ctrlr_data) * regctl;
2677 	payload = calloc(1, len);
2678 	if (!payload) {
2679 		status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2680 		goto exit;
2681 	}
2682 
2683 	status_data = (struct spdk_nvme_reservation_status_extended_data *)payload;
2684 	status_data->data.gen = ns->gen;
2685 	status_data->data.rtype = ns->rtype;
2686 	status_data->data.regctl = regctl;
2687 	status_data->data.ptpls = ns->ptpl_activated;
2688 
2689 	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
2690 		assert(count <= regctl);
2691 		ctrlr_data = (struct spdk_nvme_registered_ctrlr_extended_data *)
2692 			     (payload + sizeof(*status_data) + sizeof(*ctrlr_data) * count);
2693 		/* Set to 0xffffh for dynamic controller */
2694 		ctrlr_data->cntlid = 0xffff;
2695 		ctrlr_data->rcsts.status = (ns->holder == reg) ? true : false;
2696 		ctrlr_data->rkey = reg->rkey;
2697 		spdk_uuid_copy((struct spdk_uuid *)ctrlr_data->hostid, &reg->hostid);
2698 		count++;
2699 	}
2700 
2701 	memcpy(req->data, payload, spdk_min(len, (cmd->cdw10 + 1) * sizeof(uint32_t)));
2702 	free(payload);
2703 
2704 exit:
2705 	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2706 	req->rsp->nvme_cpl.status.sc = status;
2707 	return;
2708 }
2709 
/* Message callback: complete the reservation request on the poll group
 * thread that owns its qpair.  ctx is the spdk_nvmf_request.
 */
static void
nvmf_ns_reservation_complete(void *ctx)
{
	spdk_nvmf_request_complete((struct spdk_nvmf_request *)ctx);
}
2717 
2718 static void
2719 _nvmf_ns_reservation_update_done(struct spdk_nvmf_subsystem *subsystem,
2720 				 void *cb_arg, int status)
2721 {
2722 	struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)cb_arg;
2723 	struct spdk_nvmf_poll_group *group = req->qpair->group;
2724 
2725 	spdk_thread_send_msg(group->thread, nvmf_ns_reservation_complete, req);
2726 }
2727 
/* Entry point for reservation commands (register / acquire / release /
 * report) on the namespace addressed by the command.  When the command
 * changed reservation state, the new state is propagated to every poll group
 * before the request is completed on its own poll group thread.
 */
void
nvmf_ns_reservation_request(void *ctx)
{
	struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)ctx;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct subsystem_update_ns_ctx *update_ctx;
	uint32_t nsid;
	struct spdk_nvmf_ns *ns;
	bool update_sgroup = false;

	nsid = cmd->nsid;
	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid);
	assert(ns != NULL);

	/* each handler fills in the request's completion status itself */
	switch (cmd->opc) {
	case SPDK_NVME_OPC_RESERVATION_REGISTER:
		update_sgroup = nvmf_ns_reservation_register(ns, ctrlr, req);
		break;
	case SPDK_NVME_OPC_RESERVATION_ACQUIRE:
		update_sgroup = nvmf_ns_reservation_acquire(ns, ctrlr, req);
		break;
	case SPDK_NVME_OPC_RESERVATION_RELEASE:
		update_sgroup = nvmf_ns_reservation_release(ns, ctrlr, req);
		break;
	case SPDK_NVME_OPC_RESERVATION_REPORT:
		/* report never changes state, so no poll group update needed */
		nvmf_ns_reservation_report(ns, ctrlr, req);
		break;
	default:
		break;
	}

	/* update reservation information to subsystem's poll group */
	if (update_sgroup) {
		update_ctx = calloc(1, sizeof(*update_ctx));
		if (update_ctx == NULL) {
			/* NOTE(review): on allocation failure the poll groups
			 * are NOT updated, but the request still completes
			 * with the status the handler already set
			 */
			SPDK_ERRLOG("Can't alloc subsystem poll group update context\n");
			goto update_done;
		}
		update_ctx->subsystem = ctrlr->subsys;
		update_ctx->cb_fn = _nvmf_ns_reservation_update_done;
		update_ctx->cb_arg = req;

		/* completion continues asynchronously via update_ctx->cb_fn */
		nvmf_subsystem_update_ns(ctrlr->subsys, subsystem_update_ns_done, update_ctx);
		return;
	}

update_done:
	_nvmf_ns_reservation_update_done(ctrlr->subsys, (void *)req, 0);
}
2778 
2779 int
2780 spdk_nvmf_subsystem_set_ana_reporting(struct spdk_nvmf_subsystem *subsystem,
2781 				      bool ana_reporting)
2782 {
2783 	if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) {
2784 		return -EAGAIN;
2785 	}
2786 
2787 	subsystem->flags.ana_reporting = ana_reporting;
2788 
2789 	return 0;
2790 }
2791 
/* Context carried through spdk_for_each_channel() while an ANA state change
 * for one subsystem listener is propagated across the poll groups.
 */
struct subsystem_listener_update_ctx {
	struct spdk_nvmf_subsystem_listener *listener;

	/* invoked (with cb_arg) once every poll group has been visited */
	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
	void *cb_arg;
};
2798 
2799 static void
2800 subsystem_listener_update_done(struct spdk_io_channel_iter *i, int status)
2801 {
2802 	struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
2803 
2804 	if (ctx->cb_fn) {
2805 		ctx->cb_fn(ctx->cb_arg, status);
2806 	}
2807 	free(ctx);
2808 }
2809 
2810 static void
2811 subsystem_listener_update_on_pg(struct spdk_io_channel_iter *i)
2812 {
2813 	struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
2814 	struct spdk_nvmf_subsystem_listener *listener;
2815 	struct spdk_nvmf_poll_group *group;
2816 	struct spdk_nvmf_ctrlr *ctrlr;
2817 
2818 	listener = ctx->listener;
2819 	group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i));
2820 
2821 	TAILQ_FOREACH(ctrlr, &listener->subsystem->ctrlrs, link) {
2822 		if (ctrlr->admin_qpair->group == group && ctrlr->listener == listener) {
2823 			nvmf_ctrlr_async_event_ana_change_notice(ctrlr);
2824 		}
2825 	}
2826 
2827 	spdk_for_each_channel_continue(i, 0);
2828 }
2829 
/* Set the ANA state of the listener matching trid and notify, via an ANA
 * change async event, every controller that connected through it.  cb_fn is
 * always invoked exactly once: immediately with a negative errno on
 * validation/allocation failure, immediately with 0 when the state is
 * unchanged, or after all poll groups have been visited otherwise.
 */
void
nvmf_subsystem_set_ana_state(struct spdk_nvmf_subsystem *subsystem,
			     const struct spdk_nvme_transport_id *trid,
			     enum spdk_nvme_ana_state ana_state,
			     spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, void *cb_arg)
{
	struct spdk_nvmf_subsystem_listener *listener;
	struct subsystem_listener_update_ctx *ctx;

	/* only callable while the subsystem is quiesced */
	assert(cb_fn != NULL);
	assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	       subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED);

	if (!subsystem->flags.ana_reporting) {
		SPDK_ERRLOG("ANA reporting is disabled\n");
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	/* ANA Change state is not used, ANA Persistent Loss state
	 * is not supported yet.
	 */
	if (!(ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE ||
	      ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE ||
	      ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE)) {
		SPDK_ERRLOG("ANA state %d is not supported\n", ana_state);
		cb_fn(cb_arg, -ENOTSUP);
		return;
	}

	listener = nvmf_subsystem_find_listener(subsystem, trid);
	if (!listener) {
		SPDK_ERRLOG("Unable to find listener.\n");
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	/* no-op when the state is already what the caller asked for */
	if (listener->ana_state == ana_state) {
		cb_fn(cb_arg, 0);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		SPDK_ERRLOG("Unable to allocate context\n");
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	listener->ana_state = ana_state;
	/* the change count feeds the ANA log page's change count field */
	listener->ana_state_change_count++;

	ctx->listener = listener;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* walk every poll group; ctx is freed in the done callback */
	spdk_for_each_channel(subsystem->tgt,
			      subsystem_listener_update_on_pg,
			      ctx,
			      subsystem_listener_update_done);
}
2891