xref: /spdk/lib/nvmf/subsystem.c (revision 88e3ffd7b6c5ec1ea1a660354d25f02c766092e1)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation. All rights reserved.
5  *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "nvmf_internal.h"
37 #include "transport.h"
38 
39 #include "spdk/likely.h"
40 #include "spdk/string.h"
41 #include "spdk/trace.h"
42 #include "spdk/nvmf_spec.h"
43 #include "spdk/uuid.h"
44 #include "spdk/json.h"
45 #include "spdk/file.h"
46 
47 #include "spdk/bdev_module.h"
48 #include "spdk/log.h"
49 #include "spdk_internal/utf.h"
50 
51 #define MODEL_NUMBER_DEFAULT "SPDK bdev Controller"
52 #define NVMF_SUBSYSTEM_DEFAULT_NAMESPACES 32
53 
54 /*
55  * States for parsing valid domains in NQNs according to RFC 1034
56  */
57 enum spdk_nvmf_nqn_domain_states {
58 	/* First character of a domain must be a letter */
59 	SPDK_NVMF_DOMAIN_ACCEPT_LETTER = 0,
60 
61 	/* Subsequent characters can be any of letter, digit, or hyphen */
62 	SPDK_NVMF_DOMAIN_ACCEPT_LDH = 1,
63 
64 	/* A domain label must end with either a letter or digit */
65 	SPDK_NVMF_DOMAIN_ACCEPT_ANY = 2
66 };
67 
68 /* Returns true if the buffer is a valid ASCII string as defined by the NVMe spec */
69 static bool
70 nvmf_valid_ascii_string(const void *buf, size_t size)
71 {
72 	const uint8_t *str = buf;
73 	size_t i;
74 
75 	for (i = 0; i < size; i++) {
76 		if (str[i] < 0x20 || str[i] > 0x7E) {
77 			return false;
78 		}
79 	}
80 
81 	return true;
82 }
83 
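/*
 * Returns true if the NQN is the well-known discovery NQN, a UUID-based NQN, or a
 * domain-name based NQN of the form "nqn.yyyy-mm.reverse.domain:user-string".
 */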
84 static bool
85 nvmf_valid_nqn(const char *nqn)
86 {
87 	size_t len;
88 	struct spdk_uuid uuid_value;
89 	uint32_t i;
90 	int bytes_consumed;
91 	uint32_t domain_label_length;
92 	char *reverse_domain_end;
93 	uint32_t reverse_domain_end_index;
94 	enum spdk_nvmf_nqn_domain_states domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER;
95 
96 	/* Check for length requirements */
97 	len = strlen(nqn);
98 	if (len > SPDK_NVMF_NQN_MAX_LEN) {
99 		SPDK_ERRLOG("Invalid NQN \"%s\": length %zu > max %d\n", nqn, len, SPDK_NVMF_NQN_MAX_LEN);
100 		return false;
101 	}
102 
103 	/* The nqn must be at least as long as SPDK_NVMF_NQN_MIN_LEN to contain the necessary prefix. */
104 	if (len < SPDK_NVMF_NQN_MIN_LEN) {
105 		SPDK_ERRLOG("Invalid NQN \"%s\": length %zu < min %d\n", nqn, len, SPDK_NVMF_NQN_MIN_LEN);
106 		return false;
107 	}
108 
109 	/* Check for discovery controller nqn */
110 	if (!strcmp(nqn, SPDK_NVMF_DISCOVERY_NQN)) {
111 		return true;
112 	}
113 
114 	/* Check whether the NQN matches the UUID-based format "nqn.2014-08.org.nvmexpress:uuid:11111111-2222-3333-4444-555555555555" */
115 	if (!strncmp(nqn, SPDK_NVMF_NQN_UUID_PRE, SPDK_NVMF_NQN_UUID_PRE_LEN)) {
116 		if (len != SPDK_NVMF_NQN_UUID_PRE_LEN + SPDK_NVMF_UUID_STRING_LEN) {
117 			SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not the correct length\n", nqn);
118 			return false;
119 		}
120 
121 		if (spdk_uuid_parse(&uuid_value, &nqn[SPDK_NVMF_NQN_UUID_PRE_LEN])) {
122 			SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not formatted correctly\n", nqn);
123 			return false;
124 		}
125 		return true;
126 	}
127 
128 	/* If the nqn does not match the uuid structure, the next several checks validate the form "nqn.yyyy-mm.reverse.domain:user-string" */
129 
130 	if (strncmp(nqn, "nqn.", 4) != 0) {
131 		SPDK_ERRLOG("Invalid NQN \"%s\": NQN must begin with \"nqn.\".\n", nqn);
132 		return false;
133 	}
134 
135 	/* Check for yyyy-mm. */
136 	if (!(isdigit(nqn[4]) && isdigit(nqn[5]) && isdigit(nqn[6]) && isdigit(nqn[7]) &&
137 	      nqn[8] == '-' && isdigit(nqn[9]) && isdigit(nqn[10]) && nqn[11] == '.')) {
138 		SPDK_ERRLOG("Invalid date code in NQN \"%s\"\n", nqn);
139 		return false;
140 	}
141 
142 	reverse_domain_end = strchr(nqn, ':');
143 	if (reverse_domain_end == NULL ||
144 	    (reverse_domain_end_index = reverse_domain_end - nqn) >= len - 1) {
145 		SPDK_ERRLOG("Invalid NQN \"%s\". NQN must contain a user-specified name prefixed by ':'.\n",
146 			    nqn);
147 		return false;
148 	}
149 
150 	/* Check for valid reverse domain */
151 	domain_label_length = 0;
152 	for (i = 12; i < reverse_domain_end_index; i++) {
153 		if (domain_label_length > SPDK_DOMAIN_LABEL_MAX_LEN) {
154 			SPDK_ERRLOG("Invalid domain name in NQN \"%s\". At least one Label is too long.\n", nqn);
155 			return false;
156 		}
157 
158 		switch (domain_state) {
159 
160 		case SPDK_NVMF_DOMAIN_ACCEPT_LETTER: {
161 			if (isalpha(nqn[i])) {
162 				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY;
163 				domain_label_length++;
164 				break;
165 			} else {
166 				SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must start with a letter.\n", nqn);
167 				return false;
168 			}
169 		}
170 
171 		case SPDK_NVMF_DOMAIN_ACCEPT_LDH: {
172 			if (isalpha(nqn[i]) || isdigit(nqn[i])) {
173 				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY;
174 				domain_label_length++;
175 				break;
176 			} else if (nqn[i] == '-') {
177 				if (i == reverse_domain_end_index - 1) {
178 					SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n",
179 						    nqn);
180 					return false;
181 				}
182 				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH;
183 				domain_label_length++;
184 				break;
185 			} else if (nqn[i] == '.') {
186 				SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n",
187 					    nqn);
188 				return false;
189 			} else {
190 				SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only [a-z,A-Z,0-9,'-','.'].\n",
191 					    nqn);
192 				return false;
193 			}
194 		}
195 
196 		case SPDK_NVMF_DOMAIN_ACCEPT_ANY: {
197 			if (isalpha(nqn[i]) || isdigit(nqn[i])) {
198 				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY;
199 				domain_label_length++;
200 				break;
201 			} else if (nqn[i] == '-') {
202 				if (i == reverse_domain_end_index - 1) {
203 					SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n",
204 						    nqn);
205 					return false;
206 				}
207 				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH;
208 				domain_label_length++;
209 				break;
210 			} else if (nqn[i] == '.') {
211 				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER;
212 				domain_label_length = 0;
213 				break;
214 			} else {
215 				SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only [a-z,A-Z,0-9,'-','.'].\n",
216 					    nqn);
217 				return false;
218 			}
219 		}
220 		}
221 	}
222 
223 	i = reverse_domain_end_index + 1;
224 	while (i < len) {
225 		bytes_consumed = utf8_valid(&nqn[i], &nqn[len]);
226 		if (bytes_consumed <= 0) {
227 			SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only valid utf-8.\n", nqn);
228 			return false;
229 		}
230 
231 		i += bytes_consumed;
232 	}
233 	return true;
234 }
235 
236 static void subsystem_state_change_on_pg(struct spdk_io_channel_iter *i);
237 
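/*
 * Allocate a subsystem on the current thread and register it with the target under a
 * free subsystem ID. A discovery subsystem may not have namespaces; for other subtypes
 * a num_ns of 0 selects NVMF_SUBSYSTEM_DEFAULT_NAMESPACES. Returns NULL if the NQN is
 * invalid or already in use, if no subsystem slot is free, or if allocation fails.
 */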
238 struct spdk_nvmf_subsystem *
239 spdk_nvmf_subsystem_create(struct spdk_nvmf_tgt *tgt,
240 			   const char *nqn,
241 			   enum spdk_nvmf_subtype type,
242 			   uint32_t num_ns)
243 {
244 	struct spdk_nvmf_subsystem	*subsystem;
245 	uint32_t			sid;
246 
247 	if (spdk_nvmf_tgt_find_subsystem(tgt, nqn)) {
248 		SPDK_ERRLOG("Subsystem NQN '%s' already exists\n", nqn);
249 		return NULL;
250 	}
251 
252 	if (!nvmf_valid_nqn(nqn)) {
253 		return NULL;
254 	}
255 
256 	if (type == SPDK_NVMF_SUBTYPE_DISCOVERY) {
257 		if (num_ns != 0) {
258 			SPDK_ERRLOG("Discovery subsystem cannot have namespaces.\n");
259 			return NULL;
260 		}
261 	} else if (num_ns == 0) {
262 		num_ns = NVMF_SUBSYSTEM_DEFAULT_NAMESPACES;
263 	}
264 
265 	/* Find a free subsystem id (sid) */
266 	for (sid = 0; sid < tgt->max_subsystems; sid++) {
267 		if (tgt->subsystems[sid] == NULL) {
268 			break;
269 		}
270 	}
271 	if (sid >= tgt->max_subsystems) {
272 		return NULL;
273 	}
274 
275 	subsystem = calloc(1, sizeof(struct spdk_nvmf_subsystem));
276 	if (subsystem == NULL) {
277 		return NULL;
278 	}
279 
280 	subsystem->thread = spdk_get_thread();
281 	subsystem->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
282 	subsystem->tgt = tgt;
283 	subsystem->id = sid;
284 	subsystem->subtype = type;
285 	subsystem->max_nsid = num_ns;
286 	subsystem->next_cntlid = 0;
287 	snprintf(subsystem->subnqn, sizeof(subsystem->subnqn), "%s", nqn);
288 	pthread_mutex_init(&subsystem->mutex, NULL);
289 	TAILQ_INIT(&subsystem->listeners);
290 	TAILQ_INIT(&subsystem->hosts);
291 	TAILQ_INIT(&subsystem->ctrlrs);
292 
293 	if (num_ns != 0) {
294 		subsystem->ns = calloc(num_ns, sizeof(struct spdk_nvmf_ns *));
295 		if (subsystem->ns == NULL) {
296 			SPDK_ERRLOG("Namespace memory allocation failed\n");
297 			pthread_mutex_destroy(&subsystem->mutex);
298 			free(subsystem);
299 			return NULL;
300 		}
301 	}
302 
303 	memset(subsystem->sn, '0', sizeof(subsystem->sn) - 1);
304 	subsystem->sn[sizeof(subsystem->sn) - 1] = '\0';
305 
306 	snprintf(subsystem->mn, sizeof(subsystem->mn), "%s",
307 		 MODEL_NUMBER_DEFAULT);
308 
309 	tgt->subsystems[sid] = subsystem;
310 	nvmf_update_discovery_log(tgt, NULL);
311 
312 	return subsystem;
313 }
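/*
 * A minimal usage sketch (not part of this file): "tgt" is an existing target and
 * "start_done_cb" is a hypothetical spdk_nvmf_subsystem_state_change_done callback.
 * It shows the typical create-then-start flow for a subsystem:
 *
 *	struct spdk_nvmf_subsystem *subsys;
 *
 *	subsys = spdk_nvmf_subsystem_create(tgt, "nqn.2016-06.io.spdk:cnode1",
 *					    SPDK_NVMF_SUBTYPE_NVME, 0);
 *	if (subsys != NULL) {
 *		spdk_nvmf_subsystem_start(subsys, start_done_cb, NULL);
 *	}
 */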
314 
315 /* Must hold subsystem->mutex while calling this function */
316 static void
317 nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_host *host)
318 {
319 	TAILQ_REMOVE(&subsystem->hosts, host, link);
320 	free(host);
321 }
322 
323 static void
324 _nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem,
325 				struct spdk_nvmf_subsystem_listener *listener,
326 				bool stop)
327 {
328 	struct spdk_nvmf_transport *transport;
329 
330 	if (stop) {
331 		transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, listener->trid->trstring);
332 		if (transport != NULL) {
333 			spdk_nvmf_transport_stop_listen(transport, listener->trid);
334 		}
335 	}
336 
337 	TAILQ_REMOVE(&subsystem->listeners, listener, link);
338 	free(listener);
339 }
340 
341 void
342 spdk_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem)
343 {
344 	struct spdk_nvmf_host		*host, *host_tmp;
345 	struct spdk_nvmf_ctrlr		*ctrlr, *ctrlr_tmp;
346 	struct spdk_nvmf_ns		*ns;
347 
348 	if (!subsystem) {
349 		return;
350 	}
351 
352 	assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE);
353 
354 	SPDK_DEBUGLOG(nvmf, "subsystem is %p\n", subsystem);
355 
356 	nvmf_subsystem_remove_all_listeners(subsystem, false);
357 
358 	pthread_mutex_lock(&subsystem->mutex);
359 
360 	TAILQ_FOREACH_SAFE(host, &subsystem->hosts, link, host_tmp) {
361 		nvmf_subsystem_remove_host(subsystem, host);
362 	}
363 
364 	pthread_mutex_unlock(&subsystem->mutex);
365 
366 	TAILQ_FOREACH_SAFE(ctrlr, &subsystem->ctrlrs, link, ctrlr_tmp) {
367 		nvmf_ctrlr_destruct(ctrlr);
368 	}
369 
370 	ns = spdk_nvmf_subsystem_get_first_ns(subsystem);
371 	while (ns != NULL) {
372 		struct spdk_nvmf_ns *next_ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns);
373 
374 		spdk_nvmf_subsystem_remove_ns(subsystem, ns->opts.nsid);
375 		ns = next_ns;
376 	}
377 
378 	free(subsystem->ns);
379 
380 	subsystem->tgt->subsystems[subsystem->id] = NULL;
381 	nvmf_update_discovery_log(subsystem->tgt, NULL);
382 
383 	pthread_mutex_destroy(&subsystem->mutex);
384 
385 	free(subsystem);
386 }
387 
388 
389 /* we have to use the typedef in the function declaration to appease astyle. */
390 typedef enum spdk_nvmf_subsystem_state spdk_nvmf_subsystem_state_t;
391 
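/* Map the requested terminal state to the transitional state entered while poll groups are updated. */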
392 static spdk_nvmf_subsystem_state_t
393 nvmf_subsystem_get_intermediate_state(enum spdk_nvmf_subsystem_state current_state,
394 				      enum spdk_nvmf_subsystem_state requested_state)
395 {
396 	switch (requested_state) {
397 	case SPDK_NVMF_SUBSYSTEM_INACTIVE:
398 		return SPDK_NVMF_SUBSYSTEM_DEACTIVATING;
399 	case SPDK_NVMF_SUBSYSTEM_ACTIVE:
400 		if (current_state == SPDK_NVMF_SUBSYSTEM_PAUSED) {
401 			return SPDK_NVMF_SUBSYSTEM_RESUMING;
402 		} else {
403 			return SPDK_NVMF_SUBSYSTEM_ACTIVATING;
404 		}
405 	case SPDK_NVMF_SUBSYSTEM_PAUSED:
406 		return SPDK_NVMF_SUBSYSTEM_PAUSING;
407 	default:
408 		assert(false);
409 		return SPDK_NVMF_SUBSYSTEM_NUM_STATES;
410 	}
411 }
412 
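/*
 * Atomically move the subsystem to the given state, verifying that the previous state
 * was one from which that transition is legal. Returns 0 on success.
 */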
413 static int
414 nvmf_subsystem_set_state(struct spdk_nvmf_subsystem *subsystem,
415 			 enum spdk_nvmf_subsystem_state state)
416 {
417 	enum spdk_nvmf_subsystem_state actual_old_state, expected_old_state;
418 	bool exchanged;
419 
420 	switch (state) {
421 	case SPDK_NVMF_SUBSYSTEM_INACTIVE:
422 		expected_old_state = SPDK_NVMF_SUBSYSTEM_DEACTIVATING;
423 		break;
424 	case SPDK_NVMF_SUBSYSTEM_ACTIVATING:
425 		expected_old_state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
426 		break;
427 	case SPDK_NVMF_SUBSYSTEM_ACTIVE:
428 		expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING;
429 		break;
430 	case SPDK_NVMF_SUBSYSTEM_PAUSING:
431 		expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
432 		break;
433 	case SPDK_NVMF_SUBSYSTEM_PAUSED:
434 		expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSING;
435 		break;
436 	case SPDK_NVMF_SUBSYSTEM_RESUMING:
437 		expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSED;
438 		break;
439 	case SPDK_NVMF_SUBSYSTEM_DEACTIVATING:
440 		expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
441 		break;
442 	default:
443 		assert(false);
444 		return -1;
445 	}
446 
447 	actual_old_state = expected_old_state;
448 	exchanged = __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false,
449 						__ATOMIC_RELAXED, __ATOMIC_RELAXED);
450 	if (spdk_unlikely(exchanged == false)) {
451 		if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING &&
452 		    state == SPDK_NVMF_SUBSYSTEM_ACTIVE) {
453 			expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING;
454 		}
455 		/* This is for the case when activating the subsystem fails. */
456 		if (actual_old_state == SPDK_NVMF_SUBSYSTEM_ACTIVATING &&
457 		    state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING) {
458 			expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING;
459 		}
460 		/* This is for the case when resuming the subsystem fails. */
461 		if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING &&
462 		    state == SPDK_NVMF_SUBSYSTEM_PAUSING) {
463 			expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING;
464 		}
465 		actual_old_state = expected_old_state;
466 		__atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false,
467 					    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
468 	}
469 	assert(actual_old_state == expected_old_state);
470 	return actual_old_state - expected_old_state;
471 }
472 
473 struct subsystem_state_change_ctx {
474 	struct spdk_nvmf_subsystem *subsystem;
475 
476 	enum spdk_nvmf_subsystem_state original_state;
477 
478 	enum spdk_nvmf_subsystem_state requested_state;
479 
480 	spdk_nvmf_subsystem_state_change_done cb_fn;
481 	void *cb_arg;
482 };
483 
484 static void
485 subsystem_state_change_revert_done(struct spdk_io_channel_iter *i, int status)
486 {
487 	struct subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
488 
489 	/* If reverting the state also fails, there is nothing more we can do; log it and move on. */
490 	if (nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state)) {
491 		SPDK_ERRLOG("Unable to revert the subsystem state after operation failure.\n");
492 	}
493 
494 	ctx->subsystem->changing_state = false;
495 	if (ctx->cb_fn) {
496 		/* Return a failure here; this function is only called on an error path. */
497 		ctx->cb_fn(ctx->subsystem, ctx->cb_arg, -1);
498 	}
499 	free(ctx);
500 }
501 
502 static void
503 subsystem_state_change_done(struct spdk_io_channel_iter *i, int status)
504 {
505 	struct subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
506 	enum spdk_nvmf_subsystem_state intermediate_state;
507 
508 	if (status == 0) {
509 		status = nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state);
510 		if (status) {
511 			status = -1;
512 		}
513 	}
514 
515 	if (status) {
516 		intermediate_state = nvmf_subsystem_get_intermediate_state(ctx->requested_state,
517 				     ctx->original_state);
518 		assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES);
519 
520 		if (nvmf_subsystem_set_state(ctx->subsystem, intermediate_state)) {
521 			goto out;
522 		}
523 		ctx->requested_state = ctx->original_state;
524 		spdk_for_each_channel(ctx->subsystem->tgt,
525 				      subsystem_state_change_on_pg,
526 				      ctx,
527 				      subsystem_state_change_revert_done);
528 		return;
529 	}
530 
531 out:
532 	ctx->subsystem->changing_state = false;
533 	if (ctx->cb_fn) {
534 		ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status);
535 	}
536 	free(ctx);
537 }
538 
539 static void
540 subsystem_state_change_continue(void *ctx, int status)
541 {
542 	struct spdk_io_channel_iter *i = ctx;
543 	spdk_for_each_channel_continue(i, status);
544 }
545 
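/* Per poll group step of a state change: add, remove, pause, or resume the subsystem on this group. */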
546 static void
547 subsystem_state_change_on_pg(struct spdk_io_channel_iter *i)
548 {
549 	struct subsystem_state_change_ctx *ctx;
550 	struct spdk_io_channel *ch;
551 	struct spdk_nvmf_poll_group *group;
552 
553 	ctx = spdk_io_channel_iter_get_ctx(i);
554 	ch = spdk_io_channel_iter_get_channel(i);
555 	group = spdk_io_channel_get_ctx(ch);
556 
557 	switch (ctx->requested_state) {
558 	case SPDK_NVMF_SUBSYSTEM_INACTIVE:
559 		nvmf_poll_group_remove_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
560 		break;
561 	case SPDK_NVMF_SUBSYSTEM_ACTIVE:
562 		if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVATING) {
563 			nvmf_poll_group_add_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
564 		} else if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_RESUMING) {
565 			nvmf_poll_group_resume_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
566 		}
567 		break;
568 	case SPDK_NVMF_SUBSYSTEM_PAUSED:
569 		nvmf_poll_group_pause_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
570 		break;
571 	default:
572 		assert(false);
573 		break;
574 	}
575 }
576 
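/*
 * Drive a subsystem state change: move to the intermediate state, apply the change on
 * every poll group, then commit the requested state (or revert on failure). Only one
 * state change may be in flight at a time; -EBUSY is returned otherwise.
 */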
577 static int
578 nvmf_subsystem_state_change(struct spdk_nvmf_subsystem *subsystem,
579 			    enum spdk_nvmf_subsystem_state requested_state,
580 			    spdk_nvmf_subsystem_state_change_done cb_fn,
581 			    void *cb_arg)
582 {
583 	struct subsystem_state_change_ctx *ctx;
584 	enum spdk_nvmf_subsystem_state intermediate_state;
585 	int rc;
586 
587 	if (__sync_val_compare_and_swap(&subsystem->changing_state, false, true)) {
588 		return -EBUSY;
589 	}
590 
591 	/* If we are already in the requested state, just call the callback immediately. */
592 	if (subsystem->state == requested_state) {
593 		subsystem->changing_state = false;
594 		if (cb_fn) {
595 			cb_fn(subsystem, cb_arg, 0);
596 		}
597 		return 0;
598 	}
599 
600 	intermediate_state = nvmf_subsystem_get_intermediate_state(subsystem->state, requested_state);
601 	assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES);
602 
603 	ctx = calloc(1, sizeof(*ctx));
604 	if (!ctx) {
605 		subsystem->changing_state = false;
606 		return -ENOMEM;
607 	}
608 
609 	ctx->original_state = subsystem->state;
610 	rc = nvmf_subsystem_set_state(subsystem, intermediate_state);
611 	if (rc) {
612 		free(ctx);
613 		subsystem->changing_state = false;
614 		return rc;
615 	}
616 
617 	ctx->subsystem = subsystem;
618 	ctx->requested_state = requested_state;
619 	ctx->cb_fn = cb_fn;
620 	ctx->cb_arg = cb_arg;
621 
622 	spdk_for_each_channel(subsystem->tgt,
623 			      subsystem_state_change_on_pg,
624 			      ctx,
625 			      subsystem_state_change_done);
626 
627 	return 0;
628 }
629 
630 int
631 spdk_nvmf_subsystem_start(struct spdk_nvmf_subsystem *subsystem,
632 			  spdk_nvmf_subsystem_state_change_done cb_fn,
633 			  void *cb_arg)
634 {
635 	return nvmf_subsystem_state_change(subsystem, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg);
636 }
637 
638 int
639 spdk_nvmf_subsystem_stop(struct spdk_nvmf_subsystem *subsystem,
640 			 spdk_nvmf_subsystem_state_change_done cb_fn,
641 			 void *cb_arg)
642 {
643 	return nvmf_subsystem_state_change(subsystem, SPDK_NVMF_SUBSYSTEM_INACTIVE, cb_fn, cb_arg);
644 }
645 
646 int
647 spdk_nvmf_subsystem_pause(struct spdk_nvmf_subsystem *subsystem,
648 			  spdk_nvmf_subsystem_state_change_done cb_fn,
649 			  void *cb_arg)
650 {
651 	return nvmf_subsystem_state_change(subsystem, SPDK_NVMF_SUBSYSTEM_PAUSED, cb_fn, cb_arg);
652 }
653 
654 int
655 spdk_nvmf_subsystem_resume(struct spdk_nvmf_subsystem *subsystem,
656 			   spdk_nvmf_subsystem_state_change_done cb_fn,
657 			   void *cb_arg)
658 {
659 	return nvmf_subsystem_state_change(subsystem, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg);
660 }
661 
662 struct spdk_nvmf_subsystem *
663 spdk_nvmf_subsystem_get_first(struct spdk_nvmf_tgt *tgt)
664 {
665 	struct spdk_nvmf_subsystem	*subsystem;
666 	uint32_t sid;
667 
668 	for (sid = 0; sid < tgt->max_subsystems; sid++) {
669 		subsystem = tgt->subsystems[sid];
670 		if (subsystem) {
671 			return subsystem;
672 		}
673 	}
674 
675 	return NULL;
676 }
677 
678 struct spdk_nvmf_subsystem *
679 spdk_nvmf_subsystem_get_next(struct spdk_nvmf_subsystem *subsystem)
680 {
681 	uint32_t sid;
682 	struct spdk_nvmf_tgt *tgt;
683 
684 	if (!subsystem) {
685 		return NULL;
686 	}
687 
688 	tgt = subsystem->tgt;
689 
690 	for (sid = subsystem->id + 1; sid < tgt->max_subsystems; sid++) {
691 		subsystem = tgt->subsystems[sid];
692 		if (subsystem) {
693 			return subsystem;
694 		}
695 	}
696 
697 	return NULL;
698 }
699 
700 /* Must hold subsystem->mutex while calling this function */
701 static struct spdk_nvmf_host *
702 nvmf_subsystem_find_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
703 {
704 	struct spdk_nvmf_host *host = NULL;
705 
706 	TAILQ_FOREACH(host, &subsystem->hosts, link) {
707 		if (strcmp(hostnqn, host->nqn) == 0) {
708 			return host;
709 		}
710 	}
711 
712 	return NULL;
713 }
714 
715 int
716 spdk_nvmf_subsystem_add_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
717 {
718 	struct spdk_nvmf_host *host;
719 
720 	if (!nvmf_valid_nqn(hostnqn)) {
721 		return -EINVAL;
722 	}
723 
724 	pthread_mutex_lock(&subsystem->mutex);
725 
726 	if (nvmf_subsystem_find_host(subsystem, hostnqn)) {
727 		/* This subsystem already allows the specified host. */
728 		pthread_mutex_unlock(&subsystem->mutex);
729 		return 0;
730 	}
731 
732 	host = calloc(1, sizeof(*host));
733 	if (!host) {
734 		pthread_mutex_unlock(&subsystem->mutex);
735 		return -ENOMEM;
736 	}
737 
738 	snprintf(host->nqn, sizeof(host->nqn), "%s", hostnqn);
739 
740 	TAILQ_INSERT_HEAD(&subsystem->hosts, host, link);
741 
742 	nvmf_update_discovery_log(subsystem->tgt, hostnqn);
743 
744 	pthread_mutex_unlock(&subsystem->mutex);
745 
746 	return 0;
747 }
748 
749 int
750 spdk_nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
751 {
752 	struct spdk_nvmf_host *host;
753 
754 	pthread_mutex_lock(&subsystem->mutex);
755 
756 	host = nvmf_subsystem_find_host(subsystem, hostnqn);
757 	if (host == NULL) {
758 		pthread_mutex_unlock(&subsystem->mutex);
759 		return -ENOENT;
760 	}
761 
762 	nvmf_subsystem_remove_host(subsystem, host);
763 	pthread_mutex_unlock(&subsystem->mutex);
764 
765 	return 0;
766 }
767 
768 struct nvmf_subsystem_disconnect_host_ctx {
769 	struct spdk_nvmf_subsystem		*subsystem;
770 	char					*hostnqn;
771 	spdk_nvmf_tgt_subsystem_listen_done_fn	cb_fn;
772 	void					*cb_arg;
773 };
774 
775 static void
776 nvmf_subsystem_disconnect_host_fini(struct spdk_io_channel_iter *i, int status)
777 {
778 	struct nvmf_subsystem_disconnect_host_ctx *ctx;
779 
780 	ctx = spdk_io_channel_iter_get_ctx(i);
781 
782 	if (ctx->cb_fn) {
783 		ctx->cb_fn(ctx->cb_arg, status);
784 	}
785 	free(ctx->hostnqn);
786 	free(ctx);
787 }
788 
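/* On this poll group, disconnect every qpair whose controller belongs to the subsystem and matches the host NQN. */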
789 static void
790 nvmf_subsystem_disconnect_qpairs_by_host(struct spdk_io_channel_iter *i)
791 {
792 	struct nvmf_subsystem_disconnect_host_ctx *ctx;
793 	struct spdk_nvmf_poll_group *group;
794 	struct spdk_io_channel *ch;
795 	struct spdk_nvmf_qpair *qpair, *tmp_qpair;
796 	struct spdk_nvmf_ctrlr *ctrlr;
797 
798 	ctx = spdk_io_channel_iter_get_ctx(i);
799 	ch = spdk_io_channel_iter_get_channel(i);
800 	group = spdk_io_channel_get_ctx(ch);
801 
802 	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) {
803 		ctrlr = qpair->ctrlr;
804 
805 		if (ctrlr == NULL || ctrlr->subsys != ctx->subsystem) {
806 			continue;
807 		}
808 
809 		if (strncmp(ctrlr->hostnqn, ctx->hostnqn, sizeof(ctrlr->hostnqn)) == 0) {
810 			/* Right now this does not wait for the queue pairs to actually disconnect. */
811 			spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
812 		}
813 	}
814 	spdk_for_each_channel_continue(i, 0);
815 }
816 
817 int
818 spdk_nvmf_subsystem_disconnect_host(struct spdk_nvmf_subsystem *subsystem,
819 				    const char *hostnqn,
820 				    spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
821 				    void *cb_arg)
822 {
823 	struct nvmf_subsystem_disconnect_host_ctx *ctx;
824 
825 	ctx = calloc(1, sizeof(struct nvmf_subsystem_disconnect_host_ctx));
826 	if (ctx == NULL) {
827 		return -ENOMEM;
828 	}
829 
830 	ctx->subsystem = subsystem;
831 	ctx->hostnqn = strdup(hostnqn);
832 	ctx->cb_fn = cb_fn;
833 	ctx->cb_arg = cb_arg;
834 
835 	spdk_for_each_channel(subsystem->tgt, nvmf_subsystem_disconnect_qpairs_by_host, ctx,
836 			      nvmf_subsystem_disconnect_host_fini);
837 
838 	return 0;
839 }
840 
841 int
842 spdk_nvmf_subsystem_set_allow_any_host(struct spdk_nvmf_subsystem *subsystem, bool allow_any_host)
843 {
844 	pthread_mutex_lock(&subsystem->mutex);
845 	subsystem->flags.allow_any_host = allow_any_host;
846 	nvmf_update_discovery_log(subsystem->tgt, NULL);
847 	pthread_mutex_unlock(&subsystem->mutex);
848 
849 	return 0;
850 }
851 
852 bool
853 spdk_nvmf_subsystem_get_allow_any_host(const struct spdk_nvmf_subsystem *subsystem)
854 {
855 	bool allow_any_host;
856 	struct spdk_nvmf_subsystem *sub;
857 
858 	/* Technically, taking the mutex modifies data in the subsystem. But the const
859 	 * is still important to convey that this doesn't mutate any other data. Cast
860 	 * it away to work around this. */
861 	sub = (struct spdk_nvmf_subsystem *)subsystem;
862 
863 	pthread_mutex_lock(&sub->mutex);
864 	allow_any_host = sub->flags.allow_any_host;
865 	pthread_mutex_unlock(&sub->mutex);
866 
867 	return allow_any_host;
868 }
869 
870 bool
871 spdk_nvmf_subsystem_host_allowed(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
872 {
873 	bool allowed;
874 
875 	if (!hostnqn) {
876 		return false;
877 	}
878 
879 	pthread_mutex_lock(&subsystem->mutex);
880 
881 	if (subsystem->flags.allow_any_host) {
882 		pthread_mutex_unlock(&subsystem->mutex);
883 		return true;
884 	}
885 
886 	allowed = nvmf_subsystem_find_host(subsystem, hostnqn) != NULL;
887 	pthread_mutex_unlock(&subsystem->mutex);
888 
889 	return allowed;
890 }
891 
892 struct spdk_nvmf_host *
893 spdk_nvmf_subsystem_get_first_host(struct spdk_nvmf_subsystem *subsystem)
894 {
895 	return TAILQ_FIRST(&subsystem->hosts);
896 }
897 
898 
899 struct spdk_nvmf_host *
900 spdk_nvmf_subsystem_get_next_host(struct spdk_nvmf_subsystem *subsystem,
901 				  struct spdk_nvmf_host *prev_host)
902 {
903 	return TAILQ_NEXT(prev_host, link);
904 }
905 
906 const char *
907 spdk_nvmf_host_get_nqn(const struct spdk_nvmf_host *host)
908 {
909 	return host->nqn;
910 }
911 
912 struct spdk_nvmf_subsystem_listener *
913 nvmf_subsystem_find_listener(struct spdk_nvmf_subsystem *subsystem,
914 			     const struct spdk_nvme_transport_id *trid)
915 {
916 	struct spdk_nvmf_subsystem_listener *listener;
917 
918 	TAILQ_FOREACH(listener, &subsystem->listeners, link) {
919 		if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) {
920 			return listener;
921 		}
922 	}
923 
924 	return NULL;
925 }
926 
927 /**
928  * Function to be called once the target is listening.
929  *
930  * \param ctx Context argument passed to this function.
931  * \param status 0 if it completed successfully, or negative errno if it failed.
932  */
933 static void
934 _nvmf_subsystem_add_listener_done(void *ctx, int status)
935 {
936 	struct spdk_nvmf_subsystem_listener *listener = ctx;
937 
938 	if (status) {
939 		listener->cb_fn(listener->cb_arg, status);
940 		free(listener);
941 		return;
942 	}
943 
944 	TAILQ_INSERT_HEAD(&listener->subsystem->listeners, listener, link);
945 	nvmf_update_discovery_log(listener->subsystem->tgt, NULL);
946 	listener->cb_fn(listener->cb_arg, status);
947 }
948 
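/*
 * Associate an existing transport listener with this subsystem so that hosts connecting
 * on the given trid can reach it. The subsystem must be inactive or paused, and the
 * transport must already be listening on the trid.
 */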
949 void
950 spdk_nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem,
951 				 struct spdk_nvme_transport_id *trid,
952 				 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
953 				 void *cb_arg)
954 {
955 	struct spdk_nvmf_transport *transport;
956 	struct spdk_nvmf_subsystem_listener *listener;
957 	struct spdk_nvmf_listener *tr_listener;
958 	int rc = 0;
959 
960 	assert(cb_fn != NULL);
961 
962 	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
963 	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
964 		cb_fn(cb_arg, -EAGAIN);
965 		return;
966 	}
967 
968 	if (nvmf_subsystem_find_listener(subsystem, trid)) {
969 		/* Listener already exists in this subsystem */
970 		cb_fn(cb_arg, 0);
971 		return;
972 	}
973 
974 	transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, trid->trstring);
975 	if (!transport) {
976 		SPDK_ERRLOG("Unable to find %s transport. The transport must be created first; also make sure it is properly registered.\n",
977 			    trid->trstring);
978 		cb_fn(cb_arg, -EINVAL);
979 		return;
980 	}
981 
982 	tr_listener = nvmf_transport_find_listener(transport, trid);
983 	if (!tr_listener) {
984 		SPDK_ERRLOG("Cannot find transport listener for %s\n", trid->traddr);
985 		cb_fn(cb_arg, -EINVAL);
986 		return;
987 	}
988 
989 	listener = calloc(1, sizeof(*listener));
990 	if (!listener) {
991 		cb_fn(cb_arg, -ENOMEM);
992 		return;
993 	}
994 
995 	listener->trid = &tr_listener->trid;
996 	listener->transport = transport;
997 	listener->cb_fn = cb_fn;
998 	listener->cb_arg = cb_arg;
999 	listener->subsystem = subsystem;
1000 	listener->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
1001 
1002 	if (transport->ops->listen_associate != NULL) {
1003 		rc = transport->ops->listen_associate(transport, subsystem, trid);
1004 	}
1005 
1006 	_nvmf_subsystem_add_listener_done(listener, rc);
1007 }
1008 
1009 int
1010 spdk_nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem,
1011 				    const struct spdk_nvme_transport_id *trid)
1012 {
1013 	struct spdk_nvmf_subsystem_listener *listener;
1014 
1015 	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
1016 	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
1017 		return -EAGAIN;
1018 	}
1019 
1020 	listener = nvmf_subsystem_find_listener(subsystem, trid);
1021 	if (listener == NULL) {
1022 		return -ENOENT;
1023 	}
1024 
1025 	_nvmf_subsystem_remove_listener(subsystem, listener, false);
1026 
1027 	return 0;
1028 }
1029 
1030 void
1031 nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem,
1032 				    bool stop)
1033 {
1034 	struct spdk_nvmf_subsystem_listener *listener, *listener_tmp;
1035 
1036 	TAILQ_FOREACH_SAFE(listener, &subsystem->listeners, link, listener_tmp) {
1037 		_nvmf_subsystem_remove_listener(subsystem, listener, stop);
1038 	}
1039 }
1040 
1041 bool
1042 spdk_nvmf_subsystem_listener_allowed(struct spdk_nvmf_subsystem *subsystem,
1043 				     const struct spdk_nvme_transport_id *trid)
1044 {
1045 	struct spdk_nvmf_subsystem_listener *listener;
1046 
1047 	if (!strcmp(subsystem->subnqn, SPDK_NVMF_DISCOVERY_NQN)) {
1048 		return true;
1049 	}
1050 
1051 	TAILQ_FOREACH(listener, &subsystem->listeners, link) {
1052 		if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) {
1053 			return true;
1054 		}
1055 	}
1056 
1057 	return false;
1058 }
1059 
1060 struct spdk_nvmf_subsystem_listener *
1061 spdk_nvmf_subsystem_get_first_listener(struct spdk_nvmf_subsystem *subsystem)
1062 {
1063 	return TAILQ_FIRST(&subsystem->listeners);
1064 }
1065 
1066 struct spdk_nvmf_subsystem_listener *
1067 spdk_nvmf_subsystem_get_next_listener(struct spdk_nvmf_subsystem *subsystem,
1068 				      struct spdk_nvmf_subsystem_listener *prev_listener)
1069 {
1070 	return TAILQ_NEXT(prev_listener, link);
1071 }
1072 
1073 const struct spdk_nvme_transport_id *
1074 spdk_nvmf_subsystem_listener_get_trid(struct spdk_nvmf_subsystem_listener *listener)
1075 {
1076 	return listener->trid;
1077 }
1078 
1079 void
1080 spdk_nvmf_subsystem_allow_any_listener(struct spdk_nvmf_subsystem *subsystem,
1081 				       bool allow_any_listener)
1082 {
1083 	subsystem->flags.allow_any_listener = allow_any_listener;
1084 }
1085 
1086 bool
1087 spdk_nvmf_subsytem_any_listener_allowed(struct spdk_nvmf_subsystem *subsystem)
1088 {
1089 	return subsystem->flags.allow_any_listener;
1090 }
1091 
1092 
1093 struct subsystem_update_ns_ctx {
1094 	struct spdk_nvmf_subsystem *subsystem;
1095 
1096 	spdk_nvmf_subsystem_state_change_done cb_fn;
1097 	void *cb_arg;
1098 };
1099 
1100 static void
1101 subsystem_update_ns_done(struct spdk_io_channel_iter *i, int status)
1102 {
1103 	struct subsystem_update_ns_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
1104 
1105 	if (ctx->cb_fn) {
1106 		ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status);
1107 	}
1108 	free(ctx);
1109 }
1110 
1111 static void
1112 subsystem_update_ns_on_pg(struct spdk_io_channel_iter *i)
1113 {
1114 	int rc;
1115 	struct subsystem_update_ns_ctx *ctx;
1116 	struct spdk_nvmf_poll_group *group;
1117 	struct spdk_nvmf_subsystem *subsystem;
1118 
1119 	ctx = spdk_io_channel_iter_get_ctx(i);
1120 	group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i));
1121 	subsystem = ctx->subsystem;
1122 
1123 	rc = nvmf_poll_group_update_subsystem(group, subsystem);
1124 	spdk_for_each_channel_continue(i, rc);
1125 }
1126 
1127 static int
1128 nvmf_subsystem_update_ns(struct spdk_nvmf_subsystem *subsystem, spdk_channel_for_each_cpl cpl,
1129 			 void *ctx)
1130 {
1131 	spdk_for_each_channel(subsystem->tgt,
1132 			      subsystem_update_ns_on_pg,
1133 			      ctx,
1134 			      cpl);
1135 
1136 	return 0;
1137 }
1138 
1139 static void
1140 nvmf_subsystem_ns_changed(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
1141 {
1142 	struct spdk_nvmf_ctrlr *ctrlr;
1143 
1144 	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
1145 		nvmf_ctrlr_ns_changed(ctrlr, nsid);
1146 	}
1147 }
1148 
1149 static uint32_t
1150 nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns);
1151 
1152 int
1153 spdk_nvmf_subsystem_remove_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
1154 {
1155 	struct spdk_nvmf_transport *transport;
1156 	struct spdk_nvmf_ns *ns;
1157 
1158 	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
1159 	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
1160 		assert(false);
1161 		return -1;
1162 	}
1163 
1164 	if (nsid == 0 || nsid > subsystem->max_nsid) {
1165 		return -1;
1166 	}
1167 
1168 	ns = subsystem->ns[nsid - 1];
1169 	if (!ns) {
1170 		return -1;
1171 	}
1172 
1173 	subsystem->ns[nsid - 1] = NULL;
1174 
1175 	free(ns->ptpl_file);
1176 	nvmf_ns_reservation_clear_all_registrants(ns);
1177 	spdk_bdev_module_release_bdev(ns->bdev);
1178 	spdk_bdev_close(ns->desc);
1179 	free(ns);
1180 
1181 	for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
1182 	     transport = spdk_nvmf_transport_get_next(transport)) {
1183 		if (transport->ops->subsystem_remove_ns) {
1184 			transport->ops->subsystem_remove_ns(transport, subsystem, nsid);
1185 		}
1186 	}
1187 
1188 	nvmf_subsystem_ns_changed(subsystem, nsid);
1189 
1190 	return 0;
1191 }
1192 
1193 struct subsystem_ns_change_ctx {
1194 	struct spdk_nvmf_subsystem		*subsystem;
1195 	spdk_nvmf_subsystem_state_change_done	cb_fn;
1196 	uint32_t				nsid;
1197 };
1198 
1199 static void
1200 _nvmf_ns_hot_remove(struct spdk_nvmf_subsystem *subsystem,
1201 		    void *cb_arg, int status)
1202 {
1203 	struct subsystem_ns_change_ctx *ctx = cb_arg;
1204 	int rc;
1205 
1206 	rc = spdk_nvmf_subsystem_remove_ns(subsystem, ctx->nsid);
1207 	if (rc != 0) {
1208 		SPDK_ERRLOG("Failed to make changes to NVMe-oF subsystem with id: %u\n", subsystem->id);
1209 	}
1210 
1211 	spdk_nvmf_subsystem_resume(subsystem, NULL, NULL);
1212 
1213 	free(ctx);
1214 }
1215 
1216 static void
1217 nvmf_ns_change_msg(void *ns_ctx)
1218 {
1219 	struct subsystem_ns_change_ctx *ctx = ns_ctx;
1220 	int rc;
1221 
1222 	rc = spdk_nvmf_subsystem_pause(ctx->subsystem, ctx->cb_fn, ctx);
1223 	if (rc) {
1224 		if (rc == -EBUSY) {
1225 			/* Try again, this is not a permanent situation. */
1226 			spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ctx);
1227 		} else {
1228 			free(ctx);
1229 			SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n");
1230 		}
1231 	}
1232 }
1233 
1234 static void
1235 nvmf_ns_hot_remove(void *remove_ctx)
1236 {
1237 	struct spdk_nvmf_ns *ns = remove_ctx;
1238 	struct subsystem_ns_change_ctx *ns_ctx;
1239 	int rc;
1240 
1241 	/* We have to allocate a new context because this op
1242 	 * is asynchronous and we could lose the ns in the middle.
1243 	 */
1244 	ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx));
1245 	if (!ns_ctx) {
1246 		SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n");
1247 		return;
1248 	}
1249 
1250 	ns_ctx->subsystem = ns->subsystem;
1251 	ns_ctx->nsid = ns->opts.nsid;
1252 	ns_ctx->cb_fn = _nvmf_ns_hot_remove;
1253 
1254 	rc = spdk_nvmf_subsystem_pause(ns->subsystem, _nvmf_ns_hot_remove, ns_ctx);
1255 	if (rc) {
1256 		if (rc == -EBUSY) {
1257 			/* Try again, this is not a permanent situation. */
1258 			spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx);
1259 		} else {
1260 			SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n");
1261 			free(ns_ctx);
1262 		}
1263 	}
1264 }
1265 
1266 static void
1267 _nvmf_ns_resize(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
1268 {
1269 	struct subsystem_ns_change_ctx *ctx = cb_arg;
1270 
1271 	nvmf_subsystem_ns_changed(subsystem, ctx->nsid);
1272 	spdk_nvmf_subsystem_resume(subsystem, NULL, NULL);
1273 
1274 	free(ctx);
1275 }
1276 
1277 static void
1278 nvmf_ns_resize(void *event_ctx)
1279 {
1280 	struct spdk_nvmf_ns *ns = event_ctx;
1281 	struct subsystem_ns_change_ctx *ns_ctx;
1282 	int rc;
1283 
1284 	/* We have to allocate a new context because this op
1285 	 * is asynchronous and we could lose the ns in the middle.
1286 	 */
1287 	ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx));
1288 	if (!ns_ctx) {
1289 		SPDK_ERRLOG("Unable to allocate context to process namespace resize!\n");
1290 		return;
1291 	}
1292 
1293 	ns_ctx->subsystem = ns->subsystem;
1294 	ns_ctx->nsid = ns->opts.nsid;
1295 	ns_ctx->cb_fn = _nvmf_ns_resize;
1296 
1297 	rc = spdk_nvmf_subsystem_pause(ns->subsystem, _nvmf_ns_resize, ns_ctx);
1298 	if (rc) {
1299 		if (rc == -EBUSY) {
1300 			/* Try again, this is not a permanent situation. */
1301 			spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx);
1302 		} else {
1303 			SPDK_ERRLOG("Unable to pause subsystem to process namespace resize!\n");
1304 			free(ns_ctx);
		}
1305 	}
1306 }
1307 
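/* bdev event callback registered in spdk_bdev_open_ext(); handles hot remove and resize of a namespace's bdev. */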
1308 static void
1309 nvmf_ns_event(enum spdk_bdev_event_type type,
1310 	      struct spdk_bdev *bdev,
1311 	      void *event_ctx)
1312 {
1313 	SPDK_DEBUGLOG(nvmf, "Bdev event: type %d, name %s, subsystem_id %d, ns_id %d\n",
1314 		      type,
1315 		      bdev->name,
1316 		      ((struct spdk_nvmf_ns *)event_ctx)->subsystem->id,
1317 		      ((struct spdk_nvmf_ns *)event_ctx)->nsid);
1318 
1319 	switch (type) {
1320 	case SPDK_BDEV_EVENT_REMOVE:
1321 		nvmf_ns_hot_remove(event_ctx);
1322 		break;
1323 	case SPDK_BDEV_EVENT_RESIZE:
1324 		nvmf_ns_resize(event_ctx);
1325 		break;
1326 	default:
1327 		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
1328 		break;
1329 	}
1330 }
1331 
1332 void
1333 spdk_nvmf_ns_opts_get_defaults(struct spdk_nvmf_ns_opts *opts, size_t opts_size)
1334 {
1335 	/* All current fields are set to 0 by default. */
1336 	memset(opts, 0, opts_size);
1337 }
1338 
1339 /* Dummy bdev module used to claim bdevs. */
1340 static struct spdk_bdev_module ns_bdev_module = {
1341 	.name	= "NVMe-oF Target",
1342 };
1343 
1344 static int
1345 nvmf_ns_load_reservation(const char *file, struct spdk_nvmf_reservation_info *info);
1346 static int
1347 nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info);
1348 
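/*
 * Add a namespace backed by the named bdev. The subsystem must be inactive or paused.
 * Returns the assigned NSID on success or 0 on failure.
 */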
1349 uint32_t
1350 spdk_nvmf_subsystem_add_ns_ext(struct spdk_nvmf_subsystem *subsystem, const char *bdev_name,
1351 			       const struct spdk_nvmf_ns_opts *user_opts, size_t opts_size,
1352 			       const char *ptpl_file)
1353 {
1354 	struct spdk_nvmf_transport *transport;
1355 	struct spdk_nvmf_ns_opts opts;
1356 	struct spdk_nvmf_ns *ns;
1357 	struct spdk_nvmf_reservation_info info = {0};
1358 	int rc;
1359 
1360 	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
1361 	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
1362 		return 0;
1363 	}
1364 
1365 	spdk_nvmf_ns_opts_get_defaults(&opts, sizeof(opts));
1366 	if (user_opts) {
1367 		memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size));
1368 	}
1369 
1370 	if (opts.nsid == SPDK_NVME_GLOBAL_NS_TAG) {
1371 		SPDK_ERRLOG("Invalid NSID %" PRIu32 "\n", opts.nsid);
1372 		return 0;
1373 	}
1374 
1375 	if (opts.nsid == 0) {
1376 		/*
1377 		 * NSID not specified - find a free index.
1378 		 *
1379 		 * If no free slots are found, opts.nsid will be subsystem->max_nsid + 1, which will
1380 		 * expand max_nsid if possible.
1381 		 */
1382 		for (opts.nsid = 1; opts.nsid <= subsystem->max_nsid; opts.nsid++) {
1383 			if (_nvmf_subsystem_get_ns(subsystem, opts.nsid) == NULL) {
1384 				break;
1385 			}
1386 		}
1387 	}
1388 
1389 	if (_nvmf_subsystem_get_ns(subsystem, opts.nsid)) {
1390 		SPDK_ERRLOG("Requested NSID %" PRIu32 " already in use\n", opts.nsid);
1391 		return 0;
1392 	}
1393 
1394 	if (opts.nsid > subsystem->max_nsid) {
1395 		SPDK_ERRLOG("NSID greater than maximum not allowed\n");
1396 		return 0;
1397 	}
1398 
1399 	ns = calloc(1, sizeof(*ns));
1400 	if (ns == NULL) {
1401 		SPDK_ERRLOG("Namespace allocation failed\n");
1402 		return 0;
1403 	}
1404 
1405 	rc = spdk_bdev_open_ext(bdev_name, true, nvmf_ns_event, ns, &ns->desc);
1406 	if (rc != 0) {
1407 		SPDK_ERRLOG("Subsystem %s: bdev %s cannot be opened, error=%d\n",
1408 			    subsystem->subnqn, bdev_name, rc);
1409 		free(ns);
1410 		return 0;
1411 	}
1412 
1413 	ns->bdev = spdk_bdev_desc_get_bdev(ns->desc);
1414 
1415 	if (spdk_bdev_get_md_size(ns->bdev) != 0 && !spdk_bdev_is_md_interleaved(ns->bdev)) {
1416 		SPDK_ERRLOG("Can't attach bdev with separate metadata.\n");
1417 		spdk_bdev_close(ns->desc);
1418 		free(ns);
1419 		return 0;
1420 	}
1421 
1422 	rc = spdk_bdev_module_claim_bdev(ns->bdev, ns->desc, &ns_bdev_module);
1423 	if (rc != 0) {
1424 		spdk_bdev_close(ns->desc);
1425 		free(ns);
1426 		return 0;
1427 	}
1428 
1429 	if (spdk_mem_all_zero(&opts.uuid, sizeof(opts.uuid))) {
1430 		opts.uuid = *spdk_bdev_get_uuid(ns->bdev);
1431 	}
1432 
1433 	ns->opts = opts;
1434 	ns->subsystem = subsystem;
1435 	subsystem->ns[opts.nsid - 1] = ns;
1436 	ns->nsid = opts.nsid;
1437 	TAILQ_INIT(&ns->registrants);
1438 
1439 	if (ptpl_file) {
1440 		rc = nvmf_ns_load_reservation(ptpl_file, &info);
1441 		if (!rc) {
1442 			rc = nvmf_ns_reservation_restore(ns, &info);
1443 			if (rc) {
1444 				SPDK_ERRLOG("Subsystem restore reservation failed\n");
1445 				subsystem->ns[opts.nsid - 1] = NULL;
1446 				spdk_bdev_module_release_bdev(ns->bdev);
1447 				spdk_bdev_close(ns->desc);
1448 				free(ns);
1449 				return 0;
1450 			}
1451 		}
1452 		ns->ptpl_file = strdup(ptpl_file);
1453 	}
1454 
1455 	for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
1456 	     transport = spdk_nvmf_transport_get_next(transport)) {
1457 		if (transport->ops->subsystem_add_ns) {
1458 			rc = transport->ops->subsystem_add_ns(transport, subsystem, ns);
1459 			if (rc) {
1460 				SPDK_ERRLOG("Namespace attachment is not allowed by %s transport\n", transport->ops->name);
1461 				free(ns->ptpl_file);
1462 				nvmf_ns_reservation_clear_all_registrants(ns);
1463 				subsystem->ns[opts.nsid - 1] = NULL;
1464 				spdk_bdev_module_release_bdev(ns->bdev);
1465 				spdk_bdev_close(ns->desc);
1466 				free(ns);
1467 				return 0;
1468 			}
1469 		}
1470 	}
1471 
1472 	SPDK_DEBUGLOG(nvmf, "Subsystem %s: bdev %s assigned nsid %" PRIu32 "\n",
1473 		      spdk_nvmf_subsystem_get_nqn(subsystem),
1474 		      bdev_name,
1475 		      opts.nsid);
1476 
1477 	nvmf_subsystem_ns_changed(subsystem, opts.nsid);
1478 
1479 	return opts.nsid;
1480 }
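/*
 * A minimal usage sketch (not part of this file): "paused_cb" is a hypothetical callback
 * and a bdev named "Malloc0" is assumed to exist. Namespaces may only be added while the
 * subsystem is inactive or paused, so the add is done from the pause callback:
 *
 *	static void
 *	paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
 *	{
 *		struct spdk_nvmf_ns_opts opts;
 *
 *		spdk_nvmf_ns_opts_get_defaults(&opts, sizeof(opts));
 *		spdk_nvmf_subsystem_add_ns_ext(subsystem, "Malloc0", &opts, sizeof(opts), NULL);
 *		spdk_nvmf_subsystem_resume(subsystem, NULL, NULL);
 *	}
 *
 *	spdk_nvmf_subsystem_pause(subsystem, paused_cb, NULL);
 */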
1481 
1482 uint32_t
1483 spdk_nvmf_subsystem_add_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_bdev *bdev,
1484 			   const struct spdk_nvmf_ns_opts *user_opts, size_t opts_size,
1485 			   const char *ptpl_file)
1486 {
1487 	return spdk_nvmf_subsystem_add_ns_ext(subsystem, spdk_bdev_get_name(bdev),
1488 					      user_opts, opts_size, ptpl_file);
1489 }
1490 
1491 static uint32_t
1492 nvmf_subsystem_get_next_allocated_nsid(struct spdk_nvmf_subsystem *subsystem,
1493 				       uint32_t prev_nsid)
1494 {
1495 	uint32_t nsid;
1496 
1497 	if (prev_nsid >= subsystem->max_nsid) {
1498 		return 0;
1499 	}
1500 
1501 	for (nsid = prev_nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
1502 		if (subsystem->ns[nsid - 1]) {
1503 			return nsid;
1504 		}
1505 	}
1506 
1507 	return 0;
1508 }
1509 
1510 struct spdk_nvmf_ns *
1511 spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
1512 {
1513 	uint32_t first_nsid;
1514 
1515 	first_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, 0);
1516 	return _nvmf_subsystem_get_ns(subsystem, first_nsid);
1517 }
1518 
1519 struct spdk_nvmf_ns *
1520 spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
1521 				struct spdk_nvmf_ns *prev_ns)
1522 {
1523 	uint32_t next_nsid;
1524 
1525 	next_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, prev_ns->opts.nsid);
1526 	return _nvmf_subsystem_get_ns(subsystem, next_nsid);
1527 }
1528 
1529 struct spdk_nvmf_ns *
1530 spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
1531 {
1532 	return _nvmf_subsystem_get_ns(subsystem, nsid);
1533 }
1534 
1535 uint32_t
1536 spdk_nvmf_ns_get_id(const struct spdk_nvmf_ns *ns)
1537 {
1538 	return ns->opts.nsid;
1539 }
1540 
1541 struct spdk_bdev *
1542 spdk_nvmf_ns_get_bdev(struct spdk_nvmf_ns *ns)
1543 {
1544 	return ns->bdev;
1545 }
1546 
1547 void
1548 spdk_nvmf_ns_get_opts(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_ns_opts *opts,
1549 		      size_t opts_size)
1550 {
1551 	memset(opts, 0, opts_size);
1552 	memcpy(opts, &ns->opts, spdk_min(sizeof(ns->opts), opts_size));
1553 }
1554 
1555 const char *
1556 spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
1557 {
1558 	return subsystem->sn;
1559 }
1560 
1561 int
1562 spdk_nvmf_subsystem_set_sn(struct spdk_nvmf_subsystem *subsystem, const char *sn)
1563 {
1564 	size_t len, max_len;
1565 
1566 	max_len = sizeof(subsystem->sn) - 1;
1567 	len = strlen(sn);
1568 	if (len > max_len) {
1569 		SPDK_DEBUGLOG(nvmf, "Invalid sn \"%s\": length %zu > max %zu\n",
1570 			      sn, len, max_len);
1571 		return -1;
1572 	}
1573 
1574 	if (!nvmf_valid_ascii_string(sn, len)) {
1575 		SPDK_DEBUGLOG(nvmf, "Non-ASCII sn\n");
1576 		SPDK_LOGDUMP(nvmf, "sn", sn, len);
1577 		return -1;
1578 	}
1579 
1580 	snprintf(subsystem->sn, sizeof(subsystem->sn), "%s", sn);
1581 
1582 	return 0;
1583 }
1584 
1585 const char *
1586 spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
1587 {
1588 	return subsystem->mn;
1589 }
1590 
1591 int
1592 spdk_nvmf_subsystem_set_mn(struct spdk_nvmf_subsystem *subsystem, const char *mn)
1593 {
1594 	size_t len, max_len;
1595 
1596 	if (mn == NULL) {
1597 		mn = MODEL_NUMBER_DEFAULT;
1598 	}
1599 	max_len = sizeof(subsystem->mn) - 1;
1600 	len = strlen(mn);
1601 	if (len > max_len) {
1602 		SPDK_DEBUGLOG(nvmf, "Invalid mn \"%s\": length %zu > max %zu\n",
1603 			      mn, len, max_len);
1604 		return -1;
1605 	}
1606 
1607 	if (!nvmf_valid_ascii_string(mn, len)) {
1608 		SPDK_DEBUGLOG(nvmf, "Non-ASCII mn\n");
1609 		SPDK_LOGDUMP(nvmf, "mn", mn, len);
1610 		return -1;
1611 	}
1612 
1613 	snprintf(subsystem->mn, sizeof(subsystem->mn), "%s", mn);
1614 
1615 	return 0;
1616 }
1617 
1618 const char *
1619 spdk_nvmf_subsystem_get_nqn(const struct spdk_nvmf_subsystem *subsystem)
1620 {
1621 	return subsystem->subnqn;
1622 }
1623 
1624 enum spdk_nvmf_subtype spdk_nvmf_subsystem_get_type(struct spdk_nvmf_subsystem *subsystem)
1625 {
1626 	return subsystem->subtype;
1627 }
1628 
1629 uint32_t
1630 spdk_nvmf_subsystem_get_max_nsid(struct spdk_nvmf_subsystem *subsystem)
1631 {
1632 	return subsystem->max_nsid;
1633 }
1634 
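/* Pick the next unused controller ID in [1, 0xFFF0); returns 0xFFFF if every valid value is in use. */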
1635 static uint16_t
1636 nvmf_subsystem_gen_cntlid(struct spdk_nvmf_subsystem *subsystem)
1637 {
1638 	int count;
1639 
1640 	/*
1641 	 * In the worst case, we might have to try all CNTLID values between 1 and 0xFFF0 - 1
1642 	 * before we find one that is unused (or find that all values are in use).
1643 	 */
1644 	for (count = 0; count < 0xFFF0 - 1; count++) {
1645 		subsystem->next_cntlid++;
1646 		if (subsystem->next_cntlid >= 0xFFF0) {
1647 			/* The spec reserves cntlid values in the range FFF0h to FFFFh. */
1648 			subsystem->next_cntlid = 1;
1649 		}
1650 
1651 		/* Check if a controller with this cntlid currently exists. */
1652 		if (nvmf_subsystem_get_ctrlr(subsystem, subsystem->next_cntlid) == NULL) {
1653 			/* Found unused cntlid */
1654 			return subsystem->next_cntlid;
1655 		}
1656 	}
1657 
1658 	/* All valid cntlid values are in use. */
1659 	return 0xFFFF;
1660 }
1661 
1662 int
1663 nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr)
1664 {
1665 	ctrlr->cntlid = nvmf_subsystem_gen_cntlid(subsystem);
1666 	if (ctrlr->cntlid == 0xFFFF) {
1667 		/* Unable to get a cntlid */
1668 		SPDK_ERRLOG("Reached max simultaneous ctrlrs\n");
1669 		return -EBUSY;
1670 	}
1671 
1672 	TAILQ_INSERT_TAIL(&subsystem->ctrlrs, ctrlr, link);
1673 
1674 	return 0;
1675 }
1676 
1677 void
1678 nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
1679 			    struct spdk_nvmf_ctrlr *ctrlr)
1680 {
1681 	assert(subsystem == ctrlr->subsys);
1682 	TAILQ_REMOVE(&subsystem->ctrlrs, ctrlr, link);
1683 }
1684 
1685 struct spdk_nvmf_ctrlr *
1686 nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid)
1687 {
1688 	struct spdk_nvmf_ctrlr *ctrlr;
1689 
1690 	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
1691 		if (ctrlr->cntlid == cntlid) {
1692 			return ctrlr;
1693 		}
1694 	}
1695 
1696 	return NULL;
1697 }
1698 
1699 uint32_t
1700 spdk_nvmf_subsystem_get_max_namespaces(const struct spdk_nvmf_subsystem *subsystem)
1701 {
1702 	return subsystem->max_nsid;
1703 }
1704 
1705 struct _nvmf_ns_registrant {
1706 	uint64_t		rkey;
1707 	char			*host_uuid;
1708 };
1709 
1710 struct _nvmf_ns_registrants {
1711 	size_t				num_regs;
1712 	struct _nvmf_ns_registrant	reg[SPDK_NVMF_MAX_NUM_REGISTRANTS];
1713 };
1714 
1715 struct _nvmf_ns_reservation {
1716 	bool					ptpl_activated;
1717 	enum spdk_nvme_reservation_type		rtype;
1718 	uint64_t				crkey;
1719 	char					*bdev_uuid;
1720 	char					*holder_uuid;
1721 	struct _nvmf_ns_registrants		regs;
1722 };
1723 
1724 static const struct spdk_json_object_decoder nvmf_ns_pr_reg_decoders[] = {
1725 	{"rkey", offsetof(struct _nvmf_ns_registrant, rkey), spdk_json_decode_uint64},
1726 	{"host_uuid", offsetof(struct _nvmf_ns_registrant, host_uuid), spdk_json_decode_string},
1727 };
1728 
1729 static int
1730 nvmf_decode_ns_pr_reg(const struct spdk_json_val *val, void *out)
1731 {
1732 	struct _nvmf_ns_registrant *reg = out;
1733 
1734 	return spdk_json_decode_object(val, nvmf_ns_pr_reg_decoders,
1735 				       SPDK_COUNTOF(nvmf_ns_pr_reg_decoders), reg);
1736 }
1737 
1738 static int
1739 nvmf_decode_ns_pr_regs(const struct spdk_json_val *val, void *out)
1740 {
1741 	struct _nvmf_ns_registrants *regs = out;
1742 
1743 	return spdk_json_decode_array(val, nvmf_decode_ns_pr_reg, regs->reg,
1744 				      SPDK_NVMF_MAX_NUM_REGISTRANTS, &regs->num_regs,
1745 				      sizeof(struct _nvmf_ns_registrant));
1746 }
1747 
1748 static const struct spdk_json_object_decoder nvmf_ns_pr_decoders[] = {
1749 	{"ptpl", offsetof(struct _nvmf_ns_reservation, ptpl_activated), spdk_json_decode_bool, true},
1750 	{"rtype", offsetof(struct _nvmf_ns_reservation, rtype), spdk_json_decode_uint32, true},
1751 	{"crkey", offsetof(struct _nvmf_ns_reservation, crkey), spdk_json_decode_uint64, true},
1752 	{"bdev_uuid", offsetof(struct _nvmf_ns_reservation, bdev_uuid), spdk_json_decode_string},
1753 	{"holder_uuid", offsetof(struct _nvmf_ns_reservation, holder_uuid), spdk_json_decode_string, true},
1754 	{"registrants", offsetof(struct _nvmf_ns_reservation, regs), nvmf_decode_ns_pr_regs},
1755 };
1756 
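/* Parse the reservation persist-through-power-loss (PTPL) JSON file into *info. Returns 0 on success, negative errno on failure. */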
1757 static int
1758 nvmf_ns_load_reservation(const char *file, struct spdk_nvmf_reservation_info *info)
1759 {
1760 	FILE *fd;
1761 	size_t json_size;
1762 	ssize_t values_cnt, rc;
1763 	void *json = NULL, *end;
1764 	struct spdk_json_val *values = NULL;
1765 	struct _nvmf_ns_reservation res = {};
1766 	uint32_t i;
1767 
1768 	fd = fopen(file, "r");
1769 	/* It's not an error if the file does not exist */
1770 	if (!fd) {
1771 		SPDK_NOTICELOG("File %s does not exist\n", file);
1772 		return -ENOENT;
1773 	}
1774 
1775 	/* Load all persist file contents into a local buffer */
1776 	json = spdk_posix_file_load(fd, &json_size);
1777 	fclose(fd);
1778 	if (!json) {
1779 		SPDK_ERRLOG("Failed to load persist file %s\n", file);
1780 		return -ENOMEM;
1781 	}
1782 
1783 	rc = spdk_json_parse(json, json_size, NULL, 0, &end, 0);
1784 	if (rc < 0) {
1785 		SPDK_NOTICELOG("Parsing JSON configuration failed (%zd)\n", rc);
1786 		goto exit;
1787 	}
1788 
1789 	values_cnt = rc;
1790 	values = calloc(values_cnt, sizeof(struct spdk_json_val));
1791 	if (values == NULL) {
1792 		goto exit;
1793 	}
1794 
1795 	rc = spdk_json_parse(json, json_size, values, values_cnt, &end, 0);
1796 	if (rc != values_cnt) {
1797 		SPDK_ERRLOG("Parsing JSON configuration failed (%zd)\n", rc);
1798 		goto exit;
1799 	}
1800 
1801 	/* Decode json */
1802 	if (spdk_json_decode_object(values, nvmf_ns_pr_decoders,
1803 				    SPDK_COUNTOF(nvmf_ns_pr_decoders),
1804 				    &res)) {
1805 		SPDK_ERRLOG("Invalid objects in the persist file %s\n", file);
1806 		rc = -EINVAL;
1807 		goto exit;
1808 	}
1809 
1810 	if (res.regs.num_regs > SPDK_NVMF_MAX_NUM_REGISTRANTS) {
1811 		SPDK_ERRLOG("Can only support up to %u registrants\n", SPDK_NVMF_MAX_NUM_REGISTRANTS);
1812 		rc = -ERANGE;
1813 		goto exit;
1814 	}
1815 
1816 	rc = 0;
1817 	info->ptpl_activated = res.ptpl_activated;
1818 	info->rtype = res.rtype;
1819 	info->crkey = res.crkey;
1820 	snprintf(info->bdev_uuid, sizeof(info->bdev_uuid), "%s", res.bdev_uuid);
1821 	snprintf(info->holder_uuid, sizeof(info->holder_uuid), "%s", res.holder_uuid);
1822 	info->num_regs = res.regs.num_regs;
1823 	for (i = 0; i < res.regs.num_regs; i++) {
1824 		info->registrants[i].rkey = res.regs.reg[i].rkey;
1825 		snprintf(info->registrants[i].host_uuid, sizeof(info->registrants[i].host_uuid), "%s",
1826 			 res.regs.reg[i].host_uuid);
1827 	}
1828 
1829 exit:
1830 	free(json);
1831 	free(values);
1832 	free(res.bdev_uuid);
1833 	free(res.holder_uuid);
1834 	for (i = 0; i < res.regs.num_regs; i++) {
1835 		free(res.regs.reg[i].host_uuid);
1836 	}
1837 
1838 	return rc;
1839 }
1840 
1841 static bool
1842 nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns);
1843 
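/* Rebuild the in-memory registrant list and reservation holder of a namespace
 * from previously persisted reservation information. The bdev UUID recorded in
 * the persist file must match the UUID of the namespace's bdev; restoring
 * nothing is not an error when PTPL was not activated or no registrants exist.
 */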
1844 static int
1845 nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info)
1846 {
1847 	uint32_t i;
1848 	struct spdk_nvmf_registrant *reg, *holder = NULL;
1849 	struct spdk_uuid bdev_uuid, holder_uuid;
1850 
1851 	SPDK_DEBUGLOG(nvmf, "NSID %u, PTPL %u, Number of registrants %u\n",
1852 		      ns->nsid, info->ptpl_activated, info->num_regs);
1853 
1854 	/* it's not an error */
1855 	/* it's not an error if PTPL was not activated or there are no registrants to restore */
1856 		return 0;
1857 	}
1858 
1859 	spdk_uuid_parse(&bdev_uuid, info->bdev_uuid);
1860 	if (spdk_uuid_compare(&bdev_uuid, spdk_bdev_get_uuid(ns->bdev))) {
1861 		SPDK_ERRLOG("Existing bdev UUID does not match the one in the configuration file\n");
1862 		return -EINVAL;
1863 	}
1864 
1865 	ns->crkey = info->crkey;
1866 	ns->rtype = info->rtype;
1867 	ns->ptpl_activated = info->ptpl_activated;
1868 	spdk_uuid_parse(&holder_uuid, info->holder_uuid);
1869 
1870 	SPDK_DEBUGLOG(nvmf, "Bdev UUID %s\n", info->bdev_uuid);
1871 	if (info->rtype) {
1872 		SPDK_DEBUGLOG(nvmf, "Holder UUID %s, RTYPE %u, RKEY 0x%"PRIx64"\n",
1873 			      info->holder_uuid, info->rtype, info->crkey);
1874 	}
1875 
1876 	for (i = 0; i < info->num_regs; i++) {
1877 		reg = calloc(1, sizeof(*reg));
1878 		if (!reg) {
1879 			return -ENOMEM;
1880 		}
1881 		spdk_uuid_parse(&reg->hostid, info->registrants[i].host_uuid);
1882 		reg->rkey = info->registrants[i].rkey;
1883 		TAILQ_INSERT_TAIL(&ns->registrants, reg, link);
1884 		if (!spdk_uuid_compare(&holder_uuid, &reg->hostid)) {
1885 			holder = reg;
1886 		}
1887 		SPDK_DEBUGLOG(nvmf, "Registrant RKEY 0x%"PRIx64", Host UUID %s\n",
1888 			      info->registrants[i].rkey, info->registrants[i].host_uuid);
1889 	}
1890 
1891 	if (nvmf_ns_reservation_all_registrants_type(ns)) {
1892 		ns->holder = TAILQ_FIRST(&ns->registrants);
1893 	} else {
1894 		ns->holder = holder;
1895 	}
1896 
1897 	return 0;
1898 }
1899 
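/* spdk_json_write_begin() callback: open the PTPL configuration file passed in
 * via cb_ctx and write the encoded JSON data to it.
 */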
1900 static int
1901 nvmf_ns_json_write_cb(void *cb_ctx, const void *data, size_t size)
1902 {
1903 	char *file = cb_ctx;
1904 	size_t rc;
1905 	FILE *fd;
1906 
1907 	fd = fopen(file, "w");
1908 	if (!fd) {
1909 		SPDK_ERRLOG("Can't open file %s for write\n", file);
1910 		return -ENOENT;
1911 	}
1912 	rc = fwrite(data, 1, size, fd);
1913 	fclose(fd);
1914 
1915 	return rc == size ? 0 : -1;
1916 }
1917 
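/* Serialize the given reservation information as JSON and write it to the
 * namespace's PTPL configuration file via nvmf_ns_json_write_cb(). No
 * reservation state is written when PTPL is not activated.
 */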
1918 static int
1919 nvmf_ns_reservation_update(const char *file, struct spdk_nvmf_reservation_info *info)
1920 {
1921 	struct spdk_json_write_ctx *w;
1922 	uint32_t i;
1923 	int rc = 0;
1924 
1925 	w = spdk_json_write_begin(nvmf_ns_json_write_cb, (void *)file, 0);
1926 	if (w == NULL) {
1927 		return -ENOMEM;
1928 	}
1929 	/* clear the configuration file */
1930 	if (!info->ptpl_activated) {
1931 		goto exit;
1932 	}
1933 
1934 	spdk_json_write_object_begin(w);
1935 	spdk_json_write_named_bool(w, "ptpl", info->ptpl_activated);
1936 	spdk_json_write_named_uint32(w, "rtype", info->rtype);
1937 	spdk_json_write_named_uint64(w, "crkey", info->crkey);
1938 	spdk_json_write_named_string(w, "bdev_uuid", info->bdev_uuid);
1939 	spdk_json_write_named_string(w, "holder_uuid", info->holder_uuid);
1940 
1941 	spdk_json_write_named_array_begin(w, "registrants");
1942 	for (i = 0; i < info->num_regs; i++) {
1943 		spdk_json_write_object_begin(w);
1944 		spdk_json_write_named_uint64(w, "rkey", info->registrants[i].rkey);
1945 		spdk_json_write_named_string(w, "host_uuid", info->registrants[i].host_uuid);
1946 		spdk_json_write_object_end(w);
1947 	}
1948 	spdk_json_write_array_end(w);
1949 	spdk_json_write_object_end(w);
1950 
1951 exit:
1952 	rc = spdk_json_write_end(w);
1953 	return rc;
1954 }
1955 
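/* Snapshot the namespace's current reservation state (type, keys, holder and
 * registrants) and persist it to the PTPL configuration file. This is a no-op
 * when the namespace has no bdev or no PTPL file configured.
 */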
1956 static int
1957 nvmf_ns_update_reservation_info(struct spdk_nvmf_ns *ns)
1958 {
1959 	struct spdk_nvmf_reservation_info info;
1960 	struct spdk_nvmf_registrant *reg, *tmp;
1961 	uint32_t i = 0;
1962 
1963 	assert(ns != NULL);
1964 
1965 	if (!ns->bdev || !ns->ptpl_file) {
1966 		return 0;
1967 	}
1968 
1969 	memset(&info, 0, sizeof(info));
1970 	spdk_uuid_fmt_lower(info.bdev_uuid, sizeof(info.bdev_uuid), spdk_bdev_get_uuid(ns->bdev));
1971 
1972 	if (ns->rtype) {
1973 		info.rtype = ns->rtype;
1974 		info.crkey = ns->crkey;
1975 		if (!nvmf_ns_reservation_all_registrants_type(ns)) {
1976 			assert(ns->holder != NULL);
1977 			spdk_uuid_fmt_lower(info.holder_uuid, sizeof(info.holder_uuid), &ns->holder->hostid);
1978 		}
1979 	}
1980 
1981 	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
1982 		spdk_uuid_fmt_lower(info.registrants[i].host_uuid, sizeof(info.registrants[i].host_uuid),
1983 				    &reg->hostid);
1984 		info.registrants[i++].rkey = reg->rkey;
1985 	}
1986 
1987 	info.num_regs = i;
1988 	info.ptpl_activated = ns->ptpl_activated;
1989 
1990 	return nvmf_ns_reservation_update(ns->ptpl_file, &info);
1991 }
1992 
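/* Return the registrant whose host ID matches the given UUID, or NULL if the
 * host is not registered on this namespace.
 */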
1993 static struct spdk_nvmf_registrant *
1994 nvmf_ns_reservation_get_registrant(struct spdk_nvmf_ns *ns,
1995 				   struct spdk_uuid *uuid)
1996 {
1997 	struct spdk_nvmf_registrant *reg, *tmp;
1998 
1999 	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
2000 		if (!spdk_uuid_compare(&reg->hostid, uuid)) {
2001 			return reg;
2002 		}
2003 	}
2004 
2005 	return NULL;
2006 }
2007 
2008 /* Generate a reservation notification log page for the controllers of the given host IDs */
2009 static void
2010 nvmf_subsystem_gen_ctrlr_notification(struct spdk_nvmf_subsystem *subsystem,
2011 				      struct spdk_nvmf_ns *ns,
2012 				      struct spdk_uuid *hostid_list,
2013 				      uint32_t num_hostid,
2014 				      enum spdk_nvme_reservation_notification_log_page_type type)
2015 {
2016 	struct spdk_nvmf_ctrlr *ctrlr;
2017 	uint32_t i;
2018 
2019 	for (i = 0; i < num_hostid; i++) {
2020 		TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
2021 			if (!spdk_uuid_compare(&ctrlr->hostid, &hostid_list[i])) {
2022 				nvmf_ctrlr_reservation_notice_log(ctrlr, ns, type);
2023 			}
2024 		}
2025 	}
2026 }
2027 
2028 /* Get the host IDs of all registrants other than the controller that issued the command */
2029 static uint32_t
2030 nvmf_ns_reservation_get_all_other_hostid(struct spdk_nvmf_ns *ns,
2031 		struct spdk_uuid *hostid_list,
2032 		uint32_t max_num_hostid,
2033 		struct spdk_uuid *current_hostid)
2034 {
2035 	struct spdk_nvmf_registrant *reg, *tmp;
2036 	uint32_t num_hostid = 0;
2037 
2038 	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
2039 		if (spdk_uuid_compare(&reg->hostid, current_hostid)) {
2040 			if (num_hostid == max_num_hostid) {
2041 				assert(false);
2042 				return max_num_hostid;
2043 			}
2044 			hostid_list[num_hostid++] = reg->hostid;
2045 		}
2046 	}
2047 
2048 	return num_hostid;
2049 }
2050 
2051 /* Calculate the list of host IDs that were unregistered by comparing the
2052  * registrant list prior to executing the preempt command with the list
2053  * remaining after executing it.
2054  */
2055 static uint32_t
2056 nvmf_ns_reservation_get_unregistered_hostid(struct spdk_uuid *old_hostid_list,
2057 		uint32_t old_num_hostid,
2058 		struct spdk_uuid *remaining_hostid_list,
2059 		uint32_t remaining_num_hostid)
2060 {
2061 	struct spdk_uuid temp_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
2062 	uint32_t i, j, num_hostid = 0;
2063 	bool found;
2064 
2065 	if (!remaining_num_hostid) {
2066 		return old_num_hostid;
2067 	}
2068 
2069 	for (i = 0; i < old_num_hostid; i++) {
2070 		found = false;
2071 		for (j = 0; j < remaining_num_hostid; j++) {
2072 			if (!spdk_uuid_compare(&old_hostid_list[i], &remaining_hostid_list[j])) {
2073 				found = true;
2074 				break;
2075 			}
2076 		}
2077 		if (!found) {
2078 			spdk_uuid_copy(&temp_hostid_list[num_hostid++], &old_hostid_list[i]);
2079 		}
2080 	}
2081 
2082 	if (num_hostid) {
2083 		memcpy(old_hostid_list, temp_hostid_list, sizeof(struct spdk_uuid) * num_hostid);
2084 	}
2085 
2086 	return num_hostid;
2087 }
2088 
2089 /* current reservation type is all registrants or not */
2090 static bool
2091 nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns)
2092 {
2093 	return (ns->rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS ||
2094 		ns->rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
2095 }
2096 
2097 /* current registrant is reservation holder or not */
2098 static bool
2099 nvmf_ns_reservation_registrant_is_holder(struct spdk_nvmf_ns *ns,
2100 		struct spdk_nvmf_registrant *reg)
2101 {
2102 	if (!reg) {
2103 		return false;
2104 	}
2105 
2106 	if (nvmf_ns_reservation_all_registrants_type(ns)) {
2107 		return true;
2108 	}
2109 
2110 	return (ns->holder == reg);
2111 }
2112 
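/* Allocate a registrant for the controller's host ID with the given
 * reservation key, append it to the namespace's registrant list and bump the
 * reservation generation counter.
 */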
2113 static int
2114 nvmf_ns_reservation_add_registrant(struct spdk_nvmf_ns *ns,
2115 				   struct spdk_nvmf_ctrlr *ctrlr,
2116 				   uint64_t nrkey)
2117 {
2118 	struct spdk_nvmf_registrant *reg;
2119 
2120 	reg = calloc(1, sizeof(*reg));
2121 	if (!reg) {
2122 		return -ENOMEM;
2123 	}
2124 
2125 	reg->rkey = nrkey;
2126 	/* set hostid for the registrant */
2127 	spdk_uuid_copy(&reg->hostid, &ctrlr->hostid);
2128 	TAILQ_INSERT_TAIL(&ns->registrants, reg, link);
2129 	ns->gen++;
2130 
2131 	return 0;
2132 }
2133 
2134 static void
2135 nvmf_ns_reservation_release_reservation(struct spdk_nvmf_ns *ns)
2136 {
2137 	ns->rtype = 0;
2138 	ns->crkey = 0;
2139 	ns->holder = NULL;
2140 }
2141 
2142 /* update or release the reservation when a registrant is removed */
2143 static void
2144 nvmf_ns_reservation_check_release_on_remove_registrant(struct spdk_nvmf_ns *ns,
2145 		struct spdk_nvmf_registrant *reg)
2146 {
2147 	struct spdk_nvmf_registrant *next_reg;
2148 
2149 	/* no reservation holder */
2150 	if (!ns->holder) {
2151 		assert(ns->rtype == 0);
2152 		return;
2153 	}
2154 
2155 	next_reg = TAILQ_FIRST(&ns->registrants);
2156 	if (next_reg && nvmf_ns_reservation_all_registrants_type(ns)) {
2157 		/* the next valid registrant is the new holder now */
2158 		ns->holder = next_reg;
2159 	} else if (nvmf_ns_reservation_registrant_is_holder(ns, reg)) {
2160 		/* release the reservation */
2161 		nvmf_ns_reservation_release_reservation(ns);
2162 	}
2163 }
2164 
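/* Remove a registrant from the namespace, update or release the reservation
 * as needed, and bump the reservation generation counter.
 */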
2165 static void
2166 nvmf_ns_reservation_remove_registrant(struct spdk_nvmf_ns *ns,
2167 				      struct spdk_nvmf_registrant *reg)
2168 {
2169 	TAILQ_REMOVE(&ns->registrants, reg, link);
2170 	nvmf_ns_reservation_check_release_on_remove_registrant(ns, reg);
2171 	free(reg);
2172 	ns->gen++;
2173 	return;
2174 }
2175 
2176 static uint32_t
2177 nvmf_ns_reservation_remove_registrants_by_key(struct spdk_nvmf_ns *ns,
2178 		uint64_t rkey)
2179 {
2180 	struct spdk_nvmf_registrant *reg, *tmp;
2181 	uint32_t count = 0;
2182 
2183 	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
2184 		if (reg->rkey == rkey) {
2185 			nvmf_ns_reservation_remove_registrant(ns, reg);
2186 			count++;
2187 		}
2188 	}
2189 	return count;
2190 }
2191 
2192 static uint32_t
2193 nvmf_ns_reservation_remove_all_other_registrants(struct spdk_nvmf_ns *ns,
2194 		struct spdk_nvmf_registrant *reg)
2195 {
2196 	struct spdk_nvmf_registrant *reg_tmp, *reg_tmp2;
2197 	uint32_t count = 0;
2198 
2199 	TAILQ_FOREACH_SAFE(reg_tmp, &ns->registrants, link, reg_tmp2) {
2200 		if (reg_tmp != reg) {
2201 			nvmf_ns_reservation_remove_registrant(ns, reg_tmp);
2202 			count++;
2203 		}
2204 	}
2205 	return count;
2206 }
2207 
2208 static uint32_t
2209 nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns)
2210 {
2211 	struct spdk_nvmf_registrant *reg, *reg_tmp;
2212 	uint32_t count = 0;
2213 
2214 	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, reg_tmp) {
2215 		nvmf_ns_reservation_remove_registrant(ns, reg);
2216 		count++;
2217 	}
2218 	return count;
2219 }
2220 
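/* Record the given registrant as the reservation holder with the specified
 * reservation key and type.
 */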
2221 static void
2222 nvmf_ns_reservation_acquire_reservation(struct spdk_nvmf_ns *ns, uint64_t rkey,
2223 					enum spdk_nvme_reservation_type rtype,
2224 					struct spdk_nvmf_registrant *holder)
2225 {
2226 	ns->rtype = rtype;
2227 	ns->crkey = rkey;
2228 	assert(ns->holder == NULL);
2229 	ns->holder = holder;
2230 }
2231 
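/* Handle the Reservation Register command: register, unregister or replace a
 * key and apply the Change Persist Through Power Loss (CPTPL) setting. The
 * completion status is set in the request; the return value indicates whether
 * the subsystem poll groups need to be updated.
 */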
2232 static bool
2233 nvmf_ns_reservation_register(struct spdk_nvmf_ns *ns,
2234 			     struct spdk_nvmf_ctrlr *ctrlr,
2235 			     struct spdk_nvmf_request *req)
2236 {
2237 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
2238 	uint8_t rrega, iekey, cptpl, rtype;
2239 	struct spdk_nvme_reservation_register_data key;
2240 	struct spdk_nvmf_registrant *reg;
2241 	uint8_t status = SPDK_NVME_SC_SUCCESS;
2242 	bool update_sgroup = false;
2243 	struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
2244 	uint32_t num_hostid = 0;
2245 	int rc;
2246 
2247 	rrega = cmd->cdw10_bits.resv_register.rrega;
2248 	iekey = cmd->cdw10_bits.resv_register.iekey;
2249 	cptpl = cmd->cdw10_bits.resv_register.cptpl;
2250 
2251 	if (req->data && req->length >= sizeof(key)) {
2252 		memcpy(&key, req->data, sizeof(key));
2253 	} else {
2254 		SPDK_ERRLOG("No key provided. Failing request.\n");
2255 		status = SPDK_NVME_SC_INVALID_FIELD;
2256 		goto exit;
2257 	}
2258 
2259 	SPDK_DEBUGLOG(nvmf, "REGISTER: RREGA %u, IEKEY %u, CPTPL %u, "
2260 		      "CRKEY 0x%"PRIx64", NRKEY 0x%"PRIx64"\n",
2261 		      rrega, iekey, cptpl, key.crkey, key.nrkey);
2262 
2263 	if (cptpl == SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON) {
2264 		/* Turn to OFF state; the configuration file needs to be updated */
2265 		if (ns->ptpl_activated) {
2266 			ns->ptpl_activated = 0;
2267 			update_sgroup = true;
2268 		}
2269 	} else if (cptpl == SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS) {
2270 		if (ns->ptpl_file == NULL) {
2271 			status = SPDK_NVME_SC_INVALID_FIELD;
2272 			goto exit;
2273 		} else if (ns->ptpl_activated == 0) {
2274 			ns->ptpl_activated = 1;
2275 			update_sgroup = true;
2276 		}
2277 	}
2278 
2279 	/* current Host Identifier has registrant or not */
2280 	reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid);
2281 
2282 	switch (rrega) {
2283 	case SPDK_NVME_RESERVE_REGISTER_KEY:
2284 		if (!reg) {
2285 			/* register new controller */
2286 			if (key.nrkey == 0) {
2287 				SPDK_ERRLOG("Can't register zeroed new key\n");
2288 				status = SPDK_NVME_SC_INVALID_FIELD;
2289 				goto exit;
2290 			}
2291 			rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey);
2292 			if (rc < 0) {
2293 				status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2294 				goto exit;
2295 			}
2296 			update_sgroup = true;
2297 		} else {
2298 			/* register with same key is not an error */
2299 			if (reg->rkey != key.nrkey) {
2300 				SPDK_ERRLOG("The same host has already registered "
2301 					    "a key with 0x%"PRIx64"\n",
2302 					    reg->rkey);
2303 				status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2304 				goto exit;
2305 			}
2306 		}
2307 		break;
2308 	case SPDK_NVME_RESERVE_UNREGISTER_KEY:
2309 		if (!reg || (!iekey && reg->rkey != key.crkey)) {
2310 			SPDK_ERRLOG("No registrant or the current key doesn't match "
2311 				    "the existing registrant key\n");
2312 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2313 			goto exit;
2314 		}
2315 
2316 		rtype = ns->rtype;
2317 		num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
2318 				SPDK_NVMF_MAX_NUM_REGISTRANTS,
2319 				&ctrlr->hostid);
2320 
2321 		nvmf_ns_reservation_remove_registrant(ns, reg);
2322 
2323 		if (!ns->rtype && num_hostid && (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY ||
2324 						 rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY)) {
2325 			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
2326 							      hostid_list,
2327 							      num_hostid,
2328 							      SPDK_NVME_RESERVATION_RELEASED);
2329 		}
2330 		update_sgroup = true;
2331 		break;
2332 	case SPDK_NVME_RESERVE_REPLACE_KEY:
2333 		if (!reg || (!iekey && reg->rkey != key.crkey)) {
2334 			SPDK_ERRLOG("No registrant or the current key doesn't match "
2335 				    "the existing registrant key\n");
2336 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2337 			goto exit;
2338 		}
2339 		if (key.nrkey == 0) {
2340 			SPDK_ERRLOG("Can't register zeroed new key\n");
2341 			status = SPDK_NVME_SC_INVALID_FIELD;
2342 			goto exit;
2343 		}
2344 		reg->rkey = key.nrkey;
2345 		update_sgroup = true;
2346 		break;
2347 	default:
2348 		status = SPDK_NVME_SC_INVALID_FIELD;
2349 		goto exit;
2350 	}
2351 
2352 exit:
2353 	if (update_sgroup) {
2354 		rc = nvmf_ns_update_reservation_info(ns);
2355 		if (rc != 0) {
2356 			status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2357 		}
2358 	}
2359 	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2360 	req->rsp->nvme_cpl.status.sc = status;
2361 	return update_sgroup;
2362 }
2363 
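/* Handle the Reservation Acquire command: acquire or preempt a reservation and
 * generate registration-preempted/reservation-released notifications to the
 * affected hosts. The return value indicates whether the subsystem poll groups
 * need to be updated.
 */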
2364 static bool
2365 nvmf_ns_reservation_acquire(struct spdk_nvmf_ns *ns,
2366 			    struct spdk_nvmf_ctrlr *ctrlr,
2367 			    struct spdk_nvmf_request *req)
2368 {
2369 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
2370 	uint8_t racqa, iekey, rtype;
2371 	struct spdk_nvme_reservation_acquire_data key;
2372 	struct spdk_nvmf_registrant *reg;
2373 	bool all_regs = false;
2374 	uint32_t count = 0;
2375 	bool update_sgroup = true;
2376 	struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
2377 	uint32_t num_hostid = 0;
2378 	struct spdk_uuid new_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
2379 	uint32_t new_num_hostid = 0;
2380 	bool reservation_released = false;
2381 	uint8_t status = SPDK_NVME_SC_SUCCESS;
2382 
2383 	racqa = cmd->cdw10_bits.resv_acquire.racqa;
2384 	iekey = cmd->cdw10_bits.resv_acquire.iekey;
2385 	rtype = cmd->cdw10_bits.resv_acquire.rtype;
2386 
2387 	if (req->data && req->length >= sizeof(key)) {
2388 		memcpy(&key, req->data, sizeof(key));
2389 	} else {
2390 		SPDK_ERRLOG("No key provided. Failing request.\n");
2391 		status = SPDK_NVME_SC_INVALID_FIELD;
2392 		goto exit;
2393 	}
2394 
2395 	SPDK_DEBUGLOG(nvmf, "ACQUIRE: RACQA %u, IEKEY %u, RTYPE %u, "
2396 		      "CRKEY 0x%"PRIx64", PRKEY 0x%"PRIx64"\n",
2397 		      racqa, iekey, rtype, key.crkey, key.prkey);
2398 
2399 	if (iekey || rtype > SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) {
2400 		SPDK_ERRLOG("Ignore existing key field set to 1 or invalid reservation type\n");
2401 		status = SPDK_NVME_SC_INVALID_FIELD;
2402 		update_sgroup = false;
2403 		goto exit;
2404 	}
2405 
2406 	reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid);
2407 	/* must be registrant and CRKEY must match */
2408 	if (!reg || reg->rkey != key.crkey) {
2409 		SPDK_ERRLOG("No registrant or the current key doesn't match "
2410 			    "the existing registrant key\n");
2411 		status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2412 		update_sgroup = false;
2413 		goto exit;
2414 	}
2415 
2416 	all_regs = nvmf_ns_reservation_all_registrants_type(ns);
2417 
2418 	switch (racqa) {
2419 	case SPDK_NVME_RESERVE_ACQUIRE:
2420 		/* it's not an error for the holder to acquire the same reservation type again */
2421 		if (nvmf_ns_reservation_registrant_is_holder(ns, reg) && ns->rtype == rtype) {
2422 			/* do nothing */
2423 			update_sgroup = false;
2424 		} else if (ns->holder == NULL) {
2425 			/* first time to acquire the reservation */
2426 			nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg);
2427 		} else {
2428 			SPDK_ERRLOG("Invalid rtype or current registrant is not holder\n");
2429 			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2430 			update_sgroup = false;
2431 			goto exit;
2432 		}
2433 		break;
2434 	case SPDK_NVME_RESERVE_PREEMPT:
2435 		/* no reservation holder */
2436 		if (!ns->holder) {
2437 			/* unregister with PRKEY */
2438 			nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
2439 			break;
2440 		}
2441 		num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
2442 				SPDK_NVMF_MAX_NUM_REGISTRANTS,
2443 				&ctrlr->hostid);
2444 
2445 		/* only 1 reservation holder and reservation key is valid */
2446 		if (!all_regs) {
2447 			/* preempt itself */
2448 			if (nvmf_ns_reservation_registrant_is_holder(ns, reg) &&
2449 			    ns->crkey == key.prkey) {
2450 				ns->rtype = rtype;
2451 				reservation_released = true;
2452 				break;
2453 			}
2454 
2455 			if (ns->crkey == key.prkey) {
2456 				nvmf_ns_reservation_remove_registrant(ns, ns->holder);
2457 				nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg);
2458 				reservation_released = true;
2459 			} else if (key.prkey != 0) {
2460 				nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
2461 			} else {
2462 				/* PRKEY is zero */
2463 				SPDK_ERRLOG("Current PRKEY is zero\n");
2464 				status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2465 				update_sgroup = false;
2466 				goto exit;
2467 			}
2468 		} else {
2469 			/* release all other registrants except for the current one */
2470 			if (key.prkey == 0) {
2471 				nvmf_ns_reservation_remove_all_other_registrants(ns, reg);
2472 				assert(ns->holder == reg);
2473 			} else {
2474 				count = nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
2475 				if (count == 0) {
2476 					SPDK_ERRLOG("PRKEY doesn't match any registrant\n");
2477 					status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2478 					update_sgroup = false;
2479 					goto exit;
2480 				}
2481 			}
2482 		}
2483 		break;
2484 	default:
2485 		status = SPDK_NVME_SC_INVALID_FIELD;
2486 		update_sgroup = false;
2487 		break;
2488 	}
2489 
2490 exit:
2491 	if (update_sgroup && racqa == SPDK_NVME_RESERVE_PREEMPT) {
2492 		new_num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, new_hostid_list,
2493 				 SPDK_NVMF_MAX_NUM_REGISTRANTS,
2494 				 &ctrlr->hostid);
2495 		/* Preempt notification occurs on the unregistered controllers
2496 		 * other than the controller who issued the command.
2497 		 */
2498 		num_hostid = nvmf_ns_reservation_get_unregistered_hostid(hostid_list,
2499 				num_hostid,
2500 				new_hostid_list,
2501 				new_num_hostid);
2502 		if (num_hostid) {
2503 			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
2504 							      hostid_list,
2505 							      num_hostid,
2506 							      SPDK_NVME_REGISTRATION_PREEMPTED);
2507 
2508 		}
2509 		/* Reservation released notification occurs on the
2510 		 * controllers which are the remaining registrants other than
2511 		 * the controller who issued the command.
2512 		 */
2513 		if (reservation_released && new_num_hostid) {
2514 			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
2515 							      new_hostid_list,
2516 							      new_num_hostid,
2517 							      SPDK_NVME_RESERVATION_RELEASED);
2518 
2519 		}
2520 	}
2521 	if (update_sgroup && ns->ptpl_activated) {
2522 		if (nvmf_ns_update_reservation_info(ns)) {
2523 			status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2524 		}
2525 	}
2526 	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2527 	req->rsp->nvme_cpl.status.sc = status;
2528 	return update_sgroup;
2529 }
2530 
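/* Handle the Reservation Release command: release or clear a reservation and
 * notify the other registered hosts. The return value indicates whether the
 * subsystem poll groups need to be updated.
 */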
2531 static bool
2532 nvmf_ns_reservation_release(struct spdk_nvmf_ns *ns,
2533 			    struct spdk_nvmf_ctrlr *ctrlr,
2534 			    struct spdk_nvmf_request *req)
2535 {
2536 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
2537 	uint8_t rrela, iekey, rtype;
2538 	struct spdk_nvmf_registrant *reg;
2539 	uint64_t crkey;
2540 	uint8_t status = SPDK_NVME_SC_SUCCESS;
2541 	bool update_sgroup = true;
2542 	struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
2543 	uint32_t num_hostid = 0;
2544 
2545 	rrela = cmd->cdw10_bits.resv_release.rrela;
2546 	iekey = cmd->cdw10_bits.resv_release.iekey;
2547 	rtype = cmd->cdw10_bits.resv_release.rtype;
2548 
2549 	if (req->data && req->length >= sizeof(crkey)) {
2550 		memcpy(&crkey, req->data, sizeof(crkey));
2551 	} else {
2552 		SPDK_ERRLOG("No key provided. Failing request.\n");
2553 		status = SPDK_NVME_SC_INVALID_FIELD;
2554 		goto exit;
2555 	}
2556 
2557 	SPDK_DEBUGLOG(nvmf, "RELEASE: RRELA %u, IEKEY %u, RTYPE %u, "
2558 		      "CRKEY 0x%"PRIx64"\n",  rrela, iekey, rtype, crkey);
2559 
2560 	if (iekey) {
2561 		SPDK_ERRLOG("Ignore existing key field set to 1\n");
2562 		status = SPDK_NVME_SC_INVALID_FIELD;
2563 		update_sgroup = false;
2564 		goto exit;
2565 	}
2566 
2567 	reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid);
2568 	if (!reg || reg->rkey != crkey) {
2569 		SPDK_ERRLOG("No registrant or the current key doesn't match "
2570 			    "the existing registrant key\n");
2571 		status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2572 		update_sgroup = false;
2573 		goto exit;
2574 	}
2575 
2576 	num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
2577 			SPDK_NVMF_MAX_NUM_REGISTRANTS,
2578 			&ctrlr->hostid);
2579 
2580 	switch (rrela) {
2581 	case SPDK_NVME_RESERVE_RELEASE:
2582 		if (!ns->holder) {
2583 			SPDK_DEBUGLOG(nvmf, "RELEASE: no holder\n");
2584 			update_sgroup = false;
2585 			goto exit;
2586 		}
2587 		if (ns->rtype != rtype) {
2588 			SPDK_ERRLOG("Type doesn't match\n");
2589 			status = SPDK_NVME_SC_INVALID_FIELD;
2590 			update_sgroup = false;
2591 			goto exit;
2592 		}
2593 		if (!nvmf_ns_reservation_registrant_is_holder(ns, reg)) {
2594 			/* not the reservation holder, this isn't an error */
2595 			update_sgroup = false;
2596 			goto exit;
2597 		}
2598 
2599 		rtype = ns->rtype;
2600 		nvmf_ns_reservation_release_reservation(ns);
2601 
2602 		if (num_hostid && rtype != SPDK_NVME_RESERVE_WRITE_EXCLUSIVE &&
2603 		    rtype != SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
2604 			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
2605 							      hostid_list,
2606 							      num_hostid,
2607 							      SPDK_NVME_RESERVATION_RELEASED);
2608 		}
2609 		break;
2610 	case SPDK_NVME_RESERVE_CLEAR:
2611 		nvmf_ns_reservation_clear_all_registrants(ns);
2612 		if (num_hostid) {
2613 			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
2614 							      hostid_list,
2615 							      num_hostid,
2616 							      SPDK_NVME_RESERVATION_PREEMPTED);
2617 		}
2618 		break;
2619 	default:
2620 		status = SPDK_NVME_SC_INVALID_FIELD;
2621 		update_sgroup = false;
2622 		goto exit;
2623 	}
2624 
2625 exit:
2626 	if (update_sgroup && ns->ptpl_activated) {
2627 		if (nvmf_ns_update_reservation_info(ns)) {
2628 			status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2629 		}
2630 	}
2631 	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2632 	req->rsp->nvme_cpl.status.sc = status;
2633 	return update_sgroup;
2634 }
2635 
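/* Handle the Reservation Report command using the extended controller data
 * structure format (the EDS bit must be set). Each registrant is reported with
 * the dynamic controller ID 0xffff.
 */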
2636 static void
2637 nvmf_ns_reservation_report(struct spdk_nvmf_ns *ns,
2638 			   struct spdk_nvmf_ctrlr *ctrlr,
2639 			   struct spdk_nvmf_request *req)
2640 {
2641 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
2642 	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
2643 	struct spdk_nvmf_ctrlr *ctrlr_tmp;
2644 	struct spdk_nvmf_registrant *reg, *tmp;
2645 	struct spdk_nvme_reservation_status_extended_data *status_data;
2646 	struct spdk_nvme_registered_ctrlr_extended_data *ctrlr_data;
2647 	uint8_t *payload;
2648 	uint32_t len, count = 0;
2649 	uint32_t regctl = 0;
2650 	uint8_t status = SPDK_NVME_SC_SUCCESS;
2651 
2652 	if (req->data == NULL) {
2653 		SPDK_ERRLOG("No data transfer specified for request. "
2654 			    "Unable to transfer back response.\n");
2655 		status = SPDK_NVME_SC_INVALID_FIELD;
2656 		goto exit;
2657 	}
2658 
2659 	if (!cmd->cdw11_bits.resv_report.eds) {
2660 		SPDK_ERRLOG("NVMeoF uses extended controller data structure, "
2661 			    "please set EDS bit in cdw11 and try again\n");
2662 		status = SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT;
2663 		goto exit;
2664 	}
2665 
2666 	/* Get the number of registered controllers; one host may have more than
2667 	 * one controller based on different ports.
2668 	 */
2669 	TAILQ_FOREACH(ctrlr_tmp, &subsystem->ctrlrs, link) {
2670 		reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr_tmp->hostid);
2671 		if (reg) {
2672 			regctl++;
2673 		}
2674 	}
2675 
2676 	len = sizeof(*status_data) + sizeof(*ctrlr_data) * regctl;
2677 	payload = calloc(1, len);
2678 	if (!payload) {
2679 		status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2680 		goto exit;
2681 	}
2682 
2683 	status_data = (struct spdk_nvme_reservation_status_extended_data *)payload;
2684 	status_data->data.gen = ns->gen;
2685 	status_data->data.rtype = ns->rtype;
2686 	status_data->data.regctl = regctl;
2687 	status_data->data.ptpls = ns->ptpl_activated;
2688 
2689 	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
2690 		assert(count <= regctl);
2691 		ctrlr_data = (struct spdk_nvme_registered_ctrlr_extended_data *)
2692 			     (payload + sizeof(*status_data) + sizeof(*ctrlr_data) * count);
2693 		/* Set to 0xffff for dynamic controllers */
2694 		ctrlr_data->cntlid = 0xffff;
2695 		ctrlr_data->rcsts.status = (ns->holder == reg) ? true : false;
2696 		ctrlr_data->rkey = reg->rkey;
2697 		spdk_uuid_copy((struct spdk_uuid *)ctrlr_data->hostid, &reg->hostid);
2698 		count++;
2699 	}
2700 
2701 	memcpy(req->data, payload, spdk_min(len, (cmd->cdw10 + 1) * sizeof(uint32_t)));
2702 	free(payload);
2703 
2704 exit:
2705 	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2706 	req->rsp->nvme_cpl.status.sc = status;
2707 	return;
2708 }
2709 
2710 static void
2711 nvmf_ns_reservation_complete(void *ctx)
2712 {
2713 	struct spdk_nvmf_request *req = ctx;
2714 
2715 	spdk_nvmf_request_complete(req);
2716 }
2717 
2718 static void
2719 _nvmf_ns_reservation_update_done(struct spdk_nvmf_subsystem *subsystem,
2720 				 void *cb_arg, int status)
2721 {
2722 	struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)cb_arg;
2723 	struct spdk_nvmf_poll_group *group = req->qpair->group;
2724 
2725 	spdk_thread_send_msg(group->thread, nvmf_ns_reservation_complete, req);
2726 }
2727 
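/* Handle an NVMe reservation command for the namespace addressed by the
 * request: dispatch it to the register/acquire/release/report handler and, if
 * the reservation state changed, update the subsystem's poll groups before
 * completing the request.
 */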
2728 void
2729 nvmf_ns_reservation_request(void *ctx)
2730 {
2731 	struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)ctx;
2732 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
2733 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
2734 	struct subsystem_update_ns_ctx *update_ctx;
2735 	uint32_t nsid;
2736 	struct spdk_nvmf_ns *ns;
2737 	bool update_sgroup = false;
2738 
2739 	nsid = cmd->nsid;
2740 	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid);
2741 	assert(ns != NULL);
2742 
2743 	switch (cmd->opc) {
2744 	case SPDK_NVME_OPC_RESERVATION_REGISTER:
2745 		update_sgroup = nvmf_ns_reservation_register(ns, ctrlr, req);
2746 		break;
2747 	case SPDK_NVME_OPC_RESERVATION_ACQUIRE:
2748 		update_sgroup = nvmf_ns_reservation_acquire(ns, ctrlr, req);
2749 		break;
2750 	case SPDK_NVME_OPC_RESERVATION_RELEASE:
2751 		update_sgroup = nvmf_ns_reservation_release(ns, ctrlr, req);
2752 		break;
2753 	case SPDK_NVME_OPC_RESERVATION_REPORT:
2754 		nvmf_ns_reservation_report(ns, ctrlr, req);
2755 		break;
2756 	default:
2757 		break;
2758 	}
2759 
2760 	/* update reservation information to subsystem's poll group */
2761 	if (update_sgroup) {
2762 		update_ctx = calloc(1, sizeof(*update_ctx));
2763 		if (update_ctx == NULL) {
2764 			SPDK_ERRLOG("Can't alloc subsystem poll group update context\n");
2765 			goto update_done;
2766 		}
2767 		update_ctx->subsystem = ctrlr->subsys;
2768 		update_ctx->cb_fn = _nvmf_ns_reservation_update_done;
2769 		update_ctx->cb_arg = req;
2770 
2771 		nvmf_subsystem_update_ns(ctrlr->subsys, subsystem_update_ns_done, update_ctx);
2772 		return;
2773 	}
2774 
2775 update_done:
2776 	_nvmf_ns_reservation_update_done(ctrlr->subsys, (void *)req, 0);
2777 }
2778 
2779 int
2780 spdk_nvmf_subsystem_set_ana_reporting(struct spdk_nvmf_subsystem *subsystem,
2781 				      bool ana_reporting)
2782 {
2783 	if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) {
2784 		return -EAGAIN;
2785 	}
2786 
2787 	subsystem->flags.ana_reporting = ana_reporting;
2788 
2789 	return 0;
2790 }
2791 
2792 struct subsystem_listener_update_ctx {
2793 	struct spdk_nvmf_subsystem_listener *listener;
2794 
2795 	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
2796 	void *cb_arg;
2797 };
2798 
2799 static void
2800 subsystem_listener_update_done(struct spdk_io_channel_iter *i, int status)
2801 {
2802 	struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
2803 
2804 	if (ctx->cb_fn) {
2805 		ctx->cb_fn(ctx->cb_arg, status);
2806 	}
2807 	free(ctx);
2808 }
2809 
2810 static void
2811 subsystem_listener_update_on_pg(struct spdk_io_channel_iter *i)
2812 {
2813 	struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
2814 	struct spdk_nvmf_subsystem_listener *listener;
2815 	struct spdk_nvmf_poll_group *group;
2816 	struct spdk_nvmf_ctrlr *ctrlr;
2817 
2818 	listener = ctx->listener;
2819 	group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i));
2820 
2821 	TAILQ_FOREACH(ctrlr, &listener->subsystem->ctrlrs, link) {
2822 		if (ctrlr->admin_qpair->group == group && ctrlr->listener == listener) {
2823 			nvmf_ctrlr_async_event_ana_change_notice(ctrlr);
2824 		}
2825 	}
2826 
2827 	spdk_for_each_channel_continue(i, 0);
2828 }
2829 
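/* Set the ANA state of the listener matching the given transport ID and send
 * an ANA change asynchronous event notice to every controller using that
 * listener. The callback is invoked once all poll groups have been updated.
 * The subsystem must be inactive or paused.
 */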
2830 void
2831 nvmf_subsystem_set_ana_state(struct spdk_nvmf_subsystem *subsystem,
2832 			     const struct spdk_nvme_transport_id *trid,
2833 			     enum spdk_nvme_ana_state ana_state,
2834 			     spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, void *cb_arg)
2835 {
2836 	struct spdk_nvmf_subsystem_listener *listener;
2837 	struct subsystem_listener_update_ctx *ctx;
2838 
2839 	assert(cb_fn != NULL);
2840 	assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
2841 	       subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED);
2842 
2843 	if (!subsystem->flags.ana_reporting) {
2844 		SPDK_ERRLOG("ANA reporting is disabled\n");
2845 		cb_fn(cb_arg, -EINVAL);
2846 		return;
2847 	}
2848 
2849 	/* ANA Change state is not used, and ANA Persistent Loss state
2850 	 * is not supported yet.
2851 	 */
2852 	if (!(ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE ||
2853 	      ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE ||
2854 	      ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE)) {
2855 		SPDK_ERRLOG("ANA state %d is not supported\n", ana_state);
2856 		cb_fn(cb_arg, -ENOTSUP);
2857 		return;
2858 	}
2859 
2860 	listener = nvmf_subsystem_find_listener(subsystem, trid);
2861 	if (!listener) {
2862 		SPDK_ERRLOG("Unable to find listener.\n");
2863 		cb_fn(cb_arg, -EINVAL);
2864 		return;
2865 	}
2866 
2867 	if (listener->ana_state == ana_state) {
2868 		cb_fn(cb_arg, 0);
2869 		return;
2870 	}
2871 
2872 	ctx = calloc(1, sizeof(*ctx));
2873 	if (!ctx) {
2874 		SPDK_ERRLOG("Unable to allocate context\n");
2875 		cb_fn(cb_arg, -ENOMEM);
2876 		return;
2877 	}
2878 
2879 	listener->ana_state = ana_state;
2880 	listener->ana_state_change_count++;
2881 
2882 	ctx->listener = listener;
2883 	ctx->cb_fn = cb_fn;
2884 	ctx->cb_arg = cb_arg;
2885 
2886 	spdk_for_each_channel(subsystem->tgt,
2887 			      subsystem_listener_update_on_pg,
2888 			      ctx,
2889 			      subsystem_listener_update_done);
2890 }
2891