xref: /spdk/lib/nvmf/nvmf.c (revision c4d9daeb7bf491bc0eb6e8d417b75d44773cb009)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2018 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/bit_array.h"
#include "spdk/conf.h"
#include "spdk/thread.h"
#include "spdk/nvmf.h"
#include "spdk/trace.h"
#include "spdk/endian.h"
#include "spdk/string.h"

#include "spdk_internal/log.h"

#include "nvmf_internal.h"
#include "transport.h"

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)

#define SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS 1024

typedef void (*nvmf_qpair_disconnect_cpl)(void *ctx, int status);
static void spdk_nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf);

/* Context supplied to a single call to nvmf_qpair_disconnect. */
struct nvmf_qpair_disconnect_ctx {
	struct spdk_nvmf_qpair *qpair;
	struct spdk_nvmf_ctrlr *ctrlr;
	nvmf_qpair_disconnect_cb cb_fn;
	struct spdk_thread *thread;
	void *ctx;
	uint16_t qid;
};

/*
 * There are several occasions on which we need to iterate through the list of
 * all qpairs and selectively disconnect them. To do this sequentially without
 * overlap, we provide a context from which the next qpair can be recovered, so
 * that nvmf_qpair_disconnect can be called on each desired qpair in turn.
 */
struct nvmf_qpair_disconnect_many_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_poll_group *group;
	spdk_nvmf_poll_group_mod_done cpl_fn;
	void *cpl_ctx;
};

static void
spdk_nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair,
			  enum spdk_nvmf_qpair_state state)
{
	assert(qpair != NULL);
	assert(qpair->group->thread == spdk_get_thread());

	qpair->state = state;
}

static int
spdk_nvmf_poll_group_poll(void *ctx)
{
	struct spdk_nvmf_poll_group *group = ctx;
	int rc;
	int count = 0;
	struct spdk_nvmf_transport_poll_group *tgroup;

	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		rc = spdk_nvmf_transport_poll_group_poll(tgroup);
		if (rc < 0) {
			return -1;
		}
		count += rc;
	}

	return count;
}

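/*
 * Poll groups are the per-thread channel contexts for the target
 * io_device: spdk_get_io_channel(tgt) invokes this constructor on the
 * calling thread, and the final spdk_put_io_channel() on a group invokes
 * the destructor below.
 */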
static int
spdk_nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
{
	struct spdk_nvmf_tgt *tgt = io_device;
	struct spdk_nvmf_poll_group *group = ctx_buf;
	struct spdk_nvmf_transport *transport;
	uint32_t sid;

	TAILQ_INIT(&group->tgroups);
	TAILQ_INIT(&group->qpairs);

	TAILQ_FOREACH(transport, &tgt->transports, link) {
		spdk_nvmf_poll_group_add_transport(group, transport);
	}

	group->num_sgroups = tgt->max_subsystems;
	group->sgroups = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem_poll_group));
	if (!group->sgroups) {
		return -ENOMEM;
	}

	for (sid = 0; sid < tgt->max_subsystems; sid++) {
		struct spdk_nvmf_subsystem *subsystem;

		subsystem = tgt->subsystems[sid];
		if (!subsystem) {
			continue;
		}

		if (spdk_nvmf_poll_group_add_subsystem(group, subsystem, NULL, NULL) != 0) {
			spdk_nvmf_tgt_destroy_poll_group(io_device, ctx_buf);
			return -1;
		}
	}

	group->poller = spdk_poller_register(spdk_nvmf_poll_group_poll, group, 0);
	group->thread = spdk_get_thread();

	return 0;
}

static void
spdk_nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf)
{
	struct spdk_nvmf_poll_group *group = ctx_buf;
	struct spdk_nvmf_transport_poll_group *tgroup, *tmp;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	uint32_t sid, nsid;

	TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
		TAILQ_REMOVE(&group->tgroups, tgroup, link);
		spdk_nvmf_transport_poll_group_destroy(tgroup);
	}

	for (sid = 0; sid < group->num_sgroups; sid++) {
		sgroup = &group->sgroups[sid];

		for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
			if (sgroup->ns_info[nsid].channel) {
				spdk_put_io_channel(sgroup->ns_info[nsid].channel);
				sgroup->ns_info[nsid].channel = NULL;
			}
		}

		free(sgroup->ns_info);
	}

	free(group->sgroups);
}

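/*
 * Disconnect the qpairs of a poll group one at a time. Each completion
 * re-enters this function until none remain, at which point the group's
 * channel reference is dropped, allowing the group itself to be destroyed.
 */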
static void
_nvmf_tgt_disconnect_next_qpair(void *ctx)
{
	struct spdk_nvmf_qpair *qpair;
	struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_poll_group *group = qpair_ctx->group;
	struct spdk_io_channel *ch;
	int rc = 0;

	qpair = TAILQ_FIRST(&group->qpairs);

	if (qpair) {
		rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_tgt_disconnect_next_qpair, ctx);
	}

	if (!qpair || rc != 0) {
		/* When the refcount from the channels reaches 0, spdk_nvmf_tgt_destroy_poll_group will be called. */
		ch = spdk_io_channel_from_ctx(group);
		spdk_put_io_channel(ch);
		free(qpair_ctx);
	}
}

static void
spdk_nvmf_tgt_destroy_poll_group_qpairs(struct spdk_nvmf_poll_group *group)
{
	struct nvmf_qpair_disconnect_many_ctx *ctx;

	ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx));

	if (!ctx) {
		SPDK_ERRLOG("Failed to allocate memory for destroy poll group ctx\n");
		return;
	}

	spdk_poller_unregister(&group->poller);

	ctx->group = group;
	_nvmf_tgt_disconnect_next_qpair(ctx);
}

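/*
 * Illustrative usage sketch (application code, not part of this file;
 * destroy_done_cb stands in for a caller-defined
 * spdk_nvmf_tgt_destroy_done_fn):
 *
 *	struct spdk_nvmf_tgt *tgt;
 *
 *	tgt = spdk_nvmf_tgt_create(0);	// 0 selects SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS
 *	...
 *	spdk_nvmf_tgt_destroy(tgt, destroy_done_cb, NULL);
 */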
struct spdk_nvmf_tgt *
spdk_nvmf_tgt_create(uint32_t max_subsystems)
{
	struct spdk_nvmf_tgt *tgt;

	tgt = calloc(1, sizeof(*tgt));
	if (!tgt) {
		return NULL;
	}

	if (!max_subsystems) {
		tgt->max_subsystems = SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS;
	} else {
		tgt->max_subsystems = max_subsystems;
	}

	tgt->discovery_genctr = 0;
	tgt->discovery_log_page = NULL;
	tgt->discovery_log_page_size = 0;
	TAILQ_INIT(&tgt->transports);

	tgt->subsystems = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
	if (!tgt->subsystems) {
		free(tgt);
		return NULL;
	}

	spdk_io_device_register(tgt,
				spdk_nvmf_tgt_create_poll_group,
				spdk_nvmf_tgt_destroy_poll_group,
				sizeof(struct spdk_nvmf_poll_group),
				"nvmf_tgt");

	return tgt;
}

static void
spdk_nvmf_tgt_destroy_cb(void *io_device)
{
	struct spdk_nvmf_tgt *tgt = io_device;
	struct spdk_nvmf_transport *transport, *transport_tmp;
	spdk_nvmf_tgt_destroy_done_fn		*destroy_cb_fn;
	void					*destroy_cb_arg;
	uint32_t i;

	if (tgt->discovery_log_page) {
		free(tgt->discovery_log_page);
	}

	if (tgt->subsystems) {
		for (i = 0; i < tgt->max_subsystems; i++) {
			if (tgt->subsystems[i]) {
				spdk_nvmf_subsystem_destroy(tgt->subsystems[i]);
			}
		}
		free(tgt->subsystems);
	}

	TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, transport_tmp) {
		TAILQ_REMOVE(&tgt->transports, transport, link);
		spdk_nvmf_transport_destroy(transport);
	}

	destroy_cb_fn = tgt->destroy_cb_fn;
	destroy_cb_arg = tgt->destroy_cb_arg;

	free(tgt);

	if (destroy_cb_fn) {
		destroy_cb_fn(destroy_cb_arg, 0);
	}
}

void
spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt,
		      spdk_nvmf_tgt_destroy_done_fn cb_fn,
		      void *cb_arg)
{
	tgt->destroy_cb_fn = cb_fn;
	tgt->destroy_cb_arg = cb_arg;

	spdk_io_device_unregister(tgt, spdk_nvmf_tgt_destroy_cb);
}

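/*
 * Emit the JSON-RPC calls needed to recreate this subsystem on restart.
 * The output is a sequence of objects roughly of the form (values are
 * illustrative only):
 *
 *	{ "method": "nvmf_subsystem_create",
 *	  "params": { "nqn": "nqn.2016-06.io.spdk:cnode1", ... } }
 *	{ "method": "nvmf_subsystem_add_listener", ... }
 *	{ "method": "nvmf_subsystem_add_host", ... }
 *	{ "method": "nvmf_subsystem_add_ns", ... }
 */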
static void
spdk_nvmf_write_subsystem_config_json(struct spdk_json_write_ctx *w,
				      struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_host *host;
	struct spdk_nvmf_listener *listener;
	const struct spdk_nvme_transport_id *trid;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_ns_opts ns_opts;
	uint32_t max_namespaces;
	char uuid_str[SPDK_UUID_STRING_LEN];
	const char *trtype;
	const char *adrfam;

	if (spdk_nvmf_subsystem_get_type(subsystem) != SPDK_NVMF_SUBTYPE_NVME) {
		return;
	}

	/* { */
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "nvmf_subsystem_create");

	/*     "params" : { */
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
	spdk_json_write_named_bool(w, "allow_any_host", spdk_nvmf_subsystem_get_allow_any_host(subsystem));
	spdk_json_write_named_string(w, "serial_number", spdk_nvmf_subsystem_get_sn(subsystem));
	spdk_json_write_named_string(w, "model_number", spdk_nvmf_subsystem_get_mn(subsystem));

	max_namespaces = spdk_nvmf_subsystem_get_max_namespaces(subsystem);
	if (max_namespaces != 0) {
		spdk_json_write_named_uint32(w, "max_namespaces", max_namespaces);
	}

	/*     } "params" */
	spdk_json_write_object_end(w);

	/* } */
	spdk_json_write_object_end(w);

	for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem); listener != NULL;
	     listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) {
		trid = spdk_nvmf_listener_get_trid(listener);

		trtype = spdk_nvme_transport_id_trtype_str(trid->trtype);
		adrfam = spdk_nvme_transport_id_adrfam_str(trid->adrfam);

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_listener");

		/*     "params" : { */
		spdk_json_write_named_object_begin(w, "params");

		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));

		/*     "listen_address" : { */
		spdk_json_write_named_object_begin(w, "listen_address");

		spdk_json_write_named_string(w, "trtype", trtype);
		if (adrfam) {
			spdk_json_write_named_string(w, "adrfam", adrfam);
		}

		spdk_json_write_named_string(w, "traddr", trid->traddr);
		spdk_json_write_named_string(w, "trsvcid", trid->trsvcid);
		/*     } "listen_address" */
		spdk_json_write_object_end(w);

		/*     } "params" */
		spdk_json_write_object_end(w);

		/* } */
		spdk_json_write_object_end(w);
	}

	for (host = spdk_nvmf_subsystem_get_first_host(subsystem); host != NULL;
	     host = spdk_nvmf_subsystem_get_next_host(subsystem, host)) {

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_host");

		/*     "params" : { */
		spdk_json_write_named_object_begin(w, "params");

		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
		spdk_json_write_named_string(w, "host", spdk_nvmf_host_get_nqn(host));

		/*     } "params" */
		spdk_json_write_object_end(w);

		/* } */
		spdk_json_write_object_end(w);
	}

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		spdk_nvmf_ns_get_opts(ns, &ns_opts, sizeof(ns_opts));

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_ns");

		/*     "params" : { */
		spdk_json_write_named_object_begin(w, "params");

		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));

		/*     "namespace" : { */
		spdk_json_write_named_object_begin(w, "namespace");

		spdk_json_write_named_uint32(w, "nsid", spdk_nvmf_ns_get_id(ns));
		spdk_json_write_named_string(w, "bdev_name", spdk_bdev_get_name(spdk_nvmf_ns_get_bdev(ns)));

		if (!spdk_mem_all_zero(ns_opts.nguid, sizeof(ns_opts.nguid))) {
			SPDK_STATIC_ASSERT(sizeof(ns_opts.nguid) == sizeof(uint64_t) * 2, "size mismatch");
			spdk_json_write_named_string_fmt(w, "nguid", "%016"PRIX64"%016"PRIX64, from_be64(&ns_opts.nguid[0]),
							 from_be64(&ns_opts.nguid[8]));
		}

		if (!spdk_mem_all_zero(ns_opts.eui64, sizeof(ns_opts.eui64))) {
			SPDK_STATIC_ASSERT(sizeof(ns_opts.eui64) == sizeof(uint64_t), "size mismatch");
			spdk_json_write_named_string_fmt(w, "eui64", "%016"PRIX64, from_be64(&ns_opts.eui64));
		}

		if (!spdk_mem_all_zero(&ns_opts.uuid, sizeof(ns_opts.uuid))) {
			spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &ns_opts.uuid);
			spdk_json_write_named_string(w, "uuid", uuid_str);
		}

		/*     } "namespace" */
		spdk_json_write_object_end(w);

		/*     } "params" */
		spdk_json_write_object_end(w);

		/* } */
		spdk_json_write_object_end(w);
	}
}

void
spdk_nvmf_tgt_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_tgt *tgt)
{
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_transport *transport;

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "set_nvmf_target_max_subsystems");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint32(w, "max_subsystems", tgt->max_subsystems);
	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);

	/* write transports */
	TAILQ_FOREACH(transport, &tgt->transports, link) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_create_transport");

		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "trtype", spdk_nvme_transport_id_trtype_str(transport->ops->type));
		spdk_json_write_named_uint32(w, "max_queue_depth", transport->opts.max_queue_depth);
		spdk_json_write_named_uint32(w, "max_qpairs_per_ctrlr", transport->opts.max_qpairs_per_ctrlr);
		spdk_json_write_named_uint32(w, "in_capsule_data_size", transport->opts.in_capsule_data_size);
		spdk_json_write_named_uint32(w, "max_io_size", transport->opts.max_io_size);
		spdk_json_write_named_uint32(w, "io_unit_size", transport->opts.io_unit_size);
		spdk_json_write_named_uint32(w, "max_aq_depth", transport->opts.max_aq_depth);
		if (transport->ops->type == SPDK_NVME_TRANSPORT_RDMA) {
			spdk_json_write_named_uint32(w, "max_srq_depth", transport->opts.max_srq_depth);
		}
		spdk_json_write_object_end(w);

		spdk_json_write_object_end(w);
	}

	subsystem = spdk_nvmf_subsystem_get_first(tgt);
	while (subsystem) {
		spdk_nvmf_write_subsystem_config_json(w, subsystem);
		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
	}
}

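/*
 * Illustrative caller sketch (application code, not part of this file;
 * listen_done_cb stands in for a caller-defined
 * spdk_nvmf_tgt_listen_done_fn):
 *
 *	struct spdk_nvme_transport_id trid = {};
 *
 *	trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
 *	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
 *	snprintf(trid.traddr, sizeof(trid.traddr), "192.168.0.1");
 *	snprintf(trid.trsvcid, sizeof(trid.trsvcid), "4420");
 *	spdk_nvmf_tgt_listen(tgt, &trid, listen_done_cb, NULL);
 */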
void
spdk_nvmf_tgt_listen(struct spdk_nvmf_tgt *tgt,
		     struct spdk_nvme_transport_id *trid,
		     spdk_nvmf_tgt_listen_done_fn cb_fn,
		     void *cb_arg)
{
	struct spdk_nvmf_transport *transport;
	const char *trtype;
	int rc;

	transport = spdk_nvmf_tgt_get_transport(tgt, trid->trtype);
	if (!transport) {
		trtype = spdk_nvme_transport_id_trtype_str(trid->trtype);
		if (trtype != NULL) {
			SPDK_ERRLOG("Unable to listen on transport %s. The transport must be created first.\n", trtype);
		} else {
			SPDK_ERRLOG("The specified trtype %d is unknown. Please make sure that it is properly registered.\n",
				    trid->trtype);
		}
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	rc = spdk_nvmf_transport_listen(transport, trid);
	if (rc < 0) {
		SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr);
		cb_fn(cb_arg, rc);
		return;
	}

	tgt->discovery_genctr++;

	cb_fn(cb_arg, 0);
}

struct spdk_nvmf_tgt_add_transport_ctx {
	struct spdk_nvmf_tgt *tgt;
	struct spdk_nvmf_transport *transport;
	spdk_nvmf_tgt_add_transport_done_fn cb_fn;
	void *cb_arg;
};

static void
_spdk_nvmf_tgt_add_transport_done(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cb_fn(ctx->cb_arg, status);

	free(ctx);
}

static void
_spdk_nvmf_tgt_add_transport(struct spdk_io_channel_iter *i)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
	int rc;

	rc = spdk_nvmf_poll_group_add_transport(group, ctx->transport);
	spdk_for_each_channel_continue(i, rc);
}
void
spdk_nvmf_tgt_add_transport(struct spdk_nvmf_tgt *tgt,
			    struct spdk_nvmf_transport *transport,
			    spdk_nvmf_tgt_add_transport_done_fn cb_fn,
			    void *cb_arg)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx;

	if (spdk_nvmf_tgt_get_transport(tgt, transport->ops->type)) {
		cb_fn(cb_arg, -EEXIST);
		return; /* transport already created */
	}

	transport->tgt = tgt;
	TAILQ_INSERT_TAIL(&tgt->transports, transport, link);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->tgt = tgt;
	ctx->transport = transport;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(tgt,
			      _spdk_nvmf_tgt_add_transport,
			      ctx,
			      _spdk_nvmf_tgt_add_transport_done);
}

struct spdk_nvmf_subsystem *
spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
{
	struct spdk_nvmf_subsystem	*subsystem;
	uint32_t sid;

	if (!subnqn) {
		return NULL;
	}

	for (sid = 0; sid < tgt->max_subsystems; sid++) {
		subsystem = tgt->subsystems[sid];
		if (subsystem == NULL) {
			continue;
		}

		if (strcmp(subnqn, subsystem->subnqn) == 0) {
			return subsystem;
		}
	}

	return NULL;
}

struct spdk_nvmf_transport *
spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, enum spdk_nvme_transport_type type)
{
	struct spdk_nvmf_transport *transport;

	TAILQ_FOREACH(transport, &tgt->transports, link) {
		if (transport->ops->type == type) {
			return transport;
		}
	}

	return NULL;
}

void
spdk_nvmf_tgt_accept(struct spdk_nvmf_tgt *tgt, new_qpair_fn cb_fn)
{
	struct spdk_nvmf_transport *transport, *tmp;

	TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, tmp) {
		spdk_nvmf_transport_accept(transport, cb_fn);
	}
}

struct spdk_nvmf_poll_group *
spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt)
{
	struct spdk_io_channel *ch;

	ch = spdk_get_io_channel(tgt);
	if (!ch) {
		SPDK_ERRLOG("Unable to get I/O channel for target\n");
		return NULL;
	}

	return spdk_io_channel_get_ctx(ch);
}

void
spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group)
{
	/* This function will put the io_channel associated with this poll group */
	spdk_nvmf_tgt_destroy_poll_group_qpairs(group);
}

int
spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
			 struct spdk_nvmf_qpair *qpair)
{
	int rc = -1;
	struct spdk_nvmf_transport_poll_group *tgroup;

	TAILQ_INIT(&qpair->outstanding);
	qpair->group = group;

	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (tgroup->transport == qpair->transport) {
			rc = spdk_nvmf_transport_poll_group_add(tgroup, qpair);
			break;
		}
	}

	/* We add the qpair to the group only if it is successfully added into the tgroup */
	if (rc == 0) {
		TAILQ_INSERT_TAIL(&group->qpairs, qpair, link);
		spdk_nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ACTIVE);
	}

	return rc;
}

static void
_nvmf_ctrlr_destruct(void *ctx)
{
	struct spdk_nvmf_ctrlr *ctrlr = ctx;

	spdk_nvmf_ctrlr_destruct(ctrlr);
}

static void
_spdk_nvmf_ctrlr_free_from_qpair(void *ctx)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_ctrlr *ctrlr = qpair_ctx->ctrlr;
	uint32_t count;

	spdk_bit_array_clear(ctrlr->qpair_mask, qpair_ctx->qid);
	count = spdk_bit_array_count_set(ctrlr->qpair_mask);
	if (count == 0) {
		spdk_bit_array_free(&ctrlr->qpair_mask);

		spdk_thread_send_msg(ctrlr->subsys->thread, _nvmf_ctrlr_destruct, ctrlr);
	}

	if (qpair_ctx->cb_fn) {
		spdk_thread_send_msg(qpair_ctx->thread, qpair_ctx->cb_fn, qpair_ctx->ctx);
	}
	free(qpair_ctx);
}

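/*
 * Final stage of qpair teardown, run on the qpair's poll group thread:
 * remove the qpair from its transport poll group, drop any of its
 * requests still queued on the subsystem, release the transport
 * resources, and hand controller bookkeeping off to the controller's
 * thread.
 */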
static void
_spdk_nvmf_qpair_destroy(void *ctx, int status)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_qpair *qpair = qpair_ctx->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
	struct spdk_nvmf_transport_poll_group *tgroup;
	struct spdk_nvmf_request *req, *tmp;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	int rc;

	assert(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING);
	spdk_nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ERROR);
	qpair_ctx->qid = qpair->qid;

	/* Find the tgroup and remove the qpair from the tgroup */
	TAILQ_FOREACH(tgroup, &qpair->group->tgroups, link) {
		if (tgroup->transport == qpair->transport) {
			rc = spdk_nvmf_transport_poll_group_remove(tgroup, qpair);
			if (rc && (rc != ENOTSUP)) {
				SPDK_ERRLOG("Cannot remove qpair=%p from transport group=%p\n",
					    qpair, tgroup);
			}
			break;
		}
	}

	if (ctrlr) {
		sgroup = &qpair->group->sgroups[ctrlr->subsys->id];
		TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
			if (req->qpair == qpair) {
				TAILQ_REMOVE(&sgroup->queued, req, link);
				if (spdk_nvmf_transport_req_free(req)) {
					SPDK_ERRLOG("Transport request free error!\n");
				}
			}
		}
	}

	TAILQ_REMOVE(&qpair->group->qpairs, qpair, link);

	spdk_nvmf_transport_qpair_fini(qpair);

	if (!ctrlr || !ctrlr->thread) {
		if (qpair_ctx->cb_fn) {
			spdk_thread_send_msg(qpair_ctx->thread, qpair_ctx->cb_fn, qpair_ctx->ctx);
		}
		free(qpair_ctx);
		return;
	}

	qpair_ctx->ctrlr = ctrlr;
	spdk_thread_send_msg(ctrlr->thread, _spdk_nvmf_ctrlr_free_from_qpair, qpair_ctx);
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx;

	/* If we get a qpair in the uninitialized state, we can just destroy it immediately */
	if (qpair->state == SPDK_NVMF_QPAIR_UNINITIALIZED) {
		spdk_nvmf_transport_qpair_fini(qpair);
		if (cb_fn) {
			cb_fn(ctx);
		}
		return 0;
	}

	/* The queue pair must be disconnected from the thread that owns it */
	assert(qpair->group->thread == spdk_get_thread());

	if (qpair->state != SPDK_NVMF_QPAIR_ACTIVE) {
		/* This can occur if the connection is killed by the target,
		 * which results in a notification that the connection
		 * died. Send a message to defer the processing of this
		 * callback. This allows the stack to unwind in the case
		 * where a bunch of connections are disconnected in
		 * a loop. */
		if (cb_fn) {
			spdk_thread_send_msg(qpair->group->thread, cb_fn, ctx);
		}
		return 0;
	}

	assert(qpair->state == SPDK_NVMF_QPAIR_ACTIVE);
	spdk_nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_DEACTIVATING);

	qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx));
	if (!qpair_ctx) {
		SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n");
		return -ENOMEM;
	}

	qpair_ctx->qpair = qpair;
	qpair_ctx->cb_fn = cb_fn;
	qpair_ctx->thread = qpair->group->thread;
	qpair_ctx->ctx = ctx;

	/* Check for outstanding I/O */
	if (!TAILQ_EMPTY(&qpair->outstanding)) {
		qpair->state_cb = _spdk_nvmf_qpair_destroy;
		qpair->state_cb_arg = qpair_ctx;
		spdk_nvmf_qpair_free_aer(qpair);
		return 0;
	}

	_spdk_nvmf_qpair_destroy(qpair_ctx, 0);

	return 0;
}
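
/*
 * Note for callers: disconnection is generally asynchronous. When cb_fn is
 * provided it is typically delivered as a message on the qpair's poll group
 * thread once teardown completes (the uninitialized-qpair case above invokes
 * it inline).
 */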

int
spdk_nvmf_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
			      struct spdk_nvme_transport_id *trid)
{
	return spdk_nvmf_transport_qpair_get_peer_trid(qpair, trid);
}

int
spdk_nvmf_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
			       struct spdk_nvme_transport_id *trid)
{
	return spdk_nvmf_transport_qpair_get_local_trid(qpair, trid);
}

int
spdk_nvmf_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
				struct spdk_nvme_transport_id *trid)
{
	return spdk_nvmf_transport_qpair_get_listen_trid(qpair, trid);
}

int
spdk_nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
				   struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_transport_poll_group *tgroup;

	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (tgroup->transport == transport) {
			/* Transport already in the poll group */
			return 0;
		}
	}

	tgroup = spdk_nvmf_transport_poll_group_create(transport);
	if (!tgroup) {
		SPDK_ERRLOG("Unable to create poll group for transport\n");
		return -1;
	}

	TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);

	return 0;
}

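/*
 * Resize this poll group's per-namespace bookkeeping to match the
 * subsystem's current max_nsid, acquiring or releasing bdev I/O channels
 * as namespaces appear or disappear, and snapshot reservation state
 * (registrants, holder, rtype/crkey) for use in the I/O path.
 */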
static int
poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
			    struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	uint32_t new_num_ns, old_num_ns;
	uint32_t i, j;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_registrant *reg, *tmp;

	/* Make sure our poll group has memory for this subsystem allocated */
	if (subsystem->id >= group->num_sgroups) {
		return -ENOMEM;
	}

	sgroup = &group->sgroups[subsystem->id];

	/* Make sure the array of namespace information is the correct size */
	new_num_ns = subsystem->max_nsid;
	old_num_ns = sgroup->num_ns;

	if (old_num_ns == 0) {
		if (new_num_ns > 0) {
			/* First allocation */
			sgroup->ns_info = calloc(new_num_ns, sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
			if (!sgroup->ns_info) {
				return -ENOMEM;
			}
		}
	} else if (new_num_ns > old_num_ns) {
		void *buf;

		/* Make the array larger */
		buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
		if (!buf) {
			return -ENOMEM;
		}

		sgroup->ns_info = buf;

		/* Null out the new namespace information slots */
		for (i = old_num_ns; i < new_num_ns; i++) {
			memset(&sgroup->ns_info[i], 0, sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
		}
	} else if (new_num_ns < old_num_ns) {
		void *buf;

		/* Free the extra I/O channels */
		for (i = new_num_ns; i < old_num_ns; i++) {
			if (sgroup->ns_info[i].channel) {
				spdk_put_io_channel(sgroup->ns_info[i].channel);
				sgroup->ns_info[i].channel = NULL;
			}
		}

		/* Make the array smaller */
		if (new_num_ns > 0) {
			buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
			if (!buf) {
				return -ENOMEM;
			}
			sgroup->ns_info = buf;
		} else {
			free(sgroup->ns_info);
			sgroup->ns_info = NULL;
		}
	}

	sgroup->num_ns = new_num_ns;

	/* Detect bdevs that were added or removed */
	for (i = 0; i < sgroup->num_ns; i++) {
		ns = subsystem->ns[i];
		if (ns == NULL && sgroup->ns_info[i].channel == NULL) {
			/* Both NULL. Leave empty */
		} else if (ns == NULL && sgroup->ns_info[i].channel != NULL) {
			/* There was a channel here, but the namespace is gone. */
			spdk_put_io_channel(sgroup->ns_info[i].channel);
			sgroup->ns_info[i].channel = NULL;
		} else if (ns != NULL && sgroup->ns_info[i].channel == NULL) {
			/* A namespace appeared but there is no channel yet */
			sgroup->ns_info[i].channel = spdk_bdev_get_io_channel(ns->desc);
			if (sgroup->ns_info[i].channel == NULL) {
				SPDK_ERRLOG("Could not allocate I/O channel.\n");
				return -ENOMEM;
			}
		} else {
			/* A namespace was present before and didn't change. */
		}

		if (ns == NULL) {
			memset(&sgroup->ns_info[i], 0, sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
		} else {
			sgroup->ns_info[i].crkey = ns->crkey;
			sgroup->ns_info[i].rtype = ns->rtype;
			if (ns->holder) {
				sgroup->ns_info[i].holder_id = ns->holder->hostid;
			}

			memset(&sgroup->ns_info[i].reg_hostid, 0, SPDK_NVMF_MAX_NUM_REGISTRANTS * sizeof(struct spdk_uuid));
			j = 0;
			TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
				if (j >= SPDK_NVMF_MAX_NUM_REGISTRANTS) {
					SPDK_ERRLOG("A maximum of %u registrants is supported.\n", SPDK_NVMF_MAX_NUM_REGISTRANTS);
					return -EINVAL;
				}
				sgroup->ns_info[i].reg_hostid[j++] = reg->hostid;
			}
		}
	}

	return 0;
}

int
spdk_nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem)
{
	return poll_group_update_subsystem(group, subsystem);
}

int
spdk_nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
				   struct spdk_nvmf_subsystem *subsystem,
				   spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
	int rc = 0;
	struct spdk_nvmf_subsystem_poll_group *sgroup = &group->sgroups[subsystem->id];

	TAILQ_INIT(&sgroup->queued);

	rc = poll_group_update_subsystem(group, subsystem);
	if (rc) {
		spdk_nvmf_poll_group_remove_subsystem(group, subsystem, NULL, NULL);
		goto fini;
	}

	sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
fini:
	if (cb_fn) {
		cb_fn(cb_arg, rc);
	}

	return rc;
}

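/*
 * Completion for the disconnect chain started by
 * spdk_nvmf_poll_group_remove_subsystem(): once every qpair belonging to
 * the subsystem is gone, release the namespace channels and report back.
 */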
static void
_nvmf_poll_group_remove_subsystem_cb(void *ctx, int status)
{
	struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_poll_group *group;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	spdk_nvmf_poll_group_mod_done cpl_fn = NULL;
	void *cpl_ctx = NULL;
	uint32_t nsid;

	group = qpair_ctx->group;
	subsystem = qpair_ctx->subsystem;
	cpl_fn = qpair_ctx->cpl_fn;
	cpl_ctx = qpair_ctx->cpl_ctx;
	sgroup = &group->sgroups[subsystem->id];

	if (status) {
		goto fini;
	}

	for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
		if (sgroup->ns_info[nsid].channel) {
			spdk_put_io_channel(sgroup->ns_info[nsid].channel);
			sgroup->ns_info[nsid].channel = NULL;
		}
	}

	sgroup->num_ns = 0;
	free(sgroup->ns_info);
	sgroup->ns_info = NULL;
fini:
	free(qpair_ctx);
	if (cpl_fn) {
		cpl_fn(cpl_ctx, status);
	}
}

static void
_nvmf_subsystem_disconnect_next_qpair(void *ctx)
{
	struct spdk_nvmf_qpair *qpair;
	struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_poll_group *group;
	int rc = 0;

	group = qpair_ctx->group;
	subsystem = qpair_ctx->subsystem;

	TAILQ_FOREACH(qpair, &group->qpairs, link) {
		if ((qpair->ctrlr != NULL) && (qpair->ctrlr->subsys == subsystem)) {
			break;
		}
	}

	if (qpair) {
		rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_subsystem_disconnect_next_qpair, qpair_ctx);
	}

	if (!qpair || rc != 0) {
		_nvmf_poll_group_remove_subsystem_cb(ctx, rc);
	}
}

void
spdk_nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem,
				      spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
	struct spdk_nvmf_qpair *qpair;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	struct nvmf_qpair_disconnect_many_ctx *ctx;
	int rc = 0;

	ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx));

	if (!ctx) {
		SPDK_ERRLOG("Unable to allocate memory for context to remove subsystem from poll group\n");
		goto fini;
	}

	ctx->group = group;
	ctx->subsystem = subsystem;
	ctx->cpl_fn = cb_fn;
	ctx->cpl_ctx = cb_arg;

	sgroup = &group->sgroups[subsystem->id];
	sgroup->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;

	TAILQ_FOREACH(qpair, &group->qpairs, link) {
		if ((qpair->ctrlr != NULL) && (qpair->ctrlr->subsys == subsystem)) {
			break;
		}
	}

	if (qpair) {
		rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_subsystem_disconnect_next_qpair, ctx);
	} else {
		/* Call the callback immediately. It will handle any channel iteration. */
		_nvmf_poll_group_remove_subsystem_cb(ctx, 0);
	}

	if (rc != 0) {
		free(ctx);
		goto fini;
	}

	return;
fini:
	if (cb_fn) {
		cb_fn(cb_arg, rc);
	}
}

void
spdk_nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem,
				     spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	int rc = 0;

	if (subsystem->id >= group->num_sgroups) {
		rc = -1;
		goto fini;
	}

	sgroup = &group->sgroups[subsystem->id];
	if (sgroup == NULL) {
		rc = -1;
		goto fini;
	}

	assert(sgroup->state == SPDK_NVMF_SUBSYSTEM_ACTIVE);
	/* TODO: This currently does not quiesce I/O */
	sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
fini:
	if (cb_fn) {
		cb_fn(cb_arg, rc);
	}
}

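/*
 * Resuming re-syncs the namespace state (the subsystem may have changed
 * while paused) and then re-executes any requests that were queued while
 * the subsystem was not active.
 */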
void
spdk_nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem,
				      spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
	struct spdk_nvmf_request *req, *tmp;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	int rc = 0;

	if (subsystem->id >= group->num_sgroups) {
		rc = -1;
		goto fini;
	}

	sgroup = &group->sgroups[subsystem->id];

	assert(sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSED);

	rc = poll_group_update_subsystem(group, subsystem);
	if (rc) {
		goto fini;
	}

	sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;

	/* Release all queued requests */
	TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
		TAILQ_REMOVE(&sgroup->queued, req, link);
		spdk_nvmf_request_exec(req);
	}
fini:
	if (cb_fn) {
		cb_fn(cb_arg, rc);
	}
}