/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2018-2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/bit_array.h"
#include "spdk/thread.h"
#include "spdk/nvmf.h"
#include "spdk/trace.h"
#include "spdk/endian.h"
#include "spdk/string.h"

#include "spdk/log.h"

#include "nvmf_internal.h"
#include "transport.h"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

#define SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS 1024
#define SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US 10000

static TAILQ_HEAD(, spdk_nvmf_tgt) g_nvmf_tgts = TAILQ_HEAD_INITIALIZER(g_nvmf_tgts);

typedef void (*nvmf_qpair_disconnect_cpl)(void *ctx, int status);
static void nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf);

/* supplied to a single call to nvmf_qpair_disconnect */
struct nvmf_qpair_disconnect_ctx {
	struct spdk_nvmf_qpair *qpair;
	struct spdk_nvmf_ctrlr *ctrlr;
	nvmf_qpair_disconnect_cb cb_fn;
	struct spdk_thread *thread;
	void *ctx;
	uint16_t qid;
};

/*
 * There are several occasions on which we need to iterate through the list of
 * all qpairs and selectively delete them. To do this sequentially without
 * overlap, we provide a context from which the next desired qpair can be
 * recovered, so that nvmf_qpair_disconnect can be invoked on it.
 */
struct nvmf_qpair_disconnect_many_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_poll_group *group;
	spdk_nvmf_poll_group_mod_done cpl_fn;
	void *cpl_ctx;
};

static void
nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair,
		     enum spdk_nvmf_qpair_state state)
{
	assert(qpair != NULL);
	assert(qpair->group->thread == spdk_get_thread());

	qpair->state = state;
}

static int
nvmf_poll_group_poll(void *ctx)
{
	struct spdk_nvmf_poll_group *group = ctx;
	int rc;
	int count = 0;
	struct spdk_nvmf_transport_poll_group *tgroup;

	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		rc = nvmf_transport_poll_group_poll(tgroup);
		if (rc < 0) {
			return SPDK_POLLER_BUSY;
		}
		count += rc;
	}

	return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}
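
/*
 * Illustrative sketch (not part of the library): any poller built on this
 * framework follows the same busy/idle convention as nvmf_poll_group_poll()
 * above. Returning SPDK_POLLER_BUSY tells the thread scheduler that work was
 * done on this iteration; SPDK_POLLER_IDLE reports that nothing was found to
 * do. The function and helper names below are hypothetical.
 *
 *	static int
 *	my_poll_fn(void *ctx)
 *	{
 *		int nevents = my_process_events(ctx);	// hypothetical helper
 *
 *		return nevents > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
 *	}
 *
 *	// A period of 0 microseconds polls on every thread iteration:
 *	struct spdk_poller *poller = SPDK_POLLER_REGISTER(my_poll_fn, ctx, 0);
 *	...
 *	spdk_poller_unregister(&poller);
 */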

static int
nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
{
	struct spdk_nvmf_tgt *tgt = io_device;
	struct spdk_nvmf_poll_group *group = ctx_buf;
	struct spdk_nvmf_transport *transport;
	uint32_t sid;

	TAILQ_INIT(&group->tgroups);
	TAILQ_INIT(&group->qpairs);

	TAILQ_FOREACH(transport, &tgt->transports, link) {
		nvmf_poll_group_add_transport(group, transport);
	}

	group->num_sgroups = tgt->max_subsystems;
	group->sgroups = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem_poll_group));
	if (!group->sgroups) {
		return -ENOMEM;
	}

	for (sid = 0; sid < tgt->max_subsystems; sid++) {
		struct spdk_nvmf_subsystem *subsystem;

		subsystem = tgt->subsystems[sid];
		if (!subsystem) {
			continue;
		}

		if (nvmf_poll_group_add_subsystem(group, subsystem, NULL, NULL) != 0) {
			nvmf_tgt_destroy_poll_group(io_device, ctx_buf);
			return -1;
		}
	}

	pthread_mutex_lock(&tgt->mutex);
	TAILQ_INSERT_TAIL(&tgt->poll_groups, group, link);
	pthread_mutex_unlock(&tgt->mutex);

	group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0);
	group->thread = spdk_get_thread();

	return 0;
}
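
/*
 * Note on the lifecycle above: the target registers itself as an io_device
 * (see spdk_nvmf_tgt_create()), so nvmf_tgt_create_poll_group() and
 * nvmf_tgt_destroy_poll_group() are never called directly. They run as the
 * create/destroy callbacks of the SPDK channel framework. A hedged sketch of
 * how a thread obtains its poll group through that framework:
 *
 *	struct spdk_io_channel *ch;
 *	struct spdk_nvmf_poll_group *pg;
 *
 *	ch = spdk_get_io_channel(tgt);		// first get on a thread runs the create callback
 *	pg = spdk_io_channel_get_ctx(ch);	// the channel context is the poll group
 *	...
 *	spdk_put_io_channel(ch);		// last put runs the destroy callback
 */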

static void
nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf)
{
	struct spdk_nvmf_tgt *tgt = io_device;
	struct spdk_nvmf_poll_group *group = ctx_buf;
	struct spdk_nvmf_transport_poll_group *tgroup, *tmp;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	uint32_t sid, nsid;

	pthread_mutex_lock(&tgt->mutex);
	TAILQ_REMOVE(&tgt->poll_groups, group, link);
	pthread_mutex_unlock(&tgt->mutex);

	TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
		TAILQ_REMOVE(&group->tgroups, tgroup, link);
		nvmf_transport_poll_group_destroy(tgroup);
	}

	for (sid = 0; sid < group->num_sgroups; sid++) {
		sgroup = &group->sgroups[sid];

		for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
			if (sgroup->ns_info[nsid].channel) {
				spdk_put_io_channel(sgroup->ns_info[nsid].channel);
				sgroup->ns_info[nsid].channel = NULL;
			}
		}

		free(sgroup->ns_info);
	}

	free(group->sgroups);

	spdk_poller_unregister(&group->poller);

	if (group->destroy_cb_fn) {
		group->destroy_cb_fn(group->destroy_cb_arg, 0);
	}
}

static void
_nvmf_tgt_disconnect_next_qpair(void *ctx)
{
	struct spdk_nvmf_qpair *qpair;
	struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_poll_group *group = qpair_ctx->group;
	struct spdk_io_channel *ch;
	int rc = 0;

	qpair = TAILQ_FIRST(&group->qpairs);

	if (qpair) {
		rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_tgt_disconnect_next_qpair, ctx);
	}

	if (!qpair || rc != 0) {
		/* When the refcount on the group's channels reaches 0, nvmf_tgt_destroy_poll_group will be called. */
		ch = spdk_io_channel_from_ctx(group);
		spdk_put_io_channel(ch);
		free(qpair_ctx);
	}
}

static void
nvmf_tgt_destroy_poll_group_qpairs(struct spdk_nvmf_poll_group *group)
{
	struct nvmf_qpair_disconnect_many_ctx *ctx;

	ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx));

	if (!ctx) {
		SPDK_ERRLOG("Failed to allocate memory for destroy poll group ctx\n");
		return;
	}

	ctx->group = group;
	_nvmf_tgt_disconnect_next_qpair(ctx);
}

static int
nvmf_tgt_accept(void *ctx)
{
	struct spdk_nvmf_tgt *tgt = ctx;
	struct spdk_nvmf_transport *transport, *tmp;
	int count = 0;

	TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, tmp) {
		count += nvmf_transport_accept(transport);
	}

	return count;
}

struct spdk_nvmf_tgt *
spdk_nvmf_tgt_create(struct spdk_nvmf_target_opts *opts)
{
	struct spdk_nvmf_tgt *tgt, *tmp_tgt;
	uint32_t acceptor_poll_rate;

	/* opts is dereferenced for the name below, so it must never be NULL. */
	if (!opts) {
		SPDK_ERRLOG("No target options provided.\n");
		return NULL;
	}

	if (strnlen(opts->name, NVMF_TGT_NAME_MAX_LENGTH) == NVMF_TGT_NAME_MAX_LENGTH) {
		SPDK_ERRLOG("Provided target name exceeds the max length of %u.\n", NVMF_TGT_NAME_MAX_LENGTH);
		return NULL;
	}

	TAILQ_FOREACH(tmp_tgt, &g_nvmf_tgts, link) {
		if (!strncmp(opts->name, tmp_tgt->name, NVMF_TGT_NAME_MAX_LENGTH)) {
			SPDK_ERRLOG("Provided target name must be unique.\n");
			return NULL;
		}
	}

	tgt = calloc(1, sizeof(*tgt));
	if (!tgt) {
		return NULL;
	}

	snprintf(tgt->name, NVMF_TGT_NAME_MAX_LENGTH, "%s", opts->name);

	if (!opts->max_subsystems) {
		tgt->max_subsystems = SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS;
	} else {
		tgt->max_subsystems = opts->max_subsystems;
	}

	if (!opts->acceptor_poll_rate) {
		acceptor_poll_rate = SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US;
	} else {
		acceptor_poll_rate = opts->acceptor_poll_rate;
	}

	tgt->discovery_genctr = 0;
	TAILQ_INIT(&tgt->transports);
	TAILQ_INIT(&tgt->poll_groups);

	tgt->subsystems = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
	if (!tgt->subsystems) {
		free(tgt);
		return NULL;
	}

	pthread_mutex_init(&tgt->mutex, NULL);

	tgt->accept_poller = SPDK_POLLER_REGISTER(nvmf_tgt_accept, tgt, acceptor_poll_rate);
	if (!tgt->accept_poller) {
		pthread_mutex_destroy(&tgt->mutex);
		free(tgt->subsystems);
		free(tgt);
		return NULL;
	}

	spdk_io_device_register(tgt,
				nvmf_tgt_create_poll_group,
				nvmf_tgt_destroy_poll_group,
				sizeof(struct spdk_nvmf_poll_group),
				tgt->name);

	TAILQ_INSERT_HEAD(&g_nvmf_tgts, tgt, link);

	return tgt;
}
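
/*
 * Hedged usage sketch for spdk_nvmf_tgt_create(); the option values are
 * illustrative only, "tgt0" is a made-up name, and the name field is assumed
 * to be a fixed-size character array as in contemporary SPDK headers:
 *
 *	struct spdk_nvmf_target_opts tgt_opts = {
 *		.max_subsystems = 1024,
 *		.acceptor_poll_rate = 10000,	// microseconds
 *	};
 *	struct spdk_nvmf_tgt *tgt;
 *
 *	snprintf(tgt_opts.name, sizeof(tgt_opts.name), "tgt0");
 *	tgt = spdk_nvmf_tgt_create(&tgt_opts);
 *	if (tgt == NULL) {
 *		// name too long, name already in use, or out of memory
 *	}
 */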

static void
_nvmf_tgt_destroy_next_transport(void *ctx)
{
	struct spdk_nvmf_tgt *tgt = ctx;
	struct spdk_nvmf_transport *transport;

	if (!TAILQ_EMPTY(&tgt->transports)) {
		transport = TAILQ_FIRST(&tgt->transports);
		TAILQ_REMOVE(&tgt->transports, transport, link);
		spdk_nvmf_transport_destroy(transport, _nvmf_tgt_destroy_next_transport, tgt);
	} else {
		spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn = tgt->destroy_cb_fn;
		void *destroy_cb_arg = tgt->destroy_cb_arg;

		pthread_mutex_destroy(&tgt->mutex);
		free(tgt);

		if (destroy_cb_fn) {
			destroy_cb_fn(destroy_cb_arg, 0);
		}
	}
}

static void
nvmf_tgt_destroy_cb(void *io_device)
{
	struct spdk_nvmf_tgt *tgt = io_device;
	uint32_t i;

	if (tgt->subsystems) {
		for (i = 0; i < tgt->max_subsystems; i++) {
			if (tgt->subsystems[i]) {
				nvmf_subsystem_remove_all_listeners(tgt->subsystems[i], true);
				spdk_nvmf_subsystem_destroy(tgt->subsystems[i]);
			}
		}
		free(tgt->subsystems);
	}

	_nvmf_tgt_destroy_next_transport(tgt);
}

void
spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt,
		      spdk_nvmf_tgt_destroy_done_fn cb_fn,
		      void *cb_arg)
{
	tgt->destroy_cb_fn = cb_fn;
	tgt->destroy_cb_arg = cb_arg;

	spdk_poller_unregister(&tgt->accept_poller);

	TAILQ_REMOVE(&g_nvmf_tgts, tgt, link);

	spdk_io_device_unregister(tgt, nvmf_tgt_destroy_cb);
}
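
/*
 * Teardown is asynchronous: spdk_nvmf_tgt_destroy() returns immediately, and
 * the callback fires only after the io_device is unregistered and every
 * transport has been destroyed. A hedged sketch; the callback name is
 * hypothetical:
 *
 *	static void
 *	tgt_destroy_done(void *cb_arg, int status)
 *	{
 *		// status is 0 on success; it is now safe to finish shutdown
 *	}
 *
 *	spdk_nvmf_tgt_destroy(tgt, tgt_destroy_done, NULL);
 */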

const char *
spdk_nvmf_tgt_get_name(struct spdk_nvmf_tgt *tgt)
{
	return tgt->name;
}

struct spdk_nvmf_tgt *
spdk_nvmf_get_tgt(const char *name)
{
	struct spdk_nvmf_tgt *tgt;
	uint32_t num_targets = 0;

	TAILQ_FOREACH(tgt, &g_nvmf_tgts, link) {
		if (name) {
			if (!strncmp(tgt->name, name, NVMF_TGT_NAME_MAX_LENGTH)) {
				return tgt;
			}
		}
		num_targets++;
	}

	/*
	 * Special case: if there is only one target and no name was specified,
	 * return the only available target. If there is more than one target,
	 * a name must be specified.
	 */
	if (!name && num_targets == 1) {
		return TAILQ_FIRST(&g_nvmf_tgts);
	}

	return NULL;
}

struct spdk_nvmf_tgt *
spdk_nvmf_get_first_tgt(void)
{
	return TAILQ_FIRST(&g_nvmf_tgts);
}

struct spdk_nvmf_tgt *
spdk_nvmf_get_next_tgt(struct spdk_nvmf_tgt *prev)
{
	return TAILQ_NEXT(prev, link);
}

static void
nvmf_write_subsystem_config_json(struct spdk_json_write_ctx *w,
				 struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_host *host;
	struct spdk_nvmf_subsystem_listener *listener;
	const struct spdk_nvme_transport_id *trid;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_ns_opts ns_opts;
	uint32_t max_namespaces;
	char uuid_str[SPDK_UUID_STRING_LEN];
	const char *adrfam;

	if (spdk_nvmf_subsystem_get_type(subsystem) != SPDK_NVMF_SUBTYPE_NVME) {
		return;
	}

	/* { */
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "nvmf_create_subsystem");

	/*     "params" : { */
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
	spdk_json_write_named_bool(w, "allow_any_host", spdk_nvmf_subsystem_get_allow_any_host(subsystem));
	spdk_json_write_named_string(w, "serial_number", spdk_nvmf_subsystem_get_sn(subsystem));
	spdk_json_write_named_string(w, "model_number", spdk_nvmf_subsystem_get_mn(subsystem));

	max_namespaces = spdk_nvmf_subsystem_get_max_namespaces(subsystem);
	if (max_namespaces != 0) {
		spdk_json_write_named_uint32(w, "max_namespaces", max_namespaces);
	}

	/*     } "params" */
	spdk_json_write_object_end(w);

	/* } */
	spdk_json_write_object_end(w);

	for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem); listener != NULL;
	     listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) {
		trid = spdk_nvmf_subsystem_listener_get_trid(listener);

		adrfam = spdk_nvme_transport_id_adrfam_str(trid->adrfam);

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_listener");

		/*     "params" : { */
		spdk_json_write_named_object_begin(w, "params");

		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));

		/*     "listen_address" : { */
		spdk_json_write_named_object_begin(w, "listen_address");

		spdk_json_write_named_string(w, "trtype", trid->trstring);
		if (adrfam) {
			spdk_json_write_named_string(w, "adrfam", adrfam);
		}

		spdk_json_write_named_string(w, "traddr", trid->traddr);
		spdk_json_write_named_string(w, "trsvcid", trid->trsvcid);
		/*     } "listen_address" */
		spdk_json_write_object_end(w);

		/*     } "params" */
		spdk_json_write_object_end(w);

		/* } */
		spdk_json_write_object_end(w);
	}

	for (host = spdk_nvmf_subsystem_get_first_host(subsystem); host != NULL;
	     host = spdk_nvmf_subsystem_get_next_host(subsystem, host)) {

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_host");

		/*     "params" : { */
		spdk_json_write_named_object_begin(w, "params");

		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
		spdk_json_write_named_string(w, "host", spdk_nvmf_host_get_nqn(host));

		/*     } "params" */
		spdk_json_write_object_end(w);

		/* } */
		spdk_json_write_object_end(w);
	}

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		spdk_nvmf_ns_get_opts(ns, &ns_opts, sizeof(ns_opts));

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_ns");

		/*     "params" : { */
		spdk_json_write_named_object_begin(w, "params");

		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));

		/*     "namespace" : { */
		spdk_json_write_named_object_begin(w, "namespace");

		spdk_json_write_named_uint32(w, "nsid", spdk_nvmf_ns_get_id(ns));
		spdk_json_write_named_string(w, "bdev_name", spdk_bdev_get_name(spdk_nvmf_ns_get_bdev(ns)));

		if (!spdk_mem_all_zero(ns_opts.nguid, sizeof(ns_opts.nguid))) {
			SPDK_STATIC_ASSERT(sizeof(ns_opts.nguid) == sizeof(uint64_t) * 2, "size mismatch");
			spdk_json_write_named_string_fmt(w, "nguid", "%016"PRIX64"%016"PRIX64, from_be64(&ns_opts.nguid[0]),
							 from_be64(&ns_opts.nguid[8]));
		}

		if (!spdk_mem_all_zero(ns_opts.eui64, sizeof(ns_opts.eui64))) {
			SPDK_STATIC_ASSERT(sizeof(ns_opts.eui64) == sizeof(uint64_t), "size mismatch");
			spdk_json_write_named_string_fmt(w, "eui64", "%016"PRIX64, from_be64(&ns_opts.eui64));
		}

		if (!spdk_mem_all_zero(&ns_opts.uuid, sizeof(ns_opts.uuid))) {
			spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &ns_opts.uuid);
			spdk_json_write_named_string(w, "uuid", uuid_str);
		}

		/*     } "namespace" */
		spdk_json_write_object_end(w);

		/*     } "params" */
		spdk_json_write_object_end(w);

		/* } */
		spdk_json_write_object_end(w);
	}
}

void
spdk_nvmf_tgt_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_tgt *tgt)
{
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_transport *transport;

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "nvmf_set_max_subsystems");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint32(w, "max_subsystems", tgt->max_subsystems);
	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);

	/* write transports */
	TAILQ_FOREACH(transport, &tgt->transports, link) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_create_transport");

		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "trtype", transport->ops->name);
		spdk_json_write_named_uint32(w, "max_queue_depth", transport->opts.max_queue_depth);
		spdk_json_write_named_uint32(w, "max_io_qpairs_per_ctrlr",
					     transport->opts.max_qpairs_per_ctrlr - 1);
		spdk_json_write_named_uint32(w, "in_capsule_data_size", transport->opts.in_capsule_data_size);
		spdk_json_write_named_uint32(w, "max_io_size", transport->opts.max_io_size);
		spdk_json_write_named_uint32(w, "io_unit_size", transport->opts.io_unit_size);
		spdk_json_write_named_uint32(w, "max_aq_depth", transport->opts.max_aq_depth);
		spdk_json_write_named_uint32(w, "num_shared_buffers", transport->opts.num_shared_buffers);
		spdk_json_write_named_uint32(w, "buf_cache_size", transport->opts.buf_cache_size);
		spdk_json_write_named_bool(w, "dif_insert_or_strip", transport->opts.dif_insert_or_strip);
		if (transport->ops->dump_opts) {
			transport->ops->dump_opts(transport, w);
		}
		spdk_json_write_named_uint32(w, "abort_timeout_sec", transport->opts.abort_timeout_sec);
		spdk_json_write_object_end(w);

		spdk_json_write_object_end(w);
	}

	subsystem = spdk_nvmf_subsystem_get_first(tgt);
	while (subsystem) {
		nvmf_write_subsystem_config_json(w, subsystem);
		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
	}
}
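
/*
 * For reference, the configuration emitted above is a sequence of JSON-RPC
 * method invocations that can be replayed to rebuild the target. An
 * abbreviated sketch of the shape (all values are illustrative):
 *
 *	{ "method": "nvmf_set_max_subsystems",
 *	  "params": { "max_subsystems": 1024 } }
 *	{ "method": "nvmf_create_transport",
 *	  "params": { "trtype": "TCP", "max_queue_depth": 128, ... } }
 *	{ "method": "nvmf_create_subsystem",
 *	  "params": { "nqn": "nqn.2016-06.io.spdk:cnode1", ... } }
 *	{ "method": "nvmf_subsystem_add_listener",
 *	  "params": { "nqn": "...", "listen_address": { "trtype": "TCP",
 *	    "adrfam": "IPv4", "traddr": "127.0.0.1", "trsvcid": "4420" } } }
 *	{ "method": "nvmf_subsystem_add_ns",
 *	  "params": { "nqn": "...", "namespace": { "nsid": 1, "bdev_name": "..." } } }
 */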

static void
nvmf_listen_opts_copy(struct spdk_nvmf_listen_opts *opts,
		      const struct spdk_nvmf_listen_opts *opts_src, size_t opts_size)
{
	assert(opts);
	assert(opts_src);

	opts->opts_size = opts_size;

#define SET_FIELD(field) \
    if (offsetof(struct spdk_nvmf_listen_opts, field) + sizeof(opts->field) <= opts_size) { \
                 opts->field = opts_src->field; \
    } \

	SET_FIELD(transport_specific);
#undef SET_FIELD

	/* Do not remove this statement. Update the size check below whenever you
	 * add a new field, and remember to add a corresponding SET_FIELD statement
	 * for it. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_listen_opts) == 16, "Incorrect size");
}
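
/*
 * The opts_size/SET_FIELD pattern above preserves ABI compatibility: callers
 * report the size of the structure they were compiled against, and only the
 * fields that fit within that size are copied. A hedged sketch of the effect:
 *
 *	struct spdk_nvmf_listen_opts opts;
 *
 *	spdk_nvmf_listen_opts_init(&opts, sizeof(opts));
 *	// Fields added to the structure after this caller was built are left
 *	// at their defaults instead of being read or written out of bounds.
 */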

void
spdk_nvmf_listen_opts_init(struct spdk_nvmf_listen_opts *opts, size_t opts_size)
{
	struct spdk_nvmf_listen_opts opts_local = {};

	/* local version of opts should have defaults set here */

	nvmf_listen_opts_copy(opts, &opts_local, opts_size);
}

int
spdk_nvmf_tgt_listen_ext(struct spdk_nvmf_tgt *tgt, const struct spdk_nvme_transport_id *trid,
			 struct spdk_nvmf_listen_opts *opts)
{
	struct spdk_nvmf_transport *transport;
	int rc;
	struct spdk_nvmf_listen_opts opts_local = {};

	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return -EINVAL;
	}

	if (!opts->opts_size) {
		SPDK_ERRLOG("The opts_size in opts structure should not be zero\n");
		return -EINVAL;
	}

	transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring);
	if (!transport) {
		SPDK_ERRLOG("Unable to find %s transport. The transport must be created first and properly registered.\n",
			    trid->trstring);
		return -EINVAL;
	}

	nvmf_listen_opts_copy(&opts_local, opts, opts->opts_size);
	rc = spdk_nvmf_transport_listen(transport, trid, &opts_local);
	if (rc < 0) {
		SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr);
	}

	return rc;
}
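
/*
 * Hedged usage sketch for spdk_nvmf_tgt_listen_ext(). The transport ID values
 * are illustrative; the TCP transport must already have been created and
 * added to this target:
 *
 *	struct spdk_nvme_transport_id trid = {};
 *	struct spdk_nvmf_listen_opts lopts;
 *
 *	trid.trtype = SPDK_NVME_TRANSPORT_TCP;
 *	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
 *	snprintf(trid.trstring, sizeof(trid.trstring), "TCP");
 *	snprintf(trid.traddr, sizeof(trid.traddr), "127.0.0.1");
 *	snprintf(trid.trsvcid, sizeof(trid.trsvcid), "4420");
 *
 *	spdk_nvmf_listen_opts_init(&lopts, sizeof(lopts));
 *	if (spdk_nvmf_tgt_listen_ext(tgt, &trid, &lopts) != 0) {
 *		// unknown transport, or the transport failed to listen
 *	}
 */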

int
spdk_nvmf_tgt_listen(struct spdk_nvmf_tgt *tgt, struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_listen_opts opts;

	spdk_nvmf_listen_opts_init(&opts, sizeof(opts));

	return spdk_nvmf_tgt_listen_ext(tgt, trid, &opts);
}

int
spdk_nvmf_tgt_stop_listen(struct spdk_nvmf_tgt *tgt,
			  struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_transport *transport;
	int rc;

	transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring);
	if (!transport) {
		SPDK_ERRLOG("Unable to find %s transport. The transport must be created first and properly registered.\n",
			    trid->trstring);
		return -EINVAL;
	}

	rc = spdk_nvmf_transport_stop_listen(transport, trid);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to stop listening on address '%s'\n", trid->traddr);
		return rc;
	}
	return 0;
}

struct spdk_nvmf_tgt_add_transport_ctx {
	struct spdk_nvmf_tgt *tgt;
	struct spdk_nvmf_transport *transport;
	spdk_nvmf_tgt_add_transport_done_fn cb_fn;
	void *cb_arg;
};

static void
_nvmf_tgt_add_transport_done(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cb_fn(ctx->cb_arg, status);

	free(ctx);
}

static void
_nvmf_tgt_add_transport(struct spdk_io_channel_iter *i)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
	int rc;

	rc = nvmf_poll_group_add_transport(group, ctx->transport);
	spdk_for_each_channel_continue(i, rc);
}

void
spdk_nvmf_tgt_add_transport(struct spdk_nvmf_tgt *tgt,
			    struct spdk_nvmf_transport *transport,
			    spdk_nvmf_tgt_add_transport_done_fn cb_fn,
			    void *cb_arg)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx;

	if (spdk_nvmf_tgt_get_transport(tgt, transport->ops->name)) {
		cb_fn(cb_arg, -EEXIST);
		return; /* transport already created */
	}

	transport->tgt = tgt;
	TAILQ_INSERT_TAIL(&tgt->transports, transport, link);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->tgt = tgt;
	ctx->transport = transport;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(tgt,
			      _nvmf_tgt_add_transport,
			      ctx,
			      _nvmf_tgt_add_transport_done);
}
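
/*
 * Hedged usage sketch: transports are created through the transport layer
 * (e.g. spdk_nvmf_transport_create(); its exact signature varies across SPDK
 * releases) and then handed to the target. The callback name below is
 * hypothetical. Completion is asynchronous because the transport is added to
 * every existing poll group via spdk_for_each_channel():
 *
 *	static void
 *	add_transport_done(void *cb_arg, int status)
 *	{
 *		// status is 0 on success, -EEXIST if the transport was already added
 *	}
 *
 *	spdk_nvmf_tgt_add_transport(tgt, transport, add_transport_done, NULL);
 */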

struct spdk_nvmf_subsystem *
spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
{
	struct spdk_nvmf_subsystem *subsystem;
	uint32_t sid;

	if (!subnqn) {
		return NULL;
	}

	/* Ensure that subnqn is null terminated */
	if (!memchr(subnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1)) {
		SPDK_ERRLOG("Connect SUBNQN is not null terminated\n");
		return NULL;
	}

	for (sid = 0; sid < tgt->max_subsystems; sid++) {
		subsystem = tgt->subsystems[sid];
		if (subsystem == NULL) {
			continue;
		}

		if (strcmp(subnqn, subsystem->subnqn) == 0) {
			return subsystem;
		}
	}

	return NULL;
}

struct spdk_nvmf_transport *
spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
{
	struct spdk_nvmf_transport *transport;

	TAILQ_FOREACH(transport, &tgt->transports, link) {
		if (!strncasecmp(transport->ops->name, transport_name, SPDK_NVMF_TRSTRING_MAX_LEN)) {
			return transport;
		}
	}
	return NULL;
}

struct nvmf_new_qpair_ctx {
	struct spdk_nvmf_qpair *qpair;
	struct spdk_nvmf_poll_group *group;
};

static void
_nvmf_poll_group_add(void *_ctx)
{
	struct nvmf_new_qpair_ctx *ctx = _ctx;
	struct spdk_nvmf_qpair *qpair = ctx->qpair;
	struct spdk_nvmf_poll_group *group = ctx->group;

	free(_ctx);

	if (spdk_nvmf_poll_group_add(group, qpair) != 0) {
		SPDK_ERRLOG("Unable to add the qpair to a poll group.\n");
		spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
	}
}

void
spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_poll_group *group;
	struct nvmf_new_qpair_ctx *ctx;

	group = spdk_nvmf_get_optimal_poll_group(qpair);
	if (group == NULL) {
		if (tgt->next_poll_group == NULL) {
			tgt->next_poll_group = TAILQ_FIRST(&tgt->poll_groups);
			if (tgt->next_poll_group == NULL) {
				SPDK_ERRLOG("No poll groups exist.\n");
				spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
				return;
			}
		}
		group = tgt->next_poll_group;
		tgt->next_poll_group = TAILQ_NEXT(group, link);
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		SPDK_ERRLOG("Unable to send message to poll group.\n");
		spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
		return;
	}

	ctx->qpair = qpair;
	ctx->group = group;

	spdk_thread_send_msg(group->thread, _nvmf_poll_group_add, ctx);
}

struct spdk_nvmf_poll_group *
spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt)
{
	struct spdk_io_channel *ch;

	ch = spdk_get_io_channel(tgt);
	if (!ch) {
		SPDK_ERRLOG("Unable to get I/O channel for target\n");
		return NULL;
	}

	return spdk_io_channel_get_ctx(ch);
}
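
/*
 * Hedged usage sketch: a poll group is created on each thread that should
 * service qpairs, typically once per core at application start. The callback
 * name is hypothetical, and both calls must run on the owning thread:
 *
 *	static void
 *	pg_destroy_done(void *cb_arg, int status)
 *	{
 *		// all qpairs are disconnected and the group's channel is released
 *	}
 *
 *	struct spdk_nvmf_poll_group *pg = spdk_nvmf_poll_group_create(tgt);
 *	...
 *	spdk_nvmf_poll_group_destroy(pg, pg_destroy_done, NULL);
 */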

void
spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group,
			     spdk_nvmf_poll_group_destroy_done_fn cb_fn,
			     void *cb_arg)
{
	assert(group->destroy_cb_fn == NULL);
	group->destroy_cb_fn = cb_fn;
	group->destroy_cb_arg = cb_arg;

	/* This function will put the io_channel associated with this poll group */
	nvmf_tgt_destroy_poll_group_qpairs(group);
}

int
spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
			 struct spdk_nvmf_qpair *qpair)
{
	int rc = -1;
	struct spdk_nvmf_transport_poll_group *tgroup;

	TAILQ_INIT(&qpair->outstanding);
	qpair->group = group;
	qpair->ctrlr = NULL;
	qpair->disconnect_started = false;

	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (tgroup->transport == qpair->transport) {
			rc = nvmf_transport_poll_group_add(tgroup, qpair);
			break;
		}
	}

	/* We add the qpair to the group only if it was successfully added to the tgroup */
	if (rc == 0) {
		TAILQ_INSERT_TAIL(&group->qpairs, qpair, link);
		nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ACTIVE);
	}

	return rc;
}

static void
_nvmf_ctrlr_destruct(void *ctx)
{
	struct spdk_nvmf_ctrlr *ctrlr = ctx;

	nvmf_ctrlr_destruct(ctrlr);
}

static void
_nvmf_transport_qpair_fini_complete(void *cb_ctx)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx = cb_ctx;

	if (qpair_ctx->cb_fn) {
		spdk_thread_send_msg(qpair_ctx->thread, qpair_ctx->cb_fn, qpair_ctx->ctx);
	}
	free(qpair_ctx);
}

static void
_nvmf_transport_qpair_fini(void *ctx)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;

	nvmf_transport_qpair_fini(qpair_ctx->qpair, _nvmf_transport_qpair_fini_complete, qpair_ctx);
}

static void
_nvmf_ctrlr_free_from_qpair(void *ctx)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_ctrlr *ctrlr = qpair_ctx->ctrlr;
	uint32_t count;

	spdk_bit_array_clear(ctrlr->qpair_mask, qpair_ctx->qid);
	count = spdk_bit_array_count_set(ctrlr->qpair_mask);
	if (count == 0) {
		ctrlr->in_destruct = true;
		spdk_thread_send_msg(ctrlr->subsys->thread, _nvmf_ctrlr_destruct, ctrlr);
	}

	spdk_thread_send_msg(qpair_ctx->thread, _nvmf_transport_qpair_fini, qpair_ctx);
}

void
spdk_nvmf_poll_group_remove(struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
	struct spdk_nvmf_transport_poll_group *tgroup;
	struct spdk_nvmf_request *req, *tmp;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	int rc;

	nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ERROR);

	/* Find the tgroup and remove the qpair from the tgroup */
	TAILQ_FOREACH(tgroup, &qpair->group->tgroups, link) {
		if (tgroup->transport == qpair->transport) {
			rc = nvmf_transport_poll_group_remove(tgroup, qpair);
			if (rc && (rc != ENOTSUP)) {
				SPDK_ERRLOG("Cannot remove qpair=%p from transport group=%p\n",
					    qpair, tgroup);
			}
			break;
		}
	}

	if (ctrlr) {
		sgroup = &qpair->group->sgroups[ctrlr->subsys->id];
		TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
			if (req->qpair == qpair) {
				TAILQ_REMOVE(&sgroup->queued, req, link);
				if (nvmf_transport_req_free(req)) {
					SPDK_ERRLOG("Transport request free error!\n");
				}
			}
		}
	}

	TAILQ_REMOVE(&qpair->group->qpairs, qpair, link);
	qpair->group = NULL;
}

static void
_nvmf_qpair_destroy(void *ctx, int status)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_qpair *qpair = qpair_ctx->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	assert(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING);
	qpair_ctx->qid = qpair->qid;

	spdk_nvmf_poll_group_remove(qpair);

	if (!ctrlr || !ctrlr->thread) {
		nvmf_transport_qpair_fini(qpair, _nvmf_transport_qpair_fini_complete, qpair_ctx);
		return;
	}

	qpair_ctx->ctrlr = ctrlr;
	spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_free_from_qpair, qpair_ctx);
}

static void
_nvmf_qpair_disconnect_msg(void *ctx)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;

	spdk_nvmf_qpair_disconnect(qpair_ctx->qpair, qpair_ctx->cb_fn, qpair_ctx->ctx);
	free(ctx);
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx;

	if (__atomic_test_and_set(&qpair->disconnect_started, __ATOMIC_RELAXED)) {
		if (cb_fn) {
			cb_fn(ctx);
		}
		return 0;
	}

	/* If we get a qpair in the uninitialized state, we can just destroy it immediately */
	if (qpair->state == SPDK_NVMF_QPAIR_UNINITIALIZED) {
		nvmf_transport_qpair_fini(qpair, NULL, NULL);
		if (cb_fn) {
			cb_fn(ctx);
		}
		return 0;
	}

	assert(qpair->group != NULL);
	if (spdk_get_thread() != qpair->group->thread) {
		/* clear the atomic so we can set it on the next call on the proper thread. */
		__atomic_clear(&qpair->disconnect_started, __ATOMIC_RELAXED);
		qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx));
		if (!qpair_ctx) {
			SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n");
			return -ENOMEM;
		}
		qpair_ctx->qpair = qpair;
		qpair_ctx->cb_fn = cb_fn;
		qpair_ctx->thread = qpair->group->thread;
		qpair_ctx->ctx = ctx;
		spdk_thread_send_msg(qpair->group->thread, _nvmf_qpair_disconnect_msg, qpair_ctx);
		return 0;
	}

	assert(qpair->state == SPDK_NVMF_QPAIR_ACTIVE);
	nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_DEACTIVATING);

	qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx));
	if (!qpair_ctx) {
		SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n");
		return -ENOMEM;
	}

	qpair_ctx->qpair = qpair;
	qpair_ctx->cb_fn = cb_fn;
	qpair_ctx->thread = qpair->group->thread;
	qpair_ctx->ctx = ctx;

	/* Check for outstanding I/O */
	if (!TAILQ_EMPTY(&qpair->outstanding)) {
		qpair->state_cb = _nvmf_qpair_destroy;
		qpair->state_cb_arg = qpair_ctx;
		nvmf_qpair_free_aer(qpair);
		return 0;
	}

	_nvmf_qpair_destroy(qpair_ctx, 0);

	return 0;
}
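
/*
 * Hedged usage sketch for spdk_nvmf_qpair_disconnect(). The call is safe from
 * any thread (it hops to the poll group's thread if needed) and duplicate
 * calls on an already-disconnecting qpair simply invoke the callback. The
 * callback name is hypothetical:
 *
 *	static void
 *	qpair_disconnect_done(void *ctx)
 *	{
 *		// the qpair has been removed from its poll group and freed
 *	}
 *
 *	if (spdk_nvmf_qpair_disconnect(qpair, qpair_disconnect_done, ctx) != 0) {
 *		// -ENOMEM is the only failure mode; no callback will fire
 *	}
 */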

int
spdk_nvmf_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
			      struct spdk_nvme_transport_id *trid)
{
	return nvmf_transport_qpair_get_peer_trid(qpair, trid);
}

int
spdk_nvmf_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
			       struct spdk_nvme_transport_id *trid)
{
	return nvmf_transport_qpair_get_local_trid(qpair, trid);
}

int
spdk_nvmf_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
				struct spdk_nvme_transport_id *trid)
{
	return nvmf_transport_qpair_get_listen_trid(qpair, trid);
}

int
nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
			      struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_transport_poll_group *tgroup;

	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (tgroup->transport == transport) {
			/* Transport already in the poll group */
			return 0;
		}
	}

	tgroup = nvmf_transport_poll_group_create(transport);
	if (!tgroup) {
		SPDK_ERRLOG("Unable to create poll group for transport\n");
		return -1;
	}

	tgroup->group = group;
	TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);

	return 0;
}

static int
poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
			    struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	uint32_t new_num_ns, old_num_ns;
	uint32_t i, j;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_registrant *reg, *tmp;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
	struct spdk_nvmf_ctrlr *ctrlr;
	bool ns_changed;

	/* Make sure our poll group has memory for this subsystem allocated */
	if (subsystem->id >= group->num_sgroups) {
		return -ENOMEM;
	}

	sgroup = &group->sgroups[subsystem->id];

	/* Make sure the array of namespace information is the correct size */
	new_num_ns = subsystem->max_nsid;
	old_num_ns = sgroup->num_ns;

	ns_changed = false;

	if (old_num_ns == 0) {
		if (new_num_ns > 0) {
			/* First allocation */
			sgroup->ns_info = calloc(new_num_ns, sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
			if (!sgroup->ns_info) {
				return -ENOMEM;
			}
		}
	} else if (new_num_ns > old_num_ns) {
		void *buf;

		/* Make the array larger */
		buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
		if (!buf) {
			return -ENOMEM;
		}

		sgroup->ns_info = buf;

		/* Null out the new namespace information slots */
		for (i = old_num_ns; i < new_num_ns; i++) {
			memset(&sgroup->ns_info[i], 0, sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
		}
	} else if (new_num_ns < old_num_ns) {
		void *buf;

		/* Free the extra I/O channels */
		for (i = new_num_ns; i < old_num_ns; i++) {
			ns_info = &sgroup->ns_info[i];

			if (ns_info->channel) {
				spdk_put_io_channel(ns_info->channel);
				ns_info->channel = NULL;
			}
		}

		/* Make the array smaller */
		if (new_num_ns > 0) {
			buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
			if (!buf) {
				return -ENOMEM;
			}
			sgroup->ns_info = buf;
		} else {
			free(sgroup->ns_info);
			sgroup->ns_info = NULL;
		}
	}

	sgroup->num_ns = new_num_ns;

	/* Detect bdevs that were added or removed */
	for (i = 0; i < sgroup->num_ns; i++) {
		ns = subsystem->ns[i];
		ns_info = &sgroup->ns_info[i];
		ch = ns_info->channel;

		if (ns == NULL && ch == NULL) {
			/* Both NULL. Leave empty */
		} else if (ns == NULL && ch != NULL) {
			/* There was a channel here, but the namespace is gone. */
			ns_changed = true;
			spdk_put_io_channel(ch);
			ns_info->channel = NULL;
		} else if (ns != NULL && ch == NULL) {
			/* A namespace appeared but there is no channel yet */
			ns_changed = true;
			ch = spdk_bdev_get_io_channel(ns->desc);
			if (ch == NULL) {
				SPDK_ERRLOG("Could not allocate I/O channel.\n");
				return -ENOMEM;
			}
			ns_info->channel = ch;
		} else if (spdk_uuid_compare(&ns_info->uuid, spdk_bdev_get_uuid(ns->bdev)) != 0) {
			/* A namespace was here before, but was replaced by a new one. */
			ns_changed = true;
			spdk_put_io_channel(ns_info->channel);
			memset(ns_info, 0, sizeof(*ns_info));

			ch = spdk_bdev_get_io_channel(ns->desc);
			if (ch == NULL) {
				SPDK_ERRLOG("Could not allocate I/O channel.\n");
				return -ENOMEM;
			}
			ns_info->channel = ch;
		} else if (ns_info->num_blocks != spdk_bdev_get_num_blocks(ns->bdev)) {
			/* Namespace is still there but size has changed */
			SPDK_DEBUGLOG(nvmf, "Namespace resized: subsystem_id %u,"
				      " nsid %u, pg %p, old %" PRIu64 ", new %" PRIu64 "\n",
				      subsystem->id,
				      ns->nsid,
				      group,
				      ns_info->num_blocks,
				      spdk_bdev_get_num_blocks(ns->bdev));
			ns_changed = true;
		}

		if (ns == NULL) {
			memset(ns_info, 0, sizeof(*ns_info));
		} else {
			ns_info->uuid = *spdk_bdev_get_uuid(ns->bdev);
			ns_info->num_blocks = spdk_bdev_get_num_blocks(ns->bdev);
			ns_info->crkey = ns->crkey;
			ns_info->rtype = ns->rtype;
			if (ns->holder) {
				ns_info->holder_id = ns->holder->hostid;
			}

			memset(&ns_info->reg_hostid, 0, SPDK_NVMF_MAX_NUM_REGISTRANTS * sizeof(struct spdk_uuid));
			j = 0;
			TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
				if (j >= SPDK_NVMF_MAX_NUM_REGISTRANTS) {
					SPDK_ERRLOG("A maximum of %u registrants is supported.\n", SPDK_NVMF_MAX_NUM_REGISTRANTS);
					return -EINVAL;
				}
				ns_info->reg_hostid[j++] = reg->hostid;
			}
		}
	}

	if (ns_changed) {
		TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
			if (ctrlr->admin_qpair->group == group) {
				nvmf_ctrlr_async_event_ns_notice(ctrlr);
				nvmf_ctrlr_async_event_ana_change_notice(ctrlr);
			}
		}
	}

	return 0;
}

int
nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
				 struct spdk_nvmf_subsystem *subsystem)
{
	return poll_group_update_subsystem(group, subsystem);
}

int
nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
			      struct spdk_nvmf_subsystem *subsystem,
			      spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
	int rc = 0;
	struct spdk_nvmf_subsystem_poll_group *sgroup = &group->sgroups[subsystem->id];

	TAILQ_INIT(&sgroup->queued);

	rc = poll_group_update_subsystem(group, subsystem);
	if (rc) {
		nvmf_poll_group_remove_subsystem(group, subsystem, NULL, NULL);
		goto fini;
	}

	sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
fini:
	if (cb_fn) {
		cb_fn(cb_arg, rc);
	}

	return rc;
}

static void
_nvmf_poll_group_remove_subsystem_cb(void *ctx, int status)
{
	struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_poll_group *group;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	spdk_nvmf_poll_group_mod_done cpl_fn = NULL;
	void *cpl_ctx = NULL;
	uint32_t nsid;

	group = qpair_ctx->group;
	subsystem = qpair_ctx->subsystem;
	cpl_fn = qpair_ctx->cpl_fn;
	cpl_ctx = qpair_ctx->cpl_ctx;
	sgroup = &group->sgroups[subsystem->id];

	if (status) {
		goto fini;
	}

	for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
		if (sgroup->ns_info[nsid].channel) {
			spdk_put_io_channel(sgroup->ns_info[nsid].channel);
			sgroup->ns_info[nsid].channel = NULL;
		}
	}

	sgroup->num_ns = 0;
	free(sgroup->ns_info);
	sgroup->ns_info = NULL;
fini:
	free(qpair_ctx);
	if (cpl_fn) {
		cpl_fn(cpl_ctx, status);
	}
}

static void
_nvmf_subsystem_disconnect_next_qpair(void *ctx)
{
	struct spdk_nvmf_qpair *qpair;
	struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_poll_group *group;
	int rc = 0;

	group = qpair_ctx->group;
	subsystem = qpair_ctx->subsystem;

	TAILQ_FOREACH(qpair, &group->qpairs, link) {
		if ((qpair->ctrlr != NULL) && (qpair->ctrlr->subsys == subsystem)) {
			break;
		}
	}

	if (qpair) {
		rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_subsystem_disconnect_next_qpair, qpair_ctx);
	}

	if (!qpair || rc != 0) {
		_nvmf_poll_group_remove_subsystem_cb(ctx, rc);
	}
}

void
nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
				 struct spdk_nvmf_subsystem *subsystem,
				 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
	struct spdk_nvmf_qpair *qpair;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	struct nvmf_qpair_disconnect_many_ctx *ctx;
	int rc = 0;

	ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx));

	if (!ctx) {
		SPDK_ERRLOG("Unable to allocate memory for context to remove poll subsystem\n");
		goto fini;
	}

	ctx->group = group;
	ctx->subsystem = subsystem;
	ctx->cpl_fn = cb_fn;
	ctx->cpl_ctx = cb_arg;

	sgroup = &group->sgroups[subsystem->id];
	sgroup->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;

	TAILQ_FOREACH(qpair, &group->qpairs, link) {
		if ((qpair->ctrlr != NULL) && (qpair->ctrlr->subsys == subsystem)) {
			break;
		}
	}

	if (qpair) {
		rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_subsystem_disconnect_next_qpair, ctx);
	} else {
		/* call the callback immediately. It will handle any channel iteration */
		_nvmf_poll_group_remove_subsystem_cb(ctx, 0);
	}

	if (rc != 0 && rc != -EINPROGRESS) {
		free(ctx);
		goto fini;
	}

	return;
fini:
	if (cb_fn) {
		cb_fn(cb_arg, rc);
	}
}

void
nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
				struct spdk_nvmf_subsystem *subsystem,
				spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	int rc = 0;

	if (subsystem->id >= group->num_sgroups) {
		rc = -1;
		goto fini;
	}

	sgroup = &group->sgroups[subsystem->id];
	if (sgroup == NULL) {
		rc = -1;
		goto fini;
	}

	if (sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSED) {
		goto fini;
	}
	sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSING;

	if (sgroup->io_outstanding > 0) {
		assert(sgroup->cb_fn == NULL);
		sgroup->cb_fn = cb_fn;
		assert(sgroup->cb_arg == NULL);
		sgroup->cb_arg = cb_arg;
		return;
	}

	assert(sgroup->io_outstanding == 0);
	sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
fini:
	if (cb_fn) {
		cb_fn(cb_arg, rc);
	}
}

void
nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
				 struct spdk_nvmf_subsystem *subsystem,
				 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
	struct spdk_nvmf_request *req, *tmp;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	int rc = 0;

	if (subsystem->id >= group->num_sgroups) {
		rc = -1;
		goto fini;
	}

	sgroup = &group->sgroups[subsystem->id];

	if (sgroup->state == SPDK_NVMF_SUBSYSTEM_ACTIVE) {
		goto fini;
	}

	rc = poll_group_update_subsystem(group, subsystem);
	if (rc) {
		goto fini;
	}

	sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;

	/* Release all queued requests */
	TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
		TAILQ_REMOVE(&sgroup->queued, req, link);
		spdk_nvmf_request_exec(req);
	}
fini:
	if (cb_fn) {
		cb_fn(cb_arg, rc);
	}
}

struct spdk_nvmf_poll_group *
spdk_nvmf_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_transport_poll_group *tgroup;

	tgroup = nvmf_transport_get_optimal_poll_group(qpair->transport, qpair);

	if (tgroup == NULL) {
		return NULL;
	}

	return tgroup->group;
}

int
spdk_nvmf_poll_group_get_stat(struct spdk_nvmf_tgt *tgt,
			      struct spdk_nvmf_poll_group_stat *stat)
{
	struct spdk_io_channel *ch;
	struct spdk_nvmf_poll_group *group;

	if (tgt == NULL || stat == NULL) {
		return -EINVAL;
	}

	ch = spdk_get_io_channel(tgt);
	group = spdk_io_channel_get_ctx(ch);
	*stat = group->stat;
	spdk_put_io_channel(ch);
	return 0;
}
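
/*
 * Hedged usage sketch: spdk_nvmf_poll_group_get_stat() reports the statistics
 * of the poll group bound to the calling thread's channel, so it must run on
 * a thread that has (or can get) the target's I/O channel. The exact counters
 * in the stat structure depend on the SPDK version:
 *
 *	struct spdk_nvmf_poll_group_stat stat = {};
 *
 *	if (spdk_nvmf_poll_group_get_stat(tgt, &stat) == 0) {
 *		// inspect the counters in stat
 *	}
 */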