xref: /spdk/lib/nvmf/nvmf.c (revision b37db06935181fd0e8f5592a96d860040abaa201)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2016 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2018-2019, 2021 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/bdev.h"
10 #include "spdk/bit_array.h"
11 #include "spdk/thread.h"
12 #include "spdk/nvmf.h"
13 #include "spdk/endian.h"
14 #include "spdk/string.h"
15 #include "spdk/log.h"
16 #include "spdk_internal/usdt.h"
17 
18 #include "nvmf_internal.h"
19 #include "transport.h"
20 
21 SPDK_LOG_REGISTER_COMPONENT(nvmf)
22 
23 #define SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS 1024
24 
25 static TAILQ_HEAD(, spdk_nvmf_tgt) g_nvmf_tgts = TAILQ_HEAD_INITIALIZER(g_nvmf_tgts);
26 
27 typedef void (*nvmf_qpair_disconnect_cpl)(void *ctx, int status);
28 
29 /* supplied to a single call to nvmf_qpair_disconnect */
30 struct nvmf_qpair_disconnect_ctx {
31 	struct spdk_nvmf_qpair *qpair;
32 	struct spdk_nvmf_ctrlr *ctrlr;
33 	uint16_t qid;
34 };
35 
36 /*
37  * There are several places where we need to iterate over all of a poll group's qpairs and selectively
38  * disconnect them. To do this sequentially without overlap, we provide a context that records where the
39  * iteration left off, so nvmf_qpair_disconnect can be called on the next desired qpair.
40  */
41 struct nvmf_qpair_disconnect_many_ctx {
42 	struct spdk_nvmf_subsystem *subsystem;
43 	struct spdk_nvmf_poll_group *group;
44 	spdk_nvmf_poll_group_mod_done cpl_fn;
45 	void *cpl_ctx;
46 };
47 
48 static struct spdk_nvmf_referral *
49 nvmf_tgt_find_referral(struct spdk_nvmf_tgt *tgt,
50 		       const struct spdk_nvme_transport_id *trid)
51 {
52 	struct spdk_nvmf_referral *referral;
53 
54 	TAILQ_FOREACH(referral, &tgt->referrals, link) {
55 		if (spdk_nvme_transport_id_compare(&referral->trid, trid) == 0) {
56 			return referral;
57 		}
58 	}
59 
60 	return NULL;
61 }
62 
63 int
64 spdk_nvmf_tgt_add_referral(struct spdk_nvmf_tgt *tgt,
65 			   const struct spdk_nvmf_referral_opts *uopts)
66 {
67 	struct spdk_nvmf_referral *referral;
68 	struct spdk_nvmf_referral_opts opts = {};
69 	struct spdk_nvme_transport_id *trid = &opts.trid;
70 
71 	memcpy(&opts, uopts, spdk_min(uopts->size, sizeof(opts)));
72 	if (trid->subnqn[0] == '\0') {
73 		snprintf(trid->subnqn, sizeof(trid->subnqn), "%s", SPDK_NVMF_DISCOVERY_NQN);
74 	}
75 
76 	if (!nvmf_nqn_is_valid(trid->subnqn)) {
77 		SPDK_ERRLOG("Invalid subsystem NQN\n");
78 		return -EINVAL;
79 	}
80 
81 	/* If the entry already exists, just ignore it. */
82 	if (nvmf_tgt_find_referral(tgt, trid)) {
83 		return 0;
84 	}
85 
86 	referral = calloc(1, sizeof(*referral));
87 	if (!referral) {
88 		SPDK_ERRLOG("Failed to allocate memory for a referral\n");
89 		return -ENOMEM;
90 	}
91 
92 	referral->entry.subtype = nvmf_nqn_is_discovery(trid->subnqn) ?
93 				  SPDK_NVMF_SUBTYPE_DISCOVERY :
94 				  SPDK_NVMF_SUBTYPE_NVME;
95 	referral->entry.treq.secure_channel = opts.secure_channel ?
96 					      SPDK_NVMF_TREQ_SECURE_CHANNEL_REQUIRED :
97 					      SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_REQUIRED;
98 	referral->entry.cntlid = 0xffff;
99 	referral->entry.trtype = trid->trtype;
100 	referral->entry.adrfam = trid->adrfam;
101 	memcpy(&referral->trid, trid, sizeof(struct spdk_nvme_transport_id));
102 	spdk_strcpy_pad(referral->entry.subnqn, trid->subnqn, sizeof(trid->subnqn), '\0');
103 	spdk_strcpy_pad(referral->entry.trsvcid, trid->trsvcid, sizeof(referral->entry.trsvcid), ' ');
104 	spdk_strcpy_pad(referral->entry.traddr, trid->traddr, sizeof(referral->entry.traddr), ' ');
105 
106 	TAILQ_INSERT_HEAD(&tgt->referrals, referral, link);
107 	spdk_nvmf_send_discovery_log_notice(tgt, NULL);
108 
109 	return 0;
110 }
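
/*
 * Hedged usage sketch (added for illustration, not part of the upstream file): registering
 * a discovery referral. Only the fields consumed by spdk_nvmf_tgt_add_referral() above are
 * set; the address values and the "tgt" pointer are assumptions.
 *
 *	struct spdk_nvmf_referral_opts ropts = { .size = sizeof(ropts) };
 *	int rc;
 *
 *	ropts.trid.trtype = SPDK_NVME_TRANSPORT_TCP;
 *	ropts.trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
 *	snprintf(ropts.trid.traddr, sizeof(ropts.trid.traddr), "%s", "192.168.0.10");
 *	snprintf(ropts.trid.trsvcid, sizeof(ropts.trid.trsvcid), "%s", "4420");
 *	ropts.secure_channel = false;
 *
 *	rc = spdk_nvmf_tgt_add_referral(tgt, &ropts);
 *	if (rc != 0) {
 *		SPDK_ERRLOG("Failed to add referral: %d\n", rc);
 *	}
 *
 * Leaving trid.subnqn empty makes the function default it to the discovery NQN, as done above.
 */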
111 
112 int
113 spdk_nvmf_tgt_remove_referral(struct spdk_nvmf_tgt *tgt,
114 			      const struct spdk_nvmf_referral_opts *uopts)
115 {
116 	struct spdk_nvmf_referral *referral;
117 	struct spdk_nvmf_referral_opts opts = {};
118 	struct spdk_nvme_transport_id *trid = &opts.trid;
119 
120 	memcpy(&opts, uopts, spdk_min(uopts->size, sizeof(opts)));
121 	if (trid->subnqn[0] == '\0') {
122 		snprintf(trid->subnqn, sizeof(trid->subnqn), "%s", SPDK_NVMF_DISCOVERY_NQN);
123 	}
124 
125 	referral = nvmf_tgt_find_referral(tgt, &opts.trid);
126 	if (referral == NULL) {
127 		return -ENOENT;
128 	}
129 
130 	TAILQ_REMOVE(&tgt->referrals, referral, link);
131 	spdk_nvmf_send_discovery_log_notice(tgt, NULL);
132 
133 	free(referral);
134 
135 	return 0;
136 }
137 
138 void
139 nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair,
140 		     enum spdk_nvmf_qpair_state state)
141 {
142 	assert(qpair != NULL);
143 	assert(qpair->group->thread == spdk_get_thread());
144 
145 	qpair->state = state;
146 }
147 
148 /*
149  * Reset and clean up the poll group (I/O channel code will actually free the
150  * group).
151  */
152 static void
153 nvmf_tgt_cleanup_poll_group(struct spdk_nvmf_poll_group *group)
154 {
155 	struct spdk_nvmf_transport_poll_group *tgroup, *tmp;
156 	struct spdk_nvmf_subsystem_poll_group *sgroup;
157 	uint32_t sid, nsid;
158 
159 	TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
160 		TAILQ_REMOVE(&group->tgroups, tgroup, link);
161 		nvmf_transport_poll_group_destroy(tgroup);
162 	}
163 
164 	for (sid = 0; sid < group->num_sgroups; sid++) {
165 		sgroup = &group->sgroups[sid];
166 
167 		assert(sgroup != NULL);
168 
169 		for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
170 			if (sgroup->ns_info[nsid].channel) {
171 				spdk_put_io_channel(sgroup->ns_info[nsid].channel);
172 				sgroup->ns_info[nsid].channel = NULL;
173 			}
174 		}
175 
176 		free(sgroup->ns_info);
177 	}
178 
179 	free(group->sgroups);
180 
181 	if (group->destroy_cb_fn) {
182 		group->destroy_cb_fn(group->destroy_cb_arg, 0);
183 	}
184 }
185 
186 /*
187  * Callback to unregister a poll group from the target, and clean up its state.
188  */
189 static void
190 nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf)
191 {
192 	struct spdk_nvmf_tgt *tgt = io_device;
193 	struct spdk_nvmf_poll_group *group = ctx_buf;
194 
195 	SPDK_DTRACE_PROBE1_TICKS(nvmf_destroy_poll_group, spdk_thread_get_id(group->thread));
196 
197 	pthread_mutex_lock(&tgt->mutex);
198 	TAILQ_REMOVE(&tgt->poll_groups, group, link);
199 	tgt->num_poll_groups--;
200 	pthread_mutex_unlock(&tgt->mutex);
201 
202 	assert(!(tgt->state == NVMF_TGT_PAUSING || tgt->state == NVMF_TGT_RESUMING));
203 	nvmf_tgt_cleanup_poll_group(group);
204 }
205 
206 static int
207 nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
208 			      struct spdk_nvmf_transport *transport)
209 {
210 	struct spdk_nvmf_transport_poll_group *tgroup = nvmf_get_transport_poll_group(group, transport);
211 
212 	if (tgroup != NULL) {
213 		/* Transport already in the poll group */
214 		return 0;
215 	}
216 
217 	tgroup = nvmf_transport_poll_group_create(transport, group);
218 	if (!tgroup) {
219 		SPDK_ERRLOG("Unable to create poll group for transport\n");
220 		return -1;
221 	}
222 	SPDK_DTRACE_PROBE2_TICKS(nvmf_transport_poll_group_create, transport,
223 				 spdk_thread_get_id(group->thread));
224 
225 	tgroup->group = group;
226 	TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
227 
228 	return 0;
229 }
230 
231 static int
232 nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
233 {
234 	struct spdk_nvmf_tgt *tgt = io_device;
235 	struct spdk_nvmf_poll_group *group = ctx_buf;
236 	struct spdk_nvmf_transport *transport;
237 	struct spdk_nvmf_subsystem *subsystem;
238 	struct spdk_thread *thread = spdk_get_thread();
239 	uint32_t i;
240 	int rc;
241 
242 	group->tgt = tgt;
243 	TAILQ_INIT(&group->tgroups);
244 	TAILQ_INIT(&group->qpairs);
245 	group->thread = thread;
246 	pthread_mutex_init(&group->mutex, NULL);
247 
248 	SPDK_DTRACE_PROBE1_TICKS(nvmf_create_poll_group, spdk_thread_get_id(thread));
249 
250 	TAILQ_FOREACH(transport, &tgt->transports, link) {
251 		rc = nvmf_poll_group_add_transport(group, transport);
252 		if (rc != 0) {
253 			nvmf_tgt_cleanup_poll_group(group);
254 			return rc;
255 		}
256 	}
257 
258 	group->num_sgroups = tgt->max_subsystems;
259 	group->sgroups = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem_poll_group));
260 	if (!group->sgroups) {
261 		nvmf_tgt_cleanup_poll_group(group);
262 		return -ENOMEM;
263 	}
264 
265 	for (i = 0; i < tgt->max_subsystems; i++) {
266 		TAILQ_INIT(&group->sgroups[i].queued);
267 	}
268 
269 	for (subsystem = spdk_nvmf_subsystem_get_first(tgt);
270 	     subsystem != NULL;
271 	     subsystem = spdk_nvmf_subsystem_get_next(subsystem)) {
272 		if (nvmf_poll_group_add_subsystem(group, subsystem, NULL, NULL) != 0) {
273 			nvmf_tgt_cleanup_poll_group(group);
274 			return -1;
275 		}
276 	}
277 
278 	pthread_mutex_lock(&tgt->mutex);
279 	tgt->num_poll_groups++;
280 	TAILQ_INSERT_TAIL(&tgt->poll_groups, group, link);
281 	pthread_mutex_unlock(&tgt->mutex);
282 
283 	return 0;
284 }
285 
286 static void
287 _nvmf_tgt_disconnect_qpairs(void *ctx)
288 {
289 	struct spdk_nvmf_qpair *qpair, *qpair_tmp;
290 	struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
291 	struct spdk_nvmf_poll_group *group = qpair_ctx->group;
292 	struct spdk_io_channel *ch;
293 	int rc;
294 
295 	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, qpair_tmp) {
296 		rc = spdk_nvmf_qpair_disconnect(qpair);
297 		if (rc && rc != -EINPROGRESS) {
298 			break;
299 		}
300 	}
301 
302 	if (TAILQ_EMPTY(&group->qpairs)) {
303 		/* When the refcount from the channels reaches 0, nvmf_tgt_destroy_poll_group will be called. */
304 		ch = spdk_io_channel_from_ctx(group);
305 		spdk_put_io_channel(ch);
306 		free(qpair_ctx);
307 		return;
308 	}
309 
310 	/* Some qpairs are in the process of being disconnected. Send a message and try to remove them again. */
311 	spdk_thread_send_msg(spdk_get_thread(), _nvmf_tgt_disconnect_qpairs, ctx);
312 }
313 
314 static void
315 nvmf_tgt_destroy_poll_group_qpairs(struct spdk_nvmf_poll_group *group)
316 {
317 	struct nvmf_qpair_disconnect_many_ctx *ctx;
318 
319 	SPDK_DTRACE_PROBE1_TICKS(nvmf_destroy_poll_group_qpairs, spdk_thread_get_id(group->thread));
320 
321 	ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx));
322 	if (!ctx) {
323 		SPDK_ERRLOG("Failed to allocate memory for destroy poll group ctx\n");
324 		return;
325 	}
326 
327 	ctx->group = group;
328 	_nvmf_tgt_disconnect_qpairs(ctx);
329 }
330 
331 struct spdk_nvmf_tgt *
332 spdk_nvmf_tgt_create(struct spdk_nvmf_target_opts *_opts)
333 {
334 	struct spdk_nvmf_tgt *tgt, *tmp_tgt;
335 	struct spdk_nvmf_target_opts opts = {
336 		.max_subsystems = SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS,
337 		.discovery_filter = SPDK_NVMF_TGT_DISCOVERY_MATCH_ANY,
338 	};
339 
340 	memcpy(&opts, _opts, _opts->size);
341 	if (strnlen(opts.name, NVMF_TGT_NAME_MAX_LENGTH) == NVMF_TGT_NAME_MAX_LENGTH) {
342 		SPDK_ERRLOG("Provided target name exceeds the max length of %u.\n", NVMF_TGT_NAME_MAX_LENGTH);
343 		return NULL;
344 	}
345 
346 	TAILQ_FOREACH(tmp_tgt, &g_nvmf_tgts, link) {
347 		if (!strncmp(opts.name, tmp_tgt->name, NVMF_TGT_NAME_MAX_LENGTH)) {
348 			SPDK_ERRLOG("Provided target name must be unique.\n");
349 			return NULL;
350 		}
351 	}
352 
353 	tgt = calloc(1, sizeof(*tgt));
354 	if (!tgt) {
355 		return NULL;
356 	}
357 
358 	snprintf(tgt->name, NVMF_TGT_NAME_MAX_LENGTH, "%s", opts.name);
359 
360 	if (!opts.max_subsystems) {
361 		tgt->max_subsystems = SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS;
362 	} else {
363 		tgt->max_subsystems = opts.max_subsystems;
364 	}
365 
366 	tgt->crdt[0] = opts.crdt[0];
367 	tgt->crdt[1] = opts.crdt[1];
368 	tgt->crdt[2] = opts.crdt[2];
369 	tgt->discovery_filter = opts.discovery_filter;
370 	tgt->discovery_genctr = 0;
371 	tgt->dhchap_digests = opts.dhchap_digests;
372 	tgt->dhchap_dhgroups = opts.dhchap_dhgroups;
373 	TAILQ_INIT(&tgt->transports);
374 	TAILQ_INIT(&tgt->poll_groups);
375 	TAILQ_INIT(&tgt->referrals);
376 	tgt->num_poll_groups = 0;
377 
378 	tgt->subsystem_ids = spdk_bit_array_create(tgt->max_subsystems);
379 	if (tgt->subsystem_ids == NULL) {
380 		free(tgt);
381 		return NULL;
382 	}
383 
384 	RB_INIT(&tgt->subsystems);
385 
386 	pthread_mutex_init(&tgt->mutex, NULL);
387 
388 	spdk_io_device_register(tgt,
389 				nvmf_tgt_create_poll_group,
390 				nvmf_tgt_destroy_poll_group,
391 				sizeof(struct spdk_nvmf_poll_group),
392 				tgt->name);
393 
394 	tgt->state = NVMF_TGT_RUNNING;
395 
396 	TAILQ_INSERT_HEAD(&g_nvmf_tgts, tgt, link);
397 
398 	return tgt;
399 }
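
/*
 * Hedged usage sketch (added for illustration, not part of the upstream file): creating a
 * target. Only fields read by spdk_nvmf_tgt_create() above are set; the name "nvmf_tgt0"
 * is an arbitrary example.
 *
 *	struct spdk_nvmf_target_opts tgt_opts = {
 *		.size = sizeof(tgt_opts),
 *		.max_subsystems = 1024,
 *		.discovery_filter = SPDK_NVMF_TGT_DISCOVERY_MATCH_ANY,
 *	};
 *	struct spdk_nvmf_tgt *tgt;
 *
 *	snprintf(tgt_opts.name, sizeof(tgt_opts.name), "%s", "nvmf_tgt0");
 *	tgt = spdk_nvmf_tgt_create(&tgt_opts);
 *	if (tgt == NULL) {
 *		SPDK_ERRLOG("Failed to create NVMe-oF target\n");
 *	}
 */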
400 
401 static void
402 _nvmf_tgt_destroy_next_transport(void *ctx)
403 {
404 	struct spdk_nvmf_tgt *tgt = ctx;
405 	struct spdk_nvmf_transport *transport;
406 
407 	if (!TAILQ_EMPTY(&tgt->transports)) {
408 		transport = TAILQ_FIRST(&tgt->transports);
409 		TAILQ_REMOVE(&tgt->transports, transport, link);
410 		spdk_nvmf_transport_destroy(transport, _nvmf_tgt_destroy_next_transport, tgt);
411 	} else {
412 		spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn = tgt->destroy_cb_fn;
413 		void *destroy_cb_arg = tgt->destroy_cb_arg;
414 
415 		pthread_mutex_destroy(&tgt->mutex);
416 		free(tgt);
417 
418 		if (destroy_cb_fn) {
419 			destroy_cb_fn(destroy_cb_arg, 0);
420 		}
421 	}
422 }
423 
424 static void
425 nvmf_tgt_destroy_cb(void *io_device)
426 {
427 	struct spdk_nvmf_tgt *tgt = io_device;
428 	struct spdk_nvmf_subsystem *subsystem, *subsystem_next;
429 	int rc;
430 	struct spdk_nvmf_referral *referral;
431 
432 	while ((referral = TAILQ_FIRST(&tgt->referrals))) {
433 		TAILQ_REMOVE(&tgt->referrals, referral, link);
434 		free(referral);
435 	}
436 
437 	nvmf_tgt_stop_mdns_prr(tgt);
438 
439 	/* We will be freeing subsystems in this loop, so we always need to get the next one
440 	 * ahead of time, since we can't call get_next() on a subsystem that's been freed.
441 	 */
442 	for (subsystem = spdk_nvmf_subsystem_get_first(tgt),
443 	     subsystem_next = spdk_nvmf_subsystem_get_next(subsystem);
444 	     subsystem != NULL;
445 	     subsystem = subsystem_next,
446 	     subsystem_next = spdk_nvmf_subsystem_get_next(subsystem_next)) {
447 		nvmf_subsystem_remove_all_listeners(subsystem, true);
448 
449 		rc = spdk_nvmf_subsystem_destroy(subsystem, nvmf_tgt_destroy_cb, tgt);
450 		if (rc) {
451 			if (rc == -EINPROGRESS) {
452 				/* If rc is -EINPROGRESS, nvmf_tgt_destroy_cb will be called again once this
453 				 * subsystem is destroyed and will continue destroying any remaining subsystems. */
454 				return;
455 			} else {
456 				SPDK_ERRLOG("Failed to destroy subsystem %s, rc %d\n", subsystem->subnqn, rc);
457 			}
458 		}
459 	}
460 	spdk_bit_array_free(&tgt->subsystem_ids);
461 	_nvmf_tgt_destroy_next_transport(tgt);
462 }
463 
464 void
465 spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt,
466 		      spdk_nvmf_tgt_destroy_done_fn cb_fn,
467 		      void *cb_arg)
468 {
469 	assert(!(tgt->state == NVMF_TGT_PAUSING || tgt->state == NVMF_TGT_RESUMING));
470 
471 	tgt->destroy_cb_fn = cb_fn;
472 	tgt->destroy_cb_arg = cb_arg;
473 
474 	TAILQ_REMOVE(&g_nvmf_tgts, tgt, link);
475 
476 	spdk_io_device_unregister(tgt, nvmf_tgt_destroy_cb);
477 }
478 
479 const char *
480 spdk_nvmf_tgt_get_name(struct spdk_nvmf_tgt *tgt)
481 {
482 	return tgt->name;
483 }
484 
485 struct spdk_nvmf_tgt *
486 spdk_nvmf_get_tgt(const char *name)
487 {
488 	struct spdk_nvmf_tgt *tgt;
489 	uint32_t num_targets = 0;
490 
491 	TAILQ_FOREACH(tgt, &g_nvmf_tgts, link) {
492 		if (name) {
493 			if (!strncmp(tgt->name, name, NVMF_TGT_NAME_MAX_LENGTH)) {
494 				return tgt;
495 			}
496 		}
497 		num_targets++;
498 	}
499 
500 	/*
501 	 * Special case: if there is only one target and
502 	 * no name was specified, return the only available
503 	 * target. If there is more than one target, a name
504 	 * must be specified.
505 	 */
506 	if (!name && num_targets == 1) {
507 		return TAILQ_FIRST(&g_nvmf_tgts);
508 	}
509 
510 	return NULL;
511 }
512 
513 struct spdk_nvmf_tgt *
514 spdk_nvmf_get_first_tgt(void)
515 {
516 	return TAILQ_FIRST(&g_nvmf_tgts);
517 }
518 
519 struct spdk_nvmf_tgt *
520 spdk_nvmf_get_next_tgt(struct spdk_nvmf_tgt *prev)
521 {
522 	return TAILQ_NEXT(prev, link);
523 }
524 
525 static void
526 nvmf_write_nvme_subsystem_config(struct spdk_json_write_ctx *w,
527 				 struct spdk_nvmf_subsystem *subsystem)
528 {
529 	struct spdk_nvmf_host *host;
530 	struct spdk_nvmf_ns *ns;
531 	struct spdk_nvmf_ns_opts ns_opts;
532 	uint32_t max_namespaces;
533 	struct spdk_nvmf_transport *transport;
534 
535 	assert(spdk_nvmf_subsystem_get_type(subsystem) == SPDK_NVMF_SUBTYPE_NVME);
536 
537 	/* { */
538 	spdk_json_write_object_begin(w);
539 	spdk_json_write_named_string(w, "method", "nvmf_create_subsystem");
540 
541 	/*     "params" : { */
542 	spdk_json_write_named_object_begin(w, "params");
543 	spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
544 	spdk_json_write_named_bool(w, "allow_any_host", spdk_nvmf_subsystem_get_allow_any_host(subsystem));
545 	spdk_json_write_named_string(w, "serial_number", spdk_nvmf_subsystem_get_sn(subsystem));
546 	spdk_json_write_named_string(w, "model_number", spdk_nvmf_subsystem_get_mn(subsystem));
547 
548 	max_namespaces = spdk_nvmf_subsystem_get_max_namespaces(subsystem);
549 	if (max_namespaces != 0) {
550 		spdk_json_write_named_uint32(w, "max_namespaces", max_namespaces);
551 	}
552 
553 	spdk_json_write_named_uint32(w, "min_cntlid", spdk_nvmf_subsystem_get_min_cntlid(subsystem));
554 	spdk_json_write_named_uint32(w, "max_cntlid", spdk_nvmf_subsystem_get_max_cntlid(subsystem));
555 	spdk_json_write_named_bool(w, "ana_reporting", spdk_nvmf_subsystem_get_ana_reporting(subsystem));
556 
557 	/*     } "params" */
558 	spdk_json_write_object_end(w);
559 
560 	/* } */
561 	spdk_json_write_object_end(w);
562 
563 	for (host = spdk_nvmf_subsystem_get_first_host(subsystem); host != NULL;
564 	     host = spdk_nvmf_subsystem_get_next_host(subsystem, host)) {
565 
566 		spdk_json_write_object_begin(w);
567 		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_host");
568 
569 		/*     "params" : { */
570 		spdk_json_write_named_object_begin(w, "params");
571 
572 		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
573 		spdk_json_write_named_string(w, "host", spdk_nvmf_host_get_nqn(host));
574 		if (host->dhchap_key != NULL) {
575 			spdk_json_write_named_string(w, "dhchap_key",
576 						     spdk_key_get_name(host->dhchap_key));
577 		}
578 		if (host->dhchap_ctrlr_key != NULL) {
579 			spdk_json_write_named_string(w, "dhchap_ctrlr_key",
580 						     spdk_key_get_name(host->dhchap_ctrlr_key));
581 		}
582 		TAILQ_FOREACH(transport, &subsystem->tgt->transports, link) {
583 			if (transport->ops->subsystem_dump_host != NULL) {
584 				transport->ops->subsystem_dump_host(transport, subsystem, host->nqn, w);
585 			}
586 		}
587 
588 		/*     } "params" */
589 		spdk_json_write_object_end(w);
590 
591 		/* } */
592 		spdk_json_write_object_end(w);
593 	}
594 
595 	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
596 	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
597 		spdk_nvmf_ns_get_opts(ns, &ns_opts, sizeof(ns_opts));
598 
599 		spdk_json_write_object_begin(w);
600 		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_ns");
601 
602 		/*     "params" : { */
603 		spdk_json_write_named_object_begin(w, "params");
604 
605 		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
606 
607 		/*     "namespace" : { */
608 		spdk_json_write_named_object_begin(w, "namespace");
609 
610 		spdk_json_write_named_uint32(w, "nsid", spdk_nvmf_ns_get_id(ns));
611 		spdk_json_write_named_string(w, "bdev_name", spdk_bdev_get_name(spdk_nvmf_ns_get_bdev(ns)));
612 
613 		if (ns->ptpl_file != NULL) {
614 			spdk_json_write_named_string(w, "ptpl_file", ns->ptpl_file);
615 		}
616 
617 		if (!spdk_mem_all_zero(ns_opts.nguid, sizeof(ns_opts.nguid))) {
618 			SPDK_STATIC_ASSERT(sizeof(ns_opts.nguid) == sizeof(uint64_t) * 2, "size mismatch");
619 			spdk_json_write_named_string_fmt(w, "nguid", "%016"PRIX64"%016"PRIX64, from_be64(&ns_opts.nguid[0]),
620 							 from_be64(&ns_opts.nguid[8]));
621 		}
622 
623 		if (!spdk_mem_all_zero(ns_opts.eui64, sizeof(ns_opts.eui64))) {
624 			SPDK_STATIC_ASSERT(sizeof(ns_opts.eui64) == sizeof(uint64_t), "size mismatch");
625 			spdk_json_write_named_string_fmt(w, "eui64", "%016"PRIX64, from_be64(&ns_opts.eui64));
626 		}
627 
628 		if (!spdk_uuid_is_null(&ns_opts.uuid)) {
629 			spdk_json_write_named_uuid(w, "uuid",  &ns_opts.uuid);
630 		}
631 
632 		if (spdk_nvmf_subsystem_get_ana_reporting(subsystem)) {
633 			spdk_json_write_named_uint32(w, "anagrpid", ns_opts.anagrpid);
634 		}
635 
636 		spdk_json_write_named_bool(w, "no_auto_visible", !ns->always_visible);
637 
638 		/*     "namespace" */
639 		spdk_json_write_object_end(w);
640 
641 		/*     } "params" */
642 		spdk_json_write_object_end(w);
643 
644 		/* } */
645 		spdk_json_write_object_end(w);
646 
647 		TAILQ_FOREACH(host, &ns->hosts, link) {
648 			spdk_json_write_object_begin(w);
649 			spdk_json_write_named_string(w, "method", "nvmf_ns_add_host");
650 			spdk_json_write_named_object_begin(w, "params");
651 			spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
652 			spdk_json_write_named_uint32(w, "nsid", spdk_nvmf_ns_get_id(ns));
653 			spdk_json_write_named_string(w, "host", spdk_nvmf_host_get_nqn(host));
654 			spdk_json_write_object_end(w);
655 			spdk_json_write_object_end(w);
656 		}
657 	}
658 }
659 
660 static void
661 nvmf_write_subsystem_config_json(struct spdk_json_write_ctx *w,
662 				 struct spdk_nvmf_subsystem *subsystem)
663 {
664 	struct spdk_nvmf_subsystem_listener *listener;
665 	struct spdk_nvmf_transport *transport;
666 	const struct spdk_nvme_transport_id *trid;
667 
668 	if (spdk_nvmf_subsystem_get_type(subsystem) == SPDK_NVMF_SUBTYPE_NVME) {
669 		nvmf_write_nvme_subsystem_config(w, subsystem);
670 	}
671 
672 	for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem); listener != NULL;
673 	     listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) {
674 		transport = listener->transport;
675 		trid = spdk_nvmf_subsystem_listener_get_trid(listener);
676 
677 		spdk_json_write_object_begin(w);
678 		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_listener");
679 
680 		/*     "params" : { */
681 		spdk_json_write_named_object_begin(w, "params");
682 
683 		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
684 
685 		spdk_json_write_named_object_begin(w, "listen_address");
686 		nvmf_transport_listen_dump_trid(trid, w);
687 		spdk_json_write_object_end(w);
688 		if (transport->ops->listen_dump_opts) {
689 			transport->ops->listen_dump_opts(transport, trid, w);
690 		}
691 
692 		spdk_json_write_named_bool(w, "secure_channel", listener->opts.secure_channel);
693 
694 		if (listener->opts.sock_impl) {
695 			spdk_json_write_named_string(w, "sock_impl", listener->opts.sock_impl);
696 		}
697 
698 		/*     } "params" */
699 		spdk_json_write_object_end(w);
700 
701 		/* } */
702 		spdk_json_write_object_end(w);
703 	}
704 }
705 
706 void
707 spdk_nvmf_tgt_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_tgt *tgt)
708 {
709 	struct spdk_nvmf_subsystem *subsystem;
710 	struct spdk_nvmf_transport *transport;
711 	struct spdk_nvmf_referral *referral;
712 
713 	spdk_json_write_object_begin(w);
714 	spdk_json_write_named_string(w, "method", "nvmf_set_max_subsystems");
715 
716 	spdk_json_write_named_object_begin(w, "params");
717 	spdk_json_write_named_uint32(w, "max_subsystems", tgt->max_subsystems);
718 	spdk_json_write_object_end(w);
719 
720 	spdk_json_write_object_end(w);
721 
722 	spdk_json_write_object_begin(w);
723 	spdk_json_write_named_string(w, "method", "nvmf_set_crdt");
724 	spdk_json_write_named_object_begin(w, "params");
725 	spdk_json_write_named_uint32(w, "crdt1", tgt->crdt[0]);
726 	spdk_json_write_named_uint32(w, "crdt2", tgt->crdt[1]);
727 	spdk_json_write_named_uint32(w, "crdt3", tgt->crdt[2]);
728 	spdk_json_write_object_end(w);
729 	spdk_json_write_object_end(w);
730 
731 	/* write transports */
732 	TAILQ_FOREACH(transport, &tgt->transports, link) {
733 		spdk_json_write_object_begin(w);
734 		spdk_json_write_named_string(w, "method", "nvmf_create_transport");
735 		nvmf_transport_dump_opts(transport, w, true);
736 		spdk_json_write_object_end(w);
737 	}
738 
739 	TAILQ_FOREACH(referral, &tgt->referrals, link) {
740 		spdk_json_write_object_begin(w);
741 		spdk_json_write_named_string(w, "method", "nvmf_discovery_add_referral");
742 
743 		spdk_json_write_named_object_begin(w, "params");
744 		spdk_json_write_named_object_begin(w, "address");
745 		nvmf_transport_listen_dump_trid(&referral->trid, w);
746 		spdk_json_write_object_end(w);
747 		spdk_json_write_named_bool(w, "secure_channel",
748 					   referral->entry.treq.secure_channel ==
749 					   SPDK_NVMF_TREQ_SECURE_CHANNEL_REQUIRED);
750 		spdk_json_write_named_string(w, "subnqn", referral->trid.subnqn);
751 		spdk_json_write_object_end(w);
752 
753 		spdk_json_write_object_end(w);
754 	}
755 
756 	subsystem = spdk_nvmf_subsystem_get_first(tgt);
757 	while (subsystem) {
758 		nvmf_write_subsystem_config_json(w, subsystem);
759 		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
760 	}
761 }
762 
763 static void
764 nvmf_listen_opts_copy(struct spdk_nvmf_listen_opts *opts,
765 		      const struct spdk_nvmf_listen_opts *opts_src, size_t opts_size)
766 {
767 	assert(opts);
768 	assert(opts_src);
769 
770 	opts->opts_size = opts_size;
771 
772 #define SET_FIELD(field) \
773     if (offsetof(struct spdk_nvmf_listen_opts, field) + sizeof(opts->field) <= opts_size) { \
774                  opts->field = opts_src->field; \
775     } \
776 
777 	SET_FIELD(transport_specific);
778 	SET_FIELD(secure_channel);
779 	SET_FIELD(ana_state);
780 	SET_FIELD(sock_impl);
781 #undef SET_FIELD
782 
783 	/* Do not remove this statement. Update it whenever you add a new field, and do not
784 	 * forget to add the corresponding SET_FIELD statement for the new field. */
785 	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_listen_opts) == 32, "Incorrect size");
786 }
787 
788 void
789 spdk_nvmf_listen_opts_init(struct spdk_nvmf_listen_opts *opts, size_t opts_size)
790 {
791 	struct spdk_nvmf_listen_opts opts_local = {};
792 
793 	/* local version of opts should have defaults set here */
794 	opts_local.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
795 	nvmf_listen_opts_copy(opts, &opts_local, opts_size);
796 }
797 
798 int
799 spdk_nvmf_tgt_listen_ext(struct spdk_nvmf_tgt *tgt, const struct spdk_nvme_transport_id *trid,
800 			 struct spdk_nvmf_listen_opts *opts)
801 {
802 	struct spdk_nvmf_transport *transport;
803 	int rc;
804 	struct spdk_nvmf_listen_opts opts_local = {};
805 
806 	if (!opts) {
807 		SPDK_ERRLOG("opts should not be NULL\n");
808 		return -EINVAL;
809 	}
810 
811 	if (!opts->opts_size) {
812 		SPDK_ERRLOG("The opts_size in opts structure should not be zero\n");
813 		return -EINVAL;
814 	}
815 
816 	transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring);
817 	if (!transport) {
818 		SPDK_ERRLOG("Unable to find %s transport. The transport must be created first; also make sure it is properly registered.\n",
819 			    trid->trstring);
820 		return -EINVAL;
821 	}
822 
823 	nvmf_listen_opts_copy(&opts_local, opts, opts->opts_size);
824 	rc = spdk_nvmf_transport_listen(transport, trid, &opts_local);
825 	if (rc < 0) {
826 		SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr);
827 	}
828 
829 	return rc;
830 }
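
/*
 * Hedged usage sketch (added for illustration, not part of the upstream file): listening on
 * a TCP address. The TCP transport must already have been created and added to the target,
 * since spdk_nvmf_tgt_listen_ext() above looks it up by trid.trstring. The address values
 * are assumptions.
 *
 *	struct spdk_nvme_transport_id trid = {};
 *	struct spdk_nvmf_listen_opts lopts;
 *
 *	trid.trtype = SPDK_NVME_TRANSPORT_TCP;
 *	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
 *	snprintf(trid.trstring, sizeof(trid.trstring), "%s", "TCP");
 *	snprintf(trid.traddr, sizeof(trid.traddr), "%s", "127.0.0.1");
 *	snprintf(trid.trsvcid, sizeof(trid.trsvcid), "%s", "4420");
 *
 *	spdk_nvmf_listen_opts_init(&lopts, sizeof(lopts));
 *	if (spdk_nvmf_tgt_listen_ext(tgt, &trid, &lopts) != 0) {
 *		SPDK_ERRLOG("Failed to listen on %s:%s\n", trid.traddr, trid.trsvcid);
 *	}
 */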
831 
832 int
833 spdk_nvmf_tgt_stop_listen(struct spdk_nvmf_tgt *tgt,
834 			  struct spdk_nvme_transport_id *trid)
835 {
836 	struct spdk_nvmf_transport *transport;
837 	int rc;
838 
839 	transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring);
840 	if (!transport) {
841 		SPDK_ERRLOG("Unable to find %s transport. The transport must be created first; also make sure it is properly registered.\n",
842 			    trid->trstring);
843 		return -EINVAL;
844 	}
845 
846 	rc = spdk_nvmf_transport_stop_listen(transport, trid);
847 	if (rc < 0) {
848 		SPDK_ERRLOG("Failed to stop listening on address '%s'\n", trid->traddr);
849 		return rc;
850 	}
851 	return 0;
852 }
853 
854 struct spdk_nvmf_tgt_add_transport_ctx {
855 	struct spdk_nvmf_tgt *tgt;
856 	struct spdk_nvmf_transport *transport;
857 	spdk_nvmf_tgt_add_transport_done_fn cb_fn;
858 	void *cb_arg;
859 	int status;
860 };
861 
862 static void
863 _nvmf_tgt_remove_transport_done(struct spdk_io_channel_iter *i, int status)
864 {
865 	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
866 
867 	ctx->cb_fn(ctx->cb_arg, ctx->status);
868 	free(ctx);
869 }
870 
871 static void
872 _nvmf_tgt_remove_transport(struct spdk_io_channel_iter *i)
873 {
874 	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
875 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
876 	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
877 	struct spdk_nvmf_transport_poll_group *tgroup, *tmp;
878 
879 	TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
880 		if (tgroup->transport == ctx->transport) {
881 			TAILQ_REMOVE(&group->tgroups, tgroup, link);
882 			nvmf_transport_poll_group_destroy(tgroup);
883 		}
884 	}
885 
886 	spdk_for_each_channel_continue(i, 0);
887 }
888 
889 static void
890 _nvmf_tgt_add_transport_done(struct spdk_io_channel_iter *i, int status)
891 {
892 	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
893 
894 	if (status) {
895 		ctx->status = status;
896 		spdk_for_each_channel(ctx->tgt,
897 				      _nvmf_tgt_remove_transport,
898 				      ctx,
899 				      _nvmf_tgt_remove_transport_done);
900 		return;
901 	}
902 
903 	ctx->transport->tgt = ctx->tgt;
904 	TAILQ_INSERT_TAIL(&ctx->tgt->transports, ctx->transport, link);
905 	ctx->cb_fn(ctx->cb_arg, status);
906 	free(ctx);
907 }
908 
909 static void
910 _nvmf_tgt_add_transport(struct spdk_io_channel_iter *i)
911 {
912 	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
913 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
914 	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
915 	int rc;
916 
917 	rc = nvmf_poll_group_add_transport(group, ctx->transport);
918 	spdk_for_each_channel_continue(i, rc);
919 }
920 
921 void
922 spdk_nvmf_tgt_add_transport(struct spdk_nvmf_tgt *tgt,
923 			    struct spdk_nvmf_transport *transport,
924 			    spdk_nvmf_tgt_add_transport_done_fn cb_fn,
925 			    void *cb_arg)
926 {
927 	struct spdk_nvmf_tgt_add_transport_ctx *ctx;
928 
929 	SPDK_DTRACE_PROBE2_TICKS(nvmf_tgt_add_transport, transport, tgt->name);
930 
931 	if (spdk_nvmf_tgt_get_transport(tgt, transport->ops->name)) {
932 		cb_fn(cb_arg, -EEXIST);
933 		return; /* transport already created */
934 	}
935 
936 	ctx = calloc(1, sizeof(*ctx));
937 	if (!ctx) {
938 		cb_fn(cb_arg, -ENOMEM);
939 		return;
940 	}
941 
942 	ctx->tgt = tgt;
943 	ctx->transport = transport;
944 	ctx->cb_fn = cb_fn;
945 	ctx->cb_arg = cb_arg;
946 
947 	spdk_for_each_channel(tgt,
948 			      _nvmf_tgt_add_transport,
949 			      ctx,
950 			      _nvmf_tgt_add_transport_done);
951 }
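
/*
 * Hedged usage sketch (added for illustration, not part of the upstream file): attaching an
 * already-created transport to the target. The callback signature follows
 * spdk_nvmf_tgt_add_transport_done_fn as invoked above; creating the transport itself is out
 * of scope here. Note that -EEXIST is reported through the callback if a transport with the
 * same name was already added.
 *
 *	static void
 *	add_transport_done(void *cb_arg, int status)
 *	{
 *		if (status != 0) {
 *			SPDK_ERRLOG("Failed to add transport: %d\n", status);
 *		}
 *	}
 *
 *	...
 *	spdk_nvmf_tgt_add_transport(tgt, transport, add_transport_done, NULL);
 */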
952 
953 struct nvmf_tgt_pause_ctx {
954 	struct spdk_nvmf_tgt *tgt;
955 	spdk_nvmf_tgt_pause_polling_cb_fn cb_fn;
956 	void *cb_arg;
957 };
958 
959 static void
960 _nvmf_tgt_pause_polling_done(struct spdk_io_channel_iter *i, int status)
961 {
962 	struct nvmf_tgt_pause_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
963 
964 	ctx->tgt->state = NVMF_TGT_PAUSED;
965 
966 	ctx->cb_fn(ctx->cb_arg, status);
967 	free(ctx);
968 }
969 
970 static void
971 _nvmf_tgt_pause_polling(struct spdk_io_channel_iter *i)
972 {
973 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
974 	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
975 	struct spdk_nvmf_transport_poll_group *tgroup;
976 
977 	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
978 		nvmf_transport_poll_group_pause(tgroup);
979 	}
980 
981 	spdk_for_each_channel_continue(i, 0);
982 }
983 
984 int
985 spdk_nvmf_tgt_pause_polling(struct spdk_nvmf_tgt *tgt, spdk_nvmf_tgt_pause_polling_cb_fn cb_fn,
986 			    void *cb_arg)
987 {
988 	struct nvmf_tgt_pause_ctx *ctx;
989 
990 	SPDK_DTRACE_PROBE2_TICKS(nvmf_tgt_pause_polling, tgt, tgt->name);
991 
992 	switch (tgt->state) {
993 	case NVMF_TGT_PAUSING:
994 	case NVMF_TGT_RESUMING:
995 		return -EBUSY;
996 	case NVMF_TGT_RUNNING:
997 		break;
998 	default:
999 		return -EINVAL;
1000 	}
1001 
1002 	ctx = calloc(1, sizeof(*ctx));
1003 	if (!ctx) {
1004 		return -ENOMEM;
1005 	}
1006 
1007 
1008 	tgt->state = NVMF_TGT_PAUSING;
1009 
1010 	ctx->tgt = tgt;
1011 	ctx->cb_fn = cb_fn;
1012 	ctx->cb_arg = cb_arg;
1013 
1014 	spdk_for_each_channel(tgt,
1015 			      _nvmf_tgt_pause_polling,
1016 			      ctx,
1017 			      _nvmf_tgt_pause_polling_done);
1018 	return 0;
1019 }
1020 
1021 static void
1022 _nvmf_tgt_resume_polling_done(struct spdk_io_channel_iter *i, int status)
1023 {
1024 	struct nvmf_tgt_pause_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
1025 
1026 	ctx->tgt->state = NVMF_TGT_RUNNING;
1027 
1028 	ctx->cb_fn(ctx->cb_arg, status);
1029 	free(ctx);
1030 }
1031 
1032 static void
1033 _nvmf_tgt_resume_polling(struct spdk_io_channel_iter *i)
1034 {
1035 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
1036 	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
1037 	struct spdk_nvmf_transport_poll_group *tgroup;
1038 
1039 	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
1040 		nvmf_transport_poll_group_resume(tgroup);
1041 	}
1042 
1043 	spdk_for_each_channel_continue(i, 0);
1044 }
1045 
1046 int
1047 spdk_nvmf_tgt_resume_polling(struct spdk_nvmf_tgt *tgt, spdk_nvmf_tgt_resume_polling_cb_fn cb_fn,
1048 			     void *cb_arg)
1049 {
1050 	struct nvmf_tgt_pause_ctx *ctx;
1051 
1052 	SPDK_DTRACE_PROBE2_TICKS(nvmf_tgt_resume_polling, tgt, tgt->name);
1053 
1054 	switch (tgt->state) {
1055 	case NVMF_TGT_PAUSING:
1056 	case NVMF_TGT_RESUMING:
1057 		return -EBUSY;
1058 	case NVMF_TGT_PAUSED:
1059 		break;
1060 	default:
1061 		return -EINVAL;
1062 	}
1063 
1064 	ctx = calloc(1, sizeof(*ctx));
1065 	if (!ctx) {
1066 		return -ENOMEM;
1067 	}
1068 
1069 	tgt->state = NVMF_TGT_RESUMING;
1070 
1071 	ctx->tgt = tgt;
1072 	ctx->cb_fn = cb_fn;
1073 	ctx->cb_arg = cb_arg;
1074 
1075 	spdk_for_each_channel(tgt,
1076 			      _nvmf_tgt_resume_polling,
1077 			      ctx,
1078 			      _nvmf_tgt_resume_polling_done);
1079 	return 0;
1080 }
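
/*
 * Hedged usage sketch (added for illustration, not part of the upstream file): pausing and
 * later resuming polling on every poll group. Both calls complete asynchronously through
 * the supplied callback and return -EBUSY while a pause or resume is already in flight;
 * resume_done (not shown) has the same signature as pause_done.
 *
 *	static void
 *	pause_done(void *cb_arg, int status)
 *	{
 *		struct spdk_nvmf_tgt *tgt = cb_arg;
 *
 *		// The target is now NVMF_TGT_PAUSED; do the quiesced work, then resume.
 *		spdk_nvmf_tgt_resume_polling(tgt, resume_done, NULL);
 *	}
 *
 *	...
 *	if (spdk_nvmf_tgt_pause_polling(tgt, pause_done, tgt) != 0) {
 *		SPDK_ERRLOG("Failed to pause polling\n");
 *	}
 */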
1081 
1082 struct spdk_nvmf_subsystem *
1083 spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
1084 {
1085 	struct spdk_nvmf_subsystem subsystem;
1086 
1087 	if (!subnqn) {
1088 		return NULL;
1089 	}
1090 
1091 	/* Ensure that subnqn is null terminated */
1092 	if (!memchr(subnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1)) {
1093 		SPDK_ERRLOG("Connect SUBNQN is not null terminated\n");
1094 		return NULL;
1095 	}
1096 
1097 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
1098 	return RB_FIND(subsystem_tree, &tgt->subsystems, &subsystem);
1099 }
1100 
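
/*
 * Hedged usage sketch (added for illustration, not part of the upstream file): looking up a
 * subsystem by NQN; the example NQN is arbitrary. The string must be NUL-terminated within
 * SPDK_NVMF_NQN_MAX_LEN + 1 bytes, as checked above.
 *
 *	struct spdk_nvmf_subsystem *subsystem;
 *
 *	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, "nqn.2016-06.io.spdk:cnode1");
 *	if (subsystem == NULL) {
 *		SPDK_ERRLOG("Subsystem not found\n");
 *	}
 */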
1101 struct spdk_nvmf_transport *
1102 spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
1103 {
1104 	struct spdk_nvmf_transport *transport;
1105 
1106 	TAILQ_FOREACH(transport, &tgt->transports, link) {
1107 		if (!strncasecmp(transport->ops->name, transport_name, SPDK_NVMF_TRSTRING_MAX_LEN)) {
1108 			return transport;
1109 		}
1110 	}
1111 	return NULL;
1112 }
1113 
1114 struct nvmf_new_qpair_ctx {
1115 	struct spdk_nvmf_qpair *qpair;
1116 	struct spdk_nvmf_poll_group *group;
1117 };
1118 
1119 static void
1120 _nvmf_poll_group_add(void *_ctx)
1121 {
1122 	struct nvmf_new_qpair_ctx *ctx = _ctx;
1123 	struct spdk_nvmf_qpair *qpair = ctx->qpair;
1124 	struct spdk_nvmf_poll_group *group = ctx->group;
1125 
1126 	free(_ctx);
1127 
1128 	if (spdk_nvmf_poll_group_add(group, qpair) != 0) {
1129 		SPDK_ERRLOG("Unable to add the qpair to a poll group.\n");
1130 		spdk_nvmf_qpair_disconnect(qpair);
1131 	}
1132 }
1133 
1134 void
1135 spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair)
1136 {
1137 	struct spdk_nvmf_poll_group *group;
1138 	struct nvmf_new_qpair_ctx *ctx;
1139 
1140 	group = spdk_nvmf_get_optimal_poll_group(qpair);
1141 	if (group == NULL) {
1142 		if (tgt->next_poll_group == NULL) {
1143 			tgt->next_poll_group = TAILQ_FIRST(&tgt->poll_groups);
1144 			if (tgt->next_poll_group == NULL) {
1145 				SPDK_ERRLOG("No poll groups exist.\n");
1146 				spdk_nvmf_qpair_disconnect(qpair);
1147 				return;
1148 			}
1149 		}
1150 		group = tgt->next_poll_group;
1151 		tgt->next_poll_group = TAILQ_NEXT(group, link);
1152 	}
1153 
1154 	ctx = calloc(1, sizeof(*ctx));
1155 	if (!ctx) {
1156 		SPDK_ERRLOG("Unable to send message to poll group.\n");
1157 		spdk_nvmf_qpair_disconnect(qpair);
1158 		return;
1159 	}
1160 
1161 	ctx->qpair = qpair;
1162 	ctx->group = group;
1163 
1164 	pthread_mutex_lock(&group->mutex);
1165 	group->current_unassociated_qpairs++;
1166 	pthread_mutex_unlock(&group->mutex);
1167 
1168 	spdk_thread_send_msg(group->thread, _nvmf_poll_group_add, ctx);
1169 }
1170 
1171 struct spdk_nvmf_poll_group *
1172 spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt)
1173 {
1174 	struct spdk_io_channel *ch;
1175 
1176 	ch = spdk_get_io_channel(tgt);
1177 	if (!ch) {
1178 		SPDK_ERRLOG("Unable to get I/O channel for target\n");
1179 		return NULL;
1180 	}
1181 
1182 	return spdk_io_channel_get_ctx(ch);
1183 }
1184 
1185 void
1186 spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group,
1187 			     spdk_nvmf_poll_group_destroy_done_fn cb_fn,
1188 			     void *cb_arg)
1189 {
1190 	assert(group->destroy_cb_fn == NULL);
1191 	group->destroy_cb_fn = cb_fn;
1192 	group->destroy_cb_arg = cb_arg;
1193 
1194 	/* This function will put the io_channel associated with this poll group */
1195 	nvmf_tgt_destroy_poll_group_qpairs(group);
1196 }
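
/*
 * Hedged usage sketch (added for illustration, not part of the upstream file): a poll group
 * is the target's per-thread I/O channel context, so each SPDK thread that will service
 * qpairs obtains its own and destroys it on that same thread; destroy_done (hypothetical)
 * has the spdk_nvmf_poll_group_destroy_done_fn signature used above.
 *
 *	struct spdk_nvmf_poll_group *group;
 *
 *	group = spdk_nvmf_poll_group_create(tgt);
 *	if (group == NULL) {
 *		SPDK_ERRLOG("Failed to create poll group\n");
 *	}
 *
 *	...
 *	spdk_nvmf_poll_group_destroy(group, destroy_done, NULL);
 */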
1197 
1198 int
1199 spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
1200 			 struct spdk_nvmf_qpair *qpair)
1201 {
1202 	int rc;
1203 	struct spdk_nvmf_transport_poll_group *tgroup;
1204 
1205 	TAILQ_INIT(&qpair->outstanding);
1206 	qpair->group = group;
1207 	qpair->ctrlr = NULL;
1208 	qpair->disconnect_started = false;
1209 
1210 	tgroup = nvmf_get_transport_poll_group(group, qpair->transport);
1211 	if (tgroup == NULL) {
1212 		return -1;
1213 	}
1214 
1215 	rc = nvmf_transport_poll_group_add(tgroup, qpair);
1216 
1217 	/* We add the qpair to the group only if it is successfully added into the tgroup */
1218 	if (rc == 0) {
1219 		SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_add_qpair, qpair, spdk_thread_get_id(group->thread));
1220 		TAILQ_INSERT_TAIL(&group->qpairs, qpair, link);
1221 		nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_CONNECTING);
1222 	}
1223 
1224 	return rc;
1225 }
1226 
1227 static void
1228 _nvmf_ctrlr_destruct(void *ctx)
1229 {
1230 	struct spdk_nvmf_ctrlr *ctrlr = ctx;
1231 
1232 	nvmf_ctrlr_destruct(ctrlr);
1233 }
1234 
1235 static void
1236 _nvmf_ctrlr_free_from_qpair(void *ctx)
1237 {
1238 	struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
1239 	struct spdk_nvmf_ctrlr *ctrlr = qpair_ctx->ctrlr;
1240 	uint32_t count;
1241 
1242 	spdk_bit_array_clear(ctrlr->qpair_mask, qpair_ctx->qid);
1243 	SPDK_DEBUGLOG(nvmf, "qpair_mask cleared, qid %u\n", qpair_ctx->qid);
1244 	count = spdk_bit_array_count_set(ctrlr->qpair_mask);
1245 	if (count == 0) {
1246 		assert(!ctrlr->in_destruct);
1247 		SPDK_DEBUGLOG(nvmf, "Last qpair %u, destroy ctrlr 0x%hx\n", qpair_ctx->qid, ctrlr->cntlid);
1248 		ctrlr->in_destruct = true;
1249 		spdk_thread_send_msg(ctrlr->subsys->thread, _nvmf_ctrlr_destruct, ctrlr);
1250 	}
1251 	free(qpair_ctx);
1252 }
1253 
1254 static void
1255 _nvmf_transport_qpair_fini_complete(void *cb_ctx)
1256 {
1257 	struct nvmf_qpair_disconnect_ctx *qpair_ctx = cb_ctx;
1258 	struct spdk_nvmf_ctrlr *ctrlr;
1259 
1260 	ctrlr = qpair_ctx->ctrlr;
1261 	SPDK_DEBUGLOG(nvmf, "Finish destroying qid %u\n", qpair_ctx->qid);
1262 
1263 	if (ctrlr) {
1264 		if (qpair_ctx->qid == 0) {
1265 			/* The admin qpair is being removed, so set the pointer to NULL.
1266 			 * This is safe because we are on the ctrlr thread now; the admin qpair's thread is
1267 			 * the same as the controller's thread. */
1268 			assert(ctrlr->thread == spdk_get_thread());
1269 			ctrlr->admin_qpair = NULL;
1270 		}
1271 		/* Free qpair id from controller's bit mask and destroy the controller if it is the last qpair */
1272 		if (ctrlr->thread) {
1273 			spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_free_from_qpair, qpair_ctx);
1274 		} else {
1275 			_nvmf_ctrlr_free_from_qpair(qpair_ctx);
1276 		}
1277 	} else {
1278 		free(qpair_ctx);
1279 	}
1280 }
1281 
1282 void
1283 spdk_nvmf_poll_group_remove(struct spdk_nvmf_qpair *qpair)
1284 {
1285 	struct spdk_nvmf_transport_poll_group *tgroup;
1286 	int rc;
1287 
1288 	SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_remove_qpair, qpair,
1289 				 spdk_thread_get_id(qpair->group->thread));
1290 	nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ERROR);
1291 
1292 	/* Find the tgroup and remove the qpair from the tgroup */
1293 	tgroup = nvmf_get_transport_poll_group(qpair->group, qpair->transport);
1294 	if (tgroup != NULL) {
1295 		rc = nvmf_transport_poll_group_remove(tgroup, qpair);
1296 		if (rc && (rc != ENOTSUP)) {
1297 			SPDK_ERRLOG("Cannot remove qpair=%p from transport group=%p\n",
1298 				    qpair, tgroup);
1299 		}
1300 	}
1301 
1302 	TAILQ_REMOVE(&qpair->group->qpairs, qpair, link);
1303 	qpair->group = NULL;
1304 }
1305 
1306 static void
1307 _nvmf_qpair_sgroup_req_clean(struct spdk_nvmf_subsystem_poll_group *sgroup,
1308 			     const struct spdk_nvmf_qpair *qpair)
1309 {
1310 	struct spdk_nvmf_request *req, *tmp;
1311 	TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
1312 		if (req->qpair == qpair) {
1313 			TAILQ_REMOVE(&sgroup->queued, req, link);
1314 			if (nvmf_transport_req_free(req)) {
1315 				SPDK_ERRLOG("Transport request free error!\n");
1316 			}
1317 		}
1318 	}
1319 }
1320 
1321 static void
1322 _nvmf_qpair_destroy(void *ctx, int status)
1323 {
1324 	struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
1325 	struct spdk_nvmf_qpair *qpair = qpair_ctx->qpair;
1326 	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
1327 	struct spdk_nvmf_subsystem_poll_group *sgroup;
1328 	uint32_t sid;
1329 
1330 	assert(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING);
1331 	qpair_ctx->qid = qpair->qid;
1332 
1333 	if (qpair->connect_received) {
1334 		if (0 == qpair->qid) {
1335 			assert(qpair->group->stat.current_admin_qpairs > 0);
1336 			qpair->group->stat.current_admin_qpairs--;
1337 		} else {
1338 			assert(qpair->group->stat.current_io_qpairs > 0);
1339 			qpair->group->stat.current_io_qpairs--;
1340 		}
1341 	} else {
1342 		pthread_mutex_lock(&qpair->group->mutex);
1343 		qpair->group->current_unassociated_qpairs--;
1344 		pthread_mutex_unlock(&qpair->group->mutex);
1345 	}
1346 
1347 	if (ctrlr) {
1348 		sgroup = &qpair->group->sgroups[ctrlr->subsys->id];
1349 		_nvmf_qpair_sgroup_req_clean(sgroup, qpair);
1350 	} else {
1351 		for (sid = 0; sid < qpair->group->num_sgroups; sid++) {
1352 			sgroup = &qpair->group->sgroups[sid];
1353 			assert(sgroup != NULL);
1354 			_nvmf_qpair_sgroup_req_clean(sgroup, qpair);
1355 		}
1356 	}
1357 
1358 	nvmf_qpair_auth_destroy(qpair);
1359 	qpair_ctx->ctrlr = ctrlr;
1360 	spdk_nvmf_poll_group_remove(qpair);
1361 	nvmf_transport_qpair_fini(qpair, _nvmf_transport_qpair_fini_complete, qpair_ctx);
1362 }
1363 
1364 static void
1365 _nvmf_qpair_disconnect_msg(void *ctx)
1366 {
1367 	struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
1368 
1369 	spdk_nvmf_qpair_disconnect(qpair_ctx->qpair);
1370 	free(ctx);
1371 }
1372 
1373 int
1374 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair)
1375 {
1376 	struct spdk_nvmf_poll_group *group = qpair->group;
1377 	struct nvmf_qpair_disconnect_ctx *qpair_ctx;
1378 
1379 	if (__atomic_test_and_set(&qpair->disconnect_started, __ATOMIC_RELAXED)) {
1380 		return -EINPROGRESS;
1381 	}
1382 
1383 	/* If we get a qpair in the uninitialized state, we can just destroy it immediately */
1384 	if (qpair->state == SPDK_NVMF_QPAIR_UNINITIALIZED) {
1385 		nvmf_transport_qpair_fini(qpair, NULL, NULL);
1386 		return 0;
1387 	}
1388 
1389 	assert(group != NULL);
1390 	if (spdk_get_thread() != group->thread) {
1391 		/* clear the atomic so we can set it on the next call on the proper thread. */
1392 		__atomic_clear(&qpair->disconnect_started, __ATOMIC_RELAXED);
1393 		qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx));
1394 		if (!qpair_ctx) {
1395 			SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n");
1396 			return -ENOMEM;
1397 		}
1398 		qpair_ctx->qpair = qpair;
1399 		spdk_thread_send_msg(group->thread, _nvmf_qpair_disconnect_msg, qpair_ctx);
1400 		return 0;
1401 	}
1402 
1403 	SPDK_DTRACE_PROBE2_TICKS(nvmf_qpair_disconnect, qpair, spdk_thread_get_id(group->thread));
1404 	assert(spdk_nvmf_qpair_is_active(qpair));
1405 	nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_DEACTIVATING);
1406 
1407 	qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx));
1408 	if (!qpair_ctx) {
1409 		SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n");
1410 		return -ENOMEM;
1411 	}
1412 
1413 	qpair_ctx->qpair = qpair;
1414 
1415 	/* Check for outstanding I/O */
1416 	if (!TAILQ_EMPTY(&qpair->outstanding)) {
1417 		SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_drain_qpair, qpair, spdk_thread_get_id(group->thread));
1418 		qpair->state_cb = _nvmf_qpair_destroy;
1419 		qpair->state_cb_arg = qpair_ctx;
1420 		nvmf_qpair_abort_pending_zcopy_reqs(qpair);
1421 		nvmf_qpair_free_aer(qpair);
1422 		return 0;
1423 	}
1424 
1425 	_nvmf_qpair_destroy(qpair_ctx, 0);
1426 
1427 	return 0;
1428 }
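
/*
 * Hedged usage sketch (added for illustration, not part of the upstream file): disconnecting
 * a qpair. A return value of -EINPROGRESS means another caller already started the
 * disconnect and is normally treated like success, mirroring the loops elsewhere in this
 * file.
 *
 *	rc = spdk_nvmf_qpair_disconnect(qpair);
 *	if (rc != 0 && rc != -EINPROGRESS) {
 *		SPDK_ERRLOG("Failed to disconnect qpair: %d\n", rc);
 *	}
 */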
1429 
1430 int
1431 spdk_nvmf_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
1432 			      struct spdk_nvme_transport_id *trid)
1433 {
1434 	memset(trid, 0, sizeof(*trid));
1435 	return nvmf_transport_qpair_get_peer_trid(qpair, trid);
1436 }
1437 
1438 int
1439 spdk_nvmf_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
1440 			       struct spdk_nvme_transport_id *trid)
1441 {
1442 	memset(trid, 0, sizeof(*trid));
1443 	return nvmf_transport_qpair_get_local_trid(qpair, trid);
1444 }
1445 
1446 int
1447 spdk_nvmf_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
1448 				struct spdk_nvme_transport_id *trid)
1449 {
1450 	memset(trid, 0, sizeof(*trid));
1451 	return nvmf_transport_qpair_get_listen_trid(qpair, trid);
1452 }
1453 
1454 static int
1455 poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
1456 			    struct spdk_nvmf_subsystem *subsystem)
1457 {
1458 	struct spdk_nvmf_subsystem_poll_group *sgroup;
1459 	uint32_t i, j;
1460 	struct spdk_nvmf_ns *ns;
1461 	struct spdk_nvmf_registrant *reg, *tmp;
1462 	struct spdk_io_channel *ch;
1463 	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
1464 	struct spdk_nvmf_ctrlr *ctrlr;
1465 	bool ns_changed;
1466 
1467 	/* Make sure our poll group has memory for this subsystem allocated */
1468 	if (subsystem->id >= group->num_sgroups) {
1469 		return -ENOMEM;
1470 	}
1471 
1472 	sgroup = &group->sgroups[subsystem->id];
1473 
1474 	/* Make sure the array of namespace information is the correct size */
1475 	if (sgroup->num_ns == 0 && subsystem->max_nsid > 0) {
1476 		/* First allocation */
1477 		sgroup->ns_info = calloc(subsystem->max_nsid, sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
1478 		if (!sgroup->ns_info) {
1479 			return -ENOMEM;
1480 		}
1481 		sgroup->num_ns = subsystem->max_nsid;
1482 	}
1483 
1484 	ns_changed = false;
1485 
1486 	/* Detect bdevs that were added or removed */
1487 	for (i = 0; i < sgroup->num_ns; i++) {
1488 		ns = subsystem->ns[i];
1489 		ns_info = &sgroup->ns_info[i];
1490 		ch = ns_info->channel;
1491 
1492 		if (ns == NULL && ch == NULL) {
1493 			/* Both NULL. Leave empty */
1494 		} else if (ns == NULL && ch != NULL) {
1495 			/* There was a channel here, but the namespace is gone. */
1496 			ns_changed = true;
1497 			spdk_put_io_channel(ch);
1498 			ns_info->channel = NULL;
1499 		} else if (ns != NULL && ch == NULL) {
1500 			/* A namespace appeared but there is no channel yet */
1501 			ns_changed = true;
1502 			ch = spdk_bdev_get_io_channel(ns->desc);
1503 			if (ch == NULL) {
1504 				SPDK_ERRLOG("Could not allocate I/O channel.\n");
1505 				return -ENOMEM;
1506 			}
1507 			ns_info->channel = ch;
1508 		} else if (spdk_uuid_compare(&ns_info->uuid, spdk_bdev_get_uuid(ns->bdev)) != 0) {
1509 			/* A namespace was here before, but was replaced by a new one. */
1510 			ns_changed = true;
1511 			spdk_put_io_channel(ns_info->channel);
1512 			memset(ns_info, 0, sizeof(*ns_info));
1513 
1514 			ch = spdk_bdev_get_io_channel(ns->desc);
1515 			if (ch == NULL) {
1516 				SPDK_ERRLOG("Could not allocate I/O channel.\n");
1517 				return -ENOMEM;
1518 			}
1519 			ns_info->channel = ch;
1520 		} else if (ns_info->num_blocks != spdk_bdev_get_num_blocks(ns->bdev)) {
1521 			/* Namespace is still there but size has changed */
1522 			SPDK_DEBUGLOG(nvmf, "Namespace resized: subsystem_id %u,"
1523 				      " nsid %u, pg %p, old %" PRIu64 ", new %" PRIu64 "\n",
1524 				      subsystem->id,
1525 				      ns->nsid,
1526 				      group,
1527 				      ns_info->num_blocks,
1528 				      spdk_bdev_get_num_blocks(ns->bdev));
1529 			ns_changed = true;
1530 		} else if (ns_info->anagrpid != ns->anagrpid) {
1531 			/* Namespace is still there but ANA group ID has changed */
1532 			SPDK_DEBUGLOG(nvmf, "ANA group ID changed: subsystem_id %u,"
1533 				      "nsid %u, pg %p, old %u, new %u\n",
1534 				      subsystem->id,
1535 				      ns->nsid,
1536 				      group,
1537 				      ns_info->anagrpid,
1538 				      ns->anagrpid);
1539 			ns_changed = true;
1540 		}
1541 
1542 		if (ns == NULL) {
1543 			memset(ns_info, 0, sizeof(*ns_info));
1544 		} else {
1545 			ns_info->uuid = *spdk_bdev_get_uuid(ns->bdev);
1546 			ns_info->num_blocks = spdk_bdev_get_num_blocks(ns->bdev);
1547 			ns_info->anagrpid = ns->anagrpid;
1548 			ns_info->crkey = ns->crkey;
1549 			ns_info->rtype = ns->rtype;
1550 			if (ns->holder) {
1551 				ns_info->holder_id = ns->holder->hostid;
1552 			}
1553 
1554 			memset(&ns_info->reg_hostid, 0, SPDK_NVMF_MAX_NUM_REGISTRANTS * sizeof(struct spdk_uuid));
1555 			j = 0;
1556 			TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
1557 				if (j >= SPDK_NVMF_MAX_NUM_REGISTRANTS) {
1558 					SPDK_ERRLOG("A maximum of %u registrants is supported.\n", SPDK_NVMF_MAX_NUM_REGISTRANTS);
1559 					return -EINVAL;
1560 				}
1561 				ns_info->reg_hostid[j++] = reg->hostid;
1562 			}
1563 		}
1564 	}
1565 
1566 	if (ns_changed) {
1567 		TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
1568 			if (ctrlr->thread != spdk_get_thread()) {
1569 				continue;
1570 			}
1571 			/* It is possible that a ctrlr was added but the admin_qpair hasn't been
1572 			 * assigned yet.
1573 			 */
1574 			if (!ctrlr->admin_qpair) {
1575 				continue;
1576 			}
1577 			if (ctrlr->admin_qpair->group == group) {
1578 				nvmf_ctrlr_async_event_ns_notice(ctrlr);
1579 				nvmf_ctrlr_async_event_ana_change_notice(ctrlr);
1580 			}
1581 		}
1582 	}
1583 
1584 	return 0;
1585 }
1586 
1587 int
1588 nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
1589 				 struct spdk_nvmf_subsystem *subsystem)
1590 {
1591 	return poll_group_update_subsystem(group, subsystem);
1592 }
1593 
1594 int
1595 nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
1596 			      struct spdk_nvmf_subsystem *subsystem,
1597 			      spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
1598 {
1599 	int rc = 0;
1600 	struct spdk_nvmf_subsystem_poll_group *sgroup = &group->sgroups[subsystem->id];
1601 	struct spdk_nvmf_request *req, *tmp;
1602 	uint32_t i;
1603 
1604 	if (!TAILQ_EMPTY(&sgroup->queued)) {
1605 		SPDK_ERRLOG("sgroup->queued not empty when adding subsystem\n");
1606 		TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
1607 			TAILQ_REMOVE(&sgroup->queued, req, link);
1608 			if (nvmf_transport_req_free(req)) {
1609 				SPDK_ERRLOG("Transport request free error!\n");
1610 			}
1611 		}
1612 	}
1613 
1614 	rc = poll_group_update_subsystem(group, subsystem);
1615 	if (rc) {
1616 		nvmf_poll_group_remove_subsystem(group, subsystem, NULL, NULL);
1617 		goto fini;
1618 	}
1619 
1620 	sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1621 
1622 	for (i = 0; i < sgroup->num_ns; i++) {
1623 		sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1624 	}
1625 
1626 fini:
1627 	if (cb_fn) {
1628 		cb_fn(cb_arg, rc);
1629 	}
1630 
1631 	SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_add_subsystem, spdk_thread_get_id(group->thread),
1632 				 subsystem->subnqn);
1633 
1634 	return rc;
1635 }
1636 
1637 static void
1638 _nvmf_poll_group_remove_subsystem_cb(void *ctx, int status)
1639 {
1640 	struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
1641 	struct spdk_nvmf_subsystem *subsystem;
1642 	struct spdk_nvmf_poll_group *group;
1643 	struct spdk_nvmf_subsystem_poll_group *sgroup;
1644 	spdk_nvmf_poll_group_mod_done cpl_fn = NULL;
1645 	void *cpl_ctx = NULL;
1646 	uint32_t nsid;
1647 
1648 	group = qpair_ctx->group;
1649 	subsystem = qpair_ctx->subsystem;
1650 	cpl_fn = qpair_ctx->cpl_fn;
1651 	cpl_ctx = qpair_ctx->cpl_ctx;
1652 	sgroup = &group->sgroups[subsystem->id];
1653 
1654 	if (status) {
1655 		goto fini;
1656 	}
1657 
1658 	for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
1659 		if (sgroup->ns_info[nsid].channel) {
1660 			spdk_put_io_channel(sgroup->ns_info[nsid].channel);
1661 			sgroup->ns_info[nsid].channel = NULL;
1662 		}
1663 	}
1664 
1665 	sgroup->num_ns = 0;
1666 	free(sgroup->ns_info);
1667 	sgroup->ns_info = NULL;
1668 fini:
1669 	free(qpair_ctx);
1670 	if (cpl_fn) {
1671 		cpl_fn(cpl_ctx, status);
1672 	}
1673 }
1674 
1675 static void nvmf_poll_group_remove_subsystem_msg(void *ctx);
1676 
1677 static void
1678 nvmf_poll_group_remove_subsystem_msg(void *ctx)
1679 {
1680 	struct spdk_nvmf_qpair *qpair, *qpair_tmp;
1681 	struct spdk_nvmf_subsystem *subsystem;
1682 	struct spdk_nvmf_poll_group *group;
1683 	struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
1684 	bool qpairs_found = false;
1685 	int rc = 0;
1686 
1687 	group = qpair_ctx->group;
1688 	subsystem = qpair_ctx->subsystem;
1689 
1690 	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, qpair_tmp) {
1691 		if ((qpair->ctrlr != NULL) && (qpair->ctrlr->subsys == subsystem)) {
1692 			qpairs_found = true;
1693 			rc = spdk_nvmf_qpair_disconnect(qpair);
1694 			if (rc && rc != -EINPROGRESS) {
1695 				break;
1696 			}
1697 		}
1698 	}
1699 
1700 	if (!qpairs_found) {
1701 		_nvmf_poll_group_remove_subsystem_cb(ctx, 0);
1702 		return;
1703 	}
1704 
1705 	/* Some qpairs are in the process of being disconnected. Send a message and try to remove them again. */
1706 	spdk_thread_send_msg(spdk_get_thread(), nvmf_poll_group_remove_subsystem_msg, ctx);
1707 }
1708 
1709 void
1710 nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
1711 				 struct spdk_nvmf_subsystem *subsystem,
1712 				 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
1713 {
1714 	struct spdk_nvmf_subsystem_poll_group *sgroup;
1715 	struct nvmf_qpair_disconnect_many_ctx *ctx;
1716 	uint32_t i;
1717 
1718 	SPDK_DTRACE_PROBE3_TICKS(nvmf_poll_group_remove_subsystem, group, spdk_thread_get_id(group->thread),
1719 				 subsystem->subnqn);
1720 
1721 	ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx));
1722 	if (!ctx) {
1723 		SPDK_ERRLOG("Unable to allocate memory for the context to remove the subsystem from the poll group\n");
1724 		if (cb_fn) {
1725 			cb_fn(cb_arg, -1);
1726 		}
1727 		return;
1728 	}
1729 
1730 	ctx->group = group;
1731 	ctx->subsystem = subsystem;
1732 	ctx->cpl_fn = cb_fn;
1733 	ctx->cpl_ctx = cb_arg;
1734 
1735 	sgroup = &group->sgroups[subsystem->id];
1736 	sgroup->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
1737 
1738 	for (i = 0; i < sgroup->num_ns; i++) {
1739 		sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
1740 	}
1741 
1742 	nvmf_poll_group_remove_subsystem_msg(ctx);
1743 }
1744 
1745 void
1746 nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
1747 				struct spdk_nvmf_subsystem *subsystem,
1748 				uint32_t nsid,
1749 				spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
1750 {
1751 	struct spdk_nvmf_subsystem_poll_group *sgroup;
1752 	struct spdk_nvmf_subsystem_pg_ns_info *ns_info = NULL;
1753 	int rc = 0;
1754 	uint32_t i;
1755 
1756 	if (subsystem->id >= group->num_sgroups) {
1757 		rc = -1;
1758 		goto fini;
1759 	}
1760 
1761 	sgroup = &group->sgroups[subsystem->id];
1762 	if (sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSED) {
1763 		goto fini;
1764 	}
1765 	sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSING;
1766 
1767 	if (nsid == SPDK_NVME_GLOBAL_NS_TAG) {
1768 		for (i = 0; i < sgroup->num_ns; i++) {
1769 			ns_info = &sgroup->ns_info[i];
1770 			ns_info->state = SPDK_NVMF_SUBSYSTEM_PAUSING;
1771 		}
1772 	} else {
1773 		/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
1774 		if (nsid - 1 < sgroup->num_ns) {
1775 			ns_info  = &sgroup->ns_info[nsid - 1];
1776 			ns_info->state = SPDK_NVMF_SUBSYSTEM_PAUSING;
1777 		}
1778 	}
1779 
1780 	if (sgroup->mgmt_io_outstanding > 0) {
1781 		assert(sgroup->cb_fn == NULL);
1782 		sgroup->cb_fn = cb_fn;
1783 		assert(sgroup->cb_arg == NULL);
1784 		sgroup->cb_arg = cb_arg;
1785 		return;
1786 	}
1787 
1788 	if (nsid == SPDK_NVME_GLOBAL_NS_TAG) {
1789 		for (i = 0; i < sgroup->num_ns; i++) {
1790 			ns_info = &sgroup->ns_info[i];
1791 
1792 			if (ns_info->io_outstanding > 0) {
1793 				assert(sgroup->cb_fn == NULL);
1794 				sgroup->cb_fn = cb_fn;
1795 				assert(sgroup->cb_arg == NULL);
1796 				sgroup->cb_arg = cb_arg;
1797 				return;
1798 			}
1799 		}
1800 	} else {
1801 		if (ns_info != NULL && ns_info->io_outstanding > 0) {
1802 			assert(sgroup->cb_fn == NULL);
1803 			sgroup->cb_fn = cb_fn;
1804 			assert(sgroup->cb_arg == NULL);
1805 			sgroup->cb_arg = cb_arg;
1806 			return;
1807 		}
1808 	}
1809 
1810 	assert(sgroup->mgmt_io_outstanding == 0);
1811 	sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
1812 fini:
1813 	if (cb_fn) {
1814 		cb_fn(cb_arg, rc);
1815 	}
1816 }
1817 
1818 void
1819 nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
1820 				 struct spdk_nvmf_subsystem *subsystem,
1821 				 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
1822 {
1823 	struct spdk_nvmf_request *req, *tmp;
1824 	struct spdk_nvmf_subsystem_poll_group *sgroup;
1825 	int rc = 0;
1826 	uint32_t i;
1827 
1828 	if (subsystem->id >= group->num_sgroups) {
1829 		rc = -1;
1830 		goto fini;
1831 	}
1832 
1833 	sgroup = &group->sgroups[subsystem->id];
1834 
1835 	if (sgroup->state == SPDK_NVMF_SUBSYSTEM_ACTIVE) {
1836 		goto fini;
1837 	}
1838 
1839 	rc = poll_group_update_subsystem(group, subsystem);
1840 	if (rc) {
1841 		goto fini;
1842 	}
1843 
1844 	for (i = 0; i < sgroup->num_ns; i++) {
1845 		sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1846 	}
1847 
1848 	sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1849 
1850 	/* Release all queued requests */
1851 	TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
1852 		TAILQ_REMOVE(&sgroup->queued, req, link);
1853 		if (spdk_nvmf_request_using_zcopy(req)) {
1854 			spdk_nvmf_request_zcopy_start(req);
1855 		} else {
1856 			spdk_nvmf_request_exec(req);
1857 		}
1858 
1859 	}
1860 fini:
1861 	if (cb_fn) {
1862 		cb_fn(cb_arg, rc);
1863 	}
1864 }
1865 
1866 
1867 struct spdk_nvmf_poll_group *
1868 spdk_nvmf_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
1869 {
1870 	struct spdk_nvmf_transport_poll_group *tgroup;
1871 
1872 	tgroup = nvmf_transport_get_optimal_poll_group(qpair->transport, qpair);
1873 
1874 	if (tgroup == NULL) {
1875 		return NULL;
1876 	}
1877 
1878 	return tgroup->group;
1879 }
1880 
1881 void
1882 spdk_nvmf_poll_group_dump_stat(struct spdk_nvmf_poll_group *group, struct spdk_json_write_ctx *w)
1883 {
1884 	struct spdk_nvmf_transport_poll_group *tgroup;
1885 
1886 	spdk_json_write_object_begin(w);
1887 
1888 	spdk_json_write_named_string(w, "name", spdk_thread_get_name(spdk_get_thread()));
1889 	spdk_json_write_named_uint32(w, "admin_qpairs", group->stat.admin_qpairs);
1890 	spdk_json_write_named_uint32(w, "io_qpairs", group->stat.io_qpairs);
1891 	spdk_json_write_named_uint32(w, "current_admin_qpairs", group->stat.current_admin_qpairs);
1892 	spdk_json_write_named_uint32(w, "current_io_qpairs", group->stat.current_io_qpairs);
1893 	spdk_json_write_named_uint64(w, "pending_bdev_io", group->stat.pending_bdev_io);
1894 	spdk_json_write_named_uint64(w, "completed_nvme_io", group->stat.completed_nvme_io);
1895 
1896 	spdk_json_write_named_array_begin(w, "transports");
1897 
1898 	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
1899 		spdk_json_write_object_begin(w);
1900 		/*
1901 		 * The trtype field intentionally contains a transport name as this is more informative.
1902 		 * The field has not been renamed for backward compatibility.
1903 		 */
1904 		spdk_json_write_named_string(w, "trtype", spdk_nvmf_get_transport_name(tgroup->transport));
1905 
1906 		if (tgroup->transport->ops->poll_group_dump_stat) {
1907 			tgroup->transport->ops->poll_group_dump_stat(tgroup, w);
1908 		}
1909 
1910 		spdk_json_write_object_end(w);
1911 	}
1912 
1913 	spdk_json_write_array_end(w);
1914 	spdk_json_write_object_end(w);
1915 }
1916