/* xref: /spdk/lib/nvmf/subsystem.c (revision 3ef479ab163d96d6fd7f28b256d2a93ab42afd8e) */
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"
#include "transport.h"

#include "spdk/likely.h"
#include "spdk/string.h"
#include "spdk/trace.h"
#include "spdk/nvmf_spec.h"

#include "spdk_internal/bdev.h"
#include "spdk_internal/log.h"

static bool
spdk_nvmf_valid_nqn(const char *nqn)
{
	size_t len;

	len = strlen(nqn);
	if (len > SPDK_NVMF_NQN_MAX_LEN) {
		SPDK_ERRLOG("Invalid NQN \"%s\": length %zu > max %d\n", nqn, len, SPDK_NVMF_NQN_MAX_LEN);
		return false;
	}

	if (strncmp(nqn, "nqn.", 4) != 0) {
		SPDK_ERRLOG("Invalid NQN \"%s\": NQN must begin with \"nqn.\".\n", nqn);
		return false;
	}

	/* yyyy-mm. date code. Cast to unsigned char so isdigit() is never passed a negative value. */
	if (!(isdigit((unsigned char)nqn[4]) && isdigit((unsigned char)nqn[5]) &&
	      isdigit((unsigned char)nqn[6]) && isdigit((unsigned char)nqn[7]) &&
	      nqn[8] == '-' && isdigit((unsigned char)nqn[9]) &&
	      isdigit((unsigned char)nqn[10]) && nqn[11] == '.')) {
		SPDK_ERRLOG("Invalid date code in NQN \"%s\"\n", nqn);
		return false;
	}

	return true;
}
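
/*
 * Example (illustrative): an NQN that passes the checks above has the form
 * "nqn.yyyy-mm.reverse-domain[:user-string]", e.g. "nqn.2016-06.io.spdk:cnode1".
 * Only the "nqn." prefix and the date code are validated here; the remainder
 * of the string is accepted as-is up to SPDK_NVMF_NQN_MAX_LEN characters.
 */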

static void
spdk_nvmf_subsystem_create_done(void *io_device, void *ctx, int status)
{
}

static int
spdk_nvmf_subsystem_add_to_poll_group(void *io_device,
				      struct spdk_io_channel *ch,
				      void *ctx)
{
	struct spdk_nvmf_poll_group *group;
	struct spdk_nvmf_subsystem *subsystem = ctx;

	group = spdk_io_channel_get_ctx(ch);

	return spdk_nvmf_poll_group_add_subsystem(group, subsystem);
}

struct spdk_nvmf_subsystem *
spdk_nvmf_create_subsystem(struct spdk_nvmf_tgt *tgt,
			   const char *nqn,
			   enum spdk_nvmf_subtype type,
			   uint32_t num_ns)
{
	struct spdk_nvmf_subsystem	*subsystem;
	uint32_t			sid;

	if (!spdk_nvmf_valid_nqn(nqn)) {
		return NULL;
	}

	if (type == SPDK_NVMF_SUBTYPE_DISCOVERY && num_ns != 0) {
		SPDK_ERRLOG("Discovery subsystem cannot have namespaces.\n");
		return NULL;
	}

	/* Find a free subsystem id (sid) */
	for (sid = 0; sid < tgt->max_sid; sid++) {
		if (tgt->subsystems[sid] == NULL) {
			break;
		}
	}
	if (sid == tgt->max_sid) {
		struct spdk_nvmf_subsystem **subsys_array;
		/* No free slots. Add more. */
		tgt->max_sid++;
		subsys_array = realloc(tgt->subsystems, tgt->max_sid * sizeof(struct spdk_nvmf_subsystem *));
		if (!subsys_array) {
			tgt->max_sid--;
			return NULL;
		}
		tgt->subsystems = subsys_array;
	}

	subsystem = calloc(1, sizeof(struct spdk_nvmf_subsystem));
	if (subsystem == NULL) {
		return NULL;
	}

	subsystem->tgt = tgt;
	subsystem->id = sid;
	subsystem->subtype = type;
	subsystem->max_nsid = num_ns;
	subsystem->num_allocated_nsid = 0;
	subsystem->next_cntlid = 0;
	snprintf(subsystem->subnqn, sizeof(subsystem->subnqn), "%s", nqn);
	TAILQ_INIT(&subsystem->listeners);
	TAILQ_INIT(&subsystem->hosts);
	TAILQ_INIT(&subsystem->ctrlrs);

	if (num_ns != 0) {
		subsystem->ns = calloc(num_ns, sizeof(struct spdk_nvmf_ns));
		if (subsystem->ns == NULL) {
			SPDK_ERRLOG("Namespace memory allocation failed\n");
			free(subsystem);
			return NULL;
		}
	}

	tgt->subsystems[sid] = subsystem;
	tgt->discovery_genctr++;

	/* Send a message to each poll group to notify it that a new subsystem
	 * is available.
	 * TODO: This call does not currently allow the user to wait for these
	 * messages to propagate. It also does not protect against two calls
	 * to this function overlapping
	 */
	spdk_for_each_channel(tgt,
			      spdk_nvmf_subsystem_add_to_poll_group,
			      subsystem,
			      spdk_nvmf_subsystem_create_done);

	return subsystem;
}
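
/*
 * Illustrative usage sketch (not part of this file's API surface): a caller
 * might create an NVMe subsystem as shown below. The NQN and namespace count
 * are hypothetical example values.
 *
 *	struct spdk_nvmf_subsystem *subsystem;
 *
 *	subsystem = spdk_nvmf_create_subsystem(tgt, "nqn.2016-06.io.spdk:cnode1",
 *					       SPDK_NVMF_SUBTYPE_NVME, 32);
 *	if (subsystem == NULL) {
 *		SPDK_ERRLOG("Failed to create subsystem\n");
 *	}
 */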

static void
spdk_nvmf_subsystem_delete_done(void *io_device, void *ctx, int status)
{
	struct spdk_nvmf_tgt *tgt = io_device;
	struct spdk_nvmf_subsystem *subsystem = ctx;
	struct spdk_nvmf_ns *ns;

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		if (ns->bdev == NULL) {
			continue;
		}
		spdk_bdev_close(ns->desc);
	}

	free(subsystem->ns);

	tgt->subsystems[subsystem->id] = NULL;
	tgt->discovery_genctr++;

	free(subsystem);
}

static int
spdk_nvmf_subsystem_remove_from_poll_group(void *io_device,
		struct spdk_io_channel *ch,
		void *ctx)
{
	struct spdk_nvmf_poll_group *group;
	struct spdk_nvmf_subsystem *subsystem = ctx;

	group = spdk_io_channel_get_ctx(ch);

	return spdk_nvmf_poll_group_remove_subsystem(group, subsystem);
}

void
spdk_nvmf_delete_subsystem(struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_listener	*listener, *listener_tmp;
	struct spdk_nvmf_host		*host, *host_tmp;
	struct spdk_nvmf_ctrlr		*ctrlr, *ctrlr_tmp;

	if (!subsystem) {
		return;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "subsystem is %p\n", subsystem);

	TAILQ_FOREACH_SAFE(listener, &subsystem->listeners, link, listener_tmp) {
		TAILQ_REMOVE(&subsystem->listeners, listener, link);
		free(listener);
	}

	TAILQ_FOREACH_SAFE(host, &subsystem->hosts, link, host_tmp) {
		TAILQ_REMOVE(&subsystem->hosts, host, link);
		free(host->nqn);
		free(host);
	}

	TAILQ_FOREACH_SAFE(ctrlr, &subsystem->ctrlrs, link, ctrlr_tmp) {
		spdk_nvmf_ctrlr_destruct(ctrlr);
	}

	/* Send a message to each poll group to notify it that a subsystem
	 * is no longer available.
	 * TODO: This call does not currently allow the user to wait for these
	 * messages to propagate. It also does not protect against two calls
	 * to this function overlapping
	 */
	spdk_for_each_channel(subsystem->tgt,
			      spdk_nvmf_subsystem_remove_from_poll_group,
			      subsystem,
			      spdk_nvmf_subsystem_delete_done);
}
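
/*
 * Illustrative shutdown sketch (hypothetical caller): deletion is asynchronous
 * because final teardown happens in spdk_nvmf_subsystem_delete_done() once
 * every poll group has processed the removal message. A caller tearing down
 * the whole target might simply walk the subsystem array:
 *
 *	uint32_t sid;
 *
 *	for (sid = 0; sid < tgt->max_sid; sid++) {
 *		spdk_nvmf_delete_subsystem(tgt->subsystems[sid]);
 *	}
 *
 * (spdk_nvmf_delete_subsystem() tolerates NULL entries.)
 */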


int
spdk_nvmf_subsystem_add_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	struct spdk_nvmf_host *host;

	if (!spdk_nvmf_valid_nqn(hostnqn)) {
		return -1;
	}

	host = calloc(1, sizeof(*host));
	if (!host) {
		return -1;
	}
	host->nqn = strdup(hostnqn);
	if (!host->nqn) {
		free(host);
		return -1;
	}

	TAILQ_INSERT_HEAD(&subsystem->hosts, host, link);
	subsystem->tgt->discovery_genctr++;

	return 0;
}

void
spdk_nvmf_subsystem_set_allow_any_host(struct spdk_nvmf_subsystem *subsystem, bool allow_any_host)
{
	subsystem->allow_any_host = allow_any_host;
}

bool
spdk_nvmf_subsystem_get_allow_any_host(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->allow_any_host;
}

bool
spdk_nvmf_subsystem_host_allowed(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	struct spdk_nvmf_host *host;

	if (!hostnqn) {
		return false;
	}

	if (subsystem->allow_any_host) {
		return true;
	}

	TAILQ_FOREACH(host, &subsystem->hosts, link) {
		if (strcmp(hostnqn, host->nqn) == 0) {
			return true;
		}
	}

	return false;
}
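
/*
 * Illustrative usage sketch: the host allow list above is typically populated
 * at configuration time and consulted when a host connects. The host NQN
 * below is a hypothetical example value.
 *
 *	spdk_nvmf_subsystem_set_allow_any_host(subsystem, false);
 *	if (spdk_nvmf_subsystem_add_host(subsystem, "nqn.2016-06.io.spdk:host1") != 0) {
 *		SPDK_ERRLOG("Failed to add host\n");
 *	}
 *
 *	// Later, while validating a CONNECT command:
 *	if (!spdk_nvmf_subsystem_host_allowed(subsystem, hostnqn)) {
 *		// reject the connection
 *	}
 */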

struct spdk_nvmf_host *
spdk_nvmf_subsystem_get_first_host(struct spdk_nvmf_subsystem *subsystem)
{
	return TAILQ_FIRST(&subsystem->hosts);
}


struct spdk_nvmf_host *
spdk_nvmf_subsystem_get_next_host(struct spdk_nvmf_subsystem *subsystem,
				  struct spdk_nvmf_host *prev_host)
{
	return TAILQ_NEXT(prev_host, link);
}

const char *
spdk_nvmf_host_get_nqn(struct spdk_nvmf_host *host)
{
	return host->nqn;
}
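
/*
 * Illustrative iteration sketch: the accessors above form a simple cursor API,
 * e.g. for listing the allowed hosts of a subsystem:
 *
 *	struct spdk_nvmf_host *host;
 *
 *	for (host = spdk_nvmf_subsystem_get_first_host(subsystem); host != NULL;
 *	     host = spdk_nvmf_subsystem_get_next_host(subsystem, host)) {
 *		printf("host NQN: %s\n", spdk_nvmf_host_get_nqn(host));
 *	}
 */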

int
spdk_nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem,
				 struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_listener *listener;

	transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, trid->trtype);
	if (transport == NULL) {
		SPDK_ERRLOG("Unknown transport type %d\n", trid->trtype);
		return -1;
	}

	listener = calloc(1, sizeof(*listener));
	if (!listener) {
		return -1;
	}

	listener->trid = *trid;
	listener->transport = transport;

	TAILQ_INSERT_HEAD(&subsystem->listeners, listener, link);

	return 0;
}
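
/*
 * Illustrative usage sketch: the transport ID is filled in by the caller
 * before the listener is added. The address and service ID below are
 * hypothetical example values.
 *
 *	struct spdk_nvme_transport_id trid = {};
 *
 *	trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
 *	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
 *	snprintf(trid.traddr, sizeof(trid.traddr), "192.168.1.10");
 *	snprintf(trid.trsvcid, sizeof(trid.trsvcid), "4420");
 *
 *	if (spdk_nvmf_subsystem_add_listener(subsystem, &trid) != 0) {
 *		SPDK_ERRLOG("Failed to add listener\n");
 *	}
 */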

/*
 * TODO: this is the whitelist and will be called during connection setup
 */
bool
spdk_nvmf_subsystem_listener_allowed(struct spdk_nvmf_subsystem *subsystem,
				     struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_listener *listener;

	if (TAILQ_EMPTY(&subsystem->listeners)) {
		return true;
	}

	TAILQ_FOREACH(listener, &subsystem->listeners, link) {
		if (spdk_nvme_transport_id_compare(&listener->trid, trid) == 0) {
			return true;
		}
	}

	return false;
}

struct spdk_nvmf_listener *
spdk_nvmf_subsystem_get_first_listener(struct spdk_nvmf_subsystem *subsystem)
{
	return TAILQ_FIRST(&subsystem->listeners);
}

struct spdk_nvmf_listener *
spdk_nvmf_subsystem_get_next_listener(struct spdk_nvmf_subsystem *subsystem,
				      struct spdk_nvmf_listener *prev_listener)
{
	return TAILQ_NEXT(prev_listener, link);
}


const struct spdk_nvme_transport_id *
spdk_nvmf_listener_get_trid(struct spdk_nvmf_listener *listener)
{
	return &listener->trid;
}
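
/*
 * Illustrative iteration sketch, mirroring the host accessors above:
 *
 *	struct spdk_nvmf_listener *listener;
 *	const struct spdk_nvme_transport_id *trid;
 *
 *	for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem); listener != NULL;
 *	     listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) {
 *		trid = spdk_nvmf_listener_get_trid(listener);
 *		printf("listening on %s:%s\n", trid->traddr, trid->trsvcid);
 *	}
 */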

struct spdk_nvmf_subsystem_add_ns_ctx {
	struct spdk_nvmf_subsystem	*subsystem;
	struct spdk_nvmf_ns		*ns;
};

static void
spdk_nvmf_subsystem_add_ns_done(void *io_device, void *ctx, int status)
{
	free(ctx);
}

static int
spdk_nvmf_subsystem_ns_update_poll_group(void *io_device,
		struct spdk_io_channel *ch,
		void *c)
{
	struct spdk_nvmf_poll_group *group;
	struct spdk_nvmf_subsystem_add_ns_ctx *ctx = c;

	group = spdk_io_channel_get_ctx(ch);

	return spdk_nvmf_poll_group_add_ns(group, ctx->subsystem, ctx->ns);
}

uint32_t
spdk_nvmf_subsystem_add_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_bdev *bdev,
			   uint32_t nsid)
{
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_subsystem_add_ns_ctx *ctx;
	uint32_t i;
	int rc;

	if (nsid == SPDK_NVME_GLOBAL_NS_TAG) {
		SPDK_ERRLOG("Invalid NSID %" PRIu32 "\n", nsid);
		return 0;
	}

	if (nsid > subsystem->max_nsid ||
	    (nsid == 0 && subsystem->num_allocated_nsid == subsystem->max_nsid)) {
		struct spdk_nvmf_ns *new_ns_array;
		uint32_t new_max_nsid;

		if (nsid > subsystem->max_nsid) {
			new_max_nsid = nsid;
		} else {
			new_max_nsid = subsystem->max_nsid + 1;
		}

		if (!TAILQ_EMPTY(&subsystem->ctrlrs)) {
			SPDK_ERRLOG("Can't extend NSID range with active connections\n");
			return 0;
		}

		new_ns_array = realloc(subsystem->ns, sizeof(struct spdk_nvmf_ns) * new_max_nsid);
		if (new_ns_array == NULL) {
			SPDK_ERRLOG("Memory allocation error while resizing namespace array.\n");
			return 0;
		}

		memset(new_ns_array + subsystem->max_nsid, 0,
		       sizeof(struct spdk_nvmf_ns) * (new_max_nsid - subsystem->max_nsid));
		subsystem->ns = new_ns_array;
		subsystem->max_nsid = new_max_nsid;
	}

	if (nsid == 0) {
		/* NSID not specified - find a free index */
		for (i = 0; i < subsystem->max_nsid; i++) {
			if (_spdk_nvmf_subsystem_get_ns(subsystem, i + 1) == NULL) {
				nsid = i + 1;
				break;
			}
		}
		if (nsid == 0) {
			SPDK_ERRLOG("All available NSIDs in use\n");
			return 0;
		}
	} else {
		/* Specific NSID requested */
		if (_spdk_nvmf_subsystem_get_ns(subsystem, nsid)) {
			SPDK_ERRLOG("Requested NSID %" PRIu32 " already in use\n", nsid);
			return 0;
		}
	}

	ns = &subsystem->ns[nsid - 1];
	memset(ns, 0, sizeof(*ns));
	ns->bdev = bdev;
	ns->id = nsid;
	rc = spdk_bdev_open(bdev, true, NULL, NULL, &ns->desc);
	if (rc != 0) {
		SPDK_ERRLOG("Subsystem %s: bdev %s cannot be opened, error=%d\n",
			    subsystem->subnqn, spdk_bdev_get_name(bdev), rc);
		return 0;
	}
	ns->allocated = true;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Subsystem %s: bdev %s assigned nsid %" PRIu32 "\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem),
		      spdk_bdev_get_name(bdev),
		      nsid);

	subsystem->max_nsid = spdk_max(subsystem->max_nsid, nsid);
	subsystem->num_allocated_nsid++;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		/* Report failure as NSID 0 (this function returns uint32_t, not -errno). */
		spdk_bdev_close(ns->desc);
		ns->allocated = false;
		subsystem->num_allocated_nsid--;
		return 0;
	}
	ctx->subsystem = subsystem;
	ctx->ns = ns;

	/* Send a message to each poll group to notify it that a new namespace
	 * is available.
	 * TODO: This call does not currently allow the user to wait for these
	 * messages to propagate. It also does not protect against two calls
	 * to this function overlapping
	 */
	spdk_for_each_channel(subsystem->tgt,
			      spdk_nvmf_subsystem_ns_update_poll_group,
			      ctx,
			      spdk_nvmf_subsystem_add_ns_done);

	return nsid;
}
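
/*
 * Illustrative usage sketch: a caller typically looks up a bdev by name and
 * lets the subsystem pick the NSID by passing 0. The bdev name "Malloc0" is a
 * hypothetical example.
 *
 *	struct spdk_bdev *bdev;
 *	uint32_t nsid;
 *
 *	bdev = spdk_bdev_get_by_name("Malloc0");
 *	if (bdev != NULL) {
 *		nsid = spdk_nvmf_subsystem_add_ns(subsystem, bdev, 0);
 *		if (nsid == 0) {
 *			SPDK_ERRLOG("Failed to add namespace\n");
 *		}
 *	}
 */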

static uint32_t
spdk_nvmf_subsystem_get_next_allocated_nsid(struct spdk_nvmf_subsystem *subsystem,
		uint32_t prev_nsid)
{
	uint32_t nsid;

	if (prev_nsid >= subsystem->max_nsid) {
		return 0;
	}

	for (nsid = prev_nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1].allocated) {
			return nsid;
		}
	}

	return 0;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	uint32_t first_nsid;

	first_nsid = spdk_nvmf_subsystem_get_next_allocated_nsid(subsystem, 0);
	return _spdk_nvmf_subsystem_get_ns(subsystem, first_nsid);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t next_nsid;

	next_nsid = spdk_nvmf_subsystem_get_next_allocated_nsid(subsystem, prev_ns->id);
	return _spdk_nvmf_subsystem_get_ns(subsystem, next_nsid);
}
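
/*
 * Illustrative iteration sketch: the pair of accessors above is used the same
 * way spdk_nvmf_subsystem_delete_done() walks the namespaces, e.g.:
 *
 *	struct spdk_nvmf_ns *ns;
 *
 *	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
 *	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
 *		printf("nsid %" PRIu32 " -> bdev %s\n", spdk_nvmf_ns_get_id(ns),
 *		       spdk_bdev_get_name(spdk_nvmf_ns_get_bdev(ns)));
 *	}
 */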

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	return _spdk_nvmf_subsystem_get_ns(subsystem, nsid);
}

uint32_t
spdk_nvmf_ns_get_id(const struct spdk_nvmf_ns *ns)
{
	return ns->id;
}

struct spdk_bdev *
spdk_nvmf_ns_get_bdev(struct spdk_nvmf_ns *ns)
{
	return ns->bdev;
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

int
spdk_nvmf_subsystem_set_sn(struct spdk_nvmf_subsystem *subsystem, const char *sn)
{
	size_t len, max_len;

	max_len = sizeof(subsystem->sn) - 1;
	len = strlen(sn);
	if (len > max_len) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Invalid sn \"%s\": length %zu > max %zu\n",
			      sn, len, max_len);
		return -1;
	}

	snprintf(subsystem->sn, sizeof(subsystem->sn), "%s", sn);

	return 0;
}
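
/*
 * Illustrative usage sketch: the serial number is usually set right after the
 * subsystem is created. "SPDK00000000000001" is a hypothetical example value
 * that fits within sizeof(subsystem->sn) - 1 characters.
 *
 *	if (spdk_nvmf_subsystem_set_sn(subsystem, "SPDK00000000000001") != 0) {
 *		SPDK_ERRLOG("Serial number too long\n");
 *	}
 */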

const char *
spdk_nvmf_subsystem_get_nqn(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->subnqn;
}

/* Workaround for astyle formatting bug */
typedef enum spdk_nvmf_subtype nvmf_subtype_t;

nvmf_subtype_t
spdk_nvmf_subsystem_get_type(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->subtype;
}

static uint16_t
spdk_nvmf_subsystem_gen_cntlid(struct spdk_nvmf_subsystem *subsystem)
{
	int count;

	/*
	 * In the worst case, we might have to try all CNTLID values between 1 and 0xFFF0 - 1
	 * before we find one that is unused (or find that all values are in use).
	 */
	for (count = 0; count < 0xFFF0 - 1; count++) {
		subsystem->next_cntlid++;
		if (subsystem->next_cntlid >= 0xFFF0) {
			/* The spec reserves cntlid values in the range FFF0h to FFFFh. */
			subsystem->next_cntlid = 1;
		}

		/* Check if a controller with this cntlid currently exists. */
		if (spdk_nvmf_subsystem_get_ctrlr(subsystem, subsystem->next_cntlid) == NULL) {
			/* Found unused cntlid */
			return subsystem->next_cntlid;
		}
	}

	/* All valid cntlid values are in use. */
	return 0xFFFF;
}

int
spdk_nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr)
{
	ctrlr->cntlid = spdk_nvmf_subsystem_gen_cntlid(subsystem);
	if (ctrlr->cntlid == 0xFFFF) {
		/* Unable to get a cntlid */
		SPDK_ERRLOG("Reached max simultaneous ctrlrs\n");
		return -EBUSY;
	}

	TAILQ_INSERT_TAIL(&subsystem->ctrlrs, ctrlr, link);

	return 0;
}

void
spdk_nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
				 struct spdk_nvmf_ctrlr *ctrlr)
{
	assert(subsystem == ctrlr->subsys);
	TAILQ_REMOVE(&subsystem->ctrlrs, ctrlr, link);
}

struct spdk_nvmf_ctrlr *
spdk_nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid)
{
	struct spdk_nvmf_ctrlr *ctrlr;

	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
		if (ctrlr->cntlid == cntlid) {
			return ctrlr;
		}
	}

	return NULL;
}