/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"
#include "transport.h"

#include "spdk/likely.h"
#include "spdk/string.h"
#include "spdk/trace.h"
#include "spdk/nvmf_spec.h"

#include "spdk_internal/bdev.h"
#include "spdk_internal/log.h"

static bool
spdk_nvmf_valid_nqn(const char *nqn)
{
	size_t len;

	len = strlen(nqn);
	if (len > SPDK_NVMF_NQN_MAX_LEN) {
		SPDK_ERRLOG("Invalid NQN \"%s\": length %zu > max %d\n", nqn, len, SPDK_NVMF_NQN_MAX_LEN);
		return false;
	}

	if (strncmp(nqn, "nqn.", 4) != 0) {
		SPDK_ERRLOG("Invalid NQN \"%s\": NQN must begin with \"nqn.\".\n", nqn);
		return false;
	}

	/* yyyy-mm. */
	if (!(isdigit(nqn[4]) && isdigit(nqn[5]) && isdigit(nqn[6]) && isdigit(nqn[7]) &&
	      nqn[8] == '-' && isdigit(nqn[9]) && isdigit(nqn[10]) && nqn[11] == '.')) {
		SPDK_ERRLOG("Invalid date code in NQN \"%s\"\n", nqn);
		return false;
	}

	return true;
}

static void
spdk_nvmf_subsystem_create_done(struct spdk_io_channel_iter *i, int status)
{
}

static void
spdk_nvmf_subsystem_add_to_poll_group(struct spdk_io_channel_iter *i)
{
	struct spdk_nvmf_subsystem *subsystem = spdk_io_channel_iter_get_ctx(i);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
	int rc;

	rc = spdk_nvmf_poll_group_add_subsystem(group, subsystem);
	spdk_for_each_channel_continue(i, rc);
}
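/*
 * Create a new subsystem in the given target. The caller supplies the NQN,
 * the subsystem type, and the number of namespaces to reserve space for.
 * Returns the new subsystem on success, or NULL if the NQN is invalid, a
 * discovery subsystem is requested with namespaces, or memory allocation
 * fails. Each poll group is notified of the new subsystem asynchronously
 * via spdk_for_each_channel().
 */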
struct spdk_nvmf_subsystem *
spdk_nvmf_create_subsystem(struct spdk_nvmf_tgt *tgt,
			   const char *nqn,
			   enum spdk_nvmf_subtype type,
			   uint32_t num_ns)
{
	struct spdk_nvmf_subsystem *subsystem;
	uint32_t sid;

	if (!spdk_nvmf_valid_nqn(nqn)) {
		return NULL;
	}

	if (type == SPDK_NVMF_SUBTYPE_DISCOVERY && num_ns != 0) {
		SPDK_ERRLOG("Discovery subsystem cannot have namespaces.\n");
		return NULL;
	}

	/* Find a free subsystem id (sid) */
	for (sid = 0; sid < tgt->max_sid; sid++) {
		if (tgt->subsystems[sid] == NULL) {
			break;
		}
	}
	if (sid == tgt->max_sid) {
		struct spdk_nvmf_subsystem **subsys_array;
		/* No free slots. Add more. */
		tgt->max_sid++;
		subsys_array = realloc(tgt->subsystems, tgt->max_sid * sizeof(struct spdk_nvmf_subsystem *));
		if (!subsys_array) {
			tgt->max_sid--;
			return NULL;
		}
		tgt->subsystems = subsys_array;
	}

	subsystem = calloc(1, sizeof(struct spdk_nvmf_subsystem));
	if (subsystem == NULL) {
		return NULL;
	}

	subsystem->tgt = tgt;
	subsystem->id = sid;
	subsystem->subtype = type;
	subsystem->max_nsid = num_ns;
	subsystem->num_allocated_nsid = 0;
	subsystem->next_cntlid = 0;
	snprintf(subsystem->subnqn, sizeof(subsystem->subnqn), "%s", nqn);
	TAILQ_INIT(&subsystem->listeners);
	TAILQ_INIT(&subsystem->hosts);
	TAILQ_INIT(&subsystem->ctrlrs);

	if (num_ns != 0) {
		subsystem->ns = calloc(num_ns, sizeof(struct spdk_nvmf_ns));
		if (subsystem->ns == NULL) {
			SPDK_ERRLOG("Namespace memory allocation failed\n");
			free(subsystem);
			return NULL;
		}
	}

	tgt->subsystems[sid] = subsystem;
	tgt->discovery_genctr++;

	/* Send a message to each poll group to notify it that a new subsystem
	 * is available.
	 * TODO: This call does not currently allow the user to wait for these
	 * messages to propagate. It also does not protect against two calls
	 * to this function overlapping.
	 */
	spdk_for_each_channel(tgt,
			      spdk_nvmf_subsystem_add_to_poll_group,
			      subsystem,
			      spdk_nvmf_subsystem_create_done);

	return subsystem;
}

static void
spdk_nvmf_subsystem_delete_done(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_nvmf_tgt *tgt = spdk_io_channel_iter_get_io_device(i);
	struct spdk_nvmf_subsystem *subsystem = spdk_io_channel_iter_get_ctx(i);
	struct spdk_nvmf_ns *ns;

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		if (ns->bdev == NULL) {
			continue;
		}
		spdk_bdev_close(ns->desc);
	}

	free(subsystem->ns);

	tgt->subsystems[subsystem->id] = NULL;
	tgt->discovery_genctr++;

	free(subsystem);
}

static void
spdk_nvmf_subsystem_remove_from_poll_group(struct spdk_io_channel_iter *i)
{
	struct spdk_nvmf_subsystem *subsystem = spdk_io_channel_iter_get_ctx(i);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
	int rc;

	rc = spdk_nvmf_poll_group_remove_subsystem(group, subsystem);

	spdk_for_each_channel_continue(i, rc);
}
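/*
 * Tear down a subsystem: free its listeners and hosts, destruct any active
 * controllers, and notify each poll group that the subsystem is going away.
 * The namespace array and the subsystem itself are freed in
 * spdk_nvmf_subsystem_delete_done() once every poll group has been updated.
 */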
void
spdk_nvmf_delete_subsystem(struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_listener *listener, *listener_tmp;
	struct spdk_nvmf_host *host, *host_tmp;
	struct spdk_nvmf_ctrlr *ctrlr, *ctrlr_tmp;

	if (!subsystem) {
		return;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "subsystem is %p\n", subsystem);

	TAILQ_FOREACH_SAFE(listener, &subsystem->listeners, link, listener_tmp) {
		TAILQ_REMOVE(&subsystem->listeners, listener, link);
		free(listener);
	}

	TAILQ_FOREACH_SAFE(host, &subsystem->hosts, link, host_tmp) {
		TAILQ_REMOVE(&subsystem->hosts, host, link);
		free(host->nqn);
		free(host);
	}

	TAILQ_FOREACH_SAFE(ctrlr, &subsystem->ctrlrs, link, ctrlr_tmp) {
		spdk_nvmf_ctrlr_destruct(ctrlr);
	}

	/* Send a message to each poll group to notify it that a subsystem
	 * is no longer available.
	 * TODO: This call does not currently allow the user to wait for these
	 * messages to propagate. It also does not protect against two calls
	 * to this function overlapping.
	 */
	spdk_for_each_channel(subsystem->tgt,
			      spdk_nvmf_subsystem_remove_from_poll_group,
			      subsystem,
			      spdk_nvmf_subsystem_delete_done);
}
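/*
 * Iterate over the subsystems in a target. Unused slots in the target's
 * subsystem array are skipped, so callers see only subsystems that
 * currently exist.
 */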
struct spdk_nvmf_subsystem *
spdk_nvmf_subsystem_get_first(struct spdk_nvmf_tgt *tgt)
{
	struct spdk_nvmf_subsystem *subsystem;
	uint32_t sid;

	for (sid = 0; sid < tgt->max_sid; sid++) {
		subsystem = tgt->subsystems[sid];
		if (subsystem) {
			return subsystem;
		}
	}

	return NULL;
}

struct spdk_nvmf_subsystem *
spdk_nvmf_subsystem_get_next(struct spdk_nvmf_subsystem *subsystem)
{
	uint32_t sid;
	struct spdk_nvmf_tgt *tgt;

	if (!subsystem) {
		return NULL;
	}

	tgt = subsystem->tgt;

	for (sid = subsystem->id + 1; sid < tgt->max_sid; sid++) {
		subsystem = tgt->subsystems[sid];
		if (subsystem) {
			return subsystem;
		}
	}

	return NULL;
}

int
spdk_nvmf_subsystem_add_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	struct spdk_nvmf_host *host;

	if (!spdk_nvmf_valid_nqn(hostnqn)) {
		return -1;
	}

	host = calloc(1, sizeof(*host));
	if (!host) {
		return -1;
	}
	host->nqn = strdup(hostnqn);
	if (!host->nqn) {
		free(host);
		return -1;
	}

	TAILQ_INSERT_HEAD(&subsystem->hosts, host, link);
	subsystem->tgt->discovery_genctr++;

	return 0;
}

void
spdk_nvmf_subsystem_set_allow_any_host(struct spdk_nvmf_subsystem *subsystem, bool allow_any_host)
{
	subsystem->allow_any_host = allow_any_host;
}

bool
spdk_nvmf_subsystem_get_allow_any_host(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->allow_any_host;
}

bool
spdk_nvmf_subsystem_host_allowed(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	struct spdk_nvmf_host *host;

	if (!hostnqn) {
		return false;
	}

	if (subsystem->allow_any_host) {
		return true;
	}

	TAILQ_FOREACH(host, &subsystem->hosts, link) {
		if (strcmp(hostnqn, host->nqn) == 0) {
			return true;
		}
	}

	return false;
}

struct spdk_nvmf_host *
spdk_nvmf_subsystem_get_first_host(struct spdk_nvmf_subsystem *subsystem)
{
	return TAILQ_FIRST(&subsystem->hosts);
}

struct spdk_nvmf_host *
spdk_nvmf_subsystem_get_next_host(struct spdk_nvmf_subsystem *subsystem,
				  struct spdk_nvmf_host *prev_host)
{
	return TAILQ_NEXT(prev_host, link);
}

const char *
spdk_nvmf_host_get_nqn(struct spdk_nvmf_host *host)
{
	return host->nqn;
}
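/*
 * Record a listen address for this subsystem. The transport for the given
 * trtype must already have been added to the target; otherwise the call
 * fails. Listeners act as a whitelist that is checked by
 * spdk_nvmf_subsystem_listener_allowed() during connection setup.
 */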
int
spdk_nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem,
				 struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_listener *listener;

	transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, trid->trtype);
	if (transport == NULL) {
		SPDK_ERRLOG("Unknown transport type %d\n", trid->trtype);
		return -1;
	}

	listener = calloc(1, sizeof(*listener));
	if (!listener) {
		return -1;
	}

	listener->trid = *trid;
	listener->transport = transport;

	TAILQ_INSERT_HEAD(&subsystem->listeners, listener, link);

	return 0;
}

/*
 * TODO: this is the whitelist and will be called during connection setup
 */
bool
spdk_nvmf_subsystem_listener_allowed(struct spdk_nvmf_subsystem *subsystem,
				     struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_listener *listener;

	if (TAILQ_EMPTY(&subsystem->listeners)) {
		return true;
	}

	TAILQ_FOREACH(listener, &subsystem->listeners, link) {
		if (spdk_nvme_transport_id_compare(&listener->trid, trid) == 0) {
			return true;
		}
	}

	return false;
}

struct spdk_nvmf_listener *
spdk_nvmf_subsystem_get_first_listener(struct spdk_nvmf_subsystem *subsystem)
{
	return TAILQ_FIRST(&subsystem->listeners);
}

struct spdk_nvmf_listener *
spdk_nvmf_subsystem_get_next_listener(struct spdk_nvmf_subsystem *subsystem,
				      struct spdk_nvmf_listener *prev_listener)
{
	return TAILQ_NEXT(prev_listener, link);
}

const struct spdk_nvme_transport_id *
spdk_nvmf_listener_get_trid(struct spdk_nvmf_listener *listener)
{
	return &listener->trid;
}

static void
spdk_nvmf_subsystem_add_ns_done(struct spdk_io_channel_iter *i, int status)
{
	return;
}

static void
spdk_nvmf_subsystem_ns_update_poll_group(struct spdk_io_channel_iter *i)
{
	struct spdk_nvmf_ns *ns = spdk_io_channel_iter_get_ctx(i);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
	int rc;

	rc = spdk_nvmf_poll_group_add_ns(group, ns->subsystem, ns);
	spdk_for_each_channel_continue(i, rc);
}
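/*
 * Attach a bdev to this subsystem as a namespace. If nsid is 0, the lowest
 * unused NSID is chosen; otherwise the requested NSID is used if it is free.
 * The namespace array is grown as needed, but the NSID range cannot be
 * extended while controllers are connected. Returns the assigned NSID, or 0
 * on failure.
 */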
uint32_t
spdk_nvmf_subsystem_add_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_bdev *bdev,
			   uint32_t nsid)
{
	struct spdk_nvmf_ns *ns;
	uint32_t i;
	int rc;

	if (nsid == SPDK_NVME_GLOBAL_NS_TAG) {
		SPDK_ERRLOG("Invalid NSID %" PRIu32 "\n", nsid);
		return 0;
	}

	if (nsid > subsystem->max_nsid ||
	    (nsid == 0 && subsystem->num_allocated_nsid == subsystem->max_nsid)) {
		struct spdk_nvmf_ns *new_ns_array;
		uint32_t new_max_nsid;

		if (nsid > subsystem->max_nsid) {
			new_max_nsid = nsid;
		} else {
			new_max_nsid = subsystem->max_nsid + 1;
		}

		if (!TAILQ_EMPTY(&subsystem->ctrlrs)) {
			SPDK_ERRLOG("Can't extend NSID range with active connections\n");
			return 0;
		}

		new_ns_array = realloc(subsystem->ns, sizeof(struct spdk_nvmf_ns) * new_max_nsid);
		if (new_ns_array == NULL) {
			SPDK_ERRLOG("Memory allocation error while resizing namespace array.\n");
			return 0;
		}

		memset(new_ns_array + subsystem->max_nsid, 0,
		       sizeof(struct spdk_nvmf_ns) * (new_max_nsid - subsystem->max_nsid));
		subsystem->ns = new_ns_array;
		subsystem->max_nsid = new_max_nsid;
	}

	if (nsid == 0) {
		/* NSID not specified - find a free index */
		for (i = 0; i < subsystem->max_nsid; i++) {
			if (_spdk_nvmf_subsystem_get_ns(subsystem, i + 1) == NULL) {
				nsid = i + 1;
				break;
			}
		}
		if (nsid == 0) {
			SPDK_ERRLOG("All available NSIDs in use\n");
			return 0;
		}
	} else {
		/* Specific NSID requested */
		if (_spdk_nvmf_subsystem_get_ns(subsystem, nsid)) {
			SPDK_ERRLOG("Requested NSID %" PRIu32 " already in use\n", nsid);
			return 0;
		}
	}

	ns = &subsystem->ns[nsid - 1];
	memset(ns, 0, sizeof(*ns));
	ns->bdev = bdev;
	ns->id = nsid;
	ns->subsystem = subsystem;
	rc = spdk_bdev_open(bdev, true, NULL, NULL, &ns->desc);
	if (rc != 0) {
		SPDK_ERRLOG("Subsystem %s: bdev %s cannot be opened, error=%d\n",
			    subsystem->subnqn, spdk_bdev_get_name(bdev), rc);
		return 0;
	}
	ns->allocated = true;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Subsystem %s: bdev %s assigned nsid %" PRIu32 "\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem),
		      spdk_bdev_get_name(bdev),
		      nsid);

	subsystem->max_nsid = spdk_max(subsystem->max_nsid, nsid);
	subsystem->num_allocated_nsid++;

	/* Send a message to each poll group to notify it that a new namespace
	 * is available.
	 * TODO: This call does not currently allow the user to wait for these
	 * messages to propagate. It also does not protect against two calls
	 * to this function overlapping.
	 */
	spdk_for_each_channel(subsystem->tgt,
			      spdk_nvmf_subsystem_ns_update_poll_group,
			      ns,
			      spdk_nvmf_subsystem_add_ns_done);

	return nsid;
}

static uint32_t
spdk_nvmf_subsystem_get_next_allocated_nsid(struct spdk_nvmf_subsystem *subsystem,
		uint32_t prev_nsid)
{
	uint32_t nsid;

	if (prev_nsid >= subsystem->max_nsid) {
		return 0;
	}

	for (nsid = prev_nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1].allocated) {
			return nsid;
		}
	}

	return 0;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	uint32_t first_nsid;

	first_nsid = spdk_nvmf_subsystem_get_next_allocated_nsid(subsystem, 0);
	return _spdk_nvmf_subsystem_get_ns(subsystem, first_nsid);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t next_nsid;

	next_nsid = spdk_nvmf_subsystem_get_next_allocated_nsid(subsystem, prev_ns->id);
	return _spdk_nvmf_subsystem_get_ns(subsystem, next_nsid);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	return _spdk_nvmf_subsystem_get_ns(subsystem, nsid);
}

uint32_t
spdk_nvmf_ns_get_id(const struct spdk_nvmf_ns *ns)
{
	return ns->id;
}

struct spdk_bdev *
spdk_nvmf_ns_get_bdev(struct spdk_nvmf_ns *ns)
{
	return ns->bdev;
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

int
spdk_nvmf_subsystem_set_sn(struct spdk_nvmf_subsystem *subsystem, const char *sn)
{
	size_t len, max_len;

	max_len = sizeof(subsystem->sn) - 1;
	len = strlen(sn);
	if (len > max_len) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Invalid sn \"%s\": length %zu > max %zu\n",
			      sn, len, max_len);
		return -1;
	}

	snprintf(subsystem->sn, sizeof(subsystem->sn), "%s", sn);

	return 0;
}

const char *
spdk_nvmf_subsystem_get_nqn(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->subnqn;
}

/* Workaround for astyle formatting bug */
typedef enum spdk_nvmf_subtype nvmf_subtype_t;

nvmf_subtype_t
spdk_nvmf_subsystem_get_type(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->subtype;
}
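/*
 * Allocate the next unused controller ID (CNTLID) for this subsystem.
 * Values FFF0h through FFFFh are reserved by the spec, so the search wraps
 * within 1 to FFEFh. Returns 0xFFFF if every valid CNTLID is already in use.
 */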
static uint16_t
spdk_nvmf_subsystem_gen_cntlid(struct spdk_nvmf_subsystem *subsystem)
{
	int count;

	/*
	 * In the worst case, we might have to try all CNTLID values between 1 and 0xFFF0 - 1
	 * before we find one that is unused (or find that all values are in use).
	 */
	for (count = 0; count < 0xFFF0 - 1; count++) {
		subsystem->next_cntlid++;
		if (subsystem->next_cntlid >= 0xFFF0) {
			/* The spec reserves cntlid values in the range FFF0h to FFFFh. */
			subsystem->next_cntlid = 1;
		}

		/* Check if a controller with this cntlid currently exists. */
		if (spdk_nvmf_subsystem_get_ctrlr(subsystem, subsystem->next_cntlid) == NULL) {
			/* Found unused cntlid */
			return subsystem->next_cntlid;
		}
	}

	/* All valid cntlid values are in use. */
	return 0xFFFF;
}

int
spdk_nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr)
{
	ctrlr->cntlid = spdk_nvmf_subsystem_gen_cntlid(subsystem);
	if (ctrlr->cntlid == 0xFFFF) {
		/* Unable to get a cntlid */
		SPDK_ERRLOG("Reached max simultaneous ctrlrs\n");
		return -EBUSY;
	}

	TAILQ_INSERT_TAIL(&subsystem->ctrlrs, ctrlr, link);

	return 0;
}

void
spdk_nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
				 struct spdk_nvmf_ctrlr *ctrlr)
{
	assert(subsystem == ctrlr->subsys);
	TAILQ_REMOVE(&subsystem->ctrlrs, ctrlr, link);
}

struct spdk_nvmf_ctrlr *
spdk_nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid)
{
	struct spdk_nvmf_ctrlr *ctrlr;

	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
		if (ctrlr->cntlid == cntlid) {
			return ctrlr;
		}
	}

	return NULL;
}