1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2018-2019 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "spdk/bdev.h" 37 #include "spdk/bit_array.h" 38 #include "spdk/thread.h" 39 #include "spdk/nvmf.h" 40 #include "spdk/trace.h" 41 #include "spdk/endian.h" 42 #include "spdk/string.h" 43 44 #include "spdk/log.h" 45 46 #include "nvmf_internal.h" 47 #include "transport.h" 48 49 SPDK_LOG_REGISTER_COMPONENT(nvmf) 50 51 #define SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS 1024 52 #define SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US 10000 53 54 static TAILQ_HEAD(, spdk_nvmf_tgt) g_nvmf_tgts = TAILQ_HEAD_INITIALIZER(g_nvmf_tgts); 55 56 typedef void (*nvmf_qpair_disconnect_cpl)(void *ctx, int status); 57 static void nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf); 58 59 /* supplied to a single call to nvmf_qpair_disconnect */ 60 struct nvmf_qpair_disconnect_ctx { 61 struct spdk_nvmf_qpair *qpair; 62 struct spdk_nvmf_ctrlr *ctrlr; 63 nvmf_qpair_disconnect_cb cb_fn; 64 struct spdk_thread *thread; 65 void *ctx; 66 uint16_t qid; 67 }; 68 69 /* 70 * There are several times when we need to iterate through the list of all qpairs and selectively delete them. 71 * In order to do this sequentially without overlap, we must provide a context to recover the next qpair from 72 * to enable calling nvmf_qpair_disconnect on the next desired qpair. 73 */ 74 struct nvmf_qpair_disconnect_many_ctx { 75 struct spdk_nvmf_subsystem *subsystem; 76 struct spdk_nvmf_poll_group *group; 77 spdk_nvmf_poll_group_mod_done cpl_fn; 78 void *cpl_ctx; 79 }; 80 81 static void 82 nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair, 83 enum spdk_nvmf_qpair_state state) 84 { 85 assert(qpair != NULL); 86 assert(qpair->group->thread == spdk_get_thread()); 87 88 qpair->state = state; 89 } 90 91 static int 92 nvmf_poll_group_poll(void *ctx) 93 { 94 struct spdk_nvmf_poll_group *group = ctx; 95 int rc; 96 int count = 0; 97 struct spdk_nvmf_transport_poll_group *tgroup; 98 99 TAILQ_FOREACH(tgroup, &group->tgroups, link) { 100 rc = nvmf_transport_poll_group_poll(tgroup); 101 if (rc < 0) { 102 return SPDK_POLLER_BUSY; 103 } 104 count += rc; 105 } 106 107 return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; 108 } 109 110 static int 111 nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf) 112 { 113 struct spdk_nvmf_tgt *tgt = io_device; 114 struct spdk_nvmf_poll_group *group = ctx_buf; 115 struct spdk_nvmf_transport *transport; 116 uint32_t sid; 117 118 TAILQ_INIT(&group->tgroups); 119 TAILQ_INIT(&group->qpairs); 120 121 TAILQ_FOREACH(transport, &tgt->transports, link) { 122 nvmf_poll_group_add_transport(group, transport); 123 } 124 125 group->num_sgroups = tgt->max_subsystems; 126 group->sgroups = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem_poll_group)); 127 if (!group->sgroups) { 128 return -ENOMEM; 129 } 130 131 for (sid = 0; sid < tgt->max_subsystems; sid++) { 132 struct spdk_nvmf_subsystem *subsystem; 133 134 subsystem = tgt->subsystems[sid]; 135 if (!subsystem) { 136 continue; 137 } 138 139 if (nvmf_poll_group_add_subsystem(group, subsystem, NULL, NULL) != 0) { 140 nvmf_tgt_destroy_poll_group(io_device, ctx_buf); 141 return -1; 142 } 143 } 144 145 pthread_mutex_lock(&tgt->mutex); 146 TAILQ_INSERT_TAIL(&tgt->poll_groups, group, link); 147 pthread_mutex_unlock(&tgt->mutex); 148 149 group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0); 150 group->thread = spdk_get_thread(); 151 152 return 0; 153 } 154 155 static void 156 nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf) 157 { 158 struct spdk_nvmf_tgt *tgt = io_device; 159 struct spdk_nvmf_poll_group *group = ctx_buf; 160 struct spdk_nvmf_transport_poll_group *tgroup, *tmp; 161 struct spdk_nvmf_subsystem_poll_group *sgroup; 162 uint32_t sid, nsid; 163 164 pthread_mutex_lock(&tgt->mutex); 165 TAILQ_REMOVE(&tgt->poll_groups, group, link); 166 pthread_mutex_unlock(&tgt->mutex); 167 168 TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) { 169 TAILQ_REMOVE(&group->tgroups, tgroup, link); 170 nvmf_transport_poll_group_destroy(tgroup); 171 } 172 173 for (sid = 0; sid < group->num_sgroups; sid++) { 174 sgroup = &group->sgroups[sid]; 175 176 for (nsid = 0; nsid < sgroup->num_ns; nsid++) { 177 if (sgroup->ns_info[nsid].channel) { 178 spdk_put_io_channel(sgroup->ns_info[nsid].channel); 179 sgroup->ns_info[nsid].channel = NULL; 180 } 181 } 182 183 free(sgroup->ns_info); 184 } 185 186 free(group->sgroups); 187 188 spdk_poller_unregister(&group->poller); 189 190 if (group->destroy_cb_fn) { 191 group->destroy_cb_fn(group->destroy_cb_arg, 0); 192 } 193 } 194 195 static void 196 _nvmf_tgt_disconnect_next_qpair(void *ctx) 197 { 198 struct spdk_nvmf_qpair *qpair; 199 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; 200 struct spdk_nvmf_poll_group *group = qpair_ctx->group; 201 struct spdk_io_channel *ch; 202 int rc = 0; 203 204 qpair = TAILQ_FIRST(&group->qpairs); 205 206 if (qpair) { 207 rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_tgt_disconnect_next_qpair, ctx); 208 } 209 210 if (!qpair || rc != 0) { 211 /* When the refcount from the channels reaches 0, nvmf_tgt_destroy_poll_group will be called. */ 212 ch = spdk_io_channel_from_ctx(group); 213 spdk_put_io_channel(ch); 214 free(qpair_ctx); 215 } 216 } 217 218 static void 219 nvmf_tgt_destroy_poll_group_qpairs(struct spdk_nvmf_poll_group *group) 220 { 221 struct nvmf_qpair_disconnect_many_ctx *ctx; 222 223 ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx)); 224 225 if (!ctx) { 226 SPDK_ERRLOG("Failed to allocate memory for destroy poll group ctx\n"); 227 return; 228 } 229 230 ctx->group = group; 231 _nvmf_tgt_disconnect_next_qpair(ctx); 232 } 233 234 static int 235 nvmf_tgt_accept(void *ctx) 236 { 237 struct spdk_nvmf_tgt *tgt = ctx; 238 struct spdk_nvmf_transport *transport, *tmp; 239 int count = 0; 240 241 TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, tmp) { 242 count += nvmf_transport_accept(transport); 243 } 244 245 return count; 246 } 247 248 struct spdk_nvmf_tgt * 249 spdk_nvmf_tgt_create(struct spdk_nvmf_target_opts *opts) 250 { 251 struct spdk_nvmf_tgt *tgt, *tmp_tgt; 252 uint32_t acceptor_poll_rate; 253 254 if (strnlen(opts->name, NVMF_TGT_NAME_MAX_LENGTH) == NVMF_TGT_NAME_MAX_LENGTH) { 255 SPDK_ERRLOG("Provided target name exceeds the max length of %u.\n", NVMF_TGT_NAME_MAX_LENGTH); 256 return NULL; 257 } 258 259 TAILQ_FOREACH(tmp_tgt, &g_nvmf_tgts, link) { 260 if (!strncmp(opts->name, tmp_tgt->name, NVMF_TGT_NAME_MAX_LENGTH)) { 261 SPDK_ERRLOG("Provided target name must be unique.\n"); 262 return NULL; 263 } 264 } 265 266 tgt = calloc(1, sizeof(*tgt)); 267 if (!tgt) { 268 return NULL; 269 } 270 271 snprintf(tgt->name, NVMF_TGT_NAME_MAX_LENGTH, "%s", opts->name); 272 273 if (!opts || !opts->max_subsystems) { 274 tgt->max_subsystems = SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS; 275 } else { 276 tgt->max_subsystems = opts->max_subsystems; 277 } 278 279 if (!opts || !opts->acceptor_poll_rate) { 280 acceptor_poll_rate = SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US; 281 } else { 282 acceptor_poll_rate = opts->acceptor_poll_rate; 283 } 284 285 tgt->discovery_genctr = 0; 286 TAILQ_INIT(&tgt->transports); 287 TAILQ_INIT(&tgt->poll_groups); 288 289 tgt->subsystems = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem *)); 290 if (!tgt->subsystems) { 291 free(tgt); 292 return NULL; 293 } 294 295 pthread_mutex_init(&tgt->mutex, NULL); 296 297 tgt->accept_poller = SPDK_POLLER_REGISTER(nvmf_tgt_accept, tgt, acceptor_poll_rate); 298 if (!tgt->accept_poller) { 299 pthread_mutex_destroy(&tgt->mutex); 300 free(tgt->subsystems); 301 free(tgt); 302 return NULL; 303 } 304 305 spdk_io_device_register(tgt, 306 nvmf_tgt_create_poll_group, 307 nvmf_tgt_destroy_poll_group, 308 sizeof(struct spdk_nvmf_poll_group), 309 tgt->name); 310 311 TAILQ_INSERT_HEAD(&g_nvmf_tgts, tgt, link); 312 313 return tgt; 314 } 315 316 static void 317 _nvmf_tgt_destroy_next_transport(void *ctx) 318 { 319 struct spdk_nvmf_tgt *tgt = ctx; 320 struct spdk_nvmf_transport *transport; 321 322 if (!TAILQ_EMPTY(&tgt->transports)) { 323 transport = TAILQ_FIRST(&tgt->transports); 324 TAILQ_REMOVE(&tgt->transports, transport, link); 325 spdk_nvmf_transport_destroy(transport, _nvmf_tgt_destroy_next_transport, tgt); 326 } else { 327 spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn = tgt->destroy_cb_fn; 328 void *destroy_cb_arg = tgt->destroy_cb_arg; 329 330 pthread_mutex_destroy(&tgt->mutex); 331 free(tgt); 332 333 if (destroy_cb_fn) { 334 destroy_cb_fn(destroy_cb_arg, 0); 335 } 336 } 337 } 338 339 static void 340 nvmf_tgt_destroy_cb(void *io_device) 341 { 342 struct spdk_nvmf_tgt *tgt = io_device; 343 uint32_t i; 344 345 if (tgt->subsystems) { 346 for (i = 0; i < tgt->max_subsystems; i++) { 347 if (tgt->subsystems[i]) { 348 nvmf_subsystem_remove_all_listeners(tgt->subsystems[i], true); 349 spdk_nvmf_subsystem_destroy(tgt->subsystems[i]); 350 } 351 } 352 free(tgt->subsystems); 353 } 354 355 _nvmf_tgt_destroy_next_transport(tgt); 356 } 357 358 void 359 spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt, 360 spdk_nvmf_tgt_destroy_done_fn cb_fn, 361 void *cb_arg) 362 { 363 tgt->destroy_cb_fn = cb_fn; 364 tgt->destroy_cb_arg = cb_arg; 365 366 spdk_poller_unregister(&tgt->accept_poller); 367 368 TAILQ_REMOVE(&g_nvmf_tgts, tgt, link); 369 370 spdk_io_device_unregister(tgt, nvmf_tgt_destroy_cb); 371 } 372 373 const char * 374 spdk_nvmf_tgt_get_name(struct spdk_nvmf_tgt *tgt) 375 { 376 return tgt->name; 377 } 378 379 struct spdk_nvmf_tgt * 380 spdk_nvmf_get_tgt(const char *name) 381 { 382 struct spdk_nvmf_tgt *tgt; 383 uint32_t num_targets = 0; 384 385 TAILQ_FOREACH(tgt, &g_nvmf_tgts, link) { 386 if (name) { 387 if (!strncmp(tgt->name, name, NVMF_TGT_NAME_MAX_LENGTH)) { 388 return tgt; 389 } 390 } 391 num_targets++; 392 } 393 394 /* 395 * special case. If there is only one target and 396 * no name was specified, return the only available 397 * target. If there is more than one target, name must 398 * be specified. 399 */ 400 if (!name && num_targets == 1) { 401 return TAILQ_FIRST(&g_nvmf_tgts); 402 } 403 404 return NULL; 405 } 406 407 struct spdk_nvmf_tgt * 408 spdk_nvmf_get_first_tgt(void) 409 { 410 return TAILQ_FIRST(&g_nvmf_tgts); 411 } 412 413 struct spdk_nvmf_tgt * 414 spdk_nvmf_get_next_tgt(struct spdk_nvmf_tgt *prev) 415 { 416 return TAILQ_NEXT(prev, link); 417 } 418 419 static void 420 nvmf_write_subsystem_config_json(struct spdk_json_write_ctx *w, 421 struct spdk_nvmf_subsystem *subsystem) 422 { 423 struct spdk_nvmf_host *host; 424 struct spdk_nvmf_subsystem_listener *listener; 425 const struct spdk_nvme_transport_id *trid; 426 struct spdk_nvmf_ns *ns; 427 struct spdk_nvmf_ns_opts ns_opts; 428 uint32_t max_namespaces; 429 char uuid_str[SPDK_UUID_STRING_LEN]; 430 const char *adrfam; 431 432 if (spdk_nvmf_subsystem_get_type(subsystem) != SPDK_NVMF_SUBTYPE_NVME) { 433 return; 434 } 435 436 /* { */ 437 spdk_json_write_object_begin(w); 438 spdk_json_write_named_string(w, "method", "nvmf_create_subsystem"); 439 440 /* "params" : { */ 441 spdk_json_write_named_object_begin(w, "params"); 442 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 443 spdk_json_write_named_bool(w, "allow_any_host", spdk_nvmf_subsystem_get_allow_any_host(subsystem)); 444 spdk_json_write_named_string(w, "serial_number", spdk_nvmf_subsystem_get_sn(subsystem)); 445 spdk_json_write_named_string(w, "model_number", spdk_nvmf_subsystem_get_mn(subsystem)); 446 447 max_namespaces = spdk_nvmf_subsystem_get_max_namespaces(subsystem); 448 if (max_namespaces != 0) { 449 spdk_json_write_named_uint32(w, "max_namespaces", max_namespaces); 450 } 451 452 /* } "params" */ 453 spdk_json_write_object_end(w); 454 455 /* } */ 456 spdk_json_write_object_end(w); 457 458 for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem); listener != NULL; 459 listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) { 460 trid = spdk_nvmf_subsystem_listener_get_trid(listener); 461 462 adrfam = spdk_nvme_transport_id_adrfam_str(trid->adrfam); 463 464 spdk_json_write_object_begin(w); 465 spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_listener"); 466 467 /* "params" : { */ 468 spdk_json_write_named_object_begin(w, "params"); 469 470 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 471 472 /* "listen_address" : { */ 473 spdk_json_write_named_object_begin(w, "listen_address"); 474 475 spdk_json_write_named_string(w, "trtype", trid->trstring); 476 if (adrfam) { 477 spdk_json_write_named_string(w, "adrfam", adrfam); 478 } 479 480 spdk_json_write_named_string(w, "traddr", trid->traddr); 481 spdk_json_write_named_string(w, "trsvcid", trid->trsvcid); 482 /* } "listen_address" */ 483 spdk_json_write_object_end(w); 484 485 /* } "params" */ 486 spdk_json_write_object_end(w); 487 488 /* } */ 489 spdk_json_write_object_end(w); 490 } 491 492 for (host = spdk_nvmf_subsystem_get_first_host(subsystem); host != NULL; 493 host = spdk_nvmf_subsystem_get_next_host(subsystem, host)) { 494 495 spdk_json_write_object_begin(w); 496 spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_host"); 497 498 /* "params" : { */ 499 spdk_json_write_named_object_begin(w, "params"); 500 501 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 502 spdk_json_write_named_string(w, "host", spdk_nvmf_host_get_nqn(host)); 503 504 /* } "params" */ 505 spdk_json_write_object_end(w); 506 507 /* } */ 508 spdk_json_write_object_end(w); 509 } 510 511 for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL; 512 ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) { 513 spdk_nvmf_ns_get_opts(ns, &ns_opts, sizeof(ns_opts)); 514 515 spdk_json_write_object_begin(w); 516 spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_ns"); 517 518 /* "params" : { */ 519 spdk_json_write_named_object_begin(w, "params"); 520 521 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 522 523 /* "namespace" : { */ 524 spdk_json_write_named_object_begin(w, "namespace"); 525 526 spdk_json_write_named_uint32(w, "nsid", spdk_nvmf_ns_get_id(ns)); 527 spdk_json_write_named_string(w, "bdev_name", spdk_bdev_get_name(spdk_nvmf_ns_get_bdev(ns))); 528 529 if (!spdk_mem_all_zero(ns_opts.nguid, sizeof(ns_opts.nguid))) { 530 SPDK_STATIC_ASSERT(sizeof(ns_opts.nguid) == sizeof(uint64_t) * 2, "size mismatch"); 531 spdk_json_write_named_string_fmt(w, "nguid", "%016"PRIX64"%016"PRIX64, from_be64(&ns_opts.nguid[0]), 532 from_be64(&ns_opts.nguid[8])); 533 } 534 535 if (!spdk_mem_all_zero(ns_opts.eui64, sizeof(ns_opts.eui64))) { 536 SPDK_STATIC_ASSERT(sizeof(ns_opts.eui64) == sizeof(uint64_t), "size mismatch"); 537 spdk_json_write_named_string_fmt(w, "eui64", "%016"PRIX64, from_be64(&ns_opts.eui64)); 538 } 539 540 if (!spdk_mem_all_zero(&ns_opts.uuid, sizeof(ns_opts.uuid))) { 541 spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &ns_opts.uuid); 542 spdk_json_write_named_string(w, "uuid", uuid_str); 543 } 544 545 /* "namespace" */ 546 spdk_json_write_object_end(w); 547 548 /* } "params" */ 549 spdk_json_write_object_end(w); 550 551 /* } */ 552 spdk_json_write_object_end(w); 553 } 554 } 555 556 void 557 spdk_nvmf_tgt_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_tgt *tgt) 558 { 559 struct spdk_nvmf_subsystem *subsystem; 560 struct spdk_nvmf_transport *transport; 561 562 spdk_json_write_object_begin(w); 563 spdk_json_write_named_string(w, "method", "nvmf_set_max_subsystems"); 564 565 spdk_json_write_named_object_begin(w, "params"); 566 spdk_json_write_named_uint32(w, "max_subsystems", tgt->max_subsystems); 567 spdk_json_write_object_end(w); 568 569 spdk_json_write_object_end(w); 570 571 /* write transports */ 572 TAILQ_FOREACH(transport, &tgt->transports, link) { 573 spdk_json_write_object_begin(w); 574 spdk_json_write_named_string(w, "method", "nvmf_create_transport"); 575 576 spdk_json_write_named_object_begin(w, "params"); 577 spdk_json_write_named_string(w, "trtype", transport->ops->name); 578 spdk_json_write_named_uint32(w, "max_queue_depth", transport->opts.max_queue_depth); 579 spdk_json_write_named_uint32(w, "max_io_qpairs_per_ctrlr", 580 transport->opts.max_qpairs_per_ctrlr - 1); 581 spdk_json_write_named_uint32(w, "in_capsule_data_size", transport->opts.in_capsule_data_size); 582 spdk_json_write_named_uint32(w, "max_io_size", transport->opts.max_io_size); 583 spdk_json_write_named_uint32(w, "io_unit_size", transport->opts.io_unit_size); 584 spdk_json_write_named_uint32(w, "max_aq_depth", transport->opts.max_aq_depth); 585 if (transport->ops->dump_opts) { 586 transport->ops->dump_opts(transport, w); 587 } 588 spdk_json_write_named_uint32(w, "abort_timeout_sec", transport->opts.abort_timeout_sec); 589 spdk_json_write_object_end(w); 590 591 spdk_json_write_object_end(w); 592 } 593 594 subsystem = spdk_nvmf_subsystem_get_first(tgt); 595 while (subsystem) { 596 nvmf_write_subsystem_config_json(w, subsystem); 597 subsystem = spdk_nvmf_subsystem_get_next(subsystem); 598 } 599 } 600 601 static void 602 nvmf_listen_opts_copy(struct spdk_nvmf_listen_opts *opts, 603 const struct spdk_nvmf_listen_opts *opts_src, size_t opts_size) 604 { 605 assert(opts); 606 assert(opts_src); 607 608 opts->opts_size = opts_size; 609 610 #define SET_FIELD(field) \ 611 if (offsetof(struct spdk_nvmf_listen_opts, field) + sizeof(opts->field) <= opts_size) { \ 612 opts->field = opts_src->field; \ 613 } \ 614 615 SET_FIELD(transport_specific); 616 #undef SET_FIELD 617 618 /* Do not remove this statement, you should always update this statement when you adding a new field, 619 * and do not forget to add the SET_FIELD statement for your added field. */ 620 SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_listen_opts) == 16, "Incorrect size"); 621 } 622 623 void 624 spdk_nvmf_listen_opts_init(struct spdk_nvmf_listen_opts *opts, size_t opts_size) 625 { 626 struct spdk_nvmf_listen_opts opts_local = {}; 627 628 /* local version of opts should have defaults set here */ 629 630 nvmf_listen_opts_copy(opts, &opts_local, opts_size); 631 } 632 633 int 634 spdk_nvmf_tgt_listen_ext(struct spdk_nvmf_tgt *tgt, const struct spdk_nvme_transport_id *trid, 635 struct spdk_nvmf_listen_opts *opts) 636 { 637 struct spdk_nvmf_transport *transport; 638 int rc; 639 struct spdk_nvmf_listen_opts opts_local = {}; 640 641 if (!opts) { 642 SPDK_ERRLOG("opts should not be NULL\n"); 643 return -EINVAL; 644 } 645 646 if (!opts->opts_size) { 647 SPDK_ERRLOG("The opts_size in opts structure should not be zero\n"); 648 return -EINVAL; 649 } 650 651 transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring); 652 if (!transport) { 653 SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n", 654 trid->trstring); 655 return -EINVAL; 656 } 657 658 nvmf_listen_opts_copy(&opts_local, opts, opts->opts_size); 659 rc = spdk_nvmf_transport_listen(transport, trid, &opts_local); 660 if (rc < 0) { 661 SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr); 662 } 663 664 return rc; 665 } 666 667 int 668 spdk_nvmf_tgt_listen(struct spdk_nvmf_tgt *tgt, struct spdk_nvme_transport_id *trid) 669 { 670 struct spdk_nvmf_listen_opts opts; 671 672 spdk_nvmf_listen_opts_init(&opts, sizeof(opts)); 673 674 return spdk_nvmf_tgt_listen_ext(tgt, trid, &opts); 675 } 676 677 int 678 spdk_nvmf_tgt_stop_listen(struct spdk_nvmf_tgt *tgt, 679 struct spdk_nvme_transport_id *trid) 680 { 681 struct spdk_nvmf_transport *transport; 682 int rc; 683 684 transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring); 685 if (!transport) { 686 SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n", 687 trid->trstring); 688 return -EINVAL; 689 } 690 691 rc = spdk_nvmf_transport_stop_listen(transport, trid); 692 if (rc < 0) { 693 SPDK_ERRLOG("Failed to stop listening on address '%s'\n", trid->traddr); 694 return rc; 695 } 696 return 0; 697 } 698 699 struct spdk_nvmf_tgt_add_transport_ctx { 700 struct spdk_nvmf_tgt *tgt; 701 struct spdk_nvmf_transport *transport; 702 spdk_nvmf_tgt_add_transport_done_fn cb_fn; 703 void *cb_arg; 704 }; 705 706 static void 707 _nvmf_tgt_add_transport_done(struct spdk_io_channel_iter *i, int status) 708 { 709 struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 710 711 ctx->cb_fn(ctx->cb_arg, status); 712 713 free(ctx); 714 } 715 716 static void 717 _nvmf_tgt_add_transport(struct spdk_io_channel_iter *i) 718 { 719 struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 720 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 721 struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch); 722 int rc; 723 724 rc = nvmf_poll_group_add_transport(group, ctx->transport); 725 spdk_for_each_channel_continue(i, rc); 726 } 727 728 void spdk_nvmf_tgt_add_transport(struct spdk_nvmf_tgt *tgt, 729 struct spdk_nvmf_transport *transport, 730 spdk_nvmf_tgt_add_transport_done_fn cb_fn, 731 void *cb_arg) 732 { 733 struct spdk_nvmf_tgt_add_transport_ctx *ctx; 734 735 if (spdk_nvmf_tgt_get_transport(tgt, transport->ops->name)) { 736 cb_fn(cb_arg, -EEXIST); 737 return; /* transport already created */ 738 } 739 740 transport->tgt = tgt; 741 TAILQ_INSERT_TAIL(&tgt->transports, transport, link); 742 743 ctx = calloc(1, sizeof(*ctx)); 744 if (!ctx) { 745 cb_fn(cb_arg, -ENOMEM); 746 return; 747 } 748 749 ctx->tgt = tgt; 750 ctx->transport = transport; 751 ctx->cb_fn = cb_fn; 752 ctx->cb_arg = cb_arg; 753 754 spdk_for_each_channel(tgt, 755 _nvmf_tgt_add_transport, 756 ctx, 757 _nvmf_tgt_add_transport_done); 758 } 759 760 struct spdk_nvmf_subsystem * 761 spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn) 762 { 763 struct spdk_nvmf_subsystem *subsystem; 764 uint32_t sid; 765 766 if (!subnqn) { 767 return NULL; 768 } 769 770 /* Ensure that subnqn is null terminated */ 771 if (!memchr(subnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1)) { 772 SPDK_ERRLOG("Connect SUBNQN is not null terminated\n"); 773 return NULL; 774 } 775 776 for (sid = 0; sid < tgt->max_subsystems; sid++) { 777 subsystem = tgt->subsystems[sid]; 778 if (subsystem == NULL) { 779 continue; 780 } 781 782 if (strcmp(subnqn, subsystem->subnqn) == 0) { 783 return subsystem; 784 } 785 } 786 787 return NULL; 788 } 789 790 struct spdk_nvmf_transport * 791 spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name) 792 { 793 struct spdk_nvmf_transport *transport; 794 795 TAILQ_FOREACH(transport, &tgt->transports, link) { 796 if (!strncasecmp(transport->ops->name, transport_name, SPDK_NVMF_TRSTRING_MAX_LEN)) { 797 return transport; 798 } 799 } 800 return NULL; 801 } 802 803 struct nvmf_new_qpair_ctx { 804 struct spdk_nvmf_qpair *qpair; 805 struct spdk_nvmf_poll_group *group; 806 }; 807 808 static void 809 _nvmf_poll_group_add(void *_ctx) 810 { 811 struct nvmf_new_qpair_ctx *ctx = _ctx; 812 struct spdk_nvmf_qpair *qpair = ctx->qpair; 813 struct spdk_nvmf_poll_group *group = ctx->group; 814 815 free(_ctx); 816 817 if (spdk_nvmf_poll_group_add(group, qpair) != 0) { 818 SPDK_ERRLOG("Unable to add the qpair to a poll group.\n"); 819 spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 820 } 821 } 822 823 void 824 spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair) 825 { 826 struct spdk_nvmf_poll_group *group; 827 struct nvmf_new_qpair_ctx *ctx; 828 829 group = spdk_nvmf_get_optimal_poll_group(qpair); 830 if (group == NULL) { 831 if (tgt->next_poll_group == NULL) { 832 tgt->next_poll_group = TAILQ_FIRST(&tgt->poll_groups); 833 if (tgt->next_poll_group == NULL) { 834 SPDK_ERRLOG("No poll groups exist.\n"); 835 spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 836 return; 837 } 838 } 839 group = tgt->next_poll_group; 840 tgt->next_poll_group = TAILQ_NEXT(group, link); 841 } 842 843 ctx = calloc(1, sizeof(*ctx)); 844 if (!ctx) { 845 SPDK_ERRLOG("Unable to send message to poll group.\n"); 846 spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 847 return; 848 } 849 850 ctx->qpair = qpair; 851 ctx->group = group; 852 853 spdk_thread_send_msg(group->thread, _nvmf_poll_group_add, ctx); 854 } 855 856 struct spdk_nvmf_poll_group * 857 spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt) 858 { 859 struct spdk_io_channel *ch; 860 861 ch = spdk_get_io_channel(tgt); 862 if (!ch) { 863 SPDK_ERRLOG("Unable to get I/O channel for target\n"); 864 return NULL; 865 } 866 867 return spdk_io_channel_get_ctx(ch); 868 } 869 870 void 871 spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group, 872 spdk_nvmf_poll_group_destroy_done_fn cb_fn, 873 void *cb_arg) 874 { 875 assert(group->destroy_cb_fn == NULL); 876 group->destroy_cb_fn = cb_fn; 877 group->destroy_cb_arg = cb_arg; 878 879 /* This function will put the io_channel associated with this poll group */ 880 nvmf_tgt_destroy_poll_group_qpairs(group); 881 } 882 883 int 884 spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group, 885 struct spdk_nvmf_qpair *qpair) 886 { 887 int rc = -1; 888 struct spdk_nvmf_transport_poll_group *tgroup; 889 890 TAILQ_INIT(&qpair->outstanding); 891 qpair->group = group; 892 qpair->ctrlr = NULL; 893 qpair->disconnect_started = false; 894 895 TAILQ_FOREACH(tgroup, &group->tgroups, link) { 896 if (tgroup->transport == qpair->transport) { 897 rc = nvmf_transport_poll_group_add(tgroup, qpair); 898 break; 899 } 900 } 901 902 /* We add the qpair to the group only it is succesfully added into the tgroup */ 903 if (rc == 0) { 904 TAILQ_INSERT_TAIL(&group->qpairs, qpair, link); 905 nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ACTIVE); 906 } 907 908 return rc; 909 } 910 911 static void 912 _nvmf_ctrlr_destruct(void *ctx) 913 { 914 struct spdk_nvmf_ctrlr *ctrlr = ctx; 915 916 nvmf_ctrlr_destruct(ctrlr); 917 } 918 919 static void 920 _nvmf_transport_qpair_fini_complete(void *cb_ctx) 921 { 922 struct nvmf_qpair_disconnect_ctx *qpair_ctx = cb_ctx; 923 924 if (qpair_ctx->cb_fn) { 925 spdk_thread_send_msg(qpair_ctx->thread, qpair_ctx->cb_fn, qpair_ctx->ctx); 926 } 927 free(qpair_ctx); 928 } 929 930 static void 931 _nvmf_transport_qpair_fini(void *ctx) 932 { 933 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; 934 935 nvmf_transport_qpair_fini(qpair_ctx->qpair, _nvmf_transport_qpair_fini_complete, qpair_ctx); 936 } 937 938 static void 939 _nvmf_ctrlr_free_from_qpair(void *ctx) 940 { 941 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; 942 struct spdk_nvmf_ctrlr *ctrlr = qpair_ctx->ctrlr; 943 uint32_t count; 944 945 spdk_bit_array_clear(ctrlr->qpair_mask, qpair_ctx->qid); 946 count = spdk_bit_array_count_set(ctrlr->qpair_mask); 947 if (count == 0) { 948 ctrlr->in_destruct = true; 949 spdk_thread_send_msg(ctrlr->subsys->thread, _nvmf_ctrlr_destruct, ctrlr); 950 } 951 952 spdk_thread_send_msg(qpair_ctx->thread, _nvmf_transport_qpair_fini, qpair_ctx); 953 } 954 955 void 956 spdk_nvmf_poll_group_remove(struct spdk_nvmf_qpair *qpair) 957 { 958 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; 959 struct spdk_nvmf_transport_poll_group *tgroup; 960 struct spdk_nvmf_request *req, *tmp; 961 struct spdk_nvmf_subsystem_poll_group *sgroup; 962 int rc; 963 964 nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ERROR); 965 966 /* Find the tgroup and remove the qpair from the tgroup */ 967 TAILQ_FOREACH(tgroup, &qpair->group->tgroups, link) { 968 if (tgroup->transport == qpair->transport) { 969 rc = nvmf_transport_poll_group_remove(tgroup, qpair); 970 if (rc && (rc != ENOTSUP)) { 971 SPDK_ERRLOG("Cannot remove qpair=%p from transport group=%p\n", 972 qpair, tgroup); 973 } 974 break; 975 } 976 } 977 978 if (ctrlr) { 979 sgroup = &qpair->group->sgroups[ctrlr->subsys->id]; 980 TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) { 981 if (req->qpair == qpair) { 982 TAILQ_REMOVE(&sgroup->queued, req, link); 983 if (nvmf_transport_req_free(req)) { 984 SPDK_ERRLOG("Transport request free error!\n"); 985 } 986 } 987 } 988 } 989 990 TAILQ_REMOVE(&qpair->group->qpairs, qpair, link); 991 qpair->group = NULL; 992 } 993 994 static void 995 _nvmf_qpair_destroy(void *ctx, int status) 996 { 997 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; 998 struct spdk_nvmf_qpair *qpair = qpair_ctx->qpair; 999 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; 1000 1001 assert(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING); 1002 qpair_ctx->qid = qpair->qid; 1003 1004 spdk_nvmf_poll_group_remove(qpair); 1005 1006 if (!ctrlr || !ctrlr->thread) { 1007 nvmf_transport_qpair_fini(qpair, _nvmf_transport_qpair_fini_complete, qpair_ctx); 1008 return; 1009 } 1010 1011 qpair_ctx->ctrlr = ctrlr; 1012 spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_free_from_qpair, qpair_ctx); 1013 } 1014 1015 static void 1016 _nvmf_qpair_disconnect_msg(void *ctx) 1017 { 1018 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; 1019 1020 spdk_nvmf_qpair_disconnect(qpair_ctx->qpair, qpair_ctx->cb_fn, qpair_ctx->ctx); 1021 free(ctx); 1022 } 1023 1024 int 1025 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx) 1026 { 1027 struct nvmf_qpair_disconnect_ctx *qpair_ctx; 1028 1029 if (__atomic_test_and_set(&qpair->disconnect_started, __ATOMIC_RELAXED)) { 1030 if (cb_fn) { 1031 cb_fn(ctx); 1032 } 1033 return 0; 1034 } 1035 1036 /* If we get a qpair in the uninitialized state, we can just destroy it immediately */ 1037 if (qpair->state == SPDK_NVMF_QPAIR_UNINITIALIZED) { 1038 nvmf_transport_qpair_fini(qpair, NULL, NULL); 1039 if (cb_fn) { 1040 cb_fn(ctx); 1041 } 1042 return 0; 1043 } 1044 1045 assert(qpair->group != NULL); 1046 if (spdk_get_thread() != qpair->group->thread) { 1047 /* clear the atomic so we can set it on the next call on the proper thread. */ 1048 __atomic_clear(&qpair->disconnect_started, __ATOMIC_RELAXED); 1049 qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx)); 1050 if (!qpair_ctx) { 1051 SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n"); 1052 return -ENOMEM; 1053 } 1054 qpair_ctx->qpair = qpair; 1055 qpair_ctx->cb_fn = cb_fn; 1056 qpair_ctx->thread = qpair->group->thread; 1057 qpair_ctx->ctx = ctx; 1058 spdk_thread_send_msg(qpair->group->thread, _nvmf_qpair_disconnect_msg, qpair_ctx); 1059 return 0; 1060 } 1061 1062 assert(qpair->state == SPDK_NVMF_QPAIR_ACTIVE); 1063 nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_DEACTIVATING); 1064 1065 qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx)); 1066 if (!qpair_ctx) { 1067 SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n"); 1068 return -ENOMEM; 1069 } 1070 1071 qpair_ctx->qpair = qpair; 1072 qpair_ctx->cb_fn = cb_fn; 1073 qpair_ctx->thread = qpair->group->thread; 1074 qpair_ctx->ctx = ctx; 1075 1076 /* Check for outstanding I/O */ 1077 if (!TAILQ_EMPTY(&qpair->outstanding)) { 1078 qpair->state_cb = _nvmf_qpair_destroy; 1079 qpair->state_cb_arg = qpair_ctx; 1080 nvmf_qpair_free_aer(qpair); 1081 return 0; 1082 } 1083 1084 _nvmf_qpair_destroy(qpair_ctx, 0); 1085 1086 return 0; 1087 } 1088 1089 int 1090 spdk_nvmf_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair, 1091 struct spdk_nvme_transport_id *trid) 1092 { 1093 return nvmf_transport_qpair_get_peer_trid(qpair, trid); 1094 } 1095 1096 int 1097 spdk_nvmf_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair, 1098 struct spdk_nvme_transport_id *trid) 1099 { 1100 return nvmf_transport_qpair_get_local_trid(qpair, trid); 1101 } 1102 1103 int 1104 spdk_nvmf_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, 1105 struct spdk_nvme_transport_id *trid) 1106 { 1107 return nvmf_transport_qpair_get_listen_trid(qpair, trid); 1108 } 1109 1110 int 1111 nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group, 1112 struct spdk_nvmf_transport *transport) 1113 { 1114 struct spdk_nvmf_transport_poll_group *tgroup; 1115 1116 TAILQ_FOREACH(tgroup, &group->tgroups, link) { 1117 if (tgroup->transport == transport) { 1118 /* Transport already in the poll group */ 1119 return 0; 1120 } 1121 } 1122 1123 tgroup = nvmf_transport_poll_group_create(transport); 1124 if (!tgroup) { 1125 SPDK_ERRLOG("Unable to create poll group for transport\n"); 1126 return -1; 1127 } 1128 1129 tgroup->group = group; 1130 TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link); 1131 1132 return 0; 1133 } 1134 1135 static int 1136 poll_group_update_subsystem(struct spdk_nvmf_poll_group *group, 1137 struct spdk_nvmf_subsystem *subsystem) 1138 { 1139 struct spdk_nvmf_subsystem_poll_group *sgroup; 1140 uint32_t new_num_ns, old_num_ns; 1141 uint32_t i, j; 1142 struct spdk_nvmf_ns *ns; 1143 struct spdk_nvmf_registrant *reg, *tmp; 1144 struct spdk_io_channel *ch; 1145 struct spdk_nvmf_subsystem_pg_ns_info *ns_info; 1146 struct spdk_nvmf_ctrlr *ctrlr; 1147 bool ns_changed; 1148 1149 /* Make sure our poll group has memory for this subsystem allocated */ 1150 if (subsystem->id >= group->num_sgroups) { 1151 return -ENOMEM; 1152 } 1153 1154 sgroup = &group->sgroups[subsystem->id]; 1155 1156 /* Make sure the array of namespace information is the correct size */ 1157 new_num_ns = subsystem->max_nsid; 1158 old_num_ns = sgroup->num_ns; 1159 1160 ns_changed = false; 1161 1162 if (old_num_ns == 0) { 1163 if (new_num_ns > 0) { 1164 /* First allocation */ 1165 sgroup->ns_info = calloc(new_num_ns, sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1166 if (!sgroup->ns_info) { 1167 return -ENOMEM; 1168 } 1169 } 1170 } else if (new_num_ns > old_num_ns) { 1171 void *buf; 1172 1173 /* Make the array larger */ 1174 buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1175 if (!buf) { 1176 return -ENOMEM; 1177 } 1178 1179 sgroup->ns_info = buf; 1180 1181 /* Null out the new namespace information slots */ 1182 for (i = old_num_ns; i < new_num_ns; i++) { 1183 memset(&sgroup->ns_info[i], 0, sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1184 } 1185 } else if (new_num_ns < old_num_ns) { 1186 void *buf; 1187 1188 /* Free the extra I/O channels */ 1189 for (i = new_num_ns; i < old_num_ns; i++) { 1190 ns_info = &sgroup->ns_info[i]; 1191 1192 if (ns_info->channel) { 1193 spdk_put_io_channel(ns_info->channel); 1194 ns_info->channel = NULL; 1195 } 1196 } 1197 1198 /* Make the array smaller */ 1199 if (new_num_ns > 0) { 1200 buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1201 if (!buf) { 1202 return -ENOMEM; 1203 } 1204 sgroup->ns_info = buf; 1205 } else { 1206 free(sgroup->ns_info); 1207 sgroup->ns_info = NULL; 1208 } 1209 } 1210 1211 sgroup->num_ns = new_num_ns; 1212 1213 /* Detect bdevs that were added or removed */ 1214 for (i = 0; i < sgroup->num_ns; i++) { 1215 ns = subsystem->ns[i]; 1216 ns_info = &sgroup->ns_info[i]; 1217 ch = ns_info->channel; 1218 1219 if (ns == NULL && ch == NULL) { 1220 /* Both NULL. Leave empty */ 1221 } else if (ns == NULL && ch != NULL) { 1222 /* There was a channel here, but the namespace is gone. */ 1223 ns_changed = true; 1224 spdk_put_io_channel(ch); 1225 ns_info->channel = NULL; 1226 } else if (ns != NULL && ch == NULL) { 1227 /* A namespace appeared but there is no channel yet */ 1228 ns_changed = true; 1229 ch = spdk_bdev_get_io_channel(ns->desc); 1230 if (ch == NULL) { 1231 SPDK_ERRLOG("Could not allocate I/O channel.\n"); 1232 return -ENOMEM; 1233 } 1234 ns_info->channel = ch; 1235 } else if (spdk_uuid_compare(&ns_info->uuid, spdk_bdev_get_uuid(ns->bdev)) != 0) { 1236 /* A namespace was here before, but was replaced by a new one. */ 1237 ns_changed = true; 1238 spdk_put_io_channel(ns_info->channel); 1239 memset(ns_info, 0, sizeof(*ns_info)); 1240 1241 ch = spdk_bdev_get_io_channel(ns->desc); 1242 if (ch == NULL) { 1243 SPDK_ERRLOG("Could not allocate I/O channel.\n"); 1244 return -ENOMEM; 1245 } 1246 ns_info->channel = ch; 1247 } else if (ns_info->num_blocks != spdk_bdev_get_num_blocks(ns->bdev)) { 1248 /* Namespace is still there but size has changed */ 1249 SPDK_DEBUGLOG(nvmf, "Namespace resized: subsystem_id %u," 1250 " nsid %u, pg %p, old %" PRIu64 ", new %" PRIu64 "\n", 1251 subsystem->id, 1252 ns->nsid, 1253 group, 1254 ns_info->num_blocks, 1255 spdk_bdev_get_num_blocks(ns->bdev)); 1256 ns_changed = true; 1257 } 1258 1259 if (ns == NULL) { 1260 memset(ns_info, 0, sizeof(*ns_info)); 1261 } else { 1262 ns_info->uuid = *spdk_bdev_get_uuid(ns->bdev); 1263 ns_info->num_blocks = spdk_bdev_get_num_blocks(ns->bdev); 1264 ns_info->crkey = ns->crkey; 1265 ns_info->rtype = ns->rtype; 1266 if (ns->holder) { 1267 ns_info->holder_id = ns->holder->hostid; 1268 } 1269 1270 memset(&ns_info->reg_hostid, 0, SPDK_NVMF_MAX_NUM_REGISTRANTS * sizeof(struct spdk_uuid)); 1271 j = 0; 1272 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 1273 if (j >= SPDK_NVMF_MAX_NUM_REGISTRANTS) { 1274 SPDK_ERRLOG("Maximum %u registrants can support.\n", SPDK_NVMF_MAX_NUM_REGISTRANTS); 1275 return -EINVAL; 1276 } 1277 ns_info->reg_hostid[j++] = reg->hostid; 1278 } 1279 } 1280 } 1281 1282 if (ns_changed) { 1283 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 1284 if (ctrlr->admin_qpair->group == group) { 1285 nvmf_ctrlr_async_event_ns_notice(ctrlr); 1286 nvmf_ctrlr_async_event_ana_change_notice(ctrlr); 1287 } 1288 } 1289 } 1290 1291 return 0; 1292 } 1293 1294 int 1295 nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group, 1296 struct spdk_nvmf_subsystem *subsystem) 1297 { 1298 return poll_group_update_subsystem(group, subsystem); 1299 } 1300 1301 int 1302 nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group, 1303 struct spdk_nvmf_subsystem *subsystem, 1304 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1305 { 1306 int rc = 0; 1307 struct spdk_nvmf_subsystem_poll_group *sgroup = &group->sgroups[subsystem->id]; 1308 1309 TAILQ_INIT(&sgroup->queued); 1310 1311 rc = poll_group_update_subsystem(group, subsystem); 1312 if (rc) { 1313 nvmf_poll_group_remove_subsystem(group, subsystem, NULL, NULL); 1314 goto fini; 1315 } 1316 1317 sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1318 fini: 1319 if (cb_fn) { 1320 cb_fn(cb_arg, rc); 1321 } 1322 1323 return rc; 1324 } 1325 1326 static void 1327 _nvmf_poll_group_remove_subsystem_cb(void *ctx, int status) 1328 { 1329 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; 1330 struct spdk_nvmf_subsystem *subsystem; 1331 struct spdk_nvmf_poll_group *group; 1332 struct spdk_nvmf_subsystem_poll_group *sgroup; 1333 spdk_nvmf_poll_group_mod_done cpl_fn = NULL; 1334 void *cpl_ctx = NULL; 1335 uint32_t nsid; 1336 1337 group = qpair_ctx->group; 1338 subsystem = qpair_ctx->subsystem; 1339 cpl_fn = qpair_ctx->cpl_fn; 1340 cpl_ctx = qpair_ctx->cpl_ctx; 1341 sgroup = &group->sgroups[subsystem->id]; 1342 1343 if (status) { 1344 goto fini; 1345 } 1346 1347 for (nsid = 0; nsid < sgroup->num_ns; nsid++) { 1348 if (sgroup->ns_info[nsid].channel) { 1349 spdk_put_io_channel(sgroup->ns_info[nsid].channel); 1350 sgroup->ns_info[nsid].channel = NULL; 1351 } 1352 } 1353 1354 sgroup->num_ns = 0; 1355 free(sgroup->ns_info); 1356 sgroup->ns_info = NULL; 1357 fini: 1358 free(qpair_ctx); 1359 if (cpl_fn) { 1360 cpl_fn(cpl_ctx, status); 1361 } 1362 } 1363 1364 static void 1365 _nvmf_subsystem_disconnect_next_qpair(void *ctx) 1366 { 1367 struct spdk_nvmf_qpair *qpair; 1368 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; 1369 struct spdk_nvmf_subsystem *subsystem; 1370 struct spdk_nvmf_poll_group *group; 1371 int rc = 0; 1372 1373 group = qpair_ctx->group; 1374 subsystem = qpair_ctx->subsystem; 1375 1376 TAILQ_FOREACH(qpair, &group->qpairs, link) { 1377 if ((qpair->ctrlr != NULL) && (qpair->ctrlr->subsys == subsystem)) { 1378 break; 1379 } 1380 } 1381 1382 if (qpair) { 1383 rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_subsystem_disconnect_next_qpair, qpair_ctx); 1384 } 1385 1386 if (!qpair || rc != 0) { 1387 _nvmf_poll_group_remove_subsystem_cb(ctx, rc); 1388 } 1389 return; 1390 } 1391 1392 void 1393 nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group, 1394 struct spdk_nvmf_subsystem *subsystem, 1395 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1396 { 1397 struct spdk_nvmf_qpair *qpair; 1398 struct spdk_nvmf_subsystem_poll_group *sgroup; 1399 struct nvmf_qpair_disconnect_many_ctx *ctx; 1400 int rc = 0; 1401 1402 ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx)); 1403 1404 if (!ctx) { 1405 SPDK_ERRLOG("Unable to allocate memory for context to remove poll subsystem\n"); 1406 goto fini; 1407 } 1408 1409 ctx->group = group; 1410 ctx->subsystem = subsystem; 1411 ctx->cpl_fn = cb_fn; 1412 ctx->cpl_ctx = cb_arg; 1413 1414 sgroup = &group->sgroups[subsystem->id]; 1415 sgroup->state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 1416 1417 TAILQ_FOREACH(qpair, &group->qpairs, link) { 1418 if ((qpair->ctrlr != NULL) && (qpair->ctrlr->subsys == subsystem)) { 1419 break; 1420 } 1421 } 1422 1423 if (qpair) { 1424 rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_subsystem_disconnect_next_qpair, ctx); 1425 } else { 1426 /* call the callback immediately. It will handle any channel iteration */ 1427 _nvmf_poll_group_remove_subsystem_cb(ctx, 0); 1428 } 1429 1430 if (rc != 0 && rc != -EINPROGRESS) { 1431 free(ctx); 1432 goto fini; 1433 } 1434 1435 return; 1436 fini: 1437 if (cb_fn) { 1438 cb_fn(cb_arg, rc); 1439 } 1440 } 1441 1442 void 1443 nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group, 1444 struct spdk_nvmf_subsystem *subsystem, 1445 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1446 { 1447 struct spdk_nvmf_subsystem_poll_group *sgroup; 1448 int rc = 0; 1449 1450 if (subsystem->id >= group->num_sgroups) { 1451 rc = -1; 1452 goto fini; 1453 } 1454 1455 sgroup = &group->sgroups[subsystem->id]; 1456 if (sgroup == NULL) { 1457 rc = -1; 1458 goto fini; 1459 } 1460 1461 if (sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSED) { 1462 goto fini; 1463 } 1464 sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSING; 1465 1466 if (sgroup->io_outstanding > 0) { 1467 assert(sgroup->cb_fn == NULL); 1468 sgroup->cb_fn = cb_fn; 1469 assert(sgroup->cb_arg == NULL); 1470 sgroup->cb_arg = cb_arg; 1471 return; 1472 } 1473 1474 assert(sgroup->io_outstanding == 0); 1475 sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED; 1476 fini: 1477 if (cb_fn) { 1478 cb_fn(cb_arg, rc); 1479 } 1480 } 1481 1482 void 1483 nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group, 1484 struct spdk_nvmf_subsystem *subsystem, 1485 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1486 { 1487 struct spdk_nvmf_request *req, *tmp; 1488 struct spdk_nvmf_subsystem_poll_group *sgroup; 1489 int rc = 0; 1490 1491 if (subsystem->id >= group->num_sgroups) { 1492 rc = -1; 1493 goto fini; 1494 } 1495 1496 sgroup = &group->sgroups[subsystem->id]; 1497 1498 if (sgroup->state == SPDK_NVMF_SUBSYSTEM_ACTIVE) { 1499 goto fini; 1500 } 1501 1502 rc = poll_group_update_subsystem(group, subsystem); 1503 if (rc) { 1504 goto fini; 1505 } 1506 1507 sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1508 1509 /* Release all queued requests */ 1510 TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) { 1511 TAILQ_REMOVE(&sgroup->queued, req, link); 1512 spdk_nvmf_request_exec(req); 1513 } 1514 fini: 1515 if (cb_fn) { 1516 cb_fn(cb_arg, rc); 1517 } 1518 } 1519 1520 1521 struct spdk_nvmf_poll_group * 1522 spdk_nvmf_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair) 1523 { 1524 struct spdk_nvmf_transport_poll_group *tgroup; 1525 1526 tgroup = nvmf_transport_get_optimal_poll_group(qpair->transport, qpair); 1527 1528 if (tgroup == NULL) { 1529 return NULL; 1530 } 1531 1532 return tgroup->group; 1533 } 1534 1535 int 1536 spdk_nvmf_poll_group_get_stat(struct spdk_nvmf_tgt *tgt, 1537 struct spdk_nvmf_poll_group_stat *stat) 1538 { 1539 struct spdk_io_channel *ch; 1540 struct spdk_nvmf_poll_group *group; 1541 1542 if (tgt == NULL || stat == NULL) { 1543 return -EINVAL; 1544 } 1545 1546 ch = spdk_get_io_channel(tgt); 1547 group = spdk_io_channel_get_ctx(ch); 1548 *stat = group->stat; 1549 spdk_put_io_channel(ch); 1550 return 0; 1551 } 1552