1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2018-2019, 2021 Mellanox Technologies LTD. All rights reserved. 6 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * * Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * * Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * * Neither the name of Intel Corporation nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include "spdk/stdinc.h" 36 37 #include "spdk/bdev.h" 38 #include "spdk/bit_array.h" 39 #include "spdk/thread.h" 40 #include "spdk/nvmf.h" 41 #include "spdk/endian.h" 42 #include "spdk/string.h" 43 #include "spdk/log.h" 44 #include "spdk_internal/usdt.h" 45 46 #include "nvmf_internal.h" 47 #include "transport.h" 48 49 SPDK_LOG_REGISTER_COMPONENT(nvmf) 50 51 #define SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS 1024 52 53 static TAILQ_HEAD(, spdk_nvmf_tgt) g_nvmf_tgts = TAILQ_HEAD_INITIALIZER(g_nvmf_tgts); 54 55 typedef void (*nvmf_qpair_disconnect_cpl)(void *ctx, int status); 56 57 /* supplied to a single call to nvmf_qpair_disconnect */ 58 struct nvmf_qpair_disconnect_ctx { 59 struct spdk_nvmf_qpair *qpair; 60 struct spdk_nvmf_ctrlr *ctrlr; 61 nvmf_qpair_disconnect_cb cb_fn; 62 struct spdk_thread *thread; 63 void *ctx; 64 uint16_t qid; 65 }; 66 67 /* 68 * There are several times when we need to iterate through the list of all qpairs and selectively delete them. 69 * In order to do this sequentially without overlap, we must provide a context to recover the next qpair from 70 * to enable calling nvmf_qpair_disconnect on the next desired qpair. 71 */ 72 struct nvmf_qpair_disconnect_many_ctx { 73 struct spdk_nvmf_subsystem *subsystem; 74 struct spdk_nvmf_poll_group *group; 75 spdk_nvmf_poll_group_mod_done cpl_fn; 76 void *cpl_ctx; 77 uint32_t count; 78 }; 79 80 static void 81 nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair, 82 enum spdk_nvmf_qpair_state state) 83 { 84 assert(qpair != NULL); 85 assert(qpair->group->thread == spdk_get_thread()); 86 87 qpair->state = state; 88 } 89 90 static int 91 nvmf_poll_group_poll(void *ctx) 92 { 93 struct spdk_nvmf_poll_group *group = ctx; 94 int rc; 95 int count = 0; 96 struct spdk_nvmf_transport_poll_group *tgroup; 97 98 TAILQ_FOREACH(tgroup, &group->tgroups, link) { 99 rc = nvmf_transport_poll_group_poll(tgroup); 100 if (rc < 0) { 101 return SPDK_POLLER_BUSY; 102 } 103 count += rc; 104 } 105 106 return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; 107 } 108 109 /* 110 * Reset and clean up the poll group (I/O channel code will actually free the 111 * group). 112 */ 113 static void 114 nvmf_tgt_cleanup_poll_group(struct spdk_nvmf_poll_group *group) 115 { 116 struct spdk_nvmf_transport_poll_group *tgroup, *tmp; 117 struct spdk_nvmf_subsystem_poll_group *sgroup; 118 uint32_t sid, nsid; 119 120 TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) { 121 TAILQ_REMOVE(&group->tgroups, tgroup, link); 122 nvmf_transport_poll_group_destroy(tgroup); 123 } 124 125 for (sid = 0; sid < group->num_sgroups; sid++) { 126 sgroup = &group->sgroups[sid]; 127 128 assert(sgroup != NULL); 129 130 for (nsid = 0; nsid < sgroup->num_ns; nsid++) { 131 if (sgroup->ns_info[nsid].channel) { 132 spdk_put_io_channel(sgroup->ns_info[nsid].channel); 133 sgroup->ns_info[nsid].channel = NULL; 134 } 135 } 136 137 free(sgroup->ns_info); 138 } 139 140 free(group->sgroups); 141 142 spdk_poller_unregister(&group->poller); 143 144 if (group->destroy_cb_fn) { 145 group->destroy_cb_fn(group->destroy_cb_arg, 0); 146 } 147 } 148 149 /* 150 * Callback to unregister a poll group from the target, and clean up its state. 151 */ 152 static void 153 nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf) 154 { 155 struct spdk_nvmf_tgt *tgt = io_device; 156 struct spdk_nvmf_poll_group *group = ctx_buf; 157 158 SPDK_DTRACE_PROBE1(nvmf_destroy_poll_group, spdk_thread_get_id(group->thread)); 159 160 pthread_mutex_lock(&tgt->mutex); 161 TAILQ_REMOVE(&tgt->poll_groups, group, link); 162 pthread_mutex_unlock(&tgt->mutex); 163 164 nvmf_tgt_cleanup_poll_group(group); 165 } 166 167 static int 168 nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf) 169 { 170 struct spdk_nvmf_tgt *tgt = io_device; 171 struct spdk_nvmf_poll_group *group = ctx_buf; 172 struct spdk_nvmf_transport *transport; 173 struct spdk_thread *thread = spdk_get_thread(); 174 uint32_t sid; 175 int rc; 176 177 TAILQ_INIT(&group->tgroups); 178 TAILQ_INIT(&group->qpairs); 179 group->thread = thread; 180 181 group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0); 182 183 SPDK_DTRACE_PROBE1(nvmf_create_poll_group, spdk_thread_get_id(thread)); 184 185 TAILQ_FOREACH(transport, &tgt->transports, link) { 186 rc = nvmf_poll_group_add_transport(group, transport); 187 if (rc != 0) { 188 nvmf_tgt_cleanup_poll_group(group); 189 return rc; 190 } 191 } 192 193 group->num_sgroups = tgt->max_subsystems; 194 group->sgroups = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem_poll_group)); 195 if (!group->sgroups) { 196 nvmf_tgt_cleanup_poll_group(group); 197 return -ENOMEM; 198 } 199 200 for (sid = 0; sid < tgt->max_subsystems; sid++) { 201 struct spdk_nvmf_subsystem *subsystem; 202 203 subsystem = tgt->subsystems[sid]; 204 if (!subsystem) { 205 continue; 206 } 207 208 if (nvmf_poll_group_add_subsystem(group, subsystem, NULL, NULL) != 0) { 209 nvmf_tgt_cleanup_poll_group(group); 210 return -1; 211 } 212 } 213 214 pthread_mutex_lock(&tgt->mutex); 215 TAILQ_INSERT_TAIL(&tgt->poll_groups, group, link); 216 pthread_mutex_unlock(&tgt->mutex); 217 218 return 0; 219 } 220 221 static void 222 _nvmf_tgt_disconnect_next_qpair(void *ctx) 223 { 224 struct spdk_nvmf_qpair *qpair; 225 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; 226 struct spdk_nvmf_poll_group *group = qpair_ctx->group; 227 struct spdk_io_channel *ch; 228 int rc = 0; 229 230 qpair = TAILQ_FIRST(&group->qpairs); 231 232 if (qpair) { 233 rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_tgt_disconnect_next_qpair, ctx); 234 } 235 236 if (!qpair || rc != 0) { 237 /* When the refcount from the channels reaches 0, nvmf_tgt_destroy_poll_group will be called. */ 238 ch = spdk_io_channel_from_ctx(group); 239 spdk_put_io_channel(ch); 240 free(qpair_ctx); 241 } 242 } 243 244 static void 245 nvmf_tgt_destroy_poll_group_qpairs(struct spdk_nvmf_poll_group *group) 246 { 247 struct nvmf_qpair_disconnect_many_ctx *ctx; 248 249 SPDK_DTRACE_PROBE1(nvmf_destroy_poll_group_qpairs, spdk_thread_get_id(group->thread)); 250 251 ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx)); 252 if (!ctx) { 253 SPDK_ERRLOG("Failed to allocate memory for destroy poll group ctx\n"); 254 return; 255 } 256 257 ctx->group = group; 258 _nvmf_tgt_disconnect_next_qpair(ctx); 259 } 260 261 struct spdk_nvmf_tgt * 262 spdk_nvmf_tgt_create(struct spdk_nvmf_target_opts *opts) 263 { 264 struct spdk_nvmf_tgt *tgt, *tmp_tgt; 265 266 if (strnlen(opts->name, NVMF_TGT_NAME_MAX_LENGTH) == NVMF_TGT_NAME_MAX_LENGTH) { 267 SPDK_ERRLOG("Provided target name exceeds the max length of %u.\n", NVMF_TGT_NAME_MAX_LENGTH); 268 return NULL; 269 } 270 271 TAILQ_FOREACH(tmp_tgt, &g_nvmf_tgts, link) { 272 if (!strncmp(opts->name, tmp_tgt->name, NVMF_TGT_NAME_MAX_LENGTH)) { 273 SPDK_ERRLOG("Provided target name must be unique.\n"); 274 return NULL; 275 } 276 } 277 278 tgt = calloc(1, sizeof(*tgt)); 279 if (!tgt) { 280 return NULL; 281 } 282 283 snprintf(tgt->name, NVMF_TGT_NAME_MAX_LENGTH, "%s", opts->name); 284 285 if (!opts || !opts->max_subsystems) { 286 tgt->max_subsystems = SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS; 287 } else { 288 tgt->max_subsystems = opts->max_subsystems; 289 } 290 291 if (!opts) { 292 tgt->crdt[0] = 0; 293 tgt->crdt[1] = 0; 294 tgt->crdt[2] = 0; 295 } else { 296 tgt->crdt[0] = opts->crdt[0]; 297 tgt->crdt[1] = opts->crdt[1]; 298 tgt->crdt[2] = opts->crdt[2]; 299 } 300 301 if (!opts) { 302 tgt->discovery_filter = SPDK_NVMF_TGT_DISCOVERY_MATCH_ANY; 303 } else { 304 tgt->discovery_filter = opts->discovery_filter; 305 } 306 307 tgt->discovery_genctr = 0; 308 TAILQ_INIT(&tgt->transports); 309 TAILQ_INIT(&tgt->poll_groups); 310 311 tgt->subsystems = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem *)); 312 if (!tgt->subsystems) { 313 free(tgt); 314 return NULL; 315 } 316 317 pthread_mutex_init(&tgt->mutex, NULL); 318 319 spdk_io_device_register(tgt, 320 nvmf_tgt_create_poll_group, 321 nvmf_tgt_destroy_poll_group, 322 sizeof(struct spdk_nvmf_poll_group), 323 tgt->name); 324 325 TAILQ_INSERT_HEAD(&g_nvmf_tgts, tgt, link); 326 327 return tgt; 328 } 329 330 static void 331 _nvmf_tgt_destroy_next_transport(void *ctx) 332 { 333 struct spdk_nvmf_tgt *tgt = ctx; 334 struct spdk_nvmf_transport *transport; 335 336 if (!TAILQ_EMPTY(&tgt->transports)) { 337 transport = TAILQ_FIRST(&tgt->transports); 338 TAILQ_REMOVE(&tgt->transports, transport, link); 339 spdk_nvmf_transport_destroy(transport, _nvmf_tgt_destroy_next_transport, tgt); 340 } else { 341 spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn = tgt->destroy_cb_fn; 342 void *destroy_cb_arg = tgt->destroy_cb_arg; 343 344 pthread_mutex_destroy(&tgt->mutex); 345 free(tgt); 346 347 if (destroy_cb_fn) { 348 destroy_cb_fn(destroy_cb_arg, 0); 349 } 350 } 351 } 352 353 static void 354 nvmf_tgt_destroy_cb(void *io_device) 355 { 356 struct spdk_nvmf_tgt *tgt = io_device; 357 uint32_t i; 358 int rc; 359 360 if (tgt->subsystems) { 361 for (i = 0; i < tgt->max_subsystems; i++) { 362 if (tgt->subsystems[i]) { 363 nvmf_subsystem_remove_all_listeners(tgt->subsystems[i], true); 364 365 rc = spdk_nvmf_subsystem_destroy(tgt->subsystems[i], nvmf_tgt_destroy_cb, tgt); 366 if (rc) { 367 if (rc == -EINPROGRESS) { 368 /* If rc is -EINPROGRESS, nvmf_tgt_destroy_cb will be called again when subsystem #i 369 * is destroyed, nvmf_tgt_destroy_cb will continue to destroy other subsystems if any */ 370 return; 371 } else { 372 SPDK_ERRLOG("Failed to destroy subsystem, id %u, rc %d\n", tgt->subsystems[i]->id, rc); 373 assert(0); 374 } 375 } 376 } 377 } 378 free(tgt->subsystems); 379 } 380 381 _nvmf_tgt_destroy_next_transport(tgt); 382 } 383 384 void 385 spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt, 386 spdk_nvmf_tgt_destroy_done_fn cb_fn, 387 void *cb_arg) 388 { 389 tgt->destroy_cb_fn = cb_fn; 390 tgt->destroy_cb_arg = cb_arg; 391 392 TAILQ_REMOVE(&g_nvmf_tgts, tgt, link); 393 394 spdk_io_device_unregister(tgt, nvmf_tgt_destroy_cb); 395 } 396 397 const char * 398 spdk_nvmf_tgt_get_name(struct spdk_nvmf_tgt *tgt) 399 { 400 return tgt->name; 401 } 402 403 struct spdk_nvmf_tgt * 404 spdk_nvmf_get_tgt(const char *name) 405 { 406 struct spdk_nvmf_tgt *tgt; 407 uint32_t num_targets = 0; 408 409 TAILQ_FOREACH(tgt, &g_nvmf_tgts, link) { 410 if (name) { 411 if (!strncmp(tgt->name, name, NVMF_TGT_NAME_MAX_LENGTH)) { 412 return tgt; 413 } 414 } 415 num_targets++; 416 } 417 418 /* 419 * special case. If there is only one target and 420 * no name was specified, return the only available 421 * target. If there is more than one target, name must 422 * be specified. 423 */ 424 if (!name && num_targets == 1) { 425 return TAILQ_FIRST(&g_nvmf_tgts); 426 } 427 428 return NULL; 429 } 430 431 struct spdk_nvmf_tgt * 432 spdk_nvmf_get_first_tgt(void) 433 { 434 return TAILQ_FIRST(&g_nvmf_tgts); 435 } 436 437 struct spdk_nvmf_tgt * 438 spdk_nvmf_get_next_tgt(struct spdk_nvmf_tgt *prev) 439 { 440 return TAILQ_NEXT(prev, link); 441 } 442 443 static void 444 nvmf_write_subsystem_config_json(struct spdk_json_write_ctx *w, 445 struct spdk_nvmf_subsystem *subsystem) 446 { 447 struct spdk_nvmf_host *host; 448 struct spdk_nvmf_subsystem_listener *listener; 449 const struct spdk_nvme_transport_id *trid; 450 struct spdk_nvmf_ns *ns; 451 struct spdk_nvmf_ns_opts ns_opts; 452 uint32_t max_namespaces; 453 char uuid_str[SPDK_UUID_STRING_LEN]; 454 455 if (spdk_nvmf_subsystem_get_type(subsystem) != SPDK_NVMF_SUBTYPE_NVME) { 456 return; 457 } 458 459 /* { */ 460 spdk_json_write_object_begin(w); 461 spdk_json_write_named_string(w, "method", "nvmf_create_subsystem"); 462 463 /* "params" : { */ 464 spdk_json_write_named_object_begin(w, "params"); 465 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 466 spdk_json_write_named_bool(w, "allow_any_host", spdk_nvmf_subsystem_get_allow_any_host(subsystem)); 467 spdk_json_write_named_string(w, "serial_number", spdk_nvmf_subsystem_get_sn(subsystem)); 468 spdk_json_write_named_string(w, "model_number", spdk_nvmf_subsystem_get_mn(subsystem)); 469 470 max_namespaces = spdk_nvmf_subsystem_get_max_namespaces(subsystem); 471 if (max_namespaces != 0) { 472 spdk_json_write_named_uint32(w, "max_namespaces", max_namespaces); 473 } 474 475 spdk_json_write_named_uint32(w, "min_cntlid", spdk_nvmf_subsystem_get_min_cntlid(subsystem)); 476 spdk_json_write_named_uint32(w, "max_cntlid", spdk_nvmf_subsystem_get_max_cntlid(subsystem)); 477 spdk_json_write_named_bool(w, "ana_reporting", nvmf_subsystem_get_ana_reporting(subsystem)); 478 479 /* } "params" */ 480 spdk_json_write_object_end(w); 481 482 /* } */ 483 spdk_json_write_object_end(w); 484 485 for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem); listener != NULL; 486 listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) { 487 trid = spdk_nvmf_subsystem_listener_get_trid(listener); 488 489 spdk_json_write_object_begin(w); 490 spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_listener"); 491 492 /* "params" : { */ 493 spdk_json_write_named_object_begin(w, "params"); 494 495 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 496 nvmf_transport_listen_dump_opts(listener->transport, trid, w); 497 498 /* } "params" */ 499 spdk_json_write_object_end(w); 500 501 /* } */ 502 spdk_json_write_object_end(w); 503 } 504 505 for (host = spdk_nvmf_subsystem_get_first_host(subsystem); host != NULL; 506 host = spdk_nvmf_subsystem_get_next_host(subsystem, host)) { 507 508 spdk_json_write_object_begin(w); 509 spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_host"); 510 511 /* "params" : { */ 512 spdk_json_write_named_object_begin(w, "params"); 513 514 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 515 spdk_json_write_named_string(w, "host", spdk_nvmf_host_get_nqn(host)); 516 517 /* } "params" */ 518 spdk_json_write_object_end(w); 519 520 /* } */ 521 spdk_json_write_object_end(w); 522 } 523 524 for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL; 525 ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) { 526 spdk_nvmf_ns_get_opts(ns, &ns_opts, sizeof(ns_opts)); 527 528 spdk_json_write_object_begin(w); 529 spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_ns"); 530 531 /* "params" : { */ 532 spdk_json_write_named_object_begin(w, "params"); 533 534 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 535 536 /* "namespace" : { */ 537 spdk_json_write_named_object_begin(w, "namespace"); 538 539 spdk_json_write_named_uint32(w, "nsid", spdk_nvmf_ns_get_id(ns)); 540 spdk_json_write_named_string(w, "bdev_name", spdk_bdev_get_name(spdk_nvmf_ns_get_bdev(ns))); 541 542 if (!spdk_mem_all_zero(ns_opts.nguid, sizeof(ns_opts.nguid))) { 543 SPDK_STATIC_ASSERT(sizeof(ns_opts.nguid) == sizeof(uint64_t) * 2, "size mismatch"); 544 spdk_json_write_named_string_fmt(w, "nguid", "%016"PRIX64"%016"PRIX64, from_be64(&ns_opts.nguid[0]), 545 from_be64(&ns_opts.nguid[8])); 546 } 547 548 if (!spdk_mem_all_zero(ns_opts.eui64, sizeof(ns_opts.eui64))) { 549 SPDK_STATIC_ASSERT(sizeof(ns_opts.eui64) == sizeof(uint64_t), "size mismatch"); 550 spdk_json_write_named_string_fmt(w, "eui64", "%016"PRIX64, from_be64(&ns_opts.eui64)); 551 } 552 553 if (!spdk_mem_all_zero(&ns_opts.uuid, sizeof(ns_opts.uuid))) { 554 spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &ns_opts.uuid); 555 spdk_json_write_named_string(w, "uuid", uuid_str); 556 } 557 558 if (nvmf_subsystem_get_ana_reporting(subsystem)) { 559 spdk_json_write_named_uint32(w, "anagrpid", ns_opts.anagrpid); 560 } 561 562 /* "namespace" */ 563 spdk_json_write_object_end(w); 564 565 /* } "params" */ 566 spdk_json_write_object_end(w); 567 568 /* } */ 569 spdk_json_write_object_end(w); 570 } 571 } 572 573 void 574 spdk_nvmf_tgt_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_tgt *tgt) 575 { 576 struct spdk_nvmf_subsystem *subsystem; 577 struct spdk_nvmf_transport *transport; 578 579 spdk_json_write_object_begin(w); 580 spdk_json_write_named_string(w, "method", "nvmf_set_max_subsystems"); 581 582 spdk_json_write_named_object_begin(w, "params"); 583 spdk_json_write_named_uint32(w, "max_subsystems", tgt->max_subsystems); 584 spdk_json_write_object_end(w); 585 586 spdk_json_write_object_end(w); 587 588 spdk_json_write_object_begin(w); 589 spdk_json_write_named_string(w, "method", "nvmf_set_crdt"); 590 spdk_json_write_named_object_begin(w, "params"); 591 spdk_json_write_named_uint32(w, "crdt1", tgt->crdt[0]); 592 spdk_json_write_named_uint32(w, "crdt2", tgt->crdt[1]); 593 spdk_json_write_named_uint32(w, "crdt3", tgt->crdt[2]); 594 spdk_json_write_object_end(w); 595 spdk_json_write_object_end(w); 596 597 /* write transports */ 598 TAILQ_FOREACH(transport, &tgt->transports, link) { 599 spdk_json_write_object_begin(w); 600 spdk_json_write_named_string(w, "method", "nvmf_create_transport"); 601 nvmf_transport_dump_opts(transport, w, true); 602 spdk_json_write_object_end(w); 603 } 604 605 subsystem = spdk_nvmf_subsystem_get_first(tgt); 606 while (subsystem) { 607 nvmf_write_subsystem_config_json(w, subsystem); 608 subsystem = spdk_nvmf_subsystem_get_next(subsystem); 609 } 610 } 611 612 static void 613 nvmf_listen_opts_copy(struct spdk_nvmf_listen_opts *opts, 614 const struct spdk_nvmf_listen_opts *opts_src, size_t opts_size) 615 { 616 assert(opts); 617 assert(opts_src); 618 619 opts->opts_size = opts_size; 620 621 #define SET_FIELD(field) \ 622 if (offsetof(struct spdk_nvmf_listen_opts, field) + sizeof(opts->field) <= opts_size) { \ 623 opts->field = opts_src->field; \ 624 } \ 625 626 SET_FIELD(transport_specific); 627 #undef SET_FIELD 628 629 /* Do not remove this statement, you should always update this statement when you adding a new field, 630 * and do not forget to add the SET_FIELD statement for your added field. */ 631 SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_listen_opts) == 16, "Incorrect size"); 632 } 633 634 void 635 spdk_nvmf_listen_opts_init(struct spdk_nvmf_listen_opts *opts, size_t opts_size) 636 { 637 struct spdk_nvmf_listen_opts opts_local = {}; 638 639 /* local version of opts should have defaults set here */ 640 641 nvmf_listen_opts_copy(opts, &opts_local, opts_size); 642 } 643 644 int 645 spdk_nvmf_tgt_listen_ext(struct spdk_nvmf_tgt *tgt, const struct spdk_nvme_transport_id *trid, 646 struct spdk_nvmf_listen_opts *opts) 647 { 648 struct spdk_nvmf_transport *transport; 649 int rc; 650 struct spdk_nvmf_listen_opts opts_local = {}; 651 652 if (!opts) { 653 SPDK_ERRLOG("opts should not be NULL\n"); 654 return -EINVAL; 655 } 656 657 if (!opts->opts_size) { 658 SPDK_ERRLOG("The opts_size in opts structure should not be zero\n"); 659 return -EINVAL; 660 } 661 662 transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring); 663 if (!transport) { 664 SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n", 665 trid->trstring); 666 return -EINVAL; 667 } 668 669 nvmf_listen_opts_copy(&opts_local, opts, opts->opts_size); 670 rc = spdk_nvmf_transport_listen(transport, trid, &opts_local); 671 if (rc < 0) { 672 SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr); 673 } 674 675 return rc; 676 } 677 678 int 679 spdk_nvmf_tgt_stop_listen(struct spdk_nvmf_tgt *tgt, 680 struct spdk_nvme_transport_id *trid) 681 { 682 struct spdk_nvmf_transport *transport; 683 int rc; 684 685 transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring); 686 if (!transport) { 687 SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n", 688 trid->trstring); 689 return -EINVAL; 690 } 691 692 rc = spdk_nvmf_transport_stop_listen(transport, trid); 693 if (rc < 0) { 694 SPDK_ERRLOG("Failed to stop listening on address '%s'\n", trid->traddr); 695 return rc; 696 } 697 return 0; 698 } 699 700 struct spdk_nvmf_tgt_add_transport_ctx { 701 struct spdk_nvmf_tgt *tgt; 702 struct spdk_nvmf_transport *transport; 703 spdk_nvmf_tgt_add_transport_done_fn cb_fn; 704 void *cb_arg; 705 int status; 706 }; 707 708 static void 709 _nvmf_tgt_remove_transport_done(struct spdk_io_channel_iter *i, int status) 710 { 711 struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 712 713 ctx->cb_fn(ctx->cb_arg, ctx->status); 714 free(ctx); 715 } 716 717 static void 718 _nvmf_tgt_remove_transport(struct spdk_io_channel_iter *i) 719 { 720 struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 721 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 722 struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch); 723 struct spdk_nvmf_transport_poll_group *tgroup, *tmp; 724 725 TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) { 726 if (tgroup->transport == ctx->transport) { 727 TAILQ_REMOVE(&group->tgroups, tgroup, link); 728 nvmf_transport_poll_group_destroy(tgroup); 729 } 730 } 731 732 spdk_for_each_channel_continue(i, 0); 733 } 734 735 static void 736 _nvmf_tgt_add_transport_done(struct spdk_io_channel_iter *i, int status) 737 { 738 struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 739 740 if (status) { 741 ctx->status = status; 742 spdk_for_each_channel(ctx->tgt, 743 _nvmf_tgt_remove_transport, 744 ctx, 745 _nvmf_tgt_remove_transport_done); 746 return; 747 } 748 749 ctx->transport->tgt = ctx->tgt; 750 TAILQ_INSERT_TAIL(&ctx->tgt->transports, ctx->transport, link); 751 ctx->cb_fn(ctx->cb_arg, status); 752 free(ctx); 753 } 754 755 static void 756 _nvmf_tgt_add_transport(struct spdk_io_channel_iter *i) 757 { 758 struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 759 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 760 struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch); 761 int rc; 762 763 rc = nvmf_poll_group_add_transport(group, ctx->transport); 764 spdk_for_each_channel_continue(i, rc); 765 } 766 767 void spdk_nvmf_tgt_add_transport(struct spdk_nvmf_tgt *tgt, 768 struct spdk_nvmf_transport *transport, 769 spdk_nvmf_tgt_add_transport_done_fn cb_fn, 770 void *cb_arg) 771 { 772 struct spdk_nvmf_tgt_add_transport_ctx *ctx; 773 774 SPDK_DTRACE_PROBE2(nvmf_tgt_add_transport, transport, tgt->name); 775 776 if (spdk_nvmf_tgt_get_transport(tgt, transport->ops->name)) { 777 cb_fn(cb_arg, -EEXIST); 778 return; /* transport already created */ 779 } 780 781 ctx = calloc(1, sizeof(*ctx)); 782 if (!ctx) { 783 cb_fn(cb_arg, -ENOMEM); 784 return; 785 } 786 787 ctx->tgt = tgt; 788 ctx->transport = transport; 789 ctx->cb_fn = cb_fn; 790 ctx->cb_arg = cb_arg; 791 792 spdk_for_each_channel(tgt, 793 _nvmf_tgt_add_transport, 794 ctx, 795 _nvmf_tgt_add_transport_done); 796 } 797 798 struct spdk_nvmf_subsystem * 799 spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn) 800 { 801 struct spdk_nvmf_subsystem *subsystem; 802 uint32_t sid; 803 804 if (!subnqn) { 805 return NULL; 806 } 807 808 /* Ensure that subnqn is null terminated */ 809 if (!memchr(subnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1)) { 810 SPDK_ERRLOG("Connect SUBNQN is not null terminated\n"); 811 return NULL; 812 } 813 814 for (sid = 0; sid < tgt->max_subsystems; sid++) { 815 subsystem = tgt->subsystems[sid]; 816 if (subsystem == NULL) { 817 continue; 818 } 819 820 if (strcmp(subnqn, subsystem->subnqn) == 0) { 821 return subsystem; 822 } 823 } 824 825 return NULL; 826 } 827 828 struct spdk_nvmf_transport * 829 spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name) 830 { 831 struct spdk_nvmf_transport *transport; 832 833 TAILQ_FOREACH(transport, &tgt->transports, link) { 834 if (!strncasecmp(transport->ops->name, transport_name, SPDK_NVMF_TRSTRING_MAX_LEN)) { 835 return transport; 836 } 837 } 838 return NULL; 839 } 840 841 struct nvmf_new_qpair_ctx { 842 struct spdk_nvmf_qpair *qpair; 843 struct spdk_nvmf_poll_group *group; 844 }; 845 846 static void 847 _nvmf_poll_group_add(void *_ctx) 848 { 849 struct nvmf_new_qpair_ctx *ctx = _ctx; 850 struct spdk_nvmf_qpair *qpair = ctx->qpair; 851 struct spdk_nvmf_poll_group *group = ctx->group; 852 853 free(_ctx); 854 855 if (spdk_nvmf_poll_group_add(group, qpair) != 0) { 856 SPDK_ERRLOG("Unable to add the qpair to a poll group.\n"); 857 spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 858 } 859 } 860 861 void 862 spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair) 863 { 864 struct spdk_nvmf_poll_group *group; 865 struct nvmf_new_qpair_ctx *ctx; 866 867 group = spdk_nvmf_get_optimal_poll_group(qpair); 868 if (group == NULL) { 869 if (tgt->next_poll_group == NULL) { 870 tgt->next_poll_group = TAILQ_FIRST(&tgt->poll_groups); 871 if (tgt->next_poll_group == NULL) { 872 SPDK_ERRLOG("No poll groups exist.\n"); 873 spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 874 return; 875 } 876 } 877 group = tgt->next_poll_group; 878 tgt->next_poll_group = TAILQ_NEXT(group, link); 879 } 880 881 ctx = calloc(1, sizeof(*ctx)); 882 if (!ctx) { 883 SPDK_ERRLOG("Unable to send message to poll group.\n"); 884 spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 885 return; 886 } 887 888 ctx->qpair = qpair; 889 ctx->group = group; 890 891 spdk_thread_send_msg(group->thread, _nvmf_poll_group_add, ctx); 892 } 893 894 struct spdk_nvmf_poll_group * 895 spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt) 896 { 897 struct spdk_io_channel *ch; 898 899 ch = spdk_get_io_channel(tgt); 900 if (!ch) { 901 SPDK_ERRLOG("Unable to get I/O channel for target\n"); 902 return NULL; 903 } 904 905 return spdk_io_channel_get_ctx(ch); 906 } 907 908 void 909 spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group, 910 spdk_nvmf_poll_group_destroy_done_fn cb_fn, 911 void *cb_arg) 912 { 913 assert(group->destroy_cb_fn == NULL); 914 group->destroy_cb_fn = cb_fn; 915 group->destroy_cb_arg = cb_arg; 916 917 /* This function will put the io_channel associated with this poll group */ 918 nvmf_tgt_destroy_poll_group_qpairs(group); 919 } 920 921 int 922 spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group, 923 struct spdk_nvmf_qpair *qpair) 924 { 925 int rc = -1; 926 struct spdk_nvmf_transport_poll_group *tgroup; 927 928 TAILQ_INIT(&qpair->outstanding); 929 qpair->group = group; 930 qpair->ctrlr = NULL; 931 qpair->disconnect_started = false; 932 933 TAILQ_FOREACH(tgroup, &group->tgroups, link) { 934 if (tgroup->transport == qpair->transport) { 935 rc = nvmf_transport_poll_group_add(tgroup, qpair); 936 break; 937 } 938 } 939 940 /* We add the qpair to the group only it is successfully added into the tgroup */ 941 if (rc == 0) { 942 SPDK_DTRACE_PROBE2(nvmf_poll_group_add_qpair, qpair, spdk_thread_get_id(group->thread)); 943 TAILQ_INSERT_TAIL(&group->qpairs, qpair, link); 944 nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ACTIVE); 945 } 946 947 return rc; 948 } 949 950 static void 951 _nvmf_ctrlr_destruct(void *ctx) 952 { 953 struct spdk_nvmf_ctrlr *ctrlr = ctx; 954 955 nvmf_ctrlr_destruct(ctrlr); 956 } 957 958 static void 959 _nvmf_ctrlr_free_from_qpair(void *ctx) 960 { 961 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; 962 struct spdk_nvmf_ctrlr *ctrlr = qpair_ctx->ctrlr; 963 uint32_t count; 964 965 spdk_bit_array_clear(ctrlr->qpair_mask, qpair_ctx->qid); 966 count = spdk_bit_array_count_set(ctrlr->qpair_mask); 967 if (count == 0) { 968 assert(!ctrlr->in_destruct); 969 SPDK_DEBUGLOG(nvmf, "Last qpair %u, destroy ctrlr 0x%hx\n", qpair_ctx->qid, ctrlr->cntlid); 970 ctrlr->in_destruct = true; 971 spdk_thread_send_msg(ctrlr->subsys->thread, _nvmf_ctrlr_destruct, ctrlr); 972 } 973 free(qpair_ctx); 974 } 975 976 static void 977 _nvmf_transport_qpair_fini_complete(void *cb_ctx) 978 { 979 struct nvmf_qpair_disconnect_ctx *qpair_ctx = cb_ctx; 980 struct spdk_nvmf_ctrlr *ctrlr; 981 /* Store cb args since cb_ctx can be freed in _nvmf_ctrlr_free_from_qpair */ 982 nvmf_qpair_disconnect_cb cb_fn = qpair_ctx->cb_fn; 983 void *cb_arg = qpair_ctx->ctx; 984 struct spdk_thread *cb_thread = qpair_ctx->thread; 985 986 ctrlr = qpair_ctx->ctrlr; 987 SPDK_DEBUGLOG(nvmf, "Finish destroying qid %u\n", qpair_ctx->qid); 988 989 if (ctrlr) { 990 if (qpair_ctx->qid == 0) { 991 /* Admin qpair is removed, so set the pointer to NULL. 992 * This operation is safe since we are on ctrlr thread now, admin qpair's thread is the same 993 * as controller's thread */ 994 assert(ctrlr->thread == spdk_get_thread()); 995 ctrlr->admin_qpair = NULL; 996 } 997 /* Free qpair id from controller's bit mask and destroy the controller if it is the last qpair */ 998 if (ctrlr->thread) { 999 spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_free_from_qpair, qpair_ctx); 1000 } else { 1001 _nvmf_ctrlr_free_from_qpair(qpair_ctx); 1002 } 1003 } else { 1004 free(qpair_ctx); 1005 } 1006 1007 if (cb_fn) { 1008 spdk_thread_send_msg(cb_thread, cb_fn, cb_arg); 1009 } 1010 } 1011 1012 void 1013 spdk_nvmf_poll_group_remove(struct spdk_nvmf_qpair *qpair) 1014 { 1015 struct spdk_nvmf_transport_poll_group *tgroup; 1016 int rc; 1017 1018 SPDK_DTRACE_PROBE2(nvmf_poll_group_remove_qpair, qpair, 1019 spdk_thread_get_id(qpair->group->thread)); 1020 nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ERROR); 1021 1022 /* Find the tgroup and remove the qpair from the tgroup */ 1023 TAILQ_FOREACH(tgroup, &qpair->group->tgroups, link) { 1024 if (tgroup->transport == qpair->transport) { 1025 rc = nvmf_transport_poll_group_remove(tgroup, qpair); 1026 if (rc && (rc != ENOTSUP)) { 1027 SPDK_ERRLOG("Cannot remove qpair=%p from transport group=%p\n", 1028 qpair, tgroup); 1029 } 1030 break; 1031 } 1032 } 1033 1034 TAILQ_REMOVE(&qpair->group->qpairs, qpair, link); 1035 qpair->group = NULL; 1036 } 1037 1038 static void 1039 _nvmf_qpair_destroy(void *ctx, int status) 1040 { 1041 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; 1042 struct spdk_nvmf_qpair *qpair = qpair_ctx->qpair; 1043 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; 1044 struct spdk_nvmf_request *req, *tmp; 1045 struct spdk_nvmf_subsystem_poll_group *sgroup; 1046 1047 assert(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING); 1048 qpair_ctx->qid = qpair->qid; 1049 1050 if (ctrlr) { 1051 if (0 == qpair->qid) { 1052 assert(qpair->group->stat.current_admin_qpairs > 0); 1053 qpair->group->stat.current_admin_qpairs--; 1054 } else { 1055 assert(qpair->group->stat.current_io_qpairs > 0); 1056 qpair->group->stat.current_io_qpairs--; 1057 } 1058 1059 sgroup = &qpair->group->sgroups[ctrlr->subsys->id]; 1060 TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) { 1061 if (req->qpair == qpair) { 1062 TAILQ_REMOVE(&sgroup->queued, req, link); 1063 if (nvmf_transport_req_free(req)) { 1064 SPDK_ERRLOG("Transport request free error!/n"); 1065 } 1066 } 1067 } 1068 } 1069 1070 qpair_ctx->ctrlr = ctrlr; 1071 spdk_nvmf_poll_group_remove(qpair); 1072 nvmf_transport_qpair_fini(qpair, _nvmf_transport_qpair_fini_complete, qpair_ctx); 1073 } 1074 1075 static void 1076 _nvmf_qpair_disconnect_msg(void *ctx) 1077 { 1078 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; 1079 1080 spdk_nvmf_qpair_disconnect(qpair_ctx->qpair, qpair_ctx->cb_fn, qpair_ctx->ctx); 1081 free(ctx); 1082 } 1083 1084 int 1085 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx) 1086 { 1087 struct spdk_nvmf_poll_group *group = qpair->group; 1088 struct nvmf_qpair_disconnect_ctx *qpair_ctx; 1089 1090 if (__atomic_test_and_set(&qpair->disconnect_started, __ATOMIC_RELAXED)) { 1091 if (cb_fn) { 1092 cb_fn(ctx); 1093 } 1094 return 0; 1095 } 1096 1097 /* If we get a qpair in the uninitialized state, we can just destroy it immediately */ 1098 if (qpair->state == SPDK_NVMF_QPAIR_UNINITIALIZED) { 1099 nvmf_transport_qpair_fini(qpair, NULL, NULL); 1100 if (cb_fn) { 1101 cb_fn(ctx); 1102 } 1103 return 0; 1104 } 1105 1106 assert(group != NULL); 1107 if (spdk_get_thread() != group->thread) { 1108 /* clear the atomic so we can set it on the next call on the proper thread. */ 1109 __atomic_clear(&qpair->disconnect_started, __ATOMIC_RELAXED); 1110 qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx)); 1111 if (!qpair_ctx) { 1112 SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n"); 1113 return -ENOMEM; 1114 } 1115 qpair_ctx->qpair = qpair; 1116 qpair_ctx->cb_fn = cb_fn; 1117 qpair_ctx->thread = group->thread; 1118 qpair_ctx->ctx = ctx; 1119 spdk_thread_send_msg(group->thread, _nvmf_qpair_disconnect_msg, qpair_ctx); 1120 return 0; 1121 } 1122 1123 SPDK_DTRACE_PROBE2(nvmf_qpair_disconnect, qpair, spdk_thread_get_id(group->thread)); 1124 assert(qpair->state == SPDK_NVMF_QPAIR_ACTIVE); 1125 nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_DEACTIVATING); 1126 1127 qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx)); 1128 if (!qpair_ctx) { 1129 SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n"); 1130 return -ENOMEM; 1131 } 1132 1133 qpair_ctx->qpair = qpair; 1134 qpair_ctx->cb_fn = cb_fn; 1135 qpair_ctx->thread = group->thread; 1136 qpair_ctx->ctx = ctx; 1137 1138 /* Check for outstanding I/O */ 1139 if (!TAILQ_EMPTY(&qpair->outstanding)) { 1140 SPDK_DTRACE_PROBE2(nvmf_poll_group_drain_qpair, qpair, spdk_thread_get_id(group->thread)); 1141 qpair->state_cb = _nvmf_qpair_destroy; 1142 qpair->state_cb_arg = qpair_ctx; 1143 nvmf_qpair_abort_pending_zcopy_reqs(qpair); 1144 nvmf_qpair_free_aer(qpair); 1145 return 0; 1146 } 1147 1148 _nvmf_qpair_destroy(qpair_ctx, 0); 1149 1150 return 0; 1151 } 1152 1153 int 1154 spdk_nvmf_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair, 1155 struct spdk_nvme_transport_id *trid) 1156 { 1157 return nvmf_transport_qpair_get_peer_trid(qpair, trid); 1158 } 1159 1160 int 1161 spdk_nvmf_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair, 1162 struct spdk_nvme_transport_id *trid) 1163 { 1164 return nvmf_transport_qpair_get_local_trid(qpair, trid); 1165 } 1166 1167 int 1168 spdk_nvmf_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, 1169 struct spdk_nvme_transport_id *trid) 1170 { 1171 return nvmf_transport_qpair_get_listen_trid(qpair, trid); 1172 } 1173 1174 int 1175 nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group, 1176 struct spdk_nvmf_transport *transport) 1177 { 1178 struct spdk_nvmf_transport_poll_group *tgroup; 1179 1180 TAILQ_FOREACH(tgroup, &group->tgroups, link) { 1181 if (tgroup->transport == transport) { 1182 /* Transport already in the poll group */ 1183 return 0; 1184 } 1185 } 1186 1187 tgroup = nvmf_transport_poll_group_create(transport, group); 1188 if (!tgroup) { 1189 SPDK_ERRLOG("Unable to create poll group for transport\n"); 1190 return -1; 1191 } 1192 SPDK_DTRACE_PROBE2(nvmf_transport_poll_group_create, transport, spdk_thread_get_id(group->thread)); 1193 1194 tgroup->group = group; 1195 TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link); 1196 1197 return 0; 1198 } 1199 1200 static int 1201 poll_group_update_subsystem(struct spdk_nvmf_poll_group *group, 1202 struct spdk_nvmf_subsystem *subsystem) 1203 { 1204 struct spdk_nvmf_subsystem_poll_group *sgroup; 1205 uint32_t new_num_ns, old_num_ns; 1206 uint32_t i, j; 1207 struct spdk_nvmf_ns *ns; 1208 struct spdk_nvmf_registrant *reg, *tmp; 1209 struct spdk_io_channel *ch; 1210 struct spdk_nvmf_subsystem_pg_ns_info *ns_info; 1211 struct spdk_nvmf_ctrlr *ctrlr; 1212 bool ns_changed; 1213 1214 /* Make sure our poll group has memory for this subsystem allocated */ 1215 if (subsystem->id >= group->num_sgroups) { 1216 return -ENOMEM; 1217 } 1218 1219 sgroup = &group->sgroups[subsystem->id]; 1220 1221 /* Make sure the array of namespace information is the correct size */ 1222 new_num_ns = subsystem->max_nsid; 1223 old_num_ns = sgroup->num_ns; 1224 1225 ns_changed = false; 1226 1227 if (old_num_ns == 0) { 1228 if (new_num_ns > 0) { 1229 /* First allocation */ 1230 sgroup->ns_info = calloc(new_num_ns, sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1231 if (!sgroup->ns_info) { 1232 return -ENOMEM; 1233 } 1234 } 1235 } else if (new_num_ns > old_num_ns) { 1236 void *buf; 1237 1238 /* Make the array larger */ 1239 buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1240 if (!buf) { 1241 return -ENOMEM; 1242 } 1243 1244 sgroup->ns_info = buf; 1245 1246 /* Null out the new namespace information slots */ 1247 for (i = old_num_ns; i < new_num_ns; i++) { 1248 memset(&sgroup->ns_info[i], 0, sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1249 } 1250 } else if (new_num_ns < old_num_ns) { 1251 void *buf; 1252 1253 /* Free the extra I/O channels */ 1254 for (i = new_num_ns; i < old_num_ns; i++) { 1255 ns_info = &sgroup->ns_info[i]; 1256 1257 if (ns_info->channel) { 1258 spdk_put_io_channel(ns_info->channel); 1259 ns_info->channel = NULL; 1260 } 1261 } 1262 1263 /* Make the array smaller */ 1264 if (new_num_ns > 0) { 1265 buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1266 if (!buf) { 1267 return -ENOMEM; 1268 } 1269 sgroup->ns_info = buf; 1270 } else { 1271 free(sgroup->ns_info); 1272 sgroup->ns_info = NULL; 1273 } 1274 } 1275 1276 sgroup->num_ns = new_num_ns; 1277 1278 /* Detect bdevs that were added or removed */ 1279 for (i = 0; i < sgroup->num_ns; i++) { 1280 ns = subsystem->ns[i]; 1281 ns_info = &sgroup->ns_info[i]; 1282 ch = ns_info->channel; 1283 1284 if (ns == NULL && ch == NULL) { 1285 /* Both NULL. Leave empty */ 1286 } else if (ns == NULL && ch != NULL) { 1287 /* There was a channel here, but the namespace is gone. */ 1288 ns_changed = true; 1289 spdk_put_io_channel(ch); 1290 ns_info->channel = NULL; 1291 } else if (ns != NULL && ch == NULL) { 1292 /* A namespace appeared but there is no channel yet */ 1293 ns_changed = true; 1294 ch = spdk_bdev_get_io_channel(ns->desc); 1295 if (ch == NULL) { 1296 SPDK_ERRLOG("Could not allocate I/O channel.\n"); 1297 return -ENOMEM; 1298 } 1299 ns_info->channel = ch; 1300 } else if (spdk_uuid_compare(&ns_info->uuid, spdk_bdev_get_uuid(ns->bdev)) != 0) { 1301 /* A namespace was here before, but was replaced by a new one. */ 1302 ns_changed = true; 1303 spdk_put_io_channel(ns_info->channel); 1304 memset(ns_info, 0, sizeof(*ns_info)); 1305 1306 ch = spdk_bdev_get_io_channel(ns->desc); 1307 if (ch == NULL) { 1308 SPDK_ERRLOG("Could not allocate I/O channel.\n"); 1309 return -ENOMEM; 1310 } 1311 ns_info->channel = ch; 1312 } else if (ns_info->num_blocks != spdk_bdev_get_num_blocks(ns->bdev)) { 1313 /* Namespace is still there but size has changed */ 1314 SPDK_DEBUGLOG(nvmf, "Namespace resized: subsystem_id %u," 1315 " nsid %u, pg %p, old %" PRIu64 ", new %" PRIu64 "\n", 1316 subsystem->id, 1317 ns->nsid, 1318 group, 1319 ns_info->num_blocks, 1320 spdk_bdev_get_num_blocks(ns->bdev)); 1321 ns_changed = true; 1322 } 1323 1324 if (ns == NULL) { 1325 memset(ns_info, 0, sizeof(*ns_info)); 1326 } else { 1327 ns_info->uuid = *spdk_bdev_get_uuid(ns->bdev); 1328 ns_info->num_blocks = spdk_bdev_get_num_blocks(ns->bdev); 1329 ns_info->crkey = ns->crkey; 1330 ns_info->rtype = ns->rtype; 1331 if (ns->holder) { 1332 ns_info->holder_id = ns->holder->hostid; 1333 } 1334 1335 memset(&ns_info->reg_hostid, 0, SPDK_NVMF_MAX_NUM_REGISTRANTS * sizeof(struct spdk_uuid)); 1336 j = 0; 1337 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 1338 if (j >= SPDK_NVMF_MAX_NUM_REGISTRANTS) { 1339 SPDK_ERRLOG("Maximum %u registrants can support.\n", SPDK_NVMF_MAX_NUM_REGISTRANTS); 1340 return -EINVAL; 1341 } 1342 ns_info->reg_hostid[j++] = reg->hostid; 1343 } 1344 } 1345 } 1346 1347 if (ns_changed) { 1348 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 1349 /* It is possible that a ctrlr was added but the admin_qpair hasn't been 1350 * assigned yet. 1351 */ 1352 if (!ctrlr->admin_qpair) { 1353 continue; 1354 } 1355 if (ctrlr->admin_qpair->group == group) { 1356 nvmf_ctrlr_async_event_ns_notice(ctrlr); 1357 nvmf_ctrlr_async_event_ana_change_notice(ctrlr); 1358 } 1359 } 1360 } 1361 1362 return 0; 1363 } 1364 1365 int 1366 nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group, 1367 struct spdk_nvmf_subsystem *subsystem) 1368 { 1369 return poll_group_update_subsystem(group, subsystem); 1370 } 1371 1372 int 1373 nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group, 1374 struct spdk_nvmf_subsystem *subsystem, 1375 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1376 { 1377 int rc = 0; 1378 struct spdk_nvmf_subsystem_poll_group *sgroup = &group->sgroups[subsystem->id]; 1379 uint32_t i; 1380 1381 TAILQ_INIT(&sgroup->queued); 1382 1383 rc = poll_group_update_subsystem(group, subsystem); 1384 if (rc) { 1385 nvmf_poll_group_remove_subsystem(group, subsystem, NULL, NULL); 1386 goto fini; 1387 } 1388 1389 sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1390 1391 for (i = 0; i < sgroup->num_ns; i++) { 1392 sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1393 } 1394 1395 fini: 1396 if (cb_fn) { 1397 cb_fn(cb_arg, rc); 1398 } 1399 1400 SPDK_DTRACE_PROBE2(nvmf_poll_group_add_subsystem, spdk_thread_get_id(group->thread), 1401 subsystem->subnqn); 1402 1403 return rc; 1404 } 1405 1406 static void 1407 _nvmf_poll_group_remove_subsystem_cb(void *ctx, int status) 1408 { 1409 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; 1410 struct spdk_nvmf_subsystem *subsystem; 1411 struct spdk_nvmf_poll_group *group; 1412 struct spdk_nvmf_subsystem_poll_group *sgroup; 1413 spdk_nvmf_poll_group_mod_done cpl_fn = NULL; 1414 void *cpl_ctx = NULL; 1415 uint32_t nsid; 1416 1417 group = qpair_ctx->group; 1418 subsystem = qpair_ctx->subsystem; 1419 cpl_fn = qpair_ctx->cpl_fn; 1420 cpl_ctx = qpair_ctx->cpl_ctx; 1421 sgroup = &group->sgroups[subsystem->id]; 1422 1423 if (status) { 1424 goto fini; 1425 } 1426 1427 for (nsid = 0; nsid < sgroup->num_ns; nsid++) { 1428 if (sgroup->ns_info[nsid].channel) { 1429 spdk_put_io_channel(sgroup->ns_info[nsid].channel); 1430 sgroup->ns_info[nsid].channel = NULL; 1431 } 1432 } 1433 1434 sgroup->num_ns = 0; 1435 free(sgroup->ns_info); 1436 sgroup->ns_info = NULL; 1437 fini: 1438 free(qpair_ctx); 1439 if (cpl_fn) { 1440 cpl_fn(cpl_ctx, status); 1441 } 1442 } 1443 1444 static void nvmf_poll_group_remove_subsystem_msg(void *ctx); 1445 1446 static void 1447 remove_subsystem_qpair_cb(void *ctx) 1448 { 1449 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; 1450 1451 assert(qpair_ctx->count > 0); 1452 qpair_ctx->count--; 1453 if (qpair_ctx->count == 0) { 1454 /* All of the asynchronous callbacks for this context have been 1455 * completed. Call nvmf_poll_group_remove_subsystem_msg() again 1456 * to check if all associated qpairs for this subsystem have 1457 * been removed from the poll group. 1458 */ 1459 nvmf_poll_group_remove_subsystem_msg(ctx); 1460 } 1461 } 1462 1463 static void 1464 nvmf_poll_group_remove_subsystem_msg(void *ctx) 1465 { 1466 struct spdk_nvmf_qpair *qpair, *qpair_tmp; 1467 struct spdk_nvmf_subsystem *subsystem; 1468 struct spdk_nvmf_poll_group *group; 1469 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; 1470 bool qpairs_found = false; 1471 int rc = 0; 1472 1473 group = qpair_ctx->group; 1474 subsystem = qpair_ctx->subsystem; 1475 1476 /* Initialize count to 1. This acts like a ref count, to ensure that if spdk_nvmf_qpair_disconnect 1477 * immediately invokes the callback (i.e. the qpairs is already in process of being disconnected) 1478 * that we don't recursively call nvmf_poll_group_remove_subsystem_msg before we've iterated the 1479 * full list of qpairs. 1480 */ 1481 qpair_ctx->count = 1; 1482 TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, qpair_tmp) { 1483 if ((qpair->ctrlr != NULL) && (qpair->ctrlr->subsys == subsystem)) { 1484 qpairs_found = true; 1485 qpair_ctx->count++; 1486 rc = spdk_nvmf_qpair_disconnect(qpair, remove_subsystem_qpair_cb, ctx); 1487 if (rc) { 1488 break; 1489 } 1490 } 1491 } 1492 qpair_ctx->count--; 1493 1494 if (!qpairs_found) { 1495 _nvmf_poll_group_remove_subsystem_cb(ctx, 0); 1496 return; 1497 } 1498 1499 if (qpair_ctx->count == 0 || rc) { 1500 /* If count == 0, it means there were some qpairs in the poll group but they 1501 * were already in process of being disconnected. So we send a message to this 1502 * same thread so that this function executes again later. We won't actually 1503 * invoke the remove_subsystem_cb until all of the qpairs are actually removed 1504 * from the poll group. 1505 */ 1506 spdk_thread_send_msg(spdk_get_thread(), nvmf_poll_group_remove_subsystem_msg, ctx); 1507 } 1508 } 1509 1510 void 1511 nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group, 1512 struct spdk_nvmf_subsystem *subsystem, 1513 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1514 { 1515 struct spdk_nvmf_subsystem_poll_group *sgroup; 1516 struct nvmf_qpair_disconnect_many_ctx *ctx; 1517 uint32_t i; 1518 1519 SPDK_DTRACE_PROBE3(nvmf_poll_group_remove_subsystem, group, spdk_thread_get_id(group->thread), 1520 subsystem->subnqn); 1521 1522 ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx)); 1523 if (!ctx) { 1524 SPDK_ERRLOG("Unable to allocate memory for context to remove poll subsystem\n"); 1525 if (cb_fn) { 1526 cb_fn(cb_arg, -1); 1527 } 1528 return; 1529 } 1530 1531 ctx->group = group; 1532 ctx->subsystem = subsystem; 1533 ctx->cpl_fn = cb_fn; 1534 ctx->cpl_ctx = cb_arg; 1535 1536 sgroup = &group->sgroups[subsystem->id]; 1537 sgroup->state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 1538 1539 for (i = 0; i < sgroup->num_ns; i++) { 1540 sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 1541 } 1542 1543 nvmf_poll_group_remove_subsystem_msg(ctx); 1544 } 1545 1546 void 1547 nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group, 1548 struct spdk_nvmf_subsystem *subsystem, 1549 uint32_t nsid, 1550 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1551 { 1552 struct spdk_nvmf_subsystem_poll_group *sgroup; 1553 struct spdk_nvmf_subsystem_pg_ns_info *ns_info = NULL; 1554 int rc = 0; 1555 1556 if (subsystem->id >= group->num_sgroups) { 1557 rc = -1; 1558 goto fini; 1559 } 1560 1561 sgroup = &group->sgroups[subsystem->id]; 1562 if (sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSED) { 1563 goto fini; 1564 } 1565 sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSING; 1566 1567 /* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */ 1568 if (nsid - 1 < sgroup->num_ns) { 1569 ns_info = &sgroup->ns_info[nsid - 1]; 1570 ns_info->state = SPDK_NVMF_SUBSYSTEM_PAUSING; 1571 } 1572 1573 if (sgroup->mgmt_io_outstanding > 0) { 1574 assert(sgroup->cb_fn == NULL); 1575 sgroup->cb_fn = cb_fn; 1576 assert(sgroup->cb_arg == NULL); 1577 sgroup->cb_arg = cb_arg; 1578 return; 1579 } 1580 1581 if (ns_info != NULL && ns_info->io_outstanding > 0) { 1582 assert(sgroup->cb_fn == NULL); 1583 sgroup->cb_fn = cb_fn; 1584 assert(sgroup->cb_arg == NULL); 1585 sgroup->cb_arg = cb_arg; 1586 return; 1587 } 1588 1589 assert(sgroup->mgmt_io_outstanding == 0); 1590 sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED; 1591 fini: 1592 if (cb_fn) { 1593 cb_fn(cb_arg, rc); 1594 } 1595 } 1596 1597 void 1598 nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group, 1599 struct spdk_nvmf_subsystem *subsystem, 1600 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1601 { 1602 struct spdk_nvmf_request *req, *tmp; 1603 struct spdk_nvmf_subsystem_poll_group *sgroup; 1604 int rc = 0; 1605 uint32_t i; 1606 1607 if (subsystem->id >= group->num_sgroups) { 1608 rc = -1; 1609 goto fini; 1610 } 1611 1612 sgroup = &group->sgroups[subsystem->id]; 1613 1614 if (sgroup->state == SPDK_NVMF_SUBSYSTEM_ACTIVE) { 1615 goto fini; 1616 } 1617 1618 rc = poll_group_update_subsystem(group, subsystem); 1619 if (rc) { 1620 goto fini; 1621 } 1622 1623 for (i = 0; i < sgroup->num_ns; i++) { 1624 sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1625 } 1626 1627 sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1628 1629 /* Release all queued requests */ 1630 TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) { 1631 TAILQ_REMOVE(&sgroup->queued, req, link); 1632 if (spdk_nvmf_request_using_zcopy(req)) { 1633 spdk_nvmf_request_zcopy_start(req); 1634 } else { 1635 spdk_nvmf_request_exec(req); 1636 } 1637 1638 } 1639 fini: 1640 if (cb_fn) { 1641 cb_fn(cb_arg, rc); 1642 } 1643 } 1644 1645 1646 struct spdk_nvmf_poll_group * 1647 spdk_nvmf_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair) 1648 { 1649 struct spdk_nvmf_transport_poll_group *tgroup; 1650 1651 tgroup = nvmf_transport_get_optimal_poll_group(qpair->transport, qpair); 1652 1653 if (tgroup == NULL) { 1654 return NULL; 1655 } 1656 1657 return tgroup->group; 1658 } 1659 1660 void 1661 spdk_nvmf_poll_group_dump_stat(struct spdk_nvmf_poll_group *group, struct spdk_json_write_ctx *w) 1662 { 1663 struct spdk_nvmf_transport_poll_group *tgroup; 1664 1665 spdk_json_write_object_begin(w); 1666 1667 spdk_json_write_named_string(w, "name", spdk_thread_get_name(spdk_get_thread())); 1668 spdk_json_write_named_uint32(w, "admin_qpairs", group->stat.admin_qpairs); 1669 spdk_json_write_named_uint32(w, "io_qpairs", group->stat.io_qpairs); 1670 spdk_json_write_named_uint32(w, "current_admin_qpairs", group->stat.current_admin_qpairs); 1671 spdk_json_write_named_uint32(w, "current_io_qpairs", group->stat.current_io_qpairs); 1672 spdk_json_write_named_uint64(w, "pending_bdev_io", group->stat.pending_bdev_io); 1673 1674 spdk_json_write_named_array_begin(w, "transports"); 1675 1676 TAILQ_FOREACH(tgroup, &group->tgroups, link) { 1677 spdk_json_write_object_begin(w); 1678 /* 1679 * The trtype field intentionally contains a transport name as this is more informative. 1680 * The field has not been renamed for backward compatibility. 1681 */ 1682 spdk_json_write_named_string(w, "trtype", spdk_nvmf_get_transport_name(tgroup->transport)); 1683 1684 if (tgroup->transport->ops->poll_group_dump_stat) { 1685 tgroup->transport->ops->poll_group_dump_stat(tgroup, w); 1686 } 1687 1688 spdk_json_write_object_end(w); 1689 } 1690 1691 spdk_json_write_array_end(w); 1692 spdk_json_write_object_end(w); 1693 } 1694