1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2016 Intel Corporation. All rights reserved. 3 * Copyright (c) 2018-2019, 2021 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 9 #include "spdk/bdev.h" 10 #include "spdk/bit_array.h" 11 #include "spdk/thread.h" 12 #include "spdk/nvmf.h" 13 #include "spdk/endian.h" 14 #include "spdk/string.h" 15 #include "spdk/log.h" 16 #include "spdk_internal/usdt.h" 17 18 #include "nvmf_internal.h" 19 #include "transport.h" 20 21 SPDK_LOG_REGISTER_COMPONENT(nvmf) 22 23 #define SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS 1024 24 25 static TAILQ_HEAD(, spdk_nvmf_tgt) g_nvmf_tgts = TAILQ_HEAD_INITIALIZER(g_nvmf_tgts); 26 27 typedef void (*nvmf_qpair_disconnect_cpl)(void *ctx, int status); 28 29 /* supplied to a single call to nvmf_qpair_disconnect */ 30 struct nvmf_qpair_disconnect_ctx { 31 struct spdk_nvmf_qpair *qpair; 32 struct spdk_nvmf_ctrlr *ctrlr; 33 nvmf_qpair_disconnect_cb cb_fn; 34 struct spdk_thread *thread; 35 void *ctx; 36 uint16_t qid; 37 }; 38 39 /* 40 * There are several times when we need to iterate through the list of all qpairs and selectively delete them. 41 * In order to do this sequentially without overlap, we must provide a context to recover the next qpair from 42 * to enable calling nvmf_qpair_disconnect on the next desired qpair. 43 */ 44 struct nvmf_qpair_disconnect_many_ctx { 45 struct spdk_nvmf_subsystem *subsystem; 46 struct spdk_nvmf_poll_group *group; 47 spdk_nvmf_poll_group_mod_done cpl_fn; 48 void *cpl_ctx; 49 }; 50 51 static struct spdk_nvmf_referral * 52 nvmf_tgt_find_referral(struct spdk_nvmf_tgt *tgt, 53 const struct spdk_nvme_transport_id *trid) 54 { 55 struct spdk_nvmf_referral *referral; 56 57 TAILQ_FOREACH(referral, &tgt->referrals, link) { 58 if (spdk_nvme_transport_id_compare(&referral->trid, trid) == 0) { 59 return referral; 60 } 61 } 62 63 return NULL; 64 } 65 66 int 67 spdk_nvmf_tgt_add_referral(struct spdk_nvmf_tgt *tgt, 68 struct spdk_nvme_transport_id *trid, 69 bool secure_channel) 70 { 71 struct spdk_nvmf_referral *referral; 72 73 /* If the entry already exists, just ignore it. */ 74 if (nvmf_tgt_find_referral(tgt, trid)) { 75 return 0; 76 } 77 78 referral = calloc(1, sizeof(*referral)); 79 if (!referral) { 80 SPDK_ERRLOG("Failed to allocate memory for a referral\n"); 81 return -ENOMEM; 82 } 83 84 referral->entry.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY; 85 referral->entry.treq.secure_channel = secure_channel ? 86 SPDK_NVMF_TREQ_SECURE_CHANNEL_REQUIRED 87 : SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_REQUIRED; 88 referral->entry.cntlid = 89 0xffff; /* Discovery controller shall support the dynamic controller model */ 90 referral->entry.trtype = trid->trtype; 91 referral->entry.adrfam = trid->adrfam; 92 snprintf(referral->entry.subnqn, sizeof(referral->entry.subnqn), "%s", SPDK_NVMF_DISCOVERY_NQN); 93 memcpy(&referral->trid, trid, sizeof(struct spdk_nvme_transport_id)); 94 spdk_strcpy_pad(referral->entry.trsvcid, trid->trsvcid, sizeof(referral->entry.trsvcid), ' '); 95 spdk_strcpy_pad(referral->entry.traddr, trid->traddr, sizeof(referral->entry.traddr), ' '); 96 97 TAILQ_INSERT_HEAD(&tgt->referrals, referral, link); 98 nvmf_update_discovery_log(tgt, NULL); 99 100 return 0; 101 } 102 103 int 104 spdk_nvmf_tgt_remove_referral(struct spdk_nvmf_tgt *tgt, 105 struct spdk_nvme_transport_id *trid) 106 { 107 struct spdk_nvmf_referral *referral; 108 109 referral = nvmf_tgt_find_referral(tgt, trid); 110 if (referral == NULL) { 111 return -ENOENT; 112 } 113 114 TAILQ_REMOVE(&tgt->referrals, referral, link); 115 nvmf_update_discovery_log(tgt, NULL); 116 117 free(referral); 118 119 return 0; 120 } 121 122 static void 123 nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair, 124 enum spdk_nvmf_qpair_state state) 125 { 126 assert(qpair != NULL); 127 assert(qpair->group->thread == spdk_get_thread()); 128 129 qpair->state = state; 130 } 131 132 static int 133 nvmf_poll_group_poll(void *ctx) 134 { 135 struct spdk_nvmf_poll_group *group = ctx; 136 int rc; 137 int count = 0; 138 struct spdk_nvmf_transport_poll_group *tgroup; 139 140 TAILQ_FOREACH(tgroup, &group->tgroups, link) { 141 rc = nvmf_transport_poll_group_poll(tgroup); 142 if (rc < 0) { 143 return SPDK_POLLER_BUSY; 144 } 145 count += rc; 146 } 147 148 return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; 149 } 150 151 /* 152 * Reset and clean up the poll group (I/O channel code will actually free the 153 * group). 154 */ 155 static void 156 nvmf_tgt_cleanup_poll_group(struct spdk_nvmf_poll_group *group) 157 { 158 struct spdk_nvmf_transport_poll_group *tgroup, *tmp; 159 struct spdk_nvmf_subsystem_poll_group *sgroup; 160 uint32_t sid, nsid; 161 162 TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) { 163 TAILQ_REMOVE(&group->tgroups, tgroup, link); 164 nvmf_transport_poll_group_destroy(tgroup); 165 } 166 167 for (sid = 0; sid < group->num_sgroups; sid++) { 168 sgroup = &group->sgroups[sid]; 169 170 assert(sgroup != NULL); 171 172 for (nsid = 0; nsid < sgroup->num_ns; nsid++) { 173 if (sgroup->ns_info[nsid].channel) { 174 spdk_put_io_channel(sgroup->ns_info[nsid].channel); 175 sgroup->ns_info[nsid].channel = NULL; 176 } 177 } 178 179 free(sgroup->ns_info); 180 } 181 182 free(group->sgroups); 183 184 spdk_poller_unregister(&group->poller); 185 186 if (group->destroy_cb_fn) { 187 group->destroy_cb_fn(group->destroy_cb_arg, 0); 188 } 189 } 190 191 /* 192 * Callback to unregister a poll group from the target, and clean up its state. 193 */ 194 static void 195 nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf) 196 { 197 struct spdk_nvmf_tgt *tgt = io_device; 198 struct spdk_nvmf_poll_group *group = ctx_buf; 199 200 SPDK_DTRACE_PROBE1_TICKS(nvmf_destroy_poll_group, spdk_thread_get_id(group->thread)); 201 202 pthread_mutex_lock(&tgt->mutex); 203 TAILQ_REMOVE(&tgt->poll_groups, group, link); 204 tgt->num_poll_groups--; 205 pthread_mutex_unlock(&tgt->mutex); 206 207 assert(!(tgt->state == NVMF_TGT_PAUSING || tgt->state == NVMF_TGT_RESUMING)); 208 nvmf_tgt_cleanup_poll_group(group); 209 } 210 211 static int 212 nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group, 213 struct spdk_nvmf_transport *transport) 214 { 215 struct spdk_nvmf_transport_poll_group *tgroup; 216 217 TAILQ_FOREACH(tgroup, &group->tgroups, link) { 218 if (tgroup->transport == transport) { 219 /* Transport already in the poll group */ 220 return 0; 221 } 222 } 223 224 tgroup = nvmf_transport_poll_group_create(transport, group); 225 if (!tgroup) { 226 SPDK_ERRLOG("Unable to create poll group for transport\n"); 227 return -1; 228 } 229 SPDK_DTRACE_PROBE2_TICKS(nvmf_transport_poll_group_create, transport, 230 spdk_thread_get_id(group->thread)); 231 232 tgroup->group = group; 233 TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link); 234 235 return 0; 236 } 237 238 static int 239 nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf) 240 { 241 struct spdk_nvmf_tgt *tgt = io_device; 242 struct spdk_nvmf_poll_group *group = ctx_buf; 243 struct spdk_nvmf_transport *transport; 244 struct spdk_nvmf_subsystem *subsystem; 245 struct spdk_thread *thread = spdk_get_thread(); 246 int rc; 247 248 group->tgt = tgt; 249 TAILQ_INIT(&group->tgroups); 250 TAILQ_INIT(&group->qpairs); 251 group->thread = thread; 252 pthread_mutex_init(&group->mutex, NULL); 253 254 group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0); 255 256 SPDK_DTRACE_PROBE1_TICKS(nvmf_create_poll_group, spdk_thread_get_id(thread)); 257 258 TAILQ_FOREACH(transport, &tgt->transports, link) { 259 rc = nvmf_poll_group_add_transport(group, transport); 260 if (rc != 0) { 261 nvmf_tgt_cleanup_poll_group(group); 262 return rc; 263 } 264 } 265 266 group->num_sgroups = tgt->max_subsystems; 267 group->sgroups = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem_poll_group)); 268 if (!group->sgroups) { 269 nvmf_tgt_cleanup_poll_group(group); 270 return -ENOMEM; 271 } 272 273 for (subsystem = spdk_nvmf_subsystem_get_first(tgt); 274 subsystem != NULL; 275 subsystem = spdk_nvmf_subsystem_get_next(subsystem)) { 276 if (nvmf_poll_group_add_subsystem(group, subsystem, NULL, NULL) != 0) { 277 nvmf_tgt_cleanup_poll_group(group); 278 return -1; 279 } 280 } 281 282 pthread_mutex_lock(&tgt->mutex); 283 tgt->num_poll_groups++; 284 TAILQ_INSERT_TAIL(&tgt->poll_groups, group, link); 285 pthread_mutex_unlock(&tgt->mutex); 286 287 return 0; 288 } 289 290 static void 291 _nvmf_tgt_disconnect_qpairs(void *ctx) 292 { 293 struct spdk_nvmf_qpair *qpair, *qpair_tmp; 294 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; 295 struct spdk_nvmf_poll_group *group = qpair_ctx->group; 296 struct spdk_io_channel *ch; 297 int rc; 298 299 TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, qpair_tmp) { 300 rc = spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 301 if (rc && rc != -EINPROGRESS) { 302 break; 303 } 304 } 305 306 if (TAILQ_EMPTY(&group->qpairs)) { 307 /* When the refcount from the channels reaches 0, nvmf_tgt_destroy_poll_group will be called. */ 308 ch = spdk_io_channel_from_ctx(group); 309 spdk_put_io_channel(ch); 310 free(qpair_ctx); 311 return; 312 } 313 314 /* Some qpairs are in process of being disconnected. Send a message and try to remove them again */ 315 spdk_thread_send_msg(spdk_get_thread(), _nvmf_tgt_disconnect_qpairs, ctx); 316 } 317 318 static void 319 nvmf_tgt_destroy_poll_group_qpairs(struct spdk_nvmf_poll_group *group) 320 { 321 struct nvmf_qpair_disconnect_many_ctx *ctx; 322 323 SPDK_DTRACE_PROBE1_TICKS(nvmf_destroy_poll_group_qpairs, spdk_thread_get_id(group->thread)); 324 325 ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx)); 326 if (!ctx) { 327 SPDK_ERRLOG("Failed to allocate memory for destroy poll group ctx\n"); 328 return; 329 } 330 331 ctx->group = group; 332 _nvmf_tgt_disconnect_qpairs(ctx); 333 } 334 335 struct spdk_nvmf_tgt * 336 spdk_nvmf_tgt_create(struct spdk_nvmf_target_opts *opts) 337 { 338 struct spdk_nvmf_tgt *tgt, *tmp_tgt; 339 340 if (strnlen(opts->name, NVMF_TGT_NAME_MAX_LENGTH) == NVMF_TGT_NAME_MAX_LENGTH) { 341 SPDK_ERRLOG("Provided target name exceeds the max length of %u.\n", NVMF_TGT_NAME_MAX_LENGTH); 342 return NULL; 343 } 344 345 TAILQ_FOREACH(tmp_tgt, &g_nvmf_tgts, link) { 346 if (!strncmp(opts->name, tmp_tgt->name, NVMF_TGT_NAME_MAX_LENGTH)) { 347 SPDK_ERRLOG("Provided target name must be unique.\n"); 348 return NULL; 349 } 350 } 351 352 tgt = calloc(1, sizeof(*tgt)); 353 if (!tgt) { 354 return NULL; 355 } 356 357 snprintf(tgt->name, NVMF_TGT_NAME_MAX_LENGTH, "%s", opts->name); 358 359 if (!opts || !opts->max_subsystems) { 360 tgt->max_subsystems = SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS; 361 } else { 362 tgt->max_subsystems = opts->max_subsystems; 363 } 364 365 if (!opts) { 366 tgt->crdt[0] = 0; 367 tgt->crdt[1] = 0; 368 tgt->crdt[2] = 0; 369 } else { 370 tgt->crdt[0] = opts->crdt[0]; 371 tgt->crdt[1] = opts->crdt[1]; 372 tgt->crdt[2] = opts->crdt[2]; 373 } 374 375 if (!opts) { 376 tgt->discovery_filter = SPDK_NVMF_TGT_DISCOVERY_MATCH_ANY; 377 } else { 378 tgt->discovery_filter = opts->discovery_filter; 379 } 380 381 tgt->discovery_genctr = 0; 382 TAILQ_INIT(&tgt->transports); 383 TAILQ_INIT(&tgt->poll_groups); 384 TAILQ_INIT(&tgt->referrals); 385 tgt->num_poll_groups = 0; 386 387 tgt->subsystem_ids = spdk_bit_array_create(tgt->max_subsystems); 388 if (tgt->subsystem_ids == NULL) { 389 free(tgt); 390 return NULL; 391 } 392 393 RB_INIT(&tgt->subsystems); 394 395 pthread_mutex_init(&tgt->mutex, NULL); 396 397 spdk_io_device_register(tgt, 398 nvmf_tgt_create_poll_group, 399 nvmf_tgt_destroy_poll_group, 400 sizeof(struct spdk_nvmf_poll_group), 401 tgt->name); 402 403 tgt->state = NVMF_TGT_RUNNING; 404 405 TAILQ_INSERT_HEAD(&g_nvmf_tgts, tgt, link); 406 407 return tgt; 408 } 409 410 static void 411 _nvmf_tgt_destroy_next_transport(void *ctx) 412 { 413 struct spdk_nvmf_tgt *tgt = ctx; 414 struct spdk_nvmf_transport *transport; 415 416 if (!TAILQ_EMPTY(&tgt->transports)) { 417 transport = TAILQ_FIRST(&tgt->transports); 418 TAILQ_REMOVE(&tgt->transports, transport, link); 419 spdk_nvmf_transport_destroy(transport, _nvmf_tgt_destroy_next_transport, tgt); 420 } else { 421 spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn = tgt->destroy_cb_fn; 422 void *destroy_cb_arg = tgt->destroy_cb_arg; 423 424 pthread_mutex_destroy(&tgt->mutex); 425 free(tgt); 426 427 if (destroy_cb_fn) { 428 destroy_cb_fn(destroy_cb_arg, 0); 429 } 430 } 431 } 432 433 static void 434 nvmf_tgt_destroy_cb(void *io_device) 435 { 436 struct spdk_nvmf_tgt *tgt = io_device; 437 struct spdk_nvmf_subsystem *subsystem, *subsystem_next; 438 int rc; 439 struct spdk_nvmf_referral *referral; 440 441 while ((referral = TAILQ_FIRST(&tgt->referrals))) { 442 TAILQ_REMOVE(&tgt->referrals, referral, link); 443 free(referral); 444 } 445 446 /* We will be freeing subsystems in this loop, so we always need to get the next one 447 * ahead of time, since we can't call get_next() on a subsystem that's been freed. 448 */ 449 for (subsystem = spdk_nvmf_subsystem_get_first(tgt), 450 subsystem_next = spdk_nvmf_subsystem_get_next(subsystem); 451 subsystem != NULL; 452 subsystem = subsystem_next, 453 subsystem_next = spdk_nvmf_subsystem_get_next(subsystem_next)) { 454 nvmf_subsystem_remove_all_listeners(subsystem, true); 455 456 rc = spdk_nvmf_subsystem_destroy(subsystem, nvmf_tgt_destroy_cb, tgt); 457 if (rc) { 458 if (rc == -EINPROGRESS) { 459 /* If rc is -EINPROGRESS, nvmf_tgt_destroy_cb will be called again when subsystem #i 460 * is destroyed, nvmf_tgt_destroy_cb will continue to destroy other subsystems if any */ 461 return; 462 } else { 463 SPDK_ERRLOG("Failed to destroy subsystem %s, rc %d\n", subsystem->subnqn, rc); 464 } 465 } 466 } 467 spdk_bit_array_free(&tgt->subsystem_ids); 468 _nvmf_tgt_destroy_next_transport(tgt); 469 } 470 471 void 472 spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt, 473 spdk_nvmf_tgt_destroy_done_fn cb_fn, 474 void *cb_arg) 475 { 476 assert(!(tgt->state == NVMF_TGT_PAUSING || tgt->state == NVMF_TGT_RESUMING)); 477 478 tgt->destroy_cb_fn = cb_fn; 479 tgt->destroy_cb_arg = cb_arg; 480 481 TAILQ_REMOVE(&g_nvmf_tgts, tgt, link); 482 483 spdk_io_device_unregister(tgt, nvmf_tgt_destroy_cb); 484 } 485 486 const char * 487 spdk_nvmf_tgt_get_name(struct spdk_nvmf_tgt *tgt) 488 { 489 return tgt->name; 490 } 491 492 struct spdk_nvmf_tgt * 493 spdk_nvmf_get_tgt(const char *name) 494 { 495 struct spdk_nvmf_tgt *tgt; 496 uint32_t num_targets = 0; 497 498 TAILQ_FOREACH(tgt, &g_nvmf_tgts, link) { 499 if (name) { 500 if (!strncmp(tgt->name, name, NVMF_TGT_NAME_MAX_LENGTH)) { 501 return tgt; 502 } 503 } 504 num_targets++; 505 } 506 507 /* 508 * special case. If there is only one target and 509 * no name was specified, return the only available 510 * target. If there is more than one target, name must 511 * be specified. 512 */ 513 if (!name && num_targets == 1) { 514 return TAILQ_FIRST(&g_nvmf_tgts); 515 } 516 517 return NULL; 518 } 519 520 struct spdk_nvmf_tgt * 521 spdk_nvmf_get_first_tgt(void) 522 { 523 return TAILQ_FIRST(&g_nvmf_tgts); 524 } 525 526 struct spdk_nvmf_tgt * 527 spdk_nvmf_get_next_tgt(struct spdk_nvmf_tgt *prev) 528 { 529 return TAILQ_NEXT(prev, link); 530 } 531 532 static void 533 nvmf_write_subsystem_config_json(struct spdk_json_write_ctx *w, 534 struct spdk_nvmf_subsystem *subsystem) 535 { 536 struct spdk_nvmf_host *host; 537 struct spdk_nvmf_subsystem_listener *listener; 538 const struct spdk_nvme_transport_id *trid; 539 struct spdk_nvmf_ns *ns; 540 struct spdk_nvmf_ns_opts ns_opts; 541 uint32_t max_namespaces; 542 char uuid_str[SPDK_UUID_STRING_LEN]; 543 struct spdk_nvmf_transport *transport; 544 545 if (spdk_nvmf_subsystem_get_type(subsystem) != SPDK_NVMF_SUBTYPE_NVME) { 546 return; 547 } 548 549 /* { */ 550 spdk_json_write_object_begin(w); 551 spdk_json_write_named_string(w, "method", "nvmf_create_subsystem"); 552 553 /* "params" : { */ 554 spdk_json_write_named_object_begin(w, "params"); 555 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 556 spdk_json_write_named_bool(w, "allow_any_host", spdk_nvmf_subsystem_get_allow_any_host(subsystem)); 557 spdk_json_write_named_string(w, "serial_number", spdk_nvmf_subsystem_get_sn(subsystem)); 558 spdk_json_write_named_string(w, "model_number", spdk_nvmf_subsystem_get_mn(subsystem)); 559 560 max_namespaces = spdk_nvmf_subsystem_get_max_namespaces(subsystem); 561 if (max_namespaces != 0) { 562 spdk_json_write_named_uint32(w, "max_namespaces", max_namespaces); 563 } 564 565 spdk_json_write_named_uint32(w, "min_cntlid", spdk_nvmf_subsystem_get_min_cntlid(subsystem)); 566 spdk_json_write_named_uint32(w, "max_cntlid", spdk_nvmf_subsystem_get_max_cntlid(subsystem)); 567 spdk_json_write_named_bool(w, "ana_reporting", nvmf_subsystem_get_ana_reporting(subsystem)); 568 569 /* } "params" */ 570 spdk_json_write_object_end(w); 571 572 /* } */ 573 spdk_json_write_object_end(w); 574 575 for (host = spdk_nvmf_subsystem_get_first_host(subsystem); host != NULL; 576 host = spdk_nvmf_subsystem_get_next_host(subsystem, host)) { 577 578 spdk_json_write_object_begin(w); 579 spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_host"); 580 581 /* "params" : { */ 582 spdk_json_write_named_object_begin(w, "params"); 583 584 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 585 spdk_json_write_named_string(w, "host", spdk_nvmf_host_get_nqn(host)); 586 587 TAILQ_FOREACH(transport, &subsystem->tgt->transports, link) { 588 if (transport->ops->subsystem_dump_host != NULL) { 589 transport->ops->subsystem_dump_host(transport, subsystem, host->nqn, w); 590 } 591 } 592 593 /* } "params" */ 594 spdk_json_write_object_end(w); 595 596 /* } */ 597 spdk_json_write_object_end(w); 598 } 599 600 for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL; 601 ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) { 602 spdk_nvmf_ns_get_opts(ns, &ns_opts, sizeof(ns_opts)); 603 604 spdk_json_write_object_begin(w); 605 spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_ns"); 606 607 /* "params" : { */ 608 spdk_json_write_named_object_begin(w, "params"); 609 610 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 611 612 /* "namespace" : { */ 613 spdk_json_write_named_object_begin(w, "namespace"); 614 615 spdk_json_write_named_uint32(w, "nsid", spdk_nvmf_ns_get_id(ns)); 616 spdk_json_write_named_string(w, "bdev_name", spdk_bdev_get_name(spdk_nvmf_ns_get_bdev(ns))); 617 618 if (!spdk_mem_all_zero(ns_opts.nguid, sizeof(ns_opts.nguid))) { 619 SPDK_STATIC_ASSERT(sizeof(ns_opts.nguid) == sizeof(uint64_t) * 2, "size mismatch"); 620 spdk_json_write_named_string_fmt(w, "nguid", "%016"PRIX64"%016"PRIX64, from_be64(&ns_opts.nguid[0]), 621 from_be64(&ns_opts.nguid[8])); 622 } 623 624 if (!spdk_mem_all_zero(ns_opts.eui64, sizeof(ns_opts.eui64))) { 625 SPDK_STATIC_ASSERT(sizeof(ns_opts.eui64) == sizeof(uint64_t), "size mismatch"); 626 spdk_json_write_named_string_fmt(w, "eui64", "%016"PRIX64, from_be64(&ns_opts.eui64)); 627 } 628 629 if (!spdk_uuid_is_null(&ns_opts.uuid)) { 630 spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &ns_opts.uuid); 631 spdk_json_write_named_string(w, "uuid", uuid_str); 632 } 633 634 if (nvmf_subsystem_get_ana_reporting(subsystem)) { 635 spdk_json_write_named_uint32(w, "anagrpid", ns_opts.anagrpid); 636 } 637 638 /* "namespace" */ 639 spdk_json_write_object_end(w); 640 641 /* } "params" */ 642 spdk_json_write_object_end(w); 643 644 /* } */ 645 spdk_json_write_object_end(w); 646 } 647 648 for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem); listener != NULL; 649 listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) { 650 transport = listener->transport; 651 trid = spdk_nvmf_subsystem_listener_get_trid(listener); 652 653 spdk_json_write_object_begin(w); 654 spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_listener"); 655 656 /* "params" : { */ 657 spdk_json_write_named_object_begin(w, "params"); 658 659 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 660 661 spdk_json_write_named_object_begin(w, "listen_address"); 662 nvmf_transport_listen_dump_trid(trid, w); 663 spdk_json_write_object_end(w); 664 if (transport->ops->listen_dump_opts) { 665 transport->ops->listen_dump_opts(transport, trid, w); 666 } 667 668 spdk_json_write_named_bool(w, "secure_channel", listener->opts.secure_channel); 669 670 /* } "params" */ 671 spdk_json_write_object_end(w); 672 673 /* } */ 674 spdk_json_write_object_end(w); 675 } 676 677 } 678 679 void 680 spdk_nvmf_tgt_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_tgt *tgt) 681 { 682 struct spdk_nvmf_subsystem *subsystem; 683 struct spdk_nvmf_transport *transport; 684 685 spdk_json_write_object_begin(w); 686 spdk_json_write_named_string(w, "method", "nvmf_set_max_subsystems"); 687 688 spdk_json_write_named_object_begin(w, "params"); 689 spdk_json_write_named_uint32(w, "max_subsystems", tgt->max_subsystems); 690 spdk_json_write_object_end(w); 691 692 spdk_json_write_object_end(w); 693 694 spdk_json_write_object_begin(w); 695 spdk_json_write_named_string(w, "method", "nvmf_set_crdt"); 696 spdk_json_write_named_object_begin(w, "params"); 697 spdk_json_write_named_uint32(w, "crdt1", tgt->crdt[0]); 698 spdk_json_write_named_uint32(w, "crdt2", tgt->crdt[1]); 699 spdk_json_write_named_uint32(w, "crdt3", tgt->crdt[2]); 700 spdk_json_write_object_end(w); 701 spdk_json_write_object_end(w); 702 703 /* write transports */ 704 TAILQ_FOREACH(transport, &tgt->transports, link) { 705 spdk_json_write_object_begin(w); 706 spdk_json_write_named_string(w, "method", "nvmf_create_transport"); 707 nvmf_transport_dump_opts(transport, w, true); 708 spdk_json_write_object_end(w); 709 } 710 711 subsystem = spdk_nvmf_subsystem_get_first(tgt); 712 while (subsystem) { 713 nvmf_write_subsystem_config_json(w, subsystem); 714 subsystem = spdk_nvmf_subsystem_get_next(subsystem); 715 } 716 } 717 718 static void 719 nvmf_listen_opts_copy(struct spdk_nvmf_listen_opts *opts, 720 const struct spdk_nvmf_listen_opts *opts_src, size_t opts_size) 721 { 722 assert(opts); 723 assert(opts_src); 724 725 opts->opts_size = opts_size; 726 727 #define SET_FIELD(field) \ 728 if (offsetof(struct spdk_nvmf_listen_opts, field) + sizeof(opts->field) <= opts_size) { \ 729 opts->field = opts_src->field; \ 730 } \ 731 732 SET_FIELD(transport_specific); 733 SET_FIELD(secure_channel); 734 #undef SET_FIELD 735 736 /* Do not remove this statement, you should always update this statement when you adding a new field, 737 * and do not forget to add the SET_FIELD statement for your added field. */ 738 SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_listen_opts) == 17, "Incorrect size"); 739 } 740 741 void 742 spdk_nvmf_listen_opts_init(struct spdk_nvmf_listen_opts *opts, size_t opts_size) 743 { 744 struct spdk_nvmf_listen_opts opts_local = {}; 745 746 /* local version of opts should have defaults set here */ 747 748 nvmf_listen_opts_copy(opts, &opts_local, opts_size); 749 } 750 751 int 752 spdk_nvmf_tgt_listen_ext(struct spdk_nvmf_tgt *tgt, const struct spdk_nvme_transport_id *trid, 753 struct spdk_nvmf_listen_opts *opts) 754 { 755 struct spdk_nvmf_transport *transport; 756 int rc; 757 struct spdk_nvmf_listen_opts opts_local = {}; 758 759 if (!opts) { 760 SPDK_ERRLOG("opts should not be NULL\n"); 761 return -EINVAL; 762 } 763 764 if (!opts->opts_size) { 765 SPDK_ERRLOG("The opts_size in opts structure should not be zero\n"); 766 return -EINVAL; 767 } 768 769 transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring); 770 if (!transport) { 771 SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n", 772 trid->trstring); 773 return -EINVAL; 774 } 775 776 nvmf_listen_opts_copy(&opts_local, opts, opts->opts_size); 777 rc = spdk_nvmf_transport_listen(transport, trid, &opts_local); 778 if (rc < 0) { 779 SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr); 780 } 781 782 return rc; 783 } 784 785 int 786 spdk_nvmf_tgt_stop_listen(struct spdk_nvmf_tgt *tgt, 787 struct spdk_nvme_transport_id *trid) 788 { 789 struct spdk_nvmf_transport *transport; 790 int rc; 791 792 transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring); 793 if (!transport) { 794 SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n", 795 trid->trstring); 796 return -EINVAL; 797 } 798 799 rc = spdk_nvmf_transport_stop_listen(transport, trid); 800 if (rc < 0) { 801 SPDK_ERRLOG("Failed to stop listening on address '%s'\n", trid->traddr); 802 return rc; 803 } 804 return 0; 805 } 806 807 struct spdk_nvmf_tgt_add_transport_ctx { 808 struct spdk_nvmf_tgt *tgt; 809 struct spdk_nvmf_transport *transport; 810 spdk_nvmf_tgt_add_transport_done_fn cb_fn; 811 void *cb_arg; 812 int status; 813 }; 814 815 static void 816 _nvmf_tgt_remove_transport_done(struct spdk_io_channel_iter *i, int status) 817 { 818 struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 819 820 ctx->cb_fn(ctx->cb_arg, ctx->status); 821 free(ctx); 822 } 823 824 static void 825 _nvmf_tgt_remove_transport(struct spdk_io_channel_iter *i) 826 { 827 struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 828 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 829 struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch); 830 struct spdk_nvmf_transport_poll_group *tgroup, *tmp; 831 832 TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) { 833 if (tgroup->transport == ctx->transport) { 834 TAILQ_REMOVE(&group->tgroups, tgroup, link); 835 nvmf_transport_poll_group_destroy(tgroup); 836 } 837 } 838 839 spdk_for_each_channel_continue(i, 0); 840 } 841 842 static void 843 _nvmf_tgt_add_transport_done(struct spdk_io_channel_iter *i, int status) 844 { 845 struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 846 847 if (status) { 848 ctx->status = status; 849 spdk_for_each_channel(ctx->tgt, 850 _nvmf_tgt_remove_transport, 851 ctx, 852 _nvmf_tgt_remove_transport_done); 853 return; 854 } 855 856 ctx->transport->tgt = ctx->tgt; 857 TAILQ_INSERT_TAIL(&ctx->tgt->transports, ctx->transport, link); 858 ctx->cb_fn(ctx->cb_arg, status); 859 free(ctx); 860 } 861 862 static void 863 _nvmf_tgt_add_transport(struct spdk_io_channel_iter *i) 864 { 865 struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 866 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 867 struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch); 868 int rc; 869 870 rc = nvmf_poll_group_add_transport(group, ctx->transport); 871 spdk_for_each_channel_continue(i, rc); 872 } 873 874 void 875 spdk_nvmf_tgt_add_transport(struct spdk_nvmf_tgt *tgt, 876 struct spdk_nvmf_transport *transport, 877 spdk_nvmf_tgt_add_transport_done_fn cb_fn, 878 void *cb_arg) 879 { 880 struct spdk_nvmf_tgt_add_transport_ctx *ctx; 881 882 SPDK_DTRACE_PROBE2_TICKS(nvmf_tgt_add_transport, transport, tgt->name); 883 884 if (spdk_nvmf_tgt_get_transport(tgt, transport->ops->name)) { 885 cb_fn(cb_arg, -EEXIST); 886 return; /* transport already created */ 887 } 888 889 ctx = calloc(1, sizeof(*ctx)); 890 if (!ctx) { 891 cb_fn(cb_arg, -ENOMEM); 892 return; 893 } 894 895 ctx->tgt = tgt; 896 ctx->transport = transport; 897 ctx->cb_fn = cb_fn; 898 ctx->cb_arg = cb_arg; 899 900 spdk_for_each_channel(tgt, 901 _nvmf_tgt_add_transport, 902 ctx, 903 _nvmf_tgt_add_transport_done); 904 } 905 906 struct nvmf_tgt_pause_ctx { 907 struct spdk_nvmf_tgt *tgt; 908 spdk_nvmf_tgt_pause_polling_cb_fn cb_fn; 909 void *cb_arg; 910 }; 911 912 static void 913 _nvmf_tgt_pause_polling_done(struct spdk_io_channel_iter *i, int status) 914 { 915 struct nvmf_tgt_pause_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 916 917 ctx->tgt->state = NVMF_TGT_PAUSED; 918 919 ctx->cb_fn(ctx->cb_arg, status); 920 free(ctx); 921 } 922 923 static void 924 _nvmf_tgt_pause_polling(struct spdk_io_channel_iter *i) 925 { 926 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 927 struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch); 928 929 spdk_poller_unregister(&group->poller); 930 931 spdk_for_each_channel_continue(i, 0); 932 } 933 934 int 935 spdk_nvmf_tgt_pause_polling(struct spdk_nvmf_tgt *tgt, spdk_nvmf_tgt_pause_polling_cb_fn cb_fn, 936 void *cb_arg) 937 { 938 struct nvmf_tgt_pause_ctx *ctx; 939 940 SPDK_DTRACE_PROBE2_TICKS(nvmf_tgt_pause_polling, tgt, tgt->name); 941 942 switch (tgt->state) { 943 case NVMF_TGT_PAUSING: 944 case NVMF_TGT_RESUMING: 945 return -EBUSY; 946 case NVMF_TGT_RUNNING: 947 break; 948 default: 949 return -EINVAL; 950 } 951 952 ctx = calloc(1, sizeof(*ctx)); 953 if (!ctx) { 954 return -ENOMEM; 955 } 956 957 958 tgt->state = NVMF_TGT_PAUSING; 959 960 ctx->tgt = tgt; 961 ctx->cb_fn = cb_fn; 962 ctx->cb_arg = cb_arg; 963 964 spdk_for_each_channel(tgt, 965 _nvmf_tgt_pause_polling, 966 ctx, 967 _nvmf_tgt_pause_polling_done); 968 return 0; 969 } 970 971 static void 972 _nvmf_tgt_resume_polling_done(struct spdk_io_channel_iter *i, int status) 973 { 974 struct nvmf_tgt_pause_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 975 976 ctx->tgt->state = NVMF_TGT_RUNNING; 977 978 ctx->cb_fn(ctx->cb_arg, status); 979 free(ctx); 980 } 981 982 static void 983 _nvmf_tgt_resume_polling(struct spdk_io_channel_iter *i) 984 { 985 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 986 struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch); 987 988 assert(group->poller == NULL); 989 group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0); 990 991 spdk_for_each_channel_continue(i, 0); 992 } 993 994 int 995 spdk_nvmf_tgt_resume_polling(struct spdk_nvmf_tgt *tgt, spdk_nvmf_tgt_resume_polling_cb_fn cb_fn, 996 void *cb_arg) 997 { 998 struct nvmf_tgt_pause_ctx *ctx; 999 1000 SPDK_DTRACE_PROBE2_TICKS(nvmf_tgt_resume_polling, tgt, tgt->name); 1001 1002 switch (tgt->state) { 1003 case NVMF_TGT_PAUSING: 1004 case NVMF_TGT_RESUMING: 1005 return -EBUSY; 1006 case NVMF_TGT_PAUSED: 1007 break; 1008 default: 1009 return -EINVAL; 1010 } 1011 1012 ctx = calloc(1, sizeof(*ctx)); 1013 if (!ctx) { 1014 return -ENOMEM; 1015 } 1016 1017 tgt->state = NVMF_TGT_RESUMING; 1018 1019 ctx->tgt = tgt; 1020 ctx->cb_fn = cb_fn; 1021 ctx->cb_arg = cb_arg; 1022 1023 spdk_for_each_channel(tgt, 1024 _nvmf_tgt_resume_polling, 1025 ctx, 1026 _nvmf_tgt_resume_polling_done); 1027 return 0; 1028 } 1029 1030 struct spdk_nvmf_subsystem * 1031 spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn) 1032 { 1033 struct spdk_nvmf_subsystem subsystem; 1034 1035 if (!subnqn) { 1036 return NULL; 1037 } 1038 1039 /* Ensure that subnqn is null terminated */ 1040 if (!memchr(subnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1)) { 1041 SPDK_ERRLOG("Connect SUBNQN is not null terminated\n"); 1042 return NULL; 1043 } 1044 1045 snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn); 1046 return RB_FIND(subsystem_tree, &tgt->subsystems, &subsystem); 1047 } 1048 1049 struct spdk_nvmf_transport * 1050 spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name) 1051 { 1052 struct spdk_nvmf_transport *transport; 1053 1054 TAILQ_FOREACH(transport, &tgt->transports, link) { 1055 if (!strncasecmp(transport->ops->name, transport_name, SPDK_NVMF_TRSTRING_MAX_LEN)) { 1056 return transport; 1057 } 1058 } 1059 return NULL; 1060 } 1061 1062 struct nvmf_new_qpair_ctx { 1063 struct spdk_nvmf_qpair *qpair; 1064 struct spdk_nvmf_poll_group *group; 1065 }; 1066 1067 static void 1068 _nvmf_poll_group_add(void *_ctx) 1069 { 1070 struct nvmf_new_qpair_ctx *ctx = _ctx; 1071 struct spdk_nvmf_qpair *qpair = ctx->qpair; 1072 struct spdk_nvmf_poll_group *group = ctx->group; 1073 1074 free(_ctx); 1075 1076 if (spdk_nvmf_poll_group_add(group, qpair) != 0) { 1077 SPDK_ERRLOG("Unable to add the qpair to a poll group.\n"); 1078 spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 1079 } 1080 } 1081 1082 void 1083 spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair) 1084 { 1085 struct spdk_nvmf_poll_group *group; 1086 struct nvmf_new_qpair_ctx *ctx; 1087 1088 group = spdk_nvmf_get_optimal_poll_group(qpair); 1089 if (group == NULL) { 1090 if (tgt->next_poll_group == NULL) { 1091 tgt->next_poll_group = TAILQ_FIRST(&tgt->poll_groups); 1092 if (tgt->next_poll_group == NULL) { 1093 SPDK_ERRLOG("No poll groups exist.\n"); 1094 spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 1095 return; 1096 } 1097 } 1098 group = tgt->next_poll_group; 1099 tgt->next_poll_group = TAILQ_NEXT(group, link); 1100 } 1101 1102 ctx = calloc(1, sizeof(*ctx)); 1103 if (!ctx) { 1104 SPDK_ERRLOG("Unable to send message to poll group.\n"); 1105 spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 1106 return; 1107 } 1108 1109 ctx->qpair = qpair; 1110 ctx->group = group; 1111 1112 pthread_mutex_lock(&group->mutex); 1113 group->current_unassociated_qpairs++; 1114 pthread_mutex_unlock(&group->mutex); 1115 1116 spdk_thread_send_msg(group->thread, _nvmf_poll_group_add, ctx); 1117 } 1118 1119 struct spdk_nvmf_poll_group * 1120 spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt) 1121 { 1122 struct spdk_io_channel *ch; 1123 1124 ch = spdk_get_io_channel(tgt); 1125 if (!ch) { 1126 SPDK_ERRLOG("Unable to get I/O channel for target\n"); 1127 return NULL; 1128 } 1129 1130 return spdk_io_channel_get_ctx(ch); 1131 } 1132 1133 void 1134 spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group, 1135 spdk_nvmf_poll_group_destroy_done_fn cb_fn, 1136 void *cb_arg) 1137 { 1138 assert(group->destroy_cb_fn == NULL); 1139 group->destroy_cb_fn = cb_fn; 1140 group->destroy_cb_arg = cb_arg; 1141 1142 /* This function will put the io_channel associated with this poll group */ 1143 nvmf_tgt_destroy_poll_group_qpairs(group); 1144 } 1145 1146 int 1147 spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group, 1148 struct spdk_nvmf_qpair *qpair) 1149 { 1150 int rc = -1; 1151 struct spdk_nvmf_transport_poll_group *tgroup; 1152 1153 TAILQ_INIT(&qpair->outstanding); 1154 qpair->group = group; 1155 qpair->ctrlr = NULL; 1156 qpair->disconnect_started = false; 1157 1158 TAILQ_FOREACH(tgroup, &group->tgroups, link) { 1159 if (tgroup->transport == qpair->transport) { 1160 rc = nvmf_transport_poll_group_add(tgroup, qpair); 1161 break; 1162 } 1163 } 1164 1165 /* We add the qpair to the group only it is successfully added into the tgroup */ 1166 if (rc == 0) { 1167 SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_add_qpair, qpair, spdk_thread_get_id(group->thread)); 1168 TAILQ_INSERT_TAIL(&group->qpairs, qpair, link); 1169 nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ACTIVE); 1170 } 1171 1172 return rc; 1173 } 1174 1175 static void 1176 _nvmf_ctrlr_destruct(void *ctx) 1177 { 1178 struct spdk_nvmf_ctrlr *ctrlr = ctx; 1179 1180 nvmf_ctrlr_destruct(ctrlr); 1181 } 1182 1183 static void 1184 _nvmf_ctrlr_free_from_qpair(void *ctx) 1185 { 1186 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; 1187 struct spdk_nvmf_ctrlr *ctrlr = qpair_ctx->ctrlr; 1188 uint32_t count; 1189 1190 spdk_bit_array_clear(ctrlr->qpair_mask, qpair_ctx->qid); 1191 count = spdk_bit_array_count_set(ctrlr->qpair_mask); 1192 if (count == 0) { 1193 assert(!ctrlr->in_destruct); 1194 SPDK_DEBUGLOG(nvmf, "Last qpair %u, destroy ctrlr 0x%hx\n", qpair_ctx->qid, ctrlr->cntlid); 1195 ctrlr->in_destruct = true; 1196 spdk_thread_send_msg(ctrlr->subsys->thread, _nvmf_ctrlr_destruct, ctrlr); 1197 } 1198 free(qpair_ctx); 1199 } 1200 1201 static void 1202 _nvmf_transport_qpair_fini_complete(void *cb_ctx) 1203 { 1204 struct nvmf_qpair_disconnect_ctx *qpair_ctx = cb_ctx; 1205 struct spdk_nvmf_ctrlr *ctrlr; 1206 /* Store cb args since cb_ctx can be freed in _nvmf_ctrlr_free_from_qpair */ 1207 nvmf_qpair_disconnect_cb cb_fn = qpair_ctx->cb_fn; 1208 void *cb_arg = qpair_ctx->ctx; 1209 struct spdk_thread *cb_thread = qpair_ctx->thread; 1210 1211 ctrlr = qpair_ctx->ctrlr; 1212 SPDK_DEBUGLOG(nvmf, "Finish destroying qid %u\n", qpair_ctx->qid); 1213 1214 if (ctrlr) { 1215 if (qpair_ctx->qid == 0) { 1216 /* Admin qpair is removed, so set the pointer to NULL. 1217 * This operation is safe since we are on ctrlr thread now, admin qpair's thread is the same 1218 * as controller's thread */ 1219 assert(ctrlr->thread == spdk_get_thread()); 1220 ctrlr->admin_qpair = NULL; 1221 } 1222 /* Free qpair id from controller's bit mask and destroy the controller if it is the last qpair */ 1223 if (ctrlr->thread) { 1224 spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_free_from_qpair, qpair_ctx); 1225 } else { 1226 _nvmf_ctrlr_free_from_qpair(qpair_ctx); 1227 } 1228 } else { 1229 free(qpair_ctx); 1230 } 1231 1232 if (cb_fn) { 1233 spdk_thread_send_msg(cb_thread, cb_fn, cb_arg); 1234 } 1235 } 1236 1237 void 1238 spdk_nvmf_poll_group_remove(struct spdk_nvmf_qpair *qpair) 1239 { 1240 struct spdk_nvmf_transport_poll_group *tgroup; 1241 int rc; 1242 1243 SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_remove_qpair, qpair, 1244 spdk_thread_get_id(qpair->group->thread)); 1245 nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ERROR); 1246 1247 /* Find the tgroup and remove the qpair from the tgroup */ 1248 TAILQ_FOREACH(tgroup, &qpair->group->tgroups, link) { 1249 if (tgroup->transport == qpair->transport) { 1250 rc = nvmf_transport_poll_group_remove(tgroup, qpair); 1251 if (rc && (rc != ENOTSUP)) { 1252 SPDK_ERRLOG("Cannot remove qpair=%p from transport group=%p\n", 1253 qpair, tgroup); 1254 } 1255 break; 1256 } 1257 } 1258 1259 TAILQ_REMOVE(&qpair->group->qpairs, qpair, link); 1260 qpair->group = NULL; 1261 } 1262 1263 static void 1264 _nvmf_qpair_sgroup_req_clean(struct spdk_nvmf_subsystem_poll_group *sgroup, 1265 const struct spdk_nvmf_qpair *qpair) 1266 { 1267 struct spdk_nvmf_request *req, *tmp; 1268 TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) { 1269 if (req->qpair == qpair) { 1270 TAILQ_REMOVE(&sgroup->queued, req, link); 1271 if (nvmf_transport_req_free(req)) { 1272 SPDK_ERRLOG("Transport request free error!\n"); 1273 } 1274 } 1275 } 1276 } 1277 1278 static void 1279 _nvmf_qpair_destroy(void *ctx, int status) 1280 { 1281 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; 1282 struct spdk_nvmf_qpair *qpair = qpair_ctx->qpair; 1283 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; 1284 struct spdk_nvmf_subsystem_poll_group *sgroup; 1285 uint32_t sid; 1286 1287 assert(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING); 1288 qpair_ctx->qid = qpair->qid; 1289 1290 if (qpair->connect_received) { 1291 if (0 == qpair->qid) { 1292 assert(qpair->group->stat.current_admin_qpairs > 0); 1293 qpair->group->stat.current_admin_qpairs--; 1294 } else { 1295 assert(qpair->group->stat.current_io_qpairs > 0); 1296 qpair->group->stat.current_io_qpairs--; 1297 } 1298 } else { 1299 pthread_mutex_lock(&qpair->group->mutex); 1300 qpair->group->current_unassociated_qpairs--; 1301 pthread_mutex_unlock(&qpair->group->mutex); 1302 } 1303 1304 if (ctrlr) { 1305 sgroup = &qpair->group->sgroups[ctrlr->subsys->id]; 1306 _nvmf_qpair_sgroup_req_clean(sgroup, qpair); 1307 } else { 1308 for (sid = 0; sid < qpair->group->num_sgroups; sid++) { 1309 sgroup = &qpair->group->sgroups[sid]; 1310 assert(sgroup != NULL); 1311 _nvmf_qpair_sgroup_req_clean(sgroup, qpair); 1312 } 1313 } 1314 1315 qpair_ctx->ctrlr = ctrlr; 1316 spdk_nvmf_poll_group_remove(qpair); 1317 nvmf_transport_qpair_fini(qpair, _nvmf_transport_qpair_fini_complete, qpair_ctx); 1318 } 1319 1320 static void 1321 _nvmf_qpair_disconnect_msg(void *ctx) 1322 { 1323 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; 1324 1325 spdk_nvmf_qpair_disconnect(qpair_ctx->qpair, qpair_ctx->cb_fn, qpair_ctx->ctx); 1326 free(ctx); 1327 } 1328 1329 SPDK_LOG_DEPRECATION_REGISTER(spdk_nvmf_qpair_disconnect, "cb_fn and ctx are deprecated", "v24.01", 1330 0); 1331 1332 int 1333 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx) 1334 { 1335 struct spdk_nvmf_poll_group *group = qpair->group; 1336 struct nvmf_qpair_disconnect_ctx *qpair_ctx; 1337 1338 if (__atomic_test_and_set(&qpair->disconnect_started, __ATOMIC_RELAXED)) { 1339 return -EINPROGRESS; 1340 } 1341 1342 if (cb_fn || ctx) { 1343 SPDK_LOG_DEPRECATED(spdk_nvmf_qpair_disconnect); 1344 } 1345 1346 /* If we get a qpair in the uninitialized state, we can just destroy it immediately */ 1347 if (qpair->state == SPDK_NVMF_QPAIR_UNINITIALIZED) { 1348 nvmf_transport_qpair_fini(qpair, NULL, NULL); 1349 if (cb_fn) { 1350 cb_fn(ctx); 1351 } 1352 return 0; 1353 } 1354 1355 assert(group != NULL); 1356 if (spdk_get_thread() != group->thread) { 1357 /* clear the atomic so we can set it on the next call on the proper thread. */ 1358 __atomic_clear(&qpair->disconnect_started, __ATOMIC_RELAXED); 1359 qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx)); 1360 if (!qpair_ctx) { 1361 SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n"); 1362 return -ENOMEM; 1363 } 1364 qpair_ctx->qpair = qpair; 1365 qpair_ctx->cb_fn = cb_fn; 1366 qpair_ctx->thread = group->thread; 1367 qpair_ctx->ctx = ctx; 1368 spdk_thread_send_msg(group->thread, _nvmf_qpair_disconnect_msg, qpair_ctx); 1369 return 0; 1370 } 1371 1372 SPDK_DTRACE_PROBE2_TICKS(nvmf_qpair_disconnect, qpair, spdk_thread_get_id(group->thread)); 1373 assert(qpair->state == SPDK_NVMF_QPAIR_ACTIVE); 1374 nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_DEACTIVATING); 1375 1376 qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx)); 1377 if (!qpair_ctx) { 1378 SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n"); 1379 return -ENOMEM; 1380 } 1381 1382 qpair_ctx->qpair = qpair; 1383 qpair_ctx->cb_fn = cb_fn; 1384 qpair_ctx->thread = group->thread; 1385 qpair_ctx->ctx = ctx; 1386 1387 /* Check for outstanding I/O */ 1388 if (!TAILQ_EMPTY(&qpair->outstanding)) { 1389 SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_drain_qpair, qpair, spdk_thread_get_id(group->thread)); 1390 qpair->state_cb = _nvmf_qpair_destroy; 1391 qpair->state_cb_arg = qpair_ctx; 1392 nvmf_qpair_abort_pending_zcopy_reqs(qpair); 1393 nvmf_qpair_free_aer(qpair); 1394 return 0; 1395 } 1396 1397 _nvmf_qpair_destroy(qpair_ctx, 0); 1398 1399 return 0; 1400 } 1401 1402 int 1403 spdk_nvmf_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair, 1404 struct spdk_nvme_transport_id *trid) 1405 { 1406 memset(trid, 0, sizeof(*trid)); 1407 return nvmf_transport_qpair_get_peer_trid(qpair, trid); 1408 } 1409 1410 int 1411 spdk_nvmf_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair, 1412 struct spdk_nvme_transport_id *trid) 1413 { 1414 memset(trid, 0, sizeof(*trid)); 1415 return nvmf_transport_qpair_get_local_trid(qpair, trid); 1416 } 1417 1418 int 1419 spdk_nvmf_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, 1420 struct spdk_nvme_transport_id *trid) 1421 { 1422 memset(trid, 0, sizeof(*trid)); 1423 return nvmf_transport_qpair_get_listen_trid(qpair, trid); 1424 } 1425 1426 static int 1427 poll_group_update_subsystem(struct spdk_nvmf_poll_group *group, 1428 struct spdk_nvmf_subsystem *subsystem) 1429 { 1430 struct spdk_nvmf_subsystem_poll_group *sgroup; 1431 uint32_t new_num_ns, old_num_ns; 1432 uint32_t i, j; 1433 struct spdk_nvmf_ns *ns; 1434 struct spdk_nvmf_registrant *reg, *tmp; 1435 struct spdk_io_channel *ch; 1436 struct spdk_nvmf_subsystem_pg_ns_info *ns_info; 1437 struct spdk_nvmf_ctrlr *ctrlr; 1438 bool ns_changed; 1439 1440 /* Make sure our poll group has memory for this subsystem allocated */ 1441 if (subsystem->id >= group->num_sgroups) { 1442 return -ENOMEM; 1443 } 1444 1445 sgroup = &group->sgroups[subsystem->id]; 1446 1447 /* Make sure the array of namespace information is the correct size */ 1448 new_num_ns = subsystem->max_nsid; 1449 old_num_ns = sgroup->num_ns; 1450 1451 ns_changed = false; 1452 1453 if (old_num_ns == 0) { 1454 if (new_num_ns > 0) { 1455 /* First allocation */ 1456 sgroup->ns_info = calloc(new_num_ns, sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1457 if (!sgroup->ns_info) { 1458 return -ENOMEM; 1459 } 1460 } 1461 } else if (new_num_ns > old_num_ns) { 1462 void *buf; 1463 1464 /* Make the array larger */ 1465 buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1466 if (!buf) { 1467 return -ENOMEM; 1468 } 1469 1470 sgroup->ns_info = buf; 1471 1472 /* Null out the new namespace information slots */ 1473 for (i = old_num_ns; i < new_num_ns; i++) { 1474 memset(&sgroup->ns_info[i], 0, sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1475 } 1476 } else if (new_num_ns < old_num_ns) { 1477 void *buf; 1478 1479 /* Free the extra I/O channels */ 1480 for (i = new_num_ns; i < old_num_ns; i++) { 1481 ns_info = &sgroup->ns_info[i]; 1482 1483 if (ns_info->channel) { 1484 spdk_put_io_channel(ns_info->channel); 1485 ns_info->channel = NULL; 1486 } 1487 } 1488 1489 /* Make the array smaller */ 1490 if (new_num_ns > 0) { 1491 buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1492 if (!buf) { 1493 return -ENOMEM; 1494 } 1495 sgroup->ns_info = buf; 1496 } else { 1497 free(sgroup->ns_info); 1498 sgroup->ns_info = NULL; 1499 } 1500 } 1501 1502 sgroup->num_ns = new_num_ns; 1503 1504 /* Detect bdevs that were added or removed */ 1505 for (i = 0; i < sgroup->num_ns; i++) { 1506 ns = subsystem->ns[i]; 1507 ns_info = &sgroup->ns_info[i]; 1508 ch = ns_info->channel; 1509 1510 if (ns == NULL && ch == NULL) { 1511 /* Both NULL. Leave empty */ 1512 } else if (ns == NULL && ch != NULL) { 1513 /* There was a channel here, but the namespace is gone. */ 1514 ns_changed = true; 1515 spdk_put_io_channel(ch); 1516 ns_info->channel = NULL; 1517 } else if (ns != NULL && ch == NULL) { 1518 /* A namespace appeared but there is no channel yet */ 1519 ns_changed = true; 1520 ch = spdk_bdev_get_io_channel(ns->desc); 1521 if (ch == NULL) { 1522 SPDK_ERRLOG("Could not allocate I/O channel.\n"); 1523 return -ENOMEM; 1524 } 1525 ns_info->channel = ch; 1526 } else if (spdk_uuid_compare(&ns_info->uuid, spdk_bdev_get_uuid(ns->bdev)) != 0) { 1527 /* A namespace was here before, but was replaced by a new one. */ 1528 ns_changed = true; 1529 spdk_put_io_channel(ns_info->channel); 1530 memset(ns_info, 0, sizeof(*ns_info)); 1531 1532 ch = spdk_bdev_get_io_channel(ns->desc); 1533 if (ch == NULL) { 1534 SPDK_ERRLOG("Could not allocate I/O channel.\n"); 1535 return -ENOMEM; 1536 } 1537 ns_info->channel = ch; 1538 } else if (ns_info->num_blocks != spdk_bdev_get_num_blocks(ns->bdev)) { 1539 /* Namespace is still there but size has changed */ 1540 SPDK_DEBUGLOG(nvmf, "Namespace resized: subsystem_id %u," 1541 " nsid %u, pg %p, old %" PRIu64 ", new %" PRIu64 "\n", 1542 subsystem->id, 1543 ns->nsid, 1544 group, 1545 ns_info->num_blocks, 1546 spdk_bdev_get_num_blocks(ns->bdev)); 1547 ns_changed = true; 1548 } 1549 1550 if (ns == NULL) { 1551 memset(ns_info, 0, sizeof(*ns_info)); 1552 } else { 1553 ns_info->uuid = *spdk_bdev_get_uuid(ns->bdev); 1554 ns_info->num_blocks = spdk_bdev_get_num_blocks(ns->bdev); 1555 ns_info->crkey = ns->crkey; 1556 ns_info->rtype = ns->rtype; 1557 if (ns->holder) { 1558 ns_info->holder_id = ns->holder->hostid; 1559 } 1560 1561 memset(&ns_info->reg_hostid, 0, SPDK_NVMF_MAX_NUM_REGISTRANTS * sizeof(struct spdk_uuid)); 1562 j = 0; 1563 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 1564 if (j >= SPDK_NVMF_MAX_NUM_REGISTRANTS) { 1565 SPDK_ERRLOG("Maximum %u registrants can support.\n", SPDK_NVMF_MAX_NUM_REGISTRANTS); 1566 return -EINVAL; 1567 } 1568 ns_info->reg_hostid[j++] = reg->hostid; 1569 } 1570 } 1571 } 1572 1573 if (ns_changed) { 1574 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 1575 if (ctrlr->thread != spdk_get_thread()) { 1576 continue; 1577 } 1578 /* It is possible that a ctrlr was added but the admin_qpair hasn't been 1579 * assigned yet. 1580 */ 1581 if (!ctrlr->admin_qpair) { 1582 continue; 1583 } 1584 if (ctrlr->admin_qpair->group == group) { 1585 nvmf_ctrlr_async_event_ns_notice(ctrlr); 1586 nvmf_ctrlr_async_event_ana_change_notice(ctrlr); 1587 } 1588 } 1589 } 1590 1591 return 0; 1592 } 1593 1594 int 1595 nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group, 1596 struct spdk_nvmf_subsystem *subsystem) 1597 { 1598 return poll_group_update_subsystem(group, subsystem); 1599 } 1600 1601 int 1602 nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group, 1603 struct spdk_nvmf_subsystem *subsystem, 1604 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1605 { 1606 int rc = 0; 1607 struct spdk_nvmf_subsystem_poll_group *sgroup = &group->sgroups[subsystem->id]; 1608 uint32_t i; 1609 1610 TAILQ_INIT(&sgroup->queued); 1611 1612 rc = poll_group_update_subsystem(group, subsystem); 1613 if (rc) { 1614 nvmf_poll_group_remove_subsystem(group, subsystem, NULL, NULL); 1615 goto fini; 1616 } 1617 1618 sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1619 1620 for (i = 0; i < sgroup->num_ns; i++) { 1621 sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1622 } 1623 1624 fini: 1625 if (cb_fn) { 1626 cb_fn(cb_arg, rc); 1627 } 1628 1629 SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_add_subsystem, spdk_thread_get_id(group->thread), 1630 subsystem->subnqn); 1631 1632 return rc; 1633 } 1634 1635 static void 1636 _nvmf_poll_group_remove_subsystem_cb(void *ctx, int status) 1637 { 1638 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; 1639 struct spdk_nvmf_subsystem *subsystem; 1640 struct spdk_nvmf_poll_group *group; 1641 struct spdk_nvmf_subsystem_poll_group *sgroup; 1642 spdk_nvmf_poll_group_mod_done cpl_fn = NULL; 1643 void *cpl_ctx = NULL; 1644 uint32_t nsid; 1645 1646 group = qpair_ctx->group; 1647 subsystem = qpair_ctx->subsystem; 1648 cpl_fn = qpair_ctx->cpl_fn; 1649 cpl_ctx = qpair_ctx->cpl_ctx; 1650 sgroup = &group->sgroups[subsystem->id]; 1651 1652 if (status) { 1653 goto fini; 1654 } 1655 1656 for (nsid = 0; nsid < sgroup->num_ns; nsid++) { 1657 if (sgroup->ns_info[nsid].channel) { 1658 spdk_put_io_channel(sgroup->ns_info[nsid].channel); 1659 sgroup->ns_info[nsid].channel = NULL; 1660 } 1661 } 1662 1663 sgroup->num_ns = 0; 1664 free(sgroup->ns_info); 1665 sgroup->ns_info = NULL; 1666 fini: 1667 free(qpair_ctx); 1668 if (cpl_fn) { 1669 cpl_fn(cpl_ctx, status); 1670 } 1671 } 1672 1673 static void nvmf_poll_group_remove_subsystem_msg(void *ctx); 1674 1675 static void 1676 nvmf_poll_group_remove_subsystem_msg(void *ctx) 1677 { 1678 struct spdk_nvmf_qpair *qpair, *qpair_tmp; 1679 struct spdk_nvmf_subsystem *subsystem; 1680 struct spdk_nvmf_poll_group *group; 1681 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; 1682 bool qpairs_found = false; 1683 int rc = 0; 1684 1685 group = qpair_ctx->group; 1686 subsystem = qpair_ctx->subsystem; 1687 1688 TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, qpair_tmp) { 1689 if ((qpair->ctrlr != NULL) && (qpair->ctrlr->subsys == subsystem)) { 1690 qpairs_found = true; 1691 rc = spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 1692 if (rc && rc != -EINPROGRESS) { 1693 break; 1694 } 1695 } 1696 } 1697 1698 if (!qpairs_found) { 1699 _nvmf_poll_group_remove_subsystem_cb(ctx, 0); 1700 return; 1701 } 1702 1703 /* Some qpairs are in process of being disconnected. Send a message and try to remove them again */ 1704 spdk_thread_send_msg(spdk_get_thread(), nvmf_poll_group_remove_subsystem_msg, ctx); 1705 } 1706 1707 void 1708 nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group, 1709 struct spdk_nvmf_subsystem *subsystem, 1710 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1711 { 1712 struct spdk_nvmf_subsystem_poll_group *sgroup; 1713 struct nvmf_qpair_disconnect_many_ctx *ctx; 1714 uint32_t i; 1715 1716 SPDK_DTRACE_PROBE3_TICKS(nvmf_poll_group_remove_subsystem, group, spdk_thread_get_id(group->thread), 1717 subsystem->subnqn); 1718 1719 ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx)); 1720 if (!ctx) { 1721 SPDK_ERRLOG("Unable to allocate memory for context to remove poll subsystem\n"); 1722 if (cb_fn) { 1723 cb_fn(cb_arg, -1); 1724 } 1725 return; 1726 } 1727 1728 ctx->group = group; 1729 ctx->subsystem = subsystem; 1730 ctx->cpl_fn = cb_fn; 1731 ctx->cpl_ctx = cb_arg; 1732 1733 sgroup = &group->sgroups[subsystem->id]; 1734 sgroup->state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 1735 1736 for (i = 0; i < sgroup->num_ns; i++) { 1737 sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 1738 } 1739 1740 nvmf_poll_group_remove_subsystem_msg(ctx); 1741 } 1742 1743 void 1744 nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group, 1745 struct spdk_nvmf_subsystem *subsystem, 1746 uint32_t nsid, 1747 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1748 { 1749 struct spdk_nvmf_subsystem_poll_group *sgroup; 1750 struct spdk_nvmf_subsystem_pg_ns_info *ns_info = NULL; 1751 int rc = 0; 1752 uint32_t i; 1753 1754 if (subsystem->id >= group->num_sgroups) { 1755 rc = -1; 1756 goto fini; 1757 } 1758 1759 sgroup = &group->sgroups[subsystem->id]; 1760 if (sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSED) { 1761 goto fini; 1762 } 1763 sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSING; 1764 1765 if (nsid == SPDK_NVME_GLOBAL_NS_TAG) { 1766 for (i = 0; i < sgroup->num_ns; i++) { 1767 ns_info = &sgroup->ns_info[i]; 1768 ns_info->state = SPDK_NVMF_SUBSYSTEM_PAUSING; 1769 } 1770 } else { 1771 /* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */ 1772 if (nsid - 1 < sgroup->num_ns) { 1773 ns_info = &sgroup->ns_info[nsid - 1]; 1774 ns_info->state = SPDK_NVMF_SUBSYSTEM_PAUSING; 1775 } 1776 } 1777 1778 if (sgroup->mgmt_io_outstanding > 0) { 1779 assert(sgroup->cb_fn == NULL); 1780 sgroup->cb_fn = cb_fn; 1781 assert(sgroup->cb_arg == NULL); 1782 sgroup->cb_arg = cb_arg; 1783 return; 1784 } 1785 1786 if (nsid == SPDK_NVME_GLOBAL_NS_TAG) { 1787 for (i = 0; i < sgroup->num_ns; i++) { 1788 ns_info = &sgroup->ns_info[i]; 1789 1790 if (ns_info->io_outstanding > 0) { 1791 assert(sgroup->cb_fn == NULL); 1792 sgroup->cb_fn = cb_fn; 1793 assert(sgroup->cb_arg == NULL); 1794 sgroup->cb_arg = cb_arg; 1795 return; 1796 } 1797 } 1798 } else { 1799 if (ns_info != NULL && ns_info->io_outstanding > 0) { 1800 assert(sgroup->cb_fn == NULL); 1801 sgroup->cb_fn = cb_fn; 1802 assert(sgroup->cb_arg == NULL); 1803 sgroup->cb_arg = cb_arg; 1804 return; 1805 } 1806 } 1807 1808 assert(sgroup->mgmt_io_outstanding == 0); 1809 sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED; 1810 fini: 1811 if (cb_fn) { 1812 cb_fn(cb_arg, rc); 1813 } 1814 } 1815 1816 void 1817 nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group, 1818 struct spdk_nvmf_subsystem *subsystem, 1819 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1820 { 1821 struct spdk_nvmf_request *req, *tmp; 1822 struct spdk_nvmf_subsystem_poll_group *sgroup; 1823 int rc = 0; 1824 uint32_t i; 1825 1826 if (subsystem->id >= group->num_sgroups) { 1827 rc = -1; 1828 goto fini; 1829 } 1830 1831 sgroup = &group->sgroups[subsystem->id]; 1832 1833 if (sgroup->state == SPDK_NVMF_SUBSYSTEM_ACTIVE) { 1834 goto fini; 1835 } 1836 1837 rc = poll_group_update_subsystem(group, subsystem); 1838 if (rc) { 1839 goto fini; 1840 } 1841 1842 for (i = 0; i < sgroup->num_ns; i++) { 1843 sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1844 } 1845 1846 sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1847 1848 /* Release all queued requests */ 1849 TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) { 1850 TAILQ_REMOVE(&sgroup->queued, req, link); 1851 if (spdk_nvmf_request_using_zcopy(req)) { 1852 spdk_nvmf_request_zcopy_start(req); 1853 } else { 1854 spdk_nvmf_request_exec(req); 1855 } 1856 1857 } 1858 fini: 1859 if (cb_fn) { 1860 cb_fn(cb_arg, rc); 1861 } 1862 } 1863 1864 1865 struct spdk_nvmf_poll_group * 1866 spdk_nvmf_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair) 1867 { 1868 struct spdk_nvmf_transport_poll_group *tgroup; 1869 1870 tgroup = nvmf_transport_get_optimal_poll_group(qpair->transport, qpair); 1871 1872 if (tgroup == NULL) { 1873 return NULL; 1874 } 1875 1876 return tgroup->group; 1877 } 1878 1879 void 1880 spdk_nvmf_poll_group_dump_stat(struct spdk_nvmf_poll_group *group, struct spdk_json_write_ctx *w) 1881 { 1882 struct spdk_nvmf_transport_poll_group *tgroup; 1883 1884 spdk_json_write_object_begin(w); 1885 1886 spdk_json_write_named_string(w, "name", spdk_thread_get_name(spdk_get_thread())); 1887 spdk_json_write_named_uint32(w, "admin_qpairs", group->stat.admin_qpairs); 1888 spdk_json_write_named_uint32(w, "io_qpairs", group->stat.io_qpairs); 1889 spdk_json_write_named_uint32(w, "current_admin_qpairs", group->stat.current_admin_qpairs); 1890 spdk_json_write_named_uint32(w, "current_io_qpairs", group->stat.current_io_qpairs); 1891 spdk_json_write_named_uint64(w, "pending_bdev_io", group->stat.pending_bdev_io); 1892 spdk_json_write_named_uint64(w, "completed_nvme_io", group->stat.completed_nvme_io); 1893 1894 spdk_json_write_named_array_begin(w, "transports"); 1895 1896 TAILQ_FOREACH(tgroup, &group->tgroups, link) { 1897 spdk_json_write_object_begin(w); 1898 /* 1899 * The trtype field intentionally contains a transport name as this is more informative. 1900 * The field has not been renamed for backward compatibility. 1901 */ 1902 spdk_json_write_named_string(w, "trtype", spdk_nvmf_get_transport_name(tgroup->transport)); 1903 1904 if (tgroup->transport->ops->poll_group_dump_stat) { 1905 tgroup->transport->ops->poll_group_dump_stat(tgroup, w); 1906 } 1907 1908 spdk_json_write_object_end(w); 1909 } 1910 1911 spdk_json_write_array_end(w); 1912 spdk_json_write_object_end(w); 1913 } 1914