/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2018-2019, 2021 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/bit_array.h"
#include "spdk/thread.h"
#include "spdk/nvmf.h"
#include "spdk/trace.h"
#include "spdk/endian.h"
#include "spdk/string.h"

#include "spdk/log.h"

#include "nvmf_internal.h"
#include "transport.h"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

#define SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS 1024
#define SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US 10000

static TAILQ_HEAD(, spdk_nvmf_tgt) g_nvmf_tgts = TAILQ_HEAD_INITIALIZER(g_nvmf_tgts);

typedef void (*nvmf_qpair_disconnect_cpl)(void *ctx, int status);
static void nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf);

/* supplied to a single call to nvmf_qpair_disconnect */
struct nvmf_qpair_disconnect_ctx {
    struct spdk_nvmf_qpair *qpair;
    struct spdk_nvmf_ctrlr *ctrlr;
    nvmf_qpair_disconnect_cb cb_fn;
    struct spdk_thread *thread;
    void *ctx;
    uint16_t qid;
};

/*
 * There are several times when we need to iterate through the list of all qpairs and
 * selectively delete them. In order to do this sequentially without overlap, we must
 * provide a context from which to recover the next qpair, so that we can call
 * nvmf_qpair_disconnect on the next desired qpair.
 */
struct nvmf_qpair_disconnect_many_ctx {
    struct spdk_nvmf_subsystem *subsystem;
    struct spdk_nvmf_poll_group *group;
    spdk_nvmf_poll_group_mod_done cpl_fn;
    void *cpl_ctx;
    uint32_t count;
};

static void
nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair,
                     enum spdk_nvmf_qpair_state state)
{
    assert(qpair != NULL);
    assert(qpair->group->thread == spdk_get_thread());

    qpair->state = state;
}

static int
nvmf_poll_group_poll(void *ctx)
{
    struct spdk_nvmf_poll_group *group = ctx;
    int rc;
    int count = 0;
    struct spdk_nvmf_transport_poll_group *tgroup;

    TAILQ_FOREACH(tgroup, &group->tgroups, link) {
        rc = nvmf_transport_poll_group_poll(tgroup);
        if (rc < 0) {
            return SPDK_POLLER_BUSY;
        }
        count += rc;
    }

    return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}
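
/*
 * Illustrative sketch (not part of the build): nvmf_poll_group_poll above
 * follows the standard SPDK poller contract, returning SPDK_POLLER_BUSY when
 * any work was done and SPDK_POLLER_IDLE otherwise. A hypothetical poller of
 * the same shape:
 *
 *     static int
 *     example_poll(void *ctx)
 *     {
 *         int count = example_do_work(ctx);    // hypothetical helper
 *
 *         return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
 *     }
 */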

static int
nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
{
    struct spdk_nvmf_tgt *tgt = io_device;
    struct spdk_nvmf_poll_group *group = ctx_buf;
    struct spdk_nvmf_transport *transport;
    uint32_t sid;

    TAILQ_INIT(&group->tgroups);
    TAILQ_INIT(&group->qpairs);

    TAILQ_FOREACH(transport, &tgt->transports, link) {
        nvmf_poll_group_add_transport(group, transport);
    }

    group->num_sgroups = tgt->max_subsystems;
    group->sgroups = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem_poll_group));
    if (!group->sgroups) {
        return -ENOMEM;
    }

    for (sid = 0; sid < tgt->max_subsystems; sid++) {
        struct spdk_nvmf_subsystem *subsystem;

        subsystem = tgt->subsystems[sid];
        if (!subsystem) {
            continue;
        }

        if (nvmf_poll_group_add_subsystem(group, subsystem, NULL, NULL) != 0) {
            nvmf_tgt_destroy_poll_group(io_device, ctx_buf);
            return -1;
        }
    }

    pthread_mutex_lock(&tgt->mutex);
    TAILQ_INSERT_TAIL(&tgt->poll_groups, group, link);
    pthread_mutex_unlock(&tgt->mutex);

    group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0);
    group->thread = spdk_get_thread();

    return 0;
}

static void
nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf)
{
    struct spdk_nvmf_tgt *tgt = io_device;
    struct spdk_nvmf_poll_group *group = ctx_buf;
    struct spdk_nvmf_transport_poll_group *tgroup, *tmp;
    struct spdk_nvmf_subsystem_poll_group *sgroup;
    uint32_t sid, nsid;

    pthread_mutex_lock(&tgt->mutex);
    TAILQ_REMOVE(&tgt->poll_groups, group, link);
    pthread_mutex_unlock(&tgt->mutex);

    TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
        TAILQ_REMOVE(&group->tgroups, tgroup, link);
        nvmf_transport_poll_group_destroy(tgroup);
    }

    for (sid = 0; sid < group->num_sgroups; sid++) {
        sgroup = &group->sgroups[sid];

        for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
            if (sgroup->ns_info[nsid].channel) {
                spdk_put_io_channel(sgroup->ns_info[nsid].channel);
                sgroup->ns_info[nsid].channel = NULL;
            }
        }

        free(sgroup->ns_info);
    }

    free(group->sgroups);

    spdk_poller_unregister(&group->poller);

    if (group->destroy_cb_fn) {
        group->destroy_cb_fn(group->destroy_cb_arg, 0);
    }
}
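
/*
 * The pair of callbacks above is wired up through spdk_io_device_register()
 * in spdk_nvmf_tgt_create() below: each thread that calls
 * spdk_get_io_channel(tgt) gets a poll group built by
 * nvmf_tgt_create_poll_group(), and the final spdk_put_io_channel() on that
 * thread triggers nvmf_tgt_destroy_poll_group(). A condensed, illustrative
 * flow on one thread:
 *
 *     struct spdk_io_channel *ch = spdk_get_io_channel(tgt);    // runs create_cb
 *     struct spdk_nvmf_poll_group *pg = spdk_io_channel_get_ctx(ch);
 *     // ... use pg ...
 *     spdk_put_io_channel(ch);    // runs destroy_cb once refcount drops to 0
 */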

static void
_nvmf_tgt_disconnect_next_qpair(void *ctx)
{
    struct spdk_nvmf_qpair *qpair;
    struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
    struct spdk_nvmf_poll_group *group = qpair_ctx->group;
    struct spdk_io_channel *ch;
    int rc = 0;

    qpair = TAILQ_FIRST(&group->qpairs);

    if (qpair) {
        rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_tgt_disconnect_next_qpair, ctx);
    }

    if (!qpair || rc != 0) {
        /* When the refcount from the channels reaches 0, nvmf_tgt_destroy_poll_group will be called. */
        ch = spdk_io_channel_from_ctx(group);
        spdk_put_io_channel(ch);
        free(qpair_ctx);
    }
}

static void
nvmf_tgt_destroy_poll_group_qpairs(struct spdk_nvmf_poll_group *group)
{
    struct nvmf_qpair_disconnect_many_ctx *ctx;

    ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx));
    if (!ctx) {
        SPDK_ERRLOG("Failed to allocate memory for destroy poll group ctx\n");
        return;
    }

    ctx->group = group;
    _nvmf_tgt_disconnect_next_qpair(ctx);
}

static int
nvmf_tgt_accept(void *ctx)
{
    struct spdk_nvmf_tgt *tgt = ctx;
    struct spdk_nvmf_transport *transport, *tmp;
    int count = 0;

    TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, tmp) {
        count += nvmf_transport_accept(transport);
    }

    return count;
}

struct spdk_nvmf_tgt *
spdk_nvmf_tgt_create(struct spdk_nvmf_target_opts *opts)
{
    struct spdk_nvmf_tgt *tgt, *tmp_tgt;
    uint32_t acceptor_poll_rate;

    /* opts is dereferenced below, so it must be provided */
    if (!opts) {
        SPDK_ERRLOG("No target options provided.\n");
        return NULL;
    }

    if (strnlen(opts->name, NVMF_TGT_NAME_MAX_LENGTH) == NVMF_TGT_NAME_MAX_LENGTH) {
        SPDK_ERRLOG("Provided target name exceeds the max length of %u.\n", NVMF_TGT_NAME_MAX_LENGTH);
        return NULL;
    }

    TAILQ_FOREACH(tmp_tgt, &g_nvmf_tgts, link) {
        if (!strncmp(opts->name, tmp_tgt->name, NVMF_TGT_NAME_MAX_LENGTH)) {
            SPDK_ERRLOG("Provided target name must be unique.\n");
            return NULL;
        }
    }

    tgt = calloc(1, sizeof(*tgt));
    if (!tgt) {
        return NULL;
    }

    snprintf(tgt->name, NVMF_TGT_NAME_MAX_LENGTH, "%s", opts->name);

    if (!opts->max_subsystems) {
        tgt->max_subsystems = SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS;
    } else {
        tgt->max_subsystems = opts->max_subsystems;
    }

    if (!opts->acceptor_poll_rate) {
        acceptor_poll_rate = SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US;
    } else {
        acceptor_poll_rate = opts->acceptor_poll_rate;
    }

    tgt->discovery_genctr = 0;
    TAILQ_INIT(&tgt->transports);
    TAILQ_INIT(&tgt->poll_groups);

    tgt->subsystems = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
    if (!tgt->subsystems) {
        free(tgt);
        return NULL;
    }

    pthread_mutex_init(&tgt->mutex, NULL);

    tgt->accept_poller = SPDK_POLLER_REGISTER(nvmf_tgt_accept, tgt, acceptor_poll_rate);
    if (!tgt->accept_poller) {
        pthread_mutex_destroy(&tgt->mutex);
        free(tgt->subsystems);
        free(tgt);
        return NULL;
    }

    spdk_io_device_register(tgt,
                            nvmf_tgt_create_poll_group,
                            nvmf_tgt_destroy_poll_group,
                            sizeof(struct spdk_nvmf_poll_group),
                            tgt->name);

    TAILQ_INSERT_HEAD(&g_nvmf_tgts, tgt, link);

    return tgt;
}
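
/*
 * Illustrative usage of spdk_nvmf_tgt_create() above, assuming an SPDK
 * application context (values are examples only):
 *
 *     struct spdk_nvmf_target_opts tgt_opts = {
 *         .name = "nvmf_tgt0",
 *         .max_subsystems = 0,        // 0 picks SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS
 *         .acceptor_poll_rate = 0,    // 0 picks SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US
 *     };
 *     struct spdk_nvmf_tgt *tgt = spdk_nvmf_tgt_create(&tgt_opts);
 *
 *     if (tgt == NULL) {
 *         // NULL opts, duplicate or over-long name, or allocation failure
 *     }
 */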

static void
_nvmf_tgt_destroy_next_transport(void *ctx)
{
    struct spdk_nvmf_tgt *tgt = ctx;
    struct spdk_nvmf_transport *transport;

    if (!TAILQ_EMPTY(&tgt->transports)) {
        transport = TAILQ_FIRST(&tgt->transports);
        TAILQ_REMOVE(&tgt->transports, transport, link);
        spdk_nvmf_transport_destroy(transport, _nvmf_tgt_destroy_next_transport, tgt);
    } else {
        spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn = tgt->destroy_cb_fn;
        void *destroy_cb_arg = tgt->destroy_cb_arg;

        pthread_mutex_destroy(&tgt->mutex);
        free(tgt);

        if (destroy_cb_fn) {
            destroy_cb_fn(destroy_cb_arg, 0);
        }
    }
}

static void
nvmf_tgt_destroy_cb(void *io_device)
{
    struct spdk_nvmf_tgt *tgt = io_device;
    uint32_t i;

    if (tgt->subsystems) {
        for (i = 0; i < tgt->max_subsystems; i++) {
            if (tgt->subsystems[i]) {
                nvmf_subsystem_remove_all_listeners(tgt->subsystems[i], true);
                spdk_nvmf_subsystem_destroy(tgt->subsystems[i]);
            }
        }
        free(tgt->subsystems);
    }

    _nvmf_tgt_destroy_next_transport(tgt);
}

void
spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt,
                      spdk_nvmf_tgt_destroy_done_fn cb_fn,
                      void *cb_arg)
{
    tgt->destroy_cb_fn = cb_fn;
    tgt->destroy_cb_arg = cb_arg;

    spdk_poller_unregister(&tgt->accept_poller);

    TAILQ_REMOVE(&g_nvmf_tgts, tgt, link);

    spdk_io_device_unregister(tgt, nvmf_tgt_destroy_cb);
}

const char *
spdk_nvmf_tgt_get_name(struct spdk_nvmf_tgt *tgt)
{
    return tgt->name;
}

struct spdk_nvmf_tgt *
spdk_nvmf_get_tgt(const char *name)
{
    struct spdk_nvmf_tgt *tgt;
    uint32_t num_targets = 0;

    TAILQ_FOREACH(tgt, &g_nvmf_tgts, link) {
        if (name) {
            if (!strncmp(tgt->name, name, NVMF_TGT_NAME_MAX_LENGTH)) {
                return tgt;
            }
        }
        num_targets++;
    }

    /*
     * Special case: if there is only one target and no name was specified,
     * return the only available target. If there is more than one target,
     * a name must be specified.
     */
    if (!name && num_targets == 1) {
        return TAILQ_FIRST(&g_nvmf_tgts);
    }

    return NULL;
}

struct spdk_nvmf_tgt *
spdk_nvmf_get_first_tgt(void)
{
    return TAILQ_FIRST(&g_nvmf_tgts);
}

struct spdk_nvmf_tgt *
spdk_nvmf_get_next_tgt(struct spdk_nvmf_tgt *prev)
{
    return TAILQ_NEXT(prev, link);
}
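
/*
 * The accessors above support enumerating every registered target; for
 * example:
 *
 *     struct spdk_nvmf_tgt *it;
 *
 *     for (it = spdk_nvmf_get_first_tgt(); it != NULL; it = spdk_nvmf_get_next_tgt(it)) {
 *         printf("target: %s\n", spdk_nvmf_tgt_get_name(it));
 *     }
 */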

static void
nvmf_write_subsystem_config_json(struct spdk_json_write_ctx *w,
                                 struct spdk_nvmf_subsystem *subsystem)
{
    struct spdk_nvmf_host *host;
    struct spdk_nvmf_subsystem_listener *listener;
    const struct spdk_nvme_transport_id *trid;
    struct spdk_nvmf_ns *ns;
    struct spdk_nvmf_ns_opts ns_opts;
    uint32_t max_namespaces;
    char uuid_str[SPDK_UUID_STRING_LEN];
    const char *adrfam;

    if (spdk_nvmf_subsystem_get_type(subsystem) != SPDK_NVMF_SUBTYPE_NVME) {
        return;
    }

    /* { */
    spdk_json_write_object_begin(w);
    spdk_json_write_named_string(w, "method", "nvmf_create_subsystem");

    /* "params" : { */
    spdk_json_write_named_object_begin(w, "params");
    spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
    spdk_json_write_named_bool(w, "allow_any_host", spdk_nvmf_subsystem_get_allow_any_host(subsystem));
    spdk_json_write_named_string(w, "serial_number", spdk_nvmf_subsystem_get_sn(subsystem));
    spdk_json_write_named_string(w, "model_number", spdk_nvmf_subsystem_get_mn(subsystem));

    max_namespaces = spdk_nvmf_subsystem_get_max_namespaces(subsystem);
    if (max_namespaces != 0) {
        spdk_json_write_named_uint32(w, "max_namespaces", max_namespaces);
    }

    /* } "params" */
    spdk_json_write_object_end(w);

    /* } */
    spdk_json_write_object_end(w);

    for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem); listener != NULL;
         listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) {
        trid = spdk_nvmf_subsystem_listener_get_trid(listener);

        adrfam = spdk_nvme_transport_id_adrfam_str(trid->adrfam);

        spdk_json_write_object_begin(w);
        spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_listener");

        /* "params" : { */
        spdk_json_write_named_object_begin(w, "params");

        spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));

        /* "listen_address" : { */
        spdk_json_write_named_object_begin(w, "listen_address");

        spdk_json_write_named_string(w, "trtype", trid->trstring);
        if (adrfam) {
            spdk_json_write_named_string(w, "adrfam", adrfam);
        }

        spdk_json_write_named_string(w, "traddr", trid->traddr);
        spdk_json_write_named_string(w, "trsvcid", trid->trsvcid);
        /* } "listen_address" */
        spdk_json_write_object_end(w);

        /* } "params" */
        spdk_json_write_object_end(w);

        /* } */
        spdk_json_write_object_end(w);
    }

    for (host = spdk_nvmf_subsystem_get_first_host(subsystem); host != NULL;
         host = spdk_nvmf_subsystem_get_next_host(subsystem, host)) {

        spdk_json_write_object_begin(w);
        spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_host");

        /* "params" : { */
        spdk_json_write_named_object_begin(w, "params");

        spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
        spdk_json_write_named_string(w, "host", spdk_nvmf_host_get_nqn(host));

        /* } "params" */
        spdk_json_write_object_end(w);

        /* } */
        spdk_json_write_object_end(w);
    }

    for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
         ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
        spdk_nvmf_ns_get_opts(ns, &ns_opts, sizeof(ns_opts));

        spdk_json_write_object_begin(w);
        spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_ns");

        /* "params" : { */
        spdk_json_write_named_object_begin(w, "params");

        spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));

        /* "namespace" : { */
        spdk_json_write_named_object_begin(w, "namespace");

        spdk_json_write_named_uint32(w, "nsid", spdk_nvmf_ns_get_id(ns));
        spdk_json_write_named_string(w, "bdev_name", spdk_bdev_get_name(spdk_nvmf_ns_get_bdev(ns)));

        if (!spdk_mem_all_zero(ns_opts.nguid, sizeof(ns_opts.nguid))) {
            SPDK_STATIC_ASSERT(sizeof(ns_opts.nguid) == sizeof(uint64_t) * 2, "size mismatch");
            spdk_json_write_named_string_fmt(w, "nguid", "%016"PRIX64"%016"PRIX64, from_be64(&ns_opts.nguid[0]),
                                             from_be64(&ns_opts.nguid[8]));
        }

        if (!spdk_mem_all_zero(ns_opts.eui64, sizeof(ns_opts.eui64))) {
            SPDK_STATIC_ASSERT(sizeof(ns_opts.eui64) == sizeof(uint64_t), "size mismatch");
            spdk_json_write_named_string_fmt(w, "eui64", "%016"PRIX64, from_be64(&ns_opts.eui64));
        }

        if (!spdk_mem_all_zero(&ns_opts.uuid, sizeof(ns_opts.uuid))) {
            spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &ns_opts.uuid);
            spdk_json_write_named_string(w, "uuid", uuid_str);
        }

        /* } "namespace" */
        spdk_json_write_object_end(w);

        /* } "params" */
        spdk_json_write_object_end(w);

        /* } */
        spdk_json_write_object_end(w);
    }
}
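
/*
 * For reference, the writer above emits one JSON-RPC request object per
 * configuration step. Abbreviated, illustrative output for a subsystem with a
 * single TCP listener (all values invented):
 *
 *     {"method": "nvmf_create_subsystem", "params": {"nqn": "nqn.2016-06.io.spdk:cnode1",
 *      "allow_any_host": false, "serial_number": "SPDK001", "model_number": "SPDK bdev"}}
 *     {"method": "nvmf_subsystem_add_listener", "params": {"nqn": "nqn.2016-06.io.spdk:cnode1",
 *      "listen_address": {"trtype": "TCP", "adrfam": "IPv4", "traddr": "127.0.0.1", "trsvcid": "4420"}}}
 */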

void
spdk_nvmf_tgt_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_tgt *tgt)
{
    struct spdk_nvmf_subsystem *subsystem;
    struct spdk_nvmf_transport *transport;

    spdk_json_write_object_begin(w);
    spdk_json_write_named_string(w, "method", "nvmf_set_max_subsystems");

    spdk_json_write_named_object_begin(w, "params");
    spdk_json_write_named_uint32(w, "max_subsystems", tgt->max_subsystems);
    spdk_json_write_object_end(w);

    spdk_json_write_object_end(w);

    /* write transports */
    TAILQ_FOREACH(transport, &tgt->transports, link) {
        spdk_json_write_object_begin(w);
        spdk_json_write_named_string(w, "method", "nvmf_create_transport");

        spdk_json_write_named_object_begin(w, "params");
        spdk_json_write_named_string(w, "trtype", transport->ops->name);
        spdk_json_write_named_uint32(w, "max_queue_depth", transport->opts.max_queue_depth);
        spdk_json_write_named_uint32(w, "max_io_qpairs_per_ctrlr",
                                     transport->opts.max_qpairs_per_ctrlr - 1);
        spdk_json_write_named_uint32(w, "in_capsule_data_size", transport->opts.in_capsule_data_size);
        spdk_json_write_named_uint32(w, "max_io_size", transport->opts.max_io_size);
        spdk_json_write_named_uint32(w, "io_unit_size", transport->opts.io_unit_size);
        spdk_json_write_named_uint32(w, "max_aq_depth", transport->opts.max_aq_depth);
        spdk_json_write_named_uint32(w, "num_shared_buffers", transport->opts.num_shared_buffers);
        spdk_json_write_named_uint32(w, "buf_cache_size", transport->opts.buf_cache_size);
        spdk_json_write_named_bool(w, "dif_insert_or_strip", transport->opts.dif_insert_or_strip);
        if (transport->ops->dump_opts) {
            transport->ops->dump_opts(transport, w);
        }
        spdk_json_write_named_uint32(w, "abort_timeout_sec", transport->opts.abort_timeout_sec);
        spdk_json_write_object_end(w);

        spdk_json_write_object_end(w);
    }

    subsystem = spdk_nvmf_subsystem_get_first(tgt);
    while (subsystem) {
        nvmf_write_subsystem_config_json(w, subsystem);
        subsystem = spdk_nvmf_subsystem_get_next(subsystem);
    }
}

static void
nvmf_listen_opts_copy(struct spdk_nvmf_listen_opts *opts,
                      const struct spdk_nvmf_listen_opts *opts_src, size_t opts_size)
{
    assert(opts);
    assert(opts_src);

    opts->opts_size = opts_size;

#define SET_FIELD(field) \
    if (offsetof(struct spdk_nvmf_listen_opts, field) + sizeof(opts->field) <= opts_size) { \
        opts->field = opts_src->field; \
    } \

    SET_FIELD(transport_specific);
#undef SET_FIELD

    /* Do not remove this statement. You should always update it when you add a new field,
     * and do not forget to add the SET_FIELD statement for your added field. */
    SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_listen_opts) == 16, "Incorrect size");
}

void
spdk_nvmf_listen_opts_init(struct spdk_nvmf_listen_opts *opts, size_t opts_size)
{
    struct spdk_nvmf_listen_opts opts_local = {};

    /* local version of opts should have defaults set here */

    nvmf_listen_opts_copy(opts, &opts_local, opts_size);
}
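
/*
 * Minimal sketch of the intended calling pattern for the listen API below:
 * initialize the opts with spdk_nvmf_listen_opts_init() so opts_size is set,
 * then hand them to spdk_nvmf_tgt_listen_ext(). The address values are
 * illustrative:
 *
 *     struct spdk_nvmf_listen_opts lopts;
 *     struct spdk_nvme_transport_id trid = {};
 *
 *     spdk_nvmf_listen_opts_init(&lopts, sizeof(lopts));
 *     spdk_nvme_trid_populate_transport(&trid, SPDK_NVME_TRANSPORT_TCP);
 *     trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
 *     snprintf(trid.traddr, sizeof(trid.traddr), "127.0.0.1");
 *     snprintf(trid.trsvcid, sizeof(trid.trsvcid), "4420");
 *
 *     spdk_nvmf_tgt_listen_ext(tgt, &trid, &lopts);
 */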

int
spdk_nvmf_tgt_listen_ext(struct spdk_nvmf_tgt *tgt, const struct spdk_nvme_transport_id *trid,
                         struct spdk_nvmf_listen_opts *opts)
{
    struct spdk_nvmf_transport *transport;
    int rc;
    struct spdk_nvmf_listen_opts opts_local = {};

    if (!opts) {
        SPDK_ERRLOG("opts should not be NULL\n");
        return -EINVAL;
    }

    if (!opts->opts_size) {
        SPDK_ERRLOG("The opts_size in opts structure should not be zero\n");
        return -EINVAL;
    }

    transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring);
    if (!transport) {
        SPDK_ERRLOG("Unable to find %s transport. The transport must be created first; also make sure it is properly registered.\n",
                    trid->trstring);
        return -EINVAL;
    }

    nvmf_listen_opts_copy(&opts_local, opts, opts->opts_size);
    rc = spdk_nvmf_transport_listen(transport, trid, &opts_local);
    if (rc < 0) {
        SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr);
    }

    return rc;
}

int
spdk_nvmf_tgt_stop_listen(struct spdk_nvmf_tgt *tgt,
                          struct spdk_nvme_transport_id *trid)
{
    struct spdk_nvmf_transport *transport;
    int rc;

    transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring);
    if (!transport) {
        SPDK_ERRLOG("Unable to find %s transport. The transport must be created first; also make sure it is properly registered.\n",
                    trid->trstring);
        return -EINVAL;
    }

    rc = spdk_nvmf_transport_stop_listen(transport, trid);
    if (rc < 0) {
        SPDK_ERRLOG("Failed to stop listening on address '%s'\n", trid->traddr);
        return rc;
    }
    return 0;
}

struct spdk_nvmf_tgt_add_transport_ctx {
    struct spdk_nvmf_tgt *tgt;
    struct spdk_nvmf_transport *transport;
    spdk_nvmf_tgt_add_transport_done_fn cb_fn;
    void *cb_arg;
};

static void
_nvmf_tgt_add_transport_done(struct spdk_io_channel_iter *i, int status)
{
    struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

    ctx->cb_fn(ctx->cb_arg, status);

    free(ctx);
}

static void
_nvmf_tgt_add_transport(struct spdk_io_channel_iter *i)
{
    struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
    struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
    struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
    int rc;

    rc = nvmf_poll_group_add_transport(group, ctx->transport);
    spdk_for_each_channel_continue(i, rc);
}

void
spdk_nvmf_tgt_add_transport(struct spdk_nvmf_tgt *tgt,
                            struct spdk_nvmf_transport *transport,
                            spdk_nvmf_tgt_add_transport_done_fn cb_fn,
                            void *cb_arg)
{
    struct spdk_nvmf_tgt_add_transport_ctx *ctx;

    if (spdk_nvmf_tgt_get_transport(tgt, transport->ops->name)) {
        cb_fn(cb_arg, -EEXIST);
        return; /* transport already created */
    }

    transport->tgt = tgt;
    TAILQ_INSERT_TAIL(&tgt->transports, transport, link);

    ctx = calloc(1, sizeof(*ctx));
    if (!ctx) {
        cb_fn(cb_arg, -ENOMEM);
        return;
    }

    ctx->tgt = tgt;
    ctx->transport = transport;
    ctx->cb_fn = cb_fn;
    ctx->cb_arg = cb_arg;

    spdk_for_each_channel(tgt,
                          _nvmf_tgt_add_transport,
                          ctx,
                          _nvmf_tgt_add_transport_done);
}

struct spdk_nvmf_subsystem *
spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
{
    struct spdk_nvmf_subsystem *subsystem;
    uint32_t sid;

    if (!subnqn) {
        return NULL;
    }

    /* Ensure that subnqn is null terminated */
    if (!memchr(subnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1)) {
        SPDK_ERRLOG("Connect SUBNQN is not null terminated\n");
        return NULL;
    }

    for (sid = 0; sid < tgt->max_subsystems; sid++) {
        subsystem = tgt->subsystems[sid];
        if (subsystem == NULL) {
            continue;
        }

        if (strcmp(subnqn, subsystem->subnqn) == 0) {
            return subsystem;
        }
    }

    return NULL;
}
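
/*
 * Sketch of how a transport is typically created and attached with
 * spdk_nvmf_tgt_add_transport() above (the callback is illustrative, and
 * topts is assumed to be initialized for the chosen transport type):
 *
 *     static void
 *     add_transport_done(void *cb_arg, int status)
 *     {
 *         // status is 0 once every poll group has a tgroup for the transport
 *     }
 *
 *     struct spdk_nvmf_transport *transport = spdk_nvmf_transport_create("TCP", &topts);
 *
 *     if (transport != NULL) {
 *         spdk_nvmf_tgt_add_transport(tgt, transport, add_transport_done, NULL);
 *     }
 */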

struct spdk_nvmf_transport *
spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
{
    struct spdk_nvmf_transport *transport;

    TAILQ_FOREACH(transport, &tgt->transports, link) {
        if (!strncasecmp(transport->ops->name, transport_name, SPDK_NVMF_TRSTRING_MAX_LEN)) {
            return transport;
        }
    }
    return NULL;
}

struct nvmf_new_qpair_ctx {
    struct spdk_nvmf_qpair *qpair;
    struct spdk_nvmf_poll_group *group;
};

static void
_nvmf_poll_group_add(void *_ctx)
{
    struct nvmf_new_qpair_ctx *ctx = _ctx;
    struct spdk_nvmf_qpair *qpair = ctx->qpair;
    struct spdk_nvmf_poll_group *group = ctx->group;

    free(_ctx);

    if (spdk_nvmf_poll_group_add(group, qpair) != 0) {
        SPDK_ERRLOG("Unable to add the qpair to a poll group.\n");
        spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
    }
}

void
spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair)
{
    struct spdk_nvmf_poll_group *group;
    struct nvmf_new_qpair_ctx *ctx;

    group = spdk_nvmf_get_optimal_poll_group(qpair);
    if (group == NULL) {
        if (tgt->next_poll_group == NULL) {
            tgt->next_poll_group = TAILQ_FIRST(&tgt->poll_groups);
            if (tgt->next_poll_group == NULL) {
                SPDK_ERRLOG("No poll groups exist.\n");
                spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
                return;
            }
        }
        group = tgt->next_poll_group;
        tgt->next_poll_group = TAILQ_NEXT(group, link);
    }

    ctx = calloc(1, sizeof(*ctx));
    if (!ctx) {
        SPDK_ERRLOG("Unable to send message to poll group.\n");
        spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
        return;
    }

    ctx->qpair = qpair;
    ctx->group = group;

    spdk_thread_send_msg(group->thread, _nvmf_poll_group_add, ctx);
}

struct spdk_nvmf_poll_group *
spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt)
{
    struct spdk_io_channel *ch;

    ch = spdk_get_io_channel(tgt);
    if (!ch) {
        SPDK_ERRLOG("Unable to get I/O channel for target\n");
        return NULL;
    }

    return spdk_io_channel_get_ctx(ch);
}

void
spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group,
                             spdk_nvmf_poll_group_destroy_done_fn cb_fn,
                             void *cb_arg)
{
    assert(group->destroy_cb_fn == NULL);
    group->destroy_cb_fn = cb_fn;
    group->destroy_cb_arg = cb_arg;

    /* This function will put the io_channel associated with this poll group */
    nvmf_tgt_destroy_poll_group_qpairs(group);
}

int
spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
                         struct spdk_nvmf_qpair *qpair)
{
    int rc = -1;
    struct spdk_nvmf_transport_poll_group *tgroup;

    TAILQ_INIT(&qpair->outstanding);
    qpair->group = group;
    qpair->ctrlr = NULL;
    qpair->disconnect_started = false;

    TAILQ_FOREACH(tgroup, &group->tgroups, link) {
        if (tgroup->transport == qpair->transport) {
            rc = nvmf_transport_poll_group_add(tgroup, qpair);
            break;
        }
    }

    /* We add the qpair to the group only if it is successfully added into the tgroup */
    if (rc == 0) {
        TAILQ_INSERT_TAIL(&group->qpairs, qpair, link);
        nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ACTIVE);
    }

    return rc;
}
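
/*
 * Illustrative call path for spdk_nvmf_tgt_new_qpair() above: a transport's
 * accept handler passes each new connection to the target, which picks a poll
 * group (the transport's optimal group if it reports one, round-robin
 * otherwise) and completes the add on that group's thread:
 *
 *     // inside a hypothetical transport accept callback
 *     struct spdk_nvmf_qpair *qpair = example_accept_one_connection(transport);
 *
 *     if (qpair != NULL) {
 *         spdk_nvmf_tgt_new_qpair(transport->tgt, qpair);
 *     }
 */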

static void
_nvmf_ctrlr_destruct(void *ctx)
{
    struct spdk_nvmf_ctrlr *ctrlr = ctx;

    nvmf_ctrlr_destruct(ctrlr);
}

static void
_nvmf_transport_qpair_fini_complete(void *cb_ctx)
{
    struct nvmf_qpair_disconnect_ctx *qpair_ctx = cb_ctx;

    if (qpair_ctx->cb_fn) {
        spdk_thread_send_msg(qpair_ctx->thread, qpair_ctx->cb_fn, qpair_ctx->ctx);
    }
    free(qpair_ctx);
}

static void
_nvmf_transport_qpair_fini(void *ctx)
{
    struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;

    nvmf_transport_qpair_fini(qpair_ctx->qpair, _nvmf_transport_qpair_fini_complete, qpair_ctx);
}

static void
_nvmf_ctrlr_free_from_qpair(void *ctx)
{
    struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
    struct spdk_nvmf_ctrlr *ctrlr = qpair_ctx->ctrlr;
    uint32_t count;

    spdk_bit_array_clear(ctrlr->qpair_mask, qpair_ctx->qid);
    count = spdk_bit_array_count_set(ctrlr->qpair_mask);
    if (count == 0) {
        ctrlr->in_destruct = true;
        spdk_thread_send_msg(ctrlr->subsys->thread, _nvmf_ctrlr_destruct, ctrlr);
    }

    spdk_thread_send_msg(qpair_ctx->thread, _nvmf_transport_qpair_fini, qpair_ctx);
}

void
spdk_nvmf_poll_group_remove(struct spdk_nvmf_qpair *qpair)
{
    struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
    struct spdk_nvmf_transport_poll_group *tgroup;
    struct spdk_nvmf_request *req, *tmp;
    struct spdk_nvmf_subsystem_poll_group *sgroup;
    int rc;

    nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ERROR);

    /* Find the tgroup and remove the qpair from the tgroup */
    TAILQ_FOREACH(tgroup, &qpair->group->tgroups, link) {
        if (tgroup->transport == qpair->transport) {
            rc = nvmf_transport_poll_group_remove(tgroup, qpair);
            if (rc && (rc != -ENOTSUP)) {
                SPDK_ERRLOG("Cannot remove qpair=%p from transport group=%p\n",
                            qpair, tgroup);
            }
            break;
        }
    }

    if (ctrlr) {
        sgroup = &qpair->group->sgroups[ctrlr->subsys->id];
        TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
            if (req->qpair == qpair) {
                TAILQ_REMOVE(&sgroup->queued, req, link);
                if (nvmf_transport_req_free(req)) {
                    SPDK_ERRLOG("Transport request free error!\n");
                }
            }
        }
    }

    TAILQ_REMOVE(&qpair->group->qpairs, qpair, link);
    qpair->group = NULL;
}

static void
_nvmf_qpair_destroy(void *ctx, int status)
{
    struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
    struct spdk_nvmf_qpair *qpair = qpair_ctx->qpair;
    struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

    assert(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING);
    qpair_ctx->qid = qpair->qid;

    spdk_nvmf_poll_group_remove(qpair);

    if (!ctrlr || !ctrlr->thread) {
        nvmf_transport_qpair_fini(qpair, _nvmf_transport_qpair_fini_complete, qpair_ctx);
        return;
    }

    qpair_ctx->ctrlr = ctrlr;
    spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_free_from_qpair, qpair_ctx);
}

static void
_nvmf_qpair_disconnect_msg(void *ctx)
{
    struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;

    spdk_nvmf_qpair_disconnect(qpair_ctx->qpair, qpair_ctx->cb_fn, qpair_ctx->ctx);
    free(ctx);
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
    struct nvmf_qpair_disconnect_ctx *qpair_ctx;

    if (__atomic_test_and_set(&qpair->disconnect_started, __ATOMIC_RELAXED)) {
        if (cb_fn) {
            cb_fn(ctx);
        }
        return 0;
    }

    /* If we get a qpair in the uninitialized state, we can just destroy it immediately */
    if (qpair->state == SPDK_NVMF_QPAIR_UNINITIALIZED) {
        nvmf_transport_qpair_fini(qpair, NULL, NULL);
        if (cb_fn) {
            cb_fn(ctx);
        }
        return 0;
    }

    assert(qpair->group != NULL);
    if (spdk_get_thread() != qpair->group->thread) {
        /* clear the atomic so we can set it on the next call on the proper thread. */
        __atomic_clear(&qpair->disconnect_started, __ATOMIC_RELAXED);
        qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx));
        if (!qpair_ctx) {
            SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n");
            return -ENOMEM;
        }
        qpair_ctx->qpair = qpair;
        qpair_ctx->cb_fn = cb_fn;
        qpair_ctx->thread = qpair->group->thread;
        qpair_ctx->ctx = ctx;
        spdk_thread_send_msg(qpair->group->thread, _nvmf_qpair_disconnect_msg, qpair_ctx);
        return 0;
    }

    assert(qpair->state == SPDK_NVMF_QPAIR_ACTIVE);
    nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_DEACTIVATING);

    qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx));
    if (!qpair_ctx) {
        SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n");
        return -ENOMEM;
    }

    qpair_ctx->qpair = qpair;
    qpair_ctx->cb_fn = cb_fn;
    qpair_ctx->thread = qpair->group->thread;
    qpair_ctx->ctx = ctx;

    /* Check for outstanding I/O */
    if (!TAILQ_EMPTY(&qpair->outstanding)) {
        qpair->state_cb = _nvmf_qpair_destroy;
        qpair->state_cb_arg = qpair_ctx;
        nvmf_qpair_free_aer(qpair);
        return 0;
    }

    _nvmf_qpair_destroy(qpair_ctx, 0);

    return 0;
}

int
spdk_nvmf_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
                              struct spdk_nvme_transport_id *trid)
{
    return nvmf_transport_qpair_get_peer_trid(qpair, trid);
}

int
spdk_nvmf_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
                               struct spdk_nvme_transport_id *trid)
{
    return nvmf_transport_qpair_get_local_trid(qpair, trid);
}

int
spdk_nvmf_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
                                struct spdk_nvme_transport_id *trid)
{
    return nvmf_transport_qpair_get_listen_trid(qpair, trid);
}

int
nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
                              struct spdk_nvmf_transport *transport)
{
    struct spdk_nvmf_transport_poll_group *tgroup;

    TAILQ_FOREACH(tgroup, &group->tgroups, link) {
        if (tgroup->transport == transport) {
            /* Transport already in the poll group */
            return 0;
        }
    }

    tgroup = nvmf_transport_poll_group_create(transport);
    if (!tgroup) {
        SPDK_ERRLOG("Unable to create poll group for transport\n");
        return -1;
    }

    tgroup->group = group;
    TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);

    return 0;
}
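
/*
 * Usage note for spdk_nvmf_qpair_disconnect() above: it may be called from
 * any thread, and only the first caller drives the teardown; later callers
 * simply have their callback invoked. A hedged example for a caller that
 * must run code after a specific qpair is gone:
 *
 *     static void
 *     disconnect_done(void *ctx)    // nvmf_qpair_disconnect_cb
 *     {
 *         // transport resources for the qpair have been released
 *     }
 *
 *     spdk_nvmf_qpair_disconnect(qpair, disconnect_done, my_ctx);
 */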

static int
poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
                            struct spdk_nvmf_subsystem *subsystem)
{
    struct spdk_nvmf_subsystem_poll_group *sgroup;
    uint32_t new_num_ns, old_num_ns;
    uint32_t i, j;
    struct spdk_nvmf_ns *ns;
    struct spdk_nvmf_registrant *reg, *tmp;
    struct spdk_io_channel *ch;
    struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
    struct spdk_nvmf_ctrlr *ctrlr;
    bool ns_changed;

    /* Make sure our poll group has memory for this subsystem allocated */
    if (subsystem->id >= group->num_sgroups) {
        return -ENOMEM;
    }

    sgroup = &group->sgroups[subsystem->id];

    /* Make sure the array of namespace information is the correct size */
    new_num_ns = subsystem->max_nsid;
    old_num_ns = sgroup->num_ns;

    ns_changed = false;

    if (old_num_ns == 0) {
        if (new_num_ns > 0) {
            /* First allocation */
            sgroup->ns_info = calloc(new_num_ns, sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
            if (!sgroup->ns_info) {
                return -ENOMEM;
            }
        }
    } else if (new_num_ns > old_num_ns) {
        void *buf;

        /* Make the array larger */
        buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
        if (!buf) {
            return -ENOMEM;
        }

        sgroup->ns_info = buf;

        /* Null out the new namespace information slots */
        for (i = old_num_ns; i < new_num_ns; i++) {
            memset(&sgroup->ns_info[i], 0, sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
        }
    } else if (new_num_ns < old_num_ns) {
        void *buf;

        /* Free the extra I/O channels */
        for (i = new_num_ns; i < old_num_ns; i++) {
            ns_info = &sgroup->ns_info[i];

            if (ns_info->channel) {
                spdk_put_io_channel(ns_info->channel);
                ns_info->channel = NULL;
            }
        }

        /* Make the array smaller */
        if (new_num_ns > 0) {
            buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
            if (!buf) {
                return -ENOMEM;
            }
            sgroup->ns_info = buf;
        } else {
            free(sgroup->ns_info);
            sgroup->ns_info = NULL;
        }
    }

    sgroup->num_ns = new_num_ns;

    /* Detect bdevs that were added or removed */
    for (i = 0; i < sgroup->num_ns; i++) {
        ns = subsystem->ns[i];
        ns_info = &sgroup->ns_info[i];
        ch = ns_info->channel;

        if (ns == NULL && ch == NULL) {
            /* Both NULL. Leave empty */
        } else if (ns == NULL && ch != NULL) {
            /* There was a channel here, but the namespace is gone. */
            ns_changed = true;
            spdk_put_io_channel(ch);
            ns_info->channel = NULL;
        } else if (ns != NULL && ch == NULL) {
            /* A namespace appeared but there is no channel yet */
            ns_changed = true;
            ch = spdk_bdev_get_io_channel(ns->desc);
            if (ch == NULL) {
                SPDK_ERRLOG("Could not allocate I/O channel.\n");
                return -ENOMEM;
            }
            ns_info->channel = ch;
        } else if (spdk_uuid_compare(&ns_info->uuid, spdk_bdev_get_uuid(ns->bdev)) != 0) {
            /* A namespace was here before, but was replaced by a new one. */
            ns_changed = true;
            spdk_put_io_channel(ns_info->channel);
            memset(ns_info, 0, sizeof(*ns_info));

            ch = spdk_bdev_get_io_channel(ns->desc);
            if (ch == NULL) {
                SPDK_ERRLOG("Could not allocate I/O channel.\n");
                return -ENOMEM;
            }
            ns_info->channel = ch;
        } else if (ns_info->num_blocks != spdk_bdev_get_num_blocks(ns->bdev)) {
            /* Namespace is still there but size has changed */
            SPDK_DEBUGLOG(nvmf, "Namespace resized: subsystem_id %u,"
                          " nsid %u, pg %p, old %" PRIu64 ", new %" PRIu64 "\n",
                          subsystem->id,
                          ns->nsid,
                          group,
                          ns_info->num_blocks,
                          spdk_bdev_get_num_blocks(ns->bdev));
            ns_changed = true;
        }

        if (ns == NULL) {
            memset(ns_info, 0, sizeof(*ns_info));
        } else {
            ns_info->uuid = *spdk_bdev_get_uuid(ns->bdev);
            ns_info->num_blocks = spdk_bdev_get_num_blocks(ns->bdev);
            ns_info->crkey = ns->crkey;
            ns_info->rtype = ns->rtype;
            if (ns->holder) {
                ns_info->holder_id = ns->holder->hostid;
            }

            memset(&ns_info->reg_hostid, 0, SPDK_NVMF_MAX_NUM_REGISTRANTS * sizeof(struct spdk_uuid));
            j = 0;
            TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
                if (j >= SPDK_NVMF_MAX_NUM_REGISTRANTS) {
                    SPDK_ERRLOG("A maximum of %u registrants is supported.\n", SPDK_NVMF_MAX_NUM_REGISTRANTS);
                    return -EINVAL;
                }
                ns_info->reg_hostid[j++] = reg->hostid;
            }
        }
    }

    if (ns_changed) {
        TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
            if (ctrlr->admin_qpair->group == group) {
                nvmf_ctrlr_async_event_ns_notice(ctrlr);
                nvmf_ctrlr_async_event_ana_change_notice(ctrlr);
            }
        }
    }

    return 0;
}
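
/*
 * The ns_info[].channel entries cached by poll_group_update_subsystem() above
 * are what the I/O path uses to submit bdev I/O without a per-request channel
 * lookup. A condensed, illustrative read submission through such a cached
 * channel (the completion callback is hypothetical):
 *
 *     struct spdk_nvmf_subsystem_pg_ns_info *ns_info = &sgroup->ns_info[nsid - 1];
 *
 *     rc = spdk_bdev_read_blocks(ns->desc, ns_info->channel, buf,
 *                                offset_blocks, num_blocks, example_read_done, req);
 */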

int
nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
                                 struct spdk_nvmf_subsystem *subsystem)
{
    return poll_group_update_subsystem(group, subsystem);
}

int
nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
                              struct spdk_nvmf_subsystem *subsystem,
                              spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
    int rc = 0;
    struct spdk_nvmf_subsystem_poll_group *sgroup = &group->sgroups[subsystem->id];
    uint32_t i;

    TAILQ_INIT(&sgroup->queued);

    rc = poll_group_update_subsystem(group, subsystem);
    if (rc) {
        nvmf_poll_group_remove_subsystem(group, subsystem, NULL, NULL);
        goto fini;
    }

    sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;

    for (i = 0; i < sgroup->num_ns; i++) {
        sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
    }

fini:
    if (cb_fn) {
        cb_fn(cb_arg, rc);
    }

    return rc;
}

static void
_nvmf_poll_group_remove_subsystem_cb(void *ctx, int status)
{
    struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
    struct spdk_nvmf_subsystem *subsystem;
    struct spdk_nvmf_poll_group *group;
    struct spdk_nvmf_subsystem_poll_group *sgroup;
    spdk_nvmf_poll_group_mod_done cpl_fn = NULL;
    void *cpl_ctx = NULL;
    uint32_t nsid;

    group = qpair_ctx->group;
    subsystem = qpair_ctx->subsystem;
    cpl_fn = qpair_ctx->cpl_fn;
    cpl_ctx = qpair_ctx->cpl_ctx;
    sgroup = &group->sgroups[subsystem->id];

    if (status) {
        goto fini;
    }

    for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
        if (sgroup->ns_info[nsid].channel) {
            spdk_put_io_channel(sgroup->ns_info[nsid].channel);
            sgroup->ns_info[nsid].channel = NULL;
        }
    }

    sgroup->num_ns = 0;
    free(sgroup->ns_info);
    sgroup->ns_info = NULL;
fini:
    free(qpair_ctx);
    if (cpl_fn) {
        cpl_fn(cpl_ctx, status);
    }
}

static void
remove_subsystem_qpair_cb(void *ctx)
{
    struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;

    assert(qpair_ctx->count > 0);
    qpair_ctx->count--;
    if (qpair_ctx->count == 0) {
        _nvmf_poll_group_remove_subsystem_cb(ctx, 0);
    }
}

static void
nvmf_poll_group_remove_subsystem_msg(void *ctx)
{
    struct spdk_nvmf_qpair *qpair, *qpair_tmp;
    struct spdk_nvmf_subsystem *subsystem;
    struct spdk_nvmf_poll_group *group;
    struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
    int rc = 0;

    group = qpair_ctx->group;
    subsystem = qpair_ctx->subsystem;

    /* Initialize count to 1. This acts like a ref count, to ensure that if spdk_nvmf_qpair_disconnect
     * immediately invokes the callback (i.e. the qpair is already in the process of being disconnected)
     * we don't prematurely call _nvmf_poll_group_remove_subsystem_cb() before we've
     * iterated the full list of qpairs.
     */
    qpair_ctx->count = 1;
    TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, qpair_tmp) {
        if ((qpair->ctrlr != NULL) && (qpair->ctrlr->subsys == subsystem)) {
            qpair_ctx->count++;
            rc = spdk_nvmf_qpair_disconnect(qpair, remove_subsystem_qpair_cb, ctx);
            if (rc) {
                break;
            }
        }
    }
    qpair_ctx->count--;

    if (qpair_ctx->count == 0 || rc) {
        _nvmf_poll_group_remove_subsystem_cb(ctx, rc);
    }
}

void
nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
                                 struct spdk_nvmf_subsystem *subsystem,
                                 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
    struct spdk_nvmf_subsystem_poll_group *sgroup;
    struct nvmf_qpair_disconnect_many_ctx *ctx;
    uint32_t i;

    ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx));
    if (!ctx) {
        SPDK_ERRLOG("Unable to allocate memory for context to remove poll subsystem\n");
        if (cb_fn) {
            cb_fn(cb_arg, -1);
        }
        return;
    }

    ctx->group = group;
    ctx->subsystem = subsystem;
    ctx->cpl_fn = cb_fn;
    ctx->cpl_ctx = cb_arg;

    sgroup = &group->sgroups[subsystem->id];
    sgroup->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;

    for (i = 0; i < sgroup->num_ns; i++) {
        sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
    }

    nvmf_poll_group_remove_subsystem_msg(ctx);
}
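
/*
 * The count-starts-at-one pattern in nvmf_poll_group_remove_subsystem_msg()
 * above is a general fan-out idiom: hold one reference for the loop itself so
 * immediate completions cannot finish the operation early. The same idea in a
 * generic, illustrative form (helper names are hypothetical):
 *
 *     ctx->count = 1;                 // reference held by the loop itself
 *     for (...) {
 *         ctx->count++;
 *         start_async_op(item, op_done_decrements_count, ctx);
 *     }
 *     if (--ctx->count == 0) {        // drop the loop's reference
 *         finish(ctx);
 *     }
 */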

void
nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
                                struct spdk_nvmf_subsystem *subsystem,
                                uint32_t nsid,
                                spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
    struct spdk_nvmf_subsystem_poll_group *sgroup;
    struct spdk_nvmf_subsystem_pg_ns_info *ns_info = NULL;
    int rc = 0;

    if (subsystem->id >= group->num_sgroups) {
        rc = -1;
        goto fini;
    }

    sgroup = &group->sgroups[subsystem->id];
    if (sgroup == NULL) {
        rc = -1;
        goto fini;
    }

    if (sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSED) {
        goto fini;
    }
    sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSING;

    /* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
    if (nsid - 1 < sgroup->num_ns) {
        ns_info = &sgroup->ns_info[nsid - 1];
        ns_info->state = SPDK_NVMF_SUBSYSTEM_PAUSING;
    }

    if (sgroup->mgmt_io_outstanding > 0) {
        assert(sgroup->cb_fn == NULL);
        sgroup->cb_fn = cb_fn;
        assert(sgroup->cb_arg == NULL);
        sgroup->cb_arg = cb_arg;
        return;
    }

    if (ns_info != NULL && ns_info->io_outstanding > 0) {
        assert(sgroup->cb_fn == NULL);
        sgroup->cb_fn = cb_fn;
        assert(sgroup->cb_arg == NULL);
        sgroup->cb_arg = cb_arg;
        return;
    }

    assert(sgroup->mgmt_io_outstanding == 0);
    sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
fini:
    if (cb_fn) {
        cb_fn(cb_arg, rc);
    }
}

void
nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
                                 struct spdk_nvmf_subsystem *subsystem,
                                 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
    struct spdk_nvmf_request *req, *tmp;
    struct spdk_nvmf_subsystem_poll_group *sgroup;
    int rc = 0;
    uint32_t i;

    if (subsystem->id >= group->num_sgroups) {
        rc = -1;
        goto fini;
    }

    sgroup = &group->sgroups[subsystem->id];

    if (sgroup->state == SPDK_NVMF_SUBSYSTEM_ACTIVE) {
        goto fini;
    }

    rc = poll_group_update_subsystem(group, subsystem);
    if (rc) {
        goto fini;
    }

    for (i = 0; i < sgroup->num_ns; i++) {
        sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
    }

    sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;

    /* Release all queued requests */
    TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
        TAILQ_REMOVE(&sgroup->queued, req, link);
        spdk_nvmf_request_exec(req);
    }
fini:
    if (cb_fn) {
        cb_fn(cb_arg, rc);
    }
}
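
/*
 * Pause/resume above run per poll group, normally driven by the subsystem
 * state machine iterating every group. An illustrative completion pairing:
 *
 *     static void
 *     pause_done(void *cb_arg, int status)    // spdk_nvmf_poll_group_mod_done
 *     {
 *         // status == 0 once mgmt I/O and the target namespace's I/O have drained
 *     }
 *
 *     nvmf_poll_group_pause_subsystem(group, subsystem, nsid, pause_done, cb_arg);
 */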

struct spdk_nvmf_poll_group *
spdk_nvmf_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
{
    struct spdk_nvmf_transport_poll_group *tgroup;

    tgroup = nvmf_transport_get_optimal_poll_group(qpair->transport, qpair);

    if (tgroup == NULL) {
        return NULL;
    }

    return tgroup->group;
}

int
spdk_nvmf_poll_group_get_stat(struct spdk_nvmf_tgt *tgt,
                              struct spdk_nvmf_poll_group_stat *stat)
{
    struct spdk_io_channel *ch;
    struct spdk_nvmf_poll_group *group;

    SPDK_ERRLOG("spdk_nvmf_poll_group_get_stat is deprecated and will be removed\n");

    if (tgt == NULL || stat == NULL) {
        return -EINVAL;
    }

    ch = spdk_get_io_channel(tgt);
    group = spdk_io_channel_get_ctx(ch);
    *stat = group->stat;
    spdk_put_io_channel(ch);
    return 0;
}

void
spdk_nvmf_poll_group_dump_stat(struct spdk_nvmf_poll_group *group, struct spdk_json_write_ctx *w)
{
    struct spdk_nvmf_transport_poll_group *tgroup;

    spdk_json_write_object_begin(w);

    spdk_json_write_named_string(w, "name", spdk_thread_get_name(spdk_get_thread()));
    spdk_json_write_named_uint32(w, "admin_qpairs", group->stat.admin_qpairs);
    spdk_json_write_named_uint32(w, "io_qpairs", group->stat.io_qpairs);
    spdk_json_write_named_uint64(w, "pending_bdev_io", group->stat.pending_bdev_io);

    spdk_json_write_named_array_begin(w, "transports");

    TAILQ_FOREACH(tgroup, &group->tgroups, link) {
        spdk_json_write_object_begin(w);
        /*
         * The trtype field intentionally contains a transport name as this is more informative.
         * The field has not been renamed for backward compatibility.
         */
        spdk_json_write_named_string(w, "trtype", spdk_nvmf_get_transport_name(tgroup->transport));

        if (tgroup->transport->ops->poll_group_dump_stat) {
            tgroup->transport->ops->poll_group_dump_stat(tgroup, w);
        }

        spdk_json_write_object_end(w);
    }

    spdk_json_write_array_end(w);
    spdk_json_write_object_end(w);
}