1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2018-2019, 2021 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "spdk/bdev.h" 37 #include "spdk/bit_array.h" 38 #include "spdk/thread.h" 39 #include "spdk/nvmf.h" 40 #include "spdk/endian.h" 41 #include "spdk/string.h" 42 #include "spdk/log.h" 43 #include "spdk_internal/usdt.h" 44 45 #include "nvmf_internal.h" 46 #include "transport.h" 47 48 SPDK_LOG_REGISTER_COMPONENT(nvmf) 49 50 #define SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS 1024 51 #define SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US 10000 52 53 static TAILQ_HEAD(, spdk_nvmf_tgt) g_nvmf_tgts = TAILQ_HEAD_INITIALIZER(g_nvmf_tgts); 54 55 typedef void (*nvmf_qpair_disconnect_cpl)(void *ctx, int status); 56 static void nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf); 57 58 /* supplied to a single call to nvmf_qpair_disconnect */ 59 struct nvmf_qpair_disconnect_ctx { 60 struct spdk_nvmf_qpair *qpair; 61 struct spdk_nvmf_ctrlr *ctrlr; 62 nvmf_qpair_disconnect_cb cb_fn; 63 struct spdk_thread *thread; 64 void *ctx; 65 uint16_t qid; 66 }; 67 68 /* 69 * There are several times when we need to iterate through the list of all qpairs and selectively delete them. 70 * In order to do this sequentially without overlap, we must provide a context to recover the next qpair from 71 * to enable calling nvmf_qpair_disconnect on the next desired qpair. 72 */ 73 struct nvmf_qpair_disconnect_many_ctx { 74 struct spdk_nvmf_subsystem *subsystem; 75 struct spdk_nvmf_poll_group *group; 76 spdk_nvmf_poll_group_mod_done cpl_fn; 77 void *cpl_ctx; 78 uint32_t count; 79 }; 80 81 static void 82 nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair, 83 enum spdk_nvmf_qpair_state state) 84 { 85 assert(qpair != NULL); 86 assert(qpair->group->thread == spdk_get_thread()); 87 88 qpair->state = state; 89 } 90 91 static int 92 nvmf_poll_group_poll(void *ctx) 93 { 94 struct spdk_nvmf_poll_group *group = ctx; 95 int rc; 96 int count = 0; 97 struct spdk_nvmf_transport_poll_group *tgroup; 98 99 TAILQ_FOREACH(tgroup, &group->tgroups, link) { 100 rc = nvmf_transport_poll_group_poll(tgroup); 101 if (rc < 0) { 102 return SPDK_POLLER_BUSY; 103 } 104 count += rc; 105 } 106 107 return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; 108 } 109 110 static int 111 nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf) 112 { 113 struct spdk_nvmf_tgt *tgt = io_device; 114 struct spdk_nvmf_poll_group *group = ctx_buf; 115 struct spdk_nvmf_transport *transport; 116 struct spdk_thread *thread = spdk_get_thread(); 117 uint32_t sid; 118 119 SPDK_DTRACE_PROBE1(nvmf_create_poll_group, spdk_thread_get_id(thread)); 120 121 TAILQ_INIT(&group->tgroups); 122 TAILQ_INIT(&group->qpairs); 123 124 TAILQ_FOREACH(transport, &tgt->transports, link) { 125 nvmf_poll_group_add_transport(group, transport); 126 } 127 128 group->num_sgroups = tgt->max_subsystems; 129 group->sgroups = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem_poll_group)); 130 if (!group->sgroups) { 131 return -ENOMEM; 132 } 133 134 for (sid = 0; sid < tgt->max_subsystems; sid++) { 135 struct spdk_nvmf_subsystem *subsystem; 136 137 subsystem = tgt->subsystems[sid]; 138 if (!subsystem) { 139 continue; 140 } 141 142 if (nvmf_poll_group_add_subsystem(group, subsystem, NULL, NULL) != 0) { 143 nvmf_tgt_destroy_poll_group(io_device, ctx_buf); 144 return -1; 145 } 146 } 147 148 pthread_mutex_lock(&tgt->mutex); 149 TAILQ_INSERT_TAIL(&tgt->poll_groups, group, link); 150 pthread_mutex_unlock(&tgt->mutex); 151 152 group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0); 153 group->thread = thread; 154 155 return 0; 156 } 157 158 static void 159 nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf) 160 { 161 struct spdk_nvmf_tgt *tgt = io_device; 162 struct spdk_nvmf_poll_group *group = ctx_buf; 163 struct spdk_nvmf_transport_poll_group *tgroup, *tmp; 164 struct spdk_nvmf_subsystem_poll_group *sgroup; 165 uint32_t sid, nsid; 166 167 SPDK_DTRACE_PROBE1(nvmf_destroy_poll_group, spdk_thread_get_id(group->thread)); 168 169 pthread_mutex_lock(&tgt->mutex); 170 TAILQ_REMOVE(&tgt->poll_groups, group, link); 171 pthread_mutex_unlock(&tgt->mutex); 172 173 TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) { 174 TAILQ_REMOVE(&group->tgroups, tgroup, link); 175 nvmf_transport_poll_group_destroy(tgroup); 176 } 177 178 for (sid = 0; sid < group->num_sgroups; sid++) { 179 sgroup = &group->sgroups[sid]; 180 181 for (nsid = 0; nsid < sgroup->num_ns; nsid++) { 182 if (sgroup->ns_info[nsid].channel) { 183 spdk_put_io_channel(sgroup->ns_info[nsid].channel); 184 sgroup->ns_info[nsid].channel = NULL; 185 } 186 } 187 188 free(sgroup->ns_info); 189 } 190 191 free(group->sgroups); 192 193 spdk_poller_unregister(&group->poller); 194 195 if (group->destroy_cb_fn) { 196 group->destroy_cb_fn(group->destroy_cb_arg, 0); 197 } 198 } 199 200 static void 201 _nvmf_tgt_disconnect_next_qpair(void *ctx) 202 { 203 struct spdk_nvmf_qpair *qpair; 204 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; 205 struct spdk_nvmf_poll_group *group = qpair_ctx->group; 206 struct spdk_io_channel *ch; 207 int rc = 0; 208 209 qpair = TAILQ_FIRST(&group->qpairs); 210 211 if (qpair) { 212 rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_tgt_disconnect_next_qpair, ctx); 213 } 214 215 if (!qpair || rc != 0) { 216 /* When the refcount from the channels reaches 0, nvmf_tgt_destroy_poll_group will be called. */ 217 ch = spdk_io_channel_from_ctx(group); 218 spdk_put_io_channel(ch); 219 free(qpair_ctx); 220 } 221 } 222 223 static void 224 nvmf_tgt_destroy_poll_group_qpairs(struct spdk_nvmf_poll_group *group) 225 { 226 struct nvmf_qpair_disconnect_many_ctx *ctx; 227 228 SPDK_DTRACE_PROBE1(nvmf_destroy_poll_group_qpairs, spdk_thread_get_id(group->thread)); 229 230 ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx)); 231 if (!ctx) { 232 SPDK_ERRLOG("Failed to allocate memory for destroy poll group ctx\n"); 233 return; 234 } 235 236 ctx->group = group; 237 _nvmf_tgt_disconnect_next_qpair(ctx); 238 } 239 240 static int 241 nvmf_tgt_accept(void *ctx) 242 { 243 struct spdk_nvmf_tgt *tgt = ctx; 244 struct spdk_nvmf_transport *transport, *tmp; 245 int count = 0; 246 247 TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, tmp) { 248 count += nvmf_transport_accept(transport); 249 } 250 251 return count; 252 } 253 254 struct spdk_nvmf_tgt * 255 spdk_nvmf_tgt_create(struct spdk_nvmf_target_opts *opts) 256 { 257 struct spdk_nvmf_tgt *tgt, *tmp_tgt; 258 uint32_t acceptor_poll_rate; 259 260 if (strnlen(opts->name, NVMF_TGT_NAME_MAX_LENGTH) == NVMF_TGT_NAME_MAX_LENGTH) { 261 SPDK_ERRLOG("Provided target name exceeds the max length of %u.\n", NVMF_TGT_NAME_MAX_LENGTH); 262 return NULL; 263 } 264 265 TAILQ_FOREACH(tmp_tgt, &g_nvmf_tgts, link) { 266 if (!strncmp(opts->name, tmp_tgt->name, NVMF_TGT_NAME_MAX_LENGTH)) { 267 SPDK_ERRLOG("Provided target name must be unique.\n"); 268 return NULL; 269 } 270 } 271 272 tgt = calloc(1, sizeof(*tgt)); 273 if (!tgt) { 274 return NULL; 275 } 276 277 snprintf(tgt->name, NVMF_TGT_NAME_MAX_LENGTH, "%s", opts->name); 278 279 if (!opts || !opts->max_subsystems) { 280 tgt->max_subsystems = SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS; 281 } else { 282 tgt->max_subsystems = opts->max_subsystems; 283 } 284 285 if (!opts || !opts->acceptor_poll_rate) { 286 acceptor_poll_rate = SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US; 287 } else { 288 acceptor_poll_rate = opts->acceptor_poll_rate; 289 } 290 291 if (!opts) { 292 tgt->crdt[0] = 0; 293 tgt->crdt[1] = 0; 294 tgt->crdt[2] = 0; 295 } else { 296 tgt->crdt[0] = opts->crdt[0]; 297 tgt->crdt[1] = opts->crdt[1]; 298 tgt->crdt[2] = opts->crdt[2]; 299 } 300 301 tgt->discovery_genctr = 0; 302 TAILQ_INIT(&tgt->transports); 303 TAILQ_INIT(&tgt->poll_groups); 304 305 tgt->subsystems = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem *)); 306 if (!tgt->subsystems) { 307 free(tgt); 308 return NULL; 309 } 310 311 pthread_mutex_init(&tgt->mutex, NULL); 312 313 tgt->accept_poller = SPDK_POLLER_REGISTER(nvmf_tgt_accept, tgt, acceptor_poll_rate); 314 if (!tgt->accept_poller) { 315 pthread_mutex_destroy(&tgt->mutex); 316 free(tgt->subsystems); 317 free(tgt); 318 return NULL; 319 } 320 321 spdk_io_device_register(tgt, 322 nvmf_tgt_create_poll_group, 323 nvmf_tgt_destroy_poll_group, 324 sizeof(struct spdk_nvmf_poll_group), 325 tgt->name); 326 327 TAILQ_INSERT_HEAD(&g_nvmf_tgts, tgt, link); 328 329 return tgt; 330 } 331 332 static void 333 _nvmf_tgt_destroy_next_transport(void *ctx) 334 { 335 struct spdk_nvmf_tgt *tgt = ctx; 336 struct spdk_nvmf_transport *transport; 337 338 if (!TAILQ_EMPTY(&tgt->transports)) { 339 transport = TAILQ_FIRST(&tgt->transports); 340 TAILQ_REMOVE(&tgt->transports, transport, link); 341 spdk_nvmf_transport_destroy(transport, _nvmf_tgt_destroy_next_transport, tgt); 342 } else { 343 spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn = tgt->destroy_cb_fn; 344 void *destroy_cb_arg = tgt->destroy_cb_arg; 345 346 pthread_mutex_destroy(&tgt->mutex); 347 free(tgt); 348 349 if (destroy_cb_fn) { 350 destroy_cb_fn(destroy_cb_arg, 0); 351 } 352 } 353 } 354 355 static void 356 nvmf_tgt_destroy_cb(void *io_device) 357 { 358 struct spdk_nvmf_tgt *tgt = io_device; 359 uint32_t i; 360 361 if (tgt->subsystems) { 362 for (i = 0; i < tgt->max_subsystems; i++) { 363 if (tgt->subsystems[i]) { 364 nvmf_subsystem_remove_all_listeners(tgt->subsystems[i], true); 365 spdk_nvmf_subsystem_destroy(tgt->subsystems[i]); 366 } 367 } 368 free(tgt->subsystems); 369 } 370 371 _nvmf_tgt_destroy_next_transport(tgt); 372 } 373 374 void 375 spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt, 376 spdk_nvmf_tgt_destroy_done_fn cb_fn, 377 void *cb_arg) 378 { 379 tgt->destroy_cb_fn = cb_fn; 380 tgt->destroy_cb_arg = cb_arg; 381 382 spdk_poller_unregister(&tgt->accept_poller); 383 384 TAILQ_REMOVE(&g_nvmf_tgts, tgt, link); 385 386 spdk_io_device_unregister(tgt, nvmf_tgt_destroy_cb); 387 } 388 389 const char * 390 spdk_nvmf_tgt_get_name(struct spdk_nvmf_tgt *tgt) 391 { 392 return tgt->name; 393 } 394 395 struct spdk_nvmf_tgt * 396 spdk_nvmf_get_tgt(const char *name) 397 { 398 struct spdk_nvmf_tgt *tgt; 399 uint32_t num_targets = 0; 400 401 TAILQ_FOREACH(tgt, &g_nvmf_tgts, link) { 402 if (name) { 403 if (!strncmp(tgt->name, name, NVMF_TGT_NAME_MAX_LENGTH)) { 404 return tgt; 405 } 406 } 407 num_targets++; 408 } 409 410 /* 411 * special case. If there is only one target and 412 * no name was specified, return the only available 413 * target. If there is more than one target, name must 414 * be specified. 415 */ 416 if (!name && num_targets == 1) { 417 return TAILQ_FIRST(&g_nvmf_tgts); 418 } 419 420 return NULL; 421 } 422 423 struct spdk_nvmf_tgt * 424 spdk_nvmf_get_first_tgt(void) 425 { 426 return TAILQ_FIRST(&g_nvmf_tgts); 427 } 428 429 struct spdk_nvmf_tgt * 430 spdk_nvmf_get_next_tgt(struct spdk_nvmf_tgt *prev) 431 { 432 return TAILQ_NEXT(prev, link); 433 } 434 435 static void 436 nvmf_write_subsystem_config_json(struct spdk_json_write_ctx *w, 437 struct spdk_nvmf_subsystem *subsystem) 438 { 439 struct spdk_nvmf_host *host; 440 struct spdk_nvmf_subsystem_listener *listener; 441 const struct spdk_nvme_transport_id *trid; 442 struct spdk_nvmf_ns *ns; 443 struct spdk_nvmf_ns_opts ns_opts; 444 uint32_t max_namespaces; 445 char uuid_str[SPDK_UUID_STRING_LEN]; 446 const char *adrfam; 447 448 if (spdk_nvmf_subsystem_get_type(subsystem) != SPDK_NVMF_SUBTYPE_NVME) { 449 return; 450 } 451 452 /* { */ 453 spdk_json_write_object_begin(w); 454 spdk_json_write_named_string(w, "method", "nvmf_create_subsystem"); 455 456 /* "params" : { */ 457 spdk_json_write_named_object_begin(w, "params"); 458 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 459 spdk_json_write_named_bool(w, "allow_any_host", spdk_nvmf_subsystem_get_allow_any_host(subsystem)); 460 spdk_json_write_named_string(w, "serial_number", spdk_nvmf_subsystem_get_sn(subsystem)); 461 spdk_json_write_named_string(w, "model_number", spdk_nvmf_subsystem_get_mn(subsystem)); 462 463 max_namespaces = spdk_nvmf_subsystem_get_max_namespaces(subsystem); 464 if (max_namespaces != 0) { 465 spdk_json_write_named_uint32(w, "max_namespaces", max_namespaces); 466 } 467 468 spdk_json_write_named_uint32(w, "min_cntlid", spdk_nvmf_subsystem_get_min_cntlid(subsystem)); 469 spdk_json_write_named_uint32(w, "max_cntlid", spdk_nvmf_subsystem_get_max_cntlid(subsystem)); 470 471 /* } "params" */ 472 spdk_json_write_object_end(w); 473 474 /* } */ 475 spdk_json_write_object_end(w); 476 477 for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem); listener != NULL; 478 listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) { 479 trid = spdk_nvmf_subsystem_listener_get_trid(listener); 480 481 adrfam = spdk_nvme_transport_id_adrfam_str(trid->adrfam); 482 483 spdk_json_write_object_begin(w); 484 spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_listener"); 485 486 /* "params" : { */ 487 spdk_json_write_named_object_begin(w, "params"); 488 489 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 490 491 /* "listen_address" : { */ 492 spdk_json_write_named_object_begin(w, "listen_address"); 493 494 spdk_json_write_named_string(w, "trtype", trid->trstring); 495 if (adrfam) { 496 spdk_json_write_named_string(w, "adrfam", adrfam); 497 } 498 499 spdk_json_write_named_string(w, "traddr", trid->traddr); 500 spdk_json_write_named_string(w, "trsvcid", trid->trsvcid); 501 /* } "listen_address" */ 502 spdk_json_write_object_end(w); 503 504 /* } "params" */ 505 spdk_json_write_object_end(w); 506 507 /* } */ 508 spdk_json_write_object_end(w); 509 } 510 511 for (host = spdk_nvmf_subsystem_get_first_host(subsystem); host != NULL; 512 host = spdk_nvmf_subsystem_get_next_host(subsystem, host)) { 513 514 spdk_json_write_object_begin(w); 515 spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_host"); 516 517 /* "params" : { */ 518 spdk_json_write_named_object_begin(w, "params"); 519 520 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 521 spdk_json_write_named_string(w, "host", spdk_nvmf_host_get_nqn(host)); 522 523 /* } "params" */ 524 spdk_json_write_object_end(w); 525 526 /* } */ 527 spdk_json_write_object_end(w); 528 } 529 530 for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL; 531 ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) { 532 spdk_nvmf_ns_get_opts(ns, &ns_opts, sizeof(ns_opts)); 533 534 spdk_json_write_object_begin(w); 535 spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_ns"); 536 537 /* "params" : { */ 538 spdk_json_write_named_object_begin(w, "params"); 539 540 spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); 541 542 /* "namespace" : { */ 543 spdk_json_write_named_object_begin(w, "namespace"); 544 545 spdk_json_write_named_uint32(w, "nsid", spdk_nvmf_ns_get_id(ns)); 546 spdk_json_write_named_string(w, "bdev_name", spdk_bdev_get_name(spdk_nvmf_ns_get_bdev(ns))); 547 548 if (!spdk_mem_all_zero(ns_opts.nguid, sizeof(ns_opts.nguid))) { 549 SPDK_STATIC_ASSERT(sizeof(ns_opts.nguid) == sizeof(uint64_t) * 2, "size mismatch"); 550 spdk_json_write_named_string_fmt(w, "nguid", "%016"PRIX64"%016"PRIX64, from_be64(&ns_opts.nguid[0]), 551 from_be64(&ns_opts.nguid[8])); 552 } 553 554 if (!spdk_mem_all_zero(ns_opts.eui64, sizeof(ns_opts.eui64))) { 555 SPDK_STATIC_ASSERT(sizeof(ns_opts.eui64) == sizeof(uint64_t), "size mismatch"); 556 spdk_json_write_named_string_fmt(w, "eui64", "%016"PRIX64, from_be64(&ns_opts.eui64)); 557 } 558 559 if (!spdk_mem_all_zero(&ns_opts.uuid, sizeof(ns_opts.uuid))) { 560 spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &ns_opts.uuid); 561 spdk_json_write_named_string(w, "uuid", uuid_str); 562 } 563 564 /* "namespace" */ 565 spdk_json_write_object_end(w); 566 567 /* } "params" */ 568 spdk_json_write_object_end(w); 569 570 /* } */ 571 spdk_json_write_object_end(w); 572 } 573 } 574 575 void 576 spdk_nvmf_tgt_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_tgt *tgt) 577 { 578 struct spdk_nvmf_subsystem *subsystem; 579 struct spdk_nvmf_transport *transport; 580 581 spdk_json_write_object_begin(w); 582 spdk_json_write_named_string(w, "method", "nvmf_set_max_subsystems"); 583 584 spdk_json_write_named_object_begin(w, "params"); 585 spdk_json_write_named_uint32(w, "max_subsystems", tgt->max_subsystems); 586 spdk_json_write_object_end(w); 587 588 spdk_json_write_object_end(w); 589 590 spdk_json_write_object_begin(w); 591 spdk_json_write_named_string(w, "method", "nvmf_set_crdt"); 592 spdk_json_write_named_object_begin(w, "params"); 593 spdk_json_write_named_uint32(w, "crdt1", tgt->crdt[0]); 594 spdk_json_write_named_uint32(w, "crdt2", tgt->crdt[1]); 595 spdk_json_write_named_uint32(w, "crdt3", tgt->crdt[2]); 596 spdk_json_write_object_end(w); 597 spdk_json_write_object_end(w); 598 599 /* write transports */ 600 TAILQ_FOREACH(transport, &tgt->transports, link) { 601 spdk_json_write_object_begin(w); 602 spdk_json_write_named_string(w, "method", "nvmf_create_transport"); 603 604 spdk_json_write_named_object_begin(w, "params"); 605 spdk_json_write_named_string(w, "trtype", transport->ops->name); 606 spdk_json_write_named_uint32(w, "max_queue_depth", transport->opts.max_queue_depth); 607 spdk_json_write_named_uint32(w, "max_io_qpairs_per_ctrlr", 608 transport->opts.max_qpairs_per_ctrlr - 1); 609 spdk_json_write_named_uint32(w, "in_capsule_data_size", transport->opts.in_capsule_data_size); 610 spdk_json_write_named_uint32(w, "max_io_size", transport->opts.max_io_size); 611 spdk_json_write_named_uint32(w, "io_unit_size", transport->opts.io_unit_size); 612 spdk_json_write_named_uint32(w, "max_aq_depth", transport->opts.max_aq_depth); 613 spdk_json_write_named_uint32(w, "num_shared_buffers", transport->opts.num_shared_buffers); 614 spdk_json_write_named_uint32(w, "buf_cache_size", transport->opts.buf_cache_size); 615 spdk_json_write_named_bool(w, "dif_insert_or_strip", transport->opts.dif_insert_or_strip); 616 if (transport->ops->dump_opts) { 617 transport->ops->dump_opts(transport, w); 618 } 619 spdk_json_write_named_uint32(w, "abort_timeout_sec", transport->opts.abort_timeout_sec); 620 spdk_json_write_object_end(w); 621 622 spdk_json_write_object_end(w); 623 } 624 625 subsystem = spdk_nvmf_subsystem_get_first(tgt); 626 while (subsystem) { 627 nvmf_write_subsystem_config_json(w, subsystem); 628 subsystem = spdk_nvmf_subsystem_get_next(subsystem); 629 } 630 } 631 632 static void 633 nvmf_listen_opts_copy(struct spdk_nvmf_listen_opts *opts, 634 const struct spdk_nvmf_listen_opts *opts_src, size_t opts_size) 635 { 636 assert(opts); 637 assert(opts_src); 638 639 opts->opts_size = opts_size; 640 641 #define SET_FIELD(field) \ 642 if (offsetof(struct spdk_nvmf_listen_opts, field) + sizeof(opts->field) <= opts_size) { \ 643 opts->field = opts_src->field; \ 644 } \ 645 646 SET_FIELD(transport_specific); 647 #undef SET_FIELD 648 649 /* Do not remove this statement, you should always update this statement when you adding a new field, 650 * and do not forget to add the SET_FIELD statement for your added field. */ 651 SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_listen_opts) == 16, "Incorrect size"); 652 } 653 654 void 655 spdk_nvmf_listen_opts_init(struct spdk_nvmf_listen_opts *opts, size_t opts_size) 656 { 657 struct spdk_nvmf_listen_opts opts_local = {}; 658 659 /* local version of opts should have defaults set here */ 660 661 nvmf_listen_opts_copy(opts, &opts_local, opts_size); 662 } 663 664 int 665 spdk_nvmf_tgt_listen_ext(struct spdk_nvmf_tgt *tgt, const struct spdk_nvme_transport_id *trid, 666 struct spdk_nvmf_listen_opts *opts) 667 { 668 struct spdk_nvmf_transport *transport; 669 int rc; 670 struct spdk_nvmf_listen_opts opts_local = {}; 671 672 if (!opts) { 673 SPDK_ERRLOG("opts should not be NULL\n"); 674 return -EINVAL; 675 } 676 677 if (!opts->opts_size) { 678 SPDK_ERRLOG("The opts_size in opts structure should not be zero\n"); 679 return -EINVAL; 680 } 681 682 transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring); 683 if (!transport) { 684 SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n", 685 trid->trstring); 686 return -EINVAL; 687 } 688 689 nvmf_listen_opts_copy(&opts_local, opts, opts->opts_size); 690 rc = spdk_nvmf_transport_listen(transport, trid, &opts_local); 691 if (rc < 0) { 692 SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr); 693 } 694 695 return rc; 696 } 697 698 int 699 spdk_nvmf_tgt_stop_listen(struct spdk_nvmf_tgt *tgt, 700 struct spdk_nvme_transport_id *trid) 701 { 702 struct spdk_nvmf_transport *transport; 703 int rc; 704 705 transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring); 706 if (!transport) { 707 SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n", 708 trid->trstring); 709 return -EINVAL; 710 } 711 712 rc = spdk_nvmf_transport_stop_listen(transport, trid); 713 if (rc < 0) { 714 SPDK_ERRLOG("Failed to stop listening on address '%s'\n", trid->traddr); 715 return rc; 716 } 717 return 0; 718 } 719 720 struct spdk_nvmf_tgt_add_transport_ctx { 721 struct spdk_nvmf_tgt *tgt; 722 struct spdk_nvmf_transport *transport; 723 spdk_nvmf_tgt_add_transport_done_fn cb_fn; 724 void *cb_arg; 725 }; 726 727 static void 728 _nvmf_tgt_add_transport_done(struct spdk_io_channel_iter *i, int status) 729 { 730 struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 731 732 ctx->cb_fn(ctx->cb_arg, status); 733 734 free(ctx); 735 } 736 737 static void 738 _nvmf_tgt_add_transport(struct spdk_io_channel_iter *i) 739 { 740 struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 741 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); 742 struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch); 743 int rc; 744 745 rc = nvmf_poll_group_add_transport(group, ctx->transport); 746 spdk_for_each_channel_continue(i, rc); 747 } 748 749 void spdk_nvmf_tgt_add_transport(struct spdk_nvmf_tgt *tgt, 750 struct spdk_nvmf_transport *transport, 751 spdk_nvmf_tgt_add_transport_done_fn cb_fn, 752 void *cb_arg) 753 { 754 struct spdk_nvmf_tgt_add_transport_ctx *ctx; 755 756 if (spdk_nvmf_tgt_get_transport(tgt, transport->ops->name)) { 757 cb_fn(cb_arg, -EEXIST); 758 return; /* transport already created */ 759 } 760 761 transport->tgt = tgt; 762 TAILQ_INSERT_TAIL(&tgt->transports, transport, link); 763 764 ctx = calloc(1, sizeof(*ctx)); 765 if (!ctx) { 766 cb_fn(cb_arg, -ENOMEM); 767 return; 768 } 769 770 ctx->tgt = tgt; 771 ctx->transport = transport; 772 ctx->cb_fn = cb_fn; 773 ctx->cb_arg = cb_arg; 774 775 spdk_for_each_channel(tgt, 776 _nvmf_tgt_add_transport, 777 ctx, 778 _nvmf_tgt_add_transport_done); 779 } 780 781 struct spdk_nvmf_subsystem * 782 spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn) 783 { 784 struct spdk_nvmf_subsystem *subsystem; 785 uint32_t sid; 786 787 if (!subnqn) { 788 return NULL; 789 } 790 791 /* Ensure that subnqn is null terminated */ 792 if (!memchr(subnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1)) { 793 SPDK_ERRLOG("Connect SUBNQN is not null terminated\n"); 794 return NULL; 795 } 796 797 for (sid = 0; sid < tgt->max_subsystems; sid++) { 798 subsystem = tgt->subsystems[sid]; 799 if (subsystem == NULL) { 800 continue; 801 } 802 803 if (strcmp(subnqn, subsystem->subnqn) == 0) { 804 return subsystem; 805 } 806 } 807 808 return NULL; 809 } 810 811 struct spdk_nvmf_transport * 812 spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name) 813 { 814 struct spdk_nvmf_transport *transport; 815 816 TAILQ_FOREACH(transport, &tgt->transports, link) { 817 if (!strncasecmp(transport->ops->name, transport_name, SPDK_NVMF_TRSTRING_MAX_LEN)) { 818 return transport; 819 } 820 } 821 return NULL; 822 } 823 824 struct nvmf_new_qpair_ctx { 825 struct spdk_nvmf_qpair *qpair; 826 struct spdk_nvmf_poll_group *group; 827 }; 828 829 static void 830 _nvmf_poll_group_add(void *_ctx) 831 { 832 struct nvmf_new_qpair_ctx *ctx = _ctx; 833 struct spdk_nvmf_qpair *qpair = ctx->qpair; 834 struct spdk_nvmf_poll_group *group = ctx->group; 835 836 free(_ctx); 837 838 if (spdk_nvmf_poll_group_add(group, qpair) != 0) { 839 SPDK_ERRLOG("Unable to add the qpair to a poll group.\n"); 840 spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 841 } 842 } 843 844 void 845 spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair) 846 { 847 struct spdk_nvmf_poll_group *group; 848 struct nvmf_new_qpair_ctx *ctx; 849 850 group = spdk_nvmf_get_optimal_poll_group(qpair); 851 if (group == NULL) { 852 if (tgt->next_poll_group == NULL) { 853 tgt->next_poll_group = TAILQ_FIRST(&tgt->poll_groups); 854 if (tgt->next_poll_group == NULL) { 855 SPDK_ERRLOG("No poll groups exist.\n"); 856 spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 857 return; 858 } 859 } 860 group = tgt->next_poll_group; 861 tgt->next_poll_group = TAILQ_NEXT(group, link); 862 } 863 864 ctx = calloc(1, sizeof(*ctx)); 865 if (!ctx) { 866 SPDK_ERRLOG("Unable to send message to poll group.\n"); 867 spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 868 return; 869 } 870 871 ctx->qpair = qpair; 872 ctx->group = group; 873 874 spdk_thread_send_msg(group->thread, _nvmf_poll_group_add, ctx); 875 } 876 877 struct spdk_nvmf_poll_group * 878 spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt) 879 { 880 struct spdk_io_channel *ch; 881 882 ch = spdk_get_io_channel(tgt); 883 if (!ch) { 884 SPDK_ERRLOG("Unable to get I/O channel for target\n"); 885 return NULL; 886 } 887 888 return spdk_io_channel_get_ctx(ch); 889 } 890 891 void 892 spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group, 893 spdk_nvmf_poll_group_destroy_done_fn cb_fn, 894 void *cb_arg) 895 { 896 assert(group->destroy_cb_fn == NULL); 897 group->destroy_cb_fn = cb_fn; 898 group->destroy_cb_arg = cb_arg; 899 900 /* This function will put the io_channel associated with this poll group */ 901 nvmf_tgt_destroy_poll_group_qpairs(group); 902 } 903 904 int 905 spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group, 906 struct spdk_nvmf_qpair *qpair) 907 { 908 int rc = -1; 909 struct spdk_nvmf_transport_poll_group *tgroup; 910 911 TAILQ_INIT(&qpair->outstanding); 912 qpair->group = group; 913 qpair->ctrlr = NULL; 914 qpair->disconnect_started = false; 915 916 TAILQ_FOREACH(tgroup, &group->tgroups, link) { 917 if (tgroup->transport == qpair->transport) { 918 rc = nvmf_transport_poll_group_add(tgroup, qpair); 919 break; 920 } 921 } 922 923 /* We add the qpair to the group only it is succesfully added into the tgroup */ 924 if (rc == 0) { 925 SPDK_DTRACE_PROBE2(nvmf_poll_group_add_qpair, qpair, spdk_thread_get_id(group->thread)); 926 TAILQ_INSERT_TAIL(&group->qpairs, qpair, link); 927 nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ACTIVE); 928 } 929 930 return rc; 931 } 932 933 static void 934 _nvmf_ctrlr_destruct(void *ctx) 935 { 936 struct spdk_nvmf_ctrlr *ctrlr = ctx; 937 938 nvmf_ctrlr_destruct(ctrlr); 939 } 940 941 static void 942 _nvmf_transport_qpair_fini_complete(void *cb_ctx) 943 { 944 struct nvmf_qpair_disconnect_ctx *qpair_ctx = cb_ctx; 945 946 if (qpair_ctx->cb_fn) { 947 spdk_thread_send_msg(qpair_ctx->thread, qpair_ctx->cb_fn, qpair_ctx->ctx); 948 } 949 free(qpair_ctx); 950 } 951 952 static void 953 _nvmf_transport_qpair_fini(void *ctx) 954 { 955 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; 956 957 spdk_nvmf_poll_group_remove(qpair_ctx->qpair); 958 nvmf_transport_qpair_fini(qpair_ctx->qpair, _nvmf_transport_qpair_fini_complete, qpair_ctx); 959 } 960 961 static void 962 _nvmf_ctrlr_free_from_qpair(void *ctx) 963 { 964 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; 965 struct spdk_nvmf_ctrlr *ctrlr = qpair_ctx->ctrlr; 966 uint32_t count; 967 968 spdk_bit_array_clear(ctrlr->qpair_mask, qpair_ctx->qid); 969 count = spdk_bit_array_count_set(ctrlr->qpair_mask); 970 if (count == 0) { 971 ctrlr->in_destruct = true; 972 spdk_thread_send_msg(ctrlr->subsys->thread, _nvmf_ctrlr_destruct, ctrlr); 973 } 974 975 spdk_thread_send_msg(qpair_ctx->thread, _nvmf_transport_qpair_fini, qpair_ctx); 976 } 977 978 void 979 spdk_nvmf_poll_group_remove(struct spdk_nvmf_qpair *qpair) 980 { 981 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; 982 struct spdk_nvmf_transport_poll_group *tgroup; 983 struct spdk_nvmf_request *req, *tmp; 984 struct spdk_nvmf_subsystem_poll_group *sgroup; 985 int rc; 986 987 SPDK_DTRACE_PROBE2(nvmf_poll_group_remove_qpair, qpair, 988 spdk_thread_get_id(qpair->group->thread)); 989 nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ERROR); 990 991 /* Find the tgroup and remove the qpair from the tgroup */ 992 TAILQ_FOREACH(tgroup, &qpair->group->tgroups, link) { 993 if (tgroup->transport == qpair->transport) { 994 rc = nvmf_transport_poll_group_remove(tgroup, qpair); 995 if (rc && (rc != ENOTSUP)) { 996 SPDK_ERRLOG("Cannot remove qpair=%p from transport group=%p\n", 997 qpair, tgroup); 998 } 999 break; 1000 } 1001 } 1002 1003 if (ctrlr) { 1004 sgroup = &qpair->group->sgroups[ctrlr->subsys->id]; 1005 TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) { 1006 if (req->qpair == qpair) { 1007 TAILQ_REMOVE(&sgroup->queued, req, link); 1008 if (nvmf_transport_req_free(req)) { 1009 SPDK_ERRLOG("Transport request free error!\n"); 1010 } 1011 } 1012 } 1013 } 1014 1015 TAILQ_REMOVE(&qpair->group->qpairs, qpair, link); 1016 qpair->group = NULL; 1017 } 1018 1019 static void 1020 _nvmf_qpair_destroy(void *ctx, int status) 1021 { 1022 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; 1023 struct spdk_nvmf_qpair *qpair = qpair_ctx->qpair; 1024 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; 1025 1026 assert(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING); 1027 qpair_ctx->qid = qpair->qid; 1028 1029 if (ctrlr) { 1030 if (0 == qpair->qid) { 1031 assert(qpair->group->stat.current_admin_qpairs > 0); 1032 qpair->group->stat.current_admin_qpairs--; 1033 } else { 1034 assert(qpair->group->stat.current_io_qpairs > 0); 1035 qpair->group->stat.current_io_qpairs--; 1036 } 1037 } 1038 1039 if (!ctrlr || !ctrlr->thread) { 1040 spdk_nvmf_poll_group_remove(qpair); 1041 nvmf_transport_qpair_fini(qpair, _nvmf_transport_qpair_fini_complete, qpair_ctx); 1042 return; 1043 } 1044 1045 qpair_ctx->ctrlr = ctrlr; 1046 spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_free_from_qpair, qpair_ctx); 1047 } 1048 1049 static void 1050 _nvmf_qpair_disconnect_msg(void *ctx) 1051 { 1052 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; 1053 1054 spdk_nvmf_qpair_disconnect(qpair_ctx->qpair, qpair_ctx->cb_fn, qpair_ctx->ctx); 1055 free(ctx); 1056 } 1057 1058 int 1059 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx) 1060 { 1061 struct spdk_nvmf_poll_group *group = qpair->group; 1062 struct nvmf_qpair_disconnect_ctx *qpair_ctx; 1063 1064 if (__atomic_test_and_set(&qpair->disconnect_started, __ATOMIC_RELAXED)) { 1065 if (cb_fn) { 1066 cb_fn(ctx); 1067 } 1068 return 0; 1069 } 1070 1071 /* If we get a qpair in the uninitialized state, we can just destroy it immediately */ 1072 if (qpair->state == SPDK_NVMF_QPAIR_UNINITIALIZED) { 1073 nvmf_transport_qpair_fini(qpair, NULL, NULL); 1074 if (cb_fn) { 1075 cb_fn(ctx); 1076 } 1077 return 0; 1078 } 1079 1080 assert(group != NULL); 1081 if (spdk_get_thread() != group->thread) { 1082 /* clear the atomic so we can set it on the next call on the proper thread. */ 1083 __atomic_clear(&qpair->disconnect_started, __ATOMIC_RELAXED); 1084 qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx)); 1085 if (!qpair_ctx) { 1086 SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n"); 1087 return -ENOMEM; 1088 } 1089 qpair_ctx->qpair = qpair; 1090 qpair_ctx->cb_fn = cb_fn; 1091 qpair_ctx->thread = group->thread; 1092 qpair_ctx->ctx = ctx; 1093 spdk_thread_send_msg(group->thread, _nvmf_qpair_disconnect_msg, qpair_ctx); 1094 return 0; 1095 } 1096 1097 SPDK_DTRACE_PROBE2(nvmf_qpair_disconnect, qpair, spdk_thread_get_id(group->thread)); 1098 assert(qpair->state == SPDK_NVMF_QPAIR_ACTIVE); 1099 nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_DEACTIVATING); 1100 1101 qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx)); 1102 if (!qpair_ctx) { 1103 SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n"); 1104 return -ENOMEM; 1105 } 1106 1107 qpair_ctx->qpair = qpair; 1108 qpair_ctx->cb_fn = cb_fn; 1109 qpair_ctx->thread = group->thread; 1110 qpair_ctx->ctx = ctx; 1111 1112 /* Check for outstanding I/O */ 1113 if (!TAILQ_EMPTY(&qpair->outstanding)) { 1114 SPDK_DTRACE_PROBE2(nvmf_poll_group_drain_qpair, qpair, spdk_thread_get_id(group->thread)); 1115 qpair->state_cb = _nvmf_qpair_destroy; 1116 qpair->state_cb_arg = qpair_ctx; 1117 nvmf_qpair_free_aer(qpair); 1118 return 0; 1119 } 1120 1121 _nvmf_qpair_destroy(qpair_ctx, 0); 1122 1123 return 0; 1124 } 1125 1126 int 1127 spdk_nvmf_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair, 1128 struct spdk_nvme_transport_id *trid) 1129 { 1130 return nvmf_transport_qpair_get_peer_trid(qpair, trid); 1131 } 1132 1133 int 1134 spdk_nvmf_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair, 1135 struct spdk_nvme_transport_id *trid) 1136 { 1137 return nvmf_transport_qpair_get_local_trid(qpair, trid); 1138 } 1139 1140 int 1141 spdk_nvmf_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, 1142 struct spdk_nvme_transport_id *trid) 1143 { 1144 return nvmf_transport_qpair_get_listen_trid(qpair, trid); 1145 } 1146 1147 int 1148 nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group, 1149 struct spdk_nvmf_transport *transport) 1150 { 1151 struct spdk_nvmf_transport_poll_group *tgroup; 1152 1153 TAILQ_FOREACH(tgroup, &group->tgroups, link) { 1154 if (tgroup->transport == transport) { 1155 /* Transport already in the poll group */ 1156 return 0; 1157 } 1158 } 1159 1160 tgroup = nvmf_transport_poll_group_create(transport); 1161 if (!tgroup) { 1162 SPDK_ERRLOG("Unable to create poll group for transport\n"); 1163 return -1; 1164 } 1165 1166 tgroup->group = group; 1167 TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link); 1168 1169 return 0; 1170 } 1171 1172 static int 1173 poll_group_update_subsystem(struct spdk_nvmf_poll_group *group, 1174 struct spdk_nvmf_subsystem *subsystem) 1175 { 1176 struct spdk_nvmf_subsystem_poll_group *sgroup; 1177 uint32_t new_num_ns, old_num_ns; 1178 uint32_t i, j; 1179 struct spdk_nvmf_ns *ns; 1180 struct spdk_nvmf_registrant *reg, *tmp; 1181 struct spdk_io_channel *ch; 1182 struct spdk_nvmf_subsystem_pg_ns_info *ns_info; 1183 struct spdk_nvmf_ctrlr *ctrlr; 1184 bool ns_changed; 1185 1186 /* Make sure our poll group has memory for this subsystem allocated */ 1187 if (subsystem->id >= group->num_sgroups) { 1188 return -ENOMEM; 1189 } 1190 1191 sgroup = &group->sgroups[subsystem->id]; 1192 1193 /* Make sure the array of namespace information is the correct size */ 1194 new_num_ns = subsystem->max_nsid; 1195 old_num_ns = sgroup->num_ns; 1196 1197 ns_changed = false; 1198 1199 if (old_num_ns == 0) { 1200 if (new_num_ns > 0) { 1201 /* First allocation */ 1202 sgroup->ns_info = calloc(new_num_ns, sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1203 if (!sgroup->ns_info) { 1204 return -ENOMEM; 1205 } 1206 } 1207 } else if (new_num_ns > old_num_ns) { 1208 void *buf; 1209 1210 /* Make the array larger */ 1211 buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1212 if (!buf) { 1213 return -ENOMEM; 1214 } 1215 1216 sgroup->ns_info = buf; 1217 1218 /* Null out the new namespace information slots */ 1219 for (i = old_num_ns; i < new_num_ns; i++) { 1220 memset(&sgroup->ns_info[i], 0, sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1221 } 1222 } else if (new_num_ns < old_num_ns) { 1223 void *buf; 1224 1225 /* Free the extra I/O channels */ 1226 for (i = new_num_ns; i < old_num_ns; i++) { 1227 ns_info = &sgroup->ns_info[i]; 1228 1229 if (ns_info->channel) { 1230 spdk_put_io_channel(ns_info->channel); 1231 ns_info->channel = NULL; 1232 } 1233 } 1234 1235 /* Make the array smaller */ 1236 if (new_num_ns > 0) { 1237 buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info)); 1238 if (!buf) { 1239 return -ENOMEM; 1240 } 1241 sgroup->ns_info = buf; 1242 } else { 1243 free(sgroup->ns_info); 1244 sgroup->ns_info = NULL; 1245 } 1246 } 1247 1248 sgroup->num_ns = new_num_ns; 1249 1250 /* Detect bdevs that were added or removed */ 1251 for (i = 0; i < sgroup->num_ns; i++) { 1252 ns = subsystem->ns[i]; 1253 ns_info = &sgroup->ns_info[i]; 1254 ch = ns_info->channel; 1255 1256 if (ns == NULL && ch == NULL) { 1257 /* Both NULL. Leave empty */ 1258 } else if (ns == NULL && ch != NULL) { 1259 /* There was a channel here, but the namespace is gone. */ 1260 ns_changed = true; 1261 spdk_put_io_channel(ch); 1262 ns_info->channel = NULL; 1263 } else if (ns != NULL && ch == NULL) { 1264 /* A namespace appeared but there is no channel yet */ 1265 ns_changed = true; 1266 ch = spdk_bdev_get_io_channel(ns->desc); 1267 if (ch == NULL) { 1268 SPDK_ERRLOG("Could not allocate I/O channel.\n"); 1269 return -ENOMEM; 1270 } 1271 ns_info->channel = ch; 1272 } else if (spdk_uuid_compare(&ns_info->uuid, spdk_bdev_get_uuid(ns->bdev)) != 0) { 1273 /* A namespace was here before, but was replaced by a new one. */ 1274 ns_changed = true; 1275 spdk_put_io_channel(ns_info->channel); 1276 memset(ns_info, 0, sizeof(*ns_info)); 1277 1278 ch = spdk_bdev_get_io_channel(ns->desc); 1279 if (ch == NULL) { 1280 SPDK_ERRLOG("Could not allocate I/O channel.\n"); 1281 return -ENOMEM; 1282 } 1283 ns_info->channel = ch; 1284 } else if (ns_info->num_blocks != spdk_bdev_get_num_blocks(ns->bdev)) { 1285 /* Namespace is still there but size has changed */ 1286 SPDK_DEBUGLOG(nvmf, "Namespace resized: subsystem_id %u," 1287 " nsid %u, pg %p, old %" PRIu64 ", new %" PRIu64 "\n", 1288 subsystem->id, 1289 ns->nsid, 1290 group, 1291 ns_info->num_blocks, 1292 spdk_bdev_get_num_blocks(ns->bdev)); 1293 ns_changed = true; 1294 } 1295 1296 if (ns == NULL) { 1297 memset(ns_info, 0, sizeof(*ns_info)); 1298 } else { 1299 ns_info->uuid = *spdk_bdev_get_uuid(ns->bdev); 1300 ns_info->num_blocks = spdk_bdev_get_num_blocks(ns->bdev); 1301 ns_info->crkey = ns->crkey; 1302 ns_info->rtype = ns->rtype; 1303 if (ns->holder) { 1304 ns_info->holder_id = ns->holder->hostid; 1305 } 1306 1307 memset(&ns_info->reg_hostid, 0, SPDK_NVMF_MAX_NUM_REGISTRANTS * sizeof(struct spdk_uuid)); 1308 j = 0; 1309 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 1310 if (j >= SPDK_NVMF_MAX_NUM_REGISTRANTS) { 1311 SPDK_ERRLOG("Maximum %u registrants can support.\n", SPDK_NVMF_MAX_NUM_REGISTRANTS); 1312 return -EINVAL; 1313 } 1314 ns_info->reg_hostid[j++] = reg->hostid; 1315 } 1316 } 1317 } 1318 1319 if (ns_changed) { 1320 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 1321 if (ctrlr->admin_qpair->group == group) { 1322 nvmf_ctrlr_async_event_ns_notice(ctrlr); 1323 nvmf_ctrlr_async_event_ana_change_notice(ctrlr); 1324 } 1325 } 1326 } 1327 1328 return 0; 1329 } 1330 1331 int 1332 nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group, 1333 struct spdk_nvmf_subsystem *subsystem) 1334 { 1335 return poll_group_update_subsystem(group, subsystem); 1336 } 1337 1338 int 1339 nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group, 1340 struct spdk_nvmf_subsystem *subsystem, 1341 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1342 { 1343 int rc = 0; 1344 struct spdk_nvmf_subsystem_poll_group *sgroup = &group->sgroups[subsystem->id]; 1345 uint32_t i; 1346 1347 TAILQ_INIT(&sgroup->queued); 1348 1349 rc = poll_group_update_subsystem(group, subsystem); 1350 if (rc) { 1351 nvmf_poll_group_remove_subsystem(group, subsystem, NULL, NULL); 1352 goto fini; 1353 } 1354 1355 sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1356 1357 for (i = 0; i < sgroup->num_ns; i++) { 1358 sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1359 } 1360 1361 fini: 1362 if (cb_fn) { 1363 cb_fn(cb_arg, rc); 1364 } 1365 1366 return rc; 1367 } 1368 1369 static void 1370 _nvmf_poll_group_remove_subsystem_cb(void *ctx, int status) 1371 { 1372 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; 1373 struct spdk_nvmf_subsystem *subsystem; 1374 struct spdk_nvmf_poll_group *group; 1375 struct spdk_nvmf_subsystem_poll_group *sgroup; 1376 spdk_nvmf_poll_group_mod_done cpl_fn = NULL; 1377 void *cpl_ctx = NULL; 1378 uint32_t nsid; 1379 1380 group = qpair_ctx->group; 1381 subsystem = qpair_ctx->subsystem; 1382 cpl_fn = qpair_ctx->cpl_fn; 1383 cpl_ctx = qpair_ctx->cpl_ctx; 1384 sgroup = &group->sgroups[subsystem->id]; 1385 1386 if (status) { 1387 goto fini; 1388 } 1389 1390 for (nsid = 0; nsid < sgroup->num_ns; nsid++) { 1391 if (sgroup->ns_info[nsid].channel) { 1392 spdk_put_io_channel(sgroup->ns_info[nsid].channel); 1393 sgroup->ns_info[nsid].channel = NULL; 1394 } 1395 } 1396 1397 sgroup->num_ns = 0; 1398 free(sgroup->ns_info); 1399 sgroup->ns_info = NULL; 1400 fini: 1401 free(qpair_ctx); 1402 if (cpl_fn) { 1403 cpl_fn(cpl_ctx, status); 1404 } 1405 } 1406 1407 static void nvmf_poll_group_remove_subsystem_msg(void *ctx); 1408 1409 static void 1410 remove_subsystem_qpair_cb(void *ctx) 1411 { 1412 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; 1413 1414 assert(qpair_ctx->count > 0); 1415 qpair_ctx->count--; 1416 if (qpair_ctx->count == 0) { 1417 /* All of the asynchronous callbacks for this context have been 1418 * completed. Call nvmf_poll_group_remove_subsystem_msg() again 1419 * to check if all associated qpairs for this subsystem have 1420 * been removed from the poll group. 1421 */ 1422 nvmf_poll_group_remove_subsystem_msg(ctx); 1423 } 1424 } 1425 1426 static void 1427 nvmf_poll_group_remove_subsystem_msg(void *ctx) 1428 { 1429 struct spdk_nvmf_qpair *qpair, *qpair_tmp; 1430 struct spdk_nvmf_subsystem *subsystem; 1431 struct spdk_nvmf_poll_group *group; 1432 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; 1433 bool qpairs_found = false; 1434 int rc = 0; 1435 1436 group = qpair_ctx->group; 1437 subsystem = qpair_ctx->subsystem; 1438 1439 /* Initialize count to 1. This acts like a ref count, to ensure that if spdk_nvmf_qpair_disconnect 1440 * immediately invokes the callback (i.e. the qpairs is already in process of being disconnected) 1441 * that we don't recursively call nvmf_poll_group_remove_subsystem_msg before we've iterated the 1442 * full list of qpairs. 1443 */ 1444 qpair_ctx->count = 1; 1445 TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, qpair_tmp) { 1446 if ((qpair->ctrlr != NULL) && (qpair->ctrlr->subsys == subsystem)) { 1447 qpairs_found = true; 1448 qpair_ctx->count++; 1449 rc = spdk_nvmf_qpair_disconnect(qpair, remove_subsystem_qpair_cb, ctx); 1450 if (rc) { 1451 break; 1452 } 1453 } 1454 } 1455 qpair_ctx->count--; 1456 1457 if (!qpairs_found) { 1458 _nvmf_poll_group_remove_subsystem_cb(ctx, 0); 1459 return; 1460 } 1461 1462 if (qpair_ctx->count == 0 || rc) { 1463 /* If count == 0, it means there were some qpairs in the poll group but they 1464 * were already in process of being disconnected. So we send a message to this 1465 * same thread so that this function executes again later. We won't actually 1466 * invoke the remove_subsystem_cb until all of the qpairs are actually removed 1467 * from the poll group. 1468 */ 1469 spdk_thread_send_msg(spdk_get_thread(), nvmf_poll_group_remove_subsystem_msg, ctx); 1470 } 1471 } 1472 1473 void 1474 nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group, 1475 struct spdk_nvmf_subsystem *subsystem, 1476 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1477 { 1478 struct spdk_nvmf_subsystem_poll_group *sgroup; 1479 struct nvmf_qpair_disconnect_many_ctx *ctx; 1480 uint32_t i; 1481 1482 ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx)); 1483 if (!ctx) { 1484 SPDK_ERRLOG("Unable to allocate memory for context to remove poll subsystem\n"); 1485 if (cb_fn) { 1486 cb_fn(cb_arg, -1); 1487 } 1488 return; 1489 } 1490 1491 ctx->group = group; 1492 ctx->subsystem = subsystem; 1493 ctx->cpl_fn = cb_fn; 1494 ctx->cpl_ctx = cb_arg; 1495 1496 sgroup = &group->sgroups[subsystem->id]; 1497 sgroup->state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 1498 1499 for (i = 0; i < sgroup->num_ns; i++) { 1500 sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 1501 } 1502 1503 nvmf_poll_group_remove_subsystem_msg(ctx); 1504 } 1505 1506 void 1507 nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group, 1508 struct spdk_nvmf_subsystem *subsystem, 1509 uint32_t nsid, 1510 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1511 { 1512 struct spdk_nvmf_subsystem_poll_group *sgroup; 1513 struct spdk_nvmf_subsystem_pg_ns_info *ns_info = NULL; 1514 int rc = 0; 1515 1516 if (subsystem->id >= group->num_sgroups) { 1517 rc = -1; 1518 goto fini; 1519 } 1520 1521 sgroup = &group->sgroups[subsystem->id]; 1522 if (sgroup == NULL) { 1523 rc = -1; 1524 goto fini; 1525 } 1526 1527 if (sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSED) { 1528 goto fini; 1529 } 1530 sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSING; 1531 1532 /* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */ 1533 if (nsid - 1 < sgroup->num_ns) { 1534 ns_info = &sgroup->ns_info[nsid - 1]; 1535 ns_info->state = SPDK_NVMF_SUBSYSTEM_PAUSING; 1536 } 1537 1538 if (sgroup->mgmt_io_outstanding > 0) { 1539 assert(sgroup->cb_fn == NULL); 1540 sgroup->cb_fn = cb_fn; 1541 assert(sgroup->cb_arg == NULL); 1542 sgroup->cb_arg = cb_arg; 1543 return; 1544 } 1545 1546 if (ns_info != NULL && ns_info->io_outstanding > 0) { 1547 assert(sgroup->cb_fn == NULL); 1548 sgroup->cb_fn = cb_fn; 1549 assert(sgroup->cb_arg == NULL); 1550 sgroup->cb_arg = cb_arg; 1551 return; 1552 } 1553 1554 assert(sgroup->mgmt_io_outstanding == 0); 1555 sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED; 1556 fini: 1557 if (cb_fn) { 1558 cb_fn(cb_arg, rc); 1559 } 1560 } 1561 1562 void 1563 nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group, 1564 struct spdk_nvmf_subsystem *subsystem, 1565 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) 1566 { 1567 struct spdk_nvmf_request *req, *tmp; 1568 struct spdk_nvmf_subsystem_poll_group *sgroup; 1569 int rc = 0; 1570 uint32_t i; 1571 1572 if (subsystem->id >= group->num_sgroups) { 1573 rc = -1; 1574 goto fini; 1575 } 1576 1577 sgroup = &group->sgroups[subsystem->id]; 1578 1579 if (sgroup->state == SPDK_NVMF_SUBSYSTEM_ACTIVE) { 1580 goto fini; 1581 } 1582 1583 rc = poll_group_update_subsystem(group, subsystem); 1584 if (rc) { 1585 goto fini; 1586 } 1587 1588 for (i = 0; i < sgroup->num_ns; i++) { 1589 sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1590 } 1591 1592 sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1593 1594 /* Release all queued requests */ 1595 TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) { 1596 TAILQ_REMOVE(&sgroup->queued, req, link); 1597 spdk_nvmf_request_exec(req); 1598 } 1599 fini: 1600 if (cb_fn) { 1601 cb_fn(cb_arg, rc); 1602 } 1603 } 1604 1605 1606 struct spdk_nvmf_poll_group * 1607 spdk_nvmf_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair) 1608 { 1609 struct spdk_nvmf_transport_poll_group *tgroup; 1610 1611 tgroup = nvmf_transport_get_optimal_poll_group(qpair->transport, qpair); 1612 1613 if (tgroup == NULL) { 1614 return NULL; 1615 } 1616 1617 return tgroup->group; 1618 } 1619 1620 int 1621 spdk_nvmf_poll_group_get_stat(struct spdk_nvmf_tgt *tgt, 1622 struct spdk_nvmf_poll_group_stat *stat) 1623 { 1624 struct spdk_io_channel *ch; 1625 struct spdk_nvmf_poll_group *group; 1626 1627 SPDK_ERRLOG("spdk_nvmf_poll_group_get_stat is deprecated and will be removed\n"); 1628 1629 if (tgt == NULL || stat == NULL) { 1630 return -EINVAL; 1631 } 1632 1633 ch = spdk_get_io_channel(tgt); 1634 group = spdk_io_channel_get_ctx(ch); 1635 *stat = group->stat; 1636 spdk_put_io_channel(ch); 1637 return 0; 1638 } 1639 1640 void 1641 spdk_nvmf_poll_group_dump_stat(struct spdk_nvmf_poll_group *group, struct spdk_json_write_ctx *w) 1642 { 1643 struct spdk_nvmf_transport_poll_group *tgroup; 1644 1645 spdk_json_write_object_begin(w); 1646 1647 spdk_json_write_named_string(w, "name", spdk_thread_get_name(spdk_get_thread())); 1648 spdk_json_write_named_uint32(w, "admin_qpairs", group->stat.admin_qpairs); 1649 spdk_json_write_named_uint32(w, "io_qpairs", group->stat.io_qpairs); 1650 spdk_json_write_named_uint32(w, "current_admin_qpairs", group->stat.current_admin_qpairs); 1651 spdk_json_write_named_uint32(w, "current_io_qpairs", group->stat.current_io_qpairs); 1652 spdk_json_write_named_uint64(w, "pending_bdev_io", group->stat.pending_bdev_io); 1653 1654 spdk_json_write_named_array_begin(w, "transports"); 1655 1656 TAILQ_FOREACH(tgroup, &group->tgroups, link) { 1657 spdk_json_write_object_begin(w); 1658 /* 1659 * The trtype field intentionally contains a transport name as this is more informative. 1660 * The field has not been renamed for backward compatibility. 1661 */ 1662 spdk_json_write_named_string(w, "trtype", spdk_nvmf_get_transport_name(tgroup->transport)); 1663 1664 if (tgroup->transport->ops->poll_group_dump_stat) { 1665 tgroup->transport->ops->poll_group_dump_stat(tgroup, w); 1666 } 1667 1668 spdk_json_write_object_end(w); 1669 } 1670 1671 spdk_json_write_array_end(w); 1672 spdk_json_write_object_end(w); 1673 } 1674