/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2018-2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/bit_array.h"
#include "spdk/conf.h"
#include "spdk/thread.h"
#include "spdk/nvmf.h"
#include "spdk/trace.h"
#include "spdk/endian.h"
#include "spdk/string.h"

#include "spdk_internal/log.h"

#include "nvmf_internal.h"
#include "transport.h"

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)

#define SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS 1024

static TAILQ_HEAD(, spdk_nvmf_tgt) g_nvmf_tgts = TAILQ_HEAD_INITIALIZER(g_nvmf_tgts);

typedef void (*nvmf_qpair_disconnect_cpl)(void *ctx, int status);
static void spdk_nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf);

/* Supplied to a single call to nvmf_qpair_disconnect */
struct nvmf_qpair_disconnect_ctx {
	struct spdk_nvmf_qpair *qpair;
	struct spdk_nvmf_ctrlr *ctrlr;
	nvmf_qpair_disconnect_cb cb_fn;
	struct spdk_thread *thread;
	void *ctx;
	uint16_t qid;
};

/*
 * There are several places where we need to iterate through the list of all
 * qpairs and selectively delete them. In order to do this sequentially without
 * overlap, we must provide a context to recover the next qpair from, so that
 * we can call nvmf_qpair_disconnect on the next desired qpair.
 */
struct nvmf_qpair_disconnect_many_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_poll_group *group;
	spdk_nvmf_poll_group_mod_done cpl_fn;
	void *cpl_ctx;
};

static void
spdk_nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair,
			  enum spdk_nvmf_qpair_state state)
{
	assert(qpair != NULL);
	assert(qpair->group->thread == spdk_get_thread());

	qpair->state = state;
}

static int
spdk_nvmf_poll_group_poll(void *ctx)
{
	struct spdk_nvmf_poll_group *group = ctx;
	int rc;
	int count = 0;
	struct spdk_nvmf_transport_poll_group *tgroup;

	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		rc = spdk_nvmf_transport_poll_group_poll(tgroup);
		if (rc < 0) {
			return -1;
		}
		count += rc;
	}

	return count;
}

static int
spdk_nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
{
	struct spdk_nvmf_tgt *tgt = io_device;
	struct spdk_nvmf_poll_group *group = ctx_buf;
	struct spdk_nvmf_transport *transport;
	uint32_t sid;

	TAILQ_INIT(&group->tgroups);
	TAILQ_INIT(&group->qpairs);

	TAILQ_FOREACH(transport, &tgt->transports, link) {
		spdk_nvmf_poll_group_add_transport(group, transport);
	}

	group->num_sgroups = tgt->max_subsystems;
	group->sgroups = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem_poll_group));
	if (!group->sgroups) {
		return -ENOMEM;
	}

	for (sid = 0; sid < tgt->max_subsystems; sid++) {
		struct spdk_nvmf_subsystem *subsystem;

		subsystem = tgt->subsystems[sid];
		if (!subsystem) {
			continue;
		}

		if (spdk_nvmf_poll_group_add_subsystem(group, subsystem, NULL, NULL) != 0) {
			spdk_nvmf_tgt_destroy_poll_group(io_device, ctx_buf);
			return -1;
		}
	}

	group->poller = spdk_poller_register(spdk_nvmf_poll_group_poll, group, 0);
	group->thread = spdk_get_thread();

	return 0;
}

static void
spdk_nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf)
{
	struct spdk_nvmf_poll_group *group = ctx_buf;
	struct spdk_nvmf_transport_poll_group *tgroup, *tmp;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	uint32_t sid, nsid;

	TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
		TAILQ_REMOVE(&group->tgroups, tgroup, link);
		spdk_nvmf_transport_poll_group_destroy(tgroup);
	}

	for (sid = 0; sid < group->num_sgroups; sid++) {
		sgroup = &group->sgroups[sid];

		for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
			if (sgroup->ns_info[nsid].channel) {
				spdk_put_io_channel(sgroup->ns_info[nsid].channel);
				sgroup->ns_info[nsid].channel = NULL;
			}
		}

		free(sgroup->ns_info);
	}

	free(group->sgroups);
}
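
/*
 * Disconnect the poll group's qpairs one at a time: each completed disconnect
 * re-invokes this function until the list is empty, at which point the poll
 * group's I/O channel reference is dropped. When the channel reference count
 * reaches zero, spdk_nvmf_tgt_destroy_poll_group is called.
 */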
static void
_nvmf_tgt_disconnect_next_qpair(void *ctx)
{
	struct spdk_nvmf_qpair *qpair;
	struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_poll_group *group = qpair_ctx->group;
	struct spdk_io_channel *ch;
	int rc = 0;

	qpair = TAILQ_FIRST(&group->qpairs);

	if (qpair) {
		rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_tgt_disconnect_next_qpair, ctx);
	}

	if (!qpair || rc != 0) {
		/* When the refcount from the channels reaches 0,
		 * spdk_nvmf_tgt_destroy_poll_group will be called.
		 */
		ch = spdk_io_channel_from_ctx(group);
		spdk_put_io_channel(ch);
		free(qpair_ctx);
	}
}

static void
spdk_nvmf_tgt_destroy_poll_group_qpairs(struct spdk_nvmf_poll_group *group)
{
	struct nvmf_qpair_disconnect_many_ctx *ctx;

	ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx));
	if (!ctx) {
		SPDK_ERRLOG("Failed to allocate memory for destroy poll group ctx\n");
		return;
	}

	spdk_poller_unregister(&group->poller);

	ctx->group = group;
	_nvmf_tgt_disconnect_next_qpair(ctx);
}

struct spdk_nvmf_tgt *
spdk_nvmf_tgt_create(struct spdk_nvmf_target_opts *opts)
{
	struct spdk_nvmf_tgt *tgt, *tmp_tgt;

	if (strnlen(opts->name, NVMF_TGT_NAME_MAX_LENGTH) == NVMF_TGT_NAME_MAX_LENGTH) {
		SPDK_ERRLOG("Provided target name exceeds the max length of %u.\n", NVMF_TGT_NAME_MAX_LENGTH);
		return NULL;
	}

	TAILQ_FOREACH(tmp_tgt, &g_nvmf_tgts, link) {
		if (!strncmp(opts->name, tmp_tgt->name, NVMF_TGT_NAME_MAX_LENGTH)) {
			SPDK_ERRLOG("Provided target name must be unique.\n");
			return NULL;
		}
	}

	tgt = calloc(1, sizeof(*tgt));
	if (!tgt) {
		return NULL;
	}

	snprintf(tgt->name, NVMF_TGT_NAME_MAX_LENGTH, "%s", opts->name);

	/* opts->name was already dereferenced above, so opts cannot be NULL here;
	 * only a zero max_subsystems falls back to the default.
	 */
	if (!opts->max_subsystems) {
		tgt->max_subsystems = SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS;
	} else {
		tgt->max_subsystems = opts->max_subsystems;
	}

	tgt->discovery_genctr = 0;
	tgt->discovery_log_page = NULL;
	tgt->discovery_log_page_size = 0;
	TAILQ_INIT(&tgt->transports);

	tgt->subsystems = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
	if (!tgt->subsystems) {
		free(tgt);
		return NULL;
	}

	TAILQ_INSERT_HEAD(&g_nvmf_tgts, tgt, link);

	spdk_io_device_register(tgt,
				spdk_nvmf_tgt_create_poll_group,
				spdk_nvmf_tgt_destroy_poll_group,
				sizeof(struct spdk_nvmf_poll_group),
				tgt->name);

	return tgt;
}

static void
spdk_nvmf_tgt_destroy_cb(void *io_device)
{
	struct spdk_nvmf_tgt *tgt = io_device;
	struct spdk_nvmf_transport *transport, *transport_tmp;
	spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn;
	void *destroy_cb_arg;
	uint32_t i;

	free(tgt->discovery_log_page);

	if (tgt->subsystems) {
		for (i = 0; i < tgt->max_subsystems; i++) {
			if (tgt->subsystems[i]) {
				spdk_nvmf_subsystem_destroy(tgt->subsystems[i]);
			}
		}
		free(tgt->subsystems);
	}

	TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, transport_tmp) {
		TAILQ_REMOVE(&tgt->transports, transport, link);
		spdk_nvmf_transport_destroy(transport);
	}

	destroy_cb_fn = tgt->destroy_cb_fn;
	destroy_cb_arg = tgt->destroy_cb_arg;

	free(tgt);

	if (destroy_cb_fn) {
		destroy_cb_fn(destroy_cb_arg, 0);
	}
}

void
spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt,
		      spdk_nvmf_tgt_destroy_done_fn cb_fn,
		      void *cb_arg)
{
	tgt->destroy_cb_fn = cb_fn;
	tgt->destroy_cb_arg = cb_arg;

	TAILQ_REMOVE(&g_nvmf_tgts, tgt, link);

	spdk_io_device_unregister(tgt, spdk_nvmf_tgt_destroy_cb);
}

struct spdk_nvmf_tgt *
spdk_nvmf_get_tgt(const char *name)
{
	struct spdk_nvmf_tgt *tgt;
	uint32_t num_targets = 0;

	TAILQ_FOREACH(tgt, &g_nvmf_tgts, link) {
		if (name) {
			if (!strncmp(tgt->name, name, NVMF_TGT_NAME_MAX_LENGTH)) {
				return tgt;
			}
		}
		num_targets++;
	}

	/*
	 * Special case: if there is only one target and no name was specified,
	 * return the only available target. If there is more than one target,
	 * a name must be specified.
	 */
	if (!name && num_targets == 1) {
		return TAILQ_FIRST(&g_nvmf_tgts);
	}

	return NULL;
}
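
/*
 * The function below replays a subsystem's configuration as a sequence of
 * JSON-RPC method invocations. For illustration, the first object it emits
 * looks roughly like this (the values shown are hypothetical):
 *
 *   {
 *     "method": "nvmf_subsystem_create",
 *     "params": {
 *       "nqn": "nqn.2016-06.io.spdk:cnode1",
 *       "allow_any_host": false,
 *       "serial_number": "SPDK00000000000001",
 *       "model_number": "SPDK Controller",
 *       "max_namespaces": 32
 *     }
 *   }
 *
 * Separate objects follow for each listener, allowed host, and namespace.
 */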
static void
spdk_nvmf_write_subsystem_config_json(struct spdk_json_write_ctx *w,
				      struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_host *host;
	struct spdk_nvmf_listener *listener;
	const struct spdk_nvme_transport_id *trid;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_ns_opts ns_opts;
	uint32_t max_namespaces;
	char uuid_str[SPDK_UUID_STRING_LEN];
	const char *trtype;
	const char *adrfam;

	if (spdk_nvmf_subsystem_get_type(subsystem) != SPDK_NVMF_SUBTYPE_NVME) {
		return;
	}

	/* { */
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "nvmf_subsystem_create");

	/* "params" : { */
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
	spdk_json_write_named_bool(w, "allow_any_host", spdk_nvmf_subsystem_get_allow_any_host(subsystem));
	spdk_json_write_named_string(w, "serial_number", spdk_nvmf_subsystem_get_sn(subsystem));
	spdk_json_write_named_string(w, "model_number", spdk_nvmf_subsystem_get_mn(subsystem));

	max_namespaces = spdk_nvmf_subsystem_get_max_namespaces(subsystem);
	if (max_namespaces != 0) {
		spdk_json_write_named_uint32(w, "max_namespaces", max_namespaces);
	}

	/* } "params" */
	spdk_json_write_object_end(w);

	/* } */
	spdk_json_write_object_end(w);

	for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem); listener != NULL;
	     listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) {
		trid = spdk_nvmf_listener_get_trid(listener);

		trtype = spdk_nvme_transport_id_trtype_str(trid->trtype);
		adrfam = spdk_nvme_transport_id_adrfam_str(trid->adrfam);

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_listener");

		/* "params" : { */
		spdk_json_write_named_object_begin(w, "params");

		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));

		/* "listen_address" : { */
		spdk_json_write_named_object_begin(w, "listen_address");

		spdk_json_write_named_string(w, "trtype", trtype);
		if (adrfam) {
			spdk_json_write_named_string(w, "adrfam", adrfam);
		}

		spdk_json_write_named_string(w, "traddr", trid->traddr);
		spdk_json_write_named_string(w, "trsvcid", trid->trsvcid);
		/* } "listen_address" */
		spdk_json_write_object_end(w);

		/* } "params" */
		spdk_json_write_object_end(w);

		/* } */
		spdk_json_write_object_end(w);
	}

	for (host = spdk_nvmf_subsystem_get_first_host(subsystem); host != NULL;
	     host = spdk_nvmf_subsystem_get_next_host(subsystem, host)) {

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_host");

		/* "params" : { */
		spdk_json_write_named_object_begin(w, "params");

		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
		spdk_json_write_named_string(w, "host", spdk_nvmf_host_get_nqn(host));

		/* } "params" */
		spdk_json_write_object_end(w);

		/* } */
		spdk_json_write_object_end(w);
	}
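
	/* One nvmf_subsystem_add_ns invocation per namespace. The NGUID, EUI64,
	 * and UUID identifiers are emitted only when they are set to a non-zero
	 * value in the namespace options.
	 */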
	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		spdk_nvmf_ns_get_opts(ns, &ns_opts, sizeof(ns_opts));

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_ns");

		/* "params" : { */
		spdk_json_write_named_object_begin(w, "params");

		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));

		/* "namespace" : { */
		spdk_json_write_named_object_begin(w, "namespace");

		spdk_json_write_named_uint32(w, "nsid", spdk_nvmf_ns_get_id(ns));
		spdk_json_write_named_string(w, "bdev_name", spdk_bdev_get_name(spdk_nvmf_ns_get_bdev(ns)));

		if (!spdk_mem_all_zero(ns_opts.nguid, sizeof(ns_opts.nguid))) {
			SPDK_STATIC_ASSERT(sizeof(ns_opts.nguid) == sizeof(uint64_t) * 2, "size mismatch");
			spdk_json_write_named_string_fmt(w, "nguid", "%016"PRIX64"%016"PRIX64, from_be64(&ns_opts.nguid[0]),
							 from_be64(&ns_opts.nguid[8]));
		}

		if (!spdk_mem_all_zero(ns_opts.eui64, sizeof(ns_opts.eui64))) {
			SPDK_STATIC_ASSERT(sizeof(ns_opts.eui64) == sizeof(uint64_t), "size mismatch");
			spdk_json_write_named_string_fmt(w, "eui64", "%016"PRIX64, from_be64(&ns_opts.eui64));
		}

		if (!spdk_mem_all_zero(&ns_opts.uuid, sizeof(ns_opts.uuid))) {
			spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &ns_opts.uuid);
			spdk_json_write_named_string(w, "uuid", uuid_str);
		}

		/* } "namespace" */
		spdk_json_write_object_end(w);

		/* } "params" */
		spdk_json_write_object_end(w);

		/* } */
		spdk_json_write_object_end(w);
	}
}

void
spdk_nvmf_tgt_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_tgt *tgt)
{
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_transport *transport;

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "set_nvmf_target_max_subsystems");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint32(w, "max_subsystems", tgt->max_subsystems);
	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);

	/* Write transports */
	TAILQ_FOREACH(transport, &tgt->transports, link) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_create_transport");

		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "trtype", spdk_nvme_transport_id_trtype_str(transport->ops->type));
		spdk_json_write_named_uint32(w, "max_queue_depth", transport->opts.max_queue_depth);
		spdk_json_write_named_uint32(w, "max_qpairs_per_ctrlr", transport->opts.max_qpairs_per_ctrlr);
		spdk_json_write_named_uint32(w, "in_capsule_data_size", transport->opts.in_capsule_data_size);
		spdk_json_write_named_uint32(w, "max_io_size", transport->opts.max_io_size);
		spdk_json_write_named_uint32(w, "io_unit_size", transport->opts.io_unit_size);
		spdk_json_write_named_uint32(w, "max_aq_depth", transport->opts.max_aq_depth);
		if (transport->ops->type == SPDK_NVME_TRANSPORT_RDMA) {
			spdk_json_write_named_uint32(w, "max_srq_depth", transport->opts.max_srq_depth);
		}
		spdk_json_write_object_end(w);

		spdk_json_write_object_end(w);
	}

	subsystem = spdk_nvmf_subsystem_get_first(tgt);
	while (subsystem) {
		spdk_nvmf_write_subsystem_config_json(w, subsystem);
		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
	}
}
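
/*
 * Begin accepting new connections on the address described by trid. The
 * matching transport must already have been created and added to the target
 * via spdk_nvmf_tgt_add_transport(); otherwise this fails with -EINVAL.
 * cb_fn is always invoked exactly once, with 0 on success or a negative errno.
 */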
void
spdk_nvmf_tgt_listen(struct spdk_nvmf_tgt *tgt,
		     struct spdk_nvme_transport_id *trid,
		     spdk_nvmf_tgt_listen_done_fn cb_fn,
		     void *cb_arg)
{
	struct spdk_nvmf_transport *transport;
	const char *trtype;
	int rc;

	transport = spdk_nvmf_tgt_get_transport(tgt, trid->trtype);
	if (!transport) {
		trtype = spdk_nvme_transport_id_trtype_str(trid->trtype);
		if (trtype != NULL) {
			SPDK_ERRLOG("Unable to listen on transport %s. The transport must be created first.\n", trtype);
		} else {
			SPDK_ERRLOG("The specified trtype %d is unknown. Please make sure that it is properly registered.\n",
				    trid->trtype);
		}
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	rc = spdk_nvmf_transport_listen(transport, trid);
	if (rc < 0) {
		SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr);
		cb_fn(cb_arg, rc);
		return;
	}

	tgt->discovery_genctr++;

	cb_fn(cb_arg, 0);
}

struct spdk_nvmf_tgt_add_transport_ctx {
	struct spdk_nvmf_tgt *tgt;
	struct spdk_nvmf_transport *transport;
	spdk_nvmf_tgt_add_transport_done_fn cb_fn;
	void *cb_arg;
};

static void
_spdk_nvmf_tgt_add_transport_done(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cb_fn(ctx->cb_arg, status);

	free(ctx);
}

static void
_spdk_nvmf_tgt_add_transport(struct spdk_io_channel_iter *i)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
	int rc;

	rc = spdk_nvmf_poll_group_add_transport(group, ctx->transport);
	spdk_for_each_channel_continue(i, rc);
}

void
spdk_nvmf_tgt_add_transport(struct spdk_nvmf_tgt *tgt,
			    struct spdk_nvmf_transport *transport,
			    spdk_nvmf_tgt_add_transport_done_fn cb_fn,
			    void *cb_arg)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx;

	if (spdk_nvmf_tgt_get_transport(tgt, transport->ops->type)) {
		cb_fn(cb_arg, -EEXIST);
		return; /* transport already created */
	}

	transport->tgt = tgt;
	TAILQ_INSERT_TAIL(&tgt->transports, transport, link);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->tgt = tgt;
	ctx->transport = transport;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(tgt,
			      _spdk_nvmf_tgt_add_transport,
			      ctx,
			      _spdk_nvmf_tgt_add_transport_done);
}

struct spdk_nvmf_subsystem *
spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
{
	struct spdk_nvmf_subsystem *subsystem;
	uint32_t sid;

	if (!subnqn) {
		return NULL;
	}

	for (sid = 0; sid < tgt->max_subsystems; sid++) {
		subsystem = tgt->subsystems[sid];
		if (subsystem == NULL) {
			continue;
		}

		if (strcmp(subnqn, subsystem->subnqn) == 0) {
			return subsystem;
		}
	}

	return NULL;
}

struct spdk_nvmf_transport *
spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, enum spdk_nvme_transport_type type)
{
	struct spdk_nvmf_transport *transport;

	TAILQ_FOREACH(transport, &tgt->transports, link) {
		if (transport->ops->type == type) {
			return transport;
		}
	}

	return NULL;
}
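
/*
 * Poll every transport for new connections. cb_fn is invoked once for each
 * newly accepted qpair; the application is then responsible for assigning the
 * qpair to a poll group, typically via spdk_nvmf_poll_group_add().
 */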
void
spdk_nvmf_tgt_accept(struct spdk_nvmf_tgt *tgt, new_qpair_fn cb_fn)
{
	struct spdk_nvmf_transport *transport, *tmp;

	TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, tmp) {
		spdk_nvmf_transport_accept(transport, cb_fn);
	}
}

struct spdk_nvmf_poll_group *
spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt)
{
	struct spdk_io_channel *ch;

	ch = spdk_get_io_channel(tgt);
	if (!ch) {
		SPDK_ERRLOG("Unable to get I/O channel for target\n");
		return NULL;
	}

	return spdk_io_channel_get_ctx(ch);
}

void
spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group)
{
	/* This function will put the io_channel associated with this poll group */
	spdk_nvmf_tgt_destroy_poll_group_qpairs(group);
}

int
spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
			 struct spdk_nvmf_qpair *qpair)
{
	int rc = -1;
	struct spdk_nvmf_transport_poll_group *tgroup;

	TAILQ_INIT(&qpair->outstanding);
	qpair->group = group;

	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (tgroup->transport == qpair->transport) {
			rc = spdk_nvmf_transport_poll_group_add(tgroup, qpair);
			break;
		}
	}

	/* We add the qpair to the group only if it was successfully added to the tgroup */
	if (rc == 0) {
		TAILQ_INSERT_TAIL(&group->qpairs, qpair, link);
		spdk_nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ACTIVE);
	}

	return rc;
}

static void
_nvmf_ctrlr_destruct(void *ctx)
{
	struct spdk_nvmf_ctrlr *ctrlr = ctx;

	spdk_nvmf_ctrlr_destruct(ctrlr);
}

static void
_spdk_nvmf_ctrlr_free_from_qpair(void *ctx)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_ctrlr *ctrlr = qpair_ctx->ctrlr;
	uint32_t count;

	spdk_bit_array_clear(ctrlr->qpair_mask, qpair_ctx->qid);
	count = spdk_bit_array_count_set(ctrlr->qpair_mask);
	if (count == 0) {
		spdk_bit_array_free(&ctrlr->qpair_mask);

		spdk_thread_send_msg(ctrlr->subsys->thread, _nvmf_ctrlr_destruct, ctrlr);
	}

	if (qpair_ctx->cb_fn) {
		spdk_thread_send_msg(qpair_ctx->thread, qpair_ctx->cb_fn, qpair_ctx->ctx);
	}
	free(qpair_ctx);
}
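
/*
 * Final stage of a qpair disconnect: mark the qpair as errored, detach it
 * from its transport poll group, drop any of its requests still queued on a
 * paused subsystem, and release the transport-level resources. If the qpair
 * belongs to a controller, ownership of the context then passes to the
 * controller's thread so the controller can be destructed once its last
 * qpair is gone.
 */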
static void
_spdk_nvmf_qpair_destroy(void *ctx, int status)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_qpair *qpair = qpair_ctx->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
	struct spdk_nvmf_transport_poll_group *tgroup;
	struct spdk_nvmf_request *req, *tmp;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	int rc;

	assert(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING);
	spdk_nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ERROR);
	qpair_ctx->qid = qpair->qid;

	/* Find the tgroup and remove the qpair from the tgroup */
	TAILQ_FOREACH(tgroup, &qpair->group->tgroups, link) {
		if (tgroup->transport == qpair->transport) {
			rc = spdk_nvmf_transport_poll_group_remove(tgroup, qpair);
			if (rc && (rc != ENOTSUP)) {
				SPDK_ERRLOG("Cannot remove qpair=%p from transport group=%p\n",
					    qpair, tgroup);
			}
			break;
		}
	}

	if (ctrlr) {
		sgroup = &qpair->group->sgroups[ctrlr->subsys->id];
		TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
			if (req->qpair == qpair) {
				TAILQ_REMOVE(&sgroup->queued, req, link);
				if (spdk_nvmf_transport_req_free(req)) {
					SPDK_ERRLOG("Transport request free error!\n");
				}
			}
		}
	}

	TAILQ_REMOVE(&qpair->group->qpairs, qpair, link);

	spdk_nvmf_transport_qpair_fini(qpair);

	if (!ctrlr || !ctrlr->thread) {
		if (qpair_ctx->cb_fn) {
			spdk_thread_send_msg(qpair_ctx->thread, qpair_ctx->cb_fn, qpair_ctx->ctx);
		}
		free(qpair_ctx);
		return;
	}

	qpair_ctx->ctrlr = ctrlr;
	spdk_thread_send_msg(ctrlr->thread, _spdk_nvmf_ctrlr_free_from_qpair, qpair_ctx);
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx;

	/* If we get a qpair in the uninitialized state, we can just destroy it immediately */
	if (qpair->state == SPDK_NVMF_QPAIR_UNINITIALIZED) {
		spdk_nvmf_transport_qpair_fini(qpair);
		if (cb_fn) {
			cb_fn(ctx);
		}
		return 0;
	}

	/* The queue pair must be disconnected from the thread that owns it */
	assert(qpair->group->thread == spdk_get_thread());

	if (qpair->state != SPDK_NVMF_QPAIR_ACTIVE) {
		/* This can occur if the connection is killed by the target,
		 * which results in a notification that the connection
		 * died. Send a message to defer the processing of this
		 * callback. This allows the stack to unwind in the case
		 * where a bunch of connections are disconnected in
		 * a loop. */
		if (cb_fn) {
			spdk_thread_send_msg(qpair->group->thread, cb_fn, ctx);
		}
		return 0;
	}

	assert(qpair->state == SPDK_NVMF_QPAIR_ACTIVE);
	spdk_nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_DEACTIVATING);

	qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx));
	if (!qpair_ctx) {
		SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n");
		return -ENOMEM;
	}

	qpair_ctx->qpair = qpair;
	qpair_ctx->cb_fn = cb_fn;
	qpair_ctx->thread = qpair->group->thread;
	qpair_ctx->ctx = ctx;

	/* Check for outstanding I/O */
	if (!TAILQ_EMPTY(&qpair->outstanding)) {
		qpair->state_cb = _spdk_nvmf_qpair_destroy;
		qpair->state_cb_arg = qpair_ctx;
		spdk_nvmf_qpair_free_aer(qpair);
		return 0;
	}

	_spdk_nvmf_qpair_destroy(qpair_ctx, 0);

	return 0;
}

int
spdk_nvmf_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
			      struct spdk_nvme_transport_id *trid)
{
	return spdk_nvmf_transport_qpair_get_peer_trid(qpair, trid);
}

int
spdk_nvmf_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
			       struct spdk_nvme_transport_id *trid)
{
	return spdk_nvmf_transport_qpair_get_local_trid(qpair, trid);
}

int
spdk_nvmf_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
				struct spdk_nvme_transport_id *trid)
{
	return spdk_nvmf_transport_qpair_get_listen_trid(qpair, trid);
}

int
spdk_nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
				   struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_transport_poll_group *tgroup;

	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (tgroup->transport == transport) {
			/* Transport already in the poll group */
			return 0;
		}
	}

	tgroup = spdk_nvmf_transport_poll_group_create(transport);
	if (!tgroup) {
		SPDK_ERRLOG("Unable to create poll group for transport\n");
		return -1;
	}

	tgroup->group = group;
	TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);

	return 0;
}
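
/*
 * Synchronize a poll group's per-subsystem state with the subsystem's current
 * set of namespaces: resize the ns_info array, acquire or release bdev I/O
 * channels as namespaces appear, disappear, or are replaced, snapshot the
 * reservation state (registrants, holder, rtype, crkey) for request-time
 * checks, and queue an async namespace-change notice to any controller whose
 * admin qpair runs on this poll group.
 */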
static int
poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
			    struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	uint32_t new_num_ns, old_num_ns;
	uint32_t i, j;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_registrant *reg, *tmp;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
	struct spdk_nvmf_ctrlr *ctrlr;
	bool ns_changed;

	/* Make sure our poll group has memory for this subsystem allocated */
	if (subsystem->id >= group->num_sgroups) {
		return -ENOMEM;
	}

	sgroup = &group->sgroups[subsystem->id];

	/* Make sure the array of namespace information is the correct size */
	new_num_ns = subsystem->max_nsid;
	old_num_ns = sgroup->num_ns;

	ns_changed = false;

	if (old_num_ns == 0) {
		if (new_num_ns > 0) {
			/* First allocation */
			sgroup->ns_info = calloc(new_num_ns, sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
			if (!sgroup->ns_info) {
				return -ENOMEM;
			}
		}
	} else if (new_num_ns > old_num_ns) {
		void *buf;

		/* Make the array larger */
		buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
		if (!buf) {
			return -ENOMEM;
		}

		sgroup->ns_info = buf;

		/* Null out the new namespace information slots */
		for (i = old_num_ns; i < new_num_ns; i++) {
			memset(&sgroup->ns_info[i], 0, sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
		}
	} else if (new_num_ns < old_num_ns) {
		void *buf;

		/* Free the extra I/O channels */
		for (i = new_num_ns; i < old_num_ns; i++) {
			ns_info = &sgroup->ns_info[i];

			if (ns_info->channel) {
				spdk_put_io_channel(ns_info->channel);
				ns_info->channel = NULL;
			}
		}

		/* Make the array smaller */
		if (new_num_ns > 0) {
			buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
			if (!buf) {
				return -ENOMEM;
			}
			sgroup->ns_info = buf;
		} else {
			free(sgroup->ns_info);
			sgroup->ns_info = NULL;
		}
	}

	sgroup->num_ns = new_num_ns;

	/* Detect bdevs that were added or removed */
	for (i = 0; i < sgroup->num_ns; i++) {
		ns = subsystem->ns[i];
		ns_info = &sgroup->ns_info[i];
		ch = ns_info->channel;

		if (ns == NULL && ch == NULL) {
			/* Both NULL. Leave empty */
		} else if (ns == NULL && ch != NULL) {
			/* There was a channel here, but the namespace is gone. */
			ns_changed = true;
			spdk_put_io_channel(ch);
			ns_info->channel = NULL;
		} else if (ns != NULL && ch == NULL) {
			/* A namespace appeared but there is no channel yet */
			ns_changed = true;
			ch = spdk_bdev_get_io_channel(ns->desc);
			if (ch == NULL) {
				SPDK_ERRLOG("Could not allocate I/O channel.\n");
				return -ENOMEM;
			}
			ns_info->channel = ch;
		} else if (spdk_uuid_compare(&ns_info->uuid, spdk_bdev_get_uuid(ns->bdev)) != 0) {
			/* A namespace was here before, but was replaced by a new one. */
			ns_changed = true;
			spdk_put_io_channel(ns_info->channel);
			memset(ns_info, 0, sizeof(*ns_info));

			ch = spdk_bdev_get_io_channel(ns->desc);
			if (ch == NULL) {
				SPDK_ERRLOG("Could not allocate I/O channel.\n");
				return -ENOMEM;
			}
			ns_info->channel = ch;
		}

		if (ns == NULL) {
			memset(ns_info, 0, sizeof(*ns_info));
		} else {
			ns_info->uuid = *spdk_bdev_get_uuid(ns->bdev);
			ns_info->crkey = ns->crkey;
			ns_info->rtype = ns->rtype;
			if (ns->holder) {
				ns_info->holder_id = ns->holder->hostid;
			}

			memset(&ns_info->reg_hostid, 0, SPDK_NVMF_MAX_NUM_REGISTRANTS * sizeof(struct spdk_uuid));
			j = 0;
			TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
				if (j >= SPDK_NVMF_MAX_NUM_REGISTRANTS) {
					SPDK_ERRLOG("A maximum of %u registrants is supported.\n", SPDK_NVMF_MAX_NUM_REGISTRANTS);
					return -EINVAL;
				}
				ns_info->reg_hostid[j++] = reg->hostid;
			}
		}
	}

	if (ns_changed) {
		TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
			if (ctrlr->admin_qpair->group == group) {
				spdk_nvmf_ctrlr_async_event_ns_notice(ctrlr);
			}
		}
	}

	return 0;
}

int
spdk_nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem)
{
	return poll_group_update_subsystem(group, subsystem);
}
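
/*
 * Make a subsystem visible to a poll group. This runs when a new poll group
 * is created (for every existing subsystem) and, for each poll group, when a
 * subsystem becomes active. On success the subsystem poll group is marked
 * ACTIVE; on failure the subsystem is removed from the group again. cb_fn,
 * when provided, is invoked inline with the result.
 */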
int
spdk_nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
				   struct spdk_nvmf_subsystem *subsystem,
				   spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
	int rc = 0;
	struct spdk_nvmf_subsystem_poll_group *sgroup = &group->sgroups[subsystem->id];

	TAILQ_INIT(&sgroup->queued);

	rc = poll_group_update_subsystem(group, subsystem);
	if (rc) {
		spdk_nvmf_poll_group_remove_subsystem(group, subsystem, NULL, NULL);
		goto fini;
	}

	sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
fini:
	if (cb_fn) {
		cb_fn(cb_arg, rc);
	}

	return rc;
}

static void
_nvmf_poll_group_remove_subsystem_cb(void *ctx, int status)
{
	struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_poll_group *group;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	spdk_nvmf_poll_group_mod_done cpl_fn = NULL;
	void *cpl_ctx = NULL;
	uint32_t nsid;

	group = qpair_ctx->group;
	subsystem = qpair_ctx->subsystem;
	cpl_fn = qpair_ctx->cpl_fn;
	cpl_ctx = qpair_ctx->cpl_ctx;
	sgroup = &group->sgroups[subsystem->id];

	if (status) {
		goto fini;
	}

	for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
		if (sgroup->ns_info[nsid].channel) {
			spdk_put_io_channel(sgroup->ns_info[nsid].channel);
			sgroup->ns_info[nsid].channel = NULL;
		}
	}

	sgroup->num_ns = 0;
	free(sgroup->ns_info);
	sgroup->ns_info = NULL;
fini:
	free(qpair_ctx);
	if (cpl_fn) {
		cpl_fn(cpl_ctx, status);
	}
}
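
/*
 * Walk the poll group's qpairs and disconnect, one at a time, every qpair
 * whose controller belongs to the subsystem being removed. Each completed
 * disconnect re-enters this function; once no matching qpair remains (or a
 * disconnect fails), the per-namespace channels are released via
 * _nvmf_poll_group_remove_subsystem_cb.
 */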
static void
_nvmf_subsystem_disconnect_next_qpair(void *ctx)
{
	struct spdk_nvmf_qpair *qpair;
	struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_poll_group *group;
	int rc = 0;

	group = qpair_ctx->group;
	subsystem = qpair_ctx->subsystem;

	TAILQ_FOREACH(qpair, &group->qpairs, link) {
		if ((qpair->ctrlr != NULL) && (qpair->ctrlr->subsys == subsystem)) {
			break;
		}
	}

	if (qpair) {
		rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_subsystem_disconnect_next_qpair, qpair_ctx);
	}

	if (!qpair || rc != 0) {
		_nvmf_poll_group_remove_subsystem_cb(ctx, rc);
	}
}

void
spdk_nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem,
				      spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
	struct spdk_nvmf_qpair *qpair;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	struct nvmf_qpair_disconnect_many_ctx *ctx;
	int rc = 0;

	ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx));
	if (!ctx) {
		SPDK_ERRLOG("Unable to allocate memory for context to remove poll subsystem\n");
		goto fini;
	}

	ctx->group = group;
	ctx->subsystem = subsystem;
	ctx->cpl_fn = cb_fn;
	ctx->cpl_ctx = cb_arg;

	sgroup = &group->sgroups[subsystem->id];
	sgroup->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;

	TAILQ_FOREACH(qpair, &group->qpairs, link) {
		if ((qpair->ctrlr != NULL) && (qpair->ctrlr->subsys == subsystem)) {
			break;
		}
	}

	if (qpair) {
		rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_subsystem_disconnect_next_qpair, ctx);
	} else {
		/* Call the callback immediately. It will handle any channel iteration. */
		_nvmf_poll_group_remove_subsystem_cb(ctx, 0);
	}

	if (rc != 0) {
		free(ctx);
		goto fini;
	}

	return;
fini:
	if (cb_fn) {
		cb_fn(cb_arg, rc);
	}
}

void
spdk_nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
				     struct spdk_nvmf_subsystem *subsystem,
				     spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	int rc = 0;

	if (subsystem->id >= group->num_sgroups) {
		rc = -1;
		goto fini;
	}

	sgroup = &group->sgroups[subsystem->id];
	if (sgroup == NULL) {
		rc = -1;
		goto fini;
	}

	assert(sgroup->state == SPDK_NVMF_SUBSYSTEM_ACTIVE);
	sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSING;

	/* If there is outstanding I/O, defer completion until it drains: the
	 * stored callback is invoked once io_outstanding reaches zero.
	 */
	if (sgroup->io_outstanding > 0) {
		sgroup->cb_fn = cb_fn;
		sgroup->cb_arg = cb_arg;
		return;
	}

	assert(sgroup->io_outstanding == 0);
	sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
fini:
	if (cb_fn) {
		cb_fn(cb_arg, rc);
	}
}

void
spdk_nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem,
				      spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
	struct spdk_nvmf_request *req, *tmp;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	int rc = 0;

	if (subsystem->id >= group->num_sgroups) {
		rc = -1;
		goto fini;
	}

	sgroup = &group->sgroups[subsystem->id];

	assert(sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSED);

	rc = poll_group_update_subsystem(group, subsystem);
	if (rc) {
		goto fini;
	}

	sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;

	/* Release all queued requests */
	TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
		TAILQ_REMOVE(&sgroup->queued, req, link);
		spdk_nvmf_request_exec(req);
	}
fini:
	if (cb_fn) {
		cb_fn(cb_arg, rc);
	}
}
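
/*
 * Ask the qpair's transport which poll group it would prefer to own this
 * qpair (transports that implement the hook can use it, for example, to keep
 * qpairs aligned with their hardware queues). Returns NULL when the transport
 * expresses no preference.
 */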
struct spdk_nvmf_poll_group *
spdk_nvmf_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_transport_poll_group *tgroup;

	tgroup = spdk_nvmf_transport_get_optimal_poll_group(qpair->transport, qpair);

	if (tgroup == NULL) {
		return NULL;
	}

	return tgroup->group;
}

int
spdk_nvmf_poll_group_get_stat(struct spdk_nvmf_tgt *tgt,
			      struct spdk_nvmf_poll_group_stat *stat)
{
	struct spdk_io_channel *ch;
	struct spdk_nvmf_poll_group *group;

	if (tgt == NULL || stat == NULL) {
		return -EINVAL;
	}

	ch = spdk_get_io_channel(tgt);
	group = spdk_io_channel_get_ctx(ch);
	*stat = group->stat;
	spdk_put_io_channel(ch);
	return 0;
}