1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 6 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * * Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * * Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * * Neither the name of Intel Corporation nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35 #include "spdk/stdinc.h" 36 37 #include "nvmf_internal.h" 38 #include "transport.h" 39 40 #include "spdk/assert.h" 41 #include "spdk/likely.h" 42 #include "spdk/string.h" 43 #include "spdk/trace.h" 44 #include "spdk/nvmf_spec.h" 45 #include "spdk/uuid.h" 46 #include "spdk/json.h" 47 #include "spdk/file.h" 48 49 #define __SPDK_BDEV_MODULE_ONLY 50 #include "spdk/bdev_module.h" 51 #include "spdk/log.h" 52 #include "spdk_internal/utf.h" 53 #include "spdk_internal/usdt.h" 54 55 #define MODEL_NUMBER_DEFAULT "SPDK bdev Controller" 56 #define NVMF_SUBSYSTEM_DEFAULT_NAMESPACES 32 57 58 /* 59 * States for parsing valid domains in NQNs according to RFC 1034 60 */ 61 enum spdk_nvmf_nqn_domain_states { 62 /* First character of a domain must be a letter */ 63 SPDK_NVMF_DOMAIN_ACCEPT_LETTER = 0, 64 65 /* Subsequent characters can be any of letter, digit, or hyphen */ 66 SPDK_NVMF_DOMAIN_ACCEPT_LDH = 1, 67 68 /* A domain label must end with either a letter or digit */ 69 SPDK_NVMF_DOMAIN_ACCEPT_ANY = 2 70 }; 71 72 static int _nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem); 73 74 /* Returns true if is a valid ASCII string as defined by the NVMe spec */ 75 static bool 76 nvmf_valid_ascii_string(const void *buf, size_t size) 77 { 78 const uint8_t *str = buf; 79 size_t i; 80 81 for (i = 0; i < size; i++) { 82 if (str[i] < 0x20 || str[i] > 0x7E) { 83 return false; 84 } 85 } 86 87 return true; 88 } 89 90 static bool 91 nvmf_valid_nqn(const char *nqn) 92 { 93 size_t len; 94 struct spdk_uuid uuid_value; 95 uint32_t i; 96 int bytes_consumed; 97 uint32_t domain_label_length; 98 char *reverse_domain_end; 99 uint32_t reverse_domain_end_index; 100 enum spdk_nvmf_nqn_domain_states domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER; 101 102 /* Check for length requirements */ 103 len = strlen(nqn); 104 if (len > SPDK_NVMF_NQN_MAX_LEN) { 105 SPDK_ERRLOG("Invalid NQN \"%s\": length %zu > max %d\n", nqn, len, SPDK_NVMF_NQN_MAX_LEN); 106 return false; 107 } 108 109 /* The nqn 
must be at least as long as SPDK_NVMF_NQN_MIN_LEN to contain the necessary prefix. */ 110 if (len < SPDK_NVMF_NQN_MIN_LEN) { 111 SPDK_ERRLOG("Invalid NQN \"%s\": length %zu < min %d\n", nqn, len, SPDK_NVMF_NQN_MIN_LEN); 112 return false; 113 } 114 115 /* Check for discovery controller nqn */ 116 if (!strcmp(nqn, SPDK_NVMF_DISCOVERY_NQN)) { 117 return true; 118 } 119 120 /* Check for equality with the generic nqn structure of the form "nqn.2014-08.org.nvmexpress:uuid:11111111-2222-3333-4444-555555555555" */ 121 if (!strncmp(nqn, SPDK_NVMF_NQN_UUID_PRE, SPDK_NVMF_NQN_UUID_PRE_LEN)) { 122 if (len != SPDK_NVMF_NQN_UUID_PRE_LEN + SPDK_NVMF_UUID_STRING_LEN) { 123 SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not the correct length\n", nqn); 124 return false; 125 } 126 127 if (spdk_uuid_parse(&uuid_value, &nqn[SPDK_NVMF_NQN_UUID_PRE_LEN])) { 128 SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not formatted correctly\n", nqn); 129 return false; 130 } 131 return true; 132 } 133 134 /* If the nqn does not match the uuid structure, the next several checks validate the form "nqn.yyyy-mm.reverse.domain:user-string" */ 135 136 if (strncmp(nqn, "nqn.", 4) != 0) { 137 SPDK_ERRLOG("Invalid NQN \"%s\": NQN must begin with \"nqn.\".\n", nqn); 138 return false; 139 } 140 141 /* Check for yyyy-mm. */ 142 if (!(isdigit(nqn[4]) && isdigit(nqn[5]) && isdigit(nqn[6]) && isdigit(nqn[7]) && 143 nqn[8] == '-' && isdigit(nqn[9]) && isdigit(nqn[10]) && nqn[11] == '.')) { 144 SPDK_ERRLOG("Invalid date code in NQN \"%s\"\n", nqn); 145 return false; 146 } 147 148 reverse_domain_end = strchr(nqn, ':'); 149 if (reverse_domain_end != NULL && (reverse_domain_end_index = reverse_domain_end - nqn) < len - 1) { 150 } else { 151 SPDK_ERRLOG("Invalid NQN \"%s\". 
NQN must contain user specified name with a ':' as a prefix.\n", 152 nqn); 153 return false; 154 } 155 156 /* Check for valid reverse domain */ 157 domain_label_length = 0; 158 for (i = 12; i < reverse_domain_end_index; i++) { 159 if (domain_label_length > SPDK_DOMAIN_LABEL_MAX_LEN) { 160 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". At least one Label is too long.\n", nqn); 161 return false; 162 } 163 164 switch (domain_state) { 165 166 case SPDK_NVMF_DOMAIN_ACCEPT_LETTER: { 167 if (isalpha(nqn[i])) { 168 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 169 domain_label_length++; 170 break; 171 } else { 172 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must start with a letter.\n", nqn); 173 return false; 174 } 175 } 176 177 case SPDK_NVMF_DOMAIN_ACCEPT_LDH: { 178 if (isalpha(nqn[i]) || isdigit(nqn[i])) { 179 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 180 domain_label_length++; 181 break; 182 } else if (nqn[i] == '-') { 183 if (i == reverse_domain_end_index - 1) { 184 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 185 nqn); 186 return false; 187 } 188 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH; 189 domain_label_length++; 190 break; 191 } else if (nqn[i] == '.') { 192 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 193 nqn); 194 return false; 195 } else { 196 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only [a-z,A-Z,0-9,'-','.'].\n", 197 nqn); 198 return false; 199 } 200 } 201 202 case SPDK_NVMF_DOMAIN_ACCEPT_ANY: { 203 if (isalpha(nqn[i]) || isdigit(nqn[i])) { 204 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 205 domain_label_length++; 206 break; 207 } else if (nqn[i] == '-') { 208 if (i == reverse_domain_end_index - 1) { 209 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". 
Label names must end with an alphanumeric symbol.\n", 210 nqn); 211 return false; 212 } 213 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH; 214 domain_label_length++; 215 break; 216 } else if (nqn[i] == '.') { 217 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER; 218 domain_label_length = 0; 219 break; 220 } else { 221 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only [a-z,A-Z,0-9,'-','.'].\n", 222 nqn); 223 return false; 224 } 225 } 226 } 227 } 228 229 i = reverse_domain_end_index + 1; 230 while (i < len) { 231 bytes_consumed = utf8_valid(&nqn[i], &nqn[len]); 232 if (bytes_consumed <= 0) { 233 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only valid utf-8.\n", nqn); 234 return false; 235 } 236 237 i += bytes_consumed; 238 } 239 return true; 240 } 241 242 static void subsystem_state_change_on_pg(struct spdk_io_channel_iter *i); 243 244 struct spdk_nvmf_subsystem * 245 spdk_nvmf_subsystem_create(struct spdk_nvmf_tgt *tgt, 246 const char *nqn, 247 enum spdk_nvmf_subtype type, 248 uint32_t num_ns) 249 { 250 struct spdk_nvmf_subsystem *subsystem; 251 uint32_t sid; 252 253 if (spdk_nvmf_tgt_find_subsystem(tgt, nqn)) { 254 SPDK_ERRLOG("Subsystem NQN '%s' already exists\n", nqn); 255 return NULL; 256 } 257 258 if (!nvmf_valid_nqn(nqn)) { 259 return NULL; 260 } 261 262 if (type == SPDK_NVMF_SUBTYPE_DISCOVERY) { 263 if (num_ns != 0) { 264 SPDK_ERRLOG("Discovery subsystem cannot have namespaces.\n"); 265 return NULL; 266 } 267 } else if (num_ns == 0) { 268 num_ns = NVMF_SUBSYSTEM_DEFAULT_NAMESPACES; 269 } 270 271 /* Find a free subsystem id (sid) */ 272 for (sid = 0; sid < tgt->max_subsystems; sid++) { 273 if (tgt->subsystems[sid] == NULL) { 274 break; 275 } 276 } 277 if (sid >= tgt->max_subsystems) { 278 return NULL; 279 } 280 281 subsystem = calloc(1, sizeof(struct spdk_nvmf_subsystem)); 282 if (subsystem == NULL) { 283 return NULL; 284 } 285 286 subsystem->thread = spdk_get_thread(); 287 subsystem->state = 
SPDK_NVMF_SUBSYSTEM_INACTIVE; 288 subsystem->tgt = tgt; 289 subsystem->id = sid; 290 subsystem->subtype = type; 291 subsystem->max_nsid = num_ns; 292 subsystem->next_cntlid = 0; 293 subsystem->min_cntlid = NVMF_MIN_CNTLID; 294 subsystem->max_cntlid = NVMF_MAX_CNTLID; 295 snprintf(subsystem->subnqn, sizeof(subsystem->subnqn), "%s", nqn); 296 pthread_mutex_init(&subsystem->mutex, NULL); 297 TAILQ_INIT(&subsystem->listeners); 298 TAILQ_INIT(&subsystem->hosts); 299 TAILQ_INIT(&subsystem->ctrlrs); 300 301 if (num_ns != 0) { 302 subsystem->ns = calloc(num_ns, sizeof(struct spdk_nvmf_ns *)); 303 if (subsystem->ns == NULL) { 304 SPDK_ERRLOG("Namespace memory allocation failed\n"); 305 pthread_mutex_destroy(&subsystem->mutex); 306 free(subsystem); 307 return NULL; 308 } 309 subsystem->ana_group = calloc(num_ns, sizeof(uint32_t)); 310 if (subsystem->ana_group == NULL) { 311 SPDK_ERRLOG("ANA group memory allocation failed\n"); 312 pthread_mutex_destroy(&subsystem->mutex); 313 free(subsystem->ns); 314 free(subsystem); 315 return NULL; 316 } 317 } 318 319 memset(subsystem->sn, '0', sizeof(subsystem->sn) - 1); 320 subsystem->sn[sizeof(subsystem->sn) - 1] = '\0'; 321 322 snprintf(subsystem->mn, sizeof(subsystem->mn), "%s", 323 MODEL_NUMBER_DEFAULT); 324 325 tgt->subsystems[sid] = subsystem; 326 327 return subsystem; 328 } 329 330 /* Must hold subsystem->mutex while calling this function */ 331 static void 332 nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_host *host) 333 { 334 TAILQ_REMOVE(&subsystem->hosts, host, link); 335 free(host); 336 } 337 338 static void 339 _nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem, 340 struct spdk_nvmf_subsystem_listener *listener, 341 bool stop) 342 { 343 struct spdk_nvmf_transport *transport; 344 struct spdk_nvmf_ctrlr *ctrlr; 345 346 if (stop) { 347 transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, listener->trid->trstring); 348 if (transport != NULL) { 349 
spdk_nvmf_transport_stop_listen(transport, listener->trid); 350 } 351 } 352 353 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 354 if (ctrlr->listener == listener) { 355 ctrlr->listener = NULL; 356 } 357 } 358 359 TAILQ_REMOVE(&subsystem->listeners, listener, link); 360 nvmf_update_discovery_log(listener->subsystem->tgt, NULL); 361 free(listener->ana_state); 362 free(listener); 363 } 364 365 static void 366 _nvmf_subsystem_destroy_msg(void *cb_arg) 367 { 368 struct spdk_nvmf_subsystem *subsystem = cb_arg; 369 370 _nvmf_subsystem_destroy(subsystem); 371 } 372 373 static int 374 _nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem) 375 { 376 struct spdk_nvmf_ns *ns; 377 nvmf_subsystem_destroy_cb async_destroy_cb = NULL; 378 void *async_destroy_cb_arg = NULL; 379 int rc; 380 381 if (!TAILQ_EMPTY(&subsystem->ctrlrs)) { 382 SPDK_DEBUGLOG(nvmf, "subsystem %p %s has active controllers\n", subsystem, subsystem->subnqn); 383 subsystem->async_destroy = true; 384 rc = spdk_thread_send_msg(subsystem->thread, _nvmf_subsystem_destroy_msg, subsystem); 385 if (rc) { 386 SPDK_ERRLOG("Failed to send thread msg, rc %d\n", rc); 387 assert(0); 388 return rc; 389 } 390 return -EINPROGRESS; 391 } 392 393 ns = spdk_nvmf_subsystem_get_first_ns(subsystem); 394 while (ns != NULL) { 395 struct spdk_nvmf_ns *next_ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns); 396 397 spdk_nvmf_subsystem_remove_ns(subsystem, ns->opts.nsid); 398 ns = next_ns; 399 } 400 401 free(subsystem->ns); 402 free(subsystem->ana_group); 403 404 subsystem->tgt->subsystems[subsystem->id] = NULL; 405 406 pthread_mutex_destroy(&subsystem->mutex); 407 408 if (subsystem->async_destroy) { 409 async_destroy_cb = subsystem->async_destroy_cb; 410 async_destroy_cb_arg = subsystem->async_destroy_cb_arg; 411 } 412 413 free(subsystem); 414 415 if (async_destroy_cb) { 416 async_destroy_cb(async_destroy_cb_arg); 417 } 418 419 return 0; 420 } 421 422 int 423 spdk_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem, 
nvmf_subsystem_destroy_cb cpl_cb, 424 void *cpl_cb_arg) 425 { 426 struct spdk_nvmf_host *host, *host_tmp; 427 428 if (!subsystem) { 429 return -EINVAL; 430 } 431 432 assert(spdk_get_thread() == subsystem->thread); 433 434 if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) { 435 SPDK_ERRLOG("Subsystem can only be destroyed in inactive state\n"); 436 assert(0); 437 return -EAGAIN; 438 } 439 if (subsystem->destroying) { 440 SPDK_ERRLOG("Subsystem destruction is already started\n"); 441 assert(0); 442 return -EALREADY; 443 } 444 445 subsystem->destroying = true; 446 447 SPDK_DEBUGLOG(nvmf, "subsystem is %p %s\n", subsystem, subsystem->subnqn); 448 449 nvmf_subsystem_remove_all_listeners(subsystem, false); 450 451 pthread_mutex_lock(&subsystem->mutex); 452 453 TAILQ_FOREACH_SAFE(host, &subsystem->hosts, link, host_tmp) { 454 nvmf_subsystem_remove_host(subsystem, host); 455 } 456 457 pthread_mutex_unlock(&subsystem->mutex); 458 459 subsystem->async_destroy_cb = cpl_cb; 460 subsystem->async_destroy_cb_arg = cpl_cb_arg; 461 462 return _nvmf_subsystem_destroy(subsystem); 463 } 464 465 /* we have to use the typedef in the function declaration to appease astyle. 
*/ 466 typedef enum spdk_nvmf_subsystem_state spdk_nvmf_subsystem_state_t; 467 468 static spdk_nvmf_subsystem_state_t 469 nvmf_subsystem_get_intermediate_state(enum spdk_nvmf_subsystem_state current_state, 470 enum spdk_nvmf_subsystem_state requested_state) 471 { 472 switch (requested_state) { 473 case SPDK_NVMF_SUBSYSTEM_INACTIVE: 474 return SPDK_NVMF_SUBSYSTEM_DEACTIVATING; 475 case SPDK_NVMF_SUBSYSTEM_ACTIVE: 476 if (current_state == SPDK_NVMF_SUBSYSTEM_PAUSED) { 477 return SPDK_NVMF_SUBSYSTEM_RESUMING; 478 } else { 479 return SPDK_NVMF_SUBSYSTEM_ACTIVATING; 480 } 481 case SPDK_NVMF_SUBSYSTEM_PAUSED: 482 return SPDK_NVMF_SUBSYSTEM_PAUSING; 483 default: 484 assert(false); 485 return SPDK_NVMF_SUBSYSTEM_NUM_STATES; 486 } 487 } 488 489 static int 490 nvmf_subsystem_set_state(struct spdk_nvmf_subsystem *subsystem, 491 enum spdk_nvmf_subsystem_state state) 492 { 493 enum spdk_nvmf_subsystem_state actual_old_state, expected_old_state; 494 bool exchanged; 495 496 switch (state) { 497 case SPDK_NVMF_SUBSYSTEM_INACTIVE: 498 expected_old_state = SPDK_NVMF_SUBSYSTEM_DEACTIVATING; 499 break; 500 case SPDK_NVMF_SUBSYSTEM_ACTIVATING: 501 expected_old_state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 502 break; 503 case SPDK_NVMF_SUBSYSTEM_ACTIVE: 504 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING; 505 break; 506 case SPDK_NVMF_SUBSYSTEM_PAUSING: 507 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 508 break; 509 case SPDK_NVMF_SUBSYSTEM_PAUSED: 510 expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSING; 511 break; 512 case SPDK_NVMF_SUBSYSTEM_RESUMING: 513 expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSED; 514 break; 515 case SPDK_NVMF_SUBSYSTEM_DEACTIVATING: 516 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 517 break; 518 default: 519 assert(false); 520 return -1; 521 } 522 523 actual_old_state = expected_old_state; 524 exchanged = __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false, 525 __ATOMIC_RELAXED, __ATOMIC_RELAXED); 526 if (spdk_unlikely(exchanged == 
false)) { 527 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING && 528 state == SPDK_NVMF_SUBSYSTEM_ACTIVE) { 529 expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING; 530 } 531 /* This is for the case when activating the subsystem fails. */ 532 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_ACTIVATING && 533 state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING) { 534 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING; 535 } 536 /* This is for the case when resuming the subsystem fails. */ 537 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING && 538 state == SPDK_NVMF_SUBSYSTEM_PAUSING) { 539 expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING; 540 } 541 /* This is for the case when stopping paused subsystem */ 542 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_PAUSED && 543 state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING) { 544 expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSED; 545 } 546 actual_old_state = expected_old_state; 547 __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false, 548 __ATOMIC_RELAXED, __ATOMIC_RELAXED); 549 } 550 assert(actual_old_state == expected_old_state); 551 return actual_old_state - expected_old_state; 552 } 553 554 struct subsystem_state_change_ctx { 555 struct spdk_nvmf_subsystem *subsystem; 556 uint16_t nsid; 557 558 enum spdk_nvmf_subsystem_state original_state; 559 enum spdk_nvmf_subsystem_state requested_state; 560 561 spdk_nvmf_subsystem_state_change_done cb_fn; 562 void *cb_arg; 563 }; 564 565 static void 566 subsystem_state_change_revert_done(struct spdk_io_channel_iter *i, int status) 567 { 568 struct subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 569 570 /* Nothing to be done here if the state setting fails, we are just screwed. */ 571 if (nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state)) { 572 SPDK_ERRLOG("Unable to revert the subsystem state after operation failure.\n"); 573 } 574 575 ctx->subsystem->changing_state = false; 576 if (ctx->cb_fn) { 577 /* return a failure here. 
This function only exists in an error path. */ 578 ctx->cb_fn(ctx->subsystem, ctx->cb_arg, -1); 579 } 580 free(ctx); 581 } 582 583 static void 584 subsystem_state_change_done(struct spdk_io_channel_iter *i, int status) 585 { 586 struct subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 587 enum spdk_nvmf_subsystem_state intermediate_state; 588 589 SPDK_DTRACE_PROBE4(nvmf_subsystem_change_state_done, ctx->subsystem->subnqn, 590 ctx->requested_state, ctx->original_state, status); 591 592 if (status == 0) { 593 status = nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state); 594 if (status) { 595 status = -1; 596 } 597 } 598 599 if (status) { 600 intermediate_state = nvmf_subsystem_get_intermediate_state(ctx->requested_state, 601 ctx->original_state); 602 assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES); 603 604 if (nvmf_subsystem_set_state(ctx->subsystem, intermediate_state)) { 605 goto out; 606 } 607 ctx->requested_state = ctx->original_state; 608 spdk_for_each_channel(ctx->subsystem->tgt, 609 subsystem_state_change_on_pg, 610 ctx, 611 subsystem_state_change_revert_done); 612 return; 613 } 614 615 out: 616 ctx->subsystem->changing_state = false; 617 if (ctx->cb_fn) { 618 ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status); 619 } 620 free(ctx); 621 } 622 623 static void 624 subsystem_state_change_continue(void *ctx, int status) 625 { 626 struct spdk_io_channel_iter *i = ctx; 627 struct subsystem_state_change_ctx *_ctx __attribute__((unused)); 628 629 _ctx = spdk_io_channel_iter_get_ctx(i); 630 SPDK_DTRACE_PROBE3(nvmf_pg_change_state_done, _ctx->subsystem->subnqn, 631 _ctx->requested_state, spdk_thread_get_id(spdk_get_thread())); 632 633 spdk_for_each_channel_continue(i, status); 634 } 635 636 static void 637 subsystem_state_change_on_pg(struct spdk_io_channel_iter *i) 638 { 639 struct subsystem_state_change_ctx *ctx; 640 struct spdk_io_channel *ch; 641 struct spdk_nvmf_poll_group *group; 642 643 ctx = spdk_io_channel_iter_get_ctx(i); 
644 ch = spdk_io_channel_iter_get_channel(i); 645 group = spdk_io_channel_get_ctx(ch); 646 647 SPDK_DTRACE_PROBE3(nvmf_pg_change_state, ctx->subsystem->subnqn, 648 ctx->requested_state, spdk_thread_get_id(spdk_get_thread())); 649 switch (ctx->requested_state) { 650 case SPDK_NVMF_SUBSYSTEM_INACTIVE: 651 nvmf_poll_group_remove_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i); 652 break; 653 case SPDK_NVMF_SUBSYSTEM_ACTIVE: 654 if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVATING) { 655 nvmf_poll_group_add_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i); 656 } else if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_RESUMING) { 657 nvmf_poll_group_resume_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i); 658 } 659 break; 660 case SPDK_NVMF_SUBSYSTEM_PAUSED: 661 nvmf_poll_group_pause_subsystem(group, ctx->subsystem, ctx->nsid, subsystem_state_change_continue, 662 i); 663 break; 664 default: 665 assert(false); 666 break; 667 } 668 } 669 670 static int 671 nvmf_subsystem_state_change(struct spdk_nvmf_subsystem *subsystem, 672 uint32_t nsid, 673 enum spdk_nvmf_subsystem_state requested_state, 674 spdk_nvmf_subsystem_state_change_done cb_fn, 675 void *cb_arg) 676 { 677 struct subsystem_state_change_ctx *ctx; 678 enum spdk_nvmf_subsystem_state intermediate_state; 679 int rc; 680 681 if (__sync_val_compare_and_swap(&subsystem->changing_state, false, true)) { 682 return -EBUSY; 683 } 684 685 SPDK_DTRACE_PROBE3(nvmf_subsystem_change_state, subsystem->subnqn, 686 requested_state, subsystem->state); 687 /* If we are already in the requested state, just call the callback immediately. 
*/ 688 if (subsystem->state == requested_state) { 689 subsystem->changing_state = false; 690 if (cb_fn) { 691 cb_fn(subsystem, cb_arg, 0); 692 } 693 return 0; 694 } 695 696 intermediate_state = nvmf_subsystem_get_intermediate_state(subsystem->state, requested_state); 697 assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES); 698 699 ctx = calloc(1, sizeof(*ctx)); 700 if (!ctx) { 701 subsystem->changing_state = false; 702 return -ENOMEM; 703 } 704 705 ctx->original_state = subsystem->state; 706 rc = nvmf_subsystem_set_state(subsystem, intermediate_state); 707 if (rc) { 708 free(ctx); 709 subsystem->changing_state = false; 710 return rc; 711 } 712 713 ctx->subsystem = subsystem; 714 ctx->nsid = nsid; 715 ctx->requested_state = requested_state; 716 ctx->cb_fn = cb_fn; 717 ctx->cb_arg = cb_arg; 718 719 spdk_for_each_channel(subsystem->tgt, 720 subsystem_state_change_on_pg, 721 ctx, 722 subsystem_state_change_done); 723 724 return 0; 725 } 726 727 int 728 spdk_nvmf_subsystem_start(struct spdk_nvmf_subsystem *subsystem, 729 spdk_nvmf_subsystem_state_change_done cb_fn, 730 void *cb_arg) 731 { 732 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg); 733 } 734 735 int 736 spdk_nvmf_subsystem_stop(struct spdk_nvmf_subsystem *subsystem, 737 spdk_nvmf_subsystem_state_change_done cb_fn, 738 void *cb_arg) 739 { 740 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_INACTIVE, cb_fn, cb_arg); 741 } 742 743 int 744 spdk_nvmf_subsystem_pause(struct spdk_nvmf_subsystem *subsystem, 745 uint32_t nsid, 746 spdk_nvmf_subsystem_state_change_done cb_fn, 747 void *cb_arg) 748 { 749 return nvmf_subsystem_state_change(subsystem, nsid, SPDK_NVMF_SUBSYSTEM_PAUSED, cb_fn, cb_arg); 750 } 751 752 int 753 spdk_nvmf_subsystem_resume(struct spdk_nvmf_subsystem *subsystem, 754 spdk_nvmf_subsystem_state_change_done cb_fn, 755 void *cb_arg) 756 { 757 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, 
cb_arg); 758 } 759 760 struct spdk_nvmf_subsystem * 761 spdk_nvmf_subsystem_get_first(struct spdk_nvmf_tgt *tgt) 762 { 763 struct spdk_nvmf_subsystem *subsystem; 764 uint32_t sid; 765 766 for (sid = 0; sid < tgt->max_subsystems; sid++) { 767 subsystem = tgt->subsystems[sid]; 768 if (subsystem) { 769 return subsystem; 770 } 771 } 772 773 return NULL; 774 } 775 776 struct spdk_nvmf_subsystem * 777 spdk_nvmf_subsystem_get_next(struct spdk_nvmf_subsystem *subsystem) 778 { 779 uint32_t sid; 780 struct spdk_nvmf_tgt *tgt; 781 782 if (!subsystem) { 783 return NULL; 784 } 785 786 tgt = subsystem->tgt; 787 788 for (sid = subsystem->id + 1; sid < tgt->max_subsystems; sid++) { 789 subsystem = tgt->subsystems[sid]; 790 if (subsystem) { 791 return subsystem; 792 } 793 } 794 795 return NULL; 796 } 797 798 /* Must hold subsystem->mutex while calling this function */ 799 static struct spdk_nvmf_host * 800 nvmf_subsystem_find_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn) 801 { 802 struct spdk_nvmf_host *host = NULL; 803 804 TAILQ_FOREACH(host, &subsystem->hosts, link) { 805 if (strcmp(hostnqn, host->nqn) == 0) { 806 return host; 807 } 808 } 809 810 return NULL; 811 } 812 813 int 814 spdk_nvmf_subsystem_add_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn) 815 { 816 struct spdk_nvmf_host *host; 817 818 if (!nvmf_valid_nqn(hostnqn)) { 819 return -EINVAL; 820 } 821 822 pthread_mutex_lock(&subsystem->mutex); 823 824 if (nvmf_subsystem_find_host(subsystem, hostnqn)) { 825 /* This subsystem already allows the specified host. 
*/ 826 pthread_mutex_unlock(&subsystem->mutex); 827 return 0; 828 } 829 830 host = calloc(1, sizeof(*host)); 831 if (!host) { 832 pthread_mutex_unlock(&subsystem->mutex); 833 return -ENOMEM; 834 } 835 836 snprintf(host->nqn, sizeof(host->nqn), "%s", hostnqn); 837 838 TAILQ_INSERT_HEAD(&subsystem->hosts, host, link); 839 840 if (!TAILQ_EMPTY(&subsystem->listeners)) { 841 nvmf_update_discovery_log(subsystem->tgt, hostnqn); 842 } 843 844 pthread_mutex_unlock(&subsystem->mutex); 845 846 return 0; 847 } 848 849 int 850 spdk_nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn) 851 { 852 struct spdk_nvmf_host *host; 853 854 pthread_mutex_lock(&subsystem->mutex); 855 856 host = nvmf_subsystem_find_host(subsystem, hostnqn); 857 if (host == NULL) { 858 pthread_mutex_unlock(&subsystem->mutex); 859 return -ENOENT; 860 } 861 862 nvmf_subsystem_remove_host(subsystem, host); 863 pthread_mutex_unlock(&subsystem->mutex); 864 865 return 0; 866 } 867 868 struct nvmf_subsystem_disconnect_host_ctx { 869 struct spdk_nvmf_subsystem *subsystem; 870 char *hostnqn; 871 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn; 872 void *cb_arg; 873 }; 874 875 static void 876 nvmf_subsystem_disconnect_host_fini(struct spdk_io_channel_iter *i, int status) 877 { 878 struct nvmf_subsystem_disconnect_host_ctx *ctx; 879 880 ctx = spdk_io_channel_iter_get_ctx(i); 881 882 if (ctx->cb_fn) { 883 ctx->cb_fn(ctx->cb_arg, status); 884 } 885 free(ctx->hostnqn); 886 free(ctx); 887 } 888 889 static void 890 nvmf_subsystem_disconnect_qpairs_by_host(struct spdk_io_channel_iter *i) 891 { 892 struct nvmf_subsystem_disconnect_host_ctx *ctx; 893 struct spdk_nvmf_poll_group *group; 894 struct spdk_io_channel *ch; 895 struct spdk_nvmf_qpair *qpair, *tmp_qpair; 896 struct spdk_nvmf_ctrlr *ctrlr; 897 898 ctx = spdk_io_channel_iter_get_ctx(i); 899 ch = spdk_io_channel_iter_get_channel(i); 900 group = spdk_io_channel_get_ctx(ch); 901 902 TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) { 
903 ctrlr = qpair->ctrlr; 904 905 if (ctrlr == NULL || ctrlr->subsys != ctx->subsystem) { 906 continue; 907 } 908 909 if (strncmp(ctrlr->hostnqn, ctx->hostnqn, sizeof(ctrlr->hostnqn)) == 0) { 910 /* Right now this does not wait for the queue pairs to actually disconnect. */ 911 spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 912 } 913 } 914 spdk_for_each_channel_continue(i, 0); 915 } 916 917 int 918 spdk_nvmf_subsystem_disconnect_host(struct spdk_nvmf_subsystem *subsystem, 919 const char *hostnqn, 920 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, 921 void *cb_arg) 922 { 923 struct nvmf_subsystem_disconnect_host_ctx *ctx; 924 925 ctx = calloc(1, sizeof(struct nvmf_subsystem_disconnect_host_ctx)); 926 if (ctx == NULL) { 927 return -ENOMEM; 928 } 929 930 ctx->hostnqn = strdup(hostnqn); 931 if (ctx->hostnqn == NULL) { 932 free(ctx); 933 return -ENOMEM; 934 } 935 936 ctx->subsystem = subsystem; 937 ctx->cb_fn = cb_fn; 938 ctx->cb_arg = cb_arg; 939 940 spdk_for_each_channel(subsystem->tgt, nvmf_subsystem_disconnect_qpairs_by_host, ctx, 941 nvmf_subsystem_disconnect_host_fini); 942 943 return 0; 944 } 945 946 int 947 spdk_nvmf_subsystem_set_allow_any_host(struct spdk_nvmf_subsystem *subsystem, bool allow_any_host) 948 { 949 pthread_mutex_lock(&subsystem->mutex); 950 subsystem->flags.allow_any_host = allow_any_host; 951 if (!TAILQ_EMPTY(&subsystem->listeners)) { 952 nvmf_update_discovery_log(subsystem->tgt, NULL); 953 } 954 pthread_mutex_unlock(&subsystem->mutex); 955 956 return 0; 957 } 958 959 bool 960 spdk_nvmf_subsystem_get_allow_any_host(const struct spdk_nvmf_subsystem *subsystem) 961 { 962 bool allow_any_host; 963 struct spdk_nvmf_subsystem *sub; 964 965 /* Technically, taking the mutex modifies data in the subsystem. But the const 966 * is still important to convey that this doesn't mutate any other data. Cast 967 * it away to work around this. 
*/ 968 sub = (struct spdk_nvmf_subsystem *)subsystem; 969 970 pthread_mutex_lock(&sub->mutex); 971 allow_any_host = sub->flags.allow_any_host; 972 pthread_mutex_unlock(&sub->mutex); 973 974 return allow_any_host; 975 } 976 977 bool 978 spdk_nvmf_subsystem_host_allowed(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn) 979 { 980 bool allowed; 981 982 if (!hostnqn) { 983 return false; 984 } 985 986 pthread_mutex_lock(&subsystem->mutex); 987 988 if (subsystem->flags.allow_any_host) { 989 pthread_mutex_unlock(&subsystem->mutex); 990 return true; 991 } 992 993 allowed = nvmf_subsystem_find_host(subsystem, hostnqn) != NULL; 994 pthread_mutex_unlock(&subsystem->mutex); 995 996 return allowed; 997 } 998 999 struct spdk_nvmf_host * 1000 spdk_nvmf_subsystem_get_first_host(struct spdk_nvmf_subsystem *subsystem) 1001 { 1002 return TAILQ_FIRST(&subsystem->hosts); 1003 } 1004 1005 1006 struct spdk_nvmf_host * 1007 spdk_nvmf_subsystem_get_next_host(struct spdk_nvmf_subsystem *subsystem, 1008 struct spdk_nvmf_host *prev_host) 1009 { 1010 return TAILQ_NEXT(prev_host, link); 1011 } 1012 1013 const char * 1014 spdk_nvmf_host_get_nqn(const struct spdk_nvmf_host *host) 1015 { 1016 return host->nqn; 1017 } 1018 1019 struct spdk_nvmf_subsystem_listener * 1020 nvmf_subsystem_find_listener(struct spdk_nvmf_subsystem *subsystem, 1021 const struct spdk_nvme_transport_id *trid) 1022 { 1023 struct spdk_nvmf_subsystem_listener *listener; 1024 1025 TAILQ_FOREACH(listener, &subsystem->listeners, link) { 1026 if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) { 1027 return listener; 1028 } 1029 } 1030 1031 return NULL; 1032 } 1033 1034 /** 1035 * Function to be called once the target is listening. 1036 * 1037 * \param ctx Context argument passed to this function. 1038 * \param status 0 if it completed successfully, or negative errno if it failed. 
1039 */ 1040 static void 1041 _nvmf_subsystem_add_listener_done(void *ctx, int status) 1042 { 1043 struct spdk_nvmf_subsystem_listener *listener = ctx; 1044 1045 if (status) { 1046 listener->cb_fn(listener->cb_arg, status); 1047 free(listener); 1048 return; 1049 } 1050 1051 TAILQ_INSERT_HEAD(&listener->subsystem->listeners, listener, link); 1052 nvmf_update_discovery_log(listener->subsystem->tgt, NULL); 1053 listener->cb_fn(listener->cb_arg, status); 1054 } 1055 1056 void 1057 spdk_nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem, 1058 struct spdk_nvme_transport_id *trid, 1059 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, 1060 void *cb_arg) 1061 { 1062 struct spdk_nvmf_transport *transport; 1063 struct spdk_nvmf_subsystem_listener *listener; 1064 struct spdk_nvmf_listener *tr_listener; 1065 uint32_t i; 1066 int rc = 0; 1067 1068 assert(cb_fn != NULL); 1069 1070 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 1071 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 1072 cb_fn(cb_arg, -EAGAIN); 1073 return; 1074 } 1075 1076 if (nvmf_subsystem_find_listener(subsystem, trid)) { 1077 /* Listener already exists in this subsystem */ 1078 cb_fn(cb_arg, 0); 1079 return; 1080 } 1081 1082 transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, trid->trstring); 1083 if (!transport) { 1084 SPDK_ERRLOG("Unable to find %s transport. 
The transport must be created first also make sure it is properly registered.\n", 1085 trid->trstring); 1086 cb_fn(cb_arg, -EINVAL); 1087 return; 1088 } 1089 1090 tr_listener = nvmf_transport_find_listener(transport, trid); 1091 if (!tr_listener) { 1092 SPDK_ERRLOG("Cannot find transport listener for %s\n", trid->traddr); 1093 cb_fn(cb_arg, -EINVAL); 1094 return; 1095 } 1096 1097 listener = calloc(1, sizeof(*listener)); 1098 if (!listener) { 1099 cb_fn(cb_arg, -ENOMEM); 1100 return; 1101 } 1102 1103 listener->trid = &tr_listener->trid; 1104 listener->transport = transport; 1105 listener->cb_fn = cb_fn; 1106 listener->cb_arg = cb_arg; 1107 listener->subsystem = subsystem; 1108 listener->ana_state = calloc(subsystem->max_nsid, sizeof(enum spdk_nvme_ana_state)); 1109 if (!listener->ana_state) { 1110 free(listener); 1111 cb_fn(cb_arg, -ENOMEM); 1112 return; 1113 } 1114 1115 for (i = 0; i < subsystem->max_nsid; i++) { 1116 listener->ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE; 1117 } 1118 1119 if (transport->ops->listen_associate != NULL) { 1120 rc = transport->ops->listen_associate(transport, subsystem, trid); 1121 } 1122 1123 _nvmf_subsystem_add_listener_done(listener, rc); 1124 } 1125 1126 int 1127 spdk_nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem, 1128 const struct spdk_nvme_transport_id *trid) 1129 { 1130 struct spdk_nvmf_subsystem_listener *listener; 1131 1132 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 1133 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 1134 return -EAGAIN; 1135 } 1136 1137 listener = nvmf_subsystem_find_listener(subsystem, trid); 1138 if (listener == NULL) { 1139 return -ENOENT; 1140 } 1141 1142 _nvmf_subsystem_remove_listener(subsystem, listener, false); 1143 1144 return 0; 1145 } 1146 1147 void 1148 nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem, 1149 bool stop) 1150 { 1151 struct spdk_nvmf_subsystem_listener *listener, *listener_tmp; 1152 1153 TAILQ_FOREACH_SAFE(listener, 
&subsystem->listeners, link, listener_tmp) { 1154 _nvmf_subsystem_remove_listener(subsystem, listener, stop); 1155 } 1156 } 1157 1158 bool 1159 spdk_nvmf_subsystem_listener_allowed(struct spdk_nvmf_subsystem *subsystem, 1160 const struct spdk_nvme_transport_id *trid) 1161 { 1162 struct spdk_nvmf_subsystem_listener *listener; 1163 1164 if (!strcmp(subsystem->subnqn, SPDK_NVMF_DISCOVERY_NQN)) { 1165 return true; 1166 } 1167 1168 TAILQ_FOREACH(listener, &subsystem->listeners, link) { 1169 if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) { 1170 return true; 1171 } 1172 } 1173 1174 return false; 1175 } 1176 1177 struct spdk_nvmf_subsystem_listener * 1178 spdk_nvmf_subsystem_get_first_listener(struct spdk_nvmf_subsystem *subsystem) 1179 { 1180 return TAILQ_FIRST(&subsystem->listeners); 1181 } 1182 1183 struct spdk_nvmf_subsystem_listener * 1184 spdk_nvmf_subsystem_get_next_listener(struct spdk_nvmf_subsystem *subsystem, 1185 struct spdk_nvmf_subsystem_listener *prev_listener) 1186 { 1187 return TAILQ_NEXT(prev_listener, link); 1188 } 1189 1190 const struct spdk_nvme_transport_id * 1191 spdk_nvmf_subsystem_listener_get_trid(struct spdk_nvmf_subsystem_listener *listener) 1192 { 1193 return listener->trid; 1194 } 1195 1196 void 1197 spdk_nvmf_subsystem_allow_any_listener(struct spdk_nvmf_subsystem *subsystem, 1198 bool allow_any_listener) 1199 { 1200 subsystem->flags.allow_any_listener = allow_any_listener; 1201 } 1202 1203 bool 1204 spdk_nvmf_subsytem_any_listener_allowed(struct spdk_nvmf_subsystem *subsystem) 1205 { 1206 return subsystem->flags.allow_any_listener; 1207 } 1208 1209 1210 struct subsystem_update_ns_ctx { 1211 struct spdk_nvmf_subsystem *subsystem; 1212 1213 spdk_nvmf_subsystem_state_change_done cb_fn; 1214 void *cb_arg; 1215 }; 1216 1217 static void 1218 subsystem_update_ns_done(struct spdk_io_channel_iter *i, int status) 1219 { 1220 struct subsystem_update_ns_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1221 1222 if (ctx->cb_fn) { 1223 
ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status); 1224 } 1225 free(ctx); 1226 } 1227 1228 static void 1229 subsystem_update_ns_on_pg(struct spdk_io_channel_iter *i) 1230 { 1231 int rc; 1232 struct subsystem_update_ns_ctx *ctx; 1233 struct spdk_nvmf_poll_group *group; 1234 struct spdk_nvmf_subsystem *subsystem; 1235 1236 ctx = spdk_io_channel_iter_get_ctx(i); 1237 group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i)); 1238 subsystem = ctx->subsystem; 1239 1240 rc = nvmf_poll_group_update_subsystem(group, subsystem); 1241 spdk_for_each_channel_continue(i, rc); 1242 } 1243 1244 static int 1245 nvmf_subsystem_update_ns(struct spdk_nvmf_subsystem *subsystem, spdk_channel_for_each_cpl cpl, 1246 void *ctx) 1247 { 1248 spdk_for_each_channel(subsystem->tgt, 1249 subsystem_update_ns_on_pg, 1250 ctx, 1251 cpl); 1252 1253 return 0; 1254 } 1255 1256 static void 1257 nvmf_subsystem_ns_changed(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid) 1258 { 1259 struct spdk_nvmf_ctrlr *ctrlr; 1260 1261 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 1262 nvmf_ctrlr_ns_changed(ctrlr, nsid); 1263 } 1264 } 1265 1266 static uint32_t 1267 nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns); 1268 1269 int 1270 spdk_nvmf_subsystem_remove_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid) 1271 { 1272 struct spdk_nvmf_transport *transport; 1273 struct spdk_nvmf_ns *ns; 1274 1275 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 1276 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 1277 assert(false); 1278 return -1; 1279 } 1280 1281 if (nsid == 0 || nsid > subsystem->max_nsid) { 1282 return -1; 1283 } 1284 1285 ns = subsystem->ns[nsid - 1]; 1286 if (!ns) { 1287 return -1; 1288 } 1289 1290 subsystem->ns[nsid - 1] = NULL; 1291 1292 assert(ns->anagrpid - 1 < subsystem->max_nsid); 1293 assert(subsystem->ana_group[ns->anagrpid - 1] > 0); 1294 1295 subsystem->ana_group[ns->anagrpid - 1]--; 1296 1297 free(ns->ptpl_file); 1298 
	nvmf_ns_reservation_clear_all_registrants(ns);
	spdk_bdev_module_release_bdev(ns->bdev);
	spdk_bdev_close(ns->desc);
	free(ns);

	/* Let each transport drop its per-namespace resources. */
	for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
	     transport = spdk_nvmf_transport_get_next(transport)) {
		if (transport->ops->subsystem_remove_ns) {
			transport->ops->subsystem_remove_ns(transport, subsystem, nsid);
		}
	}

	nvmf_subsystem_ns_changed(subsystem, nsid);

	return 0;
}

/* Context for pausing a subsystem to process an asynchronous namespace
 * hot-remove or resize event. */
struct subsystem_ns_change_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	spdk_nvmf_subsystem_state_change_done cb_fn;
	uint32_t nsid;
};

/* Pause-completion callback for hot remove: detach the namespace, resume the
 * subsystem, and free the context. */
static void
_nvmf_ns_hot_remove(struct spdk_nvmf_subsystem *subsystem,
		    void *cb_arg, int status)
{
	struct subsystem_ns_change_ctx *ctx = cb_arg;
	int rc;

	rc = spdk_nvmf_subsystem_remove_ns(subsystem, ctx->nsid);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to make changes to NVME-oF subsystem with id: %u\n", subsystem->id);
	}

	spdk_nvmf_subsystem_resume(subsystem, NULL, NULL);

	free(ctx);
}

/* Retry helper: re-attempt the subsystem pause on the current thread until it
 * stops returning -EBUSY; any other error abandons the operation.
 * NOTE(review): this retry always passes ctx->nsid to the pause call, while
 * the resize path's first attempt deliberately passes 0 (no namespace quiesce
 * needed) — confirm whether the retry should preserve that distinction. */
static void
nvmf_ns_change_msg(void *ns_ctx)
{
	struct subsystem_ns_change_ctx *ctx = ns_ctx;
	int rc;

	rc = spdk_nvmf_subsystem_pause(ctx->subsystem, ctx->nsid, ctx->cb_fn, ctx);
	if (rc) {
		if (rc == -EBUSY) {
			/* Try again, this is not a permanent situation. */
			spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ctx);
		} else {
			free(ctx);
			SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n");
		}
	}
}

/* Bdev "remove" event handler: pause the subsystem (quiescing the namespace),
 * then remove the namespace from the pause-completion callback. */
static void
nvmf_ns_hot_remove(void *remove_ctx)
{
	struct spdk_nvmf_ns *ns = remove_ctx;
	struct subsystem_ns_change_ctx *ns_ctx;
	int rc;

	/* We have to allocate a new context because this op
	 * is asynchronous and we could lose the ns in the middle.
	 */
	ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx));
	if (!ns_ctx) {
		SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n");
		return;
	}

	ns_ctx->subsystem = ns->subsystem;
	ns_ctx->nsid = ns->opts.nsid;
	ns_ctx->cb_fn = _nvmf_ns_hot_remove;

	rc = spdk_nvmf_subsystem_pause(ns->subsystem, ns_ctx->nsid, _nvmf_ns_hot_remove, ns_ctx);
	if (rc) {
		if (rc == -EBUSY) {
			/* Try again, this is not a permanent situation. */
			spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx);
		} else {
			SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n");
			free(ns_ctx);
		}
	}
}

/* Pause-completion callback for resize: notify controllers of the change,
 * resume the subsystem, and free the context. */
static void
_nvmf_ns_resize(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	struct subsystem_ns_change_ctx *ctx = cb_arg;

	nvmf_subsystem_ns_changed(subsystem, ctx->nsid);
	spdk_nvmf_subsystem_resume(subsystem, NULL, NULL);

	free(ctx);
}

/* Bdev "resize" event handler: pause the subsystem (without quiescing any
 * namespace) and propagate the size change from the completion callback. */
static void
nvmf_ns_resize(void *event_ctx)
{
	struct spdk_nvmf_ns *ns = event_ctx;
	struct subsystem_ns_change_ctx *ns_ctx;
	int rc;

	/* We have to allocate a new context because this op
	 * is asynchronous and we could lose the ns in the middle.
	 */
	ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx));
	if (!ns_ctx) {
		SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n");
		return;
	}

	ns_ctx->subsystem = ns->subsystem;
	ns_ctx->nsid = ns->opts.nsid;
	ns_ctx->cb_fn = _nvmf_ns_resize;

	/* Specify 0 for the nsid here, because we do not need to pause the namespace.
	 * Namespaces can only be resized bigger, so there is no need to quiesce I/O.
	 */
	rc = spdk_nvmf_subsystem_pause(ns->subsystem, 0, _nvmf_ns_resize, ns_ctx);
	if (rc) {
		if (rc == -EBUSY) {
			/* Try again, this is not a permanent situation. */
			spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx);
		} else {
			SPDK_ERRLOG("Unable to pause subsystem to process namespace resize!\n");
			free(ns_ctx);
		}
	}
}

/* Dispatch bdev events for a namespace's backing device. */
static void
nvmf_ns_event(enum spdk_bdev_event_type type,
	      struct spdk_bdev *bdev,
	      void *event_ctx)
{
	SPDK_DEBUGLOG(nvmf, "Bdev event: type %d, name %s, subsystem_id %d, ns_id %d\n",
		      type,
		      spdk_bdev_get_name(bdev),
		      ((struct spdk_nvmf_ns *)event_ctx)->subsystem->id,
		      ((struct spdk_nvmf_ns *)event_ctx)->nsid);

	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		nvmf_ns_hot_remove(event_ctx);
		break;
	case SPDK_BDEV_EVENT_RESIZE:
		nvmf_ns_resize(event_ctx);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

/* Initialize a caller-provided spdk_nvmf_ns_opts to defaults; opts_size allows
 * forward/backward ABI compatibility (only fields that fit are touched). */
void
spdk_nvmf_ns_opts_get_defaults(struct spdk_nvmf_ns_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_nvmf_ns_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	/* All current fields are set to 0 by default.
	 */
	SET_FIELD(nsid, 0);
	if (FIELD_OK(nguid)) {
		memset(opts->nguid, 0, sizeof(opts->nguid));
	}
	if (FIELD_OK(eui64)) {
		memset(opts->eui64, 0, sizeof(opts->eui64));
	}
	if (FIELD_OK(uuid)) {
		memset(&opts->uuid, 0, sizeof(opts->uuid));
	}
	SET_FIELD(anagrpid, 0);

#undef FIELD_OK
#undef SET_FIELD
}

/* Copy user-supplied namespace options into opts, honoring the caller's
 * declared opts_size so older callers with a smaller struct remain safe. */
static void
nvmf_ns_opts_copy(struct spdk_nvmf_ns_opts *opts,
		  const struct spdk_nvmf_ns_opts *user_opts,
		  size_t opts_size)
{
#define FIELD_OK(field) \
	offsetof(struct spdk_nvmf_ns_opts, field) + sizeof(opts->field) <= user_opts->opts_size

#define SET_FIELD(field) \
	if (FIELD_OK(field)) { \
		opts->field = user_opts->field; \
	} \

	SET_FIELD(nsid);
	if (FIELD_OK(nguid)) {
		memcpy(opts->nguid, user_opts->nguid, sizeof(opts->nguid));
	}
	if (FIELD_OK(eui64)) {
		memcpy(opts->eui64, user_opts->eui64, sizeof(opts->eui64));
	}
	if (FIELD_OK(uuid)) {
		memcpy(&opts->uuid, &user_opts->uuid, sizeof(opts->uuid));
	}
	SET_FIELD(anagrpid);

	opts->opts_size = user_opts->opts_size;

	/* We should not remove this statement, but need to update the assert statement
	 * if we add a new field, and also add a corresponding SET_FIELD statement.
	 */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_ns_opts) == 64, "Incorrect size");

#undef FIELD_OK
#undef SET_FIELD
}

/* Dummy bdev module used to claim bdevs.
 */
static struct spdk_bdev_module ns_bdev_module = {
	.name	= "NVMe-oF Target",
};

static int
nvmf_ns_load_reservation(const char *file, struct spdk_nvmf_reservation_info *info);
static int
nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info);

/* Add a namespace backed by bdev_name to the subsystem.
 * The subsystem must be INACTIVE or PAUSED. If user_opts->nsid is 0 the first
 * free NSID is chosen. ptpl_file (optional) names the reservation
 * persist-through-power-loss file; existing reservation state is restored
 * from it. Returns the assigned NSID, or 0 on any failure. */
uint32_t
spdk_nvmf_subsystem_add_ns_ext(struct spdk_nvmf_subsystem *subsystem, const char *bdev_name,
			       const struct spdk_nvmf_ns_opts *user_opts, size_t opts_size,
			       const char *ptpl_file)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_ns_opts opts;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_reservation_info info = {0};
	int rc;

	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		return 0;
	}

	spdk_nvmf_ns_opts_get_defaults(&opts, sizeof(opts));
	if (user_opts) {
		nvmf_ns_opts_copy(&opts, user_opts, opts_size);
	}

	if (opts.nsid == SPDK_NVME_GLOBAL_NS_TAG) {
		SPDK_ERRLOG("Invalid NSID %" PRIu32 "\n", opts.nsid);
		return 0;
	}

	if (opts.nsid == 0) {
		/*
		 * NSID not specified - find a free index.
		 *
		 * If no free slots are found, opts.nsid will be subsystem->max_nsid + 1, which will
		 * expand max_nsid if possible.
		 */
		for (opts.nsid = 1; opts.nsid <= subsystem->max_nsid; opts.nsid++) {
			if (_nvmf_subsystem_get_ns(subsystem, opts.nsid) == NULL) {
				break;
			}
		}
	}

	if (_nvmf_subsystem_get_ns(subsystem, opts.nsid)) {
		SPDK_ERRLOG("Requested NSID %" PRIu32 " already in use\n", opts.nsid);
		return 0;
	}

	if (opts.nsid > subsystem->max_nsid) {
		SPDK_ERRLOG("NSID greater than maximum not allowed\n");
		return 0;
	}

	/* ANA group defaults to a one-namespace group matching the NSID. */
	if (opts.anagrpid == 0) {
		opts.anagrpid = opts.nsid;
	}

	if (opts.anagrpid > subsystem->max_nsid) {
		SPDK_ERRLOG("ANAGRPID greater than maximum NSID not allowed\n");
		return 0;
	}

	ns = calloc(1, sizeof(*ns));
	if (ns == NULL) {
		SPDK_ERRLOG("Namespace allocation failed\n");
		return 0;
	}

	rc = spdk_bdev_open_ext(bdev_name, true, nvmf_ns_event, ns, &ns->desc);
	if (rc != 0) {
		SPDK_ERRLOG("Subsystem %s: bdev %s cannot be opened, error=%d\n",
			    subsystem->subnqn, bdev_name, rc);
		free(ns);
		return 0;
	}

	ns->bdev = spdk_bdev_desc_get_bdev(ns->desc);

	if (spdk_bdev_get_md_size(ns->bdev) != 0 && !spdk_bdev_is_md_interleaved(ns->bdev)) {
		SPDK_ERRLOG("Can't attach bdev with separate metadata.\n");
		spdk_bdev_close(ns->desc);
		free(ns);
		return 0;
	}

	/* Claim exclusive ownership of the bdev for the NVMe-oF target. */
	rc = spdk_bdev_module_claim_bdev(ns->bdev, ns->desc, &ns_bdev_module);
	if (rc != 0) {
		spdk_bdev_close(ns->desc);
		free(ns);
		return 0;
	}

	/* Cache the zcopy capability of the bdev device */
	ns->zcopy = spdk_bdev_io_type_supported(ns->bdev, SPDK_BDEV_IO_TYPE_ZCOPY);

	if (spdk_mem_all_zero(&opts.uuid, sizeof(opts.uuid))) {
		opts.uuid = *spdk_bdev_get_uuid(ns->bdev);
	}

	/* if nguid descriptor is supported by bdev module (nvme) then uuid = nguid */
	if (spdk_mem_all_zero(opts.nguid, sizeof(opts.nguid))) {
		SPDK_STATIC_ASSERT(sizeof(opts.nguid) == sizeof(opts.uuid), "size mismatch");
		memcpy(opts.nguid, spdk_bdev_get_uuid(ns->bdev), sizeof(opts.nguid));
	}

	ns->opts = opts;
	ns->subsystem = subsystem;
	subsystem->ns[opts.nsid - 1] = ns;
	ns->nsid = opts.nsid;
	ns->anagrpid = opts.anagrpid;
	subsystem->ana_group[ns->anagrpid - 1]++;
	TAILQ_INIT(&ns->registrants);
	if (ptpl_file) {
		/* Restore any previously persisted reservation state; a missing
		 * or unparsable file is not fatal, a failed restore is. */
		rc = nvmf_ns_load_reservation(ptpl_file, &info);
		if (!rc) {
			rc = nvmf_ns_reservation_restore(ns, &info);
			if (rc) {
				SPDK_ERRLOG("Subsystem restore reservation failed\n");
				goto err_ns_reservation_restore;
			}
		}
		ns->ptpl_file = strdup(ptpl_file);
		if (!ns->ptpl_file) {
			SPDK_ERRLOG("Namespace ns->ptpl_file allocation failed\n");
			goto err_strdup;
		}
	}

	/* Give every transport a chance to veto / prepare for the namespace. */
	for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
	     transport = spdk_nvmf_transport_get_next(transport)) {
		if (transport->ops->subsystem_add_ns) {
			rc = transport->ops->subsystem_add_ns(transport, subsystem, ns);
			if (rc) {
				SPDK_ERRLOG("Namespace attachment is not allowed by %s transport\n", transport->ops->name);
				goto err_subsystem_add_ns;
			}
		}
	}

	SPDK_DEBUGLOG(nvmf, "Subsystem %s: bdev %s assigned nsid %" PRIu32 "\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem),
		      bdev_name,
		      opts.nsid);

	nvmf_subsystem_ns_changed(subsystem, opts.nsid);

	return opts.nsid;

	/* Unwind in reverse order of construction. */
err_subsystem_add_ns:
	free(ns->ptpl_file);
err_strdup:
	nvmf_ns_reservation_clear_all_registrants(ns);
err_ns_reservation_restore:
	subsystem->ns[opts.nsid - 1] = NULL;
	spdk_bdev_module_release_bdev(ns->bdev);
	spdk_bdev_close(ns->desc);
	free(ns);
	return 0;

}

/* Return the smallest allocated NSID strictly greater than prev_nsid,
 * or 0 if none exists. */
static uint32_t
nvmf_subsystem_get_next_allocated_nsid(struct spdk_nvmf_subsystem *subsystem,
				       uint32_t prev_nsid)
{
	uint32_t nsid;

	if (prev_nsid >=
	    subsystem->max_nsid) {
		return 0;
	}

	for (nsid = prev_nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return nsid;
		}
	}

	return 0;
}

/* Iterator: first allocated namespace of the subsystem, or NULL. */
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	uint32_t first_nsid;

	first_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, 0);
	return _nvmf_subsystem_get_ns(subsystem, first_nsid);
}

/* Iterator: allocated namespace following prev_ns, or NULL. */
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t next_nsid;

	next_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, prev_ns->opts.nsid);
	return _nvmf_subsystem_get_ns(subsystem, next_nsid);
}

/* Look up a namespace by NSID; NULL if not allocated. */
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	return _nvmf_subsystem_get_ns(subsystem, nsid);
}

/* Accessor: a namespace's NSID. */
uint32_t
spdk_nvmf_ns_get_id(const struct spdk_nvmf_ns *ns)
{
	return ns->opts.nsid;
}

/* Accessor: a namespace's backing bdev. */
struct spdk_bdev *
spdk_nvmf_ns_get_bdev(struct spdk_nvmf_ns *ns)
{
	return ns->bdev;
}

/* Copy the namespace options into a caller buffer, truncating to the smaller
 * of the two sizes for ABI compatibility. */
void
spdk_nvmf_ns_get_opts(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_ns_opts *opts,
		      size_t opts_size)
{
	memset(opts, 0, opts_size);
	memcpy(opts, &ns->opts, spdk_min(sizeof(ns->opts), opts_size));
}

/* Accessor: subsystem serial number string. */
const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

/* Set the subsystem serial number. Must fit the fixed-size field and contain
 * only printable ASCII (NVMe spec requirement). Returns 0 or -1. */
int
spdk_nvmf_subsystem_set_sn(struct spdk_nvmf_subsystem *subsystem, const char *sn)
{
	size_t len, max_len;

	max_len = sizeof(subsystem->sn) - 1;
	len = strlen(sn);
	if (len > max_len) {
		SPDK_DEBUGLOG(nvmf, "Invalid sn \"%s\": length %zu > max %zu\n",
			      sn, len, max_len);
		return -1;
	}

	if (!nvmf_valid_ascii_string(sn, len)) {
		SPDK_DEBUGLOG(nvmf, "Non-ASCII sn\n");
		SPDK_LOGDUMP(nvmf, "sn", sn, len);
		return -1;
	}

	snprintf(subsystem->sn, sizeof(subsystem->sn), "%s", sn);

	return 0;
}

/* Accessor: subsystem model number string. */
const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

/* Set the subsystem model number (NULL selects the SPDK default). Same length
 * and ASCII constraints as the serial number. Returns 0 or -1. */
int
spdk_nvmf_subsystem_set_mn(struct spdk_nvmf_subsystem *subsystem, const char *mn)
{
	size_t len, max_len;

	if (mn == NULL) {
		mn = MODEL_NUMBER_DEFAULT;
	}
	max_len = sizeof(subsystem->mn) - 1;
	len = strlen(mn);
	if (len > max_len) {
		SPDK_DEBUGLOG(nvmf, "Invalid mn \"%s\": length %zu > max %zu\n",
			      mn, len, max_len);
		return -1;
	}

	if (!nvmf_valid_ascii_string(mn, len)) {
		SPDK_DEBUGLOG(nvmf, "Non-ASCII mn\n");
		SPDK_LOGDUMP(nvmf, "mn", mn, len);
		return -1;
	}

	snprintf(subsystem->mn, sizeof(subsystem->mn), "%s", mn);

	return 0;
}

/* Accessor: subsystem NQN. */
const char *
spdk_nvmf_subsystem_get_nqn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->subnqn;
}

/* Accessor: subsystem type (NVM or discovery). */
enum spdk_nvmf_subtype spdk_nvmf_subsystem_get_type(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->subtype;
}

/* Accessor: highest NSID the subsystem can hold. */
uint32_t
spdk_nvmf_subsystem_get_max_nsid(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->max_nsid;
}

/* Restrict the controller ID range handed out by this subsystem. Only valid
 * while the subsystem is INACTIVE. Returns 0, -EAGAIN, or -EINVAL. */
int
nvmf_subsystem_set_cntlid_range(struct spdk_nvmf_subsystem *subsystem,
				uint16_t min_cntlid, uint16_t max_cntlid)
{
	if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) {
		return -EAGAIN;
	}

	if (min_cntlid > max_cntlid) {
		return -EINVAL;
	}
	/* The spec reserves cntlid values in the range FFF0h to FFFFh.
	 */
	if (min_cntlid < NVMF_MIN_CNTLID || min_cntlid > NVMF_MAX_CNTLID ||
	    max_cntlid < NVMF_MIN_CNTLID || max_cntlid > NVMF_MAX_CNTLID) {
		return -EINVAL;
	}
	subsystem->min_cntlid = min_cntlid;
	subsystem->max_cntlid = max_cntlid;
	/* Keep the allocator cursor inside the new range; gen_cntlid
	 * pre-increments, so min_cntlid - 1 makes the next ID min_cntlid. */
	if (subsystem->next_cntlid < min_cntlid || subsystem->next_cntlid > max_cntlid - 1) {
		subsystem->next_cntlid = min_cntlid - 1;
	}

	return 0;
}

/* Allocate the next unused controller ID, wrapping within
 * [min_cntlid, max_cntlid]; returns 0xFFFF when all IDs are taken. */
static uint16_t
nvmf_subsystem_gen_cntlid(struct spdk_nvmf_subsystem *subsystem)
{
	int count;

	/*
	 * In the worst case, we might have to try all CNTLID values between min_cntlid and max_cntlid
	 * before we find one that is unused (or find that all values are in use).
	 */
	for (count = 0; count < subsystem->max_cntlid - subsystem->min_cntlid + 1; count++) {
		subsystem->next_cntlid++;
		if (subsystem->next_cntlid > subsystem->max_cntlid) {
			subsystem->next_cntlid = subsystem->min_cntlid;
		}

		/* Check if a controller with this cntlid currently exists. */
		if (nvmf_subsystem_get_ctrlr(subsystem, subsystem->next_cntlid) == NULL) {
			/* Found unused cntlid */
			return subsystem->next_cntlid;
		}
	}

	/* All valid cntlid values are in use.
	 */
	return 0xFFFF;
}

/* Assign a controller ID to ctrlr and attach it to the subsystem.
 * Returns 0 or -EBUSY when no controller IDs remain. */
int
nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr)
{
	ctrlr->cntlid = nvmf_subsystem_gen_cntlid(subsystem);
	if (ctrlr->cntlid == 0xFFFF) {
		/* Unable to get a cntlid */
		SPDK_ERRLOG("Reached max simultaneous ctrlrs\n");
		return -EBUSY;
	}

	TAILQ_INSERT_TAIL(&subsystem->ctrlrs, ctrlr, link);

	return 0;
}

/* Detach ctrlr from the subsystem; must run on the subsystem's thread. */
void
nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
			    struct spdk_nvmf_ctrlr *ctrlr)
{
	assert(spdk_get_thread() == subsystem->thread);
	assert(subsystem == ctrlr->subsys);
	SPDK_DEBUGLOG(nvmf, "remove ctrlr %p from subsys %p %s\n", ctrlr, subsystem, subsystem->subnqn);
	TAILQ_REMOVE(&subsystem->ctrlrs, ctrlr, link);
}

/* Find the controller with the given controller ID, or NULL. */
struct spdk_nvmf_ctrlr *
nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid)
{
	struct spdk_nvmf_ctrlr *ctrlr;

	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
		if (ctrlr->cntlid == cntlid) {
			return ctrlr;
		}
	}

	return NULL;
}

/* Accessor: maximum namespace count (same storage as max_nsid). */
uint32_t
spdk_nvmf_subsystem_get_max_namespaces(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->max_nsid;
}

/* Accessor: lowest controller ID this subsystem will assign. */
uint16_t
spdk_nvmf_subsystem_get_min_cntlid(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->min_cntlid;
}

/* Accessor: highest controller ID this subsystem will assign. */
uint16_t
spdk_nvmf_subsystem_get_max_cntlid(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->max_cntlid;
}

/* JSON decode target for one persisted reservation registrant. */
struct _nvmf_ns_registrant {
	uint64_t rkey;
	char *host_uuid;
};

/* JSON decode target for the persisted registrant array. */
struct _nvmf_ns_registrants {
	size_t num_regs;
	struct _nvmf_ns_registrant reg[SPDK_NVMF_MAX_NUM_REGISTRANTS];
};

/* JSON decode target for a namespace's persisted reservation state. */
struct _nvmf_ns_reservation {
	bool ptpl_activated;
	enum spdk_nvme_reservation_type rtype;
	uint64_t crkey;
	char *bdev_uuid;
1970 char *holder_uuid; 1971 struct _nvmf_ns_registrants regs; 1972 }; 1973 1974 static const struct spdk_json_object_decoder nvmf_ns_pr_reg_decoders[] = { 1975 {"rkey", offsetof(struct _nvmf_ns_registrant, rkey), spdk_json_decode_uint64}, 1976 {"host_uuid", offsetof(struct _nvmf_ns_registrant, host_uuid), spdk_json_decode_string}, 1977 }; 1978 1979 static int 1980 nvmf_decode_ns_pr_reg(const struct spdk_json_val *val, void *out) 1981 { 1982 struct _nvmf_ns_registrant *reg = out; 1983 1984 return spdk_json_decode_object(val, nvmf_ns_pr_reg_decoders, 1985 SPDK_COUNTOF(nvmf_ns_pr_reg_decoders), reg); 1986 } 1987 1988 static int 1989 nvmf_decode_ns_pr_regs(const struct spdk_json_val *val, void *out) 1990 { 1991 struct _nvmf_ns_registrants *regs = out; 1992 1993 return spdk_json_decode_array(val, nvmf_decode_ns_pr_reg, regs->reg, 1994 SPDK_NVMF_MAX_NUM_REGISTRANTS, ®s->num_regs, 1995 sizeof(struct _nvmf_ns_registrant)); 1996 } 1997 1998 static const struct spdk_json_object_decoder nvmf_ns_pr_decoders[] = { 1999 {"ptpl", offsetof(struct _nvmf_ns_reservation, ptpl_activated), spdk_json_decode_bool, true}, 2000 {"rtype", offsetof(struct _nvmf_ns_reservation, rtype), spdk_json_decode_uint32, true}, 2001 {"crkey", offsetof(struct _nvmf_ns_reservation, crkey), spdk_json_decode_uint64, true}, 2002 {"bdev_uuid", offsetof(struct _nvmf_ns_reservation, bdev_uuid), spdk_json_decode_string}, 2003 {"holder_uuid", offsetof(struct _nvmf_ns_reservation, holder_uuid), spdk_json_decode_string, true}, 2004 {"registrants", offsetof(struct _nvmf_ns_reservation, regs), nvmf_decode_ns_pr_regs}, 2005 }; 2006 2007 static int 2008 nvmf_ns_load_reservation(const char *file, struct spdk_nvmf_reservation_info *info) 2009 { 2010 FILE *fd; 2011 size_t json_size; 2012 ssize_t values_cnt, rc; 2013 void *json = NULL, *end; 2014 struct spdk_json_val *values = NULL; 2015 struct _nvmf_ns_reservation res = {}; 2016 uint32_t i; 2017 2018 fd = fopen(file, "r"); 2019 /* It's not an error if the file does not 
exist */ 2020 if (!fd) { 2021 SPDK_NOTICELOG("File %s does not exist\n", file); 2022 return -ENOENT; 2023 } 2024 2025 /* Load all persist file contents into a local buffer */ 2026 json = spdk_posix_file_load(fd, &json_size); 2027 fclose(fd); 2028 if (!json) { 2029 SPDK_ERRLOG("Load persit file %s failed\n", file); 2030 return -ENOMEM; 2031 } 2032 2033 rc = spdk_json_parse(json, json_size, NULL, 0, &end, 0); 2034 if (rc < 0) { 2035 SPDK_NOTICELOG("Parsing JSON configuration failed (%zd)\n", rc); 2036 goto exit; 2037 } 2038 2039 values_cnt = rc; 2040 values = calloc(values_cnt, sizeof(struct spdk_json_val)); 2041 if (values == NULL) { 2042 goto exit; 2043 } 2044 2045 rc = spdk_json_parse(json, json_size, values, values_cnt, &end, 0); 2046 if (rc != values_cnt) { 2047 SPDK_ERRLOG("Parsing JSON configuration failed (%zd)\n", rc); 2048 goto exit; 2049 } 2050 2051 /* Decode json */ 2052 if (spdk_json_decode_object(values, nvmf_ns_pr_decoders, 2053 SPDK_COUNTOF(nvmf_ns_pr_decoders), 2054 &res)) { 2055 SPDK_ERRLOG("Invalid objects in the persist file %s\n", file); 2056 rc = -EINVAL; 2057 goto exit; 2058 } 2059 2060 if (res.regs.num_regs > SPDK_NVMF_MAX_NUM_REGISTRANTS) { 2061 SPDK_ERRLOG("Can only support up to %u registrants\n", SPDK_NVMF_MAX_NUM_REGISTRANTS); 2062 rc = -ERANGE; 2063 goto exit; 2064 } 2065 2066 rc = 0; 2067 info->ptpl_activated = res.ptpl_activated; 2068 info->rtype = res.rtype; 2069 info->crkey = res.crkey; 2070 snprintf(info->bdev_uuid, sizeof(info->bdev_uuid), "%s", res.bdev_uuid); 2071 snprintf(info->holder_uuid, sizeof(info->holder_uuid), "%s", res.holder_uuid); 2072 info->num_regs = res.regs.num_regs; 2073 for (i = 0; i < res.regs.num_regs; i++) { 2074 info->registrants[i].rkey = res.regs.reg[i].rkey; 2075 snprintf(info->registrants[i].host_uuid, sizeof(info->registrants[i].host_uuid), "%s", 2076 res.regs.reg[i].host_uuid); 2077 } 2078 2079 exit: 2080 free(json); 2081 free(values); 2082 free(res.bdev_uuid); 2083 free(res.holder_uuid); 2084 for (i = 
0; i < res.regs.num_regs; i++) { 2085 free(res.regs.reg[i].host_uuid); 2086 } 2087 2088 return rc; 2089 } 2090 2091 static bool 2092 nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns); 2093 2094 static int 2095 nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info) 2096 { 2097 uint32_t i; 2098 struct spdk_nvmf_registrant *reg, *holder = NULL; 2099 struct spdk_uuid bdev_uuid, holder_uuid; 2100 bool rkey_flag = false; 2101 2102 SPDK_DEBUGLOG(nvmf, "NSID %u, PTPL %u, Number of registrants %u\n", 2103 ns->nsid, info->ptpl_activated, info->num_regs); 2104 2105 /* it's not an error */ 2106 if (!info->ptpl_activated || !info->num_regs) { 2107 return 0; 2108 } 2109 2110 /* Check info->crkey exist or not in info->registrants[i].rkey */ 2111 for (i = 0; i < info->num_regs; i++) { 2112 if (info->crkey == info->registrants[i].rkey) { 2113 rkey_flag = true; 2114 } 2115 } 2116 if (!rkey_flag) { 2117 return -EINVAL; 2118 } 2119 2120 spdk_uuid_parse(&bdev_uuid, info->bdev_uuid); 2121 if (spdk_uuid_compare(&bdev_uuid, spdk_bdev_get_uuid(ns->bdev))) { 2122 SPDK_ERRLOG("Existing bdev UUID is not same with configuration file\n"); 2123 return -EINVAL; 2124 } 2125 2126 ns->crkey = info->crkey; 2127 ns->rtype = info->rtype; 2128 ns->ptpl_activated = info->ptpl_activated; 2129 spdk_uuid_parse(&holder_uuid, info->holder_uuid); 2130 2131 SPDK_DEBUGLOG(nvmf, "Bdev UUID %s\n", info->bdev_uuid); 2132 if (info->rtype) { 2133 SPDK_DEBUGLOG(nvmf, "Holder UUID %s, RTYPE %u, RKEY 0x%"PRIx64"\n", 2134 info->holder_uuid, info->rtype, info->crkey); 2135 } 2136 2137 for (i = 0; i < info->num_regs; i++) { 2138 reg = calloc(1, sizeof(*reg)); 2139 if (!reg) { 2140 return -ENOMEM; 2141 } 2142 spdk_uuid_parse(®->hostid, info->registrants[i].host_uuid); 2143 reg->rkey = info->registrants[i].rkey; 2144 TAILQ_INSERT_TAIL(&ns->registrants, reg, link); 2145 if (!spdk_uuid_compare(&holder_uuid, ®->hostid)) { 2146 holder = reg; 2147 } 2148 SPDK_DEBUGLOG(nvmf, 
"Registrant RKEY 0x%"PRIx64", Host UUID %s\n", 2149 info->registrants[i].rkey, info->registrants[i].host_uuid); 2150 } 2151 2152 if (nvmf_ns_reservation_all_registrants_type(ns)) { 2153 ns->holder = TAILQ_FIRST(&ns->registrants); 2154 } else { 2155 ns->holder = holder; 2156 } 2157 2158 return 0; 2159 } 2160 2161 static int 2162 nvmf_ns_json_write_cb(void *cb_ctx, const void *data, size_t size) 2163 { 2164 char *file = cb_ctx; 2165 size_t rc; 2166 FILE *fd; 2167 2168 fd = fopen(file, "w"); 2169 if (!fd) { 2170 SPDK_ERRLOG("Can't open file %s for write\n", file); 2171 return -ENOENT; 2172 } 2173 rc = fwrite(data, 1, size, fd); 2174 fclose(fd); 2175 2176 return rc == size ? 0 : -1; 2177 } 2178 2179 static int 2180 nvmf_ns_reservation_update(const char *file, struct spdk_nvmf_reservation_info *info) 2181 { 2182 struct spdk_json_write_ctx *w; 2183 uint32_t i; 2184 int rc = 0; 2185 2186 w = spdk_json_write_begin(nvmf_ns_json_write_cb, (void *)file, 0); 2187 if (w == NULL) { 2188 return -ENOMEM; 2189 } 2190 /* clear the configuration file */ 2191 if (!info->ptpl_activated) { 2192 goto exit; 2193 } 2194 2195 spdk_json_write_object_begin(w); 2196 spdk_json_write_named_bool(w, "ptpl", info->ptpl_activated); 2197 spdk_json_write_named_uint32(w, "rtype", info->rtype); 2198 spdk_json_write_named_uint64(w, "crkey", info->crkey); 2199 spdk_json_write_named_string(w, "bdev_uuid", info->bdev_uuid); 2200 spdk_json_write_named_string(w, "holder_uuid", info->holder_uuid); 2201 2202 spdk_json_write_named_array_begin(w, "registrants"); 2203 for (i = 0; i < info->num_regs; i++) { 2204 spdk_json_write_object_begin(w); 2205 spdk_json_write_named_uint64(w, "rkey", info->registrants[i].rkey); 2206 spdk_json_write_named_string(w, "host_uuid", info->registrants[i].host_uuid); 2207 spdk_json_write_object_end(w); 2208 } 2209 spdk_json_write_array_end(w); 2210 spdk_json_write_object_end(w); 2211 2212 exit: 2213 rc = spdk_json_write_end(w); 2214 return rc; 2215 } 2216 2217 static int 2218 
nvmf_ns_update_reservation_info(struct spdk_nvmf_ns *ns)
{
	struct spdk_nvmf_reservation_info info;
	struct spdk_nvmf_registrant *reg, *tmp;
	uint32_t i = 0;

	assert(ns != NULL);

	/* Nothing to persist without a bdev or a configured PTPL file. */
	if (!ns->bdev || !ns->ptpl_file) {
		return 0;
	}

	/* Snapshot the in-memory reservation state into 'info' and hand it to
	 * nvmf_ns_reservation_update() for serialization to the PTPL file.
	 */
	memset(&info, 0, sizeof(info));
	spdk_uuid_fmt_lower(info.bdev_uuid, sizeof(info.bdev_uuid), spdk_bdev_get_uuid(ns->bdev));

	if (ns->rtype) {
		info.rtype = ns->rtype;
		info.crkey = ns->crkey;
		/* All-registrants reservation types have no single holder UUID. */
		if (!nvmf_ns_reservation_all_registrants_type(ns)) {
			assert(ns->holder != NULL);
			spdk_uuid_fmt_lower(info.holder_uuid, sizeof(info.holder_uuid), &ns->holder->hostid);
		}
	}

	/* NOTE(review): assumes the registrant count fits info.registrants[];
	 * presumably bounded by SPDK_NVMF_MAX_NUM_REGISTRANTS elsewhere - confirm.
	 */
	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		spdk_uuid_fmt_lower(info.registrants[i].host_uuid, sizeof(info.registrants[i].host_uuid),
				    &reg->hostid);
		info.registrants[i++].rkey = reg->rkey;
	}

	info.num_regs = i;
	info.ptpl_activated = ns->ptpl_activated;

	return nvmf_ns_reservation_update(ns->ptpl_file, &info);
}

/* Find the registrant whose Host Identifier equals 'uuid', or NULL. */
static struct spdk_nvmf_registrant *
nvmf_ns_reservation_get_registrant(struct spdk_nvmf_ns *ns,
				   struct spdk_uuid *uuid)
{
	struct spdk_nvmf_registrant *reg, *tmp;

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		if (!spdk_uuid_compare(&reg->hostid, uuid)) {
			return reg;
		}
	}

	return NULL;
}

/* Generate reservation notice log to registered HostID controllers */
static void
nvmf_subsystem_gen_ctrlr_notification(struct spdk_nvmf_subsystem *subsystem,
				      struct spdk_nvmf_ns *ns,
				      struct spdk_uuid *hostid_list,
				      uint32_t num_hostid,
				      enum spdk_nvme_reservation_notification_log_page_type type)
{
	struct spdk_nvmf_ctrlr *ctrlr;
	uint32_t i;

	/* Every controller whose hostid matches an entry in the list gets a
	 * log page entry of the given type.
	 */
	for (i = 0; i < num_hostid; i++) {
		TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
			if (!spdk_uuid_compare(&ctrlr->hostid, &hostid_list[i])) {
				nvmf_ctrlr_reservation_notice_log(ctrlr, ns, type);
			}
		}
	}
}

/* Get all registrants' hostid other than the controller who issued the command */
static uint32_t
nvmf_ns_reservation_get_all_other_hostid(struct spdk_nvmf_ns *ns,
		struct spdk_uuid *hostid_list,
		uint32_t max_num_hostid,
		struct spdk_uuid *current_hostid)
{
	struct spdk_nvmf_registrant *reg, *tmp;
	uint32_t num_hostid = 0;

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		if (spdk_uuid_compare(&reg->hostid, current_hostid)) {
			/* Caller's buffer is full - should never happen since
			 * registrations are bounded; assert in debug builds.
			 */
			if (num_hostid == max_num_hostid) {
				assert(false);
				return max_num_hostid;
			}
			hostid_list[num_hostid++] = reg->hostid;
		}
	}

	return num_hostid;
}

/* Calculate the unregistered HostID list according to list
 * prior to execute preempt command and list after executing
 * preempt command.
 */
static uint32_t
nvmf_ns_reservation_get_unregistered_hostid(struct spdk_uuid *old_hostid_list,
		uint32_t old_num_hostid,
		struct spdk_uuid *remaining_hostid_list,
		uint32_t remaining_num_hostid)
{
	struct spdk_uuid temp_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint32_t i, j, num_hostid = 0;
	bool found;

	/* Nothing remains registered: everyone in the old list was removed. */
	if (!remaining_num_hostid) {
		return old_num_hostid;
	}

	/* Collect old entries that are absent from the remaining list. */
	for (i = 0; i < old_num_hostid; i++) {
		found = false;
		for (j = 0; j < remaining_num_hostid; j++) {
			if (!spdk_uuid_compare(&old_hostid_list[i], &remaining_hostid_list[j])) {
				found = true;
				break;
			}
		}
		if (!found) {
			spdk_uuid_copy(&temp_hostid_list[num_hostid++], &old_hostid_list[i]);
		}
	}

	/* Result is returned in-place through old_hostid_list. */
	if (num_hostid) {
		memcpy(old_hostid_list, temp_hostid_list, sizeof(struct spdk_uuid) * num_hostid);
	}

	return num_hostid;
}

/* current reservation type is all registrants or not */
static bool
nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns) 2353 { 2354 return (ns->rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS || 2355 ns->rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS); 2356 } 2357 2358 /* current registrant is reservation holder or not */ 2359 static bool 2360 nvmf_ns_reservation_registrant_is_holder(struct spdk_nvmf_ns *ns, 2361 struct spdk_nvmf_registrant *reg) 2362 { 2363 if (!reg) { 2364 return false; 2365 } 2366 2367 if (nvmf_ns_reservation_all_registrants_type(ns)) { 2368 return true; 2369 } 2370 2371 return (ns->holder == reg); 2372 } 2373 2374 static int 2375 nvmf_ns_reservation_add_registrant(struct spdk_nvmf_ns *ns, 2376 struct spdk_nvmf_ctrlr *ctrlr, 2377 uint64_t nrkey) 2378 { 2379 struct spdk_nvmf_registrant *reg; 2380 2381 reg = calloc(1, sizeof(*reg)); 2382 if (!reg) { 2383 return -ENOMEM; 2384 } 2385 2386 reg->rkey = nrkey; 2387 /* set hostid for the registrant */ 2388 spdk_uuid_copy(®->hostid, &ctrlr->hostid); 2389 TAILQ_INSERT_TAIL(&ns->registrants, reg, link); 2390 ns->gen++; 2391 2392 return 0; 2393 } 2394 2395 static void 2396 nvmf_ns_reservation_release_reservation(struct spdk_nvmf_ns *ns) 2397 { 2398 ns->rtype = 0; 2399 ns->crkey = 0; 2400 ns->holder = NULL; 2401 } 2402 2403 /* release the reservation if the last registrant was removed */ 2404 static void 2405 nvmf_ns_reservation_check_release_on_remove_registrant(struct spdk_nvmf_ns *ns, 2406 struct spdk_nvmf_registrant *reg) 2407 { 2408 struct spdk_nvmf_registrant *next_reg; 2409 2410 /* no reservation holder */ 2411 if (!ns->holder) { 2412 assert(ns->rtype == 0); 2413 return; 2414 } 2415 2416 next_reg = TAILQ_FIRST(&ns->registrants); 2417 if (next_reg && nvmf_ns_reservation_all_registrants_type(ns)) { 2418 /* the next valid registrant is the new holder now */ 2419 ns->holder = next_reg; 2420 } else if (nvmf_ns_reservation_registrant_is_holder(ns, reg)) { 2421 /* release the reservation */ 2422 nvmf_ns_reservation_release_reservation(ns); 2423 } 2424 
} 2425 2426 static void 2427 nvmf_ns_reservation_remove_registrant(struct spdk_nvmf_ns *ns, 2428 struct spdk_nvmf_registrant *reg) 2429 { 2430 TAILQ_REMOVE(&ns->registrants, reg, link); 2431 nvmf_ns_reservation_check_release_on_remove_registrant(ns, reg); 2432 free(reg); 2433 ns->gen++; 2434 return; 2435 } 2436 2437 static uint32_t 2438 nvmf_ns_reservation_remove_registrants_by_key(struct spdk_nvmf_ns *ns, 2439 uint64_t rkey) 2440 { 2441 struct spdk_nvmf_registrant *reg, *tmp; 2442 uint32_t count = 0; 2443 2444 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2445 if (reg->rkey == rkey) { 2446 nvmf_ns_reservation_remove_registrant(ns, reg); 2447 count++; 2448 } 2449 } 2450 return count; 2451 } 2452 2453 static uint32_t 2454 nvmf_ns_reservation_remove_all_other_registrants(struct spdk_nvmf_ns *ns, 2455 struct spdk_nvmf_registrant *reg) 2456 { 2457 struct spdk_nvmf_registrant *reg_tmp, *reg_tmp2; 2458 uint32_t count = 0; 2459 2460 TAILQ_FOREACH_SAFE(reg_tmp, &ns->registrants, link, reg_tmp2) { 2461 if (reg_tmp != reg) { 2462 nvmf_ns_reservation_remove_registrant(ns, reg_tmp); 2463 count++; 2464 } 2465 } 2466 return count; 2467 } 2468 2469 static uint32_t 2470 nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns) 2471 { 2472 struct spdk_nvmf_registrant *reg, *reg_tmp; 2473 uint32_t count = 0; 2474 2475 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, reg_tmp) { 2476 nvmf_ns_reservation_remove_registrant(ns, reg); 2477 count++; 2478 } 2479 return count; 2480 } 2481 2482 static void 2483 nvmf_ns_reservation_acquire_reservation(struct spdk_nvmf_ns *ns, uint64_t rkey, 2484 enum spdk_nvme_reservation_type rtype, 2485 struct spdk_nvmf_registrant *holder) 2486 { 2487 ns->rtype = rtype; 2488 ns->crkey = rkey; 2489 assert(ns->holder == NULL); 2490 ns->holder = holder; 2491 } 2492 2493 static bool 2494 nvmf_ns_reservation_register(struct spdk_nvmf_ns *ns, 2495 struct spdk_nvmf_ctrlr *ctrlr, 2496 struct spdk_nvmf_request *req) 2497 { 2498 struct spdk_nvme_cmd 
*cmd = &req->cmd->nvme_cmd; 2499 uint8_t rrega, iekey, cptpl, rtype; 2500 struct spdk_nvme_reservation_register_data key; 2501 struct spdk_nvmf_registrant *reg; 2502 uint8_t status = SPDK_NVME_SC_SUCCESS; 2503 bool update_sgroup = false; 2504 struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2505 uint32_t num_hostid = 0; 2506 int rc; 2507 2508 rrega = cmd->cdw10_bits.resv_register.rrega; 2509 iekey = cmd->cdw10_bits.resv_register.iekey; 2510 cptpl = cmd->cdw10_bits.resv_register.cptpl; 2511 2512 if (req->data && req->length >= sizeof(key)) { 2513 memcpy(&key, req->data, sizeof(key)); 2514 } else { 2515 SPDK_ERRLOG("No key provided. Failing request.\n"); 2516 status = SPDK_NVME_SC_INVALID_FIELD; 2517 goto exit; 2518 } 2519 2520 SPDK_DEBUGLOG(nvmf, "REGISTER: RREGA %u, IEKEY %u, CPTPL %u, " 2521 "NRKEY 0x%"PRIx64", NRKEY 0x%"PRIx64"\n", 2522 rrega, iekey, cptpl, key.crkey, key.nrkey); 2523 2524 if (cptpl == SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON) { 2525 /* Ture to OFF state, and need to be updated in the configuration file */ 2526 if (ns->ptpl_activated) { 2527 ns->ptpl_activated = 0; 2528 update_sgroup = true; 2529 } 2530 } else if (cptpl == SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS) { 2531 if (ns->ptpl_file == NULL) { 2532 status = SPDK_NVME_SC_INVALID_FIELD; 2533 goto exit; 2534 } else if (ns->ptpl_activated == 0) { 2535 ns->ptpl_activated = 1; 2536 update_sgroup = true; 2537 } 2538 } 2539 2540 /* current Host Identifier has registrant or not */ 2541 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid); 2542 2543 switch (rrega) { 2544 case SPDK_NVME_RESERVE_REGISTER_KEY: 2545 if (!reg) { 2546 /* register new controller */ 2547 if (key.nrkey == 0) { 2548 SPDK_ERRLOG("Can't register zeroed new key\n"); 2549 status = SPDK_NVME_SC_INVALID_FIELD; 2550 goto exit; 2551 } 2552 rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey); 2553 if (rc < 0) { 2554 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2555 goto exit; 2556 } 2557 update_sgroup = 
true; 2558 } else { 2559 /* register with same key is not an error */ 2560 if (reg->rkey != key.nrkey) { 2561 SPDK_ERRLOG("The same host already register a " 2562 "key with 0x%"PRIx64"\n", 2563 reg->rkey); 2564 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2565 goto exit; 2566 } 2567 } 2568 break; 2569 case SPDK_NVME_RESERVE_UNREGISTER_KEY: 2570 if (!reg || (!iekey && reg->rkey != key.crkey)) { 2571 SPDK_ERRLOG("No registrant or current key doesn't match " 2572 "with existing registrant key\n"); 2573 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2574 goto exit; 2575 } 2576 2577 rtype = ns->rtype; 2578 num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list, 2579 SPDK_NVMF_MAX_NUM_REGISTRANTS, 2580 &ctrlr->hostid); 2581 2582 nvmf_ns_reservation_remove_registrant(ns, reg); 2583 2584 if (!ns->rtype && num_hostid && (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY || 2585 rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY)) { 2586 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 2587 hostid_list, 2588 num_hostid, 2589 SPDK_NVME_RESERVATION_RELEASED); 2590 } 2591 update_sgroup = true; 2592 break; 2593 case SPDK_NVME_RESERVE_REPLACE_KEY: 2594 if (key.nrkey == 0) { 2595 SPDK_ERRLOG("Can't register zeroed new key\n"); 2596 status = SPDK_NVME_SC_INVALID_FIELD; 2597 goto exit; 2598 } 2599 /* Registrant exists */ 2600 if (reg) { 2601 if (!iekey && reg->rkey != key.crkey) { 2602 SPDK_ERRLOG("Current key doesn't match " 2603 "existing registrant key\n"); 2604 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2605 goto exit; 2606 } 2607 if (reg->rkey == key.nrkey) { 2608 goto exit; 2609 } 2610 reg->rkey = key.nrkey; 2611 } else if (iekey) { /* No registrant but IEKEY is set */ 2612 /* new registrant */ 2613 rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey); 2614 if (rc < 0) { 2615 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2616 goto exit; 2617 } 2618 } else { /* No registrant */ 2619 SPDK_ERRLOG("No registrant\n"); 2620 status = 
SPDK_NVME_SC_RESERVATION_CONFLICT; 2621 goto exit; 2622 2623 } 2624 update_sgroup = true; 2625 break; 2626 default: 2627 status = SPDK_NVME_SC_INVALID_FIELD; 2628 goto exit; 2629 } 2630 2631 exit: 2632 if (update_sgroup) { 2633 rc = nvmf_ns_update_reservation_info(ns); 2634 if (rc != 0) { 2635 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2636 } 2637 } 2638 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2639 req->rsp->nvme_cpl.status.sc = status; 2640 return update_sgroup; 2641 } 2642 2643 static bool 2644 nvmf_ns_reservation_acquire(struct spdk_nvmf_ns *ns, 2645 struct spdk_nvmf_ctrlr *ctrlr, 2646 struct spdk_nvmf_request *req) 2647 { 2648 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2649 uint8_t racqa, iekey, rtype; 2650 struct spdk_nvme_reservation_acquire_data key; 2651 struct spdk_nvmf_registrant *reg; 2652 bool all_regs = false; 2653 uint32_t count = 0; 2654 bool update_sgroup = true; 2655 struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2656 uint32_t num_hostid = 0; 2657 struct spdk_uuid new_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2658 uint32_t new_num_hostid = 0; 2659 bool reservation_released = false; 2660 uint8_t status = SPDK_NVME_SC_SUCCESS; 2661 2662 racqa = cmd->cdw10_bits.resv_acquire.racqa; 2663 iekey = cmd->cdw10_bits.resv_acquire.iekey; 2664 rtype = cmd->cdw10_bits.resv_acquire.rtype; 2665 2666 if (req->data && req->length >= sizeof(key)) { 2667 memcpy(&key, req->data, sizeof(key)); 2668 } else { 2669 SPDK_ERRLOG("No key provided. 
Failing request.\n"); 2670 status = SPDK_NVME_SC_INVALID_FIELD; 2671 goto exit; 2672 } 2673 2674 SPDK_DEBUGLOG(nvmf, "ACQUIRE: RACQA %u, IEKEY %u, RTYPE %u, " 2675 "NRKEY 0x%"PRIx64", PRKEY 0x%"PRIx64"\n", 2676 racqa, iekey, rtype, key.crkey, key.prkey); 2677 2678 if (iekey || rtype > SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) { 2679 SPDK_ERRLOG("Ignore existing key field set to 1\n"); 2680 status = SPDK_NVME_SC_INVALID_FIELD; 2681 update_sgroup = false; 2682 goto exit; 2683 } 2684 2685 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid); 2686 /* must be registrant and CRKEY must match */ 2687 if (!reg || reg->rkey != key.crkey) { 2688 SPDK_ERRLOG("No registrant or current key doesn't match " 2689 "with existing registrant key\n"); 2690 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2691 update_sgroup = false; 2692 goto exit; 2693 } 2694 2695 all_regs = nvmf_ns_reservation_all_registrants_type(ns); 2696 2697 switch (racqa) { 2698 case SPDK_NVME_RESERVE_ACQUIRE: 2699 /* it's not an error for the holder to acquire same reservation type again */ 2700 if (nvmf_ns_reservation_registrant_is_holder(ns, reg) && ns->rtype == rtype) { 2701 /* do nothing */ 2702 update_sgroup = false; 2703 } else if (ns->holder == NULL) { 2704 /* first time to acquire the reservation */ 2705 nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg); 2706 } else { 2707 SPDK_ERRLOG("Invalid rtype or current registrant is not holder\n"); 2708 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2709 update_sgroup = false; 2710 goto exit; 2711 } 2712 break; 2713 case SPDK_NVME_RESERVE_PREEMPT: 2714 /* no reservation holder */ 2715 if (!ns->holder) { 2716 /* unregister with PRKEY */ 2717 nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey); 2718 break; 2719 } 2720 num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list, 2721 SPDK_NVMF_MAX_NUM_REGISTRANTS, 2722 &ctrlr->hostid); 2723 2724 /* only 1 reservation holder and reservation key is valid */ 2725 if (!all_regs) { 
			/* preempt itself */
			if (nvmf_ns_reservation_registrant_is_holder(ns, reg) &&
			    ns->crkey == key.prkey) {
				ns->rtype = rtype;
				reservation_released = true;
				break;
			}

			if (ns->crkey == key.prkey) {
				/* Preempt the current (non-self) holder and take over. */
				nvmf_ns_reservation_remove_registrant(ns, ns->holder);
				nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg);
				reservation_released = true;
			} else if (key.prkey != 0) {
				/* PRKEY names no holder: just unregister matching keys. */
				nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
			} else {
				/* PRKEY is zero */
				SPDK_ERRLOG("Current PRKEY is zero\n");
				status = SPDK_NVME_SC_RESERVATION_CONFLICT;
				update_sgroup = false;
				goto exit;
			}
		} else {
			/* release all other registrants except for the current one */
			if (key.prkey == 0) {
				nvmf_ns_reservation_remove_all_other_registrants(ns, reg);
				assert(ns->holder == reg);
			} else {
				count = nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
				if (count == 0) {
					SPDK_ERRLOG("PRKEY doesn't match any registrant\n");
					status = SPDK_NVME_SC_RESERVATION_CONFLICT;
					update_sgroup = false;
					goto exit;
				}
			}
		}
		break;
	default:
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		break;
	}

exit:
	if (update_sgroup && racqa == SPDK_NVME_RESERVE_PREEMPT) {
		new_num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, new_hostid_list,
				 SPDK_NVMF_MAX_NUM_REGISTRANTS,
				 &ctrlr->hostid);
		/* Preempt notification occurs on the unregistered controllers
		 * other than the controller who issued the command.
		 */
		num_hostid = nvmf_ns_reservation_get_unregistered_hostid(hostid_list,
			     num_hostid,
			     new_hostid_list,
			     new_num_hostid);
		if (num_hostid) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_REGISTRATION_PREEMPTED);

		}
		/* Reservation released notification occurs on the
		 * controllers which are the remaining registrants other than
		 * the controller who issued the command.
		 */
		if (reservation_released && new_num_hostid) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      new_hostid_list,
							      new_num_hostid,
							      SPDK_NVME_RESERVATION_RELEASED);

		}
	}
	/* Persist the new state only if PTPL is active for this namespace. */
	if (update_sgroup && ns->ptpl_activated) {
		if (nvmf_ns_update_reservation_info(ns)) {
			status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		}
	}
	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->rsp->nvme_cpl.status.sc = status;
	return update_sgroup;
}

/* Handle Reservation Release (RRELA: release / clear).  Returns true if the
 * subsystem poll groups need a reservation state update.
 */
static bool
nvmf_ns_reservation_release(struct spdk_nvmf_ns *ns,
			    struct spdk_nvmf_ctrlr *ctrlr,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	uint8_t rrela, iekey, rtype;
	struct spdk_nvmf_registrant *reg;
	uint64_t crkey;
	uint8_t status = SPDK_NVME_SC_SUCCESS;
	bool update_sgroup = true;
	struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint32_t num_hostid = 0;

	rrela = cmd->cdw10_bits.resv_release.rrela;
	iekey = cmd->cdw10_bits.resv_release.iekey;
	rtype = cmd->cdw10_bits.resv_release.rtype;

	if (req->data && req->length >= sizeof(crkey)) {
		memcpy(&crkey, req->data, sizeof(crkey));
	} else {
		SPDK_ERRLOG("No key provided. Failing request.\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		goto exit;
	}

	SPDK_DEBUGLOG(nvmf, "RELEASE: RRELA %u, IEKEY %u, RTYPE %u, "
		      "CRKEY 0x%"PRIx64"\n", rrela, iekey, rtype, crkey);

	if (iekey) {
		SPDK_ERRLOG("Ignore existing key field set to 1\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		goto exit;
	}

	/* Issuer must be a registrant and CRKEY must match. */
	reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid);
	if (!reg || reg->rkey != crkey) {
		SPDK_ERRLOG("No registrant or current key doesn't match "
			    "with existing registrant key\n");
		status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		update_sgroup = false;
		goto exit;
	}

	/* Collected up-front: notifications below go to all other registrants. */
	num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
			SPDK_NVMF_MAX_NUM_REGISTRANTS,
			&ctrlr->hostid);

	switch (rrela) {
	case SPDK_NVME_RESERVE_RELEASE:
		if (!ns->holder) {
			SPDK_DEBUGLOG(nvmf, "RELEASE: no holder\n");
			update_sgroup = false;
			goto exit;
		}
		if (ns->rtype != rtype) {
			SPDK_ERRLOG("Type doesn't match\n");
			status = SPDK_NVME_SC_INVALID_FIELD;
			update_sgroup = false;
			goto exit;
		}
		if (!nvmf_ns_reservation_registrant_is_holder(ns, reg)) {
			/* not the reservation holder, this isn't an error */
			update_sgroup = false;
			goto exit;
		}

		/* Save the type before it is zeroed by the release. */
		rtype = ns->rtype;
		nvmf_ns_reservation_release_reservation(ns);

		/* Write-exclusive / exclusive-access types generate no release
		 * notification.
		 */
		if (num_hostid && rtype != SPDK_NVME_RESERVE_WRITE_EXCLUSIVE &&
		    rtype != SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_RESERVATION_RELEASED);
		}
		break;
	case SPDK_NVME_RESERVE_CLEAR:
		nvmf_ns_reservation_clear_all_registrants(ns);
		if (num_hostid) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_RESERVATION_PREEMPTED);
		}
		break;
	default:
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		goto exit;
	}

exit:
	if (update_sgroup && ns->ptpl_activated) {
		if (nvmf_ns_update_reservation_info(ns)) {
			status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		}
	}
	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->rsp->nvme_cpl.status.sc = status;
	return update_sgroup;
}

/* Handle Reservation Report: fill the extended reservation status data
 * structure (EDS format only) into the request's data buffer.
 */
static void
nvmf_ns_reservation_report(struct spdk_nvmf_ns *ns,
			   struct spdk_nvmf_ctrlr *ctrlr,
			   struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvmf_registrant *reg, *tmp;
	struct spdk_nvme_reservation_status_extended_data *status_data;
	struct spdk_nvme_registered_ctrlr_extended_data *ctrlr_data;
	uint8_t *payload;
	uint32_t transfer_len, payload_len = 0;
	uint32_t regctl = 0;
	uint8_t status = SPDK_NVME_SC_SUCCESS;

	if (req->data == NULL) {
		SPDK_ERRLOG("No data transfer specified for request. "
			    " Unable to transfer back response.\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		goto exit;
	}

	if (!cmd->cdw11_bits.resv_report.eds) {
		SPDK_ERRLOG("NVMeoF uses extended controller data structure, "
			    "please set EDS bit in cdw11 and try again\n");
		status = SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT;
		goto exit;
	}

	/* Number of Dwords of the Reservation Status data structure to transfer */
	transfer_len = (cmd->cdw10 + 1) * sizeof(uint32_t);
	payload = req->data;

	/* Buffer must at least hold the fixed status header. */
	if (transfer_len < sizeof(struct spdk_nvme_reservation_status_extended_data)) {
		status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		goto exit;
	}

	status_data = (struct spdk_nvme_reservation_status_extended_data *)payload;
	status_data->data.gen = ns->gen;
	status_data->data.rtype = ns->rtype;
	status_data->data.ptpls = ns->ptpl_activated;
	payload_len += sizeof(struct spdk_nvme_reservation_status_extended_data);

	/* Append one extended controller entry per registrant until the
	 * host-supplied buffer is full.
	 */
	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		payload_len += sizeof(struct spdk_nvme_registered_ctrlr_extended_data);
		if (payload_len > transfer_len) {
			break;
		}

		ctrlr_data = (struct spdk_nvme_registered_ctrlr_extended_data *)
			     (payload + sizeof(*status_data) + sizeof(*ctrlr_data) * regctl);
		/* Set to 0xffffh for dynamic controller */
		ctrlr_data->cntlid = 0xffff;
		/* NOTE(review): for all-registrants reservation types ns->holder
		 * is just the first registrant, so only that entry reports holder
		 * status here - confirm against the spec's intent.
		 */
		ctrlr_data->rcsts.status = (ns->holder == reg) ?
		true : false;
		ctrlr_data->rkey = reg->rkey;
		spdk_uuid_copy((struct spdk_uuid *)ctrlr_data->hostid, &reg->hostid);
		regctl++;
	}
	status_data->data.regctl = regctl;

exit:
	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->rsp->nvme_cpl.status.sc = status;
	return;
}

/* Completion trampoline: run spdk_nvmf_request_complete() on the request's
 * original poll group thread.
 */
static void
nvmf_ns_reservation_complete(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;

	spdk_nvmf_request_complete(req);
}

/* Called when the subsystem-wide namespace update finishes; bounce the
 * request completion back to its owning poll group thread.
 */
static void
_nvmf_ns_reservation_update_done(struct spdk_nvmf_subsystem *subsystem,
				 void *cb_arg, int status)
{
	struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)cb_arg;
	struct spdk_nvmf_poll_group *group = req->qpair->group;

	spdk_thread_send_msg(group->thread, nvmf_ns_reservation_complete, req);
}

/* Entry point for reservation commands (register/acquire/release/report).
 * If the command changed reservation state, propagate it to all subsystem
 * poll groups before completing the request.
 */
void
nvmf_ns_reservation_request(void *ctx)
{
	struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)ctx;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct subsystem_update_ns_ctx *update_ctx;
	uint32_t nsid;
	struct spdk_nvmf_ns *ns;
	bool update_sgroup = false;

	nsid = cmd->nsid;
	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid);
	assert(ns != NULL);

	switch (cmd->opc) {
	case SPDK_NVME_OPC_RESERVATION_REGISTER:
		update_sgroup = nvmf_ns_reservation_register(ns, ctrlr, req);
		break;
	case SPDK_NVME_OPC_RESERVATION_ACQUIRE:
		update_sgroup = nvmf_ns_reservation_acquire(ns, ctrlr, req);
		break;
	case SPDK_NVME_OPC_RESERVATION_RELEASE:
		update_sgroup = nvmf_ns_reservation_release(ns, ctrlr, req);
		break;
	case SPDK_NVME_OPC_RESERVATION_REPORT:
		nvmf_ns_reservation_report(ns, ctrlr, req);
		break;
	default:
		break;
	}

	/* update reservation information to subsystem's poll group */
	if (update_sgroup) {
		update_ctx = calloc(1, sizeof(*update_ctx));
		if (update_ctx == NULL) {
			SPDK_ERRLOG("Can't alloc subsystem poll group update context\n");
			/* best effort: complete the request without the update */
			goto update_done;
		}
		update_ctx->subsystem = ctrlr->subsys;
		update_ctx->cb_fn = _nvmf_ns_reservation_update_done;
		update_ctx->cb_arg = req;

		nvmf_subsystem_update_ns(ctrlr->subsys, subsystem_update_ns_done, update_ctx);
		return;
	}

update_done:
	_nvmf_ns_reservation_update_done(ctrlr->subsys, (void *)req, 0);
}

/* Enable/disable ANA reporting; only allowed while the subsystem is inactive. */
int
spdk_nvmf_subsystem_set_ana_reporting(struct spdk_nvmf_subsystem *subsystem,
				      bool ana_reporting)
{
	if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) {
		return -EAGAIN;
	}

	subsystem->flags.ana_reporting = ana_reporting;

	return 0;
}

bool
nvmf_subsystem_get_ana_reporting(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->flags.ana_reporting;
}

/* Context for propagating an ANA state change across all poll groups. */
struct subsystem_listener_update_ctx {
	struct spdk_nvmf_subsystem_listener *listener;

	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
	void *cb_arg;
};

/* Final step of the for-each-channel walk: invoke the user callback. */
static void
subsystem_listener_update_done(struct spdk_io_channel_iter *i, int status)
{
	struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->cb_arg, status);
	}
	free(ctx);
}

/* Per-poll-group step: send an ANA change AEN to every controller on this
 * group that is connected through the updated listener.
 */
static void
subsystem_listener_update_on_pg(struct spdk_io_channel_iter *i)
{
	struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_nvmf_subsystem_listener *listener;
	struct spdk_nvmf_poll_group *group;
	struct spdk_nvmf_ctrlr *ctrlr;

	listener = ctx->listener;
	group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i));

	TAILQ_FOREACH(ctrlr, &listener->subsystem->ctrlrs, link) {
		if (ctrlr->admin_qpair->group == group && ctrlr->listener == listener) {
			nvmf_ctrlr_async_event_ana_change_notice(ctrlr);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

/* Set the ANA state of 'listener' for one ANA group (or all groups when
 * anagrpid == 0), then notify affected controllers on every poll group.
 * cb_fn is always invoked, with 0 on success or a negative errno.
 */
void
nvmf_subsystem_set_ana_state(struct spdk_nvmf_subsystem *subsystem,
			     const struct spdk_nvme_transport_id *trid,
			     enum spdk_nvme_ana_state ana_state, uint32_t anagrpid,
			     spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, void *cb_arg)
{
	struct spdk_nvmf_subsystem_listener *listener;
	struct subsystem_listener_update_ctx *ctx;
	uint32_t i;

	assert(cb_fn != NULL);
	assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	       subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED);

	if (!subsystem->flags.ana_reporting) {
		SPDK_ERRLOG("ANA reporting is disabled\n");
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	/* ANA Change state is not used, ANA Persistent Loss state
	 * is not supported yet.
	 */
	if (!(ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE ||
	      ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE ||
	      ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE)) {
		SPDK_ERRLOG("ANA state %d is not supported\n", ana_state);
		cb_fn(cb_arg, -ENOTSUP);
		return;
	}

	/* ANA group IDs are bounded by max_nsid in this implementation. */
	if (anagrpid > subsystem->max_nsid) {
		SPDK_ERRLOG("ANA group ID %" PRIu32 " is more than maximum\n", anagrpid);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	listener = nvmf_subsystem_find_listener(subsystem, trid);
	if (!listener) {
		SPDK_ERRLOG("Unable to find listener.\n");
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	/* Single-group update that is already in the requested state: no-op. */
	if (anagrpid != 0 && listener->ana_state[anagrpid - 1] == ana_state) {
		cb_fn(cb_arg, 0);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		SPDK_ERRLOG("Unable to allocate context\n");
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* anagrpid == 0 means update every group. */
	for (i = 1; i <= subsystem->max_nsid; i++) {
		if (anagrpid == 0 || i == anagrpid) {
			listener->ana_state[i - 1] = ana_state;
		}
	}
	listener->ana_state_change_count++;

	ctx->listener = listener;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* Walk every poll group to raise ANA change AENs, then call back. */
	spdk_for_each_channel(subsystem->tgt,
			      subsystem_listener_update_on_pg,
			      ctx,
			      subsystem_listener_update_done);
}