1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"
#include "transport.h"

#include "spdk/likely.h"
#include "spdk/string.h"
#include "spdk/trace.h"
#include "spdk/nvmf_spec.h"
#include "spdk/uuid.h"
#include "spdk/json.h"
#include "spdk/file.h"

#include "spdk/bdev_module.h"
#include "spdk/log.h"
#include "spdk_internal/utf.h"

/* Model number reported in Identify Controller data when none is configured. */
#define MODEL_NUMBER_DEFAULT "SPDK bdev Controller"

/*
 * States for parsing valid domains in NQNs according to RFC 1034
 */
enum spdk_nvmf_nqn_domain_states {
	/* First character of a domain must be a letter */
	SPDK_NVMF_DOMAIN_ACCEPT_LETTER = 0,

	/* Subsequent characters can be any of letter, digit, or hyphen */
	SPDK_NVMF_DOMAIN_ACCEPT_LDH = 1,

	/* A domain label must end with either a letter or digit */
	SPDK_NVMF_DOMAIN_ACCEPT_ANY = 2
};

/* Returns true if is a valid ASCII string as defined by the NVMe spec */
static bool
nvmf_valid_ascii_string(const void *buf, size_t size)
{
	const uint8_t *str = buf;
	size_t i;

	for (i = 0; i < size; i++) {
		/* Printable ASCII only: 0x20 (space) through 0x7E ('~'). */
		if (str[i] < 0x20 || str[i] > 0x7E) {
			return false;
		}
	}

	return true;
}

/*
 * Validate an NVMe Qualified Name. Accepts the discovery NQN, the
 * "nqn.2014-08.org.nvmexpress:uuid:<uuid>" form, and the generic
 * "nqn.yyyy-mm.reverse.domain:user-string" form (domain labels checked
 * against RFC 1034, user string checked for valid UTF-8).
 */
static bool
nvmf_valid_nqn(const char *nqn)
{
	size_t len;
	struct spdk_uuid uuid_value;
	uint32_t i;
	int bytes_consumed;
	uint32_t domain_label_length;
	char *reverse_domain_end;
	uint32_t reverse_domain_end_index;
	enum spdk_nvmf_nqn_domain_states domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER;

	/* Check for length requirements */
	len = strlen(nqn);
	if (len > SPDK_NVMF_NQN_MAX_LEN) {
		SPDK_ERRLOG("Invalid NQN \"%s\": length %zu > max %d\n", nqn, len, SPDK_NVMF_NQN_MAX_LEN);
		return false;
	}

	/* The nqn must be at least as long as SPDK_NVMF_NQN_MIN_LEN to contain the necessary prefix. */
	if (len < SPDK_NVMF_NQN_MIN_LEN) {
		SPDK_ERRLOG("Invalid NQN \"%s\": length %zu < min %d\n", nqn, len, SPDK_NVMF_NQN_MIN_LEN);
		return false;
	}

	/* Check for discovery controller nqn */
	if (!strcmp(nqn, SPDK_NVMF_DISCOVERY_NQN)) {
		return true;
	}

	/* Check for equality with the generic nqn structure of the form "nqn.2014-08.org.nvmexpress:uuid:11111111-2222-3333-4444-555555555555" */
	if (!strncmp(nqn, SPDK_NVMF_NQN_UUID_PRE, SPDK_NVMF_NQN_UUID_PRE_LEN)) {
		if (len != SPDK_NVMF_NQN_UUID_PRE_LEN + SPDK_NVMF_UUID_STRING_LEN) {
			SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not the correct length\n", nqn);
			return false;
		}

		if (spdk_uuid_parse(&uuid_value, &nqn[SPDK_NVMF_NQN_UUID_PRE_LEN])) {
			SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not formatted correctly\n", nqn);
			return false;
		}
		return true;
	}

	/* If the nqn does not match the uuid structure, the next several checks validate the form "nqn.yyyy-mm.reverse.domain:user-string" */

	if (strncmp(nqn, "nqn.", 4) != 0) {
		SPDK_ERRLOG("Invalid NQN \"%s\": NQN must begin with \"nqn.\".\n", nqn);
		return false;
	}

	/* Check for yyyy-mm. */
	if (!(isdigit(nqn[4]) && isdigit(nqn[5]) && isdigit(nqn[6]) && isdigit(nqn[7]) &&
	      nqn[8] == '-' && isdigit(nqn[9]) && isdigit(nqn[10]) && nqn[11] == '.')) {
		SPDK_ERRLOG("Invalid date code in NQN \"%s\"\n", nqn);
		return false;
	}

	/* The ':' separating the reverse domain from the user string must exist
	 * and must not be the final character (the user string may not be empty).
	 * Note the deliberately empty then-branch: the assignment to
	 * reverse_domain_end_index happens inside the condition. */
	reverse_domain_end = strchr(nqn, ':');
	if (reverse_domain_end != NULL && (reverse_domain_end_index = reverse_domain_end - nqn) < len - 1) {
	} else {
		SPDK_ERRLOG("Invalid NQN \"%s\". NQN must contain user specified name with a ':' as a prefix.\n",
			    nqn);
		return false;
	}

	/* Check for valid reverse domain */
	domain_label_length = 0;
	for (i = 12; i < reverse_domain_end_index; i++) {
		if (domain_label_length > SPDK_DOMAIN_LABEL_MAX_LEN) {
			SPDK_ERRLOG("Invalid domain name in NQN \"%s\". At least one Label is too long.\n", nqn);
			return false;
		}

		switch (domain_state) {

		case SPDK_NVMF_DOMAIN_ACCEPT_LETTER: {
			if (isalpha(nqn[i])) {
				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY;
				domain_label_length++;
				break;
			} else {
				SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must start with a letter.\n", nqn);
				return false;
			}
		}

		case SPDK_NVMF_DOMAIN_ACCEPT_LDH: {
			if (isalpha(nqn[i]) || isdigit(nqn[i])) {
				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY;
				domain_label_length++;
				break;
			} else if (nqn[i] == '-') {
				if (i == reverse_domain_end_index - 1) {
					SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n",
						    nqn);
					return false;
				}
				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH;
				domain_label_length++;
				break;
			} else if (nqn[i] == '.') {
				SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n",
					    nqn);
				return false;
			} else {
				SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only [a-z,A-Z,0-9,'-','.'].\n",
					    nqn);
				return false;
			}
		}

		case SPDK_NVMF_DOMAIN_ACCEPT_ANY: {
			if (isalpha(nqn[i]) || isdigit(nqn[i])) {
				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY;
				domain_label_length++;
				break;
			} else if (nqn[i] == '-') {
				if (i == reverse_domain_end_index - 1) {
					SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n",
						    nqn);
					return false;
				}
				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH;
				domain_label_length++;
				break;
			} else if (nqn[i] == '.') {
				/* A '.' ends the current label; the next one must start with a letter. */
				domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER;
				domain_label_length = 0;
				break;
			} else {
				SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only [a-z,A-Z,0-9,'-','.'].\n",
					    nqn);
				return false;
			}
		}
		}
	}

	/* The user-specified portion after ':' only needs to be valid UTF-8. */
	i = reverse_domain_end_index + 1;
	while (i < len) {
		bytes_consumed = utf8_valid(&nqn[i], &nqn[len]);
		if (bytes_consumed <= 0) {
			SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only valid utf-8.\n", nqn);
			return false;
		}

		i += bytes_consumed;
	}
	return true;
}

static void subsystem_state_change_on_pg(struct spdk_io_channel_iter *i);

/*
 * Allocate and register a new subsystem on the target. Fails (returns NULL)
 * if the NQN already exists or is invalid, if a discovery subsystem is
 * requested with namespaces, or if no free subsystem slot remains.
 * The new subsystem starts in the INACTIVE state with a default serial
 * number and model number.
 */
struct spdk_nvmf_subsystem *
spdk_nvmf_subsystem_create(struct spdk_nvmf_tgt *tgt,
			   const char *nqn,
			   enum spdk_nvmf_subtype type,
			   uint32_t num_ns)
{
	struct spdk_nvmf_subsystem *subsystem;
	uint32_t sid;

	if (spdk_nvmf_tgt_find_subsystem(tgt, nqn)) {
		SPDK_ERRLOG("Subsystem NQN '%s' already exists\n", nqn);
		return NULL;
	}

	if (!nvmf_valid_nqn(nqn)) {
		return NULL;
	}

	if (type == SPDK_NVMF_SUBTYPE_DISCOVERY && num_ns != 0) {
		SPDK_ERRLOG("Discovery subsystem cannot have namespaces.\n");
		return NULL;
	}

	/* Find a free subsystem id (sid) */
	for (sid = 0; sid < tgt->max_subsystems; sid++) {
		if (tgt->subsystems[sid] == NULL) {
			break;
		}
	}
	if (sid >= tgt->max_subsystems) {
		return NULL;
	}

	subsystem = calloc(1, sizeof(struct spdk_nvmf_subsystem));
	if (subsystem == NULL) {
		return NULL;
	}

	subsystem->thread = spdk_get_thread();
	subsystem->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
	subsystem->tgt = tgt;
	subsystem->id = sid;
	subsystem->subtype = type;
	subsystem->max_nsid = num_ns;
	subsystem->max_allowed_nsid = num_ns;
	subsystem->next_cntlid = 0;
	snprintf(subsystem->subnqn, sizeof(subsystem->subnqn), "%s", nqn);
	pthread_mutex_init(&subsystem->mutex, NULL);
	TAILQ_INIT(&subsystem->listeners);
	TAILQ_INIT(&subsystem->hosts);
	TAILQ_INIT(&subsystem->ctrlrs);

	if (num_ns != 0) {
		subsystem->ns = calloc(num_ns, sizeof(struct spdk_nvmf_ns *));
		if (subsystem->ns == NULL) {
			SPDK_ERRLOG("Namespace memory allocation failed\n");
			pthread_mutex_destroy(&subsystem->mutex);
			free(subsystem);
			return NULL;
		}
	}

	/* Default serial number: all '0' characters, NUL-terminated. */
	memset(subsystem->sn, '0', sizeof(subsystem->sn) - 1);
	subsystem->sn[sizeof(subsystem->sn) - 1] = '\0';

	snprintf(subsystem->mn, sizeof(subsystem->mn), "%s",
		 MODEL_NUMBER_DEFAULT);

	tgt->subsystems[sid] = subsystem;
	nvmf_update_discovery_log(tgt, NULL);

	return subsystem;
}

/* Must hold subsystem->mutex while calling this function */
static void
nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_host *host)
{
	TAILQ_REMOVE(&subsystem->hosts, host, link);
	free(host);
}

/*
 * Unlink and free a listener. When stop is true, also stop the transport
 * from listening on the listener's trid.
 */
static void
_nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_subsystem_listener *listener,
				bool stop)
{
	struct spdk_nvmf_transport *transport;

	if (stop) {
		transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, listener->trid->trstring);
		if (transport != NULL) {
			spdk_nvmf_transport_stop_listen(transport, listener->trid);
		}
	}

	TAILQ_REMOVE(&subsystem->listeners, listener, link);
	free(listener);
}

/*
 * Tear down a subsystem: listeners, hosts, controllers, and namespaces are
 * all released, and the target's slot is cleared. The subsystem must already
 * be in the INACTIVE state.
 */
void
spdk_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_host *host, *host_tmp;
	struct spdk_nvmf_ctrlr *ctrlr, *ctrlr_tmp;
	struct spdk_nvmf_ns *ns;

	if (!subsystem) {
		return;
	}

	assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE);

	SPDK_DEBUGLOG(nvmf, "subsystem is %p\n", subsystem);

	nvmf_subsystem_remove_all_listeners(subsystem, false);

	pthread_mutex_lock(&subsystem->mutex);

	TAILQ_FOREACH_SAFE(host, &subsystem->hosts, link, host_tmp) {
		nvmf_subsystem_remove_host(subsystem, host);
	}

	pthread_mutex_unlock(&subsystem->mutex);

	TAILQ_FOREACH_SAFE(ctrlr, &subsystem->ctrlrs, link, ctrlr_tmp) {
		nvmf_ctrlr_destruct(ctrlr);
	}

	ns = spdk_nvmf_subsystem_get_first_ns(subsystem);
	while (ns != NULL) {
		/* Fetch the next ns before removing the current one invalidates it. */
		struct spdk_nvmf_ns *next_ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns);

		spdk_nvmf_subsystem_remove_ns(subsystem, ns->opts.nsid);
		ns = next_ns;
	}

	free(subsystem->ns);

	subsystem->tgt->subsystems[subsystem->id] = NULL;
	nvmf_update_discovery_log(subsystem->tgt, NULL);

	pthread_mutex_destroy(&subsystem->mutex);

	free(subsystem);
}


/* we have to use the typedef in the function declaration to appease astyle. */
typedef enum spdk_nvmf_subsystem_state spdk_nvmf_subsystem_state_t;

/*
 * Map a (current, requested) state pair to the transitional state the
 * subsystem passes through on the way to the requested state.
 */
static spdk_nvmf_subsystem_state_t
nvmf_subsystem_get_intermediate_state(enum spdk_nvmf_subsystem_state current_state,
				      enum spdk_nvmf_subsystem_state requested_state)
{
	switch (requested_state) {
	case SPDK_NVMF_SUBSYSTEM_INACTIVE:
		return SPDK_NVMF_SUBSYSTEM_DEACTIVATING;
	case SPDK_NVMF_SUBSYSTEM_ACTIVE:
		if (current_state == SPDK_NVMF_SUBSYSTEM_PAUSED) {
			return SPDK_NVMF_SUBSYSTEM_RESUMING;
		} else {
			return SPDK_NVMF_SUBSYSTEM_ACTIVATING;
		}
	case SPDK_NVMF_SUBSYSTEM_PAUSED:
		return SPDK_NVMF_SUBSYSTEM_PAUSING;
	default:
		assert(false);
		return SPDK_NVMF_SUBSYSTEM_NUM_STATES;
	}
}

/*
 * Atomically transition subsystem->state to the given state, verifying that
 * the old state was the expected predecessor. Returns 0 on a legal
 * transition, non-zero otherwise. Uses compare-and-swap because the state
 * may be read from other threads.
 */
static int
nvmf_subsystem_set_state(struct spdk_nvmf_subsystem *subsystem,
			 enum spdk_nvmf_subsystem_state state)
{
	enum spdk_nvmf_subsystem_state actual_old_state, expected_old_state;
	bool exchanged;

	switch (state) {
	case SPDK_NVMF_SUBSYSTEM_INACTIVE:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_DEACTIVATING;
		break;
	case SPDK_NVMF_SUBSYSTEM_ACTIVATING:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
		break;
	case SPDK_NVMF_SUBSYSTEM_ACTIVE:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING;
		break;
	case SPDK_NVMF_SUBSYSTEM_PAUSING:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
		break;
	case SPDK_NVMF_SUBSYSTEM_PAUSED:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSING;
		break;
	case SPDK_NVMF_SUBSYSTEM_RESUMING:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSED;
		break;
	case SPDK_NVMF_SUBSYSTEM_DEACTIVATING:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
		break;
	default:
		assert(false);
		return -1;
	}

	actual_old_state = expected_old_state;
	exchanged = __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false,
						__ATOMIC_RELAXED, __ATOMIC_RELAXED);
	if (spdk_unlikely(exchanged == false)) {
		/* The first CAS failed; actual_old_state now holds the real current
		 * state. A few alternate predecessors are legal - adjust
		 * expected_old_state and retry once. */
		if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING &&
		    state == SPDK_NVMF_SUBSYSTEM_ACTIVE) {
			expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING;
		}
		/* This is for the case when activating the subsystem fails. */
		if (actual_old_state == SPDK_NVMF_SUBSYSTEM_ACTIVATING &&
		    state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING) {
			expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING;
		}
		/* This is for the case when resuming the subsystem fails. */
		if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING &&
		    state == SPDK_NVMF_SUBSYSTEM_PAUSING) {
			expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING;
		}
		actual_old_state = expected_old_state;
		__atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false,
					    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	}
	assert(actual_old_state == expected_old_state);
	/* 0 when the transition succeeded, non-zero otherwise. */
	return actual_old_state - expected_old_state;
}

/* Context carried through an asynchronous subsystem state change. */
struct subsystem_state_change_ctx {
	struct spdk_nvmf_subsystem *subsystem;

	/* State the subsystem was in before the change began. */
	enum spdk_nvmf_subsystem_state original_state;

	/* State the caller asked for. */
	enum spdk_nvmf_subsystem_state requested_state;

	spdk_nvmf_subsystem_state_change_done cb_fn;
	void *cb_arg;
};

/*
 * Completion for the revert pass that runs after a failed state change.
 * Always reports failure (-1) to the original caller.
 */
static void
subsystem_state_change_revert_done(struct spdk_io_channel_iter *i, int status)
{
	struct subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	/* Nothing to be done here if the state setting fails, we are just screwed. */
	if (nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state)) {
		SPDK_ERRLOG("Unable to revert the subsystem state after operation failure.\n");
	}

	ctx->subsystem->changing_state = false;
	if (ctx->cb_fn) {
		/* return a failure here. This function only exists in an error path. */
		ctx->cb_fn(ctx->subsystem, ctx->cb_arg, -1);
	}
	free(ctx);
}

/*
 * Completion for a state-change pass over all poll groups. On success,
 * finalize the state; on failure, start a second pass that walks the poll
 * groups back toward the original state.
 */
static void
subsystem_state_change_done(struct spdk_io_channel_iter *i, int status)
{
	struct subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	enum spdk_nvmf_subsystem_state intermediate_state;

	if (status == 0) {
		status = nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state);
		if (status) {
			status = -1;
		}
	}

	if (status) {
		/* Revert: head back toward original_state via its intermediate state. */
		intermediate_state = nvmf_subsystem_get_intermediate_state(ctx->requested_state,
				     ctx->original_state);
		assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES);

		if (nvmf_subsystem_set_state(ctx->subsystem, intermediate_state)) {
			goto out;
		}
		ctx->requested_state = ctx->original_state;
		spdk_for_each_channel(ctx->subsystem->tgt,
				      subsystem_state_change_on_pg,
				      ctx,
				      subsystem_state_change_revert_done);
		return;
	}

out:
	ctx->subsystem->changing_state = false;
	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status);
	}
	free(ctx);
}

/* Per-poll-group completion: advance the channel iterator. */
static void
subsystem_state_change_continue(void *ctx, int status)
{
	struct spdk_io_channel_iter *i = ctx;
	spdk_for_each_channel_continue(i, status);
}

/*
 * Apply the requested state change to one poll group (runs once per poll
 * group thread via spdk_for_each_channel).
 */
static void
subsystem_state_change_on_pg(struct spdk_io_channel_iter *i)
{
	struct subsystem_state_change_ctx *ctx;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_poll_group *group;

	ctx = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	switch (ctx->requested_state) {
	case SPDK_NVMF_SUBSYSTEM_INACTIVE:
		nvmf_poll_group_remove_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
		break;
	case SPDK_NVMF_SUBSYSTEM_ACTIVE:
		/* ACTIVE may be reached from either INACTIVE (add) or PAUSED (resume). */
		if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVATING) {
			nvmf_poll_group_add_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
		} else if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_RESUMING) {
			nvmf_poll_group_resume_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
		}
		break;
	case SPDK_NVMF_SUBSYSTEM_PAUSED:
		nvmf_poll_group_pause_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
		break;
	default:
		assert(false);
		break;
	}
}

/*
 * Begin an asynchronous transition to requested_state. Returns -EBUSY if
 * another state change is already in flight, 0 if the change was started
 * (or the subsystem is already in the requested state), or a negative errno
 * on setup failure. cb_fn is invoked on the subsystem's thread when done.
 */
static int
nvmf_subsystem_state_change(struct spdk_nvmf_subsystem *subsystem,
			    enum spdk_nvmf_subsystem_state requested_state,
			    spdk_nvmf_subsystem_state_change_done cb_fn,
			    void *cb_arg)
{
	struct subsystem_state_change_ctx *ctx;
	enum spdk_nvmf_subsystem_state intermediate_state;
	int rc;

	/* Atomically claim the changing_state flag; only one change at a time. */
	if (__sync_val_compare_and_swap(&subsystem->changing_state, false, true)) {
		return -EBUSY;
	}

	/* If we are already in the requested state, just call the callback immediately. */
	if (subsystem->state == requested_state) {
		subsystem->changing_state = false;
		if (cb_fn) {
			cb_fn(subsystem, cb_arg, 0);
		}
		return 0;
	}

	intermediate_state = nvmf_subsystem_get_intermediate_state(subsystem->state, requested_state);
	assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		subsystem->changing_state = false;
		return -ENOMEM;
	}

	ctx->original_state = subsystem->state;
	rc = nvmf_subsystem_set_state(subsystem, intermediate_state);
	if (rc) {
		free(ctx);
		subsystem->changing_state = false;
		return rc;
	}

	ctx->subsystem = subsystem;
	ctx->requested_state = requested_state;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(subsystem->tgt,
			      subsystem_state_change_on_pg,
			      ctx,
			      subsystem_state_change_done);

	return 0;
}

int
spdk_nvmf_subsystem_start(struct spdk_nvmf_subsystem *subsystem,
			  spdk_nvmf_subsystem_state_change_done cb_fn,
			  void *cb_arg)
{
	return nvmf_subsystem_state_change(subsystem, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg);
}

int
spdk_nvmf_subsystem_stop(struct spdk_nvmf_subsystem *subsystem,
			 spdk_nvmf_subsystem_state_change_done cb_fn,
			 void *cb_arg)
{
	return nvmf_subsystem_state_change(subsystem, SPDK_NVMF_SUBSYSTEM_INACTIVE, cb_fn, cb_arg);
}

int
spdk_nvmf_subsystem_pause(struct spdk_nvmf_subsystem *subsystem,
			  spdk_nvmf_subsystem_state_change_done cb_fn,
			  void *cb_arg)
{
	return nvmf_subsystem_state_change(subsystem, SPDK_NVMF_SUBSYSTEM_PAUSED, cb_fn, cb_arg);
}

int
spdk_nvmf_subsystem_resume(struct spdk_nvmf_subsystem *subsystem,
			   spdk_nvmf_subsystem_state_change_done cb_fn,
			   void *cb_arg)
{
	return nvmf_subsystem_state_change(subsystem, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg);
}

/* Return the first registered subsystem on the target, or NULL if none. */
struct spdk_nvmf_subsystem *
spdk_nvmf_subsystem_get_first(struct spdk_nvmf_tgt *tgt)
{
	struct spdk_nvmf_subsystem *subsystem;
	uint32_t sid;

	for (sid = 0; sid < tgt->max_subsystems; sid++) {
		subsystem = tgt->subsystems[sid];
		if (subsystem) {
			return subsystem;
		}
	}

	return NULL;
}

/* Return the next registered subsystem after the given one, or NULL. */
struct spdk_nvmf_subsystem *
spdk_nvmf_subsystem_get_next(struct spdk_nvmf_subsystem *subsystem)
{
	uint32_t sid;
	struct spdk_nvmf_tgt *tgt;

	if (!subsystem) {
		return NULL;
	}

	tgt = subsystem->tgt;

	for (sid = subsystem->id + 1; sid < tgt->max_subsystems; sid++) {
		subsystem = tgt->subsystems[sid];
		if (subsystem) {
			return subsystem;
		}
	}

	return NULL;
}

/* Must hold subsystem->mutex while calling this function */
static struct spdk_nvmf_host *
nvmf_subsystem_find_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	struct spdk_nvmf_host *host = NULL;

	TAILQ_FOREACH(host, &subsystem->hosts, link) {
		if (strcmp(hostnqn, host->nqn) == 0) {
			return host;
		}
	}

	return NULL;
}

/*
 * Allow the given host NQN to connect to this subsystem. Idempotent: adding
 * an already-allowed host returns 0 without change.
 */
int
spdk_nvmf_subsystem_add_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	struct spdk_nvmf_host *host;

	if (!nvmf_valid_nqn(hostnqn)) {
		return -EINVAL;
	}

	pthread_mutex_lock(&subsystem->mutex);

	if (nvmf_subsystem_find_host(subsystem, hostnqn)) {
		/* This subsystem already allows the specified host. */
		pthread_mutex_unlock(&subsystem->mutex);
		return 0;
	}

	host = calloc(1, sizeof(*host));
	if (!host) {
		pthread_mutex_unlock(&subsystem->mutex);
		return -ENOMEM;
	}

	snprintf(host->nqn, sizeof(host->nqn), "%s", hostnqn);

	TAILQ_INSERT_HEAD(&subsystem->hosts, host, link);

	nvmf_update_discovery_log(subsystem->tgt, hostnqn);

	pthread_mutex_unlock(&subsystem->mutex);

	return 0;
}

/*
 * Revoke the given host NQN's access to this subsystem. Returns -ENOENT if
 * the host was not in the allowed list. Does not disconnect existing
 * connections (see spdk_nvmf_subsystem_disconnect_host).
 */
int
spdk_nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	struct spdk_nvmf_host *host;

	pthread_mutex_lock(&subsystem->mutex);

	host = nvmf_subsystem_find_host(subsystem, hostnqn);
	if (host == NULL) {
		pthread_mutex_unlock(&subsystem->mutex);
		return -ENOENT;
	}

	nvmf_subsystem_remove_host(subsystem, host);
	pthread_mutex_unlock(&subsystem->mutex);

	return 0;
}

/* Context for disconnecting all qpairs that belong to a given host. */
struct nvmf_subsystem_disconnect_host_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	char *hostnqn;
	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
	void *cb_arg;
};

/* Completion after visiting every poll group: report status and clean up. */
static void
nvmf_subsystem_disconnect_host_fini(struct spdk_io_channel_iter *i, int status)
{
	struct nvmf_subsystem_disconnect_host_ctx *ctx;

	ctx = spdk_io_channel_iter_get_ctx(i);

	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->cb_arg, status);
	}
	free(ctx->hostnqn);
	free(ctx);
}

/*
 * On each poll group, disconnect every qpair whose controller belongs to the
 * target subsystem and matches the host NQN in the context.
 */
static void
nvmf_subsystem_disconnect_qpairs_by_host(struct spdk_io_channel_iter *i)
{
	struct nvmf_subsystem_disconnect_host_ctx *ctx;
	struct spdk_nvmf_poll_group *group;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_qpair *qpair, *tmp_qpair;
	struct spdk_nvmf_ctrlr *ctrlr;

	ctx = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) {
		ctrlr = qpair->ctrlr;

		if (ctrlr == NULL || ctrlr->subsys != ctx->subsystem) {
			continue;
		}

		if (strncmp(ctrlr->hostnqn, ctx->hostnqn, sizeof(ctrlr->hostnqn)) == 0) {
			/* Right now this does not wait for the queue pairs to actually disconnect. */
			spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
		}
	}
	spdk_for_each_channel_continue(i, 0);
}

/*
 * Asynchronously disconnect all qpairs opened by hostnqn against this
 * subsystem across all poll groups. cb_fn fires when all groups have been
 * visited (not necessarily when the qpairs have fully disconnected).
 */
int
spdk_nvmf_subsystem_disconnect_host(struct spdk_nvmf_subsystem *subsystem,
				    const char *hostnqn,
				    spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
				    void *cb_arg)
{
	struct nvmf_subsystem_disconnect_host_ctx *ctx;

	ctx = calloc(1, sizeof(struct nvmf_subsystem_disconnect_host_ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	ctx->subsystem = subsystem;
	/* NOTE(review): strdup result is not checked here; a NULL hostnqn copy
	 * would make the strncmp in the per-group handler dereference NULL. */
	ctx->hostnqn = strdup(hostnqn);
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(subsystem->tgt, nvmf_subsystem_disconnect_qpairs_by_host, ctx,
			      nvmf_subsystem_disconnect_host_fini);

	return 0;
}

int
spdk_nvmf_subsystem_set_allow_any_host(struct spdk_nvmf_subsystem *subsystem, bool allow_any_host)
{
	pthread_mutex_lock(&subsystem->mutex);
	subsystem->flags.allow_any_host = allow_any_host;
	nvmf_update_discovery_log(subsystem->tgt, NULL);
	pthread_mutex_unlock(&subsystem->mutex);

	return 0;
}

bool
spdk_nvmf_subsystem_get_allow_any_host(const struct spdk_nvmf_subsystem *subsystem)
{
	bool allow_any_host;
	struct spdk_nvmf_subsystem *sub;

	/* Technically, taking the mutex modifies data in the subsystem. But the const
	 * is still important to convey that this doesn't mutate any other data. Cast
	 * it away to work around this. */
	sub = (struct spdk_nvmf_subsystem *)subsystem;

	pthread_mutex_lock(&sub->mutex);
	allow_any_host = sub->flags.allow_any_host;
	pthread_mutex_unlock(&sub->mutex);

	return allow_any_host;
}

/*
 * Return true if the given host NQN may connect: either any host is
 * allowed, or the NQN is in the subsystem's allowed-host list.
 */
bool
spdk_nvmf_subsystem_host_allowed(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	bool allowed;

	if (!hostnqn) {
		return false;
	}

	pthread_mutex_lock(&subsystem->mutex);

	if (subsystem->flags.allow_any_host) {
		pthread_mutex_unlock(&subsystem->mutex);
		return true;
	}

	allowed = nvmf_subsystem_find_host(subsystem, hostnqn) != NULL;
	pthread_mutex_unlock(&subsystem->mutex);

	return allowed;
}

struct spdk_nvmf_host *
spdk_nvmf_subsystem_get_first_host(struct spdk_nvmf_subsystem *subsystem)
{
	return TAILQ_FIRST(&subsystem->hosts);
}


struct spdk_nvmf_host *
spdk_nvmf_subsystem_get_next_host(struct spdk_nvmf_subsystem *subsystem,
				  struct spdk_nvmf_host *prev_host)
{
	return TAILQ_NEXT(prev_host, link);
}

const char *
spdk_nvmf_host_get_nqn(const struct spdk_nvmf_host *host)
{
	return host->nqn;
}

/* Find a subsystem listener matching the given transport id, or NULL. */
struct spdk_nvmf_subsystem_listener *
nvmf_subsystem_find_listener(struct spdk_nvmf_subsystem *subsystem,
			     const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_subsystem_listener *listener;

	TAILQ_FOREACH(listener, &subsystem->listeners, link) {
		if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) {
			return listener;
		}
	}

	return NULL;
}

/**
 * Function to be called once the target is listening.
 *
 * \param ctx Context argument passed to this function.
 * \param status 0 if it completed successfully, or negative errno if it failed.
 */
static void
_nvmf_subsystem_add_listener_done(void *ctx, int status)
{
	struct spdk_nvmf_subsystem_listener *listener = ctx;

	if (status) {
		listener->cb_fn(listener->cb_arg, status);
		free(listener);
		return;
	}

	TAILQ_INSERT_HEAD(&listener->subsystem->listeners, listener, link);
	nvmf_update_discovery_log(listener->subsystem->tgt, NULL);
	listener->cb_fn(listener->cb_arg, status);
}

/*
 * Associate an existing transport listener with this subsystem. The
 * subsystem must be INACTIVE or PAUSED and the transport must already be
 * listening on trid. Result is always delivered through cb_fn.
 */
void
spdk_nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem,
				 struct spdk_nvme_transport_id *trid,
				 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
				 void *cb_arg)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_subsystem_listener *listener;
	struct spdk_nvmf_listener *tr_listener;
	int rc = 0;

	assert(cb_fn != NULL);

	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		cb_fn(cb_arg, -EAGAIN);
		return;
	}

	if (nvmf_subsystem_find_listener(subsystem, trid)) {
		/* Listener already exists in this subsystem */
		cb_fn(cb_arg, 0);
		return;
	}

	transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, trid->trstring);
	if (transport == NULL) {
		SPDK_ERRLOG("Unknown transport type %d\n", trid->trtype);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	tr_listener = nvmf_transport_find_listener(transport, trid);
	if (!tr_listener) {
		SPDK_ERRLOG("Cannot find transport listener for %s\n", trid->traddr);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	listener = calloc(1, sizeof(*listener));
	if (!listener) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	listener->trid = &tr_listener->trid;
	listener->transport = transport;
	listener->cb_fn = cb_fn;
	listener->cb_arg = cb_arg;
	listener->subsystem = subsystem;
	listener->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	if (transport->ops->listen_associate != NULL) {
		rc = transport->ops->listen_associate(transport, subsystem, trid);
	}

	_nvmf_subsystem_add_listener_done(listener, rc);
}

/*
 * Remove a listener from the subsystem (the transport keeps listening).
 * The subsystem must be INACTIVE or PAUSED.
 */
int
spdk_nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem,
				    const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_subsystem_listener *listener;

	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		return -EAGAIN;
	}

	listener = nvmf_subsystem_find_listener(subsystem, trid);
	if (listener == NULL) {
		return -ENOENT;
	}

	_nvmf_subsystem_remove_listener(subsystem, listener, false);

	return 0;
}

/* Remove every listener; when stop is true also stop the transports listening. */
void
nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem,
				    bool stop)
{
	struct spdk_nvmf_subsystem_listener *listener, *listener_tmp;

	TAILQ_FOREACH_SAFE(listener, &subsystem->listeners, link, listener_tmp) {
		_nvmf_subsystem_remove_listener(subsystem, listener, stop);
	}
}

/*
 * Return true if connections arriving on trid may reach this subsystem.
 * The discovery subsystem accepts connections from any listener.
 */
bool
spdk_nvmf_subsystem_listener_allowed(struct spdk_nvmf_subsystem *subsystem,
				     const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_subsystem_listener *listener;

	if (!strcmp(subsystem->subnqn, SPDK_NVMF_DISCOVERY_NQN)) {
		return true;
	}

	TAILQ_FOREACH(listener, &subsystem->listeners, link) {
		if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) {
			return true;
		}
	}

	return false;
}

struct spdk_nvmf_subsystem_listener *
spdk_nvmf_subsystem_get_first_listener(struct spdk_nvmf_subsystem *subsystem)
{
	return TAILQ_FIRST(&subsystem->listeners);
}

struct spdk_nvmf_subsystem_listener *
spdk_nvmf_subsystem_get_next_listener(struct spdk_nvmf_subsystem *subsystem,
				      struct spdk_nvmf_subsystem_listener *prev_listener)
{
	return TAILQ_NEXT(prev_listener, link);
}

const struct spdk_nvme_transport_id *
spdk_nvmf_subsystem_listener_get_trid(struct spdk_nvmf_subsystem_listener *listener)
{
	return listener->trid;
}

void
spdk_nvmf_subsystem_allow_any_listener(struct spdk_nvmf_subsystem *subsystem,
				       bool allow_any_listener)
{
	subsystem->flags.allow_any_listener = allow_any_listener;
}

/* NOTE: "subsytem" typo is part of the public API name and must be kept. */
bool
spdk_nvmf_subsytem_any_listener_allowed(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->flags.allow_any_listener;
}


/* Context for propagating a namespace change to every poll group. */
struct subsystem_update_ns_ctx {
	struct spdk_nvmf_subsystem *subsystem;

	spdk_nvmf_subsystem_state_change_done cb_fn;
	void *cb_arg;
};

/* Completion after every poll group has refreshed its view of the subsystem. */
static void
subsystem_update_ns_done(struct spdk_io_channel_iter *i, int status)
{
	struct subsystem_update_ns_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status);
	}
	free(ctx);
}

/* Refresh one poll group's namespace state for the subsystem. */
static void
subsystem_update_ns_on_pg(struct spdk_io_channel_iter *i)
{
	int rc;
	struct subsystem_update_ns_ctx *ctx;
	struct spdk_nvmf_poll_group *group;
	struct spdk_nvmf_subsystem *subsystem;

	ctx = spdk_io_channel_iter_get_ctx(i);
	group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i));
	subsystem = ctx->subsystem;

	rc = nvmf_poll_group_update_subsystem(group, subsystem);
	spdk_for_each_channel_continue(i, rc);
}

/* Kick off a namespace update across all poll groups; cpl runs when done. */
static int
nvmf_subsystem_update_ns(struct spdk_nvmf_subsystem *subsystem, spdk_channel_for_each_cpl cpl,
			 void *ctx)
{
	spdk_for_each_channel(subsystem->tgt,
			      subsystem_update_ns_on_pg,
			      ctx,
			      cpl);

	return 0;
}

/* Notify every controller on the subsystem that the given nsid changed. */
static void
nvmf_subsystem_ns_changed(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	struct spdk_nvmf_ctrlr *ctrlr;

	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
		nvmf_ctrlr_ns_changed(ctrlr, nsid);
	}
}

/*
 * Remove namespace nsid from the subsystem: release registrants, the bdev
 * claim and descriptor, and notify controllers. The subsystem must be
 * INACTIVE or PAUSED. Returns 0 on success, -1 on invalid state/nsid.
 */
int
spdk_nvmf_subsystem_remove_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_registrant *reg, *reg_tmp;

	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		assert(false);
		return -1;
	}

	if (nsid == 0 || nsid > subsystem->max_nsid) {
		return -1;
	}

	/* nsid is 1-based; index into the ns array is nsid - 1. */
	ns = subsystem->ns[nsid - 1];
	if (!ns) {
		return -1;
	}

	subsystem->ns[nsid - 1] = NULL;

	/* Free all reservation registrants attached to this namespace. */
	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, reg_tmp) {
		TAILQ_REMOVE(&ns->registrants, reg, link);
		free(reg);
	}
	spdk_bdev_module_release_bdev(ns->bdev);
	spdk_bdev_close(ns->desc);
	if (ns->ptpl_file) {
		free(ns->ptpl_file);
	}
	free(ns);

	nvmf_subsystem_ns_changed(subsystem, nsid);

	return 0;
}

/* Context for async namespace hot-remove / resize handling. */
struct subsystem_ns_change_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	spdk_nvmf_subsystem_state_change_done cb_fn;
	uint32_t nsid;
};

/*
 * Runs once the subsystem has been paused for a hot-remove: remove the
 * namespace, then resume the subsystem.
 */
static void
_nvmf_ns_hot_remove(struct spdk_nvmf_subsystem *subsystem,
		    void *cb_arg, int status)
{
	struct subsystem_ns_change_ctx *ctx = cb_arg;
	int rc;

	rc = spdk_nvmf_subsystem_remove_ns(subsystem, ctx->nsid);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to make changes to NVME-oF subsystem with id: %u\n", subsystem->id);
	}

	spdk_nvmf_subsystem_resume(subsystem, NULL, NULL);

	free(ctx);
}

/*
 * Thread-message retry path: keep attempting to pause the subsystem until
 * the in-flight state change (-EBUSY) completes.
 */
static void
nvmf_ns_change_msg(void *ns_ctx)
{
	struct subsystem_ns_change_ctx *ctx = ns_ctx;
	int rc;

	rc = spdk_nvmf_subsystem_pause(ctx->subsystem, ctx->cb_fn, ctx);
	if (rc) {
		if (rc == -EBUSY) {
			/* Try again, this is not a permanent situation. */
			spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ctx);
		} else {
			free(ctx);
			SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n");
		}
	}
}

/*
 * bdev hot-remove callback: pause the subsystem, remove the namespace in
 * the pause completion, then resume.
 */
static void
nvmf_ns_hot_remove(void *remove_ctx)
{
	struct spdk_nvmf_ns *ns = remove_ctx;
	struct subsystem_ns_change_ctx *ns_ctx;
	int rc;

	/* We have to allocate a new context because this op
	 * is asynchronous and we could lose the ns in the middle.
	 */
	ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx));
	if (!ns_ctx) {
		SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n");
		return;
	}

	ns_ctx->subsystem = ns->subsystem;
	ns_ctx->nsid = ns->opts.nsid;
	ns_ctx->cb_fn = _nvmf_ns_hot_remove;

	rc = spdk_nvmf_subsystem_pause(ns->subsystem, _nvmf_ns_hot_remove, ns_ctx);
	if (rc) {
		if (rc == -EBUSY) {
			/* Try again, this is not a permanent situation. */
			spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx);
		} else {
			SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n");
			free(ns_ctx);
		}
	}
}

/*
 * Runs once the subsystem has been paused for a resize: notify controllers
 * of the namespace change, then resume.
 */
static void
_nvmf_ns_resize(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	struct subsystem_ns_change_ctx *ctx = cb_arg;

	nvmf_subsystem_ns_changed(subsystem, ctx->nsid);
	spdk_nvmf_subsystem_resume(subsystem, NULL, NULL);

	free(ctx);
}

/* bdev resize callback: pause, propagate the size change, then resume. */
static void
nvmf_ns_resize(void *event_ctx)
{
	struct spdk_nvmf_ns *ns = event_ctx;
	struct subsystem_ns_change_ctx *ns_ctx;
	int rc;

	/* We have to allocate a new context because this op
	 * is asynchronous and we could lose the ns in the middle.
1276 */ 1277 ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx)); 1278 if (!ns_ctx) { 1279 SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n"); 1280 return; 1281 } 1282 1283 ns_ctx->subsystem = ns->subsystem; 1284 ns_ctx->nsid = ns->opts.nsid; 1285 ns_ctx->cb_fn = _nvmf_ns_resize; 1286 1287 rc = spdk_nvmf_subsystem_pause(ns->subsystem, _nvmf_ns_resize, ns_ctx); 1288 if (rc) { 1289 if (rc == -EBUSY) { 1290 /* Try again, this is not a permanent situation. */ 1291 spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx); 1292 } 1293 SPDK_ERRLOG("Unable to pause subsystem to process namespace resize!\n"); 1294 free(ns_ctx); 1295 } 1296 } 1297 1298 static void 1299 nvmf_ns_event(enum spdk_bdev_event_type type, 1300 struct spdk_bdev *bdev, 1301 void *event_ctx) 1302 { 1303 SPDK_DEBUGLOG(nvmf, "Bdev event: type %d, name %s, subsystem_id %d, ns_id %d\n", 1304 type, 1305 bdev->name, 1306 ((struct spdk_nvmf_ns *)event_ctx)->subsystem->id, 1307 ((struct spdk_nvmf_ns *)event_ctx)->nsid); 1308 1309 switch (type) { 1310 case SPDK_BDEV_EVENT_REMOVE: 1311 nvmf_ns_hot_remove(event_ctx); 1312 break; 1313 case SPDK_BDEV_EVENT_RESIZE: 1314 nvmf_ns_resize(event_ctx); 1315 break; 1316 default: 1317 SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type); 1318 break; 1319 } 1320 } 1321 1322 void 1323 spdk_nvmf_ns_opts_get_defaults(struct spdk_nvmf_ns_opts *opts, size_t opts_size) 1324 { 1325 /* All current fields are set to 0 by default. */ 1326 memset(opts, 0, opts_size); 1327 } 1328 1329 /* Dummy bdev module used to to claim bdevs. 
*/ 1330 static struct spdk_bdev_module ns_bdev_module = { 1331 .name = "NVMe-oF Target", 1332 }; 1333 1334 static int 1335 nvmf_ns_load_reservation(const char *file, struct spdk_nvmf_reservation_info *info); 1336 static int 1337 nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info); 1338 1339 uint32_t 1340 spdk_nvmf_subsystem_add_ns_ext(struct spdk_nvmf_subsystem *subsystem, const char *bdev_name, 1341 const struct spdk_nvmf_ns_opts *user_opts, size_t opts_size, 1342 const char *ptpl_file) 1343 { 1344 struct spdk_nvmf_ns_opts opts; 1345 struct spdk_nvmf_ns *ns; 1346 struct spdk_nvmf_reservation_info info = {0}; 1347 int rc; 1348 1349 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 1350 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 1351 return 0; 1352 } 1353 1354 spdk_nvmf_ns_opts_get_defaults(&opts, sizeof(opts)); 1355 if (user_opts) { 1356 memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size)); 1357 } 1358 1359 if (opts.nsid == SPDK_NVME_GLOBAL_NS_TAG) { 1360 SPDK_ERRLOG("Invalid NSID %" PRIu32 "\n", opts.nsid); 1361 return 0; 1362 } 1363 1364 if (opts.nsid == 0) { 1365 /* 1366 * NSID not specified - find a free index. 1367 * 1368 * If no free slots are found, opts.nsid will be subsystem->max_nsid + 1, which will 1369 * expand max_nsid if possible. 1370 */ 1371 for (opts.nsid = 1; opts.nsid <= subsystem->max_nsid; opts.nsid++) { 1372 if (_nvmf_subsystem_get_ns(subsystem, opts.nsid) == NULL) { 1373 break; 1374 } 1375 } 1376 } 1377 1378 if (_nvmf_subsystem_get_ns(subsystem, opts.nsid)) { 1379 SPDK_ERRLOG("Requested NSID %" PRIu32 " already in use\n", opts.nsid); 1380 return 0; 1381 } 1382 1383 if (opts.nsid > subsystem->max_nsid) { 1384 struct spdk_nvmf_ns **new_ns_array; 1385 1386 /* If MaxNamespaces was specified, we can't extend max_nsid beyond it. 
*/ 1387 if (subsystem->max_allowed_nsid > 0 && opts.nsid > subsystem->max_allowed_nsid) { 1388 SPDK_ERRLOG("Can't extend NSID range above MaxNamespaces\n"); 1389 return 0; 1390 } 1391 1392 /* If a controller is connected, we can't change NN. */ 1393 if (!TAILQ_EMPTY(&subsystem->ctrlrs)) { 1394 SPDK_ERRLOG("Can't extend NSID range while controllers are connected\n"); 1395 return 0; 1396 } 1397 1398 new_ns_array = realloc(subsystem->ns, sizeof(struct spdk_nvmf_ns *) * opts.nsid); 1399 if (new_ns_array == NULL) { 1400 SPDK_ERRLOG("Memory allocation error while resizing namespace array.\n"); 1401 return 0; 1402 } 1403 1404 memset(new_ns_array + subsystem->max_nsid, 0, 1405 sizeof(struct spdk_nvmf_ns *) * (opts.nsid - subsystem->max_nsid)); 1406 subsystem->ns = new_ns_array; 1407 subsystem->max_nsid = opts.nsid; 1408 } 1409 1410 ns = calloc(1, sizeof(*ns)); 1411 if (ns == NULL) { 1412 SPDK_ERRLOG("Namespace allocation failed\n"); 1413 return 0; 1414 } 1415 1416 rc = spdk_bdev_open_ext(bdev_name, true, nvmf_ns_event, ns, &ns->desc); 1417 if (rc != 0) { 1418 SPDK_ERRLOG("Subsystem %s: bdev %s cannot be opened, error=%d\n", 1419 subsystem->subnqn, bdev_name, rc); 1420 free(ns); 1421 return 0; 1422 } 1423 1424 ns->bdev = spdk_bdev_desc_get_bdev(ns->desc); 1425 1426 if (spdk_bdev_get_md_size(ns->bdev) != 0 && !spdk_bdev_is_md_interleaved(ns->bdev)) { 1427 SPDK_ERRLOG("Can't attach bdev with separate metadata.\n"); 1428 spdk_bdev_close(ns->desc); 1429 free(ns); 1430 return 0; 1431 } 1432 1433 rc = spdk_bdev_module_claim_bdev(ns->bdev, ns->desc, &ns_bdev_module); 1434 if (rc != 0) { 1435 spdk_bdev_close(ns->desc); 1436 free(ns); 1437 return 0; 1438 } 1439 1440 if (spdk_mem_all_zero(&opts.uuid, sizeof(opts.uuid))) { 1441 opts.uuid = *spdk_bdev_get_uuid(ns->bdev); 1442 } 1443 1444 ns->opts = opts; 1445 ns->subsystem = subsystem; 1446 subsystem->ns[opts.nsid - 1] = ns; 1447 ns->nsid = opts.nsid; 1448 TAILQ_INIT(&ns->registrants); 1449 1450 if (ptpl_file) { 1451 rc = 
nvmf_ns_load_reservation(ptpl_file, &info); 1452 if (!rc) { 1453 rc = nvmf_ns_reservation_restore(ns, &info); 1454 if (rc) { 1455 SPDK_ERRLOG("Subsystem restore reservation failed\n"); 1456 subsystem->ns[opts.nsid - 1] = NULL; 1457 spdk_bdev_module_release_bdev(ns->bdev); 1458 spdk_bdev_close(ns->desc); 1459 free(ns); 1460 return 0; 1461 } 1462 } 1463 ns->ptpl_file = strdup(ptpl_file); 1464 } 1465 1466 SPDK_DEBUGLOG(nvmf, "Subsystem %s: bdev %s assigned nsid %" PRIu32 "\n", 1467 spdk_nvmf_subsystem_get_nqn(subsystem), 1468 bdev_name, 1469 opts.nsid); 1470 1471 nvmf_subsystem_ns_changed(subsystem, opts.nsid); 1472 1473 return opts.nsid; 1474 } 1475 1476 uint32_t 1477 spdk_nvmf_subsystem_add_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_bdev *bdev, 1478 const struct spdk_nvmf_ns_opts *user_opts, size_t opts_size, 1479 const char *ptpl_file) 1480 { 1481 return spdk_nvmf_subsystem_add_ns_ext(subsystem, spdk_bdev_get_name(bdev), 1482 user_opts, opts_size, ptpl_file); 1483 } 1484 1485 static uint32_t 1486 nvmf_subsystem_get_next_allocated_nsid(struct spdk_nvmf_subsystem *subsystem, 1487 uint32_t prev_nsid) 1488 { 1489 uint32_t nsid; 1490 1491 if (prev_nsid >= subsystem->max_nsid) { 1492 return 0; 1493 } 1494 1495 for (nsid = prev_nsid + 1; nsid <= subsystem->max_nsid; nsid++) { 1496 if (subsystem->ns[nsid - 1]) { 1497 return nsid; 1498 } 1499 } 1500 1501 return 0; 1502 } 1503 1504 struct spdk_nvmf_ns * 1505 spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem) 1506 { 1507 uint32_t first_nsid; 1508 1509 first_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, 0); 1510 return _nvmf_subsystem_get_ns(subsystem, first_nsid); 1511 } 1512 1513 struct spdk_nvmf_ns * 1514 spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, 1515 struct spdk_nvmf_ns *prev_ns) 1516 { 1517 uint32_t next_nsid; 1518 1519 next_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, prev_ns->opts.nsid); 1520 return _nvmf_subsystem_get_ns(subsystem, 
next_nsid); 1521 } 1522 1523 struct spdk_nvmf_ns * 1524 spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid) 1525 { 1526 return _nvmf_subsystem_get_ns(subsystem, nsid); 1527 } 1528 1529 uint32_t 1530 spdk_nvmf_ns_get_id(const struct spdk_nvmf_ns *ns) 1531 { 1532 return ns->opts.nsid; 1533 } 1534 1535 struct spdk_bdev * 1536 spdk_nvmf_ns_get_bdev(struct spdk_nvmf_ns *ns) 1537 { 1538 return ns->bdev; 1539 } 1540 1541 void 1542 spdk_nvmf_ns_get_opts(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_ns_opts *opts, 1543 size_t opts_size) 1544 { 1545 memset(opts, 0, opts_size); 1546 memcpy(opts, &ns->opts, spdk_min(sizeof(ns->opts), opts_size)); 1547 } 1548 1549 const char * 1550 spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem) 1551 { 1552 return subsystem->sn; 1553 } 1554 1555 int 1556 spdk_nvmf_subsystem_set_sn(struct spdk_nvmf_subsystem *subsystem, const char *sn) 1557 { 1558 size_t len, max_len; 1559 1560 max_len = sizeof(subsystem->sn) - 1; 1561 len = strlen(sn); 1562 if (len > max_len) { 1563 SPDK_DEBUGLOG(nvmf, "Invalid sn \"%s\": length %zu > max %zu\n", 1564 sn, len, max_len); 1565 return -1; 1566 } 1567 1568 if (!nvmf_valid_ascii_string(sn, len)) { 1569 SPDK_DEBUGLOG(nvmf, "Non-ASCII sn\n"); 1570 SPDK_LOGDUMP(nvmf, "sn", sn, len); 1571 return -1; 1572 } 1573 1574 snprintf(subsystem->sn, sizeof(subsystem->sn), "%s", sn); 1575 1576 return 0; 1577 } 1578 1579 const char * 1580 spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem) 1581 { 1582 return subsystem->mn; 1583 } 1584 1585 int 1586 spdk_nvmf_subsystem_set_mn(struct spdk_nvmf_subsystem *subsystem, const char *mn) 1587 { 1588 size_t len, max_len; 1589 1590 if (mn == NULL) { 1591 mn = MODEL_NUMBER_DEFAULT; 1592 } 1593 max_len = sizeof(subsystem->mn) - 1; 1594 len = strlen(mn); 1595 if (len > max_len) { 1596 SPDK_DEBUGLOG(nvmf, "Invalid mn \"%s\": length %zu > max %zu\n", 1597 mn, len, max_len); 1598 return -1; 1599 } 1600 1601 if 
(!nvmf_valid_ascii_string(mn, len)) { 1602 SPDK_DEBUGLOG(nvmf, "Non-ASCII mn\n"); 1603 SPDK_LOGDUMP(nvmf, "mn", mn, len); 1604 return -1; 1605 } 1606 1607 snprintf(subsystem->mn, sizeof(subsystem->mn), "%s", mn); 1608 1609 return 0; 1610 } 1611 1612 const char * 1613 spdk_nvmf_subsystem_get_nqn(const struct spdk_nvmf_subsystem *subsystem) 1614 { 1615 return subsystem->subnqn; 1616 } 1617 1618 enum spdk_nvmf_subtype spdk_nvmf_subsystem_get_type(struct spdk_nvmf_subsystem *subsystem) 1619 { 1620 return subsystem->subtype; 1621 } 1622 1623 uint32_t 1624 spdk_nvmf_subsystem_get_max_nsid(struct spdk_nvmf_subsystem *subsystem) 1625 { 1626 return subsystem->max_nsid; 1627 } 1628 1629 static uint16_t 1630 nvmf_subsystem_gen_cntlid(struct spdk_nvmf_subsystem *subsystem) 1631 { 1632 int count; 1633 1634 /* 1635 * In the worst case, we might have to try all CNTLID values between 1 and 0xFFF0 - 1 1636 * before we find one that is unused (or find that all values are in use). 1637 */ 1638 for (count = 0; count < 0xFFF0 - 1; count++) { 1639 subsystem->next_cntlid++; 1640 if (subsystem->next_cntlid >= 0xFFF0) { 1641 /* The spec reserves cntlid values in the range FFF0h to FFFFh. */ 1642 subsystem->next_cntlid = 1; 1643 } 1644 1645 /* Check if a controller with this cntlid currently exists. */ 1646 if (nvmf_subsystem_get_ctrlr(subsystem, subsystem->next_cntlid) == NULL) { 1647 /* Found unused cntlid */ 1648 return subsystem->next_cntlid; 1649 } 1650 } 1651 1652 /* All valid cntlid values are in use. 
*/ 1653 return 0xFFFF; 1654 } 1655 1656 int 1657 nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr) 1658 { 1659 ctrlr->cntlid = nvmf_subsystem_gen_cntlid(subsystem); 1660 if (ctrlr->cntlid == 0xFFFF) { 1661 /* Unable to get a cntlid */ 1662 SPDK_ERRLOG("Reached max simultaneous ctrlrs\n"); 1663 return -EBUSY; 1664 } 1665 1666 TAILQ_INSERT_TAIL(&subsystem->ctrlrs, ctrlr, link); 1667 1668 return 0; 1669 } 1670 1671 void 1672 nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem, 1673 struct spdk_nvmf_ctrlr *ctrlr) 1674 { 1675 assert(subsystem == ctrlr->subsys); 1676 TAILQ_REMOVE(&subsystem->ctrlrs, ctrlr, link); 1677 } 1678 1679 struct spdk_nvmf_ctrlr * 1680 nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid) 1681 { 1682 struct spdk_nvmf_ctrlr *ctrlr; 1683 1684 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 1685 if (ctrlr->cntlid == cntlid) { 1686 return ctrlr; 1687 } 1688 } 1689 1690 return NULL; 1691 } 1692 1693 uint32_t 1694 spdk_nvmf_subsystem_get_max_namespaces(const struct spdk_nvmf_subsystem *subsystem) 1695 { 1696 return subsystem->max_allowed_nsid; 1697 } 1698 1699 struct _nvmf_ns_registrant { 1700 uint64_t rkey; 1701 char *host_uuid; 1702 }; 1703 1704 struct _nvmf_ns_registrants { 1705 size_t num_regs; 1706 struct _nvmf_ns_registrant reg[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 1707 }; 1708 1709 struct _nvmf_ns_reservation { 1710 bool ptpl_activated; 1711 enum spdk_nvme_reservation_type rtype; 1712 uint64_t crkey; 1713 char *bdev_uuid; 1714 char *holder_uuid; 1715 struct _nvmf_ns_registrants regs; 1716 }; 1717 1718 static const struct spdk_json_object_decoder nvmf_ns_pr_reg_decoders[] = { 1719 {"rkey", offsetof(struct _nvmf_ns_registrant, rkey), spdk_json_decode_uint64}, 1720 {"host_uuid", offsetof(struct _nvmf_ns_registrant, host_uuid), spdk_json_decode_string}, 1721 }; 1722 1723 static int 1724 nvmf_decode_ns_pr_reg(const struct spdk_json_val *val, void *out) 1725 { 1726 struct 
_nvmf_ns_registrant *reg = out; 1727 1728 return spdk_json_decode_object(val, nvmf_ns_pr_reg_decoders, 1729 SPDK_COUNTOF(nvmf_ns_pr_reg_decoders), reg); 1730 } 1731 1732 static int 1733 nvmf_decode_ns_pr_regs(const struct spdk_json_val *val, void *out) 1734 { 1735 struct _nvmf_ns_registrants *regs = out; 1736 1737 return spdk_json_decode_array(val, nvmf_decode_ns_pr_reg, regs->reg, 1738 SPDK_NVMF_MAX_NUM_REGISTRANTS, ®s->num_regs, 1739 sizeof(struct _nvmf_ns_registrant)); 1740 } 1741 1742 static const struct spdk_json_object_decoder nvmf_ns_pr_decoders[] = { 1743 {"ptpl", offsetof(struct _nvmf_ns_reservation, ptpl_activated), spdk_json_decode_bool, true}, 1744 {"rtype", offsetof(struct _nvmf_ns_reservation, rtype), spdk_json_decode_uint32, true}, 1745 {"crkey", offsetof(struct _nvmf_ns_reservation, crkey), spdk_json_decode_uint64, true}, 1746 {"bdev_uuid", offsetof(struct _nvmf_ns_reservation, bdev_uuid), spdk_json_decode_string}, 1747 {"holder_uuid", offsetof(struct _nvmf_ns_reservation, holder_uuid), spdk_json_decode_string, true}, 1748 {"registrants", offsetof(struct _nvmf_ns_reservation, regs), nvmf_decode_ns_pr_regs}, 1749 }; 1750 1751 static int 1752 nvmf_ns_load_reservation(const char *file, struct spdk_nvmf_reservation_info *info) 1753 { 1754 FILE *fd; 1755 size_t json_size; 1756 ssize_t values_cnt, rc; 1757 void *json = NULL, *end; 1758 struct spdk_json_val *values = NULL; 1759 struct _nvmf_ns_reservation res = {}; 1760 uint32_t i; 1761 1762 fd = fopen(file, "r"); 1763 /* It's not an error if the file does not exist */ 1764 if (!fd) { 1765 SPDK_NOTICELOG("File %s does not exist\n", file); 1766 return -ENOENT; 1767 } 1768 1769 /* Load all persist file contents into a local buffer */ 1770 json = spdk_posix_file_load(fd, &json_size); 1771 fclose(fd); 1772 if (!json) { 1773 SPDK_ERRLOG("Load persit file %s failed\n", file); 1774 return -ENOMEM; 1775 } 1776 1777 rc = spdk_json_parse(json, json_size, NULL, 0, &end, 0); 1778 if (rc < 0) { 1779 
SPDK_NOTICELOG("Parsing JSON configuration failed (%zd)\n", rc); 1780 goto exit; 1781 } 1782 1783 values_cnt = rc; 1784 values = calloc(values_cnt, sizeof(struct spdk_json_val)); 1785 if (values == NULL) { 1786 goto exit; 1787 } 1788 1789 rc = spdk_json_parse(json, json_size, values, values_cnt, &end, 0); 1790 if (rc != values_cnt) { 1791 SPDK_ERRLOG("Parsing JSON configuration failed (%zd)\n", rc); 1792 goto exit; 1793 } 1794 1795 /* Decode json */ 1796 if (spdk_json_decode_object(values, nvmf_ns_pr_decoders, 1797 SPDK_COUNTOF(nvmf_ns_pr_decoders), 1798 &res)) { 1799 SPDK_ERRLOG("Invalid objects in the persist file %s\n", file); 1800 rc = -EINVAL; 1801 goto exit; 1802 } 1803 1804 if (res.regs.num_regs > SPDK_NVMF_MAX_NUM_REGISTRANTS) { 1805 SPDK_ERRLOG("Can only support up to %u registrants\n", SPDK_NVMF_MAX_NUM_REGISTRANTS); 1806 rc = -ERANGE; 1807 goto exit; 1808 } 1809 1810 rc = 0; 1811 info->ptpl_activated = res.ptpl_activated; 1812 info->rtype = res.rtype; 1813 info->crkey = res.crkey; 1814 snprintf(info->bdev_uuid, sizeof(info->bdev_uuid), "%s", res.bdev_uuid); 1815 snprintf(info->holder_uuid, sizeof(info->holder_uuid), "%s", res.holder_uuid); 1816 info->num_regs = res.regs.num_regs; 1817 for (i = 0; i < res.regs.num_regs; i++) { 1818 info->registrants[i].rkey = res.regs.reg[i].rkey; 1819 snprintf(info->registrants[i].host_uuid, sizeof(info->registrants[i].host_uuid), "%s", 1820 res.regs.reg[i].host_uuid); 1821 } 1822 1823 exit: 1824 free(json); 1825 free(values); 1826 free(res.bdev_uuid); 1827 free(res.holder_uuid); 1828 for (i = 0; i < res.regs.num_regs; i++) { 1829 free(res.regs.reg[i].host_uuid); 1830 } 1831 1832 return rc; 1833 } 1834 1835 static bool 1836 nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns); 1837 1838 static int 1839 nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info) 1840 { 1841 uint32_t i; 1842 struct spdk_nvmf_registrant *reg, *holder = NULL; 1843 struct spdk_uuid bdev_uuid, 
holder_uuid; 1844 1845 SPDK_DEBUGLOG(nvmf, "NSID %u, PTPL %u, Number of registrants %u\n", 1846 ns->nsid, info->ptpl_activated, info->num_regs); 1847 1848 /* it's not an error */ 1849 if (!info->ptpl_activated || !info->num_regs) { 1850 return 0; 1851 } 1852 1853 spdk_uuid_parse(&bdev_uuid, info->bdev_uuid); 1854 if (spdk_uuid_compare(&bdev_uuid, spdk_bdev_get_uuid(ns->bdev))) { 1855 SPDK_ERRLOG("Existing bdev UUID is not same with configuration file\n"); 1856 return -EINVAL; 1857 } 1858 1859 ns->crkey = info->crkey; 1860 ns->rtype = info->rtype; 1861 ns->ptpl_activated = info->ptpl_activated; 1862 spdk_uuid_parse(&holder_uuid, info->holder_uuid); 1863 1864 SPDK_DEBUGLOG(nvmf, "Bdev UUID %s\n", info->bdev_uuid); 1865 if (info->rtype) { 1866 SPDK_DEBUGLOG(nvmf, "Holder UUID %s, RTYPE %u, RKEY 0x%"PRIx64"\n", 1867 info->holder_uuid, info->rtype, info->crkey); 1868 } 1869 1870 for (i = 0; i < info->num_regs; i++) { 1871 reg = calloc(1, sizeof(*reg)); 1872 if (!reg) { 1873 return -ENOMEM; 1874 } 1875 spdk_uuid_parse(®->hostid, info->registrants[i].host_uuid); 1876 reg->rkey = info->registrants[i].rkey; 1877 TAILQ_INSERT_TAIL(&ns->registrants, reg, link); 1878 if (!spdk_uuid_compare(&holder_uuid, ®->hostid)) { 1879 holder = reg; 1880 } 1881 SPDK_DEBUGLOG(nvmf, "Registrant RKEY 0x%"PRIx64", Host UUID %s\n", 1882 info->registrants[i].rkey, info->registrants[i].host_uuid); 1883 } 1884 1885 if (nvmf_ns_reservation_all_registrants_type(ns)) { 1886 ns->holder = TAILQ_FIRST(&ns->registrants); 1887 } else { 1888 ns->holder = holder; 1889 } 1890 1891 return 0; 1892 } 1893 1894 static int 1895 nvmf_ns_json_write_cb(void *cb_ctx, const void *data, size_t size) 1896 { 1897 char *file = cb_ctx; 1898 size_t rc; 1899 FILE *fd; 1900 1901 fd = fopen(file, "w"); 1902 if (!fd) { 1903 SPDK_ERRLOG("Can't open file %s for write\n", file); 1904 return -ENOENT; 1905 } 1906 rc = fwrite(data, 1, size, fd); 1907 fclose(fd); 1908 1909 return rc == size ? 
0 : -1; 1910 } 1911 1912 static int 1913 nvmf_ns_reservation_update(const char *file, struct spdk_nvmf_reservation_info *info) 1914 { 1915 struct spdk_json_write_ctx *w; 1916 uint32_t i; 1917 int rc = 0; 1918 1919 w = spdk_json_write_begin(nvmf_ns_json_write_cb, (void *)file, 0); 1920 if (w == NULL) { 1921 return -ENOMEM; 1922 } 1923 /* clear the configuration file */ 1924 if (!info->ptpl_activated) { 1925 goto exit; 1926 } 1927 1928 spdk_json_write_object_begin(w); 1929 spdk_json_write_named_bool(w, "ptpl", info->ptpl_activated); 1930 spdk_json_write_named_uint32(w, "rtype", info->rtype); 1931 spdk_json_write_named_uint64(w, "crkey", info->crkey); 1932 spdk_json_write_named_string(w, "bdev_uuid", info->bdev_uuid); 1933 spdk_json_write_named_string(w, "holder_uuid", info->holder_uuid); 1934 1935 spdk_json_write_named_array_begin(w, "registrants"); 1936 for (i = 0; i < info->num_regs; i++) { 1937 spdk_json_write_object_begin(w); 1938 spdk_json_write_named_uint64(w, "rkey", info->registrants[i].rkey); 1939 spdk_json_write_named_string(w, "host_uuid", info->registrants[i].host_uuid); 1940 spdk_json_write_object_end(w); 1941 } 1942 spdk_json_write_array_end(w); 1943 spdk_json_write_object_end(w); 1944 1945 exit: 1946 rc = spdk_json_write_end(w); 1947 return rc; 1948 } 1949 1950 static int 1951 nvmf_ns_update_reservation_info(struct spdk_nvmf_ns *ns) 1952 { 1953 struct spdk_nvmf_reservation_info info; 1954 struct spdk_nvmf_registrant *reg, *tmp; 1955 uint32_t i = 0; 1956 1957 assert(ns != NULL); 1958 1959 if (!ns->bdev || !ns->ptpl_file) { 1960 return 0; 1961 } 1962 1963 memset(&info, 0, sizeof(info)); 1964 spdk_uuid_fmt_lower(info.bdev_uuid, sizeof(info.bdev_uuid), spdk_bdev_get_uuid(ns->bdev)); 1965 1966 if (ns->rtype) { 1967 info.rtype = ns->rtype; 1968 info.crkey = ns->crkey; 1969 if (!nvmf_ns_reservation_all_registrants_type(ns)) { 1970 assert(ns->holder != NULL); 1971 spdk_uuid_fmt_lower(info.holder_uuid, sizeof(info.holder_uuid), &ns->holder->hostid); 1972 } 1973 
	}

	/* Snapshot every registrant's host UUID and key into `info`. */
	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		spdk_uuid_fmt_lower(info.registrants[i].host_uuid, sizeof(info.registrants[i].host_uuid),
				    &reg->hostid);
		info.registrants[i++].rkey = reg->rkey;
	}

	info.num_regs = i;
	info.ptpl_activated = ns->ptpl_activated;

	return nvmf_ns_reservation_update(ns->ptpl_file, &info);
}

/* Find the registrant whose Host Identifier equals `uuid`, or NULL. */
static struct spdk_nvmf_registrant *
nvmf_ns_reservation_get_registrant(struct spdk_nvmf_ns *ns,
				   struct spdk_uuid *uuid)
{
	struct spdk_nvmf_registrant *reg, *tmp;

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		/* spdk_uuid_compare() returns 0 on equality */
		if (!spdk_uuid_compare(&reg->hostid, uuid)) {
			return reg;
		}
	}

	return NULL;
}

/* Generate reservation notice log to registered HostID controllers */
static void
nvmf_subsystem_gen_ctrlr_notification(struct spdk_nvmf_subsystem *subsystem,
				      struct spdk_nvmf_ns *ns,
				      struct spdk_uuid *hostid_list,
				      uint32_t num_hostid,
				      enum spdk_nvme_reservation_notification_log_page_type type)
{
	struct spdk_nvmf_ctrlr *ctrlr;
	uint32_t i;

	/* Post the notice to every controller whose hostid is in the list. */
	for (i = 0; i < num_hostid; i++) {
		TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
			if (!spdk_uuid_compare(&ctrlr->hostid, &hostid_list[i])) {
				nvmf_ctrlr_reservation_notice_log(ctrlr, ns, type);
			}
		}
	}
}

/* Get all registrants' hostid other than the controller who issued the command */
static uint32_t
nvmf_ns_reservation_get_all_other_hostid(struct spdk_nvmf_ns *ns,
		struct spdk_uuid *hostid_list,
		uint32_t max_num_hostid,
		struct spdk_uuid *current_hostid)
{
	struct spdk_nvmf_registrant *reg, *tmp;
	uint32_t num_hostid = 0;

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		if (spdk_uuid_compare(&reg->hostid, current_hostid)) {
			if (num_hostid == max_num_hostid) {
				/* caller-provided buffer too small; should not happen */
				assert(false);
				return max_num_hostid;
			}
			hostid_list[num_hostid++] = reg->hostid;
		}
	}

	return num_hostid;
}

/* Calculate the unregistered HostID list according to list
 * prior to execute preempt command and list after executing
 * preempt command.
 */
static uint32_t
nvmf_ns_reservation_get_unregistered_hostid(struct spdk_uuid *old_hostid_list,
		uint32_t old_num_hostid,
		struct spdk_uuid *remaining_hostid_list,
		uint32_t remaining_num_hostid)
{
	struct spdk_uuid temp_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint32_t i, j, num_hostid = 0;
	bool found;

	/* Nothing remains: every old hostid was unregistered. */
	if (!remaining_num_hostid) {
		return old_num_hostid;
	}

	/* Keep each old hostid that is absent from the remaining list. */
	for (i = 0; i < old_num_hostid; i++) {
		found = false;
		for (j = 0; j < remaining_num_hostid; j++) {
			if (!spdk_uuid_compare(&old_hostid_list[i], &remaining_hostid_list[j])) {
				found = true;
				break;
			}
		}
		if (!found) {
			spdk_uuid_copy(&temp_hostid_list[num_hostid++], &old_hostid_list[i]);
		}
	}

	/* Result is written back into old_hostid_list (caller's buffer). */
	if (num_hostid) {
		memcpy(old_hostid_list, temp_hostid_list, sizeof(struct spdk_uuid) * num_hostid);
	}

	return num_hostid;
}

/* current reservation type is all registrants or not */
static bool
nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns)
{
	return (ns->rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS ||
		ns->rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}

/* current registrant is reservation holder or not */
static bool
nvmf_ns_reservation_registrant_is_holder(struct spdk_nvmf_ns *ns,
		struct spdk_nvmf_registrant *reg)
{
	if (!reg) {
		return false;
	}

	/* In all-registrants mode every registrant counts as a holder. */
	if (nvmf_ns_reservation_all_registrants_type(ns)) {
		return true;
	}

	return (ns->holder == reg);
}

/* Register `ctrlr`'s host with key `nrkey` on the namespace; bumps the
 * reservation generation. Returns 0 or -ENOMEM. */
static int
nvmf_ns_reservation_add_registrant(struct spdk_nvmf_ns *ns,
				   struct spdk_nvmf_ctrlr *ctrlr,
				   uint64_t nrkey)
{
	struct spdk_nvmf_registrant *reg;

	reg = calloc(1, sizeof(*reg));
	if (!reg) {
		return -ENOMEM;
	}

	reg->rkey = nrkey;
	/* set hostid for the registrant */
	spdk_uuid_copy(&reg->hostid, &ctrlr->hostid);
	TAILQ_INSERT_TAIL(&ns->registrants, reg, link);
	ns->gen++;

	return 0;
}

/* Clear the reservation itself (type, key, holder) — registrants stay. */
static void
nvmf_ns_reservation_release_reservation(struct spdk_nvmf_ns *ns)
{
	ns->rtype = 0;
	ns->crkey = 0;
	ns->holder = NULL;
}

/* release the reservation if the last registrant was removed */
static void
nvmf_ns_reservation_check_release_on_remove_registrant(struct spdk_nvmf_ns *ns,
		struct spdk_nvmf_registrant *reg)
{
	struct spdk_nvmf_registrant *next_reg;

	/* no reservation holder */
	if (!ns->holder) {
		assert(ns->rtype == 0);
		return;
	}

	next_reg = TAILQ_FIRST(&ns->registrants);
	if (next_reg && nvmf_ns_reservation_all_registrants_type(ns)) {
		/* the next valid registrant is the new holder now */
		ns->holder = next_reg;
	} else if (nvmf_ns_reservation_registrant_is_holder(ns, reg)) {
		/* release the reservation */
		nvmf_ns_reservation_release_reservation(ns);
	}
}

/* Remove one registrant, fix up the holder, and bump the generation.
 * NOTE: `reg` must already be detached before holder fix-up sees the list,
 * hence the remove-then-check ordering. */
static void
nvmf_ns_reservation_remove_registrant(struct spdk_nvmf_ns *ns,
				      struct spdk_nvmf_registrant *reg)
{
	TAILQ_REMOVE(&ns->registrants, reg, link);
	nvmf_ns_reservation_check_release_on_remove_registrant(ns, reg);
	free(reg);
	ns->gen++;
	return;
}

/* Remove every registrant whose key equals `rkey`; returns the count. */
static uint32_t
nvmf_ns_reservation_remove_registrants_by_key(struct spdk_nvmf_ns *ns,
		uint64_t rkey)
{
	struct spdk_nvmf_registrant *reg, *tmp;
	uint32_t count = 0;

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		if (reg->rkey == rkey) {
			nvmf_ns_reservation_remove_registrant(ns, reg);
			count++;
		}
	}
	return count;
}
2186 static uint32_t 2187 nvmf_ns_reservation_remove_all_other_registrants(struct spdk_nvmf_ns *ns, 2188 struct spdk_nvmf_registrant *reg) 2189 { 2190 struct spdk_nvmf_registrant *reg_tmp, *reg_tmp2; 2191 uint32_t count = 0; 2192 2193 TAILQ_FOREACH_SAFE(reg_tmp, &ns->registrants, link, reg_tmp2) { 2194 if (reg_tmp != reg) { 2195 nvmf_ns_reservation_remove_registrant(ns, reg_tmp); 2196 count++; 2197 } 2198 } 2199 return count; 2200 } 2201 2202 static uint32_t 2203 nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns) 2204 { 2205 struct spdk_nvmf_registrant *reg, *reg_tmp; 2206 uint32_t count = 0; 2207 2208 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, reg_tmp) { 2209 nvmf_ns_reservation_remove_registrant(ns, reg); 2210 count++; 2211 } 2212 return count; 2213 } 2214 2215 static void 2216 nvmf_ns_reservation_acquire_reservation(struct spdk_nvmf_ns *ns, uint64_t rkey, 2217 enum spdk_nvme_reservation_type rtype, 2218 struct spdk_nvmf_registrant *holder) 2219 { 2220 ns->rtype = rtype; 2221 ns->crkey = rkey; 2222 assert(ns->holder == NULL); 2223 ns->holder = holder; 2224 } 2225 2226 static bool 2227 nvmf_ns_reservation_register(struct spdk_nvmf_ns *ns, 2228 struct spdk_nvmf_ctrlr *ctrlr, 2229 struct spdk_nvmf_request *req) 2230 { 2231 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2232 uint8_t rrega, iekey, cptpl, rtype; 2233 struct spdk_nvme_reservation_register_data key; 2234 struct spdk_nvmf_registrant *reg; 2235 uint8_t status = SPDK_NVME_SC_SUCCESS; 2236 bool update_sgroup = false; 2237 struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2238 uint32_t num_hostid = 0; 2239 int rc; 2240 2241 rrega = cmd->cdw10_bits.resv_register.rrega; 2242 iekey = cmd->cdw10_bits.resv_register.iekey; 2243 cptpl = cmd->cdw10_bits.resv_register.cptpl; 2244 2245 if (req->data && req->length >= sizeof(key)) { 2246 memcpy(&key, req->data, sizeof(key)); 2247 } else { 2248 SPDK_ERRLOG("No key provided. 
Failing request.\n"); 2249 status = SPDK_NVME_SC_INVALID_FIELD; 2250 goto exit; 2251 } 2252 2253 SPDK_DEBUGLOG(nvmf, "REGISTER: RREGA %u, IEKEY %u, CPTPL %u, " 2254 "NRKEY 0x%"PRIx64", NRKEY 0x%"PRIx64"\n", 2255 rrega, iekey, cptpl, key.crkey, key.nrkey); 2256 2257 if (cptpl == SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON) { 2258 /* Ture to OFF state, and need to be updated in the configuration file */ 2259 if (ns->ptpl_activated) { 2260 ns->ptpl_activated = 0; 2261 update_sgroup = true; 2262 } 2263 } else if (cptpl == SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS) { 2264 if (ns->ptpl_file == NULL) { 2265 status = SPDK_NVME_SC_INVALID_FIELD; 2266 goto exit; 2267 } else if (ns->ptpl_activated == 0) { 2268 ns->ptpl_activated = 1; 2269 update_sgroup = true; 2270 } 2271 } 2272 2273 /* current Host Identifier has registrant or not */ 2274 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid); 2275 2276 switch (rrega) { 2277 case SPDK_NVME_RESERVE_REGISTER_KEY: 2278 if (!reg) { 2279 /* register new controller */ 2280 if (key.nrkey == 0) { 2281 SPDK_ERRLOG("Can't register zeroed new key\n"); 2282 status = SPDK_NVME_SC_INVALID_FIELD; 2283 goto exit; 2284 } 2285 rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey); 2286 if (rc < 0) { 2287 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2288 goto exit; 2289 } 2290 update_sgroup = true; 2291 } else { 2292 /* register with same key is not an error */ 2293 if (reg->rkey != key.nrkey) { 2294 SPDK_ERRLOG("The same host already register a " 2295 "key with 0x%"PRIx64"\n", 2296 reg->rkey); 2297 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2298 goto exit; 2299 } 2300 } 2301 break; 2302 case SPDK_NVME_RESERVE_UNREGISTER_KEY: 2303 if (!reg || (!iekey && reg->rkey != key.crkey)) { 2304 SPDK_ERRLOG("No registrant or current key doesn't match " 2305 "with existing registrant key\n"); 2306 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2307 goto exit; 2308 } 2309 2310 rtype = ns->rtype; 2311 num_hostid = 
nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list, 2312 SPDK_NVMF_MAX_NUM_REGISTRANTS, 2313 &ctrlr->hostid); 2314 2315 nvmf_ns_reservation_remove_registrant(ns, reg); 2316 2317 if (!ns->rtype && num_hostid && (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY || 2318 rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY)) { 2319 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 2320 hostid_list, 2321 num_hostid, 2322 SPDK_NVME_RESERVATION_RELEASED); 2323 } 2324 update_sgroup = true; 2325 break; 2326 case SPDK_NVME_RESERVE_REPLACE_KEY: 2327 if (!reg || (!iekey && reg->rkey != key.crkey)) { 2328 SPDK_ERRLOG("No registrant or current key doesn't match " 2329 "with existing registrant key\n"); 2330 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2331 goto exit; 2332 } 2333 if (key.nrkey == 0) { 2334 SPDK_ERRLOG("Can't register zeroed new key\n"); 2335 status = SPDK_NVME_SC_INVALID_FIELD; 2336 goto exit; 2337 } 2338 reg->rkey = key.nrkey; 2339 update_sgroup = true; 2340 break; 2341 default: 2342 status = SPDK_NVME_SC_INVALID_FIELD; 2343 goto exit; 2344 } 2345 2346 exit: 2347 if (update_sgroup) { 2348 rc = nvmf_ns_update_reservation_info(ns); 2349 if (rc != 0) { 2350 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2351 } 2352 } 2353 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2354 req->rsp->nvme_cpl.status.sc = status; 2355 return update_sgroup; 2356 } 2357 2358 static bool 2359 nvmf_ns_reservation_acquire(struct spdk_nvmf_ns *ns, 2360 struct spdk_nvmf_ctrlr *ctrlr, 2361 struct spdk_nvmf_request *req) 2362 { 2363 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2364 uint8_t racqa, iekey, rtype; 2365 struct spdk_nvme_reservation_acquire_data key; 2366 struct spdk_nvmf_registrant *reg; 2367 bool all_regs = false; 2368 uint32_t count = 0; 2369 bool update_sgroup = true; 2370 struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2371 uint32_t num_hostid = 0; 2372 struct spdk_uuid new_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2373 uint32_t 
new_num_hostid = 0; 2374 bool reservation_released = false; 2375 uint8_t status = SPDK_NVME_SC_SUCCESS; 2376 2377 racqa = cmd->cdw10_bits.resv_acquire.racqa; 2378 iekey = cmd->cdw10_bits.resv_acquire.iekey; 2379 rtype = cmd->cdw10_bits.resv_acquire.rtype; 2380 2381 if (req->data && req->length >= sizeof(key)) { 2382 memcpy(&key, req->data, sizeof(key)); 2383 } else { 2384 SPDK_ERRLOG("No key provided. Failing request.\n"); 2385 status = SPDK_NVME_SC_INVALID_FIELD; 2386 goto exit; 2387 } 2388 2389 SPDK_DEBUGLOG(nvmf, "ACQUIRE: RACQA %u, IEKEY %u, RTYPE %u, " 2390 "NRKEY 0x%"PRIx64", PRKEY 0x%"PRIx64"\n", 2391 racqa, iekey, rtype, key.crkey, key.prkey); 2392 2393 if (iekey || rtype > SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) { 2394 SPDK_ERRLOG("Ignore existing key field set to 1\n"); 2395 status = SPDK_NVME_SC_INVALID_FIELD; 2396 update_sgroup = false; 2397 goto exit; 2398 } 2399 2400 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid); 2401 /* must be registrant and CRKEY must match */ 2402 if (!reg || reg->rkey != key.crkey) { 2403 SPDK_ERRLOG("No registrant or current key doesn't match " 2404 "with existing registrant key\n"); 2405 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2406 update_sgroup = false; 2407 goto exit; 2408 } 2409 2410 all_regs = nvmf_ns_reservation_all_registrants_type(ns); 2411 2412 switch (racqa) { 2413 case SPDK_NVME_RESERVE_ACQUIRE: 2414 /* it's not an error for the holder to acquire same reservation type again */ 2415 if (nvmf_ns_reservation_registrant_is_holder(ns, reg) && ns->rtype == rtype) { 2416 /* do nothing */ 2417 update_sgroup = false; 2418 } else if (ns->holder == NULL) { 2419 /* fisrt time to acquire the reservation */ 2420 nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg); 2421 } else { 2422 SPDK_ERRLOG("Invalid rtype or current registrant is not holder\n"); 2423 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2424 update_sgroup = false; 2425 goto exit; 2426 } 2427 break; 2428 case 
SPDK_NVME_RESERVE_PREEMPT: 2429 /* no reservation holder */ 2430 if (!ns->holder) { 2431 /* unregister with PRKEY */ 2432 nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey); 2433 break; 2434 } 2435 num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list, 2436 SPDK_NVMF_MAX_NUM_REGISTRANTS, 2437 &ctrlr->hostid); 2438 2439 /* only 1 reservation holder and reservation key is valid */ 2440 if (!all_regs) { 2441 /* preempt itself */ 2442 if (nvmf_ns_reservation_registrant_is_holder(ns, reg) && 2443 ns->crkey == key.prkey) { 2444 ns->rtype = rtype; 2445 reservation_released = true; 2446 break; 2447 } 2448 2449 if (ns->crkey == key.prkey) { 2450 nvmf_ns_reservation_remove_registrant(ns, ns->holder); 2451 nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg); 2452 reservation_released = true; 2453 } else if (key.prkey != 0) { 2454 nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey); 2455 } else { 2456 /* PRKEY is zero */ 2457 SPDK_ERRLOG("Current PRKEY is zero\n"); 2458 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2459 update_sgroup = false; 2460 goto exit; 2461 } 2462 } else { 2463 /* release all other registrants except for the current one */ 2464 if (key.prkey == 0) { 2465 nvmf_ns_reservation_remove_all_other_registrants(ns, reg); 2466 assert(ns->holder == reg); 2467 } else { 2468 count = nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey); 2469 if (count == 0) { 2470 SPDK_ERRLOG("PRKEY doesn't match any registrant\n"); 2471 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2472 update_sgroup = false; 2473 goto exit; 2474 } 2475 } 2476 } 2477 break; 2478 default: 2479 status = SPDK_NVME_SC_INVALID_FIELD; 2480 update_sgroup = false; 2481 break; 2482 } 2483 2484 exit: 2485 if (update_sgroup && racqa == SPDK_NVME_RESERVE_PREEMPT) { 2486 new_num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, new_hostid_list, 2487 SPDK_NVMF_MAX_NUM_REGISTRANTS, 2488 &ctrlr->hostid); 2489 /* Preempt notification occurs on the unregistered 
controllers 2490 * other than the controller who issued the command. 2491 */ 2492 num_hostid = nvmf_ns_reservation_get_unregistered_hostid(hostid_list, 2493 num_hostid, 2494 new_hostid_list, 2495 new_num_hostid); 2496 if (num_hostid) { 2497 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 2498 hostid_list, 2499 num_hostid, 2500 SPDK_NVME_REGISTRATION_PREEMPTED); 2501 2502 } 2503 /* Reservation released notification occurs on the 2504 * controllers which are the remaining registrants other than 2505 * the controller who issued the command. 2506 */ 2507 if (reservation_released && new_num_hostid) { 2508 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 2509 new_hostid_list, 2510 new_num_hostid, 2511 SPDK_NVME_RESERVATION_RELEASED); 2512 2513 } 2514 } 2515 if (update_sgroup && ns->ptpl_activated) { 2516 if (nvmf_ns_update_reservation_info(ns)) { 2517 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2518 } 2519 } 2520 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2521 req->rsp->nvme_cpl.status.sc = status; 2522 return update_sgroup; 2523 } 2524 2525 static bool 2526 nvmf_ns_reservation_release(struct spdk_nvmf_ns *ns, 2527 struct spdk_nvmf_ctrlr *ctrlr, 2528 struct spdk_nvmf_request *req) 2529 { 2530 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2531 uint8_t rrela, iekey, rtype; 2532 struct spdk_nvmf_registrant *reg; 2533 uint64_t crkey; 2534 uint8_t status = SPDK_NVME_SC_SUCCESS; 2535 bool update_sgroup = true; 2536 struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2537 uint32_t num_hostid = 0; 2538 2539 rrela = cmd->cdw10_bits.resv_release.rrela; 2540 iekey = cmd->cdw10_bits.resv_release.iekey; 2541 rtype = cmd->cdw10_bits.resv_release.rtype; 2542 2543 if (req->data && req->length >= sizeof(crkey)) { 2544 memcpy(&crkey, req->data, sizeof(crkey)); 2545 } else { 2546 SPDK_ERRLOG("No key provided. 
Failing request.\n"); 2547 status = SPDK_NVME_SC_INVALID_FIELD; 2548 goto exit; 2549 } 2550 2551 SPDK_DEBUGLOG(nvmf, "RELEASE: RRELA %u, IEKEY %u, RTYPE %u, " 2552 "CRKEY 0x%"PRIx64"\n", rrela, iekey, rtype, crkey); 2553 2554 if (iekey) { 2555 SPDK_ERRLOG("Ignore existing key field set to 1\n"); 2556 status = SPDK_NVME_SC_INVALID_FIELD; 2557 update_sgroup = false; 2558 goto exit; 2559 } 2560 2561 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid); 2562 if (!reg || reg->rkey != crkey) { 2563 SPDK_ERRLOG("No registrant or current key doesn't match " 2564 "with existing registrant key\n"); 2565 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2566 update_sgroup = false; 2567 goto exit; 2568 } 2569 2570 num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list, 2571 SPDK_NVMF_MAX_NUM_REGISTRANTS, 2572 &ctrlr->hostid); 2573 2574 switch (rrela) { 2575 case SPDK_NVME_RESERVE_RELEASE: 2576 if (!ns->holder) { 2577 SPDK_DEBUGLOG(nvmf, "RELEASE: no holder\n"); 2578 update_sgroup = false; 2579 goto exit; 2580 } 2581 if (ns->rtype != rtype) { 2582 SPDK_ERRLOG("Type doesn't match\n"); 2583 status = SPDK_NVME_SC_INVALID_FIELD; 2584 update_sgroup = false; 2585 goto exit; 2586 } 2587 if (!nvmf_ns_reservation_registrant_is_holder(ns, reg)) { 2588 /* not the reservation holder, this isn't an error */ 2589 update_sgroup = false; 2590 goto exit; 2591 } 2592 2593 rtype = ns->rtype; 2594 nvmf_ns_reservation_release_reservation(ns); 2595 2596 if (num_hostid && rtype != SPDK_NVME_RESERVE_WRITE_EXCLUSIVE && 2597 rtype != SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) { 2598 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 2599 hostid_list, 2600 num_hostid, 2601 SPDK_NVME_RESERVATION_RELEASED); 2602 } 2603 break; 2604 case SPDK_NVME_RESERVE_CLEAR: 2605 nvmf_ns_reservation_clear_all_registrants(ns); 2606 if (num_hostid) { 2607 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 2608 hostid_list, 2609 num_hostid, 2610 SPDK_NVME_RESERVATION_PREEMPTED); 2611 } 2612 break; 2613 
default: 2614 status = SPDK_NVME_SC_INVALID_FIELD; 2615 update_sgroup = false; 2616 goto exit; 2617 } 2618 2619 exit: 2620 if (update_sgroup && ns->ptpl_activated) { 2621 if (nvmf_ns_update_reservation_info(ns)) { 2622 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2623 } 2624 } 2625 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2626 req->rsp->nvme_cpl.status.sc = status; 2627 return update_sgroup; 2628 } 2629 2630 static void 2631 nvmf_ns_reservation_report(struct spdk_nvmf_ns *ns, 2632 struct spdk_nvmf_ctrlr *ctrlr, 2633 struct spdk_nvmf_request *req) 2634 { 2635 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2636 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 2637 struct spdk_nvmf_ctrlr *ctrlr_tmp; 2638 struct spdk_nvmf_registrant *reg, *tmp; 2639 struct spdk_nvme_reservation_status_extended_data *status_data; 2640 struct spdk_nvme_registered_ctrlr_extended_data *ctrlr_data; 2641 uint8_t *payload; 2642 uint32_t len, count = 0; 2643 uint32_t regctl = 0; 2644 uint8_t status = SPDK_NVME_SC_SUCCESS; 2645 2646 if (req->data == NULL) { 2647 SPDK_ERRLOG("No data transfer specified for request. " 2648 " Unable to transfer back response.\n"); 2649 status = SPDK_NVME_SC_INVALID_FIELD; 2650 goto exit; 2651 } 2652 2653 if (!cmd->cdw11_bits.resv_report.eds) { 2654 SPDK_ERRLOG("NVMeoF uses extended controller data structure, " 2655 "please set EDS bit in cdw11 and try again\n"); 2656 status = SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT; 2657 goto exit; 2658 } 2659 2660 /* Get number of registerd controllers, one Host may have more than 2661 * one controller based on different ports. 
2662 */ 2663 TAILQ_FOREACH(ctrlr_tmp, &subsystem->ctrlrs, link) { 2664 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr_tmp->hostid); 2665 if (reg) { 2666 regctl++; 2667 } 2668 } 2669 2670 len = sizeof(*status_data) + sizeof(*ctrlr_data) * regctl; 2671 payload = calloc(1, len); 2672 if (!payload) { 2673 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2674 goto exit; 2675 } 2676 2677 status_data = (struct spdk_nvme_reservation_status_extended_data *)payload; 2678 status_data->data.gen = ns->gen; 2679 status_data->data.rtype = ns->rtype; 2680 status_data->data.regctl = regctl; 2681 status_data->data.ptpls = ns->ptpl_activated; 2682 2683 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2684 assert(count <= regctl); 2685 ctrlr_data = (struct spdk_nvme_registered_ctrlr_extended_data *) 2686 (payload + sizeof(*status_data) + sizeof(*ctrlr_data) * count); 2687 /* Set to 0xffffh for dynamic controller */ 2688 ctrlr_data->cntlid = 0xffff; 2689 ctrlr_data->rcsts.status = (ns->holder == reg) ? 
true : false; 2690 ctrlr_data->rkey = reg->rkey; 2691 spdk_uuid_copy((struct spdk_uuid *)ctrlr_data->hostid, ®->hostid); 2692 count++; 2693 } 2694 2695 memcpy(req->data, payload, spdk_min(len, (cmd->cdw10 + 1) * sizeof(uint32_t))); 2696 free(payload); 2697 2698 exit: 2699 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2700 req->rsp->nvme_cpl.status.sc = status; 2701 return; 2702 } 2703 2704 static void 2705 nvmf_ns_reservation_complete(void *ctx) 2706 { 2707 struct spdk_nvmf_request *req = ctx; 2708 2709 spdk_nvmf_request_complete(req); 2710 } 2711 2712 static void 2713 _nvmf_ns_reservation_update_done(struct spdk_nvmf_subsystem *subsystem, 2714 void *cb_arg, int status) 2715 { 2716 struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)cb_arg; 2717 struct spdk_nvmf_poll_group *group = req->qpair->group; 2718 2719 spdk_thread_send_msg(group->thread, nvmf_ns_reservation_complete, req); 2720 } 2721 2722 void 2723 nvmf_ns_reservation_request(void *ctx) 2724 { 2725 struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)ctx; 2726 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2727 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2728 struct subsystem_update_ns_ctx *update_ctx; 2729 uint32_t nsid; 2730 struct spdk_nvmf_ns *ns; 2731 bool update_sgroup = false; 2732 2733 nsid = cmd->nsid; 2734 ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid); 2735 assert(ns != NULL); 2736 2737 switch (cmd->opc) { 2738 case SPDK_NVME_OPC_RESERVATION_REGISTER: 2739 update_sgroup = nvmf_ns_reservation_register(ns, ctrlr, req); 2740 break; 2741 case SPDK_NVME_OPC_RESERVATION_ACQUIRE: 2742 update_sgroup = nvmf_ns_reservation_acquire(ns, ctrlr, req); 2743 break; 2744 case SPDK_NVME_OPC_RESERVATION_RELEASE: 2745 update_sgroup = nvmf_ns_reservation_release(ns, ctrlr, req); 2746 break; 2747 case SPDK_NVME_OPC_RESERVATION_REPORT: 2748 nvmf_ns_reservation_report(ns, ctrlr, req); 2749 break; 2750 default: 2751 break; 2752 } 2753 2754 /* update reservation information to 
subsystem's poll group */ 2755 if (update_sgroup) { 2756 update_ctx = calloc(1, sizeof(*update_ctx)); 2757 if (update_ctx == NULL) { 2758 SPDK_ERRLOG("Can't alloc subsystem poll group update context\n"); 2759 goto update_done; 2760 } 2761 update_ctx->subsystem = ctrlr->subsys; 2762 update_ctx->cb_fn = _nvmf_ns_reservation_update_done; 2763 update_ctx->cb_arg = req; 2764 2765 nvmf_subsystem_update_ns(ctrlr->subsys, subsystem_update_ns_done, update_ctx); 2766 return; 2767 } 2768 2769 update_done: 2770 _nvmf_ns_reservation_update_done(ctrlr->subsys, (void *)req, 0); 2771 } 2772 2773 int 2774 spdk_nvmf_subsystem_set_ana_reporting(struct spdk_nvmf_subsystem *subsystem, 2775 bool ana_reporting) 2776 { 2777 if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) { 2778 return -EAGAIN; 2779 } 2780 2781 subsystem->flags.ana_reporting = ana_reporting; 2782 2783 return 0; 2784 } 2785 2786 struct subsystem_listener_update_ctx { 2787 struct spdk_nvmf_subsystem_listener *listener; 2788 2789 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn; 2790 void *cb_arg; 2791 }; 2792 2793 static void 2794 subsystem_listener_update_done(struct spdk_io_channel_iter *i, int status) 2795 { 2796 struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2797 2798 if (ctx->cb_fn) { 2799 ctx->cb_fn(ctx->cb_arg, status); 2800 } 2801 free(ctx); 2802 } 2803 2804 static void 2805 subsystem_listener_update_on_pg(struct spdk_io_channel_iter *i) 2806 { 2807 struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2808 struct spdk_nvmf_subsystem_listener *listener; 2809 struct spdk_nvmf_poll_group *group; 2810 struct spdk_nvmf_ctrlr *ctrlr; 2811 2812 listener = ctx->listener; 2813 group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i)); 2814 2815 TAILQ_FOREACH(ctrlr, &listener->subsystem->ctrlrs, link) { 2816 if (ctrlr->admin_qpair->group == group && ctrlr->listener == listener) { 2817 nvmf_ctrlr_async_event_ana_change_notice(ctrlr); 2818 } 2819 } 2820 2821 
spdk_for_each_channel_continue(i, 0); 2822 } 2823 2824 void 2825 nvmf_subsystem_set_ana_state(struct spdk_nvmf_subsystem *subsystem, 2826 const struct spdk_nvme_transport_id *trid, 2827 enum spdk_nvme_ana_state ana_state, 2828 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, void *cb_arg) 2829 { 2830 struct spdk_nvmf_subsystem_listener *listener; 2831 struct subsystem_listener_update_ctx *ctx; 2832 2833 assert(cb_fn != NULL); 2834 assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 2835 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED); 2836 2837 if (!subsystem->flags.ana_reporting) { 2838 SPDK_ERRLOG("ANA reporting is disabled\n"); 2839 cb_fn(cb_arg, -EINVAL); 2840 return; 2841 } 2842 2843 /* ANA Change state is not used, ANA Persistent Loss state 2844 * is not supported yet. 2845 */ 2846 if (!(ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE || 2847 ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE || 2848 ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE)) { 2849 SPDK_ERRLOG("ANA state %d is not supported\n", ana_state); 2850 cb_fn(cb_arg, -ENOTSUP); 2851 return; 2852 } 2853 2854 listener = nvmf_subsystem_find_listener(subsystem, trid); 2855 if (!listener) { 2856 SPDK_ERRLOG("Unable to find listener.\n"); 2857 cb_fn(cb_arg, -EINVAL); 2858 return; 2859 } 2860 2861 if (listener->ana_state == ana_state) { 2862 cb_fn(cb_arg, 0); 2863 return; 2864 } 2865 2866 ctx = calloc(1, sizeof(*ctx)); 2867 if (!ctx) { 2868 SPDK_ERRLOG("Unable to allocate context\n"); 2869 cb_fn(cb_arg, -ENOMEM); 2870 return; 2871 } 2872 2873 listener->ana_state = ana_state; 2874 listener->ana_state_change_count++; 2875 2876 ctx->listener = listener; 2877 ctx->cb_fn = cb_fn; 2878 ctx->cb_arg = cb_arg; 2879 2880 spdk_for_each_channel(subsystem->tgt, 2881 subsystem_listener_update_on_pg, 2882 ctx, 2883 subsystem_listener_update_done); 2884 } 2885