1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "nvmf_internal.h" 37 #include "transport.h" 38 39 #include "spdk/likely.h" 40 #include "spdk/string.h" 41 #include "spdk/trace.h" 42 #include "spdk/nvmf_spec.h" 43 #include "spdk/uuid.h" 44 #include "spdk/json.h" 45 #include "spdk/file.h" 46 47 #include "spdk/bdev_module.h" 48 #include "spdk/log.h" 49 #include "spdk_internal/utf.h" 50 51 #define MODEL_NUMBER_DEFAULT "SPDK bdev Controller" 52 #define NVMF_SUBSYSTEM_DEFAULT_NAMESPACES 32 53 54 /* 55 * States for parsing valid domains in NQNs according to RFC 1034 56 */ 57 enum spdk_nvmf_nqn_domain_states { 58 /* First character of a domain must be a letter */ 59 SPDK_NVMF_DOMAIN_ACCEPT_LETTER = 0, 60 61 /* Subsequent characters can be any of letter, digit, or hyphen */ 62 SPDK_NVMF_DOMAIN_ACCEPT_LDH = 1, 63 64 /* A domain label must end with either a letter or digit */ 65 SPDK_NVMF_DOMAIN_ACCEPT_ANY = 2 66 }; 67 68 /* Returns true if is a valid ASCII string as defined by the NVMe spec */ 69 static bool 70 nvmf_valid_ascii_string(const void *buf, size_t size) 71 { 72 const uint8_t *str = buf; 73 size_t i; 74 75 for (i = 0; i < size; i++) { 76 if (str[i] < 0x20 || str[i] > 0x7E) { 77 return false; 78 } 79 } 80 81 return true; 82 } 83 84 static bool 85 nvmf_valid_nqn(const char *nqn) 86 { 87 size_t len; 88 struct spdk_uuid uuid_value; 89 uint32_t i; 90 int bytes_consumed; 91 uint32_t domain_label_length; 92 char *reverse_domain_end; 93 uint32_t reverse_domain_end_index; 94 enum spdk_nvmf_nqn_domain_states domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER; 95 96 /* Check for length requirements */ 97 len = strlen(nqn); 98 if (len > SPDK_NVMF_NQN_MAX_LEN) { 99 SPDK_ERRLOG("Invalid NQN \"%s\": length %zu > max %d\n", nqn, len, SPDK_NVMF_NQN_MAX_LEN); 100 return false; 101 } 102 103 /* The nqn must be at least as long as 
SPDK_NVMF_NQN_MIN_LEN to contain the necessary prefix. */ 104 if (len < SPDK_NVMF_NQN_MIN_LEN) { 105 SPDK_ERRLOG("Invalid NQN \"%s\": length %zu < min %d\n", nqn, len, SPDK_NVMF_NQN_MIN_LEN); 106 return false; 107 } 108 109 /* Check for discovery controller nqn */ 110 if (!strcmp(nqn, SPDK_NVMF_DISCOVERY_NQN)) { 111 return true; 112 } 113 114 /* Check for equality with the generic nqn structure of the form "nqn.2014-08.org.nvmexpress:uuid:11111111-2222-3333-4444-555555555555" */ 115 if (!strncmp(nqn, SPDK_NVMF_NQN_UUID_PRE, SPDK_NVMF_NQN_UUID_PRE_LEN)) { 116 if (len != SPDK_NVMF_NQN_UUID_PRE_LEN + SPDK_NVMF_UUID_STRING_LEN) { 117 SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not the correct length\n", nqn); 118 return false; 119 } 120 121 if (spdk_uuid_parse(&uuid_value, &nqn[SPDK_NVMF_NQN_UUID_PRE_LEN])) { 122 SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not formatted correctly\n", nqn); 123 return false; 124 } 125 return true; 126 } 127 128 /* If the nqn does not match the uuid structure, the next several checks validate the form "nqn.yyyy-mm.reverse.domain:user-string" */ 129 130 if (strncmp(nqn, "nqn.", 4) != 0) { 131 SPDK_ERRLOG("Invalid NQN \"%s\": NQN must begin with \"nqn.\".\n", nqn); 132 return false; 133 } 134 135 /* Check for yyyy-mm. */ 136 if (!(isdigit(nqn[4]) && isdigit(nqn[5]) && isdigit(nqn[6]) && isdigit(nqn[7]) && 137 nqn[8] == '-' && isdigit(nqn[9]) && isdigit(nqn[10]) && nqn[11] == '.')) { 138 SPDK_ERRLOG("Invalid date code in NQN \"%s\"\n", nqn); 139 return false; 140 } 141 142 reverse_domain_end = strchr(nqn, ':'); 143 if (reverse_domain_end != NULL && (reverse_domain_end_index = reverse_domain_end - nqn) < len - 1) { 144 } else { 145 SPDK_ERRLOG("Invalid NQN \"%s\". NQN must contain user specified name with a ':' as a prefix.\n", 146 nqn); 147 return false; 148 } 149 150 /* Check for valid reverse domain */ 151 domain_label_length = 0; 152 for (i = 12; i < reverse_domain_end_index; i++) { 153 if (domain_label_length > SPDK_DOMAIN_LABEL_MAX_LEN) { 154 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". At least one Label is too long.\n", nqn); 155 return false; 156 } 157 158 switch (domain_state) { 159 160 case SPDK_NVMF_DOMAIN_ACCEPT_LETTER: { 161 if (isalpha(nqn[i])) { 162 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 163 domain_label_length++; 164 break; 165 } else { 166 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must start with a letter.\n", nqn); 167 return false; 168 } 169 } 170 171 case SPDK_NVMF_DOMAIN_ACCEPT_LDH: { 172 if (isalpha(nqn[i]) || isdigit(nqn[i])) { 173 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 174 domain_label_length++; 175 break; 176 } else if (nqn[i] == '-') { 177 if (i == reverse_domain_end_index - 1) { 178 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 179 nqn); 180 return false; 181 } 182 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH; 183 domain_label_length++; 184 break; 185 } else if (nqn[i] == '.') { 186 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 187 nqn); 188 return false; 189 } else { 190 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". 
Label names must contain only [a-z,A-Z,0-9,'-','.'].\n", 191 nqn); 192 return false; 193 } 194 } 195 196 case SPDK_NVMF_DOMAIN_ACCEPT_ANY: { 197 if (isalpha(nqn[i]) || isdigit(nqn[i])) { 198 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 199 domain_label_length++; 200 break; 201 } else if (nqn[i] == '-') { 202 if (i == reverse_domain_end_index - 1) { 203 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 204 nqn); 205 return false; 206 } 207 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH; 208 domain_label_length++; 209 break; 210 } else if (nqn[i] == '.') { 211 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER; 212 domain_label_length = 0; 213 break; 214 } else { 215 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only [a-z,A-Z,0-9,'-','.'].\n", 216 nqn); 217 return false; 218 } 219 } 220 } 221 } 222 223 i = reverse_domain_end_index + 1; 224 while (i < len) { 225 bytes_consumed = utf8_valid(&nqn[i], &nqn[len]); 226 if (bytes_consumed <= 0) { 227 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only valid utf-8.\n", nqn); 228 return false; 229 } 230 231 i += bytes_consumed; 232 } 233 return true; 234 } 235 236 static void subsystem_state_change_on_pg(struct spdk_io_channel_iter *i); 237 238 struct spdk_nvmf_subsystem * 239 spdk_nvmf_subsystem_create(struct spdk_nvmf_tgt *tgt, 240 const char *nqn, 241 enum spdk_nvmf_subtype type, 242 uint32_t num_ns) 243 { 244 struct spdk_nvmf_subsystem *subsystem; 245 uint32_t sid; 246 247 if (spdk_nvmf_tgt_find_subsystem(tgt, nqn)) { 248 SPDK_ERRLOG("Subsystem NQN '%s' already exists\n", nqn); 249 return NULL; 250 } 251 252 if (!nvmf_valid_nqn(nqn)) { 253 return NULL; 254 } 255 256 if (type == SPDK_NVMF_SUBTYPE_DISCOVERY) { 257 if (num_ns != 0) { 258 SPDK_ERRLOG("Discovery subsystem cannot have namespaces.\n"); 259 return NULL; 260 } 261 } else if (num_ns == 0) { 262 num_ns = NVMF_SUBSYSTEM_DEFAULT_NAMESPACES; 263 } 264 265 /* Find a free subsystem id (sid) */ 266 for (sid = 0; sid < tgt->max_subsystems; sid++) { 267 if (tgt->subsystems[sid] == NULL) { 268 break; 269 } 270 } 271 if (sid >= tgt->max_subsystems) { 272 return NULL; 273 } 274 275 subsystem = calloc(1, sizeof(struct spdk_nvmf_subsystem)); 276 if (subsystem == NULL) { 277 return NULL; 278 } 279 280 subsystem->thread = spdk_get_thread(); 281 subsystem->state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 282 subsystem->tgt = tgt; 283 subsystem->id = sid; 284 subsystem->subtype = type; 285 subsystem->max_nsid = num_ns; 286 subsystem->next_cntlid = 0; 287 snprintf(subsystem->subnqn, sizeof(subsystem->subnqn), "%s", nqn); 288 pthread_mutex_init(&subsystem->mutex, NULL); 289 TAILQ_INIT(&subsystem->listeners); 290 TAILQ_INIT(&subsystem->hosts); 291 TAILQ_INIT(&subsystem->ctrlrs); 292 293 if (num_ns != 0) { 294 subsystem->ns = calloc(num_ns, sizeof(struct spdk_nvmf_ns *)); 295 if (subsystem->ns == NULL) { 296 SPDK_ERRLOG("Namespace memory allocation failed\n"); 297 pthread_mutex_destroy(&subsystem->mutex); 298 free(subsystem); 299 return NULL; 300 } 301 } 302 303 memset(subsystem->sn, '0', sizeof(subsystem->sn) - 1); 304 subsystem->sn[sizeof(subsystem->sn) - 1] = '\0'; 305 306 snprintf(subsystem->mn, sizeof(subsystem->mn), "%s", 307 MODEL_NUMBER_DEFAULT); 308 309 tgt->subsystems[sid] = subsystem; 310 nvmf_update_discovery_log(tgt, NULL); 311 312 return subsystem; 313 } 314 315 /* Must hold subsystem->mutex while calling this function */ 316 static void 317 nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, struct 
spdk_nvmf_host *host) 318 { 319 TAILQ_REMOVE(&subsystem->hosts, host, link); 320 free(host); 321 } 322 323 static void 324 _nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem, 325 struct spdk_nvmf_subsystem_listener *listener, 326 bool stop) 327 { 328 struct spdk_nvmf_transport *transport; 329 330 if (stop) { 331 transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, listener->trid->trstring); 332 if (transport != NULL) { 333 spdk_nvmf_transport_stop_listen(transport, listener->trid); 334 } 335 } 336 337 TAILQ_REMOVE(&subsystem->listeners, listener, link); 338 free(listener); 339 } 340 341 void 342 spdk_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem) 343 { 344 struct spdk_nvmf_host *host, *host_tmp; 345 struct spdk_nvmf_ctrlr *ctrlr, *ctrlr_tmp; 346 struct spdk_nvmf_ns *ns; 347 348 if (!subsystem) { 349 return; 350 } 351 352 assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE); 353 354 SPDK_DEBUGLOG(nvmf, "subsystem is %p\n", subsystem); 355 356 nvmf_subsystem_remove_all_listeners(subsystem, false); 357 358 pthread_mutex_lock(&subsystem->mutex); 359 360 TAILQ_FOREACH_SAFE(host, &subsystem->hosts, link, host_tmp) { 361 nvmf_subsystem_remove_host(subsystem, host); 362 } 363 364 pthread_mutex_unlock(&subsystem->mutex); 365 366 TAILQ_FOREACH_SAFE(ctrlr, &subsystem->ctrlrs, link, ctrlr_tmp) { 367 nvmf_ctrlr_destruct(ctrlr); 368 } 369 370 ns = spdk_nvmf_subsystem_get_first_ns(subsystem); 371 while (ns != NULL) { 372 struct spdk_nvmf_ns *next_ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns); 373 374 spdk_nvmf_subsystem_remove_ns(subsystem, ns->opts.nsid); 375 ns = next_ns; 376 } 377 378 free(subsystem->ns); 379 380 subsystem->tgt->subsystems[subsystem->id] = NULL; 381 nvmf_update_discovery_log(subsystem->tgt, NULL); 382 383 pthread_mutex_destroy(&subsystem->mutex); 384 385 free(subsystem); 386 } 387 388 389 /* we have to use the typedef in the function declaration to appease astyle. 
*/ 390 typedef enum spdk_nvmf_subsystem_state spdk_nvmf_subsystem_state_t; 391 392 static spdk_nvmf_subsystem_state_t 393 nvmf_subsystem_get_intermediate_state(enum spdk_nvmf_subsystem_state current_state, 394 enum spdk_nvmf_subsystem_state requested_state) 395 { 396 switch (requested_state) { 397 case SPDK_NVMF_SUBSYSTEM_INACTIVE: 398 return SPDK_NVMF_SUBSYSTEM_DEACTIVATING; 399 case SPDK_NVMF_SUBSYSTEM_ACTIVE: 400 if (current_state == SPDK_NVMF_SUBSYSTEM_PAUSED) { 401 return SPDK_NVMF_SUBSYSTEM_RESUMING; 402 } else { 403 return SPDK_NVMF_SUBSYSTEM_ACTIVATING; 404 } 405 case SPDK_NVMF_SUBSYSTEM_PAUSED: 406 return SPDK_NVMF_SUBSYSTEM_PAUSING; 407 default: 408 assert(false); 409 return SPDK_NVMF_SUBSYSTEM_NUM_STATES; 410 } 411 } 412 413 static int 414 nvmf_subsystem_set_state(struct spdk_nvmf_subsystem *subsystem, 415 enum spdk_nvmf_subsystem_state state) 416 { 417 enum spdk_nvmf_subsystem_state actual_old_state, expected_old_state; 418 bool exchanged; 419 420 switch (state) { 421 case SPDK_NVMF_SUBSYSTEM_INACTIVE: 422 expected_old_state = SPDK_NVMF_SUBSYSTEM_DEACTIVATING; 423 break; 424 case SPDK_NVMF_SUBSYSTEM_ACTIVATING: 425 expected_old_state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 426 break; 427 case SPDK_NVMF_SUBSYSTEM_ACTIVE: 428 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING; 429 break; 430 case SPDK_NVMF_SUBSYSTEM_PAUSING: 431 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 432 break; 433 case SPDK_NVMF_SUBSYSTEM_PAUSED: 434 expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSING; 435 break; 436 case SPDK_NVMF_SUBSYSTEM_RESUMING: 437 expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSED; 438 break; 439 case SPDK_NVMF_SUBSYSTEM_DEACTIVATING: 440 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 441 break; 442 default: 443 assert(false); 444 return -1; 445 } 446 447 actual_old_state = expected_old_state; 448 exchanged = __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false, 449 __ATOMIC_RELAXED, __ATOMIC_RELAXED); 450 if (spdk_unlikely(exchanged == false)) { 451 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING && 452 state == SPDK_NVMF_SUBSYSTEM_ACTIVE) { 453 expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING; 454 } 455 /* This is for the case when activating the subsystem fails. */ 456 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_ACTIVATING && 457 state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING) { 458 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING; 459 } 460 /* This is for the case when resuming the subsystem fails. */ 461 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING && 462 state == SPDK_NVMF_SUBSYSTEM_PAUSING) { 463 expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING; 464 } 465 actual_old_state = expected_old_state; 466 __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false, 467 __ATOMIC_RELAXED, __ATOMIC_RELAXED); 468 } 469 assert(actual_old_state == expected_old_state); 470 return actual_old_state - expected_old_state; 471 } 472 473 struct subsystem_state_change_ctx { 474 struct spdk_nvmf_subsystem *subsystem; 475 uint16_t nsid; 476 477 enum spdk_nvmf_subsystem_state original_state; 478 enum spdk_nvmf_subsystem_state requested_state; 479 480 spdk_nvmf_subsystem_state_change_done cb_fn; 481 void *cb_arg; 482 }; 483 484 static void 485 subsystem_state_change_revert_done(struct spdk_io_channel_iter *i, int status) 486 { 487 struct subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 488 489 /* Nothing to be done here if the state setting fails, we are just screwed. 
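* By this point subsystem_state_change_done has already reset ctx->requested_state to the
* original state, so this call only moves the subsystem back out of its intermediate state
* before the caller is notified of the failure.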
*/ 490 if (nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state)) { 491 SPDK_ERRLOG("Unable to revert the subsystem state after operation failure.\n"); 492 } 493 494 ctx->subsystem->changing_state = false; 495 if (ctx->cb_fn) { 496 /* return a failure here. This function only exists in an error path. */ 497 ctx->cb_fn(ctx->subsystem, ctx->cb_arg, -1); 498 } 499 free(ctx); 500 } 501 502 static void 503 subsystem_state_change_done(struct spdk_io_channel_iter *i, int status) 504 { 505 struct subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 506 enum spdk_nvmf_subsystem_state intermediate_state; 507 508 if (status == 0) { 509 status = nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state); 510 if (status) { 511 status = -1; 512 } 513 } 514 515 if (status) { 516 intermediate_state = nvmf_subsystem_get_intermediate_state(ctx->requested_state, 517 ctx->original_state); 518 assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES); 519 520 if (nvmf_subsystem_set_state(ctx->subsystem, intermediate_state)) { 521 goto out; 522 } 523 ctx->requested_state = ctx->original_state; 524 spdk_for_each_channel(ctx->subsystem->tgt, 525 subsystem_state_change_on_pg, 526 ctx, 527 subsystem_state_change_revert_done); 528 return; 529 } 530 531 out: 532 ctx->subsystem->changing_state = false; 533 if (ctx->cb_fn) { 534 ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status); 535 } 536 free(ctx); 537 } 538 539 static void 540 subsystem_state_change_continue(void *ctx, int status) 541 { 542 struct spdk_io_channel_iter *i = ctx; 543 spdk_for_each_channel_continue(i, status); 544 } 545 546 static void 547 subsystem_state_change_on_pg(struct spdk_io_channel_iter *i) 548 { 549 struct subsystem_state_change_ctx *ctx; 550 struct spdk_io_channel *ch; 551 struct spdk_nvmf_poll_group *group; 552 553 ctx = spdk_io_channel_iter_get_ctx(i); 554 ch = spdk_io_channel_iter_get_channel(i); 555 group = spdk_io_channel_get_ctx(ch); 556 557 switch (ctx->requested_state) { 558 case SPDK_NVMF_SUBSYSTEM_INACTIVE: 559 nvmf_poll_group_remove_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i); 560 break; 561 case SPDK_NVMF_SUBSYSTEM_ACTIVE: 562 if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVATING) { 563 nvmf_poll_group_add_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i); 564 } else if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_RESUMING) { 565 nvmf_poll_group_resume_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i); 566 } 567 break; 568 case SPDK_NVMF_SUBSYSTEM_PAUSED: 569 nvmf_poll_group_pause_subsystem(group, ctx->subsystem, ctx->nsid, subsystem_state_change_continue, 570 i); 571 break; 572 default: 573 assert(false); 574 break; 575 } 576 } 577 578 static int 579 nvmf_subsystem_state_change(struct spdk_nvmf_subsystem *subsystem, 580 uint32_t nsid, 581 enum spdk_nvmf_subsystem_state requested_state, 582 spdk_nvmf_subsystem_state_change_done cb_fn, 583 void *cb_arg) 584 { 585 struct subsystem_state_change_ctx *ctx; 586 enum spdk_nvmf_subsystem_state intermediate_state; 587 int rc; 588 589 if (__sync_val_compare_and_swap(&subsystem->changing_state, false, true)) { 590 return -EBUSY; 591 } 592 593 /* If we are already in the requested state, just call the callback immediately. 
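* Note that changing_state is cleared before the callback is invoked, so the caller is free
* to start another state change from inside the callback.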
*/ 594 if (subsystem->state == requested_state) { 595 subsystem->changing_state = false; 596 if (cb_fn) { 597 cb_fn(subsystem, cb_arg, 0); 598 } 599 return 0; 600 } 601 602 intermediate_state = nvmf_subsystem_get_intermediate_state(subsystem->state, requested_state); 603 assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES); 604 605 ctx = calloc(1, sizeof(*ctx)); 606 if (!ctx) { 607 subsystem->changing_state = false; 608 return -ENOMEM; 609 } 610 611 ctx->original_state = subsystem->state; 612 rc = nvmf_subsystem_set_state(subsystem, intermediate_state); 613 if (rc) { 614 free(ctx); 615 subsystem->changing_state = false; 616 return rc; 617 } 618 619 ctx->subsystem = subsystem; 620 ctx->nsid = nsid; 621 ctx->requested_state = requested_state; 622 ctx->cb_fn = cb_fn; 623 ctx->cb_arg = cb_arg; 624 625 spdk_for_each_channel(subsystem->tgt, 626 subsystem_state_change_on_pg, 627 ctx, 628 subsystem_state_change_done); 629 630 return 0; 631 } 632 633 int 634 spdk_nvmf_subsystem_start(struct spdk_nvmf_subsystem *subsystem, 635 spdk_nvmf_subsystem_state_change_done cb_fn, 636 void *cb_arg) 637 { 638 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg); 639 } 640 641 int 642 spdk_nvmf_subsystem_stop(struct spdk_nvmf_subsystem *subsystem, 643 spdk_nvmf_subsystem_state_change_done cb_fn, 644 void *cb_arg) 645 { 646 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_INACTIVE, cb_fn, cb_arg); 647 } 648 649 int 650 spdk_nvmf_subsystem_pause(struct spdk_nvmf_subsystem *subsystem, 651 uint32_t nsid, 652 spdk_nvmf_subsystem_state_change_done cb_fn, 653 void *cb_arg) 654 { 655 return nvmf_subsystem_state_change(subsystem, nsid, SPDK_NVMF_SUBSYSTEM_PAUSED, cb_fn, cb_arg); 656 } 657 658 int 659 spdk_nvmf_subsystem_resume(struct spdk_nvmf_subsystem *subsystem, 660 spdk_nvmf_subsystem_state_change_done cb_fn, 661 void *cb_arg) 662 { 663 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg); 664 } 665 666 struct spdk_nvmf_subsystem * 667 spdk_nvmf_subsystem_get_first(struct spdk_nvmf_tgt *tgt) 668 { 669 struct spdk_nvmf_subsystem *subsystem; 670 uint32_t sid; 671 672 for (sid = 0; sid < tgt->max_subsystems; sid++) { 673 subsystem = tgt->subsystems[sid]; 674 if (subsystem) { 675 return subsystem; 676 } 677 } 678 679 return NULL; 680 } 681 682 struct spdk_nvmf_subsystem * 683 spdk_nvmf_subsystem_get_next(struct spdk_nvmf_subsystem *subsystem) 684 { 685 uint32_t sid; 686 struct spdk_nvmf_tgt *tgt; 687 688 if (!subsystem) { 689 return NULL; 690 } 691 692 tgt = subsystem->tgt; 693 694 for (sid = subsystem->id + 1; sid < tgt->max_subsystems; sid++) { 695 subsystem = tgt->subsystems[sid]; 696 if (subsystem) { 697 return subsystem; 698 } 699 } 700 701 return NULL; 702 } 703 704 /* Must hold subsystem->mutex while calling this function */ 705 static struct spdk_nvmf_host * 706 nvmf_subsystem_find_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn) 707 { 708 struct spdk_nvmf_host *host = NULL; 709 710 TAILQ_FOREACH(host, &subsystem->hosts, link) { 711 if (strcmp(hostnqn, host->nqn) == 0) { 712 return host; 713 } 714 } 715 716 return NULL; 717 } 718 719 int 720 spdk_nvmf_subsystem_add_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn) 721 { 722 struct spdk_nvmf_host *host; 723 724 if (!nvmf_valid_nqn(hostnqn)) { 725 return -EINVAL; 726 } 727 728 pthread_mutex_lock(&subsystem->mutex); 729 730 if (nvmf_subsystem_find_host(subsystem, hostnqn)) { 731 /* This subsystem already allows the specified host. 
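* Adding the same host twice is treated as a no-op and reported to the caller as success.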
*/ 732 pthread_mutex_unlock(&subsystem->mutex); 733 return 0; 734 } 735 736 host = calloc(1, sizeof(*host)); 737 if (!host) { 738 pthread_mutex_unlock(&subsystem->mutex); 739 return -ENOMEM; 740 } 741 742 snprintf(host->nqn, sizeof(host->nqn), "%s", hostnqn); 743 744 TAILQ_INSERT_HEAD(&subsystem->hosts, host, link); 745 746 nvmf_update_discovery_log(subsystem->tgt, hostnqn); 747 748 pthread_mutex_unlock(&subsystem->mutex); 749 750 return 0; 751 } 752 753 int 754 spdk_nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn) 755 { 756 struct spdk_nvmf_host *host; 757 758 pthread_mutex_lock(&subsystem->mutex); 759 760 host = nvmf_subsystem_find_host(subsystem, hostnqn); 761 if (host == NULL) { 762 pthread_mutex_unlock(&subsystem->mutex); 763 return -ENOENT; 764 } 765 766 nvmf_subsystem_remove_host(subsystem, host); 767 pthread_mutex_unlock(&subsystem->mutex); 768 769 return 0; 770 } 771 772 struct nvmf_subsystem_disconnect_host_ctx { 773 struct spdk_nvmf_subsystem *subsystem; 774 char *hostnqn; 775 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn; 776 void *cb_arg; 777 }; 778 779 static void 780 nvmf_subsystem_disconnect_host_fini(struct spdk_io_channel_iter *i, int status) 781 { 782 struct nvmf_subsystem_disconnect_host_ctx *ctx; 783 784 ctx = spdk_io_channel_iter_get_ctx(i); 785 786 if (ctx->cb_fn) { 787 ctx->cb_fn(ctx->cb_arg, status); 788 } 789 free(ctx->hostnqn); 790 free(ctx); 791 } 792 793 static void 794 nvmf_subsystem_disconnect_qpairs_by_host(struct spdk_io_channel_iter *i) 795 { 796 struct nvmf_subsystem_disconnect_host_ctx *ctx; 797 struct spdk_nvmf_poll_group *group; 798 struct spdk_io_channel *ch; 799 struct spdk_nvmf_qpair *qpair, *tmp_qpair; 800 struct spdk_nvmf_ctrlr *ctrlr; 801 802 ctx = spdk_io_channel_iter_get_ctx(i); 803 ch = spdk_io_channel_iter_get_channel(i); 804 group = spdk_io_channel_get_ctx(ch); 805 806 TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) { 807 ctrlr = qpair->ctrlr; 808 809 if (ctrlr == NULL || ctrlr->subsys != ctx->subsystem) { 810 continue; 811 } 812 813 if (strncmp(ctrlr->hostnqn, ctx->hostnqn, sizeof(ctrlr->hostnqn)) == 0) { 814 /* Right now this does not wait for the queue pairs to actually disconnect. 
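* The completion callback passed to spdk_for_each_channel only signals that every poll group
* has been walked, not that the qpairs have finished their teardown.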
*/ 815 spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 816 } 817 } 818 spdk_for_each_channel_continue(i, 0); 819 } 820 821 int 822 spdk_nvmf_subsystem_disconnect_host(struct spdk_nvmf_subsystem *subsystem, 823 const char *hostnqn, 824 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, 825 void *cb_arg) 826 { 827 struct nvmf_subsystem_disconnect_host_ctx *ctx; 828 829 ctx = calloc(1, sizeof(struct nvmf_subsystem_disconnect_host_ctx)); 830 if (ctx == NULL) { 831 return -ENOMEM; 832 } 833 834 ctx->subsystem = subsystem; 835 ctx->hostnqn = strdup(hostnqn); 836 ctx->cb_fn = cb_fn; 837 ctx->cb_arg = cb_arg; 838 839 spdk_for_each_channel(subsystem->tgt, nvmf_subsystem_disconnect_qpairs_by_host, ctx, 840 nvmf_subsystem_disconnect_host_fini); 841 842 return 0; 843 } 844 845 int 846 spdk_nvmf_subsystem_set_allow_any_host(struct spdk_nvmf_subsystem *subsystem, bool allow_any_host) 847 { 848 pthread_mutex_lock(&subsystem->mutex); 849 subsystem->flags.allow_any_host = allow_any_host; 850 nvmf_update_discovery_log(subsystem->tgt, NULL); 851 pthread_mutex_unlock(&subsystem->mutex); 852 853 return 0; 854 } 855 856 bool 857 spdk_nvmf_subsystem_get_allow_any_host(const struct spdk_nvmf_subsystem *subsystem) 858 { 859 bool allow_any_host; 860 struct spdk_nvmf_subsystem *sub; 861 862 /* Technically, taking the mutex modifies data in the subsystem. But the const 863 * is still important to convey that this doesn't mutate any other data. Cast 864 * it away to work around this. */ 865 sub = (struct spdk_nvmf_subsystem *)subsystem; 866 867 pthread_mutex_lock(&sub->mutex); 868 allow_any_host = sub->flags.allow_any_host; 869 pthread_mutex_unlock(&sub->mutex); 870 871 return allow_any_host; 872 } 873 874 bool 875 spdk_nvmf_subsystem_host_allowed(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn) 876 { 877 bool allowed; 878 879 if (!hostnqn) { 880 return false; 881 } 882 883 pthread_mutex_lock(&subsystem->mutex); 884 885 if (subsystem->flags.allow_any_host) { 886 pthread_mutex_unlock(&subsystem->mutex); 887 return true; 888 } 889 890 allowed = nvmf_subsystem_find_host(subsystem, hostnqn) != NULL; 891 pthread_mutex_unlock(&subsystem->mutex); 892 893 return allowed; 894 } 895 896 struct spdk_nvmf_host * 897 spdk_nvmf_subsystem_get_first_host(struct spdk_nvmf_subsystem *subsystem) 898 { 899 return TAILQ_FIRST(&subsystem->hosts); 900 } 901 902 903 struct spdk_nvmf_host * 904 spdk_nvmf_subsystem_get_next_host(struct spdk_nvmf_subsystem *subsystem, 905 struct spdk_nvmf_host *prev_host) 906 { 907 return TAILQ_NEXT(prev_host, link); 908 } 909 910 const char * 911 spdk_nvmf_host_get_nqn(const struct spdk_nvmf_host *host) 912 { 913 return host->nqn; 914 } 915 916 struct spdk_nvmf_subsystem_listener * 917 nvmf_subsystem_find_listener(struct spdk_nvmf_subsystem *subsystem, 918 const struct spdk_nvme_transport_id *trid) 919 { 920 struct spdk_nvmf_subsystem_listener *listener; 921 922 TAILQ_FOREACH(listener, &subsystem->listeners, link) { 923 if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) { 924 return listener; 925 } 926 } 927 928 return NULL; 929 } 930 931 /** 932 * Function to be called once the target is listening. 933 * 934 * \param ctx Context argument passed to this function. 935 * \param status 0 if it completed successfully, or negative errno if it failed. 
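* On failure the listener is freed after the user callback runs; on success it is inserted
* into the subsystem's listener list and the discovery log is updated before the callback
* is invoked.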
936 */ 937 static void 938 _nvmf_subsystem_add_listener_done(void *ctx, int status) 939 { 940 struct spdk_nvmf_subsystem_listener *listener = ctx; 941 942 if (status) { 943 listener->cb_fn(listener->cb_arg, status); 944 free(listener); 945 return; 946 } 947 948 TAILQ_INSERT_HEAD(&listener->subsystem->listeners, listener, link); 949 nvmf_update_discovery_log(listener->subsystem->tgt, NULL); 950 listener->cb_fn(listener->cb_arg, status); 951 } 952 953 void 954 spdk_nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem, 955 struct spdk_nvme_transport_id *trid, 956 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, 957 void *cb_arg) 958 { 959 struct spdk_nvmf_transport *transport; 960 struct spdk_nvmf_subsystem_listener *listener; 961 struct spdk_nvmf_listener *tr_listener; 962 int rc = 0; 963 964 assert(cb_fn != NULL); 965 966 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 967 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 968 cb_fn(cb_arg, -EAGAIN); 969 return; 970 } 971 972 if (nvmf_subsystem_find_listener(subsystem, trid)) { 973 /* Listener already exists in this subsystem */ 974 cb_fn(cb_arg, 0); 975 return; 976 } 977 978 transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, trid->trstring); 979 if (!transport) { 980 SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n", 981 trid->trstring); 982 cb_fn(cb_arg, -EINVAL); 983 return; 984 } 985 986 tr_listener = nvmf_transport_find_listener(transport, trid); 987 if (!tr_listener) { 988 SPDK_ERRLOG("Cannot find transport listener for %s\n", trid->traddr); 989 cb_fn(cb_arg, -EINVAL); 990 return; 991 } 992 993 listener = calloc(1, sizeof(*listener)); 994 if (!listener) { 995 cb_fn(cb_arg, -ENOMEM); 996 return; 997 } 998 999 listener->trid = &tr_listener->trid; 1000 listener->transport = transport; 1001 listener->cb_fn = cb_fn; 1002 listener->cb_arg = cb_arg; 1003 listener->subsystem = subsystem; 1004 listener->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 1005 1006 if (transport->ops->listen_associate != NULL) { 1007 rc = transport->ops->listen_associate(transport, subsystem, trid); 1008 } 1009 1010 _nvmf_subsystem_add_listener_done(listener, rc); 1011 } 1012 1013 int 1014 spdk_nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem, 1015 const struct spdk_nvme_transport_id *trid) 1016 { 1017 struct spdk_nvmf_subsystem_listener *listener; 1018 1019 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 1020 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 1021 return -EAGAIN; 1022 } 1023 1024 listener = nvmf_subsystem_find_listener(subsystem, trid); 1025 if (listener == NULL) { 1026 return -ENOENT; 1027 } 1028 1029 _nvmf_subsystem_remove_listener(subsystem, listener, false); 1030 1031 return 0; 1032 } 1033 1034 void 1035 nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem, 1036 bool stop) 1037 { 1038 struct spdk_nvmf_subsystem_listener *listener, *listener_tmp; 1039 1040 TAILQ_FOREACH_SAFE(listener, &subsystem->listeners, link, listener_tmp) { 1041 _nvmf_subsystem_remove_listener(subsystem, listener, stop); 1042 } 1043 } 1044 1045 bool 1046 spdk_nvmf_subsystem_listener_allowed(struct spdk_nvmf_subsystem *subsystem, 1047 const struct spdk_nvme_transport_id *trid) 1048 { 1049 struct spdk_nvmf_subsystem_listener *listener; 1050 1051 if (!strcmp(subsystem->subnqn, SPDK_NVMF_DISCOVERY_NQN)) { 1052 return true; 1053 } 1054 1055 TAILQ_FOREACH(listener, &subsystem->listeners, link) { 1056 if 
(spdk_nvme_transport_id_compare(listener->trid, trid) == 0) { 1057 return true; 1058 } 1059 } 1060 1061 return false; 1062 } 1063 1064 struct spdk_nvmf_subsystem_listener * 1065 spdk_nvmf_subsystem_get_first_listener(struct spdk_nvmf_subsystem *subsystem) 1066 { 1067 return TAILQ_FIRST(&subsystem->listeners); 1068 } 1069 1070 struct spdk_nvmf_subsystem_listener * 1071 spdk_nvmf_subsystem_get_next_listener(struct spdk_nvmf_subsystem *subsystem, 1072 struct spdk_nvmf_subsystem_listener *prev_listener) 1073 { 1074 return TAILQ_NEXT(prev_listener, link); 1075 } 1076 1077 const struct spdk_nvme_transport_id * 1078 spdk_nvmf_subsystem_listener_get_trid(struct spdk_nvmf_subsystem_listener *listener) 1079 { 1080 return listener->trid; 1081 } 1082 1083 void 1084 spdk_nvmf_subsystem_allow_any_listener(struct spdk_nvmf_subsystem *subsystem, 1085 bool allow_any_listener) 1086 { 1087 subsystem->flags.allow_any_listener = allow_any_listener; 1088 } 1089 1090 bool 1091 spdk_nvmf_subsytem_any_listener_allowed(struct spdk_nvmf_subsystem *subsystem) 1092 { 1093 return subsystem->flags.allow_any_listener; 1094 } 1095 1096 1097 struct subsystem_update_ns_ctx { 1098 struct spdk_nvmf_subsystem *subsystem; 1099 1100 spdk_nvmf_subsystem_state_change_done cb_fn; 1101 void *cb_arg; 1102 }; 1103 1104 static void 1105 subsystem_update_ns_done(struct spdk_io_channel_iter *i, int status) 1106 { 1107 struct subsystem_update_ns_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1108 1109 if (ctx->cb_fn) { 1110 ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status); 1111 } 1112 free(ctx); 1113 } 1114 1115 static void 1116 subsystem_update_ns_on_pg(struct spdk_io_channel_iter *i) 1117 { 1118 int rc; 1119 struct subsystem_update_ns_ctx *ctx; 1120 struct spdk_nvmf_poll_group *group; 1121 struct spdk_nvmf_subsystem *subsystem; 1122 1123 ctx = spdk_io_channel_iter_get_ctx(i); 1124 group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i)); 1125 subsystem = ctx->subsystem; 1126 1127 rc = nvmf_poll_group_update_subsystem(group, subsystem); 1128 spdk_for_each_channel_continue(i, rc); 1129 } 1130 1131 static int 1132 nvmf_subsystem_update_ns(struct spdk_nvmf_subsystem *subsystem, spdk_channel_for_each_cpl cpl, 1133 void *ctx) 1134 { 1135 spdk_for_each_channel(subsystem->tgt, 1136 subsystem_update_ns_on_pg, 1137 ctx, 1138 cpl); 1139 1140 return 0; 1141 } 1142 1143 static void 1144 nvmf_subsystem_ns_changed(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid) 1145 { 1146 struct spdk_nvmf_ctrlr *ctrlr; 1147 1148 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 1149 nvmf_ctrlr_ns_changed(ctrlr, nsid); 1150 } 1151 } 1152 1153 static uint32_t 1154 nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns); 1155 1156 int 1157 spdk_nvmf_subsystem_remove_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid) 1158 { 1159 struct spdk_nvmf_transport *transport; 1160 struct spdk_nvmf_ns *ns; 1161 1162 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 1163 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 1164 assert(false); 1165 return -1; 1166 } 1167 1168 if (nsid == 0 || nsid > subsystem->max_nsid) { 1169 return -1; 1170 } 1171 1172 ns = subsystem->ns[nsid - 1]; 1173 if (!ns) { 1174 return -1; 1175 } 1176 1177 subsystem->ns[nsid - 1] = NULL; 1178 1179 free(ns->ptpl_file); 1180 nvmf_ns_reservation_clear_all_registrants(ns); 1181 spdk_bdev_module_release_bdev(ns->bdev); 1182 spdk_bdev_close(ns->desc); 1183 free(ns); 1184 1185 for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport; 1186 transport = 
spdk_nvmf_transport_get_next(transport)) { 1187 if (transport->ops->subsystem_remove_ns) { 1188 transport->ops->subsystem_remove_ns(transport, subsystem, nsid); 1189 } 1190 } 1191 1192 nvmf_subsystem_ns_changed(subsystem, nsid); 1193 1194 return 0; 1195 } 1196 1197 struct subsystem_ns_change_ctx { 1198 struct spdk_nvmf_subsystem *subsystem; 1199 spdk_nvmf_subsystem_state_change_done cb_fn; 1200 uint32_t nsid; 1201 }; 1202 1203 static void 1204 _nvmf_ns_hot_remove(struct spdk_nvmf_subsystem *subsystem, 1205 void *cb_arg, int status) 1206 { 1207 struct subsystem_ns_change_ctx *ctx = cb_arg; 1208 int rc; 1209 1210 rc = spdk_nvmf_subsystem_remove_ns(subsystem, ctx->nsid); 1211 if (rc != 0) { 1212 SPDK_ERRLOG("Failed to make changes to NVME-oF subsystem with id: %u\n", subsystem->id); 1213 } 1214 1215 spdk_nvmf_subsystem_resume(subsystem, NULL, NULL); 1216 1217 free(ctx); 1218 } 1219 1220 static void 1221 nvmf_ns_change_msg(void *ns_ctx) 1222 { 1223 struct subsystem_ns_change_ctx *ctx = ns_ctx; 1224 int rc; 1225 1226 rc = spdk_nvmf_subsystem_pause(ctx->subsystem, ctx->nsid, ctx->cb_fn, ctx); 1227 if (rc) { 1228 if (rc == -EBUSY) { 1229 /* Try again, this is not a permanent situation. */ 1230 spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ctx); 1231 } else { 1232 free(ctx); 1233 SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n"); 1234 } 1235 } 1236 } 1237 1238 static void 1239 nvmf_ns_hot_remove(void *remove_ctx) 1240 { 1241 struct spdk_nvmf_ns *ns = remove_ctx; 1242 struct subsystem_ns_change_ctx *ns_ctx; 1243 int rc; 1244 1245 /* We have to allocate a new context because this op 1246 * is asynchronous and we could lose the ns in the middle. 1247 */ 1248 ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx)); 1249 if (!ns_ctx) { 1250 SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n"); 1251 return; 1252 } 1253 1254 ns_ctx->subsystem = ns->subsystem; 1255 ns_ctx->nsid = ns->opts.nsid; 1256 ns_ctx->cb_fn = _nvmf_ns_hot_remove; 1257 1258 rc = spdk_nvmf_subsystem_pause(ns->subsystem, ns_ctx->nsid, _nvmf_ns_hot_remove, ns_ctx); 1259 if (rc) { 1260 if (rc == -EBUSY) { 1261 /* Try again, this is not a permanent situation. */ 1262 spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx); 1263 } else { 1264 SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n"); 1265 free(ns_ctx); 1266 } 1267 } 1268 } 1269 1270 static void 1271 _nvmf_ns_resize(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status) 1272 { 1273 struct subsystem_ns_change_ctx *ctx = cb_arg; 1274 1275 nvmf_subsystem_ns_changed(subsystem, ctx->nsid); 1276 spdk_nvmf_subsystem_resume(subsystem, NULL, NULL); 1277 1278 free(ctx); 1279 } 1280 1281 static void 1282 nvmf_ns_resize(void *event_ctx) 1283 { 1284 struct spdk_nvmf_ns *ns = event_ctx; 1285 struct subsystem_ns_change_ctx *ns_ctx; 1286 int rc; 1287 1288 /* We have to allocate a new context because this op 1289 * is asynchronous and we could lose the ns in the middle. 1290 */ 1291 ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx)); 1292 if (!ns_ctx) { 1293 SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n"); 1294 return; 1295 } 1296 1297 ns_ctx->subsystem = ns->subsystem; 1298 ns_ctx->nsid = ns->opts.nsid; 1299 ns_ctx->cb_fn = _nvmf_ns_resize; 1300 1301 /* Specify 0 for the nsid here, because we do not need to pause the namespace. 1302 * Namespaces can only be resized bigger, so there is no need to quiesce I/O. 
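* _nvmf_ns_resize only needs to notify the controllers that the namespace changed and then
* resume the subsystem.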
1303 */ 1304 rc = spdk_nvmf_subsystem_pause(ns->subsystem, 0, _nvmf_ns_resize, ns_ctx); 1305 if (rc) { 1306 if (rc == -EBUSY) { 1307 /* Try again, this is not a permanent situation. */ 1308 spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx); 1309 } else { 1310 SPDK_ERRLOG("Unable to pause subsystem to process namespace resize!\n"); 1311 free(ns_ctx); } 1312 } 1313 } 1314 1315 static void 1316 nvmf_ns_event(enum spdk_bdev_event_type type, 1317 struct spdk_bdev *bdev, 1318 void *event_ctx) 1319 { 1320 SPDK_DEBUGLOG(nvmf, "Bdev event: type %d, name %s, subsystem_id %d, ns_id %d\n", 1321 type, 1322 bdev->name, 1323 ((struct spdk_nvmf_ns *)event_ctx)->subsystem->id, 1324 ((struct spdk_nvmf_ns *)event_ctx)->nsid); 1325 1326 switch (type) { 1327 case SPDK_BDEV_EVENT_REMOVE: 1328 nvmf_ns_hot_remove(event_ctx); 1329 break; 1330 case SPDK_BDEV_EVENT_RESIZE: 1331 nvmf_ns_resize(event_ctx); 1332 break; 1333 default: 1334 SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type); 1335 break; 1336 } 1337 } 1338 1339 void 1340 spdk_nvmf_ns_opts_get_defaults(struct spdk_nvmf_ns_opts *opts, size_t opts_size) 1341 { 1342 /* All current fields are set to 0 by default. */ 1343 memset(opts, 0, opts_size); 1344 } 1345 1346 /* Dummy bdev module used to claim bdevs. */ 1347 static struct spdk_bdev_module ns_bdev_module = { 1348 .name = "NVMe-oF Target", 1349 }; 1350 1351 static int 1352 nvmf_ns_load_reservation(const char *file, struct spdk_nvmf_reservation_info *info); 1353 static int 1354 nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info); 1355 1356 uint32_t 1357 spdk_nvmf_subsystem_add_ns_ext(struct spdk_nvmf_subsystem *subsystem, const char *bdev_name, 1358 const struct spdk_nvmf_ns_opts *user_opts, size_t opts_size, 1359 const char *ptpl_file) 1360 { 1361 struct spdk_nvmf_transport *transport; 1362 struct spdk_nvmf_ns_opts opts; 1363 struct spdk_nvmf_ns *ns; 1364 struct spdk_nvmf_reservation_info info = {0}; 1365 int rc; 1366 1367 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 1368 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 1369 return 0; 1370 } 1371 1372 spdk_nvmf_ns_opts_get_defaults(&opts, sizeof(opts)); 1373 if (user_opts) { 1374 memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size)); 1375 } 1376 1377 if (opts.nsid == SPDK_NVME_GLOBAL_NS_TAG) { 1378 SPDK_ERRLOG("Invalid NSID %" PRIu32 "\n", opts.nsid); 1379 return 0; 1380 } 1381 1382 if (opts.nsid == 0) { 1383 /* 1384 * NSID not specified - find a free index. 1385 * 1386 * If no free slots are found, opts.nsid will be subsystem->max_nsid + 1, which will 1387 * then be rejected by the max_nsid check below.
1388 */ 1389 for (opts.nsid = 1; opts.nsid <= subsystem->max_nsid; opts.nsid++) { 1390 if (_nvmf_subsystem_get_ns(subsystem, opts.nsid) == NULL) { 1391 break; 1392 } 1393 } 1394 } 1395 1396 if (_nvmf_subsystem_get_ns(subsystem, opts.nsid)) { 1397 SPDK_ERRLOG("Requested NSID %" PRIu32 " already in use\n", opts.nsid); 1398 return 0; 1399 } 1400 1401 if (opts.nsid > subsystem->max_nsid) { 1402 SPDK_ERRLOG("NSID greater than maximum not allowed\n"); 1403 return 0; 1404 } 1405 1406 ns = calloc(1, sizeof(*ns)); 1407 if (ns == NULL) { 1408 SPDK_ERRLOG("Namespace allocation failed\n"); 1409 return 0; 1410 } 1411 1412 rc = spdk_bdev_open_ext(bdev_name, true, nvmf_ns_event, ns, &ns->desc); 1413 if (rc != 0) { 1414 SPDK_ERRLOG("Subsystem %s: bdev %s cannot be opened, error=%d\n", 1415 subsystem->subnqn, bdev_name, rc); 1416 free(ns); 1417 return 0; 1418 } 1419 1420 ns->bdev = spdk_bdev_desc_get_bdev(ns->desc); 1421 1422 if (spdk_bdev_get_md_size(ns->bdev) != 0 && !spdk_bdev_is_md_interleaved(ns->bdev)) { 1423 SPDK_ERRLOG("Can't attach bdev with separate metadata.\n"); 1424 spdk_bdev_close(ns->desc); 1425 free(ns); 1426 return 0; 1427 } 1428 1429 rc = spdk_bdev_module_claim_bdev(ns->bdev, ns->desc, &ns_bdev_module); 1430 if (rc != 0) { 1431 spdk_bdev_close(ns->desc); 1432 free(ns); 1433 return 0; 1434 } 1435 1436 if (spdk_mem_all_zero(&opts.uuid, sizeof(opts.uuid))) { 1437 opts.uuid = *spdk_bdev_get_uuid(ns->bdev); 1438 } 1439 1440 ns->opts = opts; 1441 ns->subsystem = subsystem; 1442 subsystem->ns[opts.nsid - 1] = ns; 1443 ns->nsid = opts.nsid; 1444 TAILQ_INIT(&ns->registrants); 1445 1446 if (ptpl_file) { 1447 rc = nvmf_ns_load_reservation(ptpl_file, &info); 1448 if (!rc) { 1449 rc = nvmf_ns_reservation_restore(ns, &info); 1450 if (rc) { 1451 SPDK_ERRLOG("Subsystem restore reservation failed\n"); 1452 subsystem->ns[opts.nsid - 1] = NULL; 1453 spdk_bdev_module_release_bdev(ns->bdev); 1454 spdk_bdev_close(ns->desc); 1455 free(ns); 1456 return 0; 1457 } 1458 } 1459 ns->ptpl_file = strdup(ptpl_file); 1460 } 1461 1462 for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport; 1463 transport = spdk_nvmf_transport_get_next(transport)) { 1464 if (transport->ops->subsystem_add_ns) { 1465 rc = transport->ops->subsystem_add_ns(transport, subsystem, ns); 1466 if (rc) { 1467 SPDK_ERRLOG("Namespace attachment is not allowed by %s transport\n", transport->ops->name); 1468 free(ns->ptpl_file); 1469 nvmf_ns_reservation_clear_all_registrants(ns); 1470 subsystem->ns[opts.nsid - 1] = NULL; 1471 spdk_bdev_module_release_bdev(ns->bdev); 1472 spdk_bdev_close(ns->desc); 1473 free(ns); 1474 return 0; 1475 } 1476 } 1477 } 1478 1479 SPDK_DEBUGLOG(nvmf, "Subsystem %s: bdev %s assigned nsid %" PRIu32 "\n", 1480 spdk_nvmf_subsystem_get_nqn(subsystem), 1481 bdev_name, 1482 opts.nsid); 1483 1484 nvmf_subsystem_ns_changed(subsystem, opts.nsid); 1485 1486 return opts.nsid; 1487 } 1488 1489 uint32_t 1490 spdk_nvmf_subsystem_add_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_bdev *bdev, 1491 const struct spdk_nvmf_ns_opts *user_opts, size_t opts_size, 1492 const char *ptpl_file) 1493 { 1494 return spdk_nvmf_subsystem_add_ns_ext(subsystem, spdk_bdev_get_name(bdev), 1495 user_opts, opts_size, ptpl_file); 1496 } 1497 1498 static uint32_t 1499 nvmf_subsystem_get_next_allocated_nsid(struct spdk_nvmf_subsystem *subsystem, 1500 uint32_t prev_nsid) 1501 { 1502 uint32_t nsid; 1503 1504 if (prev_nsid >= subsystem->max_nsid) { 1505 return 0; 1506 } 1507 1508 for (nsid = prev_nsid + 1; nsid <= subsystem->max_nsid; nsid++) { 
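/* NSIDs are 1-based, so slot nsid - 1 of the ns array holds the namespace with that ID;
 * return the first allocated entry after prev_nsid. */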
1509 if (subsystem->ns[nsid - 1]) { 1510 return nsid; 1511 } 1512 } 1513 1514 return 0; 1515 } 1516 1517 struct spdk_nvmf_ns * 1518 spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem) 1519 { 1520 uint32_t first_nsid; 1521 1522 first_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, 0); 1523 return _nvmf_subsystem_get_ns(subsystem, first_nsid); 1524 } 1525 1526 struct spdk_nvmf_ns * 1527 spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, 1528 struct spdk_nvmf_ns *prev_ns) 1529 { 1530 uint32_t next_nsid; 1531 1532 next_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, prev_ns->opts.nsid); 1533 return _nvmf_subsystem_get_ns(subsystem, next_nsid); 1534 } 1535 1536 struct spdk_nvmf_ns * 1537 spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid) 1538 { 1539 return _nvmf_subsystem_get_ns(subsystem, nsid); 1540 } 1541 1542 uint32_t 1543 spdk_nvmf_ns_get_id(const struct spdk_nvmf_ns *ns) 1544 { 1545 return ns->opts.nsid; 1546 } 1547 1548 struct spdk_bdev * 1549 spdk_nvmf_ns_get_bdev(struct spdk_nvmf_ns *ns) 1550 { 1551 return ns->bdev; 1552 } 1553 1554 void 1555 spdk_nvmf_ns_get_opts(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_ns_opts *opts, 1556 size_t opts_size) 1557 { 1558 memset(opts, 0, opts_size); 1559 memcpy(opts, &ns->opts, spdk_min(sizeof(ns->opts), opts_size)); 1560 } 1561 1562 const char * 1563 spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem) 1564 { 1565 return subsystem->sn; 1566 } 1567 1568 int 1569 spdk_nvmf_subsystem_set_sn(struct spdk_nvmf_subsystem *subsystem, const char *sn) 1570 { 1571 size_t len, max_len; 1572 1573 max_len = sizeof(subsystem->sn) - 1; 1574 len = strlen(sn); 1575 if (len > max_len) { 1576 SPDK_DEBUGLOG(nvmf, "Invalid sn \"%s\": length %zu > max %zu\n", 1577 sn, len, max_len); 1578 return -1; 1579 } 1580 1581 if (!nvmf_valid_ascii_string(sn, len)) { 1582 SPDK_DEBUGLOG(nvmf, "Non-ASCII sn\n"); 1583 SPDK_LOGDUMP(nvmf, "sn", sn, len); 1584 return -1; 1585 } 1586 1587 snprintf(subsystem->sn, sizeof(subsystem->sn), "%s", sn); 1588 1589 return 0; 1590 } 1591 1592 const char * 1593 spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem) 1594 { 1595 return subsystem->mn; 1596 } 1597 1598 int 1599 spdk_nvmf_subsystem_set_mn(struct spdk_nvmf_subsystem *subsystem, const char *mn) 1600 { 1601 size_t len, max_len; 1602 1603 if (mn == NULL) { 1604 mn = MODEL_NUMBER_DEFAULT; 1605 } 1606 max_len = sizeof(subsystem->mn) - 1; 1607 len = strlen(mn); 1608 if (len > max_len) { 1609 SPDK_DEBUGLOG(nvmf, "Invalid mn \"%s\": length %zu > max %zu\n", 1610 mn, len, max_len); 1611 return -1; 1612 } 1613 1614 if (!nvmf_valid_ascii_string(mn, len)) { 1615 SPDK_DEBUGLOG(nvmf, "Non-ASCII mn\n"); 1616 SPDK_LOGDUMP(nvmf, "mn", mn, len); 1617 return -1; 1618 } 1619 1620 snprintf(subsystem->mn, sizeof(subsystem->mn), "%s", mn); 1621 1622 return 0; 1623 } 1624 1625 const char * 1626 spdk_nvmf_subsystem_get_nqn(const struct spdk_nvmf_subsystem *subsystem) 1627 { 1628 return subsystem->subnqn; 1629 } 1630 1631 enum spdk_nvmf_subtype spdk_nvmf_subsystem_get_type(struct spdk_nvmf_subsystem *subsystem) 1632 { 1633 return subsystem->subtype; 1634 } 1635 1636 uint32_t 1637 spdk_nvmf_subsystem_get_max_nsid(struct spdk_nvmf_subsystem *subsystem) 1638 { 1639 return subsystem->max_nsid; 1640 } 1641 1642 static uint16_t 1643 nvmf_subsystem_gen_cntlid(struct spdk_nvmf_subsystem *subsystem) 1644 { 1645 int count; 1646 1647 /* 1648 * In the worst case, we might have to try all CNTLID values 
between 1 and 0xFFF0 - 1 1649 * before we find one that is unused (or find that all values are in use). 1650 */ 1651 for (count = 0; count < 0xFFF0 - 1; count++) { 1652 subsystem->next_cntlid++; 1653 if (subsystem->next_cntlid >= 0xFFF0) { 1654 /* The spec reserves cntlid values in the range FFF0h to FFFFh. */ 1655 subsystem->next_cntlid = 1; 1656 } 1657 1658 /* Check if a controller with this cntlid currently exists. */ 1659 if (nvmf_subsystem_get_ctrlr(subsystem, subsystem->next_cntlid) == NULL) { 1660 /* Found unused cntlid */ 1661 return subsystem->next_cntlid; 1662 } 1663 } 1664 1665 /* All valid cntlid values are in use. */ 1666 return 0xFFFF; 1667 } 1668 1669 int 1670 nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr) 1671 { 1672 ctrlr->cntlid = nvmf_subsystem_gen_cntlid(subsystem); 1673 if (ctrlr->cntlid == 0xFFFF) { 1674 /* Unable to get a cntlid */ 1675 SPDK_ERRLOG("Reached max simultaneous ctrlrs\n"); 1676 return -EBUSY; 1677 } 1678 1679 TAILQ_INSERT_TAIL(&subsystem->ctrlrs, ctrlr, link); 1680 1681 return 0; 1682 } 1683 1684 void 1685 nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem, 1686 struct spdk_nvmf_ctrlr *ctrlr) 1687 { 1688 assert(subsystem == ctrlr->subsys); 1689 TAILQ_REMOVE(&subsystem->ctrlrs, ctrlr, link); 1690 } 1691 1692 struct spdk_nvmf_ctrlr * 1693 nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid) 1694 { 1695 struct spdk_nvmf_ctrlr *ctrlr; 1696 1697 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 1698 if (ctrlr->cntlid == cntlid) { 1699 return ctrlr; 1700 } 1701 } 1702 1703 return NULL; 1704 } 1705 1706 uint32_t 1707 spdk_nvmf_subsystem_get_max_namespaces(const struct spdk_nvmf_subsystem *subsystem) 1708 { 1709 return subsystem->max_nsid; 1710 } 1711 1712 struct _nvmf_ns_registrant { 1713 uint64_t rkey; 1714 char *host_uuid; 1715 }; 1716 1717 struct _nvmf_ns_registrants { 1718 size_t num_regs; 1719 struct _nvmf_ns_registrant reg[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 1720 }; 1721 1722 struct _nvmf_ns_reservation { 1723 bool ptpl_activated; 1724 enum spdk_nvme_reservation_type rtype; 1725 uint64_t crkey; 1726 char *bdev_uuid; 1727 char *holder_uuid; 1728 struct _nvmf_ns_registrants regs; 1729 }; 1730 1731 static const struct spdk_json_object_decoder nvmf_ns_pr_reg_decoders[] = { 1732 {"rkey", offsetof(struct _nvmf_ns_registrant, rkey), spdk_json_decode_uint64}, 1733 {"host_uuid", offsetof(struct _nvmf_ns_registrant, host_uuid), spdk_json_decode_string}, 1734 }; 1735 1736 static int 1737 nvmf_decode_ns_pr_reg(const struct spdk_json_val *val, void *out) 1738 { 1739 struct _nvmf_ns_registrant *reg = out; 1740 1741 return spdk_json_decode_object(val, nvmf_ns_pr_reg_decoders, 1742 SPDK_COUNTOF(nvmf_ns_pr_reg_decoders), reg); 1743 } 1744 1745 static int 1746 nvmf_decode_ns_pr_regs(const struct spdk_json_val *val, void *out) 1747 { 1748 struct _nvmf_ns_registrants *regs = out; 1749 1750 return spdk_json_decode_array(val, nvmf_decode_ns_pr_reg, regs->reg, 1751 SPDK_NVMF_MAX_NUM_REGISTRANTS, &regs->num_regs, 1752 sizeof(struct _nvmf_ns_registrant)); 1753 } 1754 1755 static const struct spdk_json_object_decoder nvmf_ns_pr_decoders[] = { 1756 {"ptpl", offsetof(struct _nvmf_ns_reservation, ptpl_activated), spdk_json_decode_bool, true}, 1757 {"rtype", offsetof(struct _nvmf_ns_reservation, rtype), spdk_json_decode_uint32, true}, 1758 {"crkey", offsetof(struct _nvmf_ns_reservation, crkey), spdk_json_decode_uint64, true}, 1759 {"bdev_uuid", offsetof(struct _nvmf_ns_reservation, bdev_uuid),
spdk_json_decode_string}, 1760 {"holder_uuid", offsetof(struct _nvmf_ns_reservation, holder_uuid), spdk_json_decode_string, true}, 1761 {"registrants", offsetof(struct _nvmf_ns_reservation, regs), nvmf_decode_ns_pr_regs}, 1762 }; 1763 1764 static int 1765 nvmf_ns_load_reservation(const char *file, struct spdk_nvmf_reservation_info *info) 1766 { 1767 FILE *fd; 1768 size_t json_size; 1769 ssize_t values_cnt, rc; 1770 void *json = NULL, *end; 1771 struct spdk_json_val *values = NULL; 1772 struct _nvmf_ns_reservation res = {}; 1773 uint32_t i; 1774 1775 fd = fopen(file, "r"); 1776 /* It's not an error if the file does not exist */ 1777 if (!fd) { 1778 SPDK_NOTICELOG("File %s does not exist\n", file); 1779 return -ENOENT; 1780 } 1781 1782 /* Load all persist file contents into a local buffer */ 1783 json = spdk_posix_file_load(fd, &json_size); 1784 fclose(fd); 1785 if (!json) { 1786 SPDK_ERRLOG("Load persist file %s failed\n", file); 1787 return -ENOMEM; 1788 } 1789 1790 rc = spdk_json_parse(json, json_size, NULL, 0, &end, 0); 1791 if (rc < 0) { 1792 SPDK_NOTICELOG("Parsing JSON configuration failed (%zd)\n", rc); 1793 goto exit; 1794 } 1795 1796 values_cnt = rc; 1797 values = calloc(values_cnt, sizeof(struct spdk_json_val)); 1798 if (values == NULL) { 1799 goto exit; 1800 } 1801 1802 rc = spdk_json_parse(json, json_size, values, values_cnt, &end, 0); 1803 if (rc != values_cnt) { 1804 SPDK_ERRLOG("Parsing JSON configuration failed (%zd)\n", rc); 1805 goto exit; 1806 } 1807 1808 /* Decode json */ 1809 if (spdk_json_decode_object(values, nvmf_ns_pr_decoders, 1810 SPDK_COUNTOF(nvmf_ns_pr_decoders), 1811 &res)) { 1812 SPDK_ERRLOG("Invalid objects in the persist file %s\n", file); 1813 rc = -EINVAL; 1814 goto exit; 1815 } 1816 1817 if (res.regs.num_regs > SPDK_NVMF_MAX_NUM_REGISTRANTS) { 1818 SPDK_ERRLOG("Can only support up to %u registrants\n", SPDK_NVMF_MAX_NUM_REGISTRANTS); 1819 rc = -ERANGE; 1820 goto exit; 1821 } 1822 1823 rc = 0; 1824 info->ptpl_activated = res.ptpl_activated; 1825 info->rtype = res.rtype; 1826 info->crkey = res.crkey; 1827 snprintf(info->bdev_uuid, sizeof(info->bdev_uuid), "%s", res.bdev_uuid); 1828 snprintf(info->holder_uuid, sizeof(info->holder_uuid), "%s", res.holder_uuid); 1829 info->num_regs = res.regs.num_regs; 1830 for (i = 0; i < res.regs.num_regs; i++) { 1831 info->registrants[i].rkey = res.regs.reg[i].rkey; 1832 snprintf(info->registrants[i].host_uuid, sizeof(info->registrants[i].host_uuid), "%s", 1833 res.regs.reg[i].host_uuid); 1834 } 1835 1836 exit: 1837 free(json); 1838 free(values); 1839 free(res.bdev_uuid); 1840 free(res.holder_uuid); 1841 for (i = 0; i < res.regs.num_regs; i++) { 1842 free(res.regs.reg[i].host_uuid); 1843 } 1844 1845 return rc; 1846 } 1847 1848 static bool 1849 nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns); 1850 1851 static int 1852 nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info) 1853 { 1854 uint32_t i; 1855 struct spdk_nvmf_registrant *reg, *holder = NULL; 1856 struct spdk_uuid bdev_uuid, holder_uuid; 1857 1858 SPDK_DEBUGLOG(nvmf, "NSID %u, PTPL %u, Number of registrants %u\n", 1859 ns->nsid, info->ptpl_activated, info->num_regs); 1860 1861 /* it's not an error */ 1862 if (!info->ptpl_activated || !info->num_regs) { 1863 return 0; 1864 } 1865 1866 spdk_uuid_parse(&bdev_uuid, info->bdev_uuid); 1867 if (spdk_uuid_compare(&bdev_uuid, spdk_bdev_get_uuid(ns->bdev))) { 1868 SPDK_ERRLOG("Existing bdev UUID does not match the configuration file\n"); 1869 return -EINVAL; 1870 } 1871 1872
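/* Restore the reservation itself (key, type and PTPL activation) before rebuilding the
 * registrant list from the persisted info below. */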
ns->crkey = info->crkey; 1873 ns->rtype = info->rtype; 1874 ns->ptpl_activated = info->ptpl_activated; 1875 spdk_uuid_parse(&holder_uuid, info->holder_uuid); 1876 1877 SPDK_DEBUGLOG(nvmf, "Bdev UUID %s\n", info->bdev_uuid); 1878 if (info->rtype) { 1879 SPDK_DEBUGLOG(nvmf, "Holder UUID %s, RTYPE %u, RKEY 0x%"PRIx64"\n", 1880 info->holder_uuid, info->rtype, info->crkey); 1881 } 1882 1883 for (i = 0; i < info->num_regs; i++) { 1884 reg = calloc(1, sizeof(*reg)); 1885 if (!reg) { 1886 return -ENOMEM; 1887 } 1888 spdk_uuid_parse(&reg->hostid, info->registrants[i].host_uuid); 1889 reg->rkey = info->registrants[i].rkey; 1890 TAILQ_INSERT_TAIL(&ns->registrants, reg, link); 1891 if (!spdk_uuid_compare(&holder_uuid, &reg->hostid)) { 1892 holder = reg; 1893 } 1894 SPDK_DEBUGLOG(nvmf, "Registrant RKEY 0x%"PRIx64", Host UUID %s\n", 1895 info->registrants[i].rkey, info->registrants[i].host_uuid); 1896 } 1897 1898 if (nvmf_ns_reservation_all_registrants_type(ns)) { 1899 ns->holder = TAILQ_FIRST(&ns->registrants); 1900 } else { 1901 ns->holder = holder; 1902 } 1903 1904 return 0; 1905 } 1906 1907 static int 1908 nvmf_ns_json_write_cb(void *cb_ctx, const void *data, size_t size) 1909 { 1910 char *file = cb_ctx; 1911 size_t rc; 1912 FILE *fd; 1913 1914 fd = fopen(file, "w"); 1915 if (!fd) { 1916 SPDK_ERRLOG("Can't open file %s for write\n", file); 1917 return -ENOENT; 1918 } 1919 rc = fwrite(data, 1, size, fd); 1920 fclose(fd); 1921 1922 return rc == size ? 0 : -1; 1923 } 1924 1925 static int 1926 nvmf_ns_reservation_update(const char *file, struct spdk_nvmf_reservation_info *info) 1927 { 1928 struct spdk_json_write_ctx *w; 1929 uint32_t i; 1930 int rc = 0; 1931 1932 w = spdk_json_write_begin(nvmf_ns_json_write_cb, (void *)file, 0); 1933 if (w == NULL) { 1934 return -ENOMEM; 1935 } 1936 /* clear the configuration file */ 1937 if (!info->ptpl_activated) { 1938 goto exit; 1939 } 1940 1941 spdk_json_write_object_begin(w); 1942 spdk_json_write_named_bool(w, "ptpl", info->ptpl_activated); 1943 spdk_json_write_named_uint32(w, "rtype", info->rtype); 1944 spdk_json_write_named_uint64(w, "crkey", info->crkey); 1945 spdk_json_write_named_string(w, "bdev_uuid", info->bdev_uuid); 1946 spdk_json_write_named_string(w, "holder_uuid", info->holder_uuid); 1947 1948 spdk_json_write_named_array_begin(w, "registrants"); 1949 for (i = 0; i < info->num_regs; i++) { 1950 spdk_json_write_object_begin(w); 1951 spdk_json_write_named_uint64(w, "rkey", info->registrants[i].rkey); 1952 spdk_json_write_named_string(w, "host_uuid", info->registrants[i].host_uuid); 1953 spdk_json_write_object_end(w); 1954 } 1955 spdk_json_write_array_end(w); 1956 spdk_json_write_object_end(w); 1957 1958 exit: 1959 rc = spdk_json_write_end(w); 1960 return rc; 1961 } 1962 1963 static int 1964 nvmf_ns_update_reservation_info(struct spdk_nvmf_ns *ns) 1965 { 1966 struct spdk_nvmf_reservation_info info; 1967 struct spdk_nvmf_registrant *reg, *tmp; 1968 uint32_t i = 0; 1969 1970 assert(ns != NULL); 1971 1972 if (!ns->bdev || !ns->ptpl_file) { 1973 return 0; 1974 } 1975 1976 memset(&info, 0, sizeof(info)); 1977 spdk_uuid_fmt_lower(info.bdev_uuid, sizeof(info.bdev_uuid), spdk_bdev_get_uuid(ns->bdev)); 1978 1979 if (ns->rtype) { 1980 info.rtype = ns->rtype; 1981 info.crkey = ns->crkey; 1982 if (!nvmf_ns_reservation_all_registrants_type(ns)) { 1983 assert(ns->holder != NULL); 1984 spdk_uuid_fmt_lower(info.holder_uuid, sizeof(info.holder_uuid), &ns->holder->hostid); 1985 } 1986 } 1987 1988 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 1989
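/* Snapshot each registrant's host UUID and registration key into the info structure that
 * nvmf_ns_reservation_update() will write to the PTPL file. */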
spdk_uuid_fmt_lower(info.registrants[i].host_uuid, sizeof(info.registrants[i].host_uuid), 1990 &reg->hostid); 1991 info.registrants[i++].rkey = reg->rkey; 1992 } 1993 1994 info.num_regs = i; 1995 info.ptpl_activated = ns->ptpl_activated; 1996 1997 return nvmf_ns_reservation_update(ns->ptpl_file, &info); 1998 } 1999 2000 static struct spdk_nvmf_registrant * 2001 nvmf_ns_reservation_get_registrant(struct spdk_nvmf_ns *ns, 2002 struct spdk_uuid *uuid) 2003 { 2004 struct spdk_nvmf_registrant *reg, *tmp; 2005 2006 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2007 if (!spdk_uuid_compare(&reg->hostid, uuid)) { 2008 return reg; 2009 } 2010 } 2011 2012 return NULL; 2013 } 2014 2015 /* Generate a reservation notice log page for controllers registered with the given HostIDs */ 2016 static void 2017 nvmf_subsystem_gen_ctrlr_notification(struct spdk_nvmf_subsystem *subsystem, 2018 struct spdk_nvmf_ns *ns, 2019 struct spdk_uuid *hostid_list, 2020 uint32_t num_hostid, 2021 enum spdk_nvme_reservation_notification_log_page_type type) 2022 { 2023 struct spdk_nvmf_ctrlr *ctrlr; 2024 uint32_t i; 2025 2026 for (i = 0; i < num_hostid; i++) { 2027 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 2028 if (!spdk_uuid_compare(&ctrlr->hostid, &hostid_list[i])) { 2029 nvmf_ctrlr_reservation_notice_log(ctrlr, ns, type); 2030 } 2031 } 2032 } 2033 } 2034 2035 /* Get all registrants' HostIDs other than that of the controller that issued the command */ 2036 static uint32_t 2037 nvmf_ns_reservation_get_all_other_hostid(struct spdk_nvmf_ns *ns, 2038 struct spdk_uuid *hostid_list, 2039 uint32_t max_num_hostid, 2040 struct spdk_uuid *current_hostid) 2041 { 2042 struct spdk_nvmf_registrant *reg, *tmp; 2043 uint32_t num_hostid = 0; 2044 2045 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2046 if (spdk_uuid_compare(&reg->hostid, current_hostid)) { 2047 if (num_hostid == max_num_hostid) { 2048 assert(false); 2049 return max_num_hostid; 2050 } 2051 hostid_list[num_hostid++] = reg->hostid; 2052 } 2053 } 2054 2055 return num_hostid; 2056 } 2057 2058 /* Calculate the list of HostIDs that became unregistered, by comparing the 2059 * registrant list prior to executing the preempt command with the list 2060 * remaining after it was executed. 
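 * HostIDs present in the old list but missing from the remaining list are compacted back into old_hostid_list and their count is returned; e.g. an old list {A, B, C} with remaining list {B} yields {A, C} and a return value of 2.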
2061 */ 2062 static uint32_t 2063 nvmf_ns_reservation_get_unregistered_hostid(struct spdk_uuid *old_hostid_list, 2064 uint32_t old_num_hostid, 2065 struct spdk_uuid *remaining_hostid_list, 2066 uint32_t remaining_num_hostid) 2067 { 2068 struct spdk_uuid temp_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2069 uint32_t i, j, num_hostid = 0; 2070 bool found; 2071 2072 if (!remaining_num_hostid) { 2073 return old_num_hostid; 2074 } 2075 2076 for (i = 0; i < old_num_hostid; i++) { 2077 found = false; 2078 for (j = 0; j < remaining_num_hostid; j++) { 2079 if (!spdk_uuid_compare(&old_hostid_list[i], &remaining_hostid_list[j])) { 2080 found = true; 2081 break; 2082 } 2083 } 2084 if (!found) { 2085 spdk_uuid_copy(&temp_hostid_list[num_hostid++], &old_hostid_list[i]); 2086 } 2087 } 2088 2089 if (num_hostid) { 2090 memcpy(old_hostid_list, temp_hostid_list, sizeof(struct spdk_uuid) * num_hostid); 2091 } 2092 2093 return num_hostid; 2094 } 2095 2096 /* whether the current reservation type is an all-registrants type */ 2097 static bool 2098 nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns) 2099 { 2100 return (ns->rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS || 2101 ns->rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS); 2102 } 2103 2104 /* whether the given registrant is the reservation holder */ 2105 static bool 2106 nvmf_ns_reservation_registrant_is_holder(struct spdk_nvmf_ns *ns, 2107 struct spdk_nvmf_registrant *reg) 2108 { 2109 if (!reg) { 2110 return false; 2111 } 2112 2113 if (nvmf_ns_reservation_all_registrants_type(ns)) { 2114 return true; 2115 } 2116 2117 return (ns->holder == reg); 2118 } 2119 2120 static int 2121 nvmf_ns_reservation_add_registrant(struct spdk_nvmf_ns *ns, 2122 struct spdk_nvmf_ctrlr *ctrlr, 2123 uint64_t nrkey) 2124 { 2125 struct spdk_nvmf_registrant *reg; 2126 2127 reg = calloc(1, sizeof(*reg)); 2128 if (!reg) { 2129 return -ENOMEM; 2130 } 2131 2132 reg->rkey = nrkey; 2133 /* set hostid for the registrant */ 2134 spdk_uuid_copy(&reg->hostid, &ctrlr->hostid); 2135 TAILQ_INSERT_TAIL(&ns->registrants, reg, link); 2136 ns->gen++; 2137 2138 return 0; 2139 } 2140 2141 static void 2142 nvmf_ns_reservation_release_reservation(struct spdk_nvmf_ns *ns) 2143 { 2144 ns->rtype = 0; 2145 ns->crkey = 0; 2146 ns->holder = NULL; 2147 } 2148 2149 /* release the reservation, or hand it to the next registrant, when a registrant is removed */ 2150 static void 2151 nvmf_ns_reservation_check_release_on_remove_registrant(struct spdk_nvmf_ns *ns, 2152 struct spdk_nvmf_registrant *reg) 2153 { 2154 struct spdk_nvmf_registrant *next_reg; 2155 2156 /* no reservation holder */ 2157 if (!ns->holder) { 2158 assert(ns->rtype == 0); 2159 return; 2160 } 2161 2162 next_reg = TAILQ_FIRST(&ns->registrants); 2163 if (next_reg && nvmf_ns_reservation_all_registrants_type(ns)) { 2164 /* the next valid registrant is the new holder now */ 2165 ns->holder = next_reg; 2166 } else if (nvmf_ns_reservation_registrant_is_holder(ns, reg)) { 2167 /* release the reservation */ 2168 nvmf_ns_reservation_release_reservation(ns); 2169 } 2170 } 2171 2172 static void 2173 nvmf_ns_reservation_remove_registrant(struct spdk_nvmf_ns *ns, 2174 struct spdk_nvmf_registrant *reg) 2175 { 2176 TAILQ_REMOVE(&ns->registrants, reg, link); 2177 nvmf_ns_reservation_check_release_on_remove_registrant(ns, reg); 2178 free(reg); 2179 ns->gen++; 2180 return; 2181 } 2182 2183 static uint32_t 2184 nvmf_ns_reservation_remove_registrants_by_key(struct spdk_nvmf_ns *ns, 2185 uint64_t rkey) 2186 { 2187 struct spdk_nvmf_registrant *reg, *tmp; 2188 uint32_t count = 0; 2189 2190 
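/* Walk the registrant list and remove every entry whose key matches rkey; each removal may also release the reservation or move the holder via nvmf_ns_reservation_check_release_on_remove_registrant(). */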
TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2191 if (reg->rkey == rkey) { 2192 nvmf_ns_reservation_remove_registrant(ns, reg); 2193 count++; 2194 } 2195 } 2196 return count; 2197 } 2198 2199 static uint32_t 2200 nvmf_ns_reservation_remove_all_other_registrants(struct spdk_nvmf_ns *ns, 2201 struct spdk_nvmf_registrant *reg) 2202 { 2203 struct spdk_nvmf_registrant *reg_tmp, *reg_tmp2; 2204 uint32_t count = 0; 2205 2206 TAILQ_FOREACH_SAFE(reg_tmp, &ns->registrants, link, reg_tmp2) { 2207 if (reg_tmp != reg) { 2208 nvmf_ns_reservation_remove_registrant(ns, reg_tmp); 2209 count++; 2210 } 2211 } 2212 return count; 2213 } 2214 2215 static uint32_t 2216 nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns) 2217 { 2218 struct spdk_nvmf_registrant *reg, *reg_tmp; 2219 uint32_t count = 0; 2220 2221 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, reg_tmp) { 2222 nvmf_ns_reservation_remove_registrant(ns, reg); 2223 count++; 2224 } 2225 return count; 2226 } 2227 2228 static void 2229 nvmf_ns_reservation_acquire_reservation(struct spdk_nvmf_ns *ns, uint64_t rkey, 2230 enum spdk_nvme_reservation_type rtype, 2231 struct spdk_nvmf_registrant *holder) 2232 { 2233 ns->rtype = rtype; 2234 ns->crkey = rkey; 2235 assert(ns->holder == NULL); 2236 ns->holder = holder; 2237 } 2238 2239 static bool 2240 nvmf_ns_reservation_register(struct spdk_nvmf_ns *ns, 2241 struct spdk_nvmf_ctrlr *ctrlr, 2242 struct spdk_nvmf_request *req) 2243 { 2244 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2245 uint8_t rrega, iekey, cptpl, rtype; 2246 struct spdk_nvme_reservation_register_data key; 2247 struct spdk_nvmf_registrant *reg; 2248 uint8_t status = SPDK_NVME_SC_SUCCESS; 2249 bool update_sgroup = false; 2250 struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2251 uint32_t num_hostid = 0; 2252 int rc; 2253 2254 rrega = cmd->cdw10_bits.resv_register.rrega; 2255 iekey = cmd->cdw10_bits.resv_register.iekey; 2256 cptpl = cmd->cdw10_bits.resv_register.cptpl; 2257 2258 if (req->data && req->length >= sizeof(key)) { 2259 memcpy(&key, req->data, sizeof(key)); 2260 } else { 2261 SPDK_ERRLOG("No key provided. 
Failing request.\n"); 2262 status = SPDK_NVME_SC_INVALID_FIELD; 2263 goto exit; 2264 } 2265 2266 SPDK_DEBUGLOG(nvmf, "REGISTER: RREGA %u, IEKEY %u, CPTPL %u, " 2267 "NRKEY 0x%"PRIx64", NRKEY 0x%"PRIx64"\n", 2268 rrega, iekey, cptpl, key.crkey, key.nrkey); 2269 2270 if (cptpl == SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON) { 2271 /* Ture to OFF state, and need to be updated in the configuration file */ 2272 if (ns->ptpl_activated) { 2273 ns->ptpl_activated = 0; 2274 update_sgroup = true; 2275 } 2276 } else if (cptpl == SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS) { 2277 if (ns->ptpl_file == NULL) { 2278 status = SPDK_NVME_SC_INVALID_FIELD; 2279 goto exit; 2280 } else if (ns->ptpl_activated == 0) { 2281 ns->ptpl_activated = 1; 2282 update_sgroup = true; 2283 } 2284 } 2285 2286 /* current Host Identifier has registrant or not */ 2287 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid); 2288 2289 switch (rrega) { 2290 case SPDK_NVME_RESERVE_REGISTER_KEY: 2291 if (!reg) { 2292 /* register new controller */ 2293 if (key.nrkey == 0) { 2294 SPDK_ERRLOG("Can't register zeroed new key\n"); 2295 status = SPDK_NVME_SC_INVALID_FIELD; 2296 goto exit; 2297 } 2298 rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey); 2299 if (rc < 0) { 2300 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2301 goto exit; 2302 } 2303 update_sgroup = true; 2304 } else { 2305 /* register with same key is not an error */ 2306 if (reg->rkey != key.nrkey) { 2307 SPDK_ERRLOG("The same host already register a " 2308 "key with 0x%"PRIx64"\n", 2309 reg->rkey); 2310 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2311 goto exit; 2312 } 2313 } 2314 break; 2315 case SPDK_NVME_RESERVE_UNREGISTER_KEY: 2316 if (!reg || (!iekey && reg->rkey != key.crkey)) { 2317 SPDK_ERRLOG("No registrant or current key doesn't match " 2318 "with existing registrant key\n"); 2319 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2320 goto exit; 2321 } 2322 2323 rtype = ns->rtype; 2324 num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list, 2325 SPDK_NVMF_MAX_NUM_REGISTRANTS, 2326 &ctrlr->hostid); 2327 2328 nvmf_ns_reservation_remove_registrant(ns, reg); 2329 2330 if (!ns->rtype && num_hostid && (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY || 2331 rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY)) { 2332 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 2333 hostid_list, 2334 num_hostid, 2335 SPDK_NVME_RESERVATION_RELEASED); 2336 } 2337 update_sgroup = true; 2338 break; 2339 case SPDK_NVME_RESERVE_REPLACE_KEY: 2340 if (!reg || (!iekey && reg->rkey != key.crkey)) { 2341 SPDK_ERRLOG("No registrant or current key doesn't match " 2342 "with existing registrant key\n"); 2343 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2344 goto exit; 2345 } 2346 if (key.nrkey == 0) { 2347 SPDK_ERRLOG("Can't register zeroed new key\n"); 2348 status = SPDK_NVME_SC_INVALID_FIELD; 2349 goto exit; 2350 } 2351 reg->rkey = key.nrkey; 2352 update_sgroup = true; 2353 break; 2354 default: 2355 status = SPDK_NVME_SC_INVALID_FIELD; 2356 goto exit; 2357 } 2358 2359 exit: 2360 if (update_sgroup) { 2361 rc = nvmf_ns_update_reservation_info(ns); 2362 if (rc != 0) { 2363 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2364 } 2365 } 2366 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2367 req->rsp->nvme_cpl.status.sc = status; 2368 return update_sgroup; 2369 } 2370 2371 static bool 2372 nvmf_ns_reservation_acquire(struct spdk_nvmf_ns *ns, 2373 struct spdk_nvmf_ctrlr *ctrlr, 2374 struct spdk_nvmf_request *req) 2375 { 2376 struct spdk_nvme_cmd *cmd = 
&req->cmd->nvme_cmd; 2377 uint8_t racqa, iekey, rtype; 2378 struct spdk_nvme_reservation_acquire_data key; 2379 struct spdk_nvmf_registrant *reg; 2380 bool all_regs = false; 2381 uint32_t count = 0; 2382 bool update_sgroup = true; 2383 struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2384 uint32_t num_hostid = 0; 2385 struct spdk_uuid new_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2386 uint32_t new_num_hostid = 0; 2387 bool reservation_released = false; 2388 uint8_t status = SPDK_NVME_SC_SUCCESS; 2389 2390 racqa = cmd->cdw10_bits.resv_acquire.racqa; 2391 iekey = cmd->cdw10_bits.resv_acquire.iekey; 2392 rtype = cmd->cdw10_bits.resv_acquire.rtype; 2393 2394 if (req->data && req->length >= sizeof(key)) { 2395 memcpy(&key, req->data, sizeof(key)); 2396 } else { 2397 SPDK_ERRLOG("No key provided. Failing request.\n"); 2398 status = SPDK_NVME_SC_INVALID_FIELD; 2399 goto exit; 2400 } 2401 2402 SPDK_DEBUGLOG(nvmf, "ACQUIRE: RACQA %u, IEKEY %u, RTYPE %u, " 2403 "CRKEY 0x%"PRIx64", PRKEY 0x%"PRIx64"\n", 2404 racqa, iekey, rtype, key.crkey, key.prkey); 2405 2406 if (iekey || rtype > SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) { 2407 SPDK_ERRLOG("Ignore Existing Key is set or the reservation type is invalid\n"); 2408 status = SPDK_NVME_SC_INVALID_FIELD; 2409 update_sgroup = false; 2410 goto exit; 2411 } 2412 2413 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid); 2414 /* must be registrant and CRKEY must match */ 2415 if (!reg || reg->rkey != key.crkey) { 2416 SPDK_ERRLOG("No registrant or current key doesn't match " 2417 "with existing registrant key\n"); 2418 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2419 update_sgroup = false; 2420 goto exit; 2421 } 2422 2423 all_regs = nvmf_ns_reservation_all_registrants_type(ns); 2424 2425 switch (racqa) { 2426 case SPDK_NVME_RESERVE_ACQUIRE: 2427 /* it's not an error for the holder to acquire same reservation type again */ 2428 if (nvmf_ns_reservation_registrant_is_holder(ns, reg) && ns->rtype == rtype) { 2429 /* do nothing */ 2430 update_sgroup = false; 2431 } else if (ns->holder == NULL) { 2432 /* first time to acquire the reservation */ 2433 nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg); 2434 } else { 2435 SPDK_ERRLOG("Invalid rtype or current registrant is not holder\n"); 2436 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2437 update_sgroup = false; 2438 goto exit; 2439 } 2440 break; 2441 case SPDK_NVME_RESERVE_PREEMPT: 2442 /* no reservation holder */ 2443 if (!ns->holder) { 2444 /* unregister with PRKEY */ 2445 nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey); 2446 break; 2447 } 2448 num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list, 2449 SPDK_NVMF_MAX_NUM_REGISTRANTS, 2450 &ctrlr->hostid); 2451 2452 /* not an all-registrants type: there is only one reservation holder and its key is valid */ 2453 if (!all_regs) { 2454 /* preempt itself */ 2455 if (nvmf_ns_reservation_registrant_is_holder(ns, reg) && 2456 ns->crkey == key.prkey) { 2457 ns->rtype = rtype; 2458 reservation_released = true; 2459 break; 2460 } 2461 2462 if (ns->crkey == key.prkey) { 2463 nvmf_ns_reservation_remove_registrant(ns, ns->holder); 2464 nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg); 2465 reservation_released = true; 2466 } else if (key.prkey != 0) { 2467 nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey); 2468 } else { 2469 /* PRKEY is zero */ 2470 SPDK_ERRLOG("Current PRKEY is zero\n"); 2471 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2472 update_sgroup = false; 2473 goto exit; 2474 } 2475 } else { 2476 /* release all other 
registrants except for the current one */ 2477 if (key.prkey == 0) { 2478 nvmf_ns_reservation_remove_all_other_registrants(ns, reg); 2479 assert(ns->holder == reg); 2480 } else { 2481 count = nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey); 2482 if (count == 0) { 2483 SPDK_ERRLOG("PRKEY doesn't match any registrant\n"); 2484 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2485 update_sgroup = false; 2486 goto exit; 2487 } 2488 } 2489 } 2490 break; 2491 default: 2492 status = SPDK_NVME_SC_INVALID_FIELD; 2493 update_sgroup = false; 2494 break; 2495 } 2496 2497 exit: 2498 if (update_sgroup && racqa == SPDK_NVME_RESERVE_PREEMPT) { 2499 new_num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, new_hostid_list, 2500 SPDK_NVMF_MAX_NUM_REGISTRANTS, 2501 &ctrlr->hostid); 2502 /* Preempt notification occurs on the unregistered controllers 2503 * other than the controller who issued the command. 2504 */ 2505 num_hostid = nvmf_ns_reservation_get_unregistered_hostid(hostid_list, 2506 num_hostid, 2507 new_hostid_list, 2508 new_num_hostid); 2509 if (num_hostid) { 2510 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 2511 hostid_list, 2512 num_hostid, 2513 SPDK_NVME_REGISTRATION_PREEMPTED); 2514 2515 } 2516 /* Reservation released notification occurs on the 2517 * controllers which are the remaining registrants other than 2518 * the controller who issued the command. 2519 */ 2520 if (reservation_released && new_num_hostid) { 2521 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 2522 new_hostid_list, 2523 new_num_hostid, 2524 SPDK_NVME_RESERVATION_RELEASED); 2525 2526 } 2527 } 2528 if (update_sgroup && ns->ptpl_activated) { 2529 if (nvmf_ns_update_reservation_info(ns)) { 2530 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2531 } 2532 } 2533 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2534 req->rsp->nvme_cpl.status.sc = status; 2535 return update_sgroup; 2536 } 2537 2538 static bool 2539 nvmf_ns_reservation_release(struct spdk_nvmf_ns *ns, 2540 struct spdk_nvmf_ctrlr *ctrlr, 2541 struct spdk_nvmf_request *req) 2542 { 2543 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2544 uint8_t rrela, iekey, rtype; 2545 struct spdk_nvmf_registrant *reg; 2546 uint64_t crkey; 2547 uint8_t status = SPDK_NVME_SC_SUCCESS; 2548 bool update_sgroup = true; 2549 struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2550 uint32_t num_hostid = 0; 2551 2552 rrela = cmd->cdw10_bits.resv_release.rrela; 2553 iekey = cmd->cdw10_bits.resv_release.iekey; 2554 rtype = cmd->cdw10_bits.resv_release.rtype; 2555 2556 if (req->data && req->length >= sizeof(crkey)) { 2557 memcpy(&crkey, req->data, sizeof(crkey)); 2558 } else { 2559 SPDK_ERRLOG("No key provided. 
Failing request.\n"); 2560 status = SPDK_NVME_SC_INVALID_FIELD; 2561 goto exit; 2562 } 2563 2564 SPDK_DEBUGLOG(nvmf, "RELEASE: RRELA %u, IEKEY %u, RTYPE %u, " 2565 "CRKEY 0x%"PRIx64"\n", rrela, iekey, rtype, crkey); 2566 2567 if (iekey) { 2568 SPDK_ERRLOG("Ignore existing key field set to 1\n"); 2569 status = SPDK_NVME_SC_INVALID_FIELD; 2570 update_sgroup = false; 2571 goto exit; 2572 } 2573 2574 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid); 2575 if (!reg || reg->rkey != crkey) { 2576 SPDK_ERRLOG("No registrant or current key doesn't match " 2577 "with existing registrant key\n"); 2578 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2579 update_sgroup = false; 2580 goto exit; 2581 } 2582 2583 num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list, 2584 SPDK_NVMF_MAX_NUM_REGISTRANTS, 2585 &ctrlr->hostid); 2586 2587 switch (rrela) { 2588 case SPDK_NVME_RESERVE_RELEASE: 2589 if (!ns->holder) { 2590 SPDK_DEBUGLOG(nvmf, "RELEASE: no holder\n"); 2591 update_sgroup = false; 2592 goto exit; 2593 } 2594 if (ns->rtype != rtype) { 2595 SPDK_ERRLOG("Type doesn't match\n"); 2596 status = SPDK_NVME_SC_INVALID_FIELD; 2597 update_sgroup = false; 2598 goto exit; 2599 } 2600 if (!nvmf_ns_reservation_registrant_is_holder(ns, reg)) { 2601 /* not the reservation holder, this isn't an error */ 2602 update_sgroup = false; 2603 goto exit; 2604 } 2605 2606 rtype = ns->rtype; 2607 nvmf_ns_reservation_release_reservation(ns); 2608 2609 if (num_hostid && rtype != SPDK_NVME_RESERVE_WRITE_EXCLUSIVE && 2610 rtype != SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) { 2611 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 2612 hostid_list, 2613 num_hostid, 2614 SPDK_NVME_RESERVATION_RELEASED); 2615 } 2616 break; 2617 case SPDK_NVME_RESERVE_CLEAR: 2618 nvmf_ns_reservation_clear_all_registrants(ns); 2619 if (num_hostid) { 2620 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 2621 hostid_list, 2622 num_hostid, 2623 SPDK_NVME_RESERVATION_PREEMPTED); 2624 } 2625 break; 2626 default: 2627 status = SPDK_NVME_SC_INVALID_FIELD; 2628 update_sgroup = false; 2629 goto exit; 2630 } 2631 2632 exit: 2633 if (update_sgroup && ns->ptpl_activated) { 2634 if (nvmf_ns_update_reservation_info(ns)) { 2635 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2636 } 2637 } 2638 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2639 req->rsp->nvme_cpl.status.sc = status; 2640 return update_sgroup; 2641 } 2642 2643 static void 2644 nvmf_ns_reservation_report(struct spdk_nvmf_ns *ns, 2645 struct spdk_nvmf_ctrlr *ctrlr, 2646 struct spdk_nvmf_request *req) 2647 { 2648 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2649 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys; 2650 struct spdk_nvmf_ctrlr *ctrlr_tmp; 2651 struct spdk_nvmf_registrant *reg, *tmp; 2652 struct spdk_nvme_reservation_status_extended_data *status_data; 2653 struct spdk_nvme_registered_ctrlr_extended_data *ctrlr_data; 2654 uint8_t *payload; 2655 uint32_t len, count = 0; 2656 uint32_t regctl = 0; 2657 uint8_t status = SPDK_NVME_SC_SUCCESS; 2658 2659 if (req->data == NULL) { 2660 SPDK_ERRLOG("No data transfer specified for request. 
" 2661 " Unable to transfer back response.\n"); 2662 status = SPDK_NVME_SC_INVALID_FIELD; 2663 goto exit; 2664 } 2665 2666 if (!cmd->cdw11_bits.resv_report.eds) { 2667 SPDK_ERRLOG("NVMeoF uses extended controller data structure, " 2668 "please set EDS bit in cdw11 and try again\n"); 2669 status = SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT; 2670 goto exit; 2671 } 2672 2673 /* Get number of registerd controllers, one Host may have more than 2674 * one controller based on different ports. 2675 */ 2676 TAILQ_FOREACH(ctrlr_tmp, &subsystem->ctrlrs, link) { 2677 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr_tmp->hostid); 2678 if (reg) { 2679 regctl++; 2680 } 2681 } 2682 2683 len = sizeof(*status_data) + sizeof(*ctrlr_data) * regctl; 2684 payload = calloc(1, len); 2685 if (!payload) { 2686 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2687 goto exit; 2688 } 2689 2690 status_data = (struct spdk_nvme_reservation_status_extended_data *)payload; 2691 status_data->data.gen = ns->gen; 2692 status_data->data.rtype = ns->rtype; 2693 status_data->data.regctl = regctl; 2694 status_data->data.ptpls = ns->ptpl_activated; 2695 2696 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2697 assert(count <= regctl); 2698 ctrlr_data = (struct spdk_nvme_registered_ctrlr_extended_data *) 2699 (payload + sizeof(*status_data) + sizeof(*ctrlr_data) * count); 2700 /* Set to 0xffffh for dynamic controller */ 2701 ctrlr_data->cntlid = 0xffff; 2702 ctrlr_data->rcsts.status = (ns->holder == reg) ? true : false; 2703 ctrlr_data->rkey = reg->rkey; 2704 spdk_uuid_copy((struct spdk_uuid *)ctrlr_data->hostid, ®->hostid); 2705 count++; 2706 } 2707 2708 memcpy(req->data, payload, spdk_min(len, (cmd->cdw10 + 1) * sizeof(uint32_t))); 2709 free(payload); 2710 2711 exit: 2712 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2713 req->rsp->nvme_cpl.status.sc = status; 2714 return; 2715 } 2716 2717 static void 2718 nvmf_ns_reservation_complete(void *ctx) 2719 { 2720 struct spdk_nvmf_request *req = ctx; 2721 2722 spdk_nvmf_request_complete(req); 2723 } 2724 2725 static void 2726 _nvmf_ns_reservation_update_done(struct spdk_nvmf_subsystem *subsystem, 2727 void *cb_arg, int status) 2728 { 2729 struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)cb_arg; 2730 struct spdk_nvmf_poll_group *group = req->qpair->group; 2731 2732 spdk_thread_send_msg(group->thread, nvmf_ns_reservation_complete, req); 2733 } 2734 2735 void 2736 nvmf_ns_reservation_request(void *ctx) 2737 { 2738 struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)ctx; 2739 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2740 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2741 struct subsystem_update_ns_ctx *update_ctx; 2742 uint32_t nsid; 2743 struct spdk_nvmf_ns *ns; 2744 bool update_sgroup = false; 2745 2746 nsid = cmd->nsid; 2747 ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid); 2748 assert(ns != NULL); 2749 2750 switch (cmd->opc) { 2751 case SPDK_NVME_OPC_RESERVATION_REGISTER: 2752 update_sgroup = nvmf_ns_reservation_register(ns, ctrlr, req); 2753 break; 2754 case SPDK_NVME_OPC_RESERVATION_ACQUIRE: 2755 update_sgroup = nvmf_ns_reservation_acquire(ns, ctrlr, req); 2756 break; 2757 case SPDK_NVME_OPC_RESERVATION_RELEASE: 2758 update_sgroup = nvmf_ns_reservation_release(ns, ctrlr, req); 2759 break; 2760 case SPDK_NVME_OPC_RESERVATION_REPORT: 2761 nvmf_ns_reservation_report(ns, ctrlr, req); 2762 break; 2763 default: 2764 break; 2765 } 2766 2767 /* update reservation information to subsystem's poll group */ 2768 if (update_sgroup) { 2769 update_ctx = 
calloc(1, sizeof(*update_ctx)); 2770 if (update_ctx == NULL) { 2771 SPDK_ERRLOG("Can't alloc subsystem poll group update context\n"); 2772 goto update_done; 2773 } 2774 update_ctx->subsystem = ctrlr->subsys; 2775 update_ctx->cb_fn = _nvmf_ns_reservation_update_done; 2776 update_ctx->cb_arg = req; 2777 2778 nvmf_subsystem_update_ns(ctrlr->subsys, subsystem_update_ns_done, update_ctx); 2779 return; 2780 } 2781 2782 update_done: 2783 _nvmf_ns_reservation_update_done(ctrlr->subsys, (void *)req, 0); 2784 } 2785 2786 int 2787 spdk_nvmf_subsystem_set_ana_reporting(struct spdk_nvmf_subsystem *subsystem, 2788 bool ana_reporting) 2789 { 2790 if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) { 2791 return -EAGAIN; 2792 } 2793 2794 subsystem->flags.ana_reporting = ana_reporting; 2795 2796 return 0; 2797 } 2798 2799 struct subsystem_listener_update_ctx { 2800 struct spdk_nvmf_subsystem_listener *listener; 2801 2802 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn; 2803 void *cb_arg; 2804 }; 2805 2806 static void 2807 subsystem_listener_update_done(struct spdk_io_channel_iter *i, int status) 2808 { 2809 struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2810 2811 if (ctx->cb_fn) { 2812 ctx->cb_fn(ctx->cb_arg, status); 2813 } 2814 free(ctx); 2815 } 2816 2817 static void 2818 subsystem_listener_update_on_pg(struct spdk_io_channel_iter *i) 2819 { 2820 struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 2821 struct spdk_nvmf_subsystem_listener *listener; 2822 struct spdk_nvmf_poll_group *group; 2823 struct spdk_nvmf_ctrlr *ctrlr; 2824 2825 listener = ctx->listener; 2826 group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i)); 2827 2828 TAILQ_FOREACH(ctrlr, &listener->subsystem->ctrlrs, link) { 2829 if (ctrlr->admin_qpair->group == group && ctrlr->listener == listener) { 2830 nvmf_ctrlr_async_event_ana_change_notice(ctrlr); 2831 } 2832 } 2833 2834 spdk_for_each_channel_continue(i, 0); 2835 } 2836 2837 void 2838 nvmf_subsystem_set_ana_state(struct spdk_nvmf_subsystem *subsystem, 2839 const struct spdk_nvme_transport_id *trid, 2840 enum spdk_nvme_ana_state ana_state, 2841 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, void *cb_arg) 2842 { 2843 struct spdk_nvmf_subsystem_listener *listener; 2844 struct subsystem_listener_update_ctx *ctx; 2845 2846 assert(cb_fn != NULL); 2847 assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 2848 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED); 2849 2850 if (!subsystem->flags.ana_reporting) { 2851 SPDK_ERRLOG("ANA reporting is disabled\n"); 2852 cb_fn(cb_arg, -EINVAL); 2853 return; 2854 } 2855 2856 /* ANA Change state is not used, ANA Persistent Loss state 2857 * is not supported yet. 
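 * Only the Optimized, Non-Optimized and Inaccessible states are accepted below.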
2858 */ 2859 if (!(ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE || 2860 ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE || 2861 ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE)) { 2862 SPDK_ERRLOG("ANA state %d is not supported\n", ana_state); 2863 cb_fn(cb_arg, -ENOTSUP); 2864 return; 2865 } 2866 2867 listener = nvmf_subsystem_find_listener(subsystem, trid); 2868 if (!listener) { 2869 SPDK_ERRLOG("Unable to find listener.\n"); 2870 cb_fn(cb_arg, -EINVAL); 2871 return; 2872 } 2873 2874 if (listener->ana_state == ana_state) { 2875 cb_fn(cb_arg, 0); 2876 return; 2877 } 2878 2879 ctx = calloc(1, sizeof(*ctx)); 2880 if (!ctx) { 2881 SPDK_ERRLOG("Unable to allocate context\n"); 2882 cb_fn(cb_arg, -ENOMEM); 2883 return; 2884 } 2885 2886 listener->ana_state = ana_state; 2887 listener->ana_state_change_count++; 2888 2889 ctx->listener = listener; 2890 ctx->cb_fn = cb_fn; 2891 ctx->cb_arg = cb_arg; 2892 2893 spdk_for_each_channel(subsystem->tgt, 2894 subsystem_listener_update_on_pg, 2895 ctx, 2896 subsystem_listener_update_done); 2897 } 2898
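/*
 * Usage sketch (illustrative only, not part of the implementation): a target
 * application that has the subsystem in the Inactive or Paused state could
 * switch a listener's ANA state roughly as follows. The callback name and the
 * listener_trid variable are assumptions made for this example.
 *
 *	static void
 *	ana_state_change_done(void *cb_arg, int status)
 *	{
 *		if (status != 0) {
 *			SPDK_ERRLOG("ANA state change failed: %d\n", status);
 *		}
 *	}
 *
 *	nvmf_subsystem_set_ana_state(subsystem, &listener_trid,
 *				     SPDK_NVME_ANA_INACCESSIBLE_STATE,
 *				     ana_state_change_done, NULL);
 */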