1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "nvmf_internal.h" 37 #include "transport.h" 38 39 #include "spdk/assert.h" 40 #include "spdk/likely.h" 41 #include "spdk/string.h" 42 #include "spdk/trace.h" 43 #include "spdk/nvmf_spec.h" 44 #include "spdk/uuid.h" 45 #include "spdk/json.h" 46 #include "spdk/file.h" 47 48 #define __SPDK_BDEV_MODULE_ONLY 49 #include "spdk/bdev_module.h" 50 #include "spdk/log.h" 51 #include "spdk_internal/utf.h" 52 #include "spdk_internal/usdt.h" 53 54 #define MODEL_NUMBER_DEFAULT "SPDK bdev Controller" 55 #define NVMF_SUBSYSTEM_DEFAULT_NAMESPACES 32 56 57 /* 58 * States for parsing valid domains in NQNs according to RFC 1034 59 */ 60 enum spdk_nvmf_nqn_domain_states { 61 /* First character of a domain must be a letter */ 62 SPDK_NVMF_DOMAIN_ACCEPT_LETTER = 0, 63 64 /* Subsequent characters can be any of letter, digit, or hyphen */ 65 SPDK_NVMF_DOMAIN_ACCEPT_LDH = 1, 66 67 /* A domain label must end with either a letter or digit */ 68 SPDK_NVMF_DOMAIN_ACCEPT_ANY = 2 69 }; 70 71 /* Returns true if is a valid ASCII string as defined by the NVMe spec */ 72 static bool 73 nvmf_valid_ascii_string(const void *buf, size_t size) 74 { 75 const uint8_t *str = buf; 76 size_t i; 77 78 for (i = 0; i < size; i++) { 79 if (str[i] < 0x20 || str[i] > 0x7E) { 80 return false; 81 } 82 } 83 84 return true; 85 } 86 87 static bool 88 nvmf_valid_nqn(const char *nqn) 89 { 90 size_t len; 91 struct spdk_uuid uuid_value; 92 uint32_t i; 93 int bytes_consumed; 94 uint32_t domain_label_length; 95 char *reverse_domain_end; 96 uint32_t reverse_domain_end_index; 97 enum spdk_nvmf_nqn_domain_states domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER; 98 99 /* Check for length requirements */ 100 len = strlen(nqn); 101 if (len > SPDK_NVMF_NQN_MAX_LEN) { 102 SPDK_ERRLOG("Invalid NQN \"%s\": length %zu > max %d\n", nqn, len, SPDK_NVMF_NQN_MAX_LEN); 103 return false; 104 } 105 106 /* The nqn must be at least as long as SPDK_NVMF_NQN_MIN_LEN to contain the necessary prefix. 
*/ 107 if (len < SPDK_NVMF_NQN_MIN_LEN) { 108 SPDK_ERRLOG("Invalid NQN \"%s\": length %zu < min %d\n", nqn, len, SPDK_NVMF_NQN_MIN_LEN); 109 return false; 110 } 111 112 /* Check for discovery controller nqn */ 113 if (!strcmp(nqn, SPDK_NVMF_DISCOVERY_NQN)) { 114 return true; 115 } 116 117 /* Check for equality with the generic nqn structure of the form "nqn.2014-08.org.nvmexpress:uuid:11111111-2222-3333-4444-555555555555" */ 118 if (!strncmp(nqn, SPDK_NVMF_NQN_UUID_PRE, SPDK_NVMF_NQN_UUID_PRE_LEN)) { 119 if (len != SPDK_NVMF_NQN_UUID_PRE_LEN + SPDK_NVMF_UUID_STRING_LEN) { 120 SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not the correct length\n", nqn); 121 return false; 122 } 123 124 if (spdk_uuid_parse(&uuid_value, &nqn[SPDK_NVMF_NQN_UUID_PRE_LEN])) { 125 SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not formatted correctly\n", nqn); 126 return false; 127 } 128 return true; 129 } 130 131 /* If the nqn does not match the uuid structure, the next several checks validate the form "nqn.yyyy-mm.reverse.domain:user-string" */ 132 133 if (strncmp(nqn, "nqn.", 4) != 0) { 134 SPDK_ERRLOG("Invalid NQN \"%s\": NQN must begin with \"nqn.\".\n", nqn); 135 return false; 136 } 137 138 /* Check for yyyy-mm. */ 139 if (!(isdigit(nqn[4]) && isdigit(nqn[5]) && isdigit(nqn[6]) && isdigit(nqn[7]) && 140 nqn[8] == '-' && isdigit(nqn[9]) && isdigit(nqn[10]) && nqn[11] == '.')) { 141 SPDK_ERRLOG("Invalid date code in NQN \"%s\"\n", nqn); 142 return false; 143 } 144 145 reverse_domain_end = strchr(nqn, ':'); 146 if (reverse_domain_end != NULL && (reverse_domain_end_index = reverse_domain_end - nqn) < len - 1) { 147 } else { 148 SPDK_ERRLOG("Invalid NQN \"%s\". NQN must contain user specified name with a ':' as a prefix.\n", 149 nqn); 150 return false; 151 } 152 153 /* Check for valid reverse domain */ 154 domain_label_length = 0; 155 for (i = 12; i < reverse_domain_end_index; i++) { 156 if (domain_label_length > SPDK_DOMAIN_LABEL_MAX_LEN) { 157 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". 
At least one Label is too long.\n", nqn); 158 return false; 159 } 160 161 switch (domain_state) { 162 163 case SPDK_NVMF_DOMAIN_ACCEPT_LETTER: { 164 if (isalpha(nqn[i])) { 165 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 166 domain_label_length++; 167 break; 168 } else { 169 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must start with a letter.\n", nqn); 170 return false; 171 } 172 } 173 174 case SPDK_NVMF_DOMAIN_ACCEPT_LDH: { 175 if (isalpha(nqn[i]) || isdigit(nqn[i])) { 176 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 177 domain_label_length++; 178 break; 179 } else if (nqn[i] == '-') { 180 if (i == reverse_domain_end_index - 1) { 181 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 182 nqn); 183 return false; 184 } 185 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH; 186 domain_label_length++; 187 break; 188 } else if (nqn[i] == '.') { 189 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 190 nqn); 191 return false; 192 } else { 193 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only [a-z,A-Z,0-9,'-','.'].\n", 194 nqn); 195 return false; 196 } 197 } 198 199 case SPDK_NVMF_DOMAIN_ACCEPT_ANY: { 200 if (isalpha(nqn[i]) || isdigit(nqn[i])) { 201 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 202 domain_label_length++; 203 break; 204 } else if (nqn[i] == '-') { 205 if (i == reverse_domain_end_index - 1) { 206 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 207 nqn); 208 return false; 209 } 210 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH; 211 domain_label_length++; 212 break; 213 } else if (nqn[i] == '.') { 214 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER; 215 domain_label_length = 0; 216 break; 217 } else { 218 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". 
Label names must contain only [a-z,A-Z,0-9,'-','.'].\n", 219 nqn); 220 return false; 221 } 222 } 223 } 224 } 225 226 i = reverse_domain_end_index + 1; 227 while (i < len) { 228 bytes_consumed = utf8_valid(&nqn[i], &nqn[len]); 229 if (bytes_consumed <= 0) { 230 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only valid utf-8.\n", nqn); 231 return false; 232 } 233 234 i += bytes_consumed; 235 } 236 return true; 237 } 238 239 static void subsystem_state_change_on_pg(struct spdk_io_channel_iter *i); 240 241 struct spdk_nvmf_subsystem * 242 spdk_nvmf_subsystem_create(struct spdk_nvmf_tgt *tgt, 243 const char *nqn, 244 enum spdk_nvmf_subtype type, 245 uint32_t num_ns) 246 { 247 struct spdk_nvmf_subsystem *subsystem; 248 uint32_t sid; 249 250 if (spdk_nvmf_tgt_find_subsystem(tgt, nqn)) { 251 SPDK_ERRLOG("Subsystem NQN '%s' already exists\n", nqn); 252 return NULL; 253 } 254 255 if (!nvmf_valid_nqn(nqn)) { 256 return NULL; 257 } 258 259 if (type == SPDK_NVMF_SUBTYPE_DISCOVERY) { 260 if (num_ns != 0) { 261 SPDK_ERRLOG("Discovery subsystem cannot have namespaces.\n"); 262 return NULL; 263 } 264 } else if (num_ns == 0) { 265 num_ns = NVMF_SUBSYSTEM_DEFAULT_NAMESPACES; 266 } 267 268 /* Find a free subsystem id (sid) */ 269 for (sid = 0; sid < tgt->max_subsystems; sid++) { 270 if (tgt->subsystems[sid] == NULL) { 271 break; 272 } 273 } 274 if (sid >= tgt->max_subsystems) { 275 return NULL; 276 } 277 278 subsystem = calloc(1, sizeof(struct spdk_nvmf_subsystem)); 279 if (subsystem == NULL) { 280 return NULL; 281 } 282 283 subsystem->thread = spdk_get_thread(); 284 subsystem->state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 285 subsystem->tgt = tgt; 286 subsystem->id = sid; 287 subsystem->subtype = type; 288 subsystem->max_nsid = num_ns; 289 subsystem->next_cntlid = 0; 290 subsystem->min_cntlid = NVMF_MIN_CNTLID; 291 subsystem->max_cntlid = NVMF_MAX_CNTLID; 292 snprintf(subsystem->subnqn, sizeof(subsystem->subnqn), "%s", nqn); 293 pthread_mutex_init(&subsystem->mutex, 
NULL); 294 TAILQ_INIT(&subsystem->listeners); 295 TAILQ_INIT(&subsystem->hosts); 296 TAILQ_INIT(&subsystem->ctrlrs); 297 298 if (num_ns != 0) { 299 subsystem->ns = calloc(num_ns, sizeof(struct spdk_nvmf_ns *)); 300 if (subsystem->ns == NULL) { 301 SPDK_ERRLOG("Namespace memory allocation failed\n"); 302 pthread_mutex_destroy(&subsystem->mutex); 303 free(subsystem); 304 return NULL; 305 } 306 subsystem->ana_group = calloc(num_ns, sizeof(uint32_t)); 307 if (subsystem->ana_group == NULL) { 308 SPDK_ERRLOG("ANA group memory allocation failed\n"); 309 pthread_mutex_destroy(&subsystem->mutex); 310 free(subsystem->ns); 311 free(subsystem); 312 return NULL; 313 } 314 } 315 316 memset(subsystem->sn, '0', sizeof(subsystem->sn) - 1); 317 subsystem->sn[sizeof(subsystem->sn) - 1] = '\0'; 318 319 snprintf(subsystem->mn, sizeof(subsystem->mn), "%s", 320 MODEL_NUMBER_DEFAULT); 321 322 tgt->subsystems[sid] = subsystem; 323 nvmf_update_discovery_log(tgt, NULL); 324 325 return subsystem; 326 } 327 328 /* Must hold subsystem->mutex while calling this function */ 329 static void 330 nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_host *host) 331 { 332 TAILQ_REMOVE(&subsystem->hosts, host, link); 333 free(host); 334 } 335 336 static void 337 _nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem, 338 struct spdk_nvmf_subsystem_listener *listener, 339 bool stop) 340 { 341 struct spdk_nvmf_transport *transport; 342 343 if (stop) { 344 transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, listener->trid->trstring); 345 if (transport != NULL) { 346 spdk_nvmf_transport_stop_listen(transport, listener->trid); 347 } 348 } 349 350 TAILQ_REMOVE(&subsystem->listeners, listener, link); 351 free(listener->ana_state); 352 free(listener); 353 } 354 355 void 356 spdk_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem) 357 { 358 struct spdk_nvmf_host *host, *host_tmp; 359 struct spdk_nvmf_ctrlr *ctrlr, *ctrlr_tmp; 360 struct spdk_nvmf_ns 
*ns; 361 362 if (!subsystem) { 363 return; 364 } 365 366 assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE); 367 368 SPDK_DEBUGLOG(nvmf, "subsystem is %p\n", subsystem); 369 370 nvmf_subsystem_remove_all_listeners(subsystem, false); 371 372 pthread_mutex_lock(&subsystem->mutex); 373 374 TAILQ_FOREACH_SAFE(host, &subsystem->hosts, link, host_tmp) { 375 nvmf_subsystem_remove_host(subsystem, host); 376 } 377 378 pthread_mutex_unlock(&subsystem->mutex); 379 380 TAILQ_FOREACH_SAFE(ctrlr, &subsystem->ctrlrs, link, ctrlr_tmp) { 381 nvmf_ctrlr_destruct(ctrlr); 382 } 383 384 ns = spdk_nvmf_subsystem_get_first_ns(subsystem); 385 while (ns != NULL) { 386 struct spdk_nvmf_ns *next_ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns); 387 388 spdk_nvmf_subsystem_remove_ns(subsystem, ns->opts.nsid); 389 ns = next_ns; 390 } 391 392 free(subsystem->ns); 393 free(subsystem->ana_group); 394 395 subsystem->tgt->subsystems[subsystem->id] = NULL; 396 nvmf_update_discovery_log(subsystem->tgt, NULL); 397 398 pthread_mutex_destroy(&subsystem->mutex); 399 400 free(subsystem); 401 } 402 403 404 /* we have to use the typedef in the function declaration to appease astyle. 
*/ 405 typedef enum spdk_nvmf_subsystem_state spdk_nvmf_subsystem_state_t; 406 407 static spdk_nvmf_subsystem_state_t 408 nvmf_subsystem_get_intermediate_state(enum spdk_nvmf_subsystem_state current_state, 409 enum spdk_nvmf_subsystem_state requested_state) 410 { 411 switch (requested_state) { 412 case SPDK_NVMF_SUBSYSTEM_INACTIVE: 413 return SPDK_NVMF_SUBSYSTEM_DEACTIVATING; 414 case SPDK_NVMF_SUBSYSTEM_ACTIVE: 415 if (current_state == SPDK_NVMF_SUBSYSTEM_PAUSED) { 416 return SPDK_NVMF_SUBSYSTEM_RESUMING; 417 } else { 418 return SPDK_NVMF_SUBSYSTEM_ACTIVATING; 419 } 420 case SPDK_NVMF_SUBSYSTEM_PAUSED: 421 return SPDK_NVMF_SUBSYSTEM_PAUSING; 422 default: 423 assert(false); 424 return SPDK_NVMF_SUBSYSTEM_NUM_STATES; 425 } 426 } 427 428 static int 429 nvmf_subsystem_set_state(struct spdk_nvmf_subsystem *subsystem, 430 enum spdk_nvmf_subsystem_state state) 431 { 432 enum spdk_nvmf_subsystem_state actual_old_state, expected_old_state; 433 bool exchanged; 434 435 switch (state) { 436 case SPDK_NVMF_SUBSYSTEM_INACTIVE: 437 expected_old_state = SPDK_NVMF_SUBSYSTEM_DEACTIVATING; 438 break; 439 case SPDK_NVMF_SUBSYSTEM_ACTIVATING: 440 expected_old_state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 441 break; 442 case SPDK_NVMF_SUBSYSTEM_ACTIVE: 443 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING; 444 break; 445 case SPDK_NVMF_SUBSYSTEM_PAUSING: 446 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 447 break; 448 case SPDK_NVMF_SUBSYSTEM_PAUSED: 449 expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSING; 450 break; 451 case SPDK_NVMF_SUBSYSTEM_RESUMING: 452 expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSED; 453 break; 454 case SPDK_NVMF_SUBSYSTEM_DEACTIVATING: 455 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 456 break; 457 default: 458 assert(false); 459 return -1; 460 } 461 462 actual_old_state = expected_old_state; 463 exchanged = __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false, 464 __ATOMIC_RELAXED, __ATOMIC_RELAXED); 465 if (spdk_unlikely(exchanged == 
false)) { 466 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING && 467 state == SPDK_NVMF_SUBSYSTEM_ACTIVE) { 468 expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING; 469 } 470 /* This is for the case when activating the subsystem fails. */ 471 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_ACTIVATING && 472 state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING) { 473 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING; 474 } 475 /* This is for the case when resuming the subsystem fails. */ 476 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING && 477 state == SPDK_NVMF_SUBSYSTEM_PAUSING) { 478 expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING; 479 } 480 actual_old_state = expected_old_state; 481 __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false, 482 __ATOMIC_RELAXED, __ATOMIC_RELAXED); 483 } 484 assert(actual_old_state == expected_old_state); 485 return actual_old_state - expected_old_state; 486 } 487 488 struct subsystem_state_change_ctx { 489 struct spdk_nvmf_subsystem *subsystem; 490 uint16_t nsid; 491 492 enum spdk_nvmf_subsystem_state original_state; 493 enum spdk_nvmf_subsystem_state requested_state; 494 495 spdk_nvmf_subsystem_state_change_done cb_fn; 496 void *cb_arg; 497 }; 498 499 static void 500 subsystem_state_change_revert_done(struct spdk_io_channel_iter *i, int status) 501 { 502 struct subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 503 504 /* Nothing to be done here if the state setting fails, we are just screwed. */ 505 if (nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state)) { 506 SPDK_ERRLOG("Unable to revert the subsystem state after operation failure.\n"); 507 } 508 509 ctx->subsystem->changing_state = false; 510 if (ctx->cb_fn) { 511 /* return a failure here. This function only exists in an error path. 
*/ 512 ctx->cb_fn(ctx->subsystem, ctx->cb_arg, -1); 513 } 514 free(ctx); 515 } 516 517 static void 518 subsystem_state_change_done(struct spdk_io_channel_iter *i, int status) 519 { 520 struct subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 521 enum spdk_nvmf_subsystem_state intermediate_state; 522 523 SPDK_DTRACE_PROBE4(nvmf_subsystem_change_state_done, ctx->subsystem->subnqn, 524 ctx->requested_state, ctx->original_state, status); 525 526 if (status == 0) { 527 status = nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state); 528 if (status) { 529 status = -1; 530 } 531 } 532 533 if (status) { 534 intermediate_state = nvmf_subsystem_get_intermediate_state(ctx->requested_state, 535 ctx->original_state); 536 assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES); 537 538 if (nvmf_subsystem_set_state(ctx->subsystem, intermediate_state)) { 539 goto out; 540 } 541 ctx->requested_state = ctx->original_state; 542 spdk_for_each_channel(ctx->subsystem->tgt, 543 subsystem_state_change_on_pg, 544 ctx, 545 subsystem_state_change_revert_done); 546 return; 547 } 548 549 out: 550 ctx->subsystem->changing_state = false; 551 if (ctx->cb_fn) { 552 ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status); 553 } 554 free(ctx); 555 } 556 557 static void 558 subsystem_state_change_continue(void *ctx, int status) 559 { 560 struct spdk_io_channel_iter *i = ctx; 561 struct subsystem_state_change_ctx *_ctx __attribute__((unused)); 562 563 _ctx = spdk_io_channel_iter_get_ctx(i); 564 SPDK_DTRACE_PROBE3(nvmf_pg_change_state_done, _ctx->subsystem->subnqn, 565 _ctx->requested_state, spdk_thread_get_id(spdk_get_thread())); 566 567 spdk_for_each_channel_continue(i, status); 568 } 569 570 static void 571 subsystem_state_change_on_pg(struct spdk_io_channel_iter *i) 572 { 573 struct subsystem_state_change_ctx *ctx; 574 struct spdk_io_channel *ch; 575 struct spdk_nvmf_poll_group *group; 576 577 ctx = spdk_io_channel_iter_get_ctx(i); 578 ch = spdk_io_channel_iter_get_channel(i); 
579 group = spdk_io_channel_get_ctx(ch); 580 581 SPDK_DTRACE_PROBE3(nvmf_pg_change_state, ctx->subsystem->subnqn, 582 ctx->requested_state, spdk_thread_get_id(spdk_get_thread())); 583 switch (ctx->requested_state) { 584 case SPDK_NVMF_SUBSYSTEM_INACTIVE: 585 nvmf_poll_group_remove_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i); 586 break; 587 case SPDK_NVMF_SUBSYSTEM_ACTIVE: 588 if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVATING) { 589 nvmf_poll_group_add_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i); 590 } else if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_RESUMING) { 591 nvmf_poll_group_resume_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i); 592 } 593 break; 594 case SPDK_NVMF_SUBSYSTEM_PAUSED: 595 nvmf_poll_group_pause_subsystem(group, ctx->subsystem, ctx->nsid, subsystem_state_change_continue, 596 i); 597 break; 598 default: 599 assert(false); 600 break; 601 } 602 } 603 604 static int 605 nvmf_subsystem_state_change(struct spdk_nvmf_subsystem *subsystem, 606 uint32_t nsid, 607 enum spdk_nvmf_subsystem_state requested_state, 608 spdk_nvmf_subsystem_state_change_done cb_fn, 609 void *cb_arg) 610 { 611 struct subsystem_state_change_ctx *ctx; 612 enum spdk_nvmf_subsystem_state intermediate_state; 613 int rc; 614 615 if (__sync_val_compare_and_swap(&subsystem->changing_state, false, true)) { 616 return -EBUSY; 617 } 618 619 SPDK_DTRACE_PROBE3(nvmf_subsystem_change_state, subsystem->subnqn, 620 requested_state, subsystem->state); 621 /* If we are already in the requested state, just call the callback immediately. 
*/ 622 if (subsystem->state == requested_state) { 623 subsystem->changing_state = false; 624 if (cb_fn) { 625 cb_fn(subsystem, cb_arg, 0); 626 } 627 return 0; 628 } 629 630 intermediate_state = nvmf_subsystem_get_intermediate_state(subsystem->state, requested_state); 631 assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES); 632 633 ctx = calloc(1, sizeof(*ctx)); 634 if (!ctx) { 635 subsystem->changing_state = false; 636 return -ENOMEM; 637 } 638 639 ctx->original_state = subsystem->state; 640 rc = nvmf_subsystem_set_state(subsystem, intermediate_state); 641 if (rc) { 642 free(ctx); 643 subsystem->changing_state = false; 644 return rc; 645 } 646 647 ctx->subsystem = subsystem; 648 ctx->nsid = nsid; 649 ctx->requested_state = requested_state; 650 ctx->cb_fn = cb_fn; 651 ctx->cb_arg = cb_arg; 652 653 spdk_for_each_channel(subsystem->tgt, 654 subsystem_state_change_on_pg, 655 ctx, 656 subsystem_state_change_done); 657 658 return 0; 659 } 660 661 int 662 spdk_nvmf_subsystem_start(struct spdk_nvmf_subsystem *subsystem, 663 spdk_nvmf_subsystem_state_change_done cb_fn, 664 void *cb_arg) 665 { 666 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg); 667 } 668 669 int 670 spdk_nvmf_subsystem_stop(struct spdk_nvmf_subsystem *subsystem, 671 spdk_nvmf_subsystem_state_change_done cb_fn, 672 void *cb_arg) 673 { 674 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_INACTIVE, cb_fn, cb_arg); 675 } 676 677 int 678 spdk_nvmf_subsystem_pause(struct spdk_nvmf_subsystem *subsystem, 679 uint32_t nsid, 680 spdk_nvmf_subsystem_state_change_done cb_fn, 681 void *cb_arg) 682 { 683 return nvmf_subsystem_state_change(subsystem, nsid, SPDK_NVMF_SUBSYSTEM_PAUSED, cb_fn, cb_arg); 684 } 685 686 int 687 spdk_nvmf_subsystem_resume(struct spdk_nvmf_subsystem *subsystem, 688 spdk_nvmf_subsystem_state_change_done cb_fn, 689 void *cb_arg) 690 { 691 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, 
cb_arg); 692 } 693 694 struct spdk_nvmf_subsystem * 695 spdk_nvmf_subsystem_get_first(struct spdk_nvmf_tgt *tgt) 696 { 697 struct spdk_nvmf_subsystem *subsystem; 698 uint32_t sid; 699 700 for (sid = 0; sid < tgt->max_subsystems; sid++) { 701 subsystem = tgt->subsystems[sid]; 702 if (subsystem) { 703 return subsystem; 704 } 705 } 706 707 return NULL; 708 } 709 710 struct spdk_nvmf_subsystem * 711 spdk_nvmf_subsystem_get_next(struct spdk_nvmf_subsystem *subsystem) 712 { 713 uint32_t sid; 714 struct spdk_nvmf_tgt *tgt; 715 716 if (!subsystem) { 717 return NULL; 718 } 719 720 tgt = subsystem->tgt; 721 722 for (sid = subsystem->id + 1; sid < tgt->max_subsystems; sid++) { 723 subsystem = tgt->subsystems[sid]; 724 if (subsystem) { 725 return subsystem; 726 } 727 } 728 729 return NULL; 730 } 731 732 /* Must hold subsystem->mutex while calling this function */ 733 static struct spdk_nvmf_host * 734 nvmf_subsystem_find_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn) 735 { 736 struct spdk_nvmf_host *host = NULL; 737 738 TAILQ_FOREACH(host, &subsystem->hosts, link) { 739 if (strcmp(hostnqn, host->nqn) == 0) { 740 return host; 741 } 742 } 743 744 return NULL; 745 } 746 747 int 748 spdk_nvmf_subsystem_add_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn) 749 { 750 struct spdk_nvmf_host *host; 751 752 if (!nvmf_valid_nqn(hostnqn)) { 753 return -EINVAL; 754 } 755 756 pthread_mutex_lock(&subsystem->mutex); 757 758 if (nvmf_subsystem_find_host(subsystem, hostnqn)) { 759 /* This subsystem already allows the specified host. 
*/ 760 pthread_mutex_unlock(&subsystem->mutex); 761 return 0; 762 } 763 764 host = calloc(1, sizeof(*host)); 765 if (!host) { 766 pthread_mutex_unlock(&subsystem->mutex); 767 return -ENOMEM; 768 } 769 770 snprintf(host->nqn, sizeof(host->nqn), "%s", hostnqn); 771 772 TAILQ_INSERT_HEAD(&subsystem->hosts, host, link); 773 774 nvmf_update_discovery_log(subsystem->tgt, hostnqn); 775 776 pthread_mutex_unlock(&subsystem->mutex); 777 778 return 0; 779 } 780 781 int 782 spdk_nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn) 783 { 784 struct spdk_nvmf_host *host; 785 786 pthread_mutex_lock(&subsystem->mutex); 787 788 host = nvmf_subsystem_find_host(subsystem, hostnqn); 789 if (host == NULL) { 790 pthread_mutex_unlock(&subsystem->mutex); 791 return -ENOENT; 792 } 793 794 nvmf_subsystem_remove_host(subsystem, host); 795 pthread_mutex_unlock(&subsystem->mutex); 796 797 return 0; 798 } 799 800 struct nvmf_subsystem_disconnect_host_ctx { 801 struct spdk_nvmf_subsystem *subsystem; 802 char *hostnqn; 803 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn; 804 void *cb_arg; 805 }; 806 807 static void 808 nvmf_subsystem_disconnect_host_fini(struct spdk_io_channel_iter *i, int status) 809 { 810 struct nvmf_subsystem_disconnect_host_ctx *ctx; 811 812 ctx = spdk_io_channel_iter_get_ctx(i); 813 814 if (ctx->cb_fn) { 815 ctx->cb_fn(ctx->cb_arg, status); 816 } 817 free(ctx->hostnqn); 818 free(ctx); 819 } 820 821 static void 822 nvmf_subsystem_disconnect_qpairs_by_host(struct spdk_io_channel_iter *i) 823 { 824 struct nvmf_subsystem_disconnect_host_ctx *ctx; 825 struct spdk_nvmf_poll_group *group; 826 struct spdk_io_channel *ch; 827 struct spdk_nvmf_qpair *qpair, *tmp_qpair; 828 struct spdk_nvmf_ctrlr *ctrlr; 829 830 ctx = spdk_io_channel_iter_get_ctx(i); 831 ch = spdk_io_channel_iter_get_channel(i); 832 group = spdk_io_channel_get_ctx(ch); 833 834 TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) { 835 ctrlr = qpair->ctrlr; 836 837 if (ctrlr == NULL 
|| ctrlr->subsys != ctx->subsystem) { 838 continue; 839 } 840 841 if (strncmp(ctrlr->hostnqn, ctx->hostnqn, sizeof(ctrlr->hostnqn)) == 0) { 842 /* Right now this does not wait for the queue pairs to actually disconnect. */ 843 spdk_nvmf_qpair_disconnect(qpair, NULL, NULL); 844 } 845 } 846 spdk_for_each_channel_continue(i, 0); 847 } 848 849 int 850 spdk_nvmf_subsystem_disconnect_host(struct spdk_nvmf_subsystem *subsystem, 851 const char *hostnqn, 852 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, 853 void *cb_arg) 854 { 855 struct nvmf_subsystem_disconnect_host_ctx *ctx; 856 857 ctx = calloc(1, sizeof(struct nvmf_subsystem_disconnect_host_ctx)); 858 if (ctx == NULL) { 859 return -ENOMEM; 860 } 861 862 ctx->hostnqn = strdup(hostnqn); 863 if (ctx->hostnqn == NULL) { 864 free(ctx); 865 return -ENOMEM; 866 } 867 868 ctx->subsystem = subsystem; 869 ctx->cb_fn = cb_fn; 870 ctx->cb_arg = cb_arg; 871 872 spdk_for_each_channel(subsystem->tgt, nvmf_subsystem_disconnect_qpairs_by_host, ctx, 873 nvmf_subsystem_disconnect_host_fini); 874 875 return 0; 876 } 877 878 int 879 spdk_nvmf_subsystem_set_allow_any_host(struct spdk_nvmf_subsystem *subsystem, bool allow_any_host) 880 { 881 pthread_mutex_lock(&subsystem->mutex); 882 subsystem->flags.allow_any_host = allow_any_host; 883 nvmf_update_discovery_log(subsystem->tgt, NULL); 884 pthread_mutex_unlock(&subsystem->mutex); 885 886 return 0; 887 } 888 889 bool 890 spdk_nvmf_subsystem_get_allow_any_host(const struct spdk_nvmf_subsystem *subsystem) 891 { 892 bool allow_any_host; 893 struct spdk_nvmf_subsystem *sub; 894 895 /* Technically, taking the mutex modifies data in the subsystem. But the const 896 * is still important to convey that this doesn't mutate any other data. Cast 897 * it away to work around this. 
*/ 898 sub = (struct spdk_nvmf_subsystem *)subsystem; 899 900 pthread_mutex_lock(&sub->mutex); 901 allow_any_host = sub->flags.allow_any_host; 902 pthread_mutex_unlock(&sub->mutex); 903 904 return allow_any_host; 905 } 906 907 bool 908 spdk_nvmf_subsystem_host_allowed(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn) 909 { 910 bool allowed; 911 912 if (!hostnqn) { 913 return false; 914 } 915 916 pthread_mutex_lock(&subsystem->mutex); 917 918 if (subsystem->flags.allow_any_host) { 919 pthread_mutex_unlock(&subsystem->mutex); 920 return true; 921 } 922 923 allowed = nvmf_subsystem_find_host(subsystem, hostnqn) != NULL; 924 pthread_mutex_unlock(&subsystem->mutex); 925 926 return allowed; 927 } 928 929 struct spdk_nvmf_host * 930 spdk_nvmf_subsystem_get_first_host(struct spdk_nvmf_subsystem *subsystem) 931 { 932 return TAILQ_FIRST(&subsystem->hosts); 933 } 934 935 936 struct spdk_nvmf_host * 937 spdk_nvmf_subsystem_get_next_host(struct spdk_nvmf_subsystem *subsystem, 938 struct spdk_nvmf_host *prev_host) 939 { 940 return TAILQ_NEXT(prev_host, link); 941 } 942 943 const char * 944 spdk_nvmf_host_get_nqn(const struct spdk_nvmf_host *host) 945 { 946 return host->nqn; 947 } 948 949 struct spdk_nvmf_subsystem_listener * 950 nvmf_subsystem_find_listener(struct spdk_nvmf_subsystem *subsystem, 951 const struct spdk_nvme_transport_id *trid) 952 { 953 struct spdk_nvmf_subsystem_listener *listener; 954 955 TAILQ_FOREACH(listener, &subsystem->listeners, link) { 956 if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) { 957 return listener; 958 } 959 } 960 961 return NULL; 962 } 963 964 /** 965 * Function to be called once the target is listening. 966 * 967 * \param ctx Context argument passed to this function. 968 * \param status 0 if it completed successfully, or negative errno if it failed. 
969 */ 970 static void 971 _nvmf_subsystem_add_listener_done(void *ctx, int status) 972 { 973 struct spdk_nvmf_subsystem_listener *listener = ctx; 974 975 if (status) { 976 listener->cb_fn(listener->cb_arg, status); 977 free(listener); 978 return; 979 } 980 981 TAILQ_INSERT_HEAD(&listener->subsystem->listeners, listener, link); 982 nvmf_update_discovery_log(listener->subsystem->tgt, NULL); 983 listener->cb_fn(listener->cb_arg, status); 984 } 985 986 void 987 spdk_nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem, 988 struct spdk_nvme_transport_id *trid, 989 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, 990 void *cb_arg) 991 { 992 struct spdk_nvmf_transport *transport; 993 struct spdk_nvmf_subsystem_listener *listener; 994 struct spdk_nvmf_listener *tr_listener; 995 uint32_t i; 996 int rc = 0; 997 998 assert(cb_fn != NULL); 999 1000 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 1001 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 1002 cb_fn(cb_arg, -EAGAIN); 1003 return; 1004 } 1005 1006 if (nvmf_subsystem_find_listener(subsystem, trid)) { 1007 /* Listener already exists in this subsystem */ 1008 cb_fn(cb_arg, 0); 1009 return; 1010 } 1011 1012 transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, trid->trstring); 1013 if (!transport) { 1014 SPDK_ERRLOG("Unable to find %s transport. 
The transport must be created first also make sure it is properly registered.\n", 1015 trid->trstring); 1016 cb_fn(cb_arg, -EINVAL); 1017 return; 1018 } 1019 1020 tr_listener = nvmf_transport_find_listener(transport, trid); 1021 if (!tr_listener) { 1022 SPDK_ERRLOG("Cannot find transport listener for %s\n", trid->traddr); 1023 cb_fn(cb_arg, -EINVAL); 1024 return; 1025 } 1026 1027 listener = calloc(1, sizeof(*listener)); 1028 if (!listener) { 1029 cb_fn(cb_arg, -ENOMEM); 1030 return; 1031 } 1032 1033 listener->trid = &tr_listener->trid; 1034 listener->transport = transport; 1035 listener->cb_fn = cb_fn; 1036 listener->cb_arg = cb_arg; 1037 listener->subsystem = subsystem; 1038 listener->ana_state = calloc(subsystem->max_nsid, sizeof(enum spdk_nvme_ana_state)); 1039 if (!listener->ana_state) { 1040 free(listener); 1041 cb_fn(cb_arg, -ENOMEM); 1042 return; 1043 } 1044 1045 for (i = 0; i < subsystem->max_nsid; i++) { 1046 listener->ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE; 1047 } 1048 1049 if (transport->ops->listen_associate != NULL) { 1050 rc = transport->ops->listen_associate(transport, subsystem, trid); 1051 } 1052 1053 _nvmf_subsystem_add_listener_done(listener, rc); 1054 } 1055 1056 int 1057 spdk_nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem, 1058 const struct spdk_nvme_transport_id *trid) 1059 { 1060 struct spdk_nvmf_subsystem_listener *listener; 1061 1062 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 1063 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 1064 return -EAGAIN; 1065 } 1066 1067 listener = nvmf_subsystem_find_listener(subsystem, trid); 1068 if (listener == NULL) { 1069 return -ENOENT; 1070 } 1071 1072 _nvmf_subsystem_remove_listener(subsystem, listener, false); 1073 1074 return 0; 1075 } 1076 1077 void 1078 nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem, 1079 bool stop) 1080 { 1081 struct spdk_nvmf_subsystem_listener *listener, *listener_tmp; 1082 1083 TAILQ_FOREACH_SAFE(listener, 
&subsystem->listeners, link, listener_tmp) { 1084 _nvmf_subsystem_remove_listener(subsystem, listener, stop); 1085 } 1086 } 1087 1088 bool 1089 spdk_nvmf_subsystem_listener_allowed(struct spdk_nvmf_subsystem *subsystem, 1090 const struct spdk_nvme_transport_id *trid) 1091 { 1092 struct spdk_nvmf_subsystem_listener *listener; 1093 1094 if (!strcmp(subsystem->subnqn, SPDK_NVMF_DISCOVERY_NQN)) { 1095 return true; 1096 } 1097 1098 TAILQ_FOREACH(listener, &subsystem->listeners, link) { 1099 if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) { 1100 return true; 1101 } 1102 } 1103 1104 return false; 1105 } 1106 1107 struct spdk_nvmf_subsystem_listener * 1108 spdk_nvmf_subsystem_get_first_listener(struct spdk_nvmf_subsystem *subsystem) 1109 { 1110 return TAILQ_FIRST(&subsystem->listeners); 1111 } 1112 1113 struct spdk_nvmf_subsystem_listener * 1114 spdk_nvmf_subsystem_get_next_listener(struct spdk_nvmf_subsystem *subsystem, 1115 struct spdk_nvmf_subsystem_listener *prev_listener) 1116 { 1117 return TAILQ_NEXT(prev_listener, link); 1118 } 1119 1120 const struct spdk_nvme_transport_id * 1121 spdk_nvmf_subsystem_listener_get_trid(struct spdk_nvmf_subsystem_listener *listener) 1122 { 1123 return listener->trid; 1124 } 1125 1126 void 1127 spdk_nvmf_subsystem_allow_any_listener(struct spdk_nvmf_subsystem *subsystem, 1128 bool allow_any_listener) 1129 { 1130 subsystem->flags.allow_any_listener = allow_any_listener; 1131 } 1132 1133 bool 1134 spdk_nvmf_subsytem_any_listener_allowed(struct spdk_nvmf_subsystem *subsystem) 1135 { 1136 return subsystem->flags.allow_any_listener; 1137 } 1138 1139 1140 struct subsystem_update_ns_ctx { 1141 struct spdk_nvmf_subsystem *subsystem; 1142 1143 spdk_nvmf_subsystem_state_change_done cb_fn; 1144 void *cb_arg; 1145 }; 1146 1147 static void 1148 subsystem_update_ns_done(struct spdk_io_channel_iter *i, int status) 1149 { 1150 struct subsystem_update_ns_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 1151 1152 if (ctx->cb_fn) { 1153 
ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status); 1154 } 1155 free(ctx); 1156 } 1157 1158 static void 1159 subsystem_update_ns_on_pg(struct spdk_io_channel_iter *i) 1160 { 1161 int rc; 1162 struct subsystem_update_ns_ctx *ctx; 1163 struct spdk_nvmf_poll_group *group; 1164 struct spdk_nvmf_subsystem *subsystem; 1165 1166 ctx = spdk_io_channel_iter_get_ctx(i); 1167 group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i)); 1168 subsystem = ctx->subsystem; 1169 1170 rc = nvmf_poll_group_update_subsystem(group, subsystem); 1171 spdk_for_each_channel_continue(i, rc); 1172 } 1173 1174 static int 1175 nvmf_subsystem_update_ns(struct spdk_nvmf_subsystem *subsystem, spdk_channel_for_each_cpl cpl, 1176 void *ctx) 1177 { 1178 spdk_for_each_channel(subsystem->tgt, 1179 subsystem_update_ns_on_pg, 1180 ctx, 1181 cpl); 1182 1183 return 0; 1184 } 1185 1186 static void 1187 nvmf_subsystem_ns_changed(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid) 1188 { 1189 struct spdk_nvmf_ctrlr *ctrlr; 1190 1191 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 1192 nvmf_ctrlr_ns_changed(ctrlr, nsid); 1193 } 1194 } 1195 1196 static uint32_t 1197 nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns); 1198 1199 int 1200 spdk_nvmf_subsystem_remove_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid) 1201 { 1202 struct spdk_nvmf_transport *transport; 1203 struct spdk_nvmf_ns *ns; 1204 1205 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 1206 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 1207 assert(false); 1208 return -1; 1209 } 1210 1211 if (nsid == 0 || nsid > subsystem->max_nsid) { 1212 return -1; 1213 } 1214 1215 ns = subsystem->ns[nsid - 1]; 1216 if (!ns) { 1217 return -1; 1218 } 1219 1220 subsystem->ns[nsid - 1] = NULL; 1221 1222 assert(ns->anagrpid - 1 < subsystem->max_nsid); 1223 assert(subsystem->ana_group[ns->anagrpid - 1] > 0); 1224 1225 subsystem->ana_group[ns->anagrpid - 1]--; 1226 1227 free(ns->ptpl_file); 1228 
nvmf_ns_reservation_clear_all_registrants(ns); 1229 spdk_bdev_module_release_bdev(ns->bdev); 1230 spdk_bdev_close(ns->desc); 1231 free(ns); 1232 1233 for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport; 1234 transport = spdk_nvmf_transport_get_next(transport)) { 1235 if (transport->ops->subsystem_remove_ns) { 1236 transport->ops->subsystem_remove_ns(transport, subsystem, nsid); 1237 } 1238 } 1239 1240 nvmf_subsystem_ns_changed(subsystem, nsid); 1241 1242 return 0; 1243 } 1244 1245 struct subsystem_ns_change_ctx { 1246 struct spdk_nvmf_subsystem *subsystem; 1247 spdk_nvmf_subsystem_state_change_done cb_fn; 1248 uint32_t nsid; 1249 }; 1250 1251 static void 1252 _nvmf_ns_hot_remove(struct spdk_nvmf_subsystem *subsystem, 1253 void *cb_arg, int status) 1254 { 1255 struct subsystem_ns_change_ctx *ctx = cb_arg; 1256 int rc; 1257 1258 rc = spdk_nvmf_subsystem_remove_ns(subsystem, ctx->nsid); 1259 if (rc != 0) { 1260 SPDK_ERRLOG("Failed to make changes to NVME-oF subsystem with id: %u\n", subsystem->id); 1261 } 1262 1263 spdk_nvmf_subsystem_resume(subsystem, NULL, NULL); 1264 1265 free(ctx); 1266 } 1267 1268 static void 1269 nvmf_ns_change_msg(void *ns_ctx) 1270 { 1271 struct subsystem_ns_change_ctx *ctx = ns_ctx; 1272 int rc; 1273 1274 rc = spdk_nvmf_subsystem_pause(ctx->subsystem, ctx->nsid, ctx->cb_fn, ctx); 1275 if (rc) { 1276 if (rc == -EBUSY) { 1277 /* Try again, this is not a permanent situation. */ 1278 spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ctx); 1279 } else { 1280 free(ctx); 1281 SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n"); 1282 } 1283 } 1284 } 1285 1286 static void 1287 nvmf_ns_hot_remove(void *remove_ctx) 1288 { 1289 struct spdk_nvmf_ns *ns = remove_ctx; 1290 struct subsystem_ns_change_ctx *ns_ctx; 1291 int rc; 1292 1293 /* We have to allocate a new context because this op 1294 * is asynchronous and we could lose the ns in the middle. 
1295 */ 1296 ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx)); 1297 if (!ns_ctx) { 1298 SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n"); 1299 return; 1300 } 1301 1302 ns_ctx->subsystem = ns->subsystem; 1303 ns_ctx->nsid = ns->opts.nsid; 1304 ns_ctx->cb_fn = _nvmf_ns_hot_remove; 1305 1306 rc = spdk_nvmf_subsystem_pause(ns->subsystem, ns_ctx->nsid, _nvmf_ns_hot_remove, ns_ctx); 1307 if (rc) { 1308 if (rc == -EBUSY) { 1309 /* Try again, this is not a permanent situation. */ 1310 spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx); 1311 } else { 1312 SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n"); 1313 free(ns_ctx); 1314 } 1315 } 1316 } 1317 1318 static void 1319 _nvmf_ns_resize(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status) 1320 { 1321 struct subsystem_ns_change_ctx *ctx = cb_arg; 1322 1323 nvmf_subsystem_ns_changed(subsystem, ctx->nsid); 1324 spdk_nvmf_subsystem_resume(subsystem, NULL, NULL); 1325 1326 free(ctx); 1327 } 1328 1329 static void 1330 nvmf_ns_resize(void *event_ctx) 1331 { 1332 struct spdk_nvmf_ns *ns = event_ctx; 1333 struct subsystem_ns_change_ctx *ns_ctx; 1334 int rc; 1335 1336 /* We have to allocate a new context because this op 1337 * is asynchronous and we could lose the ns in the middle. 1338 */ 1339 ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx)); 1340 if (!ns_ctx) { 1341 SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n"); 1342 return; 1343 } 1344 1345 ns_ctx->subsystem = ns->subsystem; 1346 ns_ctx->nsid = ns->opts.nsid; 1347 ns_ctx->cb_fn = _nvmf_ns_resize; 1348 1349 /* Specify 0 for the nsid here, because we do not need to pause the namespace. 1350 * Namespaces can only be resized bigger, so there is no need to quiesce I/O. 1351 */ 1352 rc = spdk_nvmf_subsystem_pause(ns->subsystem, 0, _nvmf_ns_resize, ns_ctx); 1353 if (rc) { 1354 if (rc == -EBUSY) { 1355 /* Try again, this is not a permanent situation. 
*/ 1356 spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx); 1357 } else { 1358 SPDK_ERRLOG("Unable to pause subsystem to process namespace resize!\n"); 1359 free(ns_ctx); 1360 } 1361 } 1362 } 1363 1364 static void 1365 nvmf_ns_event(enum spdk_bdev_event_type type, 1366 struct spdk_bdev *bdev, 1367 void *event_ctx) 1368 { 1369 SPDK_DEBUGLOG(nvmf, "Bdev event: type %d, name %s, subsystem_id %d, ns_id %d\n", 1370 type, 1371 spdk_bdev_get_name(bdev), 1372 ((struct spdk_nvmf_ns *)event_ctx)->subsystem->id, 1373 ((struct spdk_nvmf_ns *)event_ctx)->nsid); 1374 1375 switch (type) { 1376 case SPDK_BDEV_EVENT_REMOVE: 1377 nvmf_ns_hot_remove(event_ctx); 1378 break; 1379 case SPDK_BDEV_EVENT_RESIZE: 1380 nvmf_ns_resize(event_ctx); 1381 break; 1382 default: 1383 SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type); 1384 break; 1385 } 1386 } 1387 1388 void 1389 spdk_nvmf_ns_opts_get_defaults(struct spdk_nvmf_ns_opts *opts, size_t opts_size) 1390 { 1391 if (!opts) { 1392 SPDK_ERRLOG("opts should not be NULL.\n"); 1393 return; 1394 } 1395 1396 if (!opts_size) { 1397 SPDK_ERRLOG("opts_size should not be zero.\n"); 1398 return; 1399 } 1400 1401 memset(opts, 0, opts_size); 1402 opts->opts_size = opts_size; 1403 1404 #define FIELD_OK(field) \ 1405 offsetof(struct spdk_nvmf_ns_opts, field) + sizeof(opts->field) <= opts_size 1406 1407 #define SET_FIELD(field, value) \ 1408 if (FIELD_OK(field)) { \ 1409 opts->field = value; \ 1410 } \ 1411 1412 /* All current fields are set to 0 by default. 
*/ 1413 SET_FIELD(nsid, 0); 1414 if (FIELD_OK(nguid)) { 1415 memset(opts->nguid, 0, sizeof(opts->nguid)); 1416 } 1417 if (FIELD_OK(eui64)) { 1418 memset(opts->eui64, 0, sizeof(opts->eui64)); 1419 } 1420 if (FIELD_OK(uuid)) { 1421 memset(&opts->uuid, 0, sizeof(opts->uuid)); 1422 } 1423 SET_FIELD(anagrpid, 0); 1424 1425 #undef FIELD_OK 1426 #undef SET_FIELD 1427 } 1428 1429 static void 1430 nvmf_ns_opts_copy(struct spdk_nvmf_ns_opts *opts, 1431 const struct spdk_nvmf_ns_opts *user_opts, 1432 size_t opts_size) 1433 { 1434 #define FIELD_OK(field) \ 1435 offsetof(struct spdk_nvmf_ns_opts, field) + sizeof(opts->field) <= user_opts->opts_size 1436 1437 #define SET_FIELD(field) \ 1438 if (FIELD_OK(field)) { \ 1439 opts->field = user_opts->field; \ 1440 } \ 1441 1442 SET_FIELD(nsid); 1443 if (FIELD_OK(nguid)) { 1444 memcpy(opts->nguid, user_opts->nguid, sizeof(opts->nguid)); 1445 } 1446 if (FIELD_OK(eui64)) { 1447 memcpy(opts->eui64, user_opts->eui64, sizeof(opts->eui64)); 1448 } 1449 if (FIELD_OK(uuid)) { 1450 memcpy(&opts->uuid, &user_opts->uuid, sizeof(opts->uuid)); 1451 } 1452 SET_FIELD(anagrpid); 1453 1454 opts->opts_size = user_opts->opts_size; 1455 1456 /* We should not remove this statement, but need to update the assert statement 1457 * if we add a new field, and also add a corresponding SET_FIELD statement. 1458 */ 1459 SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_ns_opts) == 64, "Incorrect size"); 1460 1461 #undef FIELD_OK 1462 #undef SET_FIELD 1463 } 1464 1465 /* Dummy bdev module used to to claim bdevs. 
 */
static struct spdk_bdev_module ns_bdev_module = {
	.name	= "NVMe-oF Target",
};

static int
nvmf_ns_load_reservation(const char *file, struct spdk_nvmf_reservation_info *info);
static int
nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info);

/* Add a namespace backed by bdev_name to the subsystem.
 *
 * The subsystem must be INACTIVE or PAUSED. user_opts (optional, sized
 * opts_size) override the defaults; NSID 0 means "pick the first free slot".
 * ptpl_file (optional) enables reservation Persist Through Power Loss and is
 * loaded/restored if it exists. Returns the assigned NSID, or 0 on any error.
 */
uint32_t
spdk_nvmf_subsystem_add_ns_ext(struct spdk_nvmf_subsystem *subsystem, const char *bdev_name,
			       const struct spdk_nvmf_ns_opts *user_opts, size_t opts_size,
			       const char *ptpl_file)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_ns_opts opts;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_reservation_info info = {0};
	int rc;

	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		return 0;
	}

	spdk_nvmf_ns_opts_get_defaults(&opts, sizeof(opts));
	if (user_opts) {
		nvmf_ns_opts_copy(&opts, user_opts, opts_size);
	}

	if (opts.nsid == SPDK_NVME_GLOBAL_NS_TAG) {
		SPDK_ERRLOG("Invalid NSID %" PRIu32 "\n", opts.nsid);
		return 0;
	}

	if (opts.nsid == 0) {
		/*
		 * NSID not specified - find a free index.
		 *
		 * If no free slots are found, opts.nsid will be subsystem->max_nsid + 1, which will
		 * expand max_nsid if possible.
		 */
		for (opts.nsid = 1; opts.nsid <= subsystem->max_nsid; opts.nsid++) {
			if (_nvmf_subsystem_get_ns(subsystem, opts.nsid) == NULL) {
				break;
			}
		}
	}

	if (_nvmf_subsystem_get_ns(subsystem, opts.nsid)) {
		SPDK_ERRLOG("Requested NSID %" PRIu32 " already in use\n", opts.nsid);
		return 0;
	}

	if (opts.nsid > subsystem->max_nsid) {
		SPDK_ERRLOG("NSID greater than maximum not allowed\n");
		return 0;
	}

	/* ANA group defaults to a private group matching the NSID. */
	if (opts.anagrpid == 0) {
		opts.anagrpid = opts.nsid;
	}

	if (opts.anagrpid > subsystem->max_nsid) {
		SPDK_ERRLOG("ANAGRPID greater than maximum NSID not allowed\n");
		return 0;
	}

	ns = calloc(1, sizeof(*ns));
	if (ns == NULL) {
		SPDK_ERRLOG("Namespace allocation failed\n");
		return 0;
	}

	/* Open writable; nvmf_ns_event handles hot remove / resize. */
	rc = spdk_bdev_open_ext(bdev_name, true, nvmf_ns_event, ns, &ns->desc);
	if (rc != 0) {
		SPDK_ERRLOG("Subsystem %s: bdev %s cannot be opened, error=%d\n",
			    subsystem->subnqn, bdev_name, rc);
		free(ns);
		return 0;
	}

	ns->bdev = spdk_bdev_desc_get_bdev(ns->desc);

	if (spdk_bdev_get_md_size(ns->bdev) != 0 && !spdk_bdev_is_md_interleaved(ns->bdev)) {
		SPDK_ERRLOG("Can't attach bdev with separate metadata.\n");
		spdk_bdev_close(ns->desc);
		free(ns);
		return 0;
	}

	/* Claim the bdev so no other module can take it while it is exported. */
	rc = spdk_bdev_module_claim_bdev(ns->bdev, ns->desc, &ns_bdev_module);
	if (rc != 0) {
		spdk_bdev_close(ns->desc);
		free(ns);
		return 0;
	}

	/* Cache the zcopy capability of the bdev device */
	ns->zcopy = spdk_bdev_io_type_supported(ns->bdev, SPDK_BDEV_IO_TYPE_ZCOPY);

	if (spdk_mem_all_zero(&opts.uuid, sizeof(opts.uuid))) {
		opts.uuid = *spdk_bdev_get_uuid(ns->bdev);
	}

	/* if nguid descriptor is supported by bdev module (nvme) then uuid = nguid */
	if (spdk_mem_all_zero(opts.nguid, sizeof(opts.nguid))) {
		SPDK_STATIC_ASSERT(sizeof(opts.nguid) == sizeof(opts.uuid), "size mismatch");
		memcpy(opts.nguid, spdk_bdev_get_uuid(ns->bdev), sizeof(opts.nguid));
	}

	ns->opts = opts;
	ns->subsystem = subsystem;
	subsystem->ns[opts.nsid - 1] = ns;
	ns->nsid = opts.nsid;
	ns->anagrpid = opts.anagrpid;
	subsystem->ana_group[ns->anagrpid - 1]++;
	TAILQ_INIT(&ns->registrants);
	if (ptpl_file) {
		rc = nvmf_ns_load_reservation(ptpl_file, &info);
		if (!rc) {
			rc = nvmf_ns_reservation_restore(ns, &info);
			if (rc) {
				SPDK_ERRLOG("Subsystem restore reservation failed\n");
				/* NOTE(review): this jump skips
				 * nvmf_ns_reservation_clear_all_registrants(), so any
				 * registrants nvmf_ns_reservation_restore() already linked
				 * into ns->registrants appear to leak - confirm and
				 * consider jumping to err_strdup instead. */
				goto err_ns_reservation_restore;
			}
		}
		ns->ptpl_file = strdup(ptpl_file);
		if (!ns->ptpl_file) {
			SPDK_ERRLOG("Namespace ns->ptpl_file allocation failed\n");
			goto err_strdup;
		}
	}

	/* Give every transport a chance to veto / set up the new namespace. */
	for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
	     transport = spdk_nvmf_transport_get_next(transport)) {
		if (transport->ops->subsystem_add_ns) {
			rc = transport->ops->subsystem_add_ns(transport, subsystem, ns);
			if (rc) {
				SPDK_ERRLOG("Namespace attachment is not allowed by %s transport\n", transport->ops->name);
				goto err_subsystem_add_ns;
			}
		}
	}

	SPDK_DEBUGLOG(nvmf, "Subsystem %s: bdev %s assigned nsid %" PRIu32 "\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem),
		      bdev_name,
		      opts.nsid);

	nvmf_subsystem_ns_changed(subsystem, opts.nsid);

	return opts.nsid;

	/* Unwind in reverse order of setup; labels fall through. */
err_subsystem_add_ns:
	free(ns->ptpl_file);
err_strdup:
	nvmf_ns_reservation_clear_all_registrants(ns);
err_ns_reservation_restore:
	subsystem->ns[opts.nsid - 1] = NULL;
	spdk_bdev_module_release_bdev(ns->bdev);
	spdk_bdev_close(ns->desc);
	free(ns);
	return 0;

}

/* Return the first allocated NSID strictly greater than prev_nsid, or 0 if
 * none. Pass 0 to start iteration. */
static uint32_t
nvmf_subsystem_get_next_allocated_nsid(struct spdk_nvmf_subsystem *subsystem,
				       uint32_t prev_nsid)
{
	uint32_t nsid;

	if (prev_nsid >= subsystem->max_nsid) {
		return 0;
	}

	for (nsid = prev_nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return nsid;
		}
	}

	return 0;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	uint32_t first_nsid;

	first_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, 0);
	return _nvmf_subsystem_get_ns(subsystem, first_nsid);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t next_nsid;

	next_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, prev_ns->opts.nsid);
	return _nvmf_subsystem_get_ns(subsystem, next_nsid);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	return _nvmf_subsystem_get_ns(subsystem, nsid);
}

uint32_t
spdk_nvmf_ns_get_id(const struct spdk_nvmf_ns *ns)
{
	return ns->opts.nsid;
}

struct spdk_bdev *
spdk_nvmf_ns_get_bdev(struct spdk_nvmf_ns *ns)
{
	return ns->bdev;
}

/* Copy up to opts_size bytes of the namespace options; extra caller space is
 * zeroed. */
void
spdk_nvmf_ns_get_opts(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_ns_opts *opts,
		      size_t opts_size)
{
	memset(opts, 0, opts_size);
	memcpy(opts, &ns->opts, spdk_min(sizeof(ns->opts), opts_size));
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

/* Set the serial number; must fit the SN field and be printable ASCII
 * (NVMe spec requirement). Returns 0 or -1. */
int
spdk_nvmf_subsystem_set_sn(struct spdk_nvmf_subsystem *subsystem, const char *sn)
{
	size_t len, max_len;

	max_len = sizeof(subsystem->sn) - 1;
	len = strlen(sn);
	if (len > max_len) {
		SPDK_DEBUGLOG(nvmf, "Invalid sn \"%s\": length %zu > max %zu\n",
			      sn, len, max_len);
		return -1;
	}

	if (!nvmf_valid_ascii_string(sn, len)) {
		SPDK_DEBUGLOG(nvmf, "Non-ASCII sn\n");
		SPDK_LOGDUMP(nvmf, "sn", sn, len);
		return -1;
	}

	snprintf(subsystem->sn, sizeof(subsystem->sn), "%s", sn);

	return 0;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

/* Set the model number (NULL selects MODEL_NUMBER_DEFAULT); must fit the MN
 * field and be printable ASCII. Returns 0 or -1. */
int
spdk_nvmf_subsystem_set_mn(struct spdk_nvmf_subsystem *subsystem, const char *mn)
{
	size_t len, max_len;

	if (mn == NULL) {
		mn = MODEL_NUMBER_DEFAULT;
	}
	max_len = sizeof(subsystem->mn) - 1;
	len = strlen(mn);
	if (len > max_len) {
		SPDK_DEBUGLOG(nvmf, "Invalid mn \"%s\": length %zu > max %zu\n",
			      mn, len, max_len);
		return -1;
	}

	if (!nvmf_valid_ascii_string(mn, len)) {
		SPDK_DEBUGLOG(nvmf, "Non-ASCII mn\n");
		SPDK_LOGDUMP(nvmf, "mn", mn, len);
		return -1;
	}

	snprintf(subsystem->mn, sizeof(subsystem->mn), "%s", mn);

	return 0;
}

const char *
spdk_nvmf_subsystem_get_nqn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->subnqn;
}

enum spdk_nvmf_subtype spdk_nvmf_subsystem_get_type(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->subtype;
}

uint32_t
spdk_nvmf_subsystem_get_max_nsid(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->max_nsid;
}

/* Restrict the controller ID allocation range. Only valid while the
 * subsystem is INACTIVE. Returns 0, -EAGAIN or -EINVAL. */
int
nvmf_subsystem_set_cntlid_range(struct spdk_nvmf_subsystem *subsystem,
				uint16_t min_cntlid, uint16_t max_cntlid)
{
	if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) {
		return -EAGAIN;
	}

	if (min_cntlid > max_cntlid) {
		return -EINVAL;
	}
	/* The spec reserves cntlid values in the range FFF0h to FFFFh. */
	if (min_cntlid < NVMF_MIN_CNTLID || min_cntlid > NVMF_MAX_CNTLID ||
	    max_cntlid < NVMF_MIN_CNTLID || max_cntlid > NVMF_MAX_CNTLID) {
		return -EINVAL;
	}
	subsystem->min_cntlid = min_cntlid;
	subsystem->max_cntlid = max_cntlid;
	/* next_cntlid is pre-incremented before use, so park it one below min. */
	if (subsystem->next_cntlid < min_cntlid || subsystem->next_cntlid > max_cntlid - 1) {
		subsystem->next_cntlid = min_cntlid - 1;
	}

	return 0;
}

/* Pick the next unused controller ID in [min_cntlid, max_cntlid], wrapping
 * around. Returns 0xFFFF if every value is taken. */
static uint16_t
nvmf_subsystem_gen_cntlid(struct spdk_nvmf_subsystem *subsystem)
{
	int count;

	/*
	 * In the worst case, we might have to try all CNTLID values between min_cntlid and max_cntlid
	 * before we find one that is unused (or find that all values are in use).
	 */
	for (count = 0; count < subsystem->max_cntlid - subsystem->min_cntlid + 1; count++) {
		subsystem->next_cntlid++;
		if (subsystem->next_cntlid > subsystem->max_cntlid) {
			subsystem->next_cntlid = subsystem->min_cntlid;
		}

		/* Check if a controller with this cntlid currently exists. */
		if (nvmf_subsystem_get_ctrlr(subsystem, subsystem->next_cntlid) == NULL) {
			/* Found unused cntlid */
			return subsystem->next_cntlid;
		}
	}

	/* All valid cntlid values are in use. */
	return 0xFFFF;
}

/* Assign a cntlid to ctrlr and link it into the subsystem.
 * Returns 0 or -EBUSY when no cntlid is available. */
int
nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr)
{
	ctrlr->cntlid = nvmf_subsystem_gen_cntlid(subsystem);
	if (ctrlr->cntlid == 0xFFFF) {
		/* Unable to get a cntlid */
		SPDK_ERRLOG("Reached max simultaneous ctrlrs\n");
		return -EBUSY;
	}

	TAILQ_INSERT_TAIL(&subsystem->ctrlrs, ctrlr, link);

	return 0;
}

void
nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
			    struct spdk_nvmf_ctrlr *ctrlr)
{
	assert(subsystem == ctrlr->subsys);
	TAILQ_REMOVE(&subsystem->ctrlrs, ctrlr, link);
}

/* Linear lookup of a controller by cntlid; NULL if not found. */
struct spdk_nvmf_ctrlr *
nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid)
{
	struct spdk_nvmf_ctrlr *ctrlr;

	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
		if (ctrlr->cntlid == cntlid) {
			return ctrlr;
		}
	}

	return NULL;
}

uint32_t
spdk_nvmf_subsystem_get_max_namespaces(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->max_nsid;
}

uint16_t
spdk_nvmf_subsystem_get_min_cntlid(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->min_cntlid;
}

uint16_t
spdk_nvmf_subsystem_get_max_cntlid(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->max_cntlid;
}

/* Scratch structs used only while decoding the reservation persist (PTPL)
 * JSON file; the strings are heap-allocated by the JSON decoder. */
struct _nvmf_ns_registrant {
	uint64_t		rkey;
	char			*host_uuid;
};

struct _nvmf_ns_registrants {
	size_t				num_regs;
	struct _nvmf_ns_registrant	reg[SPDK_NVMF_MAX_NUM_REGISTRANTS];
};

struct _nvmf_ns_reservation {
	bool					ptpl_activated;
	enum spdk_nvme_reservation_type		rtype;
	uint64_t				crkey;
	char					*bdev_uuid;
	char					*holder_uuid;
	struct _nvmf_ns_registrants		regs;
};

/* JSON decoders for one registrant entry in the persist file. */
static const struct spdk_json_object_decoder nvmf_ns_pr_reg_decoders[] = {
1903 {"rkey", offsetof(struct _nvmf_ns_registrant, rkey), spdk_json_decode_uint64}, 1904 {"host_uuid", offsetof(struct _nvmf_ns_registrant, host_uuid), spdk_json_decode_string}, 1905 }; 1906 1907 static int 1908 nvmf_decode_ns_pr_reg(const struct spdk_json_val *val, void *out) 1909 { 1910 struct _nvmf_ns_registrant *reg = out; 1911 1912 return spdk_json_decode_object(val, nvmf_ns_pr_reg_decoders, 1913 SPDK_COUNTOF(nvmf_ns_pr_reg_decoders), reg); 1914 } 1915 1916 static int 1917 nvmf_decode_ns_pr_regs(const struct spdk_json_val *val, void *out) 1918 { 1919 struct _nvmf_ns_registrants *regs = out; 1920 1921 return spdk_json_decode_array(val, nvmf_decode_ns_pr_reg, regs->reg, 1922 SPDK_NVMF_MAX_NUM_REGISTRANTS, ®s->num_regs, 1923 sizeof(struct _nvmf_ns_registrant)); 1924 } 1925 1926 static const struct spdk_json_object_decoder nvmf_ns_pr_decoders[] = { 1927 {"ptpl", offsetof(struct _nvmf_ns_reservation, ptpl_activated), spdk_json_decode_bool, true}, 1928 {"rtype", offsetof(struct _nvmf_ns_reservation, rtype), spdk_json_decode_uint32, true}, 1929 {"crkey", offsetof(struct _nvmf_ns_reservation, crkey), spdk_json_decode_uint64, true}, 1930 {"bdev_uuid", offsetof(struct _nvmf_ns_reservation, bdev_uuid), spdk_json_decode_string}, 1931 {"holder_uuid", offsetof(struct _nvmf_ns_reservation, holder_uuid), spdk_json_decode_string, true}, 1932 {"registrants", offsetof(struct _nvmf_ns_reservation, regs), nvmf_decode_ns_pr_regs}, 1933 }; 1934 1935 static int 1936 nvmf_ns_load_reservation(const char *file, struct spdk_nvmf_reservation_info *info) 1937 { 1938 FILE *fd; 1939 size_t json_size; 1940 ssize_t values_cnt, rc; 1941 void *json = NULL, *end; 1942 struct spdk_json_val *values = NULL; 1943 struct _nvmf_ns_reservation res = {}; 1944 uint32_t i; 1945 1946 fd = fopen(file, "r"); 1947 /* It's not an error if the file does not exist */ 1948 if (!fd) { 1949 SPDK_NOTICELOG("File %s does not exist\n", file); 1950 return -ENOENT; 1951 } 1952 1953 /* Load all persist file contents into 
a local buffer */ 1954 json = spdk_posix_file_load(fd, &json_size); 1955 fclose(fd); 1956 if (!json) { 1957 SPDK_ERRLOG("Load persit file %s failed\n", file); 1958 return -ENOMEM; 1959 } 1960 1961 rc = spdk_json_parse(json, json_size, NULL, 0, &end, 0); 1962 if (rc < 0) { 1963 SPDK_NOTICELOG("Parsing JSON configuration failed (%zd)\n", rc); 1964 goto exit; 1965 } 1966 1967 values_cnt = rc; 1968 values = calloc(values_cnt, sizeof(struct spdk_json_val)); 1969 if (values == NULL) { 1970 goto exit; 1971 } 1972 1973 rc = spdk_json_parse(json, json_size, values, values_cnt, &end, 0); 1974 if (rc != values_cnt) { 1975 SPDK_ERRLOG("Parsing JSON configuration failed (%zd)\n", rc); 1976 goto exit; 1977 } 1978 1979 /* Decode json */ 1980 if (spdk_json_decode_object(values, nvmf_ns_pr_decoders, 1981 SPDK_COUNTOF(nvmf_ns_pr_decoders), 1982 &res)) { 1983 SPDK_ERRLOG("Invalid objects in the persist file %s\n", file); 1984 rc = -EINVAL; 1985 goto exit; 1986 } 1987 1988 if (res.regs.num_regs > SPDK_NVMF_MAX_NUM_REGISTRANTS) { 1989 SPDK_ERRLOG("Can only support up to %u registrants\n", SPDK_NVMF_MAX_NUM_REGISTRANTS); 1990 rc = -ERANGE; 1991 goto exit; 1992 } 1993 1994 rc = 0; 1995 info->ptpl_activated = res.ptpl_activated; 1996 info->rtype = res.rtype; 1997 info->crkey = res.crkey; 1998 snprintf(info->bdev_uuid, sizeof(info->bdev_uuid), "%s", res.bdev_uuid); 1999 snprintf(info->holder_uuid, sizeof(info->holder_uuid), "%s", res.holder_uuid); 2000 info->num_regs = res.regs.num_regs; 2001 for (i = 0; i < res.regs.num_regs; i++) { 2002 info->registrants[i].rkey = res.regs.reg[i].rkey; 2003 snprintf(info->registrants[i].host_uuid, sizeof(info->registrants[i].host_uuid), "%s", 2004 res.regs.reg[i].host_uuid); 2005 } 2006 2007 exit: 2008 free(json); 2009 free(values); 2010 free(res.bdev_uuid); 2011 free(res.holder_uuid); 2012 for (i = 0; i < res.regs.num_regs; i++) { 2013 free(res.regs.reg[i].host_uuid); 2014 } 2015 2016 return rc; 2017 } 2018 2019 static bool 2020 
nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns);

/* Rebuild the in-memory reservation state of ns from a previously loaded
 * persist-file snapshot. Returns 0 (including when PTPL was not active),
 * -EINVAL on bdev UUID mismatch, -ENOMEM on allocation failure.
 *
 * NOTE(review): on a mid-loop -ENOMEM, registrants already linked into
 * ns->registrants are left in place - presumably the caller's error path is
 * expected to clear them; verify against spdk_nvmf_subsystem_add_ns_ext().
 */
static int
nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info)
{
	uint32_t i;
	struct spdk_nvmf_registrant *reg, *holder = NULL;
	struct spdk_uuid bdev_uuid, holder_uuid;

	SPDK_DEBUGLOG(nvmf, "NSID %u, PTPL %u, Number of registrants %u\n",
		      ns->nsid, info->ptpl_activated, info->num_regs);

	/* it's not an error */
	if (!info->ptpl_activated || !info->num_regs) {
		return 0;
	}

	/* The snapshot must belong to this exact bdev. */
	spdk_uuid_parse(&bdev_uuid, info->bdev_uuid);
	if (spdk_uuid_compare(&bdev_uuid, spdk_bdev_get_uuid(ns->bdev))) {
		SPDK_ERRLOG("Existing bdev UUID is not same with configuration file\n");
		return -EINVAL;
	}

	ns->crkey = info->crkey;
	ns->rtype = info->rtype;
	ns->ptpl_activated = info->ptpl_activated;
	spdk_uuid_parse(&holder_uuid, info->holder_uuid);

	SPDK_DEBUGLOG(nvmf, "Bdev UUID %s\n", info->bdev_uuid);
	if (info->rtype) {
		SPDK_DEBUGLOG(nvmf, "Holder UUID %s, RTYPE %u, RKEY 0x%"PRIx64"\n",
			      info->holder_uuid, info->rtype, info->crkey);
	}

	for (i = 0; i < info->num_regs; i++) {
		reg = calloc(1, sizeof(*reg));
		if (!reg) {
			return -ENOMEM;
		}
		spdk_uuid_parse(&reg->hostid, info->registrants[i].host_uuid);
		reg->rkey = info->registrants[i].rkey;
		TAILQ_INSERT_TAIL(&ns->registrants, reg, link);
		if (!spdk_uuid_compare(&holder_uuid, &reg->hostid)) {
			holder = reg;
		}
		SPDK_DEBUGLOG(nvmf, "Registrant RKEY 0x%"PRIx64", Host UUID %s\n",
			      info->registrants[i].rkey, info->registrants[i].host_uuid);
	}

	/* For all-registrants reservation types any registrant is a holder; pick
	 * the first one. Otherwise use the registrant matching the saved holder
	 * UUID. */
	if (nvmf_ns_reservation_all_registrants_type(ns)) {
		ns->holder = TAILQ_FIRST(&ns->registrants);
	} else {
		ns->holder = holder;
	}

	return 0;
}

/* spdk_json write callback: dump the serialized reservation state to the
 * persist file. cb_ctx is the file path. Returns 0 or a negative value. */
static int
nvmf_ns_json_write_cb(void *cb_ctx, const void *data, size_t size)
{
	char *file = cb_ctx;
	size_t rc;
	FILE *fd;

	fd = fopen(file, "w");
	if (!fd) {
		SPDK_ERRLOG("Can't open file %s for write\n", file);
		return -ENOENT;
	}
	rc = fwrite(data, 1, size, fd);
	fclose(fd);

	return rc == size ? 0 : -1;
}

/* Serialize info as JSON into the persist file. When PTPL is not activated
 * the file is truncated to empty instead. Returns 0 or negative errno. */
static int
nvmf_ns_reservation_update(const char *file, struct spdk_nvmf_reservation_info *info)
{
	struct spdk_json_write_ctx *w;
	uint32_t i;
	int rc = 0;

	w = spdk_json_write_begin(nvmf_ns_json_write_cb, (void *)file, 0);
	if (w == NULL) {
		return -ENOMEM;
	}
	/* clear the configuration file */
	if (!info->ptpl_activated) {
		goto exit;
	}

	spdk_json_write_object_begin(w);
	spdk_json_write_named_bool(w, "ptpl", info->ptpl_activated);
	spdk_json_write_named_uint32(w, "rtype", info->rtype);
	spdk_json_write_named_uint64(w, "crkey", info->crkey);
	spdk_json_write_named_string(w, "bdev_uuid", info->bdev_uuid);
	spdk_json_write_named_string(w, "holder_uuid", info->holder_uuid);

	spdk_json_write_named_array_begin(w, "registrants");
	for (i = 0; i < info->num_regs; i++) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_uint64(w, "rkey", info->registrants[i].rkey);
		spdk_json_write_named_string(w, "host_uuid", info->registrants[i].host_uuid);
		spdk_json_write_object_end(w);
	}
	spdk_json_write_array_end(w);
	spdk_json_write_object_end(w);

exit:
	/* spdk_json_write_end() flushes through nvmf_ns_json_write_cb(). */
	rc = spdk_json_write_end(w);
	return rc;
}

/* Snapshot the namespace's current reservation state and persist it.
 * No-op (returns 0) when the namespace has no bdev or no persist file. */
static int
nvmf_ns_update_reservation_info(struct spdk_nvmf_ns *ns)
{
	struct spdk_nvmf_reservation_info info;
	struct spdk_nvmf_registrant *reg, *tmp;
	uint32_t i = 0;

	assert(ns != NULL);

	if (!ns->bdev || !ns->ptpl_file) {
		return 0;
	}

	memset(&info, 0, sizeof(info));
	spdk_uuid_fmt_lower(info.bdev_uuid, sizeof(info.bdev_uuid), spdk_bdev_get_uuid(ns->bdev));

	if (ns->rtype) {
		info.rtype = ns->rtype;
		info.crkey = ns->crkey;
		/* All-registrants types have no single holder to record. */
		if (!nvmf_ns_reservation_all_registrants_type(ns)) {
			assert(ns->holder != NULL);
			spdk_uuid_fmt_lower(info.holder_uuid, sizeof(info.holder_uuid), &ns->holder->hostid);
		}
	}

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		spdk_uuid_fmt_lower(info.registrants[i].host_uuid, sizeof(info.registrants[i].host_uuid),
				    &reg->hostid);
		info.registrants[i++].rkey = reg->rkey;
	}

	info.num_regs = i;
	info.ptpl_activated = ns->ptpl_activated;

	return nvmf_ns_reservation_update(ns->ptpl_file, &info);
}

/* Find the registrant whose host ID matches uuid; NULL if none. */
static struct spdk_nvmf_registrant *
nvmf_ns_reservation_get_registrant(struct spdk_nvmf_ns *ns,
				   struct spdk_uuid *uuid)
{
	struct spdk_nvmf_registrant *reg, *tmp;

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		if (!spdk_uuid_compare(&reg->hostid, uuid)) {
			return reg;
		}
	}

	return NULL;
}

/* Generate reservation notice log to registered HostID controllers */
static void
nvmf_subsystem_gen_ctrlr_notification(struct spdk_nvmf_subsystem *subsystem,
				      struct spdk_nvmf_ns *ns,
				      struct spdk_uuid *hostid_list,
				      uint32_t num_hostid,
				      enum spdk_nvme_reservation_notification_log_page_type type)
{
	struct spdk_nvmf_ctrlr *ctrlr;
	uint32_t i;

	for (i = 0; i < num_hostid; i++) {
		TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
			if (!spdk_uuid_compare(&ctrlr->hostid, &hostid_list[i])) {
				nvmf_ctrlr_reservation_notice_log(ctrlr, ns, type);
			}
		}
	}
}

/* Get all registrants' hostid other than the controller who issued the command */
static uint32_t
nvmf_ns_reservation_get_all_other_hostid(struct spdk_nvmf_ns *ns,
		struct spdk_uuid *hostid_list,
		uint32_t max_num_hostid,
		struct spdk_uuid *current_hostid)
{
	struct spdk_nvmf_registrant *reg, *tmp;
	uint32_t num_hostid = 0;

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		if (spdk_uuid_compare(&reg->hostid, current_hostid)) {
			if (num_hostid == max_num_hostid) {
				assert(false);
				return max_num_hostid;
			}
			hostid_list[num_hostid++] = reg->hostid;
		}
	}

	return num_hostid;
}

/* Calculate the unregistered HostID list according to list
 * prior to execute preempt command and list after executing
 * preempt command.
 */
static uint32_t
nvmf_ns_reservation_get_unregistered_hostid(struct spdk_uuid *old_hostid_list,
		uint32_t old_num_hostid,
		struct spdk_uuid *remaining_hostid_list,
		uint32_t remaining_num_hostid)
{
	struct spdk_uuid temp_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint32_t i, j, num_hostid = 0;
	bool found;

	if (!remaining_num_hostid) {
		return old_num_hostid;
	}

	/* Keep every old hostid that no longer appears in the remaining list;
	 * the result is compacted back into old_hostid_list. */
	for (i = 0; i < old_num_hostid; i++) {
		found = false;
		for (j = 0; j < remaining_num_hostid; j++) {
			if (!spdk_uuid_compare(&old_hostid_list[i], &remaining_hostid_list[j])) {
				found = true;
				break;
			}
		}
		if (!found) {
			spdk_uuid_copy(&temp_hostid_list[num_hostid++], &old_hostid_list[i]);
		}
	}

	if (num_hostid) {
		memcpy(old_hostid_list, temp_hostid_list, sizeof(struct spdk_uuid) * num_hostid);
	}

	return num_hostid;
}

/* current reservation type is all registrants or not */
static bool
nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns)
{
	return (ns->rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS ||
		ns->rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}

/* current registrant is reservation holder or not */
static bool
nvmf_ns_reservation_registrant_is_holder(struct spdk_nvmf_ns *ns,
		struct spdk_nvmf_registrant *reg)
{
	if (!reg) {
		return false;
	}
2284 if (nvmf_ns_reservation_all_registrants_type(ns)) { 2285 return true; 2286 } 2287 2288 return (ns->holder == reg); 2289 } 2290 2291 static int 2292 nvmf_ns_reservation_add_registrant(struct spdk_nvmf_ns *ns, 2293 struct spdk_nvmf_ctrlr *ctrlr, 2294 uint64_t nrkey) 2295 { 2296 struct spdk_nvmf_registrant *reg; 2297 2298 reg = calloc(1, sizeof(*reg)); 2299 if (!reg) { 2300 return -ENOMEM; 2301 } 2302 2303 reg->rkey = nrkey; 2304 /* set hostid for the registrant */ 2305 spdk_uuid_copy(®->hostid, &ctrlr->hostid); 2306 TAILQ_INSERT_TAIL(&ns->registrants, reg, link); 2307 ns->gen++; 2308 2309 return 0; 2310 } 2311 2312 static void 2313 nvmf_ns_reservation_release_reservation(struct spdk_nvmf_ns *ns) 2314 { 2315 ns->rtype = 0; 2316 ns->crkey = 0; 2317 ns->holder = NULL; 2318 } 2319 2320 /* release the reservation if the last registrant was removed */ 2321 static void 2322 nvmf_ns_reservation_check_release_on_remove_registrant(struct spdk_nvmf_ns *ns, 2323 struct spdk_nvmf_registrant *reg) 2324 { 2325 struct spdk_nvmf_registrant *next_reg; 2326 2327 /* no reservation holder */ 2328 if (!ns->holder) { 2329 assert(ns->rtype == 0); 2330 return; 2331 } 2332 2333 next_reg = TAILQ_FIRST(&ns->registrants); 2334 if (next_reg && nvmf_ns_reservation_all_registrants_type(ns)) { 2335 /* the next valid registrant is the new holder now */ 2336 ns->holder = next_reg; 2337 } else if (nvmf_ns_reservation_registrant_is_holder(ns, reg)) { 2338 /* release the reservation */ 2339 nvmf_ns_reservation_release_reservation(ns); 2340 } 2341 } 2342 2343 static void 2344 nvmf_ns_reservation_remove_registrant(struct spdk_nvmf_ns *ns, 2345 struct spdk_nvmf_registrant *reg) 2346 { 2347 TAILQ_REMOVE(&ns->registrants, reg, link); 2348 nvmf_ns_reservation_check_release_on_remove_registrant(ns, reg); 2349 free(reg); 2350 ns->gen++; 2351 return; 2352 } 2353 2354 static uint32_t 2355 nvmf_ns_reservation_remove_registrants_by_key(struct spdk_nvmf_ns *ns, 2356 uint64_t rkey) 2357 { 2358 struct 
spdk_nvmf_registrant *reg, *tmp; 2359 uint32_t count = 0; 2360 2361 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2362 if (reg->rkey == rkey) { 2363 nvmf_ns_reservation_remove_registrant(ns, reg); 2364 count++; 2365 } 2366 } 2367 return count; 2368 } 2369 2370 static uint32_t 2371 nvmf_ns_reservation_remove_all_other_registrants(struct spdk_nvmf_ns *ns, 2372 struct spdk_nvmf_registrant *reg) 2373 { 2374 struct spdk_nvmf_registrant *reg_tmp, *reg_tmp2; 2375 uint32_t count = 0; 2376 2377 TAILQ_FOREACH_SAFE(reg_tmp, &ns->registrants, link, reg_tmp2) { 2378 if (reg_tmp != reg) { 2379 nvmf_ns_reservation_remove_registrant(ns, reg_tmp); 2380 count++; 2381 } 2382 } 2383 return count; 2384 } 2385 2386 static uint32_t 2387 nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns) 2388 { 2389 struct spdk_nvmf_registrant *reg, *reg_tmp; 2390 uint32_t count = 0; 2391 2392 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, reg_tmp) { 2393 nvmf_ns_reservation_remove_registrant(ns, reg); 2394 count++; 2395 } 2396 return count; 2397 } 2398 2399 static void 2400 nvmf_ns_reservation_acquire_reservation(struct spdk_nvmf_ns *ns, uint64_t rkey, 2401 enum spdk_nvme_reservation_type rtype, 2402 struct spdk_nvmf_registrant *holder) 2403 { 2404 ns->rtype = rtype; 2405 ns->crkey = rkey; 2406 assert(ns->holder == NULL); 2407 ns->holder = holder; 2408 } 2409 2410 static bool 2411 nvmf_ns_reservation_register(struct spdk_nvmf_ns *ns, 2412 struct spdk_nvmf_ctrlr *ctrlr, 2413 struct spdk_nvmf_request *req) 2414 { 2415 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2416 uint8_t rrega, iekey, cptpl, rtype; 2417 struct spdk_nvme_reservation_register_data key; 2418 struct spdk_nvmf_registrant *reg; 2419 uint8_t status = SPDK_NVME_SC_SUCCESS; 2420 bool update_sgroup = false; 2421 struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2422 uint32_t num_hostid = 0; 2423 int rc; 2424 2425 rrega = cmd->cdw10_bits.resv_register.rrega; 2426 iekey = 
cmd->cdw10_bits.resv_register.iekey; 2427 cptpl = cmd->cdw10_bits.resv_register.cptpl; 2428 2429 if (req->data && req->length >= sizeof(key)) { 2430 memcpy(&key, req->data, sizeof(key)); 2431 } else { 2432 SPDK_ERRLOG("No key provided. Failing request.\n"); 2433 status = SPDK_NVME_SC_INVALID_FIELD; 2434 goto exit; 2435 } 2436 2437 SPDK_DEBUGLOG(nvmf, "REGISTER: RREGA %u, IEKEY %u, CPTPL %u, " 2438 "NRKEY 0x%"PRIx64", NRKEY 0x%"PRIx64"\n", 2439 rrega, iekey, cptpl, key.crkey, key.nrkey); 2440 2441 if (cptpl == SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON) { 2442 /* Ture to OFF state, and need to be updated in the configuration file */ 2443 if (ns->ptpl_activated) { 2444 ns->ptpl_activated = 0; 2445 update_sgroup = true; 2446 } 2447 } else if (cptpl == SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS) { 2448 if (ns->ptpl_file == NULL) { 2449 status = SPDK_NVME_SC_INVALID_FIELD; 2450 goto exit; 2451 } else if (ns->ptpl_activated == 0) { 2452 ns->ptpl_activated = 1; 2453 update_sgroup = true; 2454 } 2455 } 2456 2457 /* current Host Identifier has registrant or not */ 2458 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid); 2459 2460 switch (rrega) { 2461 case SPDK_NVME_RESERVE_REGISTER_KEY: 2462 if (!reg) { 2463 /* register new controller */ 2464 if (key.nrkey == 0) { 2465 SPDK_ERRLOG("Can't register zeroed new key\n"); 2466 status = SPDK_NVME_SC_INVALID_FIELD; 2467 goto exit; 2468 } 2469 rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey); 2470 if (rc < 0) { 2471 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2472 goto exit; 2473 } 2474 update_sgroup = true; 2475 } else { 2476 /* register with same key is not an error */ 2477 if (reg->rkey != key.nrkey) { 2478 SPDK_ERRLOG("The same host already register a " 2479 "key with 0x%"PRIx64"\n", 2480 reg->rkey); 2481 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2482 goto exit; 2483 } 2484 } 2485 break; 2486 case SPDK_NVME_RESERVE_UNREGISTER_KEY: 2487 if (!reg || (!iekey && reg->rkey != key.crkey)) { 2488 SPDK_ERRLOG("No 
registrant or current key doesn't match " 2489 "with existing registrant key\n"); 2490 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2491 goto exit; 2492 } 2493 2494 rtype = ns->rtype; 2495 num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list, 2496 SPDK_NVMF_MAX_NUM_REGISTRANTS, 2497 &ctrlr->hostid); 2498 2499 nvmf_ns_reservation_remove_registrant(ns, reg); 2500 2501 if (!ns->rtype && num_hostid && (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY || 2502 rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY)) { 2503 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 2504 hostid_list, 2505 num_hostid, 2506 SPDK_NVME_RESERVATION_RELEASED); 2507 } 2508 update_sgroup = true; 2509 break; 2510 case SPDK_NVME_RESERVE_REPLACE_KEY: 2511 if (key.nrkey == 0) { 2512 SPDK_ERRLOG("Can't register zeroed new key\n"); 2513 status = SPDK_NVME_SC_INVALID_FIELD; 2514 goto exit; 2515 } 2516 /* Registrant exists */ 2517 if (reg) { 2518 if (!iekey && reg->rkey != key.crkey) { 2519 SPDK_ERRLOG("Current key doesn't match " 2520 "existing registrant key\n"); 2521 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2522 goto exit; 2523 } 2524 if (reg->rkey == key.nrkey) { 2525 goto exit; 2526 } 2527 reg->rkey = key.nrkey; 2528 } else if (iekey) { /* No registrant but IEKEY is set */ 2529 /* new registrant */ 2530 rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey); 2531 if (rc < 0) { 2532 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2533 goto exit; 2534 } 2535 } else { /* No registrant */ 2536 SPDK_ERRLOG("No registrant\n"); 2537 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2538 goto exit; 2539 2540 } 2541 update_sgroup = true; 2542 break; 2543 default: 2544 status = SPDK_NVME_SC_INVALID_FIELD; 2545 goto exit; 2546 } 2547 2548 exit: 2549 if (update_sgroup) { 2550 rc = nvmf_ns_update_reservation_info(ns); 2551 if (rc != 0) { 2552 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2553 } 2554 } 2555 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2556 
req->rsp->nvme_cpl.status.sc = status; 2557 return update_sgroup; 2558 } 2559 2560 static bool 2561 nvmf_ns_reservation_acquire(struct spdk_nvmf_ns *ns, 2562 struct spdk_nvmf_ctrlr *ctrlr, 2563 struct spdk_nvmf_request *req) 2564 { 2565 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2566 uint8_t racqa, iekey, rtype; 2567 struct spdk_nvme_reservation_acquire_data key; 2568 struct spdk_nvmf_registrant *reg; 2569 bool all_regs = false; 2570 uint32_t count = 0; 2571 bool update_sgroup = true; 2572 struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2573 uint32_t num_hostid = 0; 2574 struct spdk_uuid new_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2575 uint32_t new_num_hostid = 0; 2576 bool reservation_released = false; 2577 uint8_t status = SPDK_NVME_SC_SUCCESS; 2578 2579 racqa = cmd->cdw10_bits.resv_acquire.racqa; 2580 iekey = cmd->cdw10_bits.resv_acquire.iekey; 2581 rtype = cmd->cdw10_bits.resv_acquire.rtype; 2582 2583 if (req->data && req->length >= sizeof(key)) { 2584 memcpy(&key, req->data, sizeof(key)); 2585 } else { 2586 SPDK_ERRLOG("No key provided. 
Failing request.\n"); 2587 status = SPDK_NVME_SC_INVALID_FIELD; 2588 goto exit; 2589 } 2590 2591 SPDK_DEBUGLOG(nvmf, "ACQUIRE: RACQA %u, IEKEY %u, RTYPE %u, " 2592 "NRKEY 0x%"PRIx64", PRKEY 0x%"PRIx64"\n", 2593 racqa, iekey, rtype, key.crkey, key.prkey); 2594 2595 if (iekey || rtype > SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) { 2596 SPDK_ERRLOG("Ignore existing key field set to 1\n"); 2597 status = SPDK_NVME_SC_INVALID_FIELD; 2598 update_sgroup = false; 2599 goto exit; 2600 } 2601 2602 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid); 2603 /* must be registrant and CRKEY must match */ 2604 if (!reg || reg->rkey != key.crkey) { 2605 SPDK_ERRLOG("No registrant or current key doesn't match " 2606 "with existing registrant key\n"); 2607 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2608 update_sgroup = false; 2609 goto exit; 2610 } 2611 2612 all_regs = nvmf_ns_reservation_all_registrants_type(ns); 2613 2614 switch (racqa) { 2615 case SPDK_NVME_RESERVE_ACQUIRE: 2616 /* it's not an error for the holder to acquire same reservation type again */ 2617 if (nvmf_ns_reservation_registrant_is_holder(ns, reg) && ns->rtype == rtype) { 2618 /* do nothing */ 2619 update_sgroup = false; 2620 } else if (ns->holder == NULL) { 2621 /* fisrt time to acquire the reservation */ 2622 nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg); 2623 } else { 2624 SPDK_ERRLOG("Invalid rtype or current registrant is not holder\n"); 2625 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2626 update_sgroup = false; 2627 goto exit; 2628 } 2629 break; 2630 case SPDK_NVME_RESERVE_PREEMPT: 2631 /* no reservation holder */ 2632 if (!ns->holder) { 2633 /* unregister with PRKEY */ 2634 nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey); 2635 break; 2636 } 2637 num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list, 2638 SPDK_NVMF_MAX_NUM_REGISTRANTS, 2639 &ctrlr->hostid); 2640 2641 /* only 1 reservation holder and reservation key is valid */ 2642 if (!all_regs) { 
2643 /* preempt itself */ 2644 if (nvmf_ns_reservation_registrant_is_holder(ns, reg) && 2645 ns->crkey == key.prkey) { 2646 ns->rtype = rtype; 2647 reservation_released = true; 2648 break; 2649 } 2650 2651 if (ns->crkey == key.prkey) { 2652 nvmf_ns_reservation_remove_registrant(ns, ns->holder); 2653 nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg); 2654 reservation_released = true; 2655 } else if (key.prkey != 0) { 2656 nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey); 2657 } else { 2658 /* PRKEY is zero */ 2659 SPDK_ERRLOG("Current PRKEY is zero\n"); 2660 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2661 update_sgroup = false; 2662 goto exit; 2663 } 2664 } else { 2665 /* release all other registrants except for the current one */ 2666 if (key.prkey == 0) { 2667 nvmf_ns_reservation_remove_all_other_registrants(ns, reg); 2668 assert(ns->holder == reg); 2669 } else { 2670 count = nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey); 2671 if (count == 0) { 2672 SPDK_ERRLOG("PRKEY doesn't match any registrant\n"); 2673 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2674 update_sgroup = false; 2675 goto exit; 2676 } 2677 } 2678 } 2679 break; 2680 default: 2681 status = SPDK_NVME_SC_INVALID_FIELD; 2682 update_sgroup = false; 2683 break; 2684 } 2685 2686 exit: 2687 if (update_sgroup && racqa == SPDK_NVME_RESERVE_PREEMPT) { 2688 new_num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, new_hostid_list, 2689 SPDK_NVMF_MAX_NUM_REGISTRANTS, 2690 &ctrlr->hostid); 2691 /* Preempt notification occurs on the unregistered controllers 2692 * other than the controller who issued the command. 
2693 */ 2694 num_hostid = nvmf_ns_reservation_get_unregistered_hostid(hostid_list, 2695 num_hostid, 2696 new_hostid_list, 2697 new_num_hostid); 2698 if (num_hostid) { 2699 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 2700 hostid_list, 2701 num_hostid, 2702 SPDK_NVME_REGISTRATION_PREEMPTED); 2703 2704 } 2705 /* Reservation released notification occurs on the 2706 * controllers which are the remaining registrants other than 2707 * the controller who issued the command. 2708 */ 2709 if (reservation_released && new_num_hostid) { 2710 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 2711 new_hostid_list, 2712 new_num_hostid, 2713 SPDK_NVME_RESERVATION_RELEASED); 2714 2715 } 2716 } 2717 if (update_sgroup && ns->ptpl_activated) { 2718 if (nvmf_ns_update_reservation_info(ns)) { 2719 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2720 } 2721 } 2722 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2723 req->rsp->nvme_cpl.status.sc = status; 2724 return update_sgroup; 2725 } 2726 2727 static bool 2728 nvmf_ns_reservation_release(struct spdk_nvmf_ns *ns, 2729 struct spdk_nvmf_ctrlr *ctrlr, 2730 struct spdk_nvmf_request *req) 2731 { 2732 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2733 uint8_t rrela, iekey, rtype; 2734 struct spdk_nvmf_registrant *reg; 2735 uint64_t crkey; 2736 uint8_t status = SPDK_NVME_SC_SUCCESS; 2737 bool update_sgroup = true; 2738 struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2739 uint32_t num_hostid = 0; 2740 2741 rrela = cmd->cdw10_bits.resv_release.rrela; 2742 iekey = cmd->cdw10_bits.resv_release.iekey; 2743 rtype = cmd->cdw10_bits.resv_release.rtype; 2744 2745 if (req->data && req->length >= sizeof(crkey)) { 2746 memcpy(&crkey, req->data, sizeof(crkey)); 2747 } else { 2748 SPDK_ERRLOG("No key provided. 
Failing request.\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		goto exit;
	}

	SPDK_DEBUGLOG(nvmf, "RELEASE: RRELA %u, IEKEY %u, RTYPE %u, "
		      "CRKEY 0x%"PRIx64"\n", rrela, iekey, rtype, crkey);

	/* Ignore Existing Key is not supported for Release */
	if (iekey) {
		SPDK_ERRLOG("Ignore existing key field set to 1\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		goto exit;
	}

	/* The issuing host must be a registrant and CRKEY must match */
	reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid);
	if (!reg || reg->rkey != crkey) {
		SPDK_ERRLOG("No registrant or current key doesn't match "
			    "with existing registrant key\n");
		status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		update_sgroup = false;
		goto exit;
	}

	/* Snapshot the other hosts before any state change, for notifications */
	num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
			SPDK_NVMF_MAX_NUM_REGISTRANTS,
			&ctrlr->hostid);

	switch (rrela) {
	case SPDK_NVME_RESERVE_RELEASE:
		if (!ns->holder) {
			/* no reservation to release — not an error */
			SPDK_DEBUGLOG(nvmf, "RELEASE: no holder\n");
			update_sgroup = false;
			goto exit;
		}
		if (ns->rtype != rtype) {
			SPDK_ERRLOG("Type doesn't match\n");
			status = SPDK_NVME_SC_INVALID_FIELD;
			update_sgroup = false;
			goto exit;
		}
		if (!nvmf_ns_reservation_registrant_is_holder(ns, reg)) {
			/* not the reservation holder, this isn't an error */
			update_sgroup = false;
			goto exit;
		}

		/* Remember the type before clearing the reservation */
		rtype = ns->rtype;
		nvmf_ns_reservation_release_reservation(ns);

		/* Write Exclusive and Exclusive Access types generate no
		 * release notification.
		 */
		if (num_hostid && rtype != SPDK_NVME_RESERVE_WRITE_EXCLUSIVE &&
		    rtype != SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_RESERVATION_RELEASED);
		}
		break;
	case SPDK_NVME_RESERVE_CLEAR:
		/* Clear removes every registrant and releases the reservation */
		nvmf_ns_reservation_clear_all_registrants(ns);
		if (num_hostid) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_RESERVATION_PREEMPTED);
		}
		break;
	default:
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		goto exit;
	}

exit:
	/* Persist the new state when PTPL is active */
	if (update_sgroup && ns->ptpl_activated) {
		if (nvmf_ns_update_reservation_info(ns)) {
			status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		}
	}
	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->rsp->nvme_cpl.status.sc = status;
	return update_sgroup;
}

/*
 * Handle a Reservation Report command: fill req->data with the extended
 * reservation status data structure followed by one extended registered
 * controller data entry per registrant, truncated to the host-requested
 * transfer length.
 */
static void
nvmf_ns_reservation_report(struct spdk_nvmf_ns *ns,
			   struct spdk_nvmf_ctrlr *ctrlr,
			   struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvmf_registrant *reg, *tmp;
	struct spdk_nvme_reservation_status_extended_data *status_data;
	struct spdk_nvme_registered_ctrlr_extended_data *ctrlr_data;
	uint8_t *payload;
	uint32_t transfer_len, payload_len = 0;
	uint32_t regctl = 0;
	uint8_t status = SPDK_NVME_SC_SUCCESS;

	if (req->data == NULL) {
		SPDK_ERRLOG("No data transfer specified for request. 
" 2848 " Unable to transfer back response.\n"); 2849 status = SPDK_NVME_SC_INVALID_FIELD; 2850 goto exit; 2851 } 2852 2853 if (!cmd->cdw11_bits.resv_report.eds) { 2854 SPDK_ERRLOG("NVMeoF uses extended controller data structure, " 2855 "please set EDS bit in cdw11 and try again\n"); 2856 status = SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT; 2857 goto exit; 2858 } 2859 2860 /* Number of Dwords of the Reservation Status data structure to transfer */ 2861 transfer_len = (cmd->cdw10 + 1) * sizeof(uint32_t); 2862 payload = req->data; 2863 2864 if (transfer_len < sizeof(struct spdk_nvme_reservation_status_extended_data)) { 2865 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2866 goto exit; 2867 } 2868 2869 status_data = (struct spdk_nvme_reservation_status_extended_data *)payload; 2870 status_data->data.gen = ns->gen; 2871 status_data->data.rtype = ns->rtype; 2872 status_data->data.ptpls = ns->ptpl_activated; 2873 payload_len += sizeof(struct spdk_nvme_reservation_status_extended_data); 2874 2875 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2876 payload_len += sizeof(struct spdk_nvme_registered_ctrlr_extended_data); 2877 if (payload_len > transfer_len) { 2878 break; 2879 } 2880 2881 ctrlr_data = (struct spdk_nvme_registered_ctrlr_extended_data *) 2882 (payload + sizeof(*status_data) + sizeof(*ctrlr_data) * regctl); 2883 /* Set to 0xffffh for dynamic controller */ 2884 ctrlr_data->cntlid = 0xffff; 2885 ctrlr_data->rcsts.status = (ns->holder == reg) ? 
true : false; 2886 ctrlr_data->rkey = reg->rkey; 2887 spdk_uuid_copy((struct spdk_uuid *)ctrlr_data->hostid, ®->hostid); 2888 regctl++; 2889 } 2890 status_data->data.regctl = regctl; 2891 2892 exit: 2893 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2894 req->rsp->nvme_cpl.status.sc = status; 2895 return; 2896 } 2897 2898 static void 2899 nvmf_ns_reservation_complete(void *ctx) 2900 { 2901 struct spdk_nvmf_request *req = ctx; 2902 2903 spdk_nvmf_request_complete(req); 2904 } 2905 2906 static void 2907 _nvmf_ns_reservation_update_done(struct spdk_nvmf_subsystem *subsystem, 2908 void *cb_arg, int status) 2909 { 2910 struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)cb_arg; 2911 struct spdk_nvmf_poll_group *group = req->qpair->group; 2912 2913 spdk_thread_send_msg(group->thread, nvmf_ns_reservation_complete, req); 2914 } 2915 2916 void 2917 nvmf_ns_reservation_request(void *ctx) 2918 { 2919 struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)ctx; 2920 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2921 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 2922 struct subsystem_update_ns_ctx *update_ctx; 2923 uint32_t nsid; 2924 struct spdk_nvmf_ns *ns; 2925 bool update_sgroup = false; 2926 2927 nsid = cmd->nsid; 2928 ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid); 2929 assert(ns != NULL); 2930 2931 switch (cmd->opc) { 2932 case SPDK_NVME_OPC_RESERVATION_REGISTER: 2933 update_sgroup = nvmf_ns_reservation_register(ns, ctrlr, req); 2934 break; 2935 case SPDK_NVME_OPC_RESERVATION_ACQUIRE: 2936 update_sgroup = nvmf_ns_reservation_acquire(ns, ctrlr, req); 2937 break; 2938 case SPDK_NVME_OPC_RESERVATION_RELEASE: 2939 update_sgroup = nvmf_ns_reservation_release(ns, ctrlr, req); 2940 break; 2941 case SPDK_NVME_OPC_RESERVATION_REPORT: 2942 nvmf_ns_reservation_report(ns, ctrlr, req); 2943 break; 2944 default: 2945 break; 2946 } 2947 2948 /* update reservation information to subsystem's poll group */ 2949 if (update_sgroup) { 2950 update_ctx = 
calloc(1, sizeof(*update_ctx));
		if (update_ctx == NULL) {
			SPDK_ERRLOG("Can't alloc subsystem poll group update context\n");
			goto update_done;
		}
		update_ctx->subsystem = ctrlr->subsys;
		update_ctx->cb_fn = _nvmf_ns_reservation_update_done;
		update_ctx->cb_arg = req;

		nvmf_subsystem_update_ns(ctrlr->subsys, subsystem_update_ns_done, update_ctx);
		return;
	}

update_done:
	/* No poll-group update needed (or context allocation failed):
	 * complete the request immediately.
	 */
	_nvmf_ns_reservation_update_done(ctrlr->subsys, (void *)req, 0);
}

/*
 * Enable or disable ANA (Asymmetric Namespace Access) reporting.
 * Only permitted while the subsystem is inactive.
 *
 * Returns 0 on success, -EAGAIN if the subsystem is in any other state.
 */
int
spdk_nvmf_subsystem_set_ana_reporting(struct spdk_nvmf_subsystem *subsystem,
				      bool ana_reporting)
{
	if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) {
		return -EAGAIN;
	}

	subsystem->flags.ana_reporting = ana_reporting;

	return 0;
}

/* Return whether ANA reporting is enabled for the subsystem. */
bool
nvmf_subsystem_get_ana_reporting(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->flags.ana_reporting;
}

/* Context carried through the per-poll-group ANA change notification. */
struct subsystem_listener_update_ctx {
	struct spdk_nvmf_subsystem_listener *listener;

	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
	void *cb_arg;
};

/* Final step of the channel iteration: report status and free the context. */
static void
subsystem_listener_update_done(struct spdk_io_channel_iter *i, int status)
{
	struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->cb_arg, status);
	}
	free(ctx);
}

/*
 * Per-poll-group step: queue an ANA change async event for every
 * controller on this poll group that connected through the listener.
 */
static void
subsystem_listener_update_on_pg(struct spdk_io_channel_iter *i)
{
	struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_nvmf_subsystem_listener *listener;
	struct spdk_nvmf_poll_group *group;
	struct spdk_nvmf_ctrlr *ctrlr;

	listener = ctx->listener;
	group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i));

	/* Only notify controllers whose admin qpair lives on this poll group */
	TAILQ_FOREACH(ctrlr, &listener->subsystem->ctrlrs, link) {
		if (ctrlr->admin_qpair->group == group && ctrlr->listener == listener) {
			nvmf_ctrlr_async_event_ana_change_notice(ctrlr);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

/*
 * Set the ANA state of ANA group "anagrpid" (or of every group when
 * anagrpid is 0) for the listener matching "trid", then notify affected
 * controllers on all poll groups. cb_fn is always invoked exactly once,
 * with 0 on success or a negative errno.
 */
void
nvmf_subsystem_set_ana_state(struct spdk_nvmf_subsystem *subsystem,
			     const struct spdk_nvme_transport_id *trid,
			     enum spdk_nvme_ana_state ana_state, uint32_t anagrpid,
			     spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, void *cb_arg)
{
	struct spdk_nvmf_subsystem_listener *listener;
	struct subsystem_listener_update_ctx *ctx;
	uint32_t i;

	assert(cb_fn != NULL);
	assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	       subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED);

	if (!subsystem->flags.ana_reporting) {
		SPDK_ERRLOG("ANA reporting is disabled\n");
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	/* ANA Change state is not used, ANA Persistent Loss state
	 * is not supported yet.
	 */
	if (!(ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE ||
	      ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE ||
	      ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE)) {
		SPDK_ERRLOG("ANA state %d is not supported\n", ana_state);
		cb_fn(cb_arg, -ENOTSUP);
		return;
	}

	/* ANA group IDs are 1-based and bounded by max_nsid in this model */
	if (anagrpid > subsystem->max_nsid) {
		SPDK_ERRLOG("ANA group ID %" PRIu32 " is more than maximum\n", anagrpid);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	listener = nvmf_subsystem_find_listener(subsystem, trid);
	if (!listener) {
		SPDK_ERRLOG("Unable to find listener.\n");
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	/* Short-circuit when a single group already has the requested state */
	if (anagrpid != 0 && listener->ana_state[anagrpid - 1] == ana_state) {
		cb_fn(cb_arg, 0);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		SPDK_ERRLOG("Unable to allocate context\n");
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* anagrpid == 0 means "apply to all ANA groups" */
	for (i = 1; i <= subsystem->max_nsid; i++) {
		if (anagrpid == 0 || i == anagrpid) {
			listener->ana_state[i - 1] = ana_state;
		}
	}
	listener->ana_state_change_count++;

	ctx->listener = listener;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* Walk every poll group, then fire the completion in *_update_done */
	spdk_for_each_channel(subsystem->tgt,
			      subsystem_listener_update_on_pg,
			      ctx,
			      subsystem_listener_update_done);
}