1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2016 Intel Corporation. All rights reserved. 3 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 9 #include "nvmf_internal.h" 10 #include "transport.h" 11 12 #include "spdk/assert.h" 13 #include "spdk/likely.h" 14 #include "spdk/string.h" 15 #include "spdk/trace.h" 16 #include "spdk/nvmf_spec.h" 17 #include "spdk/uuid.h" 18 #include "spdk/json.h" 19 #include "spdk/file.h" 20 #include "spdk/bit_array.h" 21 #include "spdk/bdev.h" 22 23 #define __SPDK_BDEV_MODULE_ONLY 24 #include "spdk/bdev_module.h" 25 #include "spdk/log.h" 26 #include "spdk_internal/utf.h" 27 #include "spdk_internal/usdt.h" 28 29 #define MODEL_NUMBER_DEFAULT "SPDK bdev Controller" 30 #define NVMF_SUBSYSTEM_DEFAULT_NAMESPACES 32 31 32 /* 33 * States for parsing valid domains in NQNs according to RFC 1034 34 */ 35 enum spdk_nvmf_nqn_domain_states { 36 /* First character of a domain must be a letter */ 37 SPDK_NVMF_DOMAIN_ACCEPT_LETTER = 0, 38 39 /* Subsequent characters can be any of letter, digit, or hyphen */ 40 SPDK_NVMF_DOMAIN_ACCEPT_LDH = 1, 41 42 /* A domain label must end with either a letter or digit */ 43 SPDK_NVMF_DOMAIN_ACCEPT_ANY = 2 44 }; 45 46 static int _nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem); 47 48 /* Returns true if is a valid ASCII string as defined by the NVMe spec */ 49 static bool 50 nvmf_valid_ascii_string(const void *buf, size_t size) 51 { 52 const uint8_t *str = buf; 53 size_t i; 54 55 for (i = 0; i < size; i++) { 56 if (str[i] < 0x20 || str[i] > 0x7E) { 57 return false; 58 } 59 } 60 61 return true; 62 } 63 64 bool 65 nvmf_nqn_is_valid(const char *nqn) 66 { 67 size_t len; 68 struct spdk_uuid uuid_value; 69 uint32_t i; 70 int bytes_consumed; 71 uint32_t domain_label_length; 72 char *reverse_domain_end; 73 uint32_t reverse_domain_end_index; 74 enum 
spdk_nvmf_nqn_domain_states domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER; 75 76 /* Check for length requirements */ 77 len = strlen(nqn); 78 if (len > SPDK_NVMF_NQN_MAX_LEN) { 79 SPDK_ERRLOG("Invalid NQN \"%s\": length %zu > max %d\n", nqn, len, SPDK_NVMF_NQN_MAX_LEN); 80 return false; 81 } 82 83 /* The nqn must be at least as long as SPDK_NVMF_NQN_MIN_LEN to contain the necessary prefix. */ 84 if (len < SPDK_NVMF_NQN_MIN_LEN) { 85 SPDK_ERRLOG("Invalid NQN \"%s\": length %zu < min %d\n", nqn, len, SPDK_NVMF_NQN_MIN_LEN); 86 return false; 87 } 88 89 /* Check for discovery controller nqn */ 90 if (!strcmp(nqn, SPDK_NVMF_DISCOVERY_NQN)) { 91 return true; 92 } 93 94 /* Check for equality with the generic nqn structure of the form "nqn.2014-08.org.nvmexpress:uuid:11111111-2222-3333-4444-555555555555" */ 95 if (!strncmp(nqn, SPDK_NVMF_NQN_UUID_PRE, SPDK_NVMF_NQN_UUID_PRE_LEN)) { 96 if (len != SPDK_NVMF_NQN_UUID_PRE_LEN + SPDK_NVMF_UUID_STRING_LEN) { 97 SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not the correct length\n", nqn); 98 return false; 99 } 100 101 if (spdk_uuid_parse(&uuid_value, &nqn[SPDK_NVMF_NQN_UUID_PRE_LEN])) { 102 SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not formatted correctly\n", nqn); 103 return false; 104 } 105 return true; 106 } 107 108 /* If the nqn does not match the uuid structure, the next several checks validate the form "nqn.yyyy-mm.reverse.domain:user-string" */ 109 110 if (strncmp(nqn, "nqn.", 4) != 0) { 111 SPDK_ERRLOG("Invalid NQN \"%s\": NQN must begin with \"nqn.\".\n", nqn); 112 return false; 113 } 114 115 /* Check for yyyy-mm. 
*/ 116 if (!(isdigit(nqn[4]) && isdigit(nqn[5]) && isdigit(nqn[6]) && isdigit(nqn[7]) && 117 nqn[8] == '-' && isdigit(nqn[9]) && isdigit(nqn[10]) && nqn[11] == '.')) { 118 SPDK_ERRLOG("Invalid date code in NQN \"%s\"\n", nqn); 119 return false; 120 } 121 122 reverse_domain_end = strchr(nqn, ':'); 123 if (reverse_domain_end != NULL && (reverse_domain_end_index = reverse_domain_end - nqn) < len - 1) { 124 } else { 125 SPDK_ERRLOG("Invalid NQN \"%s\". NQN must contain user specified name with a ':' as a prefix.\n", 126 nqn); 127 return false; 128 } 129 130 /* Check for valid reverse domain */ 131 domain_label_length = 0; 132 for (i = 12; i < reverse_domain_end_index; i++) { 133 if (domain_label_length > SPDK_DOMAIN_LABEL_MAX_LEN) { 134 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". At least one Label is too long.\n", nqn); 135 return false; 136 } 137 138 switch (domain_state) { 139 140 case SPDK_NVMF_DOMAIN_ACCEPT_LETTER: { 141 if (isalpha(nqn[i])) { 142 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 143 domain_label_length++; 144 break; 145 } else { 146 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must start with a letter.\n", nqn); 147 return false; 148 } 149 } 150 151 case SPDK_NVMF_DOMAIN_ACCEPT_LDH: { 152 if (isalpha(nqn[i]) || isdigit(nqn[i])) { 153 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 154 domain_label_length++; 155 break; 156 } else if (nqn[i] == '-') { 157 if (i == reverse_domain_end_index - 1) { 158 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 159 nqn); 160 return false; 161 } 162 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH; 163 domain_label_length++; 164 break; 165 } else if (nqn[i] == '.') { 166 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 167 nqn); 168 return false; 169 } else { 170 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". 
Label names must contain only [a-z,A-Z,0-9,'-','.'].\n", 171 nqn); 172 return false; 173 } 174 } 175 176 case SPDK_NVMF_DOMAIN_ACCEPT_ANY: { 177 if (isalpha(nqn[i]) || isdigit(nqn[i])) { 178 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 179 domain_label_length++; 180 break; 181 } else if (nqn[i] == '-') { 182 if (i == reverse_domain_end_index - 1) { 183 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 184 nqn); 185 return false; 186 } 187 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH; 188 domain_label_length++; 189 break; 190 } else if (nqn[i] == '.') { 191 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER; 192 domain_label_length = 0; 193 break; 194 } else { 195 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only [a-z,A-Z,0-9,'-','.'].\n", 196 nqn); 197 return false; 198 } 199 } 200 } 201 } 202 203 i = reverse_domain_end_index + 1; 204 while (i < len) { 205 bytes_consumed = utf8_valid(&nqn[i], &nqn[len]); 206 if (bytes_consumed <= 0) { 207 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". 
Label names must contain only valid utf-8.\n", nqn); 208 return false; 209 } 210 211 i += bytes_consumed; 212 } 213 return true; 214 } 215 216 static void subsystem_state_change_on_pg(struct spdk_io_channel_iter *i); 217 218 struct spdk_nvmf_subsystem * 219 spdk_nvmf_subsystem_create(struct spdk_nvmf_tgt *tgt, 220 const char *nqn, 221 enum spdk_nvmf_subtype type, 222 uint32_t num_ns) 223 { 224 struct spdk_nvmf_subsystem *subsystem; 225 uint32_t sid; 226 227 if (spdk_nvmf_tgt_find_subsystem(tgt, nqn)) { 228 SPDK_ERRLOG("Subsystem NQN '%s' already exists\n", nqn); 229 return NULL; 230 } 231 232 if (!nvmf_nqn_is_valid(nqn)) { 233 SPDK_ERRLOG("Subsystem NQN '%s' is invalid\n", nqn); 234 return NULL; 235 } 236 237 if (type == SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT || 238 type == SPDK_NVMF_SUBTYPE_DISCOVERY) { 239 if (num_ns != 0) { 240 SPDK_ERRLOG("Discovery subsystem cannot have namespaces.\n"); 241 return NULL; 242 } 243 } else if (num_ns == 0) { 244 num_ns = NVMF_SUBSYSTEM_DEFAULT_NAMESPACES; 245 } 246 247 /* Find a free subsystem id (sid) */ 248 sid = spdk_bit_array_find_first_clear(tgt->subsystem_ids, 0); 249 if (sid == UINT32_MAX) { 250 SPDK_ERRLOG("No free subsystem IDs are available for subsystem creation\n"); 251 return NULL; 252 } 253 subsystem = calloc(1, sizeof(struct spdk_nvmf_subsystem)); 254 if (subsystem == NULL) { 255 SPDK_ERRLOG("Subsystem memory allocation failed\n"); 256 return NULL; 257 } 258 259 subsystem->thread = spdk_get_thread(); 260 subsystem->state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 261 subsystem->tgt = tgt; 262 subsystem->id = sid; 263 subsystem->subtype = type; 264 subsystem->max_nsid = num_ns; 265 subsystem->next_cntlid = 0; 266 subsystem->min_cntlid = NVMF_MIN_CNTLID; 267 subsystem->max_cntlid = NVMF_MAX_CNTLID; 268 snprintf(subsystem->subnqn, sizeof(subsystem->subnqn), "%s", nqn); 269 pthread_mutex_init(&subsystem->mutex, NULL); 270 TAILQ_INIT(&subsystem->listeners); 271 TAILQ_INIT(&subsystem->hosts); 272 TAILQ_INIT(&subsystem->ctrlrs); 273 
subsystem->used_listener_ids = spdk_bit_array_create(NVMF_MAX_LISTENERS_PER_SUBSYSTEM); 274 if (subsystem->used_listener_ids == NULL) { 275 pthread_mutex_destroy(&subsystem->mutex); 276 free(subsystem); 277 SPDK_ERRLOG("Listener id array memory allocation failed\n"); 278 return NULL; 279 } 280 281 if (num_ns != 0) { 282 subsystem->ns = calloc(num_ns, sizeof(struct spdk_nvmf_ns *)); 283 if (subsystem->ns == NULL) { 284 SPDK_ERRLOG("Namespace memory allocation failed\n"); 285 pthread_mutex_destroy(&subsystem->mutex); 286 spdk_bit_array_free(&subsystem->used_listener_ids); 287 free(subsystem); 288 return NULL; 289 } 290 subsystem->ana_group = calloc(num_ns, sizeof(uint32_t)); 291 if (subsystem->ana_group == NULL) { 292 SPDK_ERRLOG("ANA group memory allocation failed\n"); 293 pthread_mutex_destroy(&subsystem->mutex); 294 free(subsystem->ns); 295 spdk_bit_array_free(&subsystem->used_listener_ids); 296 free(subsystem); 297 return NULL; 298 } 299 } 300 301 memset(subsystem->sn, '0', sizeof(subsystem->sn) - 1); 302 subsystem->sn[sizeof(subsystem->sn) - 1] = '\0'; 303 304 snprintf(subsystem->mn, sizeof(subsystem->mn), "%s", 305 MODEL_NUMBER_DEFAULT); 306 307 spdk_bit_array_set(tgt->subsystem_ids, sid); 308 RB_INSERT(subsystem_tree, &tgt->subsystems, subsystem); 309 310 SPDK_DTRACE_PROBE1(nvmf_subsystem_create, subsystem->subnqn); 311 312 return subsystem; 313 } 314 315 /* Must hold subsystem->mutex while calling this function */ 316 static void 317 nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_host *host) 318 { 319 TAILQ_REMOVE(&subsystem->hosts, host, link); 320 free(host); 321 } 322 323 static void 324 _nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem, 325 struct spdk_nvmf_subsystem_listener *listener, 326 bool stop) 327 { 328 struct spdk_nvmf_transport *transport; 329 struct spdk_nvmf_ctrlr *ctrlr; 330 331 if (stop) { 332 transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, listener->trid->trstring); 333 if 
(transport != NULL) { 334 spdk_nvmf_transport_stop_listen(transport, listener->trid); 335 } 336 } 337 338 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 339 if (ctrlr->listener == listener) { 340 ctrlr->listener = NULL; 341 } 342 } 343 344 TAILQ_REMOVE(&subsystem->listeners, listener, link); 345 nvmf_update_discovery_log(listener->subsystem->tgt, NULL); 346 free(listener->ana_state); 347 spdk_bit_array_clear(subsystem->used_listener_ids, listener->id); 348 free(listener); 349 } 350 351 static void 352 _nvmf_subsystem_destroy_msg(void *cb_arg) 353 { 354 struct spdk_nvmf_subsystem *subsystem = cb_arg; 355 356 _nvmf_subsystem_destroy(subsystem); 357 } 358 359 static int 360 _nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem) 361 { 362 struct spdk_nvmf_ns *ns; 363 nvmf_subsystem_destroy_cb async_destroy_cb = NULL; 364 void *async_destroy_cb_arg = NULL; 365 int rc; 366 367 if (!TAILQ_EMPTY(&subsystem->ctrlrs)) { 368 SPDK_DEBUGLOG(nvmf, "subsystem %p %s has active controllers\n", subsystem, subsystem->subnqn); 369 subsystem->async_destroy = true; 370 rc = spdk_thread_send_msg(subsystem->thread, _nvmf_subsystem_destroy_msg, subsystem); 371 if (rc) { 372 SPDK_ERRLOG("Failed to send thread msg, rc %d\n", rc); 373 assert(0); 374 return rc; 375 } 376 return -EINPROGRESS; 377 } 378 379 ns = spdk_nvmf_subsystem_get_first_ns(subsystem); 380 while (ns != NULL) { 381 struct spdk_nvmf_ns *next_ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns); 382 383 spdk_nvmf_subsystem_remove_ns(subsystem, ns->opts.nsid); 384 ns = next_ns; 385 } 386 387 free(subsystem->ns); 388 free(subsystem->ana_group); 389 390 RB_REMOVE(subsystem_tree, &subsystem->tgt->subsystems, subsystem); 391 assert(spdk_bit_array_get(subsystem->tgt->subsystem_ids, subsystem->id) == true); 392 spdk_bit_array_clear(subsystem->tgt->subsystem_ids, subsystem->id); 393 394 pthread_mutex_destroy(&subsystem->mutex); 395 396 spdk_bit_array_free(&subsystem->used_listener_ids); 397 398 if (subsystem->async_destroy) { 
399 async_destroy_cb = subsystem->async_destroy_cb; 400 async_destroy_cb_arg = subsystem->async_destroy_cb_arg; 401 } 402 403 free(subsystem); 404 405 if (async_destroy_cb) { 406 async_destroy_cb(async_destroy_cb_arg); 407 } 408 409 return 0; 410 } 411 412 static struct spdk_nvmf_ns * 413 _nvmf_subsystem_get_first_zoned_ns(struct spdk_nvmf_subsystem *subsystem) 414 { 415 struct spdk_nvmf_ns *ns = spdk_nvmf_subsystem_get_first_ns(subsystem); 416 while (ns != NULL) { 417 if (ns->csi == SPDK_NVME_CSI_ZNS) { 418 return ns; 419 } 420 ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns); 421 } 422 return NULL; 423 } 424 425 int 426 spdk_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem, nvmf_subsystem_destroy_cb cpl_cb, 427 void *cpl_cb_arg) 428 { 429 struct spdk_nvmf_host *host, *host_tmp; 430 struct spdk_nvmf_transport *transport; 431 432 if (!subsystem) { 433 return -EINVAL; 434 } 435 436 SPDK_DTRACE_PROBE1(nvmf_subsystem_destroy, subsystem->subnqn); 437 438 assert(spdk_get_thread() == subsystem->thread); 439 440 if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) { 441 SPDK_ERRLOG("Subsystem can only be destroyed in inactive state, %s state %d\n", 442 subsystem->subnqn, subsystem->state); 443 return -EAGAIN; 444 } 445 if (subsystem->destroying) { 446 SPDK_ERRLOG("Subsystem destruction is already started\n"); 447 assert(0); 448 return -EALREADY; 449 } 450 451 subsystem->destroying = true; 452 453 SPDK_DEBUGLOG(nvmf, "subsystem is %p %s\n", subsystem, subsystem->subnqn); 454 455 nvmf_subsystem_remove_all_listeners(subsystem, false); 456 457 pthread_mutex_lock(&subsystem->mutex); 458 459 TAILQ_FOREACH_SAFE(host, &subsystem->hosts, link, host_tmp) { 460 for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport; 461 transport = spdk_nvmf_transport_get_next(transport)) { 462 if (transport->ops->subsystem_remove_host) { 463 transport->ops->subsystem_remove_host(transport, subsystem, host->nqn); 464 } 465 } 466 
nvmf_subsystem_remove_host(subsystem, host); 467 } 468 469 pthread_mutex_unlock(&subsystem->mutex); 470 471 subsystem->async_destroy_cb = cpl_cb; 472 subsystem->async_destroy_cb_arg = cpl_cb_arg; 473 474 return _nvmf_subsystem_destroy(subsystem); 475 } 476 477 /* we have to use the typedef in the function declaration to appease astyle. */ 478 typedef enum spdk_nvmf_subsystem_state spdk_nvmf_subsystem_state_t; 479 480 static spdk_nvmf_subsystem_state_t 481 nvmf_subsystem_get_intermediate_state(enum spdk_nvmf_subsystem_state current_state, 482 enum spdk_nvmf_subsystem_state requested_state) 483 { 484 switch (requested_state) { 485 case SPDK_NVMF_SUBSYSTEM_INACTIVE: 486 return SPDK_NVMF_SUBSYSTEM_DEACTIVATING; 487 case SPDK_NVMF_SUBSYSTEM_ACTIVE: 488 if (current_state == SPDK_NVMF_SUBSYSTEM_PAUSED) { 489 return SPDK_NVMF_SUBSYSTEM_RESUMING; 490 } else { 491 return SPDK_NVMF_SUBSYSTEM_ACTIVATING; 492 } 493 case SPDK_NVMF_SUBSYSTEM_PAUSED: 494 return SPDK_NVMF_SUBSYSTEM_PAUSING; 495 default: 496 assert(false); 497 return SPDK_NVMF_SUBSYSTEM_NUM_STATES; 498 } 499 } 500 501 static int 502 nvmf_subsystem_set_state(struct spdk_nvmf_subsystem *subsystem, 503 enum spdk_nvmf_subsystem_state state) 504 { 505 enum spdk_nvmf_subsystem_state actual_old_state, expected_old_state; 506 bool exchanged; 507 508 switch (state) { 509 case SPDK_NVMF_SUBSYSTEM_INACTIVE: 510 expected_old_state = SPDK_NVMF_SUBSYSTEM_DEACTIVATING; 511 break; 512 case SPDK_NVMF_SUBSYSTEM_ACTIVATING: 513 expected_old_state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 514 break; 515 case SPDK_NVMF_SUBSYSTEM_ACTIVE: 516 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING; 517 break; 518 case SPDK_NVMF_SUBSYSTEM_PAUSING: 519 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 520 break; 521 case SPDK_NVMF_SUBSYSTEM_PAUSED: 522 expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSING; 523 break; 524 case SPDK_NVMF_SUBSYSTEM_RESUMING: 525 expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSED; 526 break; 527 case SPDK_NVMF_SUBSYSTEM_DEACTIVATING: 
528 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 529 break; 530 default: 531 assert(false); 532 return -1; 533 } 534 535 actual_old_state = expected_old_state; 536 exchanged = __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false, 537 __ATOMIC_RELAXED, __ATOMIC_RELAXED); 538 if (spdk_unlikely(exchanged == false)) { 539 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING && 540 state == SPDK_NVMF_SUBSYSTEM_ACTIVE) { 541 expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING; 542 } 543 /* This is for the case when activating the subsystem fails. */ 544 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_ACTIVATING && 545 state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING) { 546 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING; 547 } 548 /* This is for the case when resuming the subsystem fails. */ 549 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING && 550 state == SPDK_NVMF_SUBSYSTEM_PAUSING) { 551 expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING; 552 } 553 /* This is for the case when stopping paused subsystem */ 554 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_PAUSED && 555 state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING) { 556 expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSED; 557 } 558 actual_old_state = expected_old_state; 559 __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false, 560 __ATOMIC_RELAXED, __ATOMIC_RELAXED); 561 } 562 assert(actual_old_state == expected_old_state); 563 return actual_old_state - expected_old_state; 564 } 565 566 struct subsystem_state_change_ctx { 567 struct spdk_nvmf_subsystem *subsystem; 568 uint16_t nsid; 569 570 enum spdk_nvmf_subsystem_state original_state; 571 enum spdk_nvmf_subsystem_state requested_state; 572 573 spdk_nvmf_subsystem_state_change_done cb_fn; 574 void *cb_arg; 575 }; 576 577 static void 578 subsystem_state_change_revert_done(struct spdk_io_channel_iter *i, int status) 579 { 580 struct subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 581 582 /* Nothing to be 
done here if the state setting fails, we are just screwed. */ 583 if (nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state)) { 584 SPDK_ERRLOG("Unable to revert the subsystem state after operation failure.\n"); 585 } 586 587 ctx->subsystem->changing_state = false; 588 if (ctx->cb_fn) { 589 /* return a failure here. This function only exists in an error path. */ 590 ctx->cb_fn(ctx->subsystem, ctx->cb_arg, -1); 591 } 592 free(ctx); 593 } 594 595 static void 596 subsystem_state_change_done(struct spdk_io_channel_iter *i, int status) 597 { 598 struct subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 599 enum spdk_nvmf_subsystem_state intermediate_state; 600 601 SPDK_DTRACE_PROBE4(nvmf_subsystem_change_state_done, ctx->subsystem->subnqn, 602 ctx->requested_state, ctx->original_state, status); 603 604 if (status == 0) { 605 status = nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state); 606 if (status) { 607 status = -1; 608 } 609 } 610 611 if (status) { 612 intermediate_state = nvmf_subsystem_get_intermediate_state(ctx->requested_state, 613 ctx->original_state); 614 assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES); 615 616 if (nvmf_subsystem_set_state(ctx->subsystem, intermediate_state)) { 617 goto out; 618 } 619 ctx->requested_state = ctx->original_state; 620 spdk_for_each_channel(ctx->subsystem->tgt, 621 subsystem_state_change_on_pg, 622 ctx, 623 subsystem_state_change_revert_done); 624 return; 625 } 626 627 out: 628 ctx->subsystem->changing_state = false; 629 if (ctx->cb_fn) { 630 ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status); 631 } 632 free(ctx); 633 } 634 635 static void 636 subsystem_state_change_continue(void *ctx, int status) 637 { 638 struct spdk_io_channel_iter *i = ctx; 639 struct subsystem_state_change_ctx *_ctx __attribute__((unused)); 640 641 _ctx = spdk_io_channel_iter_get_ctx(i); 642 SPDK_DTRACE_PROBE3(nvmf_pg_change_state_done, _ctx->subsystem->subnqn, 643 _ctx->requested_state, 
spdk_thread_get_id(spdk_get_thread())); 644 645 spdk_for_each_channel_continue(i, status); 646 } 647 648 static void 649 subsystem_state_change_on_pg(struct spdk_io_channel_iter *i) 650 { 651 struct subsystem_state_change_ctx *ctx; 652 struct spdk_io_channel *ch; 653 struct spdk_nvmf_poll_group *group; 654 655 ctx = spdk_io_channel_iter_get_ctx(i); 656 ch = spdk_io_channel_iter_get_channel(i); 657 group = spdk_io_channel_get_ctx(ch); 658 659 SPDK_DTRACE_PROBE3(nvmf_pg_change_state, ctx->subsystem->subnqn, 660 ctx->requested_state, spdk_thread_get_id(spdk_get_thread())); 661 switch (ctx->requested_state) { 662 case SPDK_NVMF_SUBSYSTEM_INACTIVE: 663 nvmf_poll_group_remove_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i); 664 break; 665 case SPDK_NVMF_SUBSYSTEM_ACTIVE: 666 if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVATING) { 667 nvmf_poll_group_add_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i); 668 } else if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_RESUMING) { 669 nvmf_poll_group_resume_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i); 670 } 671 break; 672 case SPDK_NVMF_SUBSYSTEM_PAUSED: 673 nvmf_poll_group_pause_subsystem(group, ctx->subsystem, ctx->nsid, subsystem_state_change_continue, 674 i); 675 break; 676 default: 677 assert(false); 678 break; 679 } 680 } 681 682 static int 683 nvmf_subsystem_state_change(struct spdk_nvmf_subsystem *subsystem, 684 uint32_t nsid, 685 enum spdk_nvmf_subsystem_state requested_state, 686 spdk_nvmf_subsystem_state_change_done cb_fn, 687 void *cb_arg) 688 { 689 struct subsystem_state_change_ctx *ctx; 690 enum spdk_nvmf_subsystem_state intermediate_state; 691 int rc; 692 693 if (__sync_val_compare_and_swap(&subsystem->changing_state, false, true)) { 694 return -EBUSY; 695 } 696 697 SPDK_DTRACE_PROBE3(nvmf_subsystem_change_state, subsystem->subnqn, 698 requested_state, subsystem->state); 699 /* If we are already in the requested state, just call the callback 
immediately. */ 700 if (subsystem->state == requested_state) { 701 subsystem->changing_state = false; 702 if (cb_fn) { 703 cb_fn(subsystem, cb_arg, 0); 704 } 705 return 0; 706 } 707 708 intermediate_state = nvmf_subsystem_get_intermediate_state(subsystem->state, requested_state); 709 assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES); 710 711 ctx = calloc(1, sizeof(*ctx)); 712 if (!ctx) { 713 subsystem->changing_state = false; 714 return -ENOMEM; 715 } 716 717 ctx->original_state = subsystem->state; 718 rc = nvmf_subsystem_set_state(subsystem, intermediate_state); 719 if (rc) { 720 free(ctx); 721 subsystem->changing_state = false; 722 return rc; 723 } 724 725 ctx->subsystem = subsystem; 726 ctx->nsid = nsid; 727 ctx->requested_state = requested_state; 728 ctx->cb_fn = cb_fn; 729 ctx->cb_arg = cb_arg; 730 731 spdk_for_each_channel(subsystem->tgt, 732 subsystem_state_change_on_pg, 733 ctx, 734 subsystem_state_change_done); 735 736 return 0; 737 } 738 739 int 740 spdk_nvmf_subsystem_start(struct spdk_nvmf_subsystem *subsystem, 741 spdk_nvmf_subsystem_state_change_done cb_fn, 742 void *cb_arg) 743 { 744 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg); 745 } 746 747 int 748 spdk_nvmf_subsystem_stop(struct spdk_nvmf_subsystem *subsystem, 749 spdk_nvmf_subsystem_state_change_done cb_fn, 750 void *cb_arg) 751 { 752 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_INACTIVE, cb_fn, cb_arg); 753 } 754 755 int 756 spdk_nvmf_subsystem_pause(struct spdk_nvmf_subsystem *subsystem, 757 uint32_t nsid, 758 spdk_nvmf_subsystem_state_change_done cb_fn, 759 void *cb_arg) 760 { 761 return nvmf_subsystem_state_change(subsystem, nsid, SPDK_NVMF_SUBSYSTEM_PAUSED, cb_fn, cb_arg); 762 } 763 764 int 765 spdk_nvmf_subsystem_resume(struct spdk_nvmf_subsystem *subsystem, 766 spdk_nvmf_subsystem_state_change_done cb_fn, 767 void *cb_arg) 768 { 769 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, 
cb_fn, cb_arg); 770 } 771 772 struct spdk_nvmf_subsystem * 773 spdk_nvmf_subsystem_get_first(struct spdk_nvmf_tgt *tgt) 774 { 775 return RB_MIN(subsystem_tree, &tgt->subsystems); 776 } 777 778 struct spdk_nvmf_subsystem * 779 spdk_nvmf_subsystem_get_next(struct spdk_nvmf_subsystem *subsystem) 780 { 781 if (!subsystem) { 782 return NULL; 783 } 784 785 return RB_NEXT(subsystem_tree, &tgt->subsystems, subsystem); 786 } 787 788 static int 789 nvmf_ns_add_host(struct spdk_nvmf_ns *ns, const char *hostnqn) 790 { 791 struct spdk_nvmf_host *host; 792 793 host = calloc(1, sizeof(*host)); 794 if (!host) { 795 return -ENOMEM; 796 } 797 snprintf(host->nqn, sizeof(host->nqn), "%s", hostnqn); 798 TAILQ_INSERT_HEAD(&ns->hosts, host, link); 799 return 0; 800 } 801 802 static void 803 nvmf_ns_remove_host(struct spdk_nvmf_ns *ns, struct spdk_nvmf_host *host) 804 { 805 TAILQ_REMOVE(&ns->hosts, host, link); 806 free(host); 807 } 808 809 static void 810 _async_event_ns_notice(void *_ctrlr) 811 { 812 struct spdk_nvmf_ctrlr *ctrlr = _ctrlr; 813 814 nvmf_ctrlr_async_event_ns_notice(ctrlr); 815 } 816 817 static void 818 send_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr) 819 { 820 spdk_thread_send_msg(ctrlr->thread, _async_event_ns_notice, ctrlr); 821 } 822 823 static int 824 nvmf_ns_visible(struct spdk_nvmf_subsystem *subsystem, 825 uint32_t nsid, 826 const char *hostnqn, 827 bool visible) 828 { 829 struct spdk_nvmf_ns *ns; 830 struct spdk_nvmf_ctrlr *ctrlr; 831 struct spdk_nvmf_host *host; 832 int rc; 833 834 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 835 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 836 assert(false); 837 return -1; 838 } 839 840 if (hostnqn == NULL || !nvmf_nqn_is_valid(hostnqn)) { 841 return -EINVAL; 842 } 843 844 if (nsid == 0 || nsid > subsystem->max_nsid) { 845 return -EINVAL; 846 } 847 848 ns = subsystem->ns[nsid - 1]; 849 if (!ns) { 850 return -ENOENT; 851 } 852 853 if (ns->always_visible) { 854 /* No individual host control */ 855 
return -EPERM; 856 } 857 858 /* Save host info to use for any future controllers. */ 859 host = nvmf_ns_find_host(ns, hostnqn); 860 if (visible && host == NULL) { 861 rc = nvmf_ns_add_host(ns, hostnqn); 862 if (rc) { 863 return rc; 864 } 865 } else if (!visible && host != NULL) { 866 nvmf_ns_remove_host(ns, host); 867 } 868 869 /* Also apply to existing controllers. */ 870 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 871 if (strcmp(hostnqn, ctrlr->hostnqn) || 872 spdk_bit_array_get(ctrlr->visible_ns, nsid - 1) == visible) { 873 continue; 874 } 875 if (visible) { 876 spdk_bit_array_set(ctrlr->visible_ns, nsid - 1); 877 } else { 878 spdk_bit_array_clear(ctrlr->visible_ns, nsid - 1); 879 } 880 send_async_event_ns_notice(ctrlr); 881 nvmf_ctrlr_ns_changed(ctrlr, nsid); 882 } 883 884 return 0; 885 } 886 887 int 888 spdk_nvmf_ns_add_host(struct spdk_nvmf_subsystem *subsystem, 889 uint32_t nsid, 890 const char *hostnqn, 891 uint32_t flags) 892 { 893 SPDK_DTRACE_PROBE4(spdk_nvmf_ns_add_host, 894 subsystem->subnqn, 895 nsid, 896 hostnqn, 897 flags); 898 return nvmf_ns_visible(subsystem, nsid, hostnqn, true); 899 } 900 901 int 902 spdk_nvmf_ns_remove_host(struct spdk_nvmf_subsystem *subsystem, 903 uint32_t nsid, 904 const char *hostnqn, 905 uint32_t flags) 906 { 907 SPDK_DTRACE_PROBE4(spdk_nvmf_ns_remove_host, 908 subsystem->subnqn, 909 nsid, 910 hostnqn, 911 flags); 912 return nvmf_ns_visible(subsystem, nsid, hostnqn, false); 913 } 914 915 /* Must hold subsystem->mutex while calling this function */ 916 static struct spdk_nvmf_host * 917 nvmf_subsystem_find_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn) 918 { 919 struct spdk_nvmf_host *host = NULL; 920 921 TAILQ_FOREACH(host, &subsystem->hosts, link) { 922 if (strcmp(hostnqn, host->nqn) == 0) { 923 return host; 924 } 925 } 926 927 return NULL; 928 } 929 930 int 931 spdk_nvmf_subsystem_add_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn, 932 const struct spdk_json_val *params) 933 { 934 
struct spdk_nvmf_host *host; 935 struct spdk_nvmf_transport *transport; 936 int rc; 937 938 if (!nvmf_nqn_is_valid(hostnqn)) { 939 return -EINVAL; 940 } 941 942 pthread_mutex_lock(&subsystem->mutex); 943 944 if (nvmf_subsystem_find_host(subsystem, hostnqn)) { 945 /* This subsystem already allows the specified host. */ 946 pthread_mutex_unlock(&subsystem->mutex); 947 return 0; 948 } 949 950 host = calloc(1, sizeof(*host)); 951 if (!host) { 952 pthread_mutex_unlock(&subsystem->mutex); 953 return -ENOMEM; 954 } 955 956 snprintf(host->nqn, sizeof(host->nqn), "%s", hostnqn); 957 958 SPDK_DTRACE_PROBE2(nvmf_subsystem_add_host, subsystem->subnqn, host->nqn); 959 960 TAILQ_INSERT_HEAD(&subsystem->hosts, host, link); 961 962 if (!TAILQ_EMPTY(&subsystem->listeners)) { 963 nvmf_update_discovery_log(subsystem->tgt, hostnqn); 964 } 965 966 for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport; 967 transport = spdk_nvmf_transport_get_next(transport)) { 968 if (transport->ops->subsystem_add_host) { 969 rc = transport->ops->subsystem_add_host(transport, subsystem, hostnqn, params); 970 if (rc) { 971 SPDK_ERRLOG("Unable to add host to %s transport\n", transport->ops->name); 972 /* Remove this host from all transports we've managed to add it to. 
 */
				pthread_mutex_unlock(&subsystem->mutex);
				spdk_nvmf_subsystem_remove_host(subsystem, hostnqn);
				return rc;
			}
		}
	}

	pthread_mutex_unlock(&subsystem->mutex);

	return 0;
}

/* Remove a host NQN from the subsystem's allowed-host list, refresh the
 * discovery log if anyone is listening, and invoke each transport's optional
 * subsystem_remove_host hook. Returns -ENOENT when the host is not present. */
int
spdk_nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	struct spdk_nvmf_host *host;
	struct spdk_nvmf_transport *transport;

	pthread_mutex_lock(&subsystem->mutex);

	host = nvmf_subsystem_find_host(subsystem, hostnqn);
	if (host == NULL) {
		pthread_mutex_unlock(&subsystem->mutex);
		return -ENOENT;
	}

	SPDK_DTRACE_PROBE2(nvmf_subsystem_remove_host, subsystem->subnqn, host->nqn);

	nvmf_subsystem_remove_host(subsystem, host);

	/* Only regenerate the discovery log when at least one listener could be
	 * reporting this subsystem. */
	if (!TAILQ_EMPTY(&subsystem->listeners)) {
		nvmf_update_discovery_log(subsystem->tgt, hostnqn);
	}

	/* The per-transport hook is optional. */
	for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
	     transport = spdk_nvmf_transport_get_next(transport)) {
		if (transport->ops->subsystem_remove_host) {
			transport->ops->subsystem_remove_host(transport, subsystem, hostnqn);
		}
	}

	pthread_mutex_unlock(&subsystem->mutex);

	return 0;
}

/* Context used to fan a host-disconnect request out across all poll groups. */
struct nvmf_subsystem_disconnect_host_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	char *hostnqn;
	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
	void *cb_arg;
};

/* Completion for the poll-group iteration started in
 * spdk_nvmf_subsystem_disconnect_host(): report status to the user callback
 * (if any) and release the context. */
static void
nvmf_subsystem_disconnect_host_fini(struct spdk_io_channel_iter *i, int status)
{
	struct nvmf_subsystem_disconnect_host_ctx *ctx;

	ctx = spdk_io_channel_iter_get_ctx(i);

	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->cb_arg, status);
	}
	free(ctx->hostnqn);
	free(ctx);
}

/* Per-poll-group step: disconnect every qpair on this group whose controller
 * belongs to the target subsystem and whose host NQN matches. */
static void
nvmf_subsystem_disconnect_qpairs_by_host(struct spdk_io_channel_iter *i)
{
	struct nvmf_subsystem_disconnect_host_ctx *ctx;
	struct spdk_nvmf_poll_group *group;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_qpair *qpair, *tmp_qpair;
	struct spdk_nvmf_ctrlr *ctrlr;

	ctx = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	/* Safe iteration: a disconnect may unlink the qpair from the list. */
	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) {
		ctrlr = qpair->ctrlr;

		if (ctrlr == NULL || ctrlr->subsys != ctx->subsystem) {
			continue;
		}

		if (strncmp(ctrlr->hostnqn, ctx->hostnqn, sizeof(ctrlr->hostnqn)) == 0) {
			/* Right now this does not wait for the queue pairs to actually disconnect. */
			spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
		}
	}
	spdk_for_each_channel_continue(i, 0);
}

/* Disconnect every queue pair associated with hostnqn on this subsystem,
 * across all poll groups. cb_fn runs once the iteration has visited every
 * group; individual qpair disconnects may still be in flight at that point
 * (see the comment in nvmf_subsystem_disconnect_qpairs_by_host()).
 * Returns -ENOMEM on allocation failure, 0 otherwise. */
int
spdk_nvmf_subsystem_disconnect_host(struct spdk_nvmf_subsystem *subsystem,
				    const char *hostnqn,
				    spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
				    void *cb_arg)
{
	struct nvmf_subsystem_disconnect_host_ctx *ctx;

	ctx = calloc(1, sizeof(struct nvmf_subsystem_disconnect_host_ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	/* Copy the NQN: the caller's string may not outlive the async iteration. */
	ctx->hostnqn = strdup(hostnqn);
	if (ctx->hostnqn == NULL) {
		free(ctx);
		return -ENOMEM;
	}

	ctx->subsystem = subsystem;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(subsystem->tgt, nvmf_subsystem_disconnect_qpairs_by_host, ctx,
			      nvmf_subsystem_disconnect_host_fini);

	return 0;
}

/* Set or clear the allow-any-host flag, refreshing the discovery log when
 * there are active listeners. Always returns 0. */
int
spdk_nvmf_subsystem_set_allow_any_host(struct spdk_nvmf_subsystem *subsystem, bool allow_any_host)
{
	pthread_mutex_lock(&subsystem->mutex);
	subsystem->flags.allow_any_host = allow_any_host;
	if (!TAILQ_EMPTY(&subsystem->listeners)) {
		nvmf_update_discovery_log(subsystem->tgt, NULL);
	}
	pthread_mutex_unlock(&subsystem->mutex);

	return 0;
}

/* Read the allow-any-host flag under the subsystem mutex. */
bool
spdk_nvmf_subsystem_get_allow_any_host(const struct spdk_nvmf_subsystem *subsystem)
{
	bool allow_any_host;
	struct
spdk_nvmf_subsystem *sub;

	/* Technically, taking the mutex modifies data in the subsystem. But the const
	 * is still important to convey that this doesn't mutate any other data. Cast
	 * it away to work around this. */
	sub = (struct spdk_nvmf_subsystem *)subsystem;

	pthread_mutex_lock(&sub->mutex);
	allow_any_host = sub->flags.allow_any_host;
	pthread_mutex_unlock(&sub->mutex);

	return allow_any_host;
}

/* Check whether the given host NQN may connect to this subsystem: true when
 * allow_any_host is set or the NQN is on the allowed-host list. A NULL
 * hostnqn is always rejected. */
bool
spdk_nvmf_subsystem_host_allowed(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	bool allowed;

	if (!hostnqn) {
		return false;
	}

	pthread_mutex_lock(&subsystem->mutex);

	if (subsystem->flags.allow_any_host) {
		pthread_mutex_unlock(&subsystem->mutex);
		return true;
	}

	allowed = nvmf_subsystem_find_host(subsystem, hostnqn) != NULL;
	pthread_mutex_unlock(&subsystem->mutex);

	return allowed;
}

/* Iterator: first entry of the subsystem's allowed-host list. */
struct spdk_nvmf_host *
spdk_nvmf_subsystem_get_first_host(struct spdk_nvmf_subsystem *subsystem)
{
	return TAILQ_FIRST(&subsystem->hosts);
}


/* Iterator: entry following prev_host in the allowed-host list. */
struct spdk_nvmf_host *
spdk_nvmf_subsystem_get_next_host(struct spdk_nvmf_subsystem *subsystem,
				  struct spdk_nvmf_host *prev_host)
{
	return TAILQ_NEXT(prev_host, link);
}

/* Accessor for a host entry's NQN string. */
const char *
spdk_nvmf_host_get_nqn(const struct spdk_nvmf_host *host)
{
	return host->nqn;
}

/* Find the subsystem listener matching the given transport ID, or NULL. */
struct spdk_nvmf_subsystem_listener *
nvmf_subsystem_find_listener(struct spdk_nvmf_subsystem *subsystem,
			     const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_subsystem_listener *listener;

	TAILQ_FOREACH(listener, &subsystem->listeners, link) {
		if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) {
			return listener;
		}
	}

	return NULL;
}

/**
 * Function to be called once the target is listening.
1187 * 1188 * \param ctx Context argument passed to this function. 1189 * \param status 0 if it completed successfully, or negative errno if it failed. 1190 */ 1191 static void 1192 _nvmf_subsystem_add_listener_done(void *ctx, int status) 1193 { 1194 struct spdk_nvmf_subsystem_listener *listener = ctx; 1195 1196 if (status) { 1197 listener->cb_fn(listener->cb_arg, status); 1198 free(listener); 1199 return; 1200 } 1201 1202 TAILQ_INSERT_HEAD(&listener->subsystem->listeners, listener, link); 1203 nvmf_update_discovery_log(listener->subsystem->tgt, NULL); 1204 listener->cb_fn(listener->cb_arg, status); 1205 } 1206 1207 void 1208 spdk_nvmf_subsystem_listener_opts_init(struct spdk_nvmf_listener_opts *opts, size_t size) 1209 { 1210 if (opts == NULL) { 1211 SPDK_ERRLOG("opts should not be NULL\n"); 1212 assert(false); 1213 return; 1214 } 1215 if (size == 0) { 1216 SPDK_ERRLOG("size should not be zero\n"); 1217 assert(false); 1218 return; 1219 } 1220 1221 memset(opts, 0, size); 1222 opts->opts_size = size; 1223 1224 #define FIELD_OK(field) \ 1225 offsetof(struct spdk_nvmf_listener_opts, field) + sizeof(opts->field) <= size 1226 1227 #define SET_FIELD(field, value) \ 1228 if (FIELD_OK(field)) { \ 1229 opts->field = value; \ 1230 } \ 1231 1232 SET_FIELD(secure_channel, false); 1233 SET_FIELD(ana_state, SPDK_NVME_ANA_OPTIMIZED_STATE); 1234 1235 #undef FIELD_OK 1236 #undef SET_FIELD 1237 } 1238 1239 static int 1240 listener_opts_copy(struct spdk_nvmf_listener_opts *src, struct spdk_nvmf_listener_opts *dst) 1241 { 1242 if (src->opts_size == 0) { 1243 SPDK_ERRLOG("source structure size should not be zero\n"); 1244 assert(false); 1245 return -EINVAL; 1246 } 1247 1248 memset(dst, 0, sizeof(*dst)); 1249 dst->opts_size = src->opts_size; 1250 1251 #define FIELD_OK(field) \ 1252 offsetof(struct spdk_nvmf_listener_opts, field) + sizeof(src->field) <= src->opts_size 1253 1254 #define SET_FIELD(field) \ 1255 if (FIELD_OK(field)) { \ 1256 dst->field = src->field; \ 1257 } \ 1258 1259 
	SET_FIELD(secure_channel);
	SET_FIELD(ana_state);
	/* We should not remove this statement, but need to update the assert statement
	 * if we add a new field, and also add a corresponding SET_FIELD statement. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_listener_opts) == 16, "Incorrect size");

#undef SET_FIELD
#undef FIELD_OK

	return 0;
}

/* Attach a listener (identified by trid) to the subsystem. The subsystem must
 * be INACTIVE or PAUSED and the listener must already exist at the transport
 * level; this only associates it with the subsystem. Completion - including
 * failures after the initial validation - is reported through cb_fn. */
static void
_nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem,
			     struct spdk_nvme_transport_id *trid,
			     spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
			     void *cb_arg, struct spdk_nvmf_listener_opts *opts)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_subsystem_listener *listener;
	struct spdk_nvmf_listener *tr_listener;
	uint32_t i;
	uint32_t id;
	int rc = 0;

	assert(cb_fn != NULL);

	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		cb_fn(cb_arg, -EAGAIN);
		return;
	}

	if (nvmf_subsystem_find_listener(subsystem, trid)) {
		/* Listener already exists in this subsystem */
		cb_fn(cb_arg, 0);
		return;
	}

	transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, trid->trstring);
	if (!transport) {
		SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n",
			    trid->trstring);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	tr_listener = nvmf_transport_find_listener(transport, trid);
	if (!tr_listener) {
		SPDK_ERRLOG("Cannot find transport listener for %s\n", trid->traddr);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	listener = calloc(1, sizeof(*listener));
	if (!listener) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Point at the transport listener's trid rather than copying it. */
	listener->trid = &tr_listener->trid;
	listener->transport = transport;
	listener->cb_fn = cb_fn;
	listener->cb_arg = cb_arg;
	listener->subsystem = subsystem;
	/* One ANA state slot per possible namespace. */
	listener->ana_state = calloc(subsystem->max_nsid, sizeof(enum spdk_nvme_ana_state));
	if (!listener->ana_state) {
		free(listener);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	spdk_nvmf_subsystem_listener_opts_init(&listener->opts, sizeof(listener->opts));
	if (opts != NULL) {
		rc = listener_opts_copy(opts, &listener->opts);
		if (rc) {
			SPDK_ERRLOG("Unable to copy listener options\n");
			free(listener->ana_state);
			free(listener);
			cb_fn(cb_arg, -EINVAL);
			return;
		}
	}

	/* Reserve a unique listener ID from the subsystem-wide bit array. */
	id = spdk_bit_array_find_first_clear(subsystem->used_listener_ids, 0);
	if (id == UINT32_MAX) {
		SPDK_ERRLOG("Cannot add any more listeners\n");
		free(listener->ana_state);
		free(listener);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	spdk_bit_array_set(subsystem->used_listener_ids, id);
	listener->id = id;

	/* Every namespace starts in the ANA state requested in the opts. */
	for (i = 0; i < subsystem->max_nsid; i++) {
		listener->ana_state[i] = listener->opts.ana_state;
	}

	if (transport->ops->listen_associate != NULL) {
		rc = transport->ops->listen_associate(transport, subsystem, trid);
	}

	SPDK_DTRACE_PROBE4(nvmf_subsystem_add_listener, subsystem->subnqn, listener->trid->trtype,
			   listener->trid->traddr, listener->trid->trsvcid);

	_nvmf_subsystem_add_listener_done(listener, rc);
}

/* Public wrapper: add a listener with default options. */
void
spdk_nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem,
				 struct spdk_nvme_transport_id *trid,
				 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
				 void *cb_arg)
{
	_nvmf_subsystem_add_listener(subsystem, trid, cb_fn, cb_arg, NULL);
}

/* Public wrapper: add a listener with caller-supplied options. */
void
spdk_nvmf_subsystem_add_listener_ext(struct spdk_nvmf_subsystem *subsystem,
				     struct spdk_nvme_transport_id *trid,
				     spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
				     void *cb_arg, struct spdk_nvmf_listener_opts *opts)
{
	_nvmf_subsystem_add_listener(subsystem, trid, cb_fn, cb_arg, opts);
}

/* Detach the listener matching trid. The subsystem must be INACTIVE or
 * PAUSED. Returns -EAGAIN in other states, -ENOENT if no such listener. */
int
spdk_nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem,
				    const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_subsystem_listener *listener;

	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		return -EAGAIN;
	}

	listener = nvmf_subsystem_find_listener(subsystem, trid);
	if (listener == NULL) {
		return -ENOENT;
	}

	SPDK_DTRACE_PROBE4(nvmf_subsystem_remove_listener, subsystem->subnqn, listener->trid->trtype,
			   listener->trid->traddr, listener->trid->trsvcid);

	_nvmf_subsystem_remove_listener(subsystem, listener, false);

	return 0;
}

/* Detach every listener; 'stop' is forwarded to the per-listener teardown. */
void
nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem,
				    bool stop)
{
	struct spdk_nvmf_subsystem_listener *listener, *listener_tmp;

	TAILQ_FOREACH_SAFE(listener, &subsystem->listeners, link, listener_tmp) {
		_nvmf_subsystem_remove_listener(subsystem, listener, stop);
	}
}

/* Check whether connections arriving on trid are acceptable for this
 * subsystem. The discovery subsystem currently accepts any listener
 * (deprecated behavior, see the warning below). */
bool
spdk_nvmf_subsystem_listener_allowed(struct spdk_nvmf_subsystem *subsystem,
				     const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_subsystem_listener *listener;

	TAILQ_FOREACH(listener, &subsystem->listeners, link) {
		if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) {
			return true;
		}
	}

	if (!strcmp(subsystem->subnqn, SPDK_NVMF_DISCOVERY_NQN)) {
		SPDK_WARNLOG("Allowing connection to discovery subsystem on %s/%s/%s, "
			     "even though this listener was not added to the discovery "
			     "subsystem. This behavior is deprecated and will be removed "
			     "in a future release.\n",
			     spdk_nvme_transport_id_trtype_str(trid->trtype), trid->traddr, trid->trsvcid);
		return true;
	}

	return false;
}

/* Iterator: first listener on the subsystem. */
struct spdk_nvmf_subsystem_listener *
spdk_nvmf_subsystem_get_first_listener(struct spdk_nvmf_subsystem *subsystem)
{
	return TAILQ_FIRST(&subsystem->listeners);
}

/* Iterator: listener following prev_listener. */
struct spdk_nvmf_subsystem_listener *
spdk_nvmf_subsystem_get_next_listener(struct spdk_nvmf_subsystem *subsystem,
				      struct spdk_nvmf_subsystem_listener *prev_listener)
{
	return TAILQ_NEXT(prev_listener, link);
}

/* Accessor for the listener's transport ID. */
const struct spdk_nvme_transport_id *
spdk_nvmf_subsystem_listener_get_trid(struct spdk_nvmf_subsystem_listener *listener)
{
	return listener->trid;
}

/* Set whether this subsystem accepts connections arriving on listeners that
 * were not explicitly added to it. */
void
spdk_nvmf_subsystem_allow_any_listener(struct spdk_nvmf_subsystem *subsystem,
				       bool allow_any_listener)
{
	subsystem->flags.allow_any_listener = allow_any_listener;
}

SPDK_LOG_DEPRECATION_REGISTER(spdk_nvmf_subsytem_any_listener_allowed,
			      "spdk_nvmf_subsytem_any_listener_allowed is deprecated", "v24.05", 0);

/* Deprecated misspelled variant kept for API compatibility; use
 * spdk_nvmf_subsystem_any_listener_allowed() instead. */
bool
spdk_nvmf_subsytem_any_listener_allowed(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_LOG_DEPRECATED(spdk_nvmf_subsytem_any_listener_allowed);
	return subsystem->flags.allow_any_listener;
}

/* Query the allow-any-listener flag. */
bool
spdk_nvmf_subsystem_any_listener_allowed(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->flags.allow_any_listener;
}

/* Context for propagating a namespace change to every poll group. */
struct subsystem_update_ns_ctx {
	struct spdk_nvmf_subsystem *subsystem;

	spdk_nvmf_subsystem_state_change_done cb_fn;
	void *cb_arg;
};

/* Completion of the per-poll-group namespace update: report status to the
 * caller and free the context. */
static void
subsystem_update_ns_done(struct spdk_io_channel_iter *i, int status)
{
	struct subsystem_update_ns_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status);
	}
	free(ctx);
}

/* Per-poll-group step: resync this group's view of the subsystem's
 * namespaces; a nonzero rc aborts the iteration with that status. */
static void
subsystem_update_ns_on_pg(struct spdk_io_channel_iter *i)
{
	int rc;
	struct subsystem_update_ns_ctx *ctx;
	struct spdk_nvmf_poll_group *group;
	struct spdk_nvmf_subsystem *subsystem;

	ctx = spdk_io_channel_iter_get_ctx(i);
	group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i));
	subsystem = ctx->subsystem;

	rc = nvmf_poll_group_update_subsystem(group, subsystem);
	spdk_for_each_channel_continue(i, rc);
}

/* Asynchronously push the subsystem's current namespace configuration to all
 * poll groups; cb_fn runs once every group has been updated. */
static int
nvmf_subsystem_update_ns(struct spdk_nvmf_subsystem *subsystem,
			 spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg)
{
	struct subsystem_update_ns_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		SPDK_ERRLOG("Can't alloc subsystem poll group update context\n");
		return -ENOMEM;
	}
	ctx->subsystem = subsystem;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(subsystem->tgt,
			      subsystem_update_ns_on_pg,
			      ctx,
			      subsystem_update_ns_done);
	return 0;
}

/* Notify every controller that can currently see namespace 'nsid' that the
 * namespace changed. */
static void
nvmf_subsystem_ns_changed(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	struct spdk_nvmf_ctrlr *ctrlr;

	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
		if (nvmf_ctrlr_ns_is_visible(ctrlr, nsid)) {
			nvmf_ctrlr_ns_changed(ctrlr, nsid);
		}
	}
}

static uint32_t nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns);

/* Detach namespace 'nsid' from the subsystem and release everything tied to
 * it (host visibility, reservations, bdev claim). The subsystem must be
 * INACTIVE or PAUSED. Returns -1 on validation failure, 0 on success. */
int
spdk_nvmf_subsystem_remove_ns(struct
spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_host *host, *tmp;
	struct spdk_nvmf_ctrlr *ctrlr;

	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		assert(false);
		return -1;
	}

	/* NSIDs are 1-based and bounded by max_nsid. */
	if (nsid == 0 || nsid > subsystem->max_nsid) {
		return -1;
	}

	ns = subsystem->ns[nsid - 1];
	if (!ns) {
		return -1;
	}

	subsystem->ns[nsid - 1] = NULL;

	assert(ns->anagrpid - 1 < subsystem->max_nsid);
	assert(subsystem->ana_group[ns->anagrpid - 1] > 0);

	/* Drop this namespace's membership from its ANA group. */
	subsystem->ana_group[ns->anagrpid - 1]--;

	TAILQ_FOREACH_SAFE(host, &ns->hosts, link, tmp) {
		nvmf_ns_remove_host(ns, host);
	}

	free(ns->ptpl_file);
	nvmf_ns_reservation_clear_all_registrants(ns);
	spdk_bdev_module_release_bdev(ns->bdev);
	spdk_bdev_close(ns->desc);
	free(ns);

	/* FDP capability is derived from the namespaces; clear it when the last
	 * namespace goes away. */
	if (subsystem->fdp_supported && !spdk_nvmf_subsystem_get_first_ns(subsystem)) {
		subsystem->fdp_supported = false;
		SPDK_DEBUGLOG(nvmf, "Subsystem with id: %u doesn't have FDP capability.\n",
			      subsystem->id);
	}

	/* The per-transport hook is optional. */
	for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
	     transport = spdk_nvmf_transport_get_next(transport)) {
		if (transport->ops->subsystem_remove_ns) {
			transport->ops->subsystem_remove_ns(transport, subsystem, nsid);
		}
	}

	nvmf_subsystem_ns_changed(subsystem, nsid);

	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
		spdk_bit_array_clear(ctrlr->visible_ns, nsid - 1);
	}

	return 0;
}

/* Context for namespace hot-remove / resize operations, which must pause the
 * subsystem before acting. */
struct subsystem_ns_change_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	spdk_nvmf_subsystem_state_change_done cb_fn;
	uint32_t nsid;
};

/* Pause-completion callback for hot remove: remove the namespace and resume
 * the subsystem. */
static void
_nvmf_ns_hot_remove(struct spdk_nvmf_subsystem *subsystem,
		    void
*cb_arg, int status)
{
	struct subsystem_ns_change_ctx *ctx = cb_arg;
	int rc;

	rc = spdk_nvmf_subsystem_remove_ns(subsystem, ctx->nsid);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to make changes to NVME-oF subsystem with id: %u\n", subsystem->id);
	}

	rc = spdk_nvmf_subsystem_resume(subsystem, NULL, NULL);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to resume NVME-oF subsystem with id: %u\n", subsystem->id);
	}

	free(ctx);
}

/* Thread-message retry path: try again to pause the subsystem so the queued
 * namespace change (ctx->cb_fn) can run; re-posts itself while the subsystem
 * is busy. */
static void
nvmf_ns_change_msg(void *ns_ctx)
{
	struct subsystem_ns_change_ctx *ctx = ns_ctx;
	int rc;

	SPDK_DTRACE_PROBE2(nvmf_ns_change, ctx->nsid, ctx->subsystem->subnqn);

	rc = spdk_nvmf_subsystem_pause(ctx->subsystem, ctx->nsid, ctx->cb_fn, ctx);
	if (rc) {
		if (rc == -EBUSY) {
			/* Try again, this is not a permanent situation. */
			spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ctx);
		} else {
			free(ctx);
			SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n");
		}
	}
}

/* Bdev hot-remove callback: pause the subsystem, then remove the namespace
 * in _nvmf_ns_hot_remove(). */
static void
nvmf_ns_hot_remove(void *remove_ctx)
{
	struct spdk_nvmf_ns *ns = remove_ctx;
	struct subsystem_ns_change_ctx *ns_ctx;
	int rc;

	/* We have to allocate a new context because this op
	 * is asynchronous and we could lose the ns in the middle.
	 */
	ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx));
	if (!ns_ctx) {
		SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n");
		return;
	}

	ns_ctx->subsystem = ns->subsystem;
	ns_ctx->nsid = ns->opts.nsid;
	ns_ctx->cb_fn = _nvmf_ns_hot_remove;

	rc = spdk_nvmf_subsystem_pause(ns->subsystem, ns_ctx->nsid, _nvmf_ns_hot_remove, ns_ctx);
	if (rc) {
		if (rc == -EBUSY) {
			/* Try again, this is not a permanent situation.
*/ 1690 spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx); 1691 } else { 1692 SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n"); 1693 free(ns_ctx); 1694 } 1695 } 1696 } 1697 1698 static void 1699 _nvmf_ns_resize(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status) 1700 { 1701 struct subsystem_ns_change_ctx *ctx = cb_arg; 1702 1703 nvmf_subsystem_ns_changed(subsystem, ctx->nsid); 1704 if (spdk_nvmf_subsystem_resume(subsystem, NULL, NULL) != 0) { 1705 SPDK_ERRLOG("Failed to resume NVME-oF subsystem with id: %u\n", subsystem->id); 1706 } 1707 1708 free(ctx); 1709 } 1710 1711 static void 1712 nvmf_ns_resize(void *event_ctx) 1713 { 1714 struct spdk_nvmf_ns *ns = event_ctx; 1715 struct subsystem_ns_change_ctx *ns_ctx; 1716 int rc; 1717 1718 /* We have to allocate a new context because this op 1719 * is asynchronous and we could lose the ns in the middle. 1720 */ 1721 ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx)); 1722 if (!ns_ctx) { 1723 SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n"); 1724 return; 1725 } 1726 1727 ns_ctx->subsystem = ns->subsystem; 1728 ns_ctx->nsid = ns->opts.nsid; 1729 ns_ctx->cb_fn = _nvmf_ns_resize; 1730 1731 /* Specify 0 for the nsid here, because we do not need to pause the namespace. 1732 * Namespaces can only be resized bigger, so there is no need to quiesce I/O. 1733 */ 1734 rc = spdk_nvmf_subsystem_pause(ns->subsystem, 0, _nvmf_ns_resize, ns_ctx); 1735 if (rc) { 1736 if (rc == -EBUSY) { 1737 /* Try again, this is not a permanent situation. 
*/ 1738 spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx); 1739 } else { 1740 SPDK_ERRLOG("Unable to pause subsystem to process namespace resize!\n"); 1741 free(ns_ctx); 1742 } 1743 } 1744 } 1745 1746 static void 1747 nvmf_ns_event(enum spdk_bdev_event_type type, 1748 struct spdk_bdev *bdev, 1749 void *event_ctx) 1750 { 1751 SPDK_DEBUGLOG(nvmf, "Bdev event: type %d, name %s, subsystem_id %d, ns_id %d\n", 1752 type, 1753 spdk_bdev_get_name(bdev), 1754 ((struct spdk_nvmf_ns *)event_ctx)->subsystem->id, 1755 ((struct spdk_nvmf_ns *)event_ctx)->nsid); 1756 1757 switch (type) { 1758 case SPDK_BDEV_EVENT_REMOVE: 1759 nvmf_ns_hot_remove(event_ctx); 1760 break; 1761 case SPDK_BDEV_EVENT_RESIZE: 1762 nvmf_ns_resize(event_ctx); 1763 break; 1764 default: 1765 SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type); 1766 break; 1767 } 1768 } 1769 1770 void 1771 spdk_nvmf_ns_opts_get_defaults(struct spdk_nvmf_ns_opts *opts, size_t opts_size) 1772 { 1773 if (!opts) { 1774 SPDK_ERRLOG("opts should not be NULL.\n"); 1775 return; 1776 } 1777 1778 if (!opts_size) { 1779 SPDK_ERRLOG("opts_size should not be zero.\n"); 1780 return; 1781 } 1782 1783 memset(opts, 0, opts_size); 1784 opts->opts_size = opts_size; 1785 1786 #define FIELD_OK(field) \ 1787 offsetof(struct spdk_nvmf_ns_opts, field) + sizeof(opts->field) <= opts_size 1788 1789 #define SET_FIELD(field, value) \ 1790 if (FIELD_OK(field)) { \ 1791 opts->field = value; \ 1792 } \ 1793 1794 /* All current fields are set to 0 by default. 
 */
	SET_FIELD(nsid, 0);
	if (FIELD_OK(nguid)) {
		memset(opts->nguid, 0, sizeof(opts->nguid));
	}
	if (FIELD_OK(eui64)) {
		memset(opts->eui64, 0, sizeof(opts->eui64));
	}
	if (FIELD_OK(uuid)) {
		spdk_uuid_set_null(&opts->uuid);
	}
	SET_FIELD(anagrpid, 0);
	SET_FIELD(transport_specific, NULL);

#undef FIELD_OK
#undef SET_FIELD
}

/* Copy user-provided ns opts into 'opts', honoring user_opts->opts_size so
 * callers built against older struct layouts remain compatible.
 * NOTE(review): the opts_size parameter is unused here; sizing comes from
 * user_opts->opts_size. */
static void
nvmf_ns_opts_copy(struct spdk_nvmf_ns_opts *opts,
		  const struct spdk_nvmf_ns_opts *user_opts,
		  size_t opts_size)
{
#define FIELD_OK(field) \
	offsetof(struct spdk_nvmf_ns_opts, field) + sizeof(opts->field) <= user_opts->opts_size

#define SET_FIELD(field) \
	if (FIELD_OK(field)) { \
		opts->field = user_opts->field; \
	} \

	SET_FIELD(nsid);
	if (FIELD_OK(nguid)) {
		memcpy(opts->nguid, user_opts->nguid, sizeof(opts->nguid));
	}
	if (FIELD_OK(eui64)) {
		memcpy(opts->eui64, user_opts->eui64, sizeof(opts->eui64));
	}
	if (FIELD_OK(uuid)) {
		spdk_uuid_copy(&opts->uuid, &user_opts->uuid);
	}
	SET_FIELD(anagrpid);
	SET_FIELD(no_auto_visible);
	SET_FIELD(transport_specific);

	opts->opts_size = user_opts->opts_size;

	/* We should not remove this statement, but need to update the assert statement
	 * if we add a new field, and also add a corresponding SET_FIELD statement.
	 */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_ns_opts) == 72, "Incorrect size");

#undef FIELD_OK
#undef SET_FIELD
}

/* Dummy bdev module used to claim bdevs.
 */
static struct spdk_bdev_module ns_bdev_module = {
	.name = "NVMe-oF Target",
};

static int nvmf_ns_reservation_update(const struct spdk_nvmf_ns *ns,
				      const struct spdk_nvmf_reservation_info *info);
static int nvmf_ns_reservation_load(const struct spdk_nvmf_ns *ns,
				    struct spdk_nvmf_reservation_info *info);
static int nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns,
				       struct spdk_nvmf_reservation_info *info);

/* Attach bdev 'bdev_name' to the subsystem as a new namespace. The subsystem
 * must be INACTIVE or PAUSED. An NSID of 0 in the opts picks the first free
 * slot. Optionally restores persistent reservations from ptpl_file.
 * Returns the assigned NSID on success, 0 on any failure. */
uint32_t
spdk_nvmf_subsystem_add_ns_ext(struct spdk_nvmf_subsystem *subsystem, const char *bdev_name,
			       const struct spdk_nvmf_ns_opts *user_opts, size_t opts_size,
			       const char *ptpl_file)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_ns_opts opts;
	struct spdk_nvmf_ns *ns, *first_ns;
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_reservation_info info = {0};
	int rc;
	bool zone_append_supported;
	uint64_t max_zone_append_size_kib;

	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		return 0;
	}

	spdk_nvmf_ns_opts_get_defaults(&opts, sizeof(opts));
	if (user_opts) {
		nvmf_ns_opts_copy(&opts, user_opts, opts_size);
	}

	if (opts.nsid == SPDK_NVME_GLOBAL_NS_TAG) {
		SPDK_ERRLOG("Invalid NSID %" PRIu32 "\n", opts.nsid);
		return 0;
	}

	if (opts.nsid == 0) {
		/*
		 * NSID not specified - find a free index.
		 *
		 * If no free slots are found, opts.nsid will be subsystem->max_nsid + 1, which will
		 * expand max_nsid if possible.
		 */
		for (opts.nsid = 1; opts.nsid <= subsystem->max_nsid; opts.nsid++) {
			if (_nvmf_subsystem_get_ns(subsystem, opts.nsid) == NULL) {
				break;
			}
		}
	}

	if (_nvmf_subsystem_get_ns(subsystem, opts.nsid)) {
		SPDK_ERRLOG("Requested NSID %" PRIu32 " already in use\n", opts.nsid);
		return 0;
	}

	if (opts.nsid > subsystem->max_nsid) {
		SPDK_ERRLOG("NSID greater than maximum not allowed\n");
		return 0;
	}

	/* ANA group defaults to one group per namespace. */
	if (opts.anagrpid == 0) {
		opts.anagrpid = opts.nsid;
	}

	if (opts.anagrpid > subsystem->max_nsid) {
		SPDK_ERRLOG("ANAGRPID greater than maximum NSID not allowed\n");
		return 0;
	}

	ns = calloc(1, sizeof(*ns));
	if (ns == NULL) {
		SPDK_ERRLOG("Namespace allocation failed\n");
		return 0;
	}

	TAILQ_INIT(&ns->hosts);
	ns->always_visible = !opts.no_auto_visible;
	if (ns->always_visible) {
		TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
			spdk_bit_array_set(ctrlr->visible_ns, opts.nsid - 1);
		}
	}

	rc = spdk_bdev_open_ext(bdev_name, true, nvmf_ns_event, ns, &ns->desc);
	if (rc != 0) {
		SPDK_ERRLOG("Subsystem %s: bdev %s cannot be opened, error=%d\n",
			    subsystem->subnqn, bdev_name, rc);
		free(ns);
		return 0;
	}

	ns->bdev = spdk_bdev_desc_get_bdev(ns->desc);

	/* Only interleaved metadata (up to the supported limit) is accepted. */
	if (spdk_bdev_get_md_size(ns->bdev) != 0) {
		if (!spdk_bdev_is_md_interleaved(ns->bdev)) {
			SPDK_ERRLOG("Can't attach bdev with separate metadata.\n");
			spdk_bdev_close(ns->desc);
			free(ns);
			return 0;
		}

		if (spdk_bdev_get_md_size(ns->bdev) > SPDK_BDEV_MAX_INTERLEAVED_MD_SIZE) {
			SPDK_ERRLOG("Maximum supported interleaved md size %u, current md size %u\n",
				    SPDK_BDEV_MAX_INTERLEAVED_MD_SIZE, spdk_bdev_get_md_size(ns->bdev));
			spdk_bdev_close(ns->desc);
			free(ns);
			return 0;
		}
	}

	rc = spdk_bdev_module_claim_bdev(ns->bdev, ns->desc, &ns_bdev_module);
	if (rc != 0) {
		spdk_bdev_close(ns->desc);
		free(ns);
		return 0;
	}

	/* Cache the zcopy capability of the bdev device */
	ns->zcopy = spdk_bdev_io_type_supported(ns->bdev, SPDK_BDEV_IO_TYPE_ZCOPY);

	if (spdk_uuid_is_null(&opts.uuid)) {
		opts.uuid = *spdk_bdev_get_uuid(ns->bdev);
	}

	/* if nguid descriptor is supported by bdev module (nvme) then uuid = nguid */
	if (spdk_mem_all_zero(opts.nguid, sizeof(opts.nguid))) {
		SPDK_STATIC_ASSERT(sizeof(opts.nguid) == sizeof(opts.uuid), "size mismatch");
		memcpy(opts.nguid, spdk_bdev_get_uuid(ns->bdev), sizeof(opts.nguid));
	}

	if (spdk_bdev_is_zoned(ns->bdev)) {
		SPDK_DEBUGLOG(nvmf, "The added namespace is backed by a zoned block device.\n");
		ns->csi = SPDK_NVME_CSI_ZNS;

		zone_append_supported = spdk_bdev_io_type_supported(ns->bdev,
				       SPDK_BDEV_IO_TYPE_ZONE_APPEND);
		max_zone_append_size_kib = spdk_bdev_get_max_zone_append_size(
						   ns->bdev) * spdk_bdev_get_block_size(ns->bdev);

		/* All zoned namespaces in a subsystem must agree on zone append
		 * support and limits. */
		if (_nvmf_subsystem_get_first_zoned_ns(subsystem) != NULL &&
		    (subsystem->zone_append_supported != zone_append_supported ||
		     subsystem->max_zone_append_size_kib != max_zone_append_size_kib)) {
			SPDK_ERRLOG("Namespaces with different zone append support or different zone append size are not allowed.\n");
			goto err;
		}

		subsystem->zone_append_supported = zone_append_supported;
		subsystem->max_zone_append_size_kib = max_zone_append_size_kib;
	}

	first_ns = spdk_nvmf_subsystem_get_first_ns(subsystem);
	if (!first_ns) {
		if (spdk_bdev_get_nvme_ctratt(ns->bdev).bits.fdps) {
			SPDK_DEBUGLOG(nvmf, "Subsystem with id: %u has FDP capability.\n",
				      subsystem->id);
			subsystem->fdp_supported = true;
		}
	} else {
		/* Mixing FDP and non-FDP namespaces in one subsystem is not allowed. */
		if (spdk_bdev_get_nvme_ctratt(first_ns->bdev).bits.fdps !=
		    spdk_bdev_get_nvme_ctratt(ns->bdev).bits.fdps) {
			SPDK_ERRLOG("Subsystem with id: %u can%s FDP namespace.\n", subsystem->id,
				    spdk_bdev_get_nvme_ctratt(first_ns->bdev).bits.fdps ? " only add" : "not add");
			goto err;
		}
	}

	ns->opts = opts;
	ns->subsystem = subsystem;
	subsystem->ns[opts.nsid - 1] = ns;
	ns->nsid = opts.nsid;
	ns->anagrpid = opts.anagrpid;
	subsystem->ana_group[ns->anagrpid - 1]++;
	TAILQ_INIT(&ns->registrants);
	if (ptpl_file) {
		ns->ptpl_file = strdup(ptpl_file);
		if (!ns->ptpl_file) {
			SPDK_ERRLOG("Namespace ns->ptpl_file allocation failed\n");
			goto err;
		}
	}

	if (nvmf_ns_is_ptpl_capable(ns)) {
		rc = nvmf_ns_reservation_load(ns, &info);
		if (rc) {
			SPDK_ERRLOG("Subsystem load reservation failed\n");
			goto err;
		}

		rc = nvmf_ns_reservation_restore(ns, &info);
		if (rc) {
			SPDK_ERRLOG("Subsystem restore reservation failed\n");
			goto err;
		}
	}

	for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
	     transport = spdk_nvmf_transport_get_next(transport)) {
		if (transport->ops->subsystem_add_ns) {
			rc = transport->ops->subsystem_add_ns(transport, subsystem, ns);
			if (rc) {
				SPDK_ERRLOG("Namespace attachment is not allowed by %s transport\n", transport->ops->name);
				nvmf_ns_reservation_clear_all_registrants(ns);
				goto err;
			}
		}
	}

	/* JSON value obj is freed before sending the response. Set NULL to prevent usage of dangling pointer. */
	ns->opts.transport_specific = NULL;

	SPDK_DEBUGLOG(nvmf, "Subsystem %s: bdev %s assigned nsid %" PRIu32 "\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem),
		      bdev_name,
		      opts.nsid);

	nvmf_subsystem_ns_changed(subsystem, opts.nsid);

	SPDK_DTRACE_PROBE2(nvmf_subsystem_add_ns, subsystem->subnqn, ns->nsid);

	return opts.nsid;
err:
	/* NOTE(review): failures reached after the ana_group increment above do
	 * not decrement it here, and visible_ns bits set earlier are not
	 * cleared - verify whether the error path needs that cleanup. */
	subsystem->ns[opts.nsid - 1] = NULL;
	spdk_bdev_module_release_bdev(ns->bdev);
	spdk_bdev_close(ns->desc);
	free(ns->ptpl_file);
	free(ns);

	return 0;
}

/* Return the next in-use NSID strictly greater than prev_nsid, or 0. */
static uint32_t
nvmf_subsystem_get_next_allocated_nsid(struct spdk_nvmf_subsystem *subsystem,
				       uint32_t prev_nsid)
{
	uint32_t nsid;

	if (prev_nsid >= subsystem->max_nsid) {
		return 0;
	}

	for (nsid = prev_nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return nsid;
		}
	}

	return 0;
}

/* Iterator: first active namespace in the subsystem, or NULL. */
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	uint32_t first_nsid;

	first_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, 0);
	return _nvmf_subsystem_get_ns(subsystem, first_nsid);
}

/* Iterator: active namespace following prev_ns, or NULL. */
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t next_nsid;

	next_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, prev_ns->opts.nsid);
	return _nvmf_subsystem_get_ns(subsystem, next_nsid);
}

/* Look up a namespace by NSID, or NULL. */
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	return _nvmf_subsystem_get_ns(subsystem, nsid);
}

/* Accessor for the namespace ID. */
uint32_t
spdk_nvmf_ns_get_id(const struct spdk_nvmf_ns *ns)
{
	return ns->opts.nsid;
}

/* Accessor for the bdev backing the namespace. */
struct spdk_bdev *
spdk_nvmf_ns_get_bdev(struct spdk_nvmf_ns *ns)
{
	return
ns->bdev;
}

/* Copy the namespace's opts into caller storage, truncating or zero-filling
 * to opts_size. */
void
spdk_nvmf_ns_get_opts(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_ns_opts *opts,
		      size_t opts_size)
{
	memset(opts, 0, opts_size);
	memcpy(opts, &ns->opts, spdk_min(sizeof(ns->opts), opts_size));
}

/* Accessor for the subsystem serial number. */
const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

/* Set the serial number; it must fit the sn field and be printable ASCII.
 * Returns -1 on invalid input, 0 on success. */
int
spdk_nvmf_subsystem_set_sn(struct spdk_nvmf_subsystem *subsystem, const char *sn)
{
	size_t len, max_len;

	max_len = sizeof(subsystem->sn) - 1;
	len = strlen(sn);
	if (len > max_len) {
		SPDK_DEBUGLOG(nvmf, "Invalid sn \"%s\": length %zu > max %zu\n",
			      sn, len, max_len);
		return -1;
	}

	if (!nvmf_valid_ascii_string(sn, len)) {
		SPDK_DEBUGLOG(nvmf, "Non-ASCII sn\n");
		SPDK_LOGDUMP(nvmf, "sn", sn, len);
		return -1;
	}

	snprintf(subsystem->sn, sizeof(subsystem->sn), "%s", sn);

	return 0;
}

/* Accessor for the subsystem model number. */
const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

/* Set the model number (NULL selects MODEL_NUMBER_DEFAULT); it must fit the
 * mn field and be printable ASCII. Returns -1 on invalid input. */
int
spdk_nvmf_subsystem_set_mn(struct spdk_nvmf_subsystem *subsystem, const char *mn)
{
	size_t len, max_len;

	if (mn == NULL) {
		mn = MODEL_NUMBER_DEFAULT;
	}
	max_len = sizeof(subsystem->mn) - 1;
	len = strlen(mn);
	if (len > max_len) {
		SPDK_DEBUGLOG(nvmf, "Invalid mn \"%s\": length %zu > max %zu\n",
			      mn, len, max_len);
		return -1;
	}

	if (!nvmf_valid_ascii_string(mn, len)) {
		SPDK_DEBUGLOG(nvmf, "Non-ASCII mn\n");
		SPDK_LOGDUMP(nvmf, "mn", mn, len);
		return -1;
	}

	snprintf(subsystem->mn, sizeof(subsystem->mn), "%s", mn);

	return 0;
}

/* Accessor for the subsystem NQN. */
const char *
spdk_nvmf_subsystem_get_nqn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->subnqn;
}

/* We have to use the typedef in the
function declaration to appease astyle. */ 2219 typedef enum spdk_nvmf_subtype spdk_nvmf_subtype_t; 2220 2221 spdk_nvmf_subtype_t 2222 spdk_nvmf_subsystem_get_type(struct spdk_nvmf_subsystem *subsystem) 2223 { 2224 return subsystem->subtype; 2225 } 2226 2227 uint32_t 2228 spdk_nvmf_subsystem_get_max_nsid(struct spdk_nvmf_subsystem *subsystem) 2229 { 2230 return subsystem->max_nsid; 2231 } 2232 2233 int 2234 nvmf_subsystem_set_cntlid_range(struct spdk_nvmf_subsystem *subsystem, 2235 uint16_t min_cntlid, uint16_t max_cntlid) 2236 { 2237 if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) { 2238 return -EAGAIN; 2239 } 2240 2241 if (min_cntlid > max_cntlid) { 2242 return -EINVAL; 2243 } 2244 /* The spec reserves cntlid values in the range FFF0h to FFFFh. */ 2245 if (min_cntlid < NVMF_MIN_CNTLID || min_cntlid > NVMF_MAX_CNTLID || 2246 max_cntlid < NVMF_MIN_CNTLID || max_cntlid > NVMF_MAX_CNTLID) { 2247 return -EINVAL; 2248 } 2249 subsystem->min_cntlid = min_cntlid; 2250 subsystem->max_cntlid = max_cntlid; 2251 if (subsystem->next_cntlid < min_cntlid || subsystem->next_cntlid > max_cntlid - 1) { 2252 subsystem->next_cntlid = min_cntlid - 1; 2253 } 2254 2255 return 0; 2256 } 2257 2258 static uint16_t 2259 nvmf_subsystem_gen_cntlid(struct spdk_nvmf_subsystem *subsystem) 2260 { 2261 int count; 2262 2263 /* 2264 * In the worst case, we might have to try all CNTLID values between min_cntlid and max_cntlid 2265 * before we find one that is unused (or find that all values are in use). 2266 */ 2267 for (count = 0; count < subsystem->max_cntlid - subsystem->min_cntlid + 1; count++) { 2268 subsystem->next_cntlid++; 2269 if (subsystem->next_cntlid > subsystem->max_cntlid) { 2270 subsystem->next_cntlid = subsystem->min_cntlid; 2271 } 2272 2273 /* Check if a controller with this cntlid currently exists. 
*/ 2274 if (nvmf_subsystem_get_ctrlr(subsystem, subsystem->next_cntlid) == NULL) { 2275 /* Found unused cntlid */ 2276 return subsystem->next_cntlid; 2277 } 2278 } 2279 2280 /* All valid cntlid values are in use. */ 2281 return 0xFFFF; 2282 } 2283 2284 int 2285 nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr) 2286 { 2287 2288 if (ctrlr->dynamic_ctrlr) { 2289 ctrlr->cntlid = nvmf_subsystem_gen_cntlid(subsystem); 2290 if (ctrlr->cntlid == 0xFFFF) { 2291 /* Unable to get a cntlid */ 2292 SPDK_ERRLOG("Reached max simultaneous ctrlrs\n"); 2293 return -EBUSY; 2294 } 2295 } else if (nvmf_subsystem_get_ctrlr(subsystem, ctrlr->cntlid) != NULL) { 2296 SPDK_ERRLOG("Ctrlr with cntlid %u already exist\n", ctrlr->cntlid); 2297 return -EEXIST; 2298 } 2299 2300 TAILQ_INSERT_TAIL(&subsystem->ctrlrs, ctrlr, link); 2301 2302 SPDK_DTRACE_PROBE3(nvmf_subsystem_add_ctrlr, subsystem->subnqn, ctrlr, ctrlr->hostnqn); 2303 2304 return 0; 2305 } 2306 2307 void 2308 nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem, 2309 struct spdk_nvmf_ctrlr *ctrlr) 2310 { 2311 SPDK_DTRACE_PROBE3(nvmf_subsystem_remove_ctrlr, subsystem->subnqn, ctrlr, ctrlr->hostnqn); 2312 2313 assert(spdk_get_thread() == subsystem->thread); 2314 assert(subsystem == ctrlr->subsys); 2315 SPDK_DEBUGLOG(nvmf, "remove ctrlr %p id 0x%x from subsys %p %s\n", ctrlr, ctrlr->cntlid, subsystem, 2316 subsystem->subnqn); 2317 TAILQ_REMOVE(&subsystem->ctrlrs, ctrlr, link); 2318 } 2319 2320 struct spdk_nvmf_ctrlr * 2321 nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid) 2322 { 2323 struct spdk_nvmf_ctrlr *ctrlr; 2324 2325 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 2326 if (ctrlr->cntlid == cntlid) { 2327 return ctrlr; 2328 } 2329 } 2330 2331 return NULL; 2332 } 2333 2334 uint32_t 2335 spdk_nvmf_subsystem_get_max_namespaces(const struct spdk_nvmf_subsystem *subsystem) 2336 { 2337 return subsystem->max_nsid; 2338 } 2339 2340 uint16_t 2341 
spdk_nvmf_subsystem_get_min_cntlid(const struct spdk_nvmf_subsystem *subsystem) 2342 { 2343 return subsystem->min_cntlid; 2344 } 2345 2346 uint16_t 2347 spdk_nvmf_subsystem_get_max_cntlid(const struct spdk_nvmf_subsystem *subsystem) 2348 { 2349 return subsystem->max_cntlid; 2350 } 2351 2352 struct _nvmf_ns_registrant { 2353 uint64_t rkey; 2354 char *host_uuid; 2355 }; 2356 2357 struct _nvmf_ns_registrants { 2358 size_t num_regs; 2359 struct _nvmf_ns_registrant reg[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2360 }; 2361 2362 struct _nvmf_ns_reservation { 2363 bool ptpl_activated; 2364 enum spdk_nvme_reservation_type rtype; 2365 uint64_t crkey; 2366 char *bdev_uuid; 2367 char *holder_uuid; 2368 struct _nvmf_ns_registrants regs; 2369 }; 2370 2371 static const struct spdk_json_object_decoder nvmf_ns_pr_reg_decoders[] = { 2372 {"rkey", offsetof(struct _nvmf_ns_registrant, rkey), spdk_json_decode_uint64}, 2373 {"host_uuid", offsetof(struct _nvmf_ns_registrant, host_uuid), spdk_json_decode_string}, 2374 }; 2375 2376 static int 2377 nvmf_decode_ns_pr_reg(const struct spdk_json_val *val, void *out) 2378 { 2379 struct _nvmf_ns_registrant *reg = out; 2380 2381 return spdk_json_decode_object(val, nvmf_ns_pr_reg_decoders, 2382 SPDK_COUNTOF(nvmf_ns_pr_reg_decoders), reg); 2383 } 2384 2385 static int 2386 nvmf_decode_ns_pr_regs(const struct spdk_json_val *val, void *out) 2387 { 2388 struct _nvmf_ns_registrants *regs = out; 2389 2390 return spdk_json_decode_array(val, nvmf_decode_ns_pr_reg, regs->reg, 2391 SPDK_NVMF_MAX_NUM_REGISTRANTS, ®s->num_regs, 2392 sizeof(struct _nvmf_ns_registrant)); 2393 } 2394 2395 static const struct spdk_json_object_decoder nvmf_ns_pr_decoders[] = { 2396 {"ptpl", offsetof(struct _nvmf_ns_reservation, ptpl_activated), spdk_json_decode_bool, true}, 2397 {"rtype", offsetof(struct _nvmf_ns_reservation, rtype), spdk_json_decode_uint32, true}, 2398 {"crkey", offsetof(struct _nvmf_ns_reservation, crkey), spdk_json_decode_uint64, true}, 2399 {"bdev_uuid", offsetof(struct 
_nvmf_ns_reservation, bdev_uuid), spdk_json_decode_string}, 2400 {"holder_uuid", offsetof(struct _nvmf_ns_reservation, holder_uuid), spdk_json_decode_string, true}, 2401 {"registrants", offsetof(struct _nvmf_ns_reservation, regs), nvmf_decode_ns_pr_regs}, 2402 }; 2403 2404 static int 2405 nvmf_ns_reservation_load_json(const struct spdk_nvmf_ns *ns, 2406 struct spdk_nvmf_reservation_info *info) 2407 { 2408 size_t json_size; 2409 ssize_t values_cnt, rc; 2410 void *json = NULL, *end; 2411 struct spdk_json_val *values = NULL; 2412 struct _nvmf_ns_reservation res = {}; 2413 const char *file = ns->ptpl_file; 2414 uint32_t i; 2415 2416 /* Load all persist file contents into a local buffer */ 2417 json = spdk_posix_file_load_from_name(file, &json_size); 2418 if (!json) { 2419 SPDK_ERRLOG("Load persit file %s failed\n", file); 2420 return -ENOMEM; 2421 } 2422 2423 rc = spdk_json_parse(json, json_size, NULL, 0, &end, 0); 2424 if (rc < 0) { 2425 SPDK_NOTICELOG("Parsing JSON configuration failed (%zd)\n", rc); 2426 goto exit; 2427 } 2428 2429 values_cnt = rc; 2430 values = calloc(values_cnt, sizeof(struct spdk_json_val)); 2431 if (values == NULL) { 2432 goto exit; 2433 } 2434 2435 rc = spdk_json_parse(json, json_size, values, values_cnt, &end, 0); 2436 if (rc != values_cnt) { 2437 SPDK_ERRLOG("Parsing JSON configuration failed (%zd)\n", rc); 2438 goto exit; 2439 } 2440 2441 /* Decode json */ 2442 if (spdk_json_decode_object(values, nvmf_ns_pr_decoders, 2443 SPDK_COUNTOF(nvmf_ns_pr_decoders), 2444 &res)) { 2445 SPDK_ERRLOG("Invalid objects in the persist file %s\n", file); 2446 rc = -EINVAL; 2447 goto exit; 2448 } 2449 2450 if (res.regs.num_regs > SPDK_NVMF_MAX_NUM_REGISTRANTS) { 2451 SPDK_ERRLOG("Can only support up to %u registrants\n", SPDK_NVMF_MAX_NUM_REGISTRANTS); 2452 rc = -ERANGE; 2453 goto exit; 2454 } 2455 2456 rc = 0; 2457 info->ptpl_activated = res.ptpl_activated; 2458 info->rtype = res.rtype; 2459 info->crkey = res.crkey; 2460 snprintf(info->bdev_uuid, 
sizeof(info->bdev_uuid), "%s", res.bdev_uuid); 2461 snprintf(info->holder_uuid, sizeof(info->holder_uuid), "%s", res.holder_uuid); 2462 info->num_regs = res.regs.num_regs; 2463 for (i = 0; i < res.regs.num_regs; i++) { 2464 info->registrants[i].rkey = res.regs.reg[i].rkey; 2465 snprintf(info->registrants[i].host_uuid, sizeof(info->registrants[i].host_uuid), "%s", 2466 res.regs.reg[i].host_uuid); 2467 } 2468 2469 exit: 2470 free(json); 2471 free(values); 2472 free(res.bdev_uuid); 2473 free(res.holder_uuid); 2474 for (i = 0; i < res.regs.num_regs; i++) { 2475 free(res.regs.reg[i].host_uuid); 2476 } 2477 2478 return rc; 2479 } 2480 2481 static bool nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns); 2482 2483 static int 2484 nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info) 2485 { 2486 uint32_t i; 2487 struct spdk_nvmf_registrant *reg, *holder = NULL; 2488 struct spdk_uuid bdev_uuid, holder_uuid; 2489 bool rkey_flag = false; 2490 2491 SPDK_DEBUGLOG(nvmf, "NSID %u, PTPL %u, Number of registrants %u\n", 2492 ns->nsid, info->ptpl_activated, info->num_regs); 2493 2494 /* it's not an error */ 2495 if (!info->ptpl_activated || !info->num_regs) { 2496 return 0; 2497 } 2498 2499 /* Check info->crkey exist or not in info->registrants[i].rkey */ 2500 for (i = 0; i < info->num_regs; i++) { 2501 if (info->crkey == info->registrants[i].rkey) { 2502 rkey_flag = true; 2503 } 2504 } 2505 if (!rkey_flag && info->crkey != 0) { 2506 return -EINVAL; 2507 } 2508 2509 spdk_uuid_parse(&bdev_uuid, info->bdev_uuid); 2510 if (spdk_uuid_compare(&bdev_uuid, spdk_bdev_get_uuid(ns->bdev))) { 2511 SPDK_ERRLOG("Existing bdev UUID is not same with configuration file\n"); 2512 return -EINVAL; 2513 } 2514 2515 ns->crkey = info->crkey; 2516 ns->rtype = info->rtype; 2517 ns->ptpl_activated = info->ptpl_activated; 2518 spdk_uuid_parse(&holder_uuid, info->holder_uuid); 2519 2520 SPDK_DEBUGLOG(nvmf, "Bdev UUID %s\n", info->bdev_uuid); 2521 if 
(info->rtype) { 2522 SPDK_DEBUGLOG(nvmf, "Holder UUID %s, RTYPE %u, RKEY 0x%"PRIx64"\n", 2523 info->holder_uuid, info->rtype, info->crkey); 2524 } 2525 2526 for (i = 0; i < info->num_regs; i++) { 2527 reg = calloc(1, sizeof(*reg)); 2528 if (!reg) { 2529 return -ENOMEM; 2530 } 2531 spdk_uuid_parse(®->hostid, info->registrants[i].host_uuid); 2532 reg->rkey = info->registrants[i].rkey; 2533 TAILQ_INSERT_TAIL(&ns->registrants, reg, link); 2534 if (info->crkey != 0 && !spdk_uuid_compare(&holder_uuid, ®->hostid)) { 2535 holder = reg; 2536 } 2537 SPDK_DEBUGLOG(nvmf, "Registrant RKEY 0x%"PRIx64", Host UUID %s\n", 2538 info->registrants[i].rkey, info->registrants[i].host_uuid); 2539 } 2540 2541 if (nvmf_ns_reservation_all_registrants_type(ns)) { 2542 ns->holder = TAILQ_FIRST(&ns->registrants); 2543 } else { 2544 ns->holder = holder; 2545 } 2546 2547 return 0; 2548 } 2549 2550 static int 2551 nvmf_ns_json_write_cb(void *cb_ctx, const void *data, size_t size) 2552 { 2553 char *file = cb_ctx; 2554 size_t rc; 2555 FILE *fd; 2556 2557 fd = fopen(file, "w"); 2558 if (!fd) { 2559 SPDK_ERRLOG("Can't open file %s for write\n", file); 2560 return -ENOENT; 2561 } 2562 rc = fwrite(data, 1, size, fd); 2563 fclose(fd); 2564 2565 return rc == size ? 
0 : -1; 2566 } 2567 2568 static int 2569 nvmf_ns_reservation_update_json(const struct spdk_nvmf_ns *ns, 2570 const struct spdk_nvmf_reservation_info *info) 2571 { 2572 const char *file = ns->ptpl_file; 2573 struct spdk_json_write_ctx *w; 2574 uint32_t i; 2575 int rc = 0; 2576 2577 w = spdk_json_write_begin(nvmf_ns_json_write_cb, (void *)file, 0); 2578 if (w == NULL) { 2579 return -ENOMEM; 2580 } 2581 /* clear the configuration file */ 2582 if (!info->ptpl_activated) { 2583 goto exit; 2584 } 2585 2586 spdk_json_write_object_begin(w); 2587 spdk_json_write_named_bool(w, "ptpl", info->ptpl_activated); 2588 spdk_json_write_named_uint32(w, "rtype", info->rtype); 2589 spdk_json_write_named_uint64(w, "crkey", info->crkey); 2590 spdk_json_write_named_string(w, "bdev_uuid", info->bdev_uuid); 2591 spdk_json_write_named_string(w, "holder_uuid", info->holder_uuid); 2592 2593 spdk_json_write_named_array_begin(w, "registrants"); 2594 for (i = 0; i < info->num_regs; i++) { 2595 spdk_json_write_object_begin(w); 2596 spdk_json_write_named_uint64(w, "rkey", info->registrants[i].rkey); 2597 spdk_json_write_named_string(w, "host_uuid", info->registrants[i].host_uuid); 2598 spdk_json_write_object_end(w); 2599 } 2600 spdk_json_write_array_end(w); 2601 spdk_json_write_object_end(w); 2602 2603 exit: 2604 rc = spdk_json_write_end(w); 2605 return rc; 2606 } 2607 2608 static int 2609 nvmf_ns_update_reservation_info(struct spdk_nvmf_ns *ns) 2610 { 2611 struct spdk_nvmf_reservation_info info; 2612 struct spdk_nvmf_registrant *reg, *tmp; 2613 uint32_t i = 0; 2614 2615 assert(ns != NULL); 2616 2617 if (!ns->bdev || !nvmf_ns_is_ptpl_capable(ns)) { 2618 return 0; 2619 } 2620 2621 memset(&info, 0, sizeof(info)); 2622 spdk_uuid_fmt_lower(info.bdev_uuid, sizeof(info.bdev_uuid), spdk_bdev_get_uuid(ns->bdev)); 2623 2624 if (ns->rtype) { 2625 info.rtype = ns->rtype; 2626 info.crkey = ns->crkey; 2627 if (!nvmf_ns_reservation_all_registrants_type(ns)) { 2628 assert(ns->holder != NULL); 2629 
spdk_uuid_fmt_lower(info.holder_uuid, sizeof(info.holder_uuid), &ns->holder->hostid); 2630 } 2631 } 2632 2633 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2634 spdk_uuid_fmt_lower(info.registrants[i].host_uuid, sizeof(info.registrants[i].host_uuid), 2635 ®->hostid); 2636 info.registrants[i++].rkey = reg->rkey; 2637 } 2638 2639 info.num_regs = i; 2640 info.ptpl_activated = ns->ptpl_activated; 2641 2642 return nvmf_ns_reservation_update(ns, &info); 2643 } 2644 2645 static struct spdk_nvmf_registrant * 2646 nvmf_ns_reservation_get_registrant(struct spdk_nvmf_ns *ns, 2647 struct spdk_uuid *uuid) 2648 { 2649 struct spdk_nvmf_registrant *reg, *tmp; 2650 2651 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2652 if (!spdk_uuid_compare(®->hostid, uuid)) { 2653 return reg; 2654 } 2655 } 2656 2657 return NULL; 2658 } 2659 2660 /* Generate reservation notice log to registered HostID controllers */ 2661 static void 2662 nvmf_subsystem_gen_ctrlr_notification(struct spdk_nvmf_subsystem *subsystem, 2663 struct spdk_nvmf_ns *ns, 2664 struct spdk_uuid *hostid_list, 2665 uint32_t num_hostid, 2666 enum spdk_nvme_reservation_notification_log_page_type type) 2667 { 2668 struct spdk_nvmf_ctrlr *ctrlr; 2669 uint32_t i; 2670 2671 for (i = 0; i < num_hostid; i++) { 2672 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 2673 if (!spdk_uuid_compare(&ctrlr->hostid, &hostid_list[i])) { 2674 nvmf_ctrlr_reservation_notice_log(ctrlr, ns, type); 2675 } 2676 } 2677 } 2678 } 2679 2680 /* Get all registrants' hostid other than the controller who issued the command */ 2681 static uint32_t 2682 nvmf_ns_reservation_get_all_other_hostid(struct spdk_nvmf_ns *ns, 2683 struct spdk_uuid *hostid_list, 2684 uint32_t max_num_hostid, 2685 struct spdk_uuid *current_hostid) 2686 { 2687 struct spdk_nvmf_registrant *reg, *tmp; 2688 uint32_t num_hostid = 0; 2689 2690 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2691 if (spdk_uuid_compare(®->hostid, current_hostid)) { 2692 if (num_hostid == 
max_num_hostid) { 2693 assert(false); 2694 return max_num_hostid; 2695 } 2696 hostid_list[num_hostid++] = reg->hostid; 2697 } 2698 } 2699 2700 return num_hostid; 2701 } 2702 2703 /* Calculate the unregistered HostID list according to list 2704 * prior to execute preempt command and list after executing 2705 * preempt command. 2706 */ 2707 static uint32_t 2708 nvmf_ns_reservation_get_unregistered_hostid(struct spdk_uuid *old_hostid_list, 2709 uint32_t old_num_hostid, 2710 struct spdk_uuid *remaining_hostid_list, 2711 uint32_t remaining_num_hostid) 2712 { 2713 struct spdk_uuid temp_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2714 uint32_t i, j, num_hostid = 0; 2715 bool found; 2716 2717 if (!remaining_num_hostid) { 2718 return old_num_hostid; 2719 } 2720 2721 for (i = 0; i < old_num_hostid; i++) { 2722 found = false; 2723 for (j = 0; j < remaining_num_hostid; j++) { 2724 if (!spdk_uuid_compare(&old_hostid_list[i], &remaining_hostid_list[j])) { 2725 found = true; 2726 break; 2727 } 2728 } 2729 if (!found) { 2730 spdk_uuid_copy(&temp_hostid_list[num_hostid++], &old_hostid_list[i]); 2731 } 2732 } 2733 2734 if (num_hostid) { 2735 memcpy(old_hostid_list, temp_hostid_list, sizeof(struct spdk_uuid) * num_hostid); 2736 } 2737 2738 return num_hostid; 2739 } 2740 2741 /* current reservation type is all registrants or not */ 2742 static bool 2743 nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns) 2744 { 2745 return (ns->rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS || 2746 ns->rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS); 2747 } 2748 2749 /* current registrant is reservation holder or not */ 2750 static bool 2751 nvmf_ns_reservation_registrant_is_holder(struct spdk_nvmf_ns *ns, 2752 struct spdk_nvmf_registrant *reg) 2753 { 2754 if (!reg) { 2755 return false; 2756 } 2757 2758 if (nvmf_ns_reservation_all_registrants_type(ns)) { 2759 return true; 2760 } 2761 2762 return (ns->holder == reg); 2763 } 2764 2765 static int 2766 
nvmf_ns_reservation_add_registrant(struct spdk_nvmf_ns *ns, 2767 struct spdk_nvmf_ctrlr *ctrlr, 2768 uint64_t nrkey) 2769 { 2770 struct spdk_nvmf_registrant *reg; 2771 2772 reg = calloc(1, sizeof(*reg)); 2773 if (!reg) { 2774 return -ENOMEM; 2775 } 2776 2777 reg->rkey = nrkey; 2778 /* set hostid for the registrant */ 2779 spdk_uuid_copy(®->hostid, &ctrlr->hostid); 2780 TAILQ_INSERT_TAIL(&ns->registrants, reg, link); 2781 ns->gen++; 2782 2783 return 0; 2784 } 2785 2786 static void 2787 nvmf_ns_reservation_release_reservation(struct spdk_nvmf_ns *ns) 2788 { 2789 ns->rtype = 0; 2790 ns->crkey = 0; 2791 ns->holder = NULL; 2792 } 2793 2794 /* release the reservation if the last registrant was removed */ 2795 static void 2796 nvmf_ns_reservation_check_release_on_remove_registrant(struct spdk_nvmf_ns *ns, 2797 struct spdk_nvmf_registrant *reg) 2798 { 2799 struct spdk_nvmf_registrant *next_reg; 2800 2801 /* no reservation holder */ 2802 if (!ns->holder) { 2803 assert(ns->rtype == 0); 2804 return; 2805 } 2806 2807 next_reg = TAILQ_FIRST(&ns->registrants); 2808 if (next_reg && nvmf_ns_reservation_all_registrants_type(ns)) { 2809 /* the next valid registrant is the new holder now */ 2810 ns->holder = next_reg; 2811 } else if (nvmf_ns_reservation_registrant_is_holder(ns, reg)) { 2812 /* release the reservation */ 2813 nvmf_ns_reservation_release_reservation(ns); 2814 } 2815 } 2816 2817 static void 2818 nvmf_ns_reservation_remove_registrant(struct spdk_nvmf_ns *ns, 2819 struct spdk_nvmf_registrant *reg) 2820 { 2821 TAILQ_REMOVE(&ns->registrants, reg, link); 2822 nvmf_ns_reservation_check_release_on_remove_registrant(ns, reg); 2823 free(reg); 2824 ns->gen++; 2825 return; 2826 } 2827 2828 static uint32_t 2829 nvmf_ns_reservation_remove_registrants_by_key(struct spdk_nvmf_ns *ns, 2830 uint64_t rkey) 2831 { 2832 struct spdk_nvmf_registrant *reg, *tmp; 2833 uint32_t count = 0; 2834 2835 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2836 if (reg->rkey == rkey) { 2837 
nvmf_ns_reservation_remove_registrant(ns, reg); 2838 count++; 2839 } 2840 } 2841 return count; 2842 } 2843 2844 static uint32_t 2845 nvmf_ns_reservation_remove_all_other_registrants(struct spdk_nvmf_ns *ns, 2846 struct spdk_nvmf_registrant *reg) 2847 { 2848 struct spdk_nvmf_registrant *reg_tmp, *reg_tmp2; 2849 uint32_t count = 0; 2850 2851 TAILQ_FOREACH_SAFE(reg_tmp, &ns->registrants, link, reg_tmp2) { 2852 if (reg_tmp != reg) { 2853 nvmf_ns_reservation_remove_registrant(ns, reg_tmp); 2854 count++; 2855 } 2856 } 2857 return count; 2858 } 2859 2860 static uint32_t 2861 nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns) 2862 { 2863 struct spdk_nvmf_registrant *reg, *reg_tmp; 2864 uint32_t count = 0; 2865 2866 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, reg_tmp) { 2867 nvmf_ns_reservation_remove_registrant(ns, reg); 2868 count++; 2869 } 2870 return count; 2871 } 2872 2873 static void 2874 nvmf_ns_reservation_acquire_reservation(struct spdk_nvmf_ns *ns, uint64_t rkey, 2875 enum spdk_nvme_reservation_type rtype, 2876 struct spdk_nvmf_registrant *holder) 2877 { 2878 ns->rtype = rtype; 2879 ns->crkey = rkey; 2880 assert(ns->holder == NULL); 2881 ns->holder = holder; 2882 } 2883 2884 static bool 2885 nvmf_ns_reservation_register(struct spdk_nvmf_ns *ns, 2886 struct spdk_nvmf_ctrlr *ctrlr, 2887 struct spdk_nvmf_request *req) 2888 { 2889 struct spdk_nvme_reservation_register_data key = { 0 }; 2890 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 2891 uint8_t rrega, iekey, cptpl, rtype; 2892 struct spdk_nvmf_registrant *reg; 2893 uint8_t status = SPDK_NVME_SC_SUCCESS; 2894 bool update_sgroup = false; 2895 struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2896 uint32_t num_hostid = 0; 2897 int rc; 2898 2899 rrega = cmd->cdw10_bits.resv_register.rrega; 2900 iekey = cmd->cdw10_bits.resv_register.iekey; 2901 cptpl = cmd->cdw10_bits.resv_register.cptpl; 2902 2903 if (req->iovcnt > 0 && req->length >= sizeof(key)) { 2904 struct spdk_iov_xfer ix; 2905 
spdk_iov_xfer_init(&ix, req->iov, req->iovcnt); 2906 spdk_iov_xfer_to_buf(&ix, &key, sizeof(key)); 2907 } else { 2908 SPDK_ERRLOG("No key provided. Failing request.\n"); 2909 status = SPDK_NVME_SC_INVALID_FIELD; 2910 goto exit; 2911 } 2912 2913 SPDK_DEBUGLOG(nvmf, "REGISTER: RREGA %u, IEKEY %u, CPTPL %u, " 2914 "NRKEY 0x%"PRIx64", NRKEY 0x%"PRIx64"\n", 2915 rrega, iekey, cptpl, key.crkey, key.nrkey); 2916 2917 if (cptpl == SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON) { 2918 /* Ture to OFF state, and need to be updated in the configuration file */ 2919 if (ns->ptpl_activated) { 2920 ns->ptpl_activated = 0; 2921 update_sgroup = true; 2922 } 2923 } else if (cptpl == SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS) { 2924 if (!nvmf_ns_is_ptpl_capable(ns)) { 2925 status = SPDK_NVME_SC_INVALID_FIELD; 2926 goto exit; 2927 } else if (ns->ptpl_activated == 0) { 2928 ns->ptpl_activated = 1; 2929 update_sgroup = true; 2930 } 2931 } 2932 2933 /* current Host Identifier has registrant or not */ 2934 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid); 2935 2936 switch (rrega) { 2937 case SPDK_NVME_RESERVE_REGISTER_KEY: 2938 if (!reg) { 2939 /* register new controller */ 2940 if (key.nrkey == 0) { 2941 SPDK_ERRLOG("Can't register zeroed new key\n"); 2942 status = SPDK_NVME_SC_INVALID_FIELD; 2943 goto exit; 2944 } 2945 rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey); 2946 if (rc < 0) { 2947 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2948 goto exit; 2949 } 2950 update_sgroup = true; 2951 } else { 2952 /* register with same key is not an error */ 2953 if (reg->rkey != key.nrkey) { 2954 SPDK_ERRLOG("The same host already register a " 2955 "key with 0x%"PRIx64"\n", 2956 reg->rkey); 2957 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2958 goto exit; 2959 } 2960 } 2961 break; 2962 case SPDK_NVME_RESERVE_UNREGISTER_KEY: 2963 if (!reg || (!iekey && reg->rkey != key.crkey)) { 2964 SPDK_ERRLOG("No registrant or current key doesn't match " 2965 "with existing registrant key\n"); 
2966 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2967 goto exit; 2968 } 2969 2970 rtype = ns->rtype; 2971 num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list, 2972 SPDK_NVMF_MAX_NUM_REGISTRANTS, 2973 &ctrlr->hostid); 2974 2975 nvmf_ns_reservation_remove_registrant(ns, reg); 2976 2977 if (!ns->rtype && num_hostid && (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY || 2978 rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY)) { 2979 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 2980 hostid_list, 2981 num_hostid, 2982 SPDK_NVME_RESERVATION_RELEASED); 2983 } 2984 update_sgroup = true; 2985 break; 2986 case SPDK_NVME_RESERVE_REPLACE_KEY: 2987 if (key.nrkey == 0) { 2988 SPDK_ERRLOG("Can't register zeroed new key\n"); 2989 status = SPDK_NVME_SC_INVALID_FIELD; 2990 goto exit; 2991 } 2992 /* Registrant exists */ 2993 if (reg) { 2994 if (!iekey && reg->rkey != key.crkey) { 2995 SPDK_ERRLOG("Current key doesn't match " 2996 "existing registrant key\n"); 2997 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2998 goto exit; 2999 } 3000 if (reg->rkey == key.nrkey) { 3001 goto exit; 3002 } 3003 reg->rkey = key.nrkey; 3004 } else if (iekey) { /* No registrant but IEKEY is set */ 3005 /* new registrant */ 3006 rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey); 3007 if (rc < 0) { 3008 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 3009 goto exit; 3010 } 3011 } else { /* No registrant */ 3012 SPDK_ERRLOG("No registrant\n"); 3013 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3014 goto exit; 3015 3016 } 3017 update_sgroup = true; 3018 break; 3019 default: 3020 status = SPDK_NVME_SC_INVALID_FIELD; 3021 goto exit; 3022 } 3023 3024 exit: 3025 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3026 req->rsp->nvme_cpl.status.sc = status; 3027 return update_sgroup; 3028 } 3029 3030 static bool 3031 nvmf_ns_reservation_acquire(struct spdk_nvmf_ns *ns, 3032 struct spdk_nvmf_ctrlr *ctrlr, 3033 struct spdk_nvmf_request *req) 3034 { 3035 struct 
spdk_nvme_reservation_acquire_data key = { 0 }; 3036 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 3037 uint8_t racqa, iekey, rtype; 3038 struct spdk_nvmf_registrant *reg; 3039 bool all_regs = false; 3040 uint32_t count = 0; 3041 bool update_sgroup = true; 3042 struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 3043 uint32_t num_hostid = 0; 3044 struct spdk_uuid new_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 3045 uint32_t new_num_hostid = 0; 3046 bool reservation_released = false; 3047 uint8_t status = SPDK_NVME_SC_SUCCESS; 3048 3049 racqa = cmd->cdw10_bits.resv_acquire.racqa; 3050 iekey = cmd->cdw10_bits.resv_acquire.iekey; 3051 rtype = cmd->cdw10_bits.resv_acquire.rtype; 3052 3053 if (req->iovcnt > 0 && req->length >= sizeof(key)) { 3054 struct spdk_iov_xfer ix; 3055 spdk_iov_xfer_init(&ix, req->iov, req->iovcnt); 3056 spdk_iov_xfer_to_buf(&ix, &key, sizeof(key)); 3057 } else { 3058 SPDK_ERRLOG("No key provided. Failing request.\n"); 3059 status = SPDK_NVME_SC_INVALID_FIELD; 3060 goto exit; 3061 } 3062 3063 SPDK_DEBUGLOG(nvmf, "ACQUIRE: RACQA %u, IEKEY %u, RTYPE %u, " 3064 "NRKEY 0x%"PRIx64", PRKEY 0x%"PRIx64"\n", 3065 racqa, iekey, rtype, key.crkey, key.prkey); 3066 3067 if (iekey || rtype > SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) { 3068 SPDK_ERRLOG("Ignore existing key field set to 1\n"); 3069 status = SPDK_NVME_SC_INVALID_FIELD; 3070 update_sgroup = false; 3071 goto exit; 3072 } 3073 3074 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid); 3075 /* must be registrant and CRKEY must match */ 3076 if (!reg || reg->rkey != key.crkey) { 3077 SPDK_ERRLOG("No registrant or current key doesn't match " 3078 "with existing registrant key\n"); 3079 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3080 update_sgroup = false; 3081 goto exit; 3082 } 3083 3084 all_regs = nvmf_ns_reservation_all_registrants_type(ns); 3085 3086 switch (racqa) { 3087 case SPDK_NVME_RESERVE_ACQUIRE: 3088 /* it's not an error for the holder to acquire same reservation type 
again */
		if (nvmf_ns_reservation_registrant_is_holder(ns, reg) && ns->rtype == rtype) {
			/* do nothing */
			update_sgroup = false;
		} else if (ns->holder == NULL) {
			/* first time to acquire the reservation */
			nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg);
		} else {
			SPDK_ERRLOG("Invalid rtype or current registrant is not holder\n");
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
			update_sgroup = false;
			goto exit;
		}
		break;
	case SPDK_NVME_RESERVE_PREEMPT:
		/* no reservation holder */
		if (!ns->holder) {
			/* unregister with PRKEY */
			nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
			break;
		}
		/* Snapshot the other hosts before preemption so the exit path can
		 * compute who got unregistered and must be notified. */
		num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
				SPDK_NVMF_MAX_NUM_REGISTRANTS,
				&ctrlr->hostid);

		/* only 1 reservation holder and reservation key is valid */
		if (!all_regs) {
			/* preempt itself */
			if (nvmf_ns_reservation_registrant_is_holder(ns, reg) &&
			    ns->crkey == key.prkey) {
				ns->rtype = rtype;
				reservation_released = true;
				break;
			}

			if (ns->crkey == key.prkey) {
				nvmf_ns_reservation_remove_registrant(ns, ns->holder);
				nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg);
				reservation_released = true;
			} else if (key.prkey != 0) {
				nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
			} else {
				/* PRKEY is zero */
				SPDK_ERRLOG("Current PRKEY is zero\n");
				status = SPDK_NVME_SC_RESERVATION_CONFLICT;
				update_sgroup = false;
				goto exit;
			}
		} else {
			/* release all other registrants except for the current one */
			if (key.prkey == 0) {
				nvmf_ns_reservation_remove_all_other_registrants(ns, reg);
				assert(ns->holder == reg);
			} else {
				count = nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
				if (count == 0) {
					SPDK_ERRLOG("PRKEY doesn't match any registrant\n");
					status = SPDK_NVME_SC_RESERVATION_CONFLICT;
					update_sgroup = false;
					goto exit;
				}
			}
		}
		break;
	default:
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		break;
	}

exit:
	if (update_sgroup && racqa == SPDK_NVME_RESERVE_PREEMPT) {
		new_num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, new_hostid_list,
				 SPDK_NVMF_MAX_NUM_REGISTRANTS,
				 &ctrlr->hostid);
		/* Preempt notification occurs on the unregistered controllers
		 * other than the controller who issued the command.
		 */
		num_hostid = nvmf_ns_reservation_get_unregistered_hostid(hostid_list,
				num_hostid,
				new_hostid_list,
				new_num_hostid);
		if (num_hostid) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_REGISTRATION_PREEMPTED);

		}
		/* Reservation released notification occurs on the
		 * controllers which are the remaining registrants other than
		 * the controller who issued the command.
		 */
		if (reservation_released && new_num_hostid) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      new_hostid_list,
							      new_num_hostid,
							      SPDK_NVME_RESERVATION_RELEASED);

		}
	}
	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->rsp->nvme_cpl.status.sc = status;
	return update_sgroup;
}

/* Handle a Reservation Release command (release/clear).
 * Completes the request status in-place and returns true when the subsystem
 * poll groups must be updated with the new reservation state. */
static bool
nvmf_ns_reservation_release(struct spdk_nvmf_ns *ns,
			    struct spdk_nvmf_ctrlr *ctrlr,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	uint8_t rrela, iekey, rtype;
	struct spdk_nvmf_registrant *reg;
	uint64_t crkey = 0;
	uint8_t status = SPDK_NVME_SC_SUCCESS;
	bool update_sgroup = true;
	struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint32_t num_hostid = 0;

	rrela = cmd->cdw10_bits.resv_release.rrela;
	iekey = cmd->cdw10_bits.resv_release.iekey;
	rtype = cmd->cdw10_bits.resv_release.rtype;

	/* The release payload is just the 8-byte current reservation key. */
	if (req->iovcnt > 0 && req->length >= sizeof(crkey)) {
		struct spdk_iov_xfer ix;
		spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
		spdk_iov_xfer_to_buf(&ix, &crkey, sizeof(crkey));
	} else {
		SPDK_ERRLOG("No key provided. Failing request.\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		goto exit;
	}

	SPDK_DEBUGLOG(nvmf, "RELEASE: RRELA %u, IEKEY %u, RTYPE %u, "
		      "CRKEY 0x%"PRIx64"\n", rrela, iekey, rtype, crkey);

	if (iekey) {
		SPDK_ERRLOG("Ignore existing key field set to 1\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		goto exit;
	}

	reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid);
	if (!reg || reg->rkey != crkey) {
		SPDK_ERRLOG("No registrant or current key doesn't match "
			    "with existing registrant key\n");
		status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		update_sgroup = false;
		goto exit;
	}

	num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
			SPDK_NVMF_MAX_NUM_REGISTRANTS,
			&ctrlr->hostid);

	switch (rrela) {
	case SPDK_NVME_RESERVE_RELEASE:
		if (!ns->holder) {
			SPDK_DEBUGLOG(nvmf, "RELEASE: no holder\n");
			update_sgroup = false;
			goto exit;
		}
		if (ns->rtype != rtype) {
			SPDK_ERRLOG("Type doesn't match\n");
			status = SPDK_NVME_SC_INVALID_FIELD;
			update_sgroup = false;
			goto exit;
		}
		if (!nvmf_ns_reservation_registrant_is_holder(ns, reg)) {
			/* not the reservation holder, this isn't an error */
			update_sgroup = false;
			goto exit;
		}

		/* Save the type before clearing it so we can decide below
		 * whether other hosts need a release notification. */
		rtype = ns->rtype;
		nvmf_ns_reservation_release_reservation(ns);

		/* Write-exclusive / exclusive-access types do not generate a
		 * release notification per the spec. */
		if (num_hostid && rtype != SPDK_NVME_RESERVE_WRITE_EXCLUSIVE &&
		    rtype != SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_RESERVATION_RELEASED);
		}
		break;
	case SPDK_NVME_RESERVE_CLEAR:
		nvmf_ns_reservation_clear_all_registrants(ns);
		if (num_hostid) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_RESERVATION_PREEMPTED);
		}
		break;
	default:
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		goto exit;
	}

exit:
	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->rsp->nvme_cpl.status.sc = status;
	return update_sgroup;
}

/* Handle a Reservation Report command: emit the extended reservation status
 * data structure followed by one extended controller-data entry per
 * registrant, into the request's iovs. Completes the status in-place. */
static void
nvmf_ns_reservation_report(struct spdk_nvmf_ns *ns,
			   struct spdk_nvmf_ctrlr *ctrlr,
			   struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvmf_registrant *reg, *tmp;
	struct spdk_nvme_reservation_status_extended_data status_data = { 0 };
	struct spdk_iov_xfer ix;
	uint32_t transfer_len;
	uint32_t regctl = 0;
	uint8_t status = SPDK_NVME_SC_SUCCESS;

	if (req->iovcnt == 0) {
		SPDK_ERRLOG("No data transfer specified for request. "
			    " Unable to transfer back response.\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		goto exit;
	}

	if (!cmd->cdw11_bits.resv_report.eds) {
		SPDK_ERRLOG("NVMeoF uses extended controller data structure, "
			    "please set EDS bit in cdw11 and try again\n");
		status = SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT;
		goto exit;
	}

	/* Number of Dwords of the Reservation Status data structure to transfer */
	transfer_len = (cmd->cdw10 + 1) * sizeof(uint32_t);

	if (transfer_len < sizeof(struct spdk_nvme_reservation_status_extended_data)) {
		status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		goto exit;
	}

	spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);

	status_data.data.gen = ns->gen;
	status_data.data.rtype = ns->rtype;
	status_data.data.ptpls = ns->ptpl_activated;

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		regctl++;
	}

	/*
	 * We report the number of registrants as per the spec here, even if
	 * the iov isn't big enough to contain them all. In that case, the
	 * spdk_iov_xfer_from_buf() won't actually copy any of the remaining
	 * data; as it keeps track of the iov cursor itself, it's simplest to
	 * just walk the entire list anyway.
	 */
	status_data.data.regctl = regctl;

	spdk_iov_xfer_from_buf(&ix, &status_data, sizeof(status_data));

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		struct spdk_nvme_registered_ctrlr_extended_data ctrlr_data = { 0 };

		/* Set to 0xffffh for dynamic controller */
		ctrlr_data.cntlid = 0xffff;
		ctrlr_data.rcsts.status = (ns->holder == reg) ? true : false;
		ctrlr_data.rkey = reg->rkey;
		spdk_uuid_copy((struct spdk_uuid *)ctrlr_data.hostid, &reg->hostid);

		spdk_iov_xfer_from_buf(&ix, &ctrlr_data, sizeof(ctrlr_data));
	}

exit:
	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->rsp->nvme_cpl.status.sc = status;
	return;
}

/* Completion trampoline: finish the request on its poll group's thread. */
static void
nvmf_ns_reservation_complete(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;

	spdk_nvmf_request_complete(req);
}

/* Subsystem-pause callback: bounce request completion back to the
 * originating poll group's thread. */
static void
_nvmf_ns_reservation_update_done(struct spdk_nvmf_subsystem *subsystem,
				 void *cb_arg, int status)
{
	struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)cb_arg;
	struct spdk_nvmf_poll_group *group = req->qpair->group;

	spdk_thread_send_msg(group->thread, nvmf_ns_reservation_complete, req);
}

/* Entry point for reservation commands; dispatches on the NVMe opcode. */
void
nvmf_ns_reservation_request(void *ctx)
{
	struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)ctx;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	uint32_t nsid;
	struct spdk_nvmf_ns *ns;
	bool update_sgroup = false;
	int status = 0;

	nsid = cmd->nsid;
	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid);
	assert(ns != NULL);

	switch (cmd->opc) {
	case
SPDK_NVME_OPC_RESERVATION_REGISTER: 3405 update_sgroup = nvmf_ns_reservation_register(ns, ctrlr, req); 3406 break; 3407 case SPDK_NVME_OPC_RESERVATION_ACQUIRE: 3408 update_sgroup = nvmf_ns_reservation_acquire(ns, ctrlr, req); 3409 break; 3410 case SPDK_NVME_OPC_RESERVATION_RELEASE: 3411 update_sgroup = nvmf_ns_reservation_release(ns, ctrlr, req); 3412 break; 3413 case SPDK_NVME_OPC_RESERVATION_REPORT: 3414 nvmf_ns_reservation_report(ns, ctrlr, req); 3415 break; 3416 default: 3417 break; 3418 } 3419 3420 /* update reservation information to subsystem's poll group */ 3421 if (update_sgroup) { 3422 if (ns->ptpl_activated || cmd->opc == SPDK_NVME_OPC_RESERVATION_REGISTER) { 3423 if (nvmf_ns_update_reservation_info(ns) != 0) { 3424 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 3425 } 3426 } 3427 status = nvmf_subsystem_update_ns(ctrlr->subsys, _nvmf_ns_reservation_update_done, req); 3428 if (status == 0) { 3429 return; 3430 } 3431 } 3432 3433 _nvmf_ns_reservation_update_done(ctrlr->subsys, req, status); 3434 } 3435 3436 static bool 3437 nvmf_ns_is_ptpl_capable_json(const struct spdk_nvmf_ns *ns) 3438 { 3439 return ns->ptpl_file != NULL; 3440 } 3441 3442 static struct spdk_nvmf_ns_reservation_ops g_reservation_ops = { 3443 .is_ptpl_capable = nvmf_ns_is_ptpl_capable_json, 3444 .update = nvmf_ns_reservation_update_json, 3445 .load = nvmf_ns_reservation_load_json, 3446 }; 3447 3448 bool 3449 nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns) 3450 { 3451 return g_reservation_ops.is_ptpl_capable(ns); 3452 } 3453 3454 static int 3455 nvmf_ns_reservation_update(const struct spdk_nvmf_ns *ns, 3456 const struct spdk_nvmf_reservation_info *info) 3457 { 3458 return g_reservation_ops.update(ns, info); 3459 } 3460 3461 static int 3462 nvmf_ns_reservation_load(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info) 3463 { 3464 return g_reservation_ops.load(ns, info); 3465 } 3466 3467 void 3468 spdk_nvmf_set_custom_ns_reservation_ops(const 
struct spdk_nvmf_ns_reservation_ops *ops) 3469 { 3470 g_reservation_ops = *ops; 3471 } 3472 3473 int 3474 spdk_nvmf_subsystem_set_ana_reporting(struct spdk_nvmf_subsystem *subsystem, 3475 bool ana_reporting) 3476 { 3477 if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) { 3478 return -EAGAIN; 3479 } 3480 3481 subsystem->flags.ana_reporting = ana_reporting; 3482 3483 return 0; 3484 } 3485 3486 bool 3487 spdk_nvmf_subsystem_get_ana_reporting(struct spdk_nvmf_subsystem *subsystem) 3488 { 3489 return subsystem->flags.ana_reporting; 3490 } 3491 3492 struct subsystem_listener_update_ctx { 3493 struct spdk_nvmf_subsystem_listener *listener; 3494 3495 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn; 3496 void *cb_arg; 3497 }; 3498 3499 static void 3500 subsystem_listener_update_done(struct spdk_io_channel_iter *i, int status) 3501 { 3502 struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 3503 3504 if (ctx->cb_fn) { 3505 ctx->cb_fn(ctx->cb_arg, status); 3506 } 3507 free(ctx); 3508 } 3509 3510 static void 3511 subsystem_listener_update_on_pg(struct spdk_io_channel_iter *i) 3512 { 3513 struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 3514 struct spdk_nvmf_subsystem_listener *listener; 3515 struct spdk_nvmf_poll_group *group; 3516 struct spdk_nvmf_ctrlr *ctrlr; 3517 3518 listener = ctx->listener; 3519 group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i)); 3520 3521 TAILQ_FOREACH(ctrlr, &listener->subsystem->ctrlrs, link) { 3522 if (ctrlr->thread != spdk_get_thread()) { 3523 continue; 3524 } 3525 3526 if (ctrlr->admin_qpair && ctrlr->admin_qpair->group == group && ctrlr->listener == listener) { 3527 nvmf_ctrlr_async_event_ana_change_notice(ctrlr); 3528 } 3529 } 3530 3531 spdk_for_each_channel_continue(i, 0); 3532 } 3533 3534 void 3535 spdk_nvmf_subsystem_set_ana_state(struct spdk_nvmf_subsystem *subsystem, 3536 const struct spdk_nvme_transport_id *trid, 3537 enum spdk_nvme_ana_state ana_state, uint32_t 
anagrpid, 3538 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, void *cb_arg) 3539 { 3540 struct spdk_nvmf_subsystem_listener *listener; 3541 struct subsystem_listener_update_ctx *ctx; 3542 uint32_t i; 3543 3544 assert(cb_fn != NULL); 3545 assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 3546 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED); 3547 3548 if (!subsystem->flags.ana_reporting) { 3549 SPDK_ERRLOG("ANA reporting is disabled\n"); 3550 cb_fn(cb_arg, -EINVAL); 3551 return; 3552 } 3553 3554 /* ANA Change state is not used, ANA Persistent Loss state 3555 * is not supported yet. 3556 */ 3557 if (!(ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE || 3558 ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE || 3559 ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE)) { 3560 SPDK_ERRLOG("ANA state %d is not supported\n", ana_state); 3561 cb_fn(cb_arg, -ENOTSUP); 3562 return; 3563 } 3564 3565 if (anagrpid > subsystem->max_nsid) { 3566 SPDK_ERRLOG("ANA group ID %" PRIu32 " is more than maximum\n", anagrpid); 3567 cb_fn(cb_arg, -EINVAL); 3568 return; 3569 } 3570 3571 listener = nvmf_subsystem_find_listener(subsystem, trid); 3572 if (!listener) { 3573 SPDK_ERRLOG("Unable to find listener.\n"); 3574 cb_fn(cb_arg, -EINVAL); 3575 return; 3576 } 3577 3578 if (anagrpid != 0 && listener->ana_state[anagrpid - 1] == ana_state) { 3579 cb_fn(cb_arg, 0); 3580 return; 3581 } 3582 3583 ctx = calloc(1, sizeof(*ctx)); 3584 if (!ctx) { 3585 SPDK_ERRLOG("Unable to allocate context\n"); 3586 cb_fn(cb_arg, -ENOMEM); 3587 return; 3588 } 3589 3590 for (i = 1; i <= subsystem->max_nsid; i++) { 3591 if (anagrpid == 0 || i == anagrpid) { 3592 listener->ana_state[i - 1] = ana_state; 3593 } 3594 } 3595 listener->ana_state_change_count++; 3596 3597 ctx->listener = listener; 3598 ctx->cb_fn = cb_fn; 3599 ctx->cb_arg = cb_arg; 3600 3601 spdk_for_each_channel(subsystem->tgt, 3602 subsystem_listener_update_on_pg, 3603 ctx, 3604 subsystem_listener_update_done); 3605 } 3606 3607 int 3608 
spdk_nvmf_subsystem_get_ana_state(struct spdk_nvmf_subsystem *subsystem, 3609 const struct spdk_nvme_transport_id *trid, 3610 uint32_t anagrpid, 3611 enum spdk_nvme_ana_state *ana_state) 3612 { 3613 assert(ana_state != NULL); 3614 3615 struct spdk_nvmf_subsystem_listener *listener; 3616 3617 if (!subsystem->flags.ana_reporting) { 3618 SPDK_ERRLOG("ANA reporting is disabled\n"); 3619 return -EINVAL; 3620 } 3621 3622 if (anagrpid <= 0 || anagrpid > subsystem->max_nsid) { 3623 SPDK_ERRLOG("ANA group ID %" PRIu32 " is invalid\n", anagrpid); 3624 return -EINVAL; 3625 } 3626 3627 listener = nvmf_subsystem_find_listener(subsystem, trid); 3628 if (!listener) { 3629 SPDK_ERRLOG("Unable to find listener.\n"); 3630 return -EINVAL; 3631 } 3632 3633 *ana_state = listener->ana_state[anagrpid - 1]; 3634 return 0; 3635 } 3636 3637 bool 3638 spdk_nvmf_subsystem_is_discovery(struct spdk_nvmf_subsystem *subsystem) 3639 { 3640 return subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT || 3641 subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY; 3642 } 3643 3644 bool 3645 nvmf_nqn_is_discovery(const char *nqn) 3646 { 3647 return strcmp(nqn, SPDK_NVMF_DISCOVERY_NQN) == 0; 3648 } 3649