1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2016 Intel Corporation. All rights reserved. 3 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 9 #include "nvmf_internal.h" 10 #include "transport.h" 11 12 #include "spdk/assert.h" 13 #include "spdk/likely.h" 14 #include "spdk/string.h" 15 #include "spdk/trace.h" 16 #include "spdk/nvmf_spec.h" 17 #include "spdk/uuid.h" 18 #include "spdk/json.h" 19 #include "spdk/file.h" 20 #include "spdk/bit_array.h" 21 #include "spdk/bdev.h" 22 23 #define __SPDK_BDEV_MODULE_ONLY 24 #include "spdk/bdev_module.h" 25 #include "spdk/log.h" 26 #include "spdk_internal/utf.h" 27 #include "spdk_internal/usdt.h" 28 29 #define MODEL_NUMBER_DEFAULT "SPDK bdev Controller" 30 #define NVMF_SUBSYSTEM_DEFAULT_NAMESPACES 32 31 32 /* 33 * States for parsing valid domains in NQNs according to RFC 1034 34 */ 35 enum spdk_nvmf_nqn_domain_states { 36 /* First character of a domain must be a letter */ 37 SPDK_NVMF_DOMAIN_ACCEPT_LETTER = 0, 38 39 /* Subsequent characters can be any of letter, digit, or hyphen */ 40 SPDK_NVMF_DOMAIN_ACCEPT_LDH = 1, 41 42 /* A domain label must end with either a letter or digit */ 43 SPDK_NVMF_DOMAIN_ACCEPT_ANY = 2 44 }; 45 46 static int _nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem); 47 48 /* Returns true if is a valid ASCII string as defined by the NVMe spec */ 49 static bool 50 nvmf_valid_ascii_string(const void *buf, size_t size) 51 { 52 const uint8_t *str = buf; 53 size_t i; 54 55 for (i = 0; i < size; i++) { 56 if (str[i] < 0x20 || str[i] > 0x7E) { 57 return false; 58 } 59 } 60 61 return true; 62 } 63 64 bool 65 nvmf_nqn_is_valid(const char *nqn) 66 { 67 size_t len; 68 struct spdk_uuid uuid_value; 69 uint32_t i; 70 int bytes_consumed; 71 uint32_t domain_label_length; 72 char *reverse_domain_end; 73 uint32_t reverse_domain_end_index; 74 enum 
spdk_nvmf_nqn_domain_states domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER; 75 76 /* Check for length requirements */ 77 len = strlen(nqn); 78 if (len > SPDK_NVMF_NQN_MAX_LEN) { 79 SPDK_ERRLOG("Invalid NQN \"%s\": length %zu > max %d\n", nqn, len, SPDK_NVMF_NQN_MAX_LEN); 80 return false; 81 } 82 83 /* The nqn must be at least as long as SPDK_NVMF_NQN_MIN_LEN to contain the necessary prefix. */ 84 if (len < SPDK_NVMF_NQN_MIN_LEN) { 85 SPDK_ERRLOG("Invalid NQN \"%s\": length %zu < min %d\n", nqn, len, SPDK_NVMF_NQN_MIN_LEN); 86 return false; 87 } 88 89 /* Check for discovery controller nqn */ 90 if (!strcmp(nqn, SPDK_NVMF_DISCOVERY_NQN)) { 91 return true; 92 } 93 94 /* Check for equality with the generic nqn structure of the form "nqn.2014-08.org.nvmexpress:uuid:11111111-2222-3333-4444-555555555555" */ 95 if (!strncmp(nqn, SPDK_NVMF_NQN_UUID_PRE, SPDK_NVMF_NQN_UUID_PRE_LEN)) { 96 if (len != SPDK_NVMF_NQN_UUID_PRE_LEN + SPDK_NVMF_UUID_STRING_LEN) { 97 SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not the correct length\n", nqn); 98 return false; 99 } 100 101 if (spdk_uuid_parse(&uuid_value, &nqn[SPDK_NVMF_NQN_UUID_PRE_LEN])) { 102 SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not formatted correctly\n", nqn); 103 return false; 104 } 105 return true; 106 } 107 108 /* If the nqn does not match the uuid structure, the next several checks validate the form "nqn.yyyy-mm.reverse.domain:user-string" */ 109 110 if (strncmp(nqn, "nqn.", 4) != 0) { 111 SPDK_ERRLOG("Invalid NQN \"%s\": NQN must begin with \"nqn.\".\n", nqn); 112 return false; 113 } 114 115 /* Check for yyyy-mm. 
*/ 116 if (!(isdigit(nqn[4]) && isdigit(nqn[5]) && isdigit(nqn[6]) && isdigit(nqn[7]) && 117 nqn[8] == '-' && isdigit(nqn[9]) && isdigit(nqn[10]) && nqn[11] == '.')) { 118 SPDK_ERRLOG("Invalid date code in NQN \"%s\"\n", nqn); 119 return false; 120 } 121 122 reverse_domain_end = strchr(nqn, ':'); 123 if (reverse_domain_end != NULL && (reverse_domain_end_index = reverse_domain_end - nqn) < len - 1) { 124 } else { 125 SPDK_ERRLOG("Invalid NQN \"%s\". NQN must contain user specified name with a ':' as a prefix.\n", 126 nqn); 127 return false; 128 } 129 130 /* Check for valid reverse domain */ 131 domain_label_length = 0; 132 for (i = 12; i < reverse_domain_end_index; i++) { 133 if (domain_label_length > SPDK_DOMAIN_LABEL_MAX_LEN) { 134 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". At least one Label is too long.\n", nqn); 135 return false; 136 } 137 138 switch (domain_state) { 139 140 case SPDK_NVMF_DOMAIN_ACCEPT_LETTER: { 141 if (isalpha(nqn[i])) { 142 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 143 domain_label_length++; 144 break; 145 } else { 146 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must start with a letter.\n", nqn); 147 return false; 148 } 149 } 150 151 case SPDK_NVMF_DOMAIN_ACCEPT_LDH: { 152 if (isalpha(nqn[i]) || isdigit(nqn[i])) { 153 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 154 domain_label_length++; 155 break; 156 } else if (nqn[i] == '-') { 157 if (i == reverse_domain_end_index - 1) { 158 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 159 nqn); 160 return false; 161 } 162 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH; 163 domain_label_length++; 164 break; 165 } else if (nqn[i] == '.') { 166 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 167 nqn); 168 return false; 169 } else { 170 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". 
Label names must contain only [a-z,A-Z,0-9,'-','.'].\n", 171 nqn); 172 return false; 173 } 174 } 175 176 case SPDK_NVMF_DOMAIN_ACCEPT_ANY: { 177 if (isalpha(nqn[i]) || isdigit(nqn[i])) { 178 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 179 domain_label_length++; 180 break; 181 } else if (nqn[i] == '-') { 182 if (i == reverse_domain_end_index - 1) { 183 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 184 nqn); 185 return false; 186 } 187 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH; 188 domain_label_length++; 189 break; 190 } else if (nqn[i] == '.') { 191 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER; 192 domain_label_length = 0; 193 break; 194 } else { 195 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only [a-z,A-Z,0-9,'-','.'].\n", 196 nqn); 197 return false; 198 } 199 } 200 } 201 } 202 203 i = reverse_domain_end_index + 1; 204 while (i < len) { 205 bytes_consumed = utf8_valid(&nqn[i], &nqn[len]); 206 if (bytes_consumed <= 0) { 207 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". 
Label names must contain only valid utf-8.\n", nqn); 208 return false; 209 } 210 211 i += bytes_consumed; 212 } 213 return true; 214 } 215 216 static void subsystem_state_change_on_pg(struct spdk_io_channel_iter *i); 217 218 struct spdk_nvmf_subsystem * 219 spdk_nvmf_subsystem_create(struct spdk_nvmf_tgt *tgt, 220 const char *nqn, 221 enum spdk_nvmf_subtype type, 222 uint32_t num_ns) 223 { 224 struct spdk_nvmf_subsystem *subsystem; 225 uint32_t sid; 226 227 if (spdk_nvmf_tgt_find_subsystem(tgt, nqn)) { 228 SPDK_ERRLOG("Subsystem NQN '%s' already exists\n", nqn); 229 return NULL; 230 } 231 232 if (!nvmf_nqn_is_valid(nqn)) { 233 SPDK_ERRLOG("Subsystem NQN '%s' is invalid\n", nqn); 234 return NULL; 235 } 236 237 if (type == SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT || 238 type == SPDK_NVMF_SUBTYPE_DISCOVERY) { 239 if (num_ns != 0) { 240 SPDK_ERRLOG("Discovery subsystem cannot have namespaces.\n"); 241 return NULL; 242 } 243 } else if (num_ns == 0) { 244 num_ns = NVMF_SUBSYSTEM_DEFAULT_NAMESPACES; 245 } 246 247 /* Find a free subsystem id (sid) */ 248 sid = spdk_bit_array_find_first_clear(tgt->subsystem_ids, 0); 249 if (sid == UINT32_MAX) { 250 SPDK_ERRLOG("No free subsystem IDs are available for subsystem creation\n"); 251 return NULL; 252 } 253 subsystem = calloc(1, sizeof(struct spdk_nvmf_subsystem)); 254 if (subsystem == NULL) { 255 SPDK_ERRLOG("Subsystem memory allocation failed\n"); 256 return NULL; 257 } 258 259 subsystem->thread = spdk_get_thread(); 260 subsystem->state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 261 subsystem->tgt = tgt; 262 subsystem->id = sid; 263 subsystem->subtype = type; 264 subsystem->max_nsid = num_ns; 265 subsystem->next_cntlid = 0; 266 subsystem->min_cntlid = NVMF_MIN_CNTLID; 267 subsystem->max_cntlid = NVMF_MAX_CNTLID; 268 snprintf(subsystem->subnqn, sizeof(subsystem->subnqn), "%s", nqn); 269 pthread_mutex_init(&subsystem->mutex, NULL); 270 TAILQ_INIT(&subsystem->listeners); 271 TAILQ_INIT(&subsystem->hosts); 272 TAILQ_INIT(&subsystem->ctrlrs); 273 
	TAILQ_INIT(&subsystem->state_changes);
	subsystem->used_listener_ids = spdk_bit_array_create(NVMF_MAX_LISTENERS_PER_SUBSYSTEM);
	if (subsystem->used_listener_ids == NULL) {
		pthread_mutex_destroy(&subsystem->mutex);
		free(subsystem);
		SPDK_ERRLOG("Listener id array memory allocation failed\n");
		return NULL;
	}

	/* num_ns == 0 only for discovery subtypes; otherwise allocate the
	 * namespace pointer table and the per-namespace ANA group counters. */
	if (num_ns != 0) {
		subsystem->ns = calloc(num_ns, sizeof(struct spdk_nvmf_ns *));
		if (subsystem->ns == NULL) {
			SPDK_ERRLOG("Namespace memory allocation failed\n");
			pthread_mutex_destroy(&subsystem->mutex);
			spdk_bit_array_free(&subsystem->used_listener_ids);
			free(subsystem);
			return NULL;
		}
		subsystem->ana_group = calloc(num_ns, sizeof(uint32_t));
		if (subsystem->ana_group == NULL) {
			SPDK_ERRLOG("ANA group memory allocation failed\n");
			pthread_mutex_destroy(&subsystem->mutex);
			free(subsystem->ns);
			spdk_bit_array_free(&subsystem->used_listener_ids);
			free(subsystem);
			return NULL;
		}
	}

	/* Default serial number is all '0' characters, NUL-terminated. */
	memset(subsystem->sn, '0', sizeof(subsystem->sn) - 1);
	subsystem->sn[sizeof(subsystem->sn) - 1] = '\0';

	snprintf(subsystem->mn, sizeof(subsystem->mn), "%s",
		 MODEL_NUMBER_DEFAULT);

	/* Claim the subsystem ID and publish the subsystem on the target. */
	spdk_bit_array_set(tgt->subsystem_ids, sid);
	RB_INSERT(subsystem_tree, &tgt->subsystems, subsystem);

	SPDK_DTRACE_PROBE1(nvmf_subsystem_create, subsystem->subnqn);

	return subsystem;
}

/* Free a host entry, releasing any DH-HMAC-CHAP keys it holds. */
static void
nvmf_host_free(struct spdk_nvmf_host *host)
{
	spdk_keyring_put_key(host->dhchap_key);
	spdk_keyring_put_key(host->dhchap_ctrlr_key);
	free(host);
}

/* Must hold subsystem->mutex while calling this function */
static void
nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_host *host)
{
	TAILQ_REMOVE(&subsystem->hosts, host, link);
	nvmf_host_free(host);
}

/* Detach a listener from the subsystem, optionally stopping the transport
 * listen (continued on the next chunk). */
static void
_nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem,
				struct
				spdk_nvmf_subsystem_listener *listener,
				bool stop)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_ctrlr *ctrlr;

	if (stop) {
		transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, listener->trid->trstring);
		if (transport != NULL) {
			spdk_nvmf_transport_stop_listen(transport, listener->trid);
		}
	}

	/* Controllers created through this listener must drop their reference
	 * before the listener is freed. */
	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
		if (ctrlr->listener == listener) {
			ctrlr->listener = NULL;
		}
	}

	TAILQ_REMOVE(&subsystem->listeners, listener, link);
	nvmf_update_discovery_log(listener->subsystem->tgt, NULL);
	free(listener->ana_state);
	spdk_bit_array_clear(subsystem->used_listener_ids, listener->id);
	free(listener);
}

/* Thread-message trampoline used to retry destruction on the subsystem thread. */
static void
_nvmf_subsystem_destroy_msg(void *cb_arg)
{
	struct spdk_nvmf_subsystem *subsystem = cb_arg;

	_nvmf_subsystem_destroy(subsystem);
}

/*
 * Tear down a subsystem. If controllers are still attached, destruction is
 * deferred (returns -EINPROGRESS) and re-attempted via a thread message once
 * they disconnect; otherwise namespaces, pending state changes, and all
 * subsystem resources are released synchronously (returns 0).
 */
static int
_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem)
{
	struct nvmf_subsystem_state_change_ctx *ctx;
	struct spdk_nvmf_ns *ns;
	nvmf_subsystem_destroy_cb async_destroy_cb = NULL;
	void *async_destroy_cb_arg = NULL;
	int rc;

	if (!TAILQ_EMPTY(&subsystem->ctrlrs)) {
		SPDK_DEBUGLOG(nvmf, "subsystem %p %s has active controllers\n", subsystem, subsystem->subnqn);
		subsystem->async_destroy = true;
		rc = spdk_thread_send_msg(subsystem->thread, _nvmf_subsystem_destroy_msg, subsystem);
		if (rc) {
			SPDK_ERRLOG("Failed to send thread msg, rc %d\n", rc);
			assert(0);
			return rc;
		}
		return -EINPROGRESS;
	}

	/* Remove namespaces one at a time; grab the next pointer first because
	 * removal invalidates the current entry. */
	ns = spdk_nvmf_subsystem_get_first_ns(subsystem);
	while (ns != NULL) {
		struct spdk_nvmf_ns *next_ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns);

		spdk_nvmf_subsystem_remove_ns(subsystem, ns->opts.nsid);
		ns = next_ns;
	}

	/* Cancel any queued state-change requests with -ECANCELED. */
	while ((ctx = TAILQ_FIRST(&subsystem->state_changes))) {
		SPDK_WARNLOG("subsystem %s has pending state change requests\n",
			     subsystem->subnqn);
		TAILQ_REMOVE(&subsystem->state_changes, ctx, link);
		if (ctx->cb_fn != NULL) {
			ctx->cb_fn(subsystem, ctx->cb_arg, -ECANCELED);
		}
		free(ctx);
	}

	free(subsystem->ns);
	free(subsystem->ana_group);

	/* Unpublish the subsystem and return its ID to the target's pool. */
	RB_REMOVE(subsystem_tree, &subsystem->tgt->subsystems, subsystem);
	assert(spdk_bit_array_get(subsystem->tgt->subsystem_ids, subsystem->id) == true);
	spdk_bit_array_clear(subsystem->tgt->subsystem_ids, subsystem->id);

	pthread_mutex_destroy(&subsystem->mutex);

	spdk_bit_array_free(&subsystem->used_listener_ids);

	/* Capture the async completion callback before freeing the subsystem
	 * that stores it. */
	if (subsystem->async_destroy) {
		async_destroy_cb = subsystem->async_destroy_cb;
		async_destroy_cb_arg = subsystem->async_destroy_cb_arg;
	}

	free(subsystem);

	if (async_destroy_cb) {
		async_destroy_cb(async_destroy_cb_arg);
	}

	return 0;
}

/* Return the first namespace using the Zoned Namespace command set, or NULL. */
static struct spdk_nvmf_ns *
_nvmf_subsystem_get_first_zoned_ns(struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_ns *ns = spdk_nvmf_subsystem_get_first_ns(subsystem);
	while (ns != NULL) {
		if (ns->csi == SPDK_NVME_CSI_ZNS) {
			return ns;
		}
		ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns);
	}
	return NULL;
}

/*
 * Public destroy entry point. Must be called on the subsystem's thread while
 * the subsystem is INACTIVE. cpl_cb is invoked only if destruction completes
 * asynchronously (-EINPROGRESS is returned).
 */
int
spdk_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem, nvmf_subsystem_destroy_cb cpl_cb,
			    void *cpl_cb_arg)
{
	struct spdk_nvmf_host *host, *host_tmp;
	struct spdk_nvmf_transport *transport;

	if (!subsystem) {
		return -EINVAL;
	}

	SPDK_DTRACE_PROBE1(nvmf_subsystem_destroy, subsystem->subnqn);

	assert(spdk_get_thread() == subsystem->thread);

	if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) {
		SPDK_ERRLOG("Subsystem can only be destroyed in inactive state, %s state %d\n",
			    subsystem->subnqn, subsystem->state);
		return -EAGAIN;
	}
	if (subsystem->destroying) {
		SPDK_ERRLOG("Subsystem destruction is already started\n");
		assert(0);
		return
		       -EALREADY;
	}

	subsystem->destroying = true;

	SPDK_DEBUGLOG(nvmf, "subsystem is %p %s\n", subsystem, subsystem->subnqn);

	nvmf_subsystem_remove_all_listeners(subsystem, false);

	pthread_mutex_lock(&subsystem->mutex);

	/* Remove every allowed host, notifying each transport first. */
	TAILQ_FOREACH_SAFE(host, &subsystem->hosts, link, host_tmp) {
		for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
		     transport = spdk_nvmf_transport_get_next(transport)) {
			if (transport->ops->subsystem_remove_host) {
				transport->ops->subsystem_remove_host(transport, subsystem, host->nqn);
			}
		}
		nvmf_subsystem_remove_host(subsystem, host);
	}

	pthread_mutex_unlock(&subsystem->mutex);

	subsystem->async_destroy_cb = cpl_cb;
	subsystem->async_destroy_cb_arg = cpl_cb_arg;

	return _nvmf_subsystem_destroy(subsystem);
}

/* we have to use the typedef in the function declaration to appease astyle. */
typedef enum spdk_nvmf_subsystem_state spdk_nvmf_subsystem_state_t;

/* Map a (current, requested) state pair onto the transitional state that must
 * be entered while the change is in flight. */
static spdk_nvmf_subsystem_state_t
nvmf_subsystem_get_intermediate_state(enum spdk_nvmf_subsystem_state current_state,
				      enum spdk_nvmf_subsystem_state requested_state)
{
	switch (requested_state) {
	case SPDK_NVMF_SUBSYSTEM_INACTIVE:
		return SPDK_NVMF_SUBSYSTEM_DEACTIVATING;
	case SPDK_NVMF_SUBSYSTEM_ACTIVE:
		if (current_state == SPDK_NVMF_SUBSYSTEM_PAUSED) {
			return SPDK_NVMF_SUBSYSTEM_RESUMING;
		} else {
			return SPDK_NVMF_SUBSYSTEM_ACTIVATING;
		}
	case SPDK_NVMF_SUBSYSTEM_PAUSED:
		return SPDK_NVMF_SUBSYSTEM_PAUSING;
	default:
		assert(false);
		return SPDK_NVMF_SUBSYSTEM_NUM_STATES;
	}
}

/*
 * Atomically move the subsystem to 'state'. Only specific predecessor states
 * are legal for each target state; returns 0 on success, non-zero if the
 * subsystem was not in an expected predecessor state.
 */
static int
nvmf_subsystem_set_state(struct spdk_nvmf_subsystem *subsystem,
			 enum spdk_nvmf_subsystem_state state)
{
	enum spdk_nvmf_subsystem_state actual_old_state, expected_old_state;
	bool exchanged;

	switch (state) {
	case SPDK_NVMF_SUBSYSTEM_INACTIVE:
		expected_old_state =
			SPDK_NVMF_SUBSYSTEM_DEACTIVATING;
		break;
	case SPDK_NVMF_SUBSYSTEM_ACTIVATING:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
		break;
	case SPDK_NVMF_SUBSYSTEM_ACTIVE:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING;
		break;
	case SPDK_NVMF_SUBSYSTEM_PAUSING:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
		break;
	case SPDK_NVMF_SUBSYSTEM_PAUSED:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSING;
		break;
	case SPDK_NVMF_SUBSYSTEM_RESUMING:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSED;
		break;
	case SPDK_NVMF_SUBSYSTEM_DEACTIVATING:
		expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
		break;
	default:
		assert(false);
		return -1;
	}

	/* First CAS attempt against the most common predecessor. On failure,
	 * actual_old_state holds the state actually observed. */
	actual_old_state = expected_old_state;
	exchanged = __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false,
						__ATOMIC_RELAXED, __ATOMIC_RELAXED);
	if (spdk_unlikely(exchanged == false)) {
		/* A few alternate predecessors are also legal; retry with them. */
		if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING &&
		    state == SPDK_NVMF_SUBSYSTEM_ACTIVE) {
			expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING;
		}
		/* This is for the case when activating the subsystem fails. */
		if (actual_old_state == SPDK_NVMF_SUBSYSTEM_ACTIVATING &&
		    state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING) {
			expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING;
		}
		/* This is for the case when resuming the subsystem fails.
		 */
		if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING &&
		    state == SPDK_NVMF_SUBSYSTEM_PAUSING) {
			expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING;
		}
		/* This is for the case when stopping paused subsystem */
		if (actual_old_state == SPDK_NVMF_SUBSYSTEM_PAUSED &&
		    state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING) {
			expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSED;
		}
		/* Second and final CAS attempt with the corrected predecessor. */
		actual_old_state = expected_old_state;
		__atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false,
					    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	}
	assert(actual_old_state == expected_old_state);
	/* 0 iff the exchange succeeded (observed state matched the expectation). */
	return actual_old_state - expected_old_state;
}

static void nvmf_subsystem_do_state_change(struct nvmf_subsystem_state_change_ctx *ctx);

/*
 * Runs on the requester's thread: pop the completed request off the queue,
 * invoke its callback, and kick off the next queued state change, if any.
 */
static void
_nvmf_subsystem_state_change_complete(void *_ctx)
{
	struct nvmf_subsystem_state_change_ctx *next, *ctx = _ctx;
	struct spdk_nvmf_subsystem *subsystem = ctx->subsystem;

	pthread_mutex_lock(&subsystem->mutex);
	assert(TAILQ_FIRST(&subsystem->state_changes) == ctx);
	TAILQ_REMOVE(&subsystem->state_changes, ctx, link);
	next = TAILQ_FIRST(&subsystem->state_changes);
	pthread_mutex_unlock(&subsystem->mutex);

	if (ctx->cb_fn != NULL) {
		ctx->cb_fn(subsystem, ctx->cb_arg, ctx->status);
	}
	free(ctx);

	if (next != NULL) {
		nvmf_subsystem_do_state_change(next);
	}
}

/* Record the final status and bounce completion back to the thread that
 * originally requested the state change. */
static void
nvmf_subsystem_state_change_complete(struct nvmf_subsystem_state_change_ctx *ctx, int status)
{
	ctx->status = status;
	spdk_thread_exec_msg(ctx->thread, _nvmf_subsystem_state_change_complete, ctx);
}

/* for_each_channel completion used when rolling back a failed state change. */
static void
subsystem_state_change_revert_done(struct spdk_io_channel_iter *i, int status)
{
	struct nvmf_subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	/* Nothing to be done here if the state setting fails, we are just screwed.
	 */
	if (nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state)) {
		SPDK_ERRLOG("Unable to revert the subsystem state after operation failure.\n");
	}

	/* return a failure here. This function only exists in an error path. */
	nvmf_subsystem_state_change_complete(ctx, -1);
}

/*
 * for_each_channel completion for a state change: commit the requested state
 * on success; on failure, enter the reverse intermediate state and sweep the
 * poll groups again to undo the partial transition.
 */
static void
subsystem_state_change_done(struct spdk_io_channel_iter *i, int status)
{
	struct nvmf_subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	enum spdk_nvmf_subsystem_state intermediate_state;

	SPDK_DTRACE_PROBE4(nvmf_subsystem_change_state_done, ctx->subsystem->subnqn,
			   ctx->requested_state, ctx->original_state, status);

	if (status == 0) {
		status = nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state);
		if (status) {
			status = -1;
		}
	}

	if (status) {
		/* Revert: the "requested" state becomes the original one and the
		 * poll groups are walked again via the revert completion. */
		intermediate_state = nvmf_subsystem_get_intermediate_state(ctx->requested_state,
				     ctx->original_state);
		assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES);

		if (nvmf_subsystem_set_state(ctx->subsystem, intermediate_state)) {
			goto out;
		}
		ctx->requested_state = ctx->original_state;
		spdk_for_each_channel(ctx->subsystem->tgt,
				      subsystem_state_change_on_pg,
				      ctx,
				      subsystem_state_change_revert_done);
		return;
	}

out:
	nvmf_subsystem_state_change_complete(ctx, status);
}

/* Per-poll-group continuation: advance the channel iterator with the
 * poll group's result. */
static void
subsystem_state_change_continue(void *ctx, int status)
{
	struct spdk_io_channel_iter *i = ctx;
	struct nvmf_subsystem_state_change_ctx *_ctx __attribute__((unused));

	_ctx = spdk_io_channel_iter_get_ctx(i);
	SPDK_DTRACE_PROBE3(nvmf_pg_change_state_done, _ctx->subsystem->subnqn,
			   _ctx->requested_state, spdk_thread_get_id(spdk_get_thread()));

	spdk_for_each_channel_continue(i, status);
}

/* Apply the requested state change on one poll group (runs on its thread). */
static void
subsystem_state_change_on_pg(struct spdk_io_channel_iter *i)
{
	struct nvmf_subsystem_state_change_ctx *ctx;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_poll_group *group;

	ctx = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	SPDK_DTRACE_PROBE3(nvmf_pg_change_state, ctx->subsystem->subnqn,
			   ctx->requested_state, spdk_thread_get_id(spdk_get_thread()));
	switch (ctx->requested_state) {
	case SPDK_NVMF_SUBSYSTEM_INACTIVE:
		nvmf_poll_group_remove_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
		break;
	case SPDK_NVMF_SUBSYSTEM_ACTIVE:
		/* ACTIVE is reached either from INACTIVE (add) or PAUSED (resume);
		 * the current intermediate state distinguishes the two. */
		if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVATING) {
			nvmf_poll_group_add_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
		} else if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_RESUMING) {
			nvmf_poll_group_resume_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
		}
		break;
	case SPDK_NVMF_SUBSYSTEM_PAUSED:
		nvmf_poll_group_pause_subsystem(group, ctx->subsystem, ctx->nsid, subsystem_state_change_continue,
						i);
		break;
	default:
		assert(false);
		break;
	}
}

/*
 * Start executing a queued state-change request: enter the intermediate
 * state, then sweep every poll group. Completion (or failure) is reported
 * through nvmf_subsystem_state_change_complete().
 */
static void
nvmf_subsystem_do_state_change(struct nvmf_subsystem_state_change_ctx *ctx)
{
	struct spdk_nvmf_subsystem *subsystem = ctx->subsystem;
	enum spdk_nvmf_subsystem_state intermediate_state;
	int rc;

	SPDK_DTRACE_PROBE3(nvmf_subsystem_change_state, subsystem->subnqn,
			   ctx->requested_state, subsystem->state);

	/* If we are already in the requested state, just call the callback immediately.
	 */
	if (subsystem->state == ctx->requested_state) {
		nvmf_subsystem_state_change_complete(ctx, 0);
		return;
	}

	intermediate_state = nvmf_subsystem_get_intermediate_state(subsystem->state,
			     ctx->requested_state);
	assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES);

	ctx->original_state = subsystem->state;
	rc = nvmf_subsystem_set_state(subsystem, intermediate_state);
	if (rc) {
		nvmf_subsystem_state_change_complete(ctx, -1);
		return;
	}

	spdk_for_each_channel(subsystem->tgt,
			      subsystem_state_change_on_pg,
			      ctx,
			      subsystem_state_change_done);
}


/*
 * Queue a state-change request. Requests are serialized per subsystem via
 * the state_changes queue: only the request at the head executes; the rest
 * run in order as predecessors complete. cb_fn is invoked on this thread.
 */
static int
nvmf_subsystem_state_change(struct spdk_nvmf_subsystem *subsystem,
			    uint32_t nsid,
			    enum spdk_nvmf_subsystem_state requested_state,
			    spdk_nvmf_subsystem_state_change_done cb_fn,
			    void *cb_arg)
{
	struct nvmf_subsystem_state_change_ctx *ctx;
	struct spdk_thread *thread;

	thread = spdk_get_thread();
	if (thread == NULL) {
		return -EINVAL;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		return -ENOMEM;
	}

	ctx->subsystem = subsystem;
	ctx->nsid = nsid;
	ctx->requested_state = requested_state;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->thread = thread;

	pthread_mutex_lock(&subsystem->mutex);
	TAILQ_INSERT_TAIL(&subsystem->state_changes, ctx, link);
	if (ctx != TAILQ_FIRST(&subsystem->state_changes)) {
		/* Another request is in flight; this one runs when it completes. */
		pthread_mutex_unlock(&subsystem->mutex);
		return 0;
	}
	pthread_mutex_unlock(&subsystem->mutex);

	nvmf_subsystem_do_state_change(ctx);

	return 0;
}

/* Transition the subsystem to ACTIVE. */
int
spdk_nvmf_subsystem_start(struct spdk_nvmf_subsystem *subsystem,
			  spdk_nvmf_subsystem_state_change_done cb_fn,
			  void *cb_arg)
{
	return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg);
}

/* Transition the subsystem to INACTIVE. */
int
spdk_nvmf_subsystem_stop(struct spdk_nvmf_subsystem *subsystem,
spdk_nvmf_subsystem_state_change_done cb_fn, 798 void *cb_arg) 799 { 800 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_INACTIVE, cb_fn, cb_arg); 801 } 802 803 int 804 spdk_nvmf_subsystem_pause(struct spdk_nvmf_subsystem *subsystem, 805 uint32_t nsid, 806 spdk_nvmf_subsystem_state_change_done cb_fn, 807 void *cb_arg) 808 { 809 return nvmf_subsystem_state_change(subsystem, nsid, SPDK_NVMF_SUBSYSTEM_PAUSED, cb_fn, cb_arg); 810 } 811 812 int 813 spdk_nvmf_subsystem_resume(struct spdk_nvmf_subsystem *subsystem, 814 spdk_nvmf_subsystem_state_change_done cb_fn, 815 void *cb_arg) 816 { 817 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg); 818 } 819 820 struct spdk_nvmf_subsystem * 821 spdk_nvmf_subsystem_get_first(struct spdk_nvmf_tgt *tgt) 822 { 823 return RB_MIN(subsystem_tree, &tgt->subsystems); 824 } 825 826 struct spdk_nvmf_subsystem * 827 spdk_nvmf_subsystem_get_next(struct spdk_nvmf_subsystem *subsystem) 828 { 829 if (!subsystem) { 830 return NULL; 831 } 832 833 return RB_NEXT(subsystem_tree, &tgt->subsystems, subsystem); 834 } 835 836 static int 837 nvmf_ns_add_host(struct spdk_nvmf_ns *ns, const char *hostnqn) 838 { 839 struct spdk_nvmf_host *host; 840 841 host = calloc(1, sizeof(*host)); 842 if (!host) { 843 return -ENOMEM; 844 } 845 snprintf(host->nqn, sizeof(host->nqn), "%s", hostnqn); 846 TAILQ_INSERT_HEAD(&ns->hosts, host, link); 847 return 0; 848 } 849 850 static void 851 nvmf_ns_remove_host(struct spdk_nvmf_ns *ns, struct spdk_nvmf_host *host) 852 { 853 TAILQ_REMOVE(&ns->hosts, host, link); 854 free(host); 855 } 856 857 static void 858 _async_event_ns_notice(void *_ctrlr) 859 { 860 struct spdk_nvmf_ctrlr *ctrlr = _ctrlr; 861 862 nvmf_ctrlr_async_event_ns_notice(ctrlr); 863 } 864 865 static void 866 send_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr) 867 { 868 spdk_thread_send_msg(ctrlr->thread, _async_event_ns_notice, ctrlr); 869 } 870 871 static int 872 nvmf_ns_visible(struct 
		spdk_nvmf_subsystem *subsystem,
		uint32_t nsid,
		const char *hostnqn,
		bool visible)
{
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_host *host;
	int rc;

	/* Visibility may only change while the subsystem is quiesced. */
	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		assert(false);
		return -1;
	}

	if (hostnqn == NULL || !nvmf_nqn_is_valid(hostnqn)) {
		return -EINVAL;
	}

	if (nsid == 0 || nsid > subsystem->max_nsid) {
		return -EINVAL;
	}

	ns = subsystem->ns[nsid - 1];
	if (!ns) {
		return -ENOENT;
	}

	if (ns->always_visible) {
		/* No individual host control */
		return -EPERM;
	}

	/* Save host info to use for any future controllers. */
	host = nvmf_ns_find_host(ns, hostnqn);
	if (visible && host == NULL) {
		rc = nvmf_ns_add_host(ns, hostnqn);
		if (rc) {
			return rc;
		}
	} else if (!visible && host != NULL) {
		nvmf_ns_remove_host(ns, host);
	}

	/* Also apply to existing controllers.
	 */
	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
		/* Skip controllers for other hosts or ones already in the
		 * desired visibility state. */
		if (strcmp(hostnqn, ctrlr->hostnqn) ||
		    spdk_bit_array_get(ctrlr->visible_ns, nsid - 1) == visible) {
			continue;
		}
		if (visible) {
			spdk_bit_array_set(ctrlr->visible_ns, nsid - 1);
		} else {
			spdk_bit_array_clear(ctrlr->visible_ns, nsid - 1);
		}
		send_async_event_ns_notice(ctrlr);
		nvmf_ctrlr_ns_changed(ctrlr, nsid);
	}

	return 0;
}

/* Make namespace 'nsid' visible to 'hostnqn'. 'flags' is currently only traced. */
int
spdk_nvmf_ns_add_host(struct spdk_nvmf_subsystem *subsystem,
		      uint32_t nsid,
		      const char *hostnqn,
		      uint32_t flags)
{
	SPDK_DTRACE_PROBE4(spdk_nvmf_ns_add_host,
			   subsystem->subnqn,
			   nsid,
			   hostnqn,
			   flags);
	return nvmf_ns_visible(subsystem, nsid, hostnqn, true);
}

/* Hide namespace 'nsid' from 'hostnqn'. 'flags' is currently only traced. */
int
spdk_nvmf_ns_remove_host(struct spdk_nvmf_subsystem *subsystem,
			 uint32_t nsid,
			 const char *hostnqn,
			 uint32_t flags)
{
	SPDK_DTRACE_PROBE4(spdk_nvmf_ns_remove_host,
			   subsystem->subnqn,
			   nsid,
			   hostnqn,
			   flags);
	return nvmf_ns_visible(subsystem, nsid, hostnqn, false);
}

/* Must hold subsystem->mutex while calling this function */
static struct spdk_nvmf_host *
nvmf_subsystem_find_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	struct spdk_nvmf_host *host = NULL;

	TAILQ_FOREACH(host, &subsystem->hosts, link) {
		if (strcmp(hostnqn, host->nqn) == 0) {
			return host;
		}
	}

	return NULL;
}

/*
 * Allow 'hostnqn' to connect to this subsystem, with optional DH-HMAC-CHAP
 * keys and transport-specific params carried in 'opts'. Notifies every
 * transport and refreshes the discovery log.
 */
int
spdk_nvmf_subsystem_add_host_ext(struct spdk_nvmf_subsystem *subsystem,
				 const char *hostnqn, struct spdk_nvmf_host_opts *opts)
{
	struct spdk_nvmf_host *host;
	struct spdk_nvmf_transport *transport;
	struct spdk_key *key;
	int rc;

	if (!nvmf_nqn_is_valid(hostnqn)) {
		return -EINVAL;
	}

	pthread_mutex_lock(&subsystem->mutex);

	if (nvmf_subsystem_find_host(subsystem, hostnqn)) {
		/* This subsystem already allows the specified host.
		 */
		pthread_mutex_unlock(&subsystem->mutex);
		return -EINVAL;
	}

	host = calloc(1, sizeof(*host));
	if (!host) {
		pthread_mutex_unlock(&subsystem->mutex);
		return -ENOMEM;
	}

	/* Duplicate the DH-HMAC-CHAP keys (if provided) so the host entry owns
	 * its own references; nvmf_host_free() releases them. */
	key = SPDK_GET_FIELD(opts, dhchap_key, NULL);
	if (key != NULL) {
		if (!nvmf_auth_is_supported()) {
			SPDK_ERRLOG("NVMe in-band authentication is unsupported\n");
			pthread_mutex_unlock(&subsystem->mutex);
			nvmf_host_free(host);
			return -EINVAL;
		}
		host->dhchap_key = spdk_key_dup(key);
		if (host->dhchap_key == NULL) {
			pthread_mutex_unlock(&subsystem->mutex);
			nvmf_host_free(host);
			return -EINVAL;
		}
		key = SPDK_GET_FIELD(opts, dhchap_ctrlr_key, NULL);
		if (key != NULL) {
			host->dhchap_ctrlr_key = spdk_key_dup(key);
			if (host->dhchap_ctrlr_key == NULL) {
				pthread_mutex_unlock(&subsystem->mutex);
				nvmf_host_free(host);
				return -EINVAL;
			}
		}
	} else if (SPDK_GET_FIELD(opts, dhchap_ctrlr_key, NULL) != NULL) {
		SPDK_ERRLOG("DH-HMAC-CHAP controller key requires host key to be set\n");
		pthread_mutex_unlock(&subsystem->mutex);
		nvmf_host_free(host);
		return -EINVAL;
	}

	snprintf(host->nqn, sizeof(host->nqn), "%s", hostnqn);

	SPDK_DTRACE_PROBE2(nvmf_subsystem_add_host, subsystem->subnqn, host->nqn);

	TAILQ_INSERT_HEAD(&subsystem->hosts, host, link);

	if (!TAILQ_EMPTY(&subsystem->listeners)) {
		nvmf_update_discovery_log(subsystem->tgt, hostnqn);
	}

	/* Let each transport react to the new host. */
	for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
	     transport = spdk_nvmf_transport_get_next(transport)) {
		if (transport->ops->subsystem_add_host) {
			rc = transport->ops->subsystem_add_host(transport, subsystem, hostnqn,
								SPDK_GET_FIELD(opts, params, NULL));
			if (rc) {
				SPDK_ERRLOG("Unable to add host to %s transport\n", transport->ops->name);
				/* Remove this host from all transports we've managed to
add it to. */ 1053 pthread_mutex_unlock(&subsystem->mutex); 1054 spdk_nvmf_subsystem_remove_host(subsystem, hostnqn); 1055 return rc; 1056 } 1057 } 1058 } 1059 1060 pthread_mutex_unlock(&subsystem->mutex); 1061 1062 return 0; 1063 } 1064 1065 int 1066 spdk_nvmf_subsystem_add_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn, 1067 const struct spdk_json_val *params) 1068 { 1069 struct spdk_nvmf_host_opts opts = {}; 1070 1071 opts.size = SPDK_SIZEOF(&opts, params); 1072 opts.params = params; 1073 1074 return spdk_nvmf_subsystem_add_host_ext(subsystem, hostnqn, &opts); 1075 } 1076 1077 int 1078 spdk_nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn) 1079 { 1080 struct spdk_nvmf_host *host; 1081 struct spdk_nvmf_transport *transport; 1082 1083 pthread_mutex_lock(&subsystem->mutex); 1084 1085 host = nvmf_subsystem_find_host(subsystem, hostnqn); 1086 if (host == NULL) { 1087 pthread_mutex_unlock(&subsystem->mutex); 1088 return -ENOENT; 1089 } 1090 1091 SPDK_DTRACE_PROBE2(nvmf_subsystem_remove_host, subsystem->subnqn, host->nqn); 1092 1093 nvmf_subsystem_remove_host(subsystem, host); 1094 1095 if (!TAILQ_EMPTY(&subsystem->listeners)) { 1096 nvmf_update_discovery_log(subsystem->tgt, hostnqn); 1097 } 1098 1099 for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport; 1100 transport = spdk_nvmf_transport_get_next(transport)) { 1101 if (transport->ops->subsystem_remove_host) { 1102 transport->ops->subsystem_remove_host(transport, subsystem, hostnqn); 1103 } 1104 } 1105 1106 pthread_mutex_unlock(&subsystem->mutex); 1107 1108 return 0; 1109 } 1110 1111 struct nvmf_subsystem_disconnect_host_ctx { 1112 struct spdk_nvmf_subsystem *subsystem; 1113 char *hostnqn; 1114 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn; 1115 void *cb_arg; 1116 }; 1117 1118 static void 1119 nvmf_subsystem_disconnect_host_fini(struct spdk_io_channel_iter *i, int status) 1120 { 1121 struct nvmf_subsystem_disconnect_host_ctx *ctx; 1122 1123 ctx 
= spdk_io_channel_iter_get_ctx(i); 1124 1125 if (ctx->cb_fn) { 1126 ctx->cb_fn(ctx->cb_arg, status); 1127 } 1128 free(ctx->hostnqn); 1129 free(ctx); 1130 } 1131 1132 static void 1133 nvmf_subsystem_disconnect_qpairs_by_host(struct spdk_io_channel_iter *i) 1134 { 1135 struct nvmf_subsystem_disconnect_host_ctx *ctx; 1136 struct spdk_nvmf_poll_group *group; 1137 struct spdk_io_channel *ch; 1138 struct spdk_nvmf_qpair *qpair, *tmp_qpair; 1139 struct spdk_nvmf_ctrlr *ctrlr; 1140 1141 ctx = spdk_io_channel_iter_get_ctx(i); 1142 ch = spdk_io_channel_iter_get_channel(i); 1143 group = spdk_io_channel_get_ctx(ch); 1144 1145 TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) { 1146 ctrlr = qpair->ctrlr; 1147 1148 if (ctrlr == NULL || ctrlr->subsys != ctx->subsystem) { 1149 continue; 1150 } 1151 1152 if (strncmp(ctrlr->hostnqn, ctx->hostnqn, sizeof(ctrlr->hostnqn)) == 0) { 1153 /* Right now this does not wait for the queue pairs to actually disconnect. */ 1154 spdk_nvmf_qpair_disconnect(qpair); 1155 } 1156 } 1157 spdk_for_each_channel_continue(i, 0); 1158 } 1159 1160 int 1161 spdk_nvmf_subsystem_disconnect_host(struct spdk_nvmf_subsystem *subsystem, 1162 const char *hostnqn, 1163 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, 1164 void *cb_arg) 1165 { 1166 struct nvmf_subsystem_disconnect_host_ctx *ctx; 1167 1168 ctx = calloc(1, sizeof(struct nvmf_subsystem_disconnect_host_ctx)); 1169 if (ctx == NULL) { 1170 return -ENOMEM; 1171 } 1172 1173 ctx->hostnqn = strdup(hostnqn); 1174 if (ctx->hostnqn == NULL) { 1175 free(ctx); 1176 return -ENOMEM; 1177 } 1178 1179 ctx->subsystem = subsystem; 1180 ctx->cb_fn = cb_fn; 1181 ctx->cb_arg = cb_arg; 1182 1183 spdk_for_each_channel(subsystem->tgt, nvmf_subsystem_disconnect_qpairs_by_host, ctx, 1184 nvmf_subsystem_disconnect_host_fini); 1185 1186 return 0; 1187 } 1188 1189 int 1190 spdk_nvmf_subsystem_set_allow_any_host(struct spdk_nvmf_subsystem *subsystem, bool allow_any_host) 1191 { 1192 pthread_mutex_lock(&subsystem->mutex); 1193 
	subsystem->allow_any_host = allow_any_host;
	/* A change in host access policy is visible through the discovery service,
	 * so regenerate the discovery log if anyone could be listening. */
	if (!TAILQ_EMPTY(&subsystem->listeners)) {
		nvmf_update_discovery_log(subsystem->tgt, NULL);
	}
	pthread_mutex_unlock(&subsystem->mutex);

	return 0;
}

/* Return whether any host may connect to this subsystem, reading the flag
 * under the subsystem mutex. */
bool
spdk_nvmf_subsystem_get_allow_any_host(const struct spdk_nvmf_subsystem *subsystem)
{
	bool allow_any_host;
	struct spdk_nvmf_subsystem *sub;

	/* Technically, taking the mutex modifies data in the subsystem. But the const
	 * is still important to convey that this doesn't mutate any other data. Cast
	 * it away to work around this. */
	sub = (struct spdk_nvmf_subsystem *)subsystem;

	pthread_mutex_lock(&sub->mutex);
	allow_any_host = sub->allow_any_host;
	pthread_mutex_unlock(&sub->mutex);

	return allow_any_host;
}

/* Check whether the given host NQN is permitted to connect: either the
 * subsystem allows any host, or the NQN is in its allowed-host list.
 * A NULL hostnqn is never allowed. */
bool
spdk_nvmf_subsystem_host_allowed(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	bool allowed;

	if (!hostnqn) {
		return false;
	}

	pthread_mutex_lock(&subsystem->mutex);

	if (subsystem->allow_any_host) {
		pthread_mutex_unlock(&subsystem->mutex);
		return true;
	}

	allowed = nvmf_subsystem_find_host(subsystem, hostnqn) != NULL;
	pthread_mutex_unlock(&subsystem->mutex);

	return allowed;
}

/* Return true if the host entry exists and has a DH-HMAC-CHAP key configured,
 * i.e. in-band authentication is required for this host. */
bool
nvmf_subsystem_host_auth_required(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	struct spdk_nvmf_host *host;
	bool status;

	pthread_mutex_lock(&subsystem->mutex);
	host = nvmf_subsystem_find_host(subsystem, hostnqn);
	status = host != NULL && host->dhchap_key != NULL;
	pthread_mutex_unlock(&subsystem->mutex);

	return status;
}

/* Look up the host's DH-HMAC-CHAP key (host or controller key, per `type`)
 * and return a duplicated reference the caller must release; NULL if the
 * host or the requested key is not present. */
struct spdk_key *
nvmf_subsystem_get_dhchap_key(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn,
			      enum nvmf_auth_key_type type)
{
	struct spdk_nvmf_host *host;
	struct spdk_key *key = NULL;

	pthread_mutex_lock(&subsystem->mutex);
	host = nvmf_subsystem_find_host(subsystem, hostnqn);
	if (host != NULL) {
		switch (type) {
		case NVMF_AUTH_KEY_HOST:
			key = host->dhchap_key;
			break;
		case NVMF_AUTH_KEY_CTRLR:
			key = host->dhchap_ctrlr_key;
			break;
		}
		/* Duplicate under the lock so the caller owns an independent
		 * reference even if the host entry is later removed. */
		if (key != NULL) {
			key = spdk_key_dup(key);
		}
	}
	pthread_mutex_unlock(&subsystem->mutex);

	return key;
}

/* Return the first host in the subsystem's allowed-host list, or NULL. */
struct spdk_nvmf_host *
spdk_nvmf_subsystem_get_first_host(struct spdk_nvmf_subsystem *subsystem)
{
	return TAILQ_FIRST(&subsystem->hosts);
}


/* Return the host following prev_host in the allowed-host list, or NULL. */
struct spdk_nvmf_host *
spdk_nvmf_subsystem_get_next_host(struct spdk_nvmf_subsystem *subsystem,
				  struct spdk_nvmf_host *prev_host)
{
	return TAILQ_NEXT(prev_host, link);
}

/* Accessor for a host entry's NQN string. */
const char *
spdk_nvmf_host_get_nqn(const struct spdk_nvmf_host *host)
{
	return host->nqn;
}

/* Find the subsystem listener matching the given transport ID, or NULL.
 * Comparison uses spdk_nvme_transport_id_compare(). */
struct spdk_nvmf_subsystem_listener *
nvmf_subsystem_find_listener(struct spdk_nvmf_subsystem *subsystem,
			     const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_subsystem_listener *listener;

	TAILQ_FOREACH(listener, &subsystem->listeners, link) {
		if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) {
			return listener;
		}
	}

	return NULL;
}

/**
 * Function to be called once the target is listening.
 *
 * \param ctx Context argument passed to this function.
 * \param status 0 if it completed successfully, or negative errno if it failed.
1323 */ 1324 static void 1325 _nvmf_subsystem_add_listener_done(void *ctx, int status) 1326 { 1327 struct spdk_nvmf_subsystem_listener *listener = ctx; 1328 1329 if (status) { 1330 listener->cb_fn(listener->cb_arg, status); 1331 free(listener); 1332 return; 1333 } 1334 1335 TAILQ_INSERT_HEAD(&listener->subsystem->listeners, listener, link); 1336 nvmf_update_discovery_log(listener->subsystem->tgt, NULL); 1337 listener->cb_fn(listener->cb_arg, status); 1338 } 1339 1340 void 1341 spdk_nvmf_subsystem_listener_opts_init(struct spdk_nvmf_listener_opts *opts, size_t size) 1342 { 1343 if (opts == NULL) { 1344 SPDK_ERRLOG("opts should not be NULL\n"); 1345 assert(false); 1346 return; 1347 } 1348 if (size == 0) { 1349 SPDK_ERRLOG("size should not be zero\n"); 1350 assert(false); 1351 return; 1352 } 1353 1354 memset(opts, 0, size); 1355 opts->opts_size = size; 1356 1357 #define FIELD_OK(field) \ 1358 offsetof(struct spdk_nvmf_listener_opts, field) + sizeof(opts->field) <= size 1359 1360 #define SET_FIELD(field, value) \ 1361 if (FIELD_OK(field)) { \ 1362 opts->field = value; \ 1363 } \ 1364 1365 SET_FIELD(secure_channel, false); 1366 SET_FIELD(ana_state, SPDK_NVME_ANA_OPTIMIZED_STATE); 1367 1368 #undef FIELD_OK 1369 #undef SET_FIELD 1370 } 1371 1372 static int 1373 listener_opts_copy(struct spdk_nvmf_listener_opts *src, struct spdk_nvmf_listener_opts *dst) 1374 { 1375 if (src->opts_size == 0) { 1376 SPDK_ERRLOG("source structure size should not be zero\n"); 1377 assert(false); 1378 return -EINVAL; 1379 } 1380 1381 memset(dst, 0, sizeof(*dst)); 1382 dst->opts_size = src->opts_size; 1383 1384 #define FIELD_OK(field) \ 1385 offsetof(struct spdk_nvmf_listener_opts, field) + sizeof(src->field) <= src->opts_size 1386 1387 #define SET_FIELD(field) \ 1388 if (FIELD_OK(field)) { \ 1389 dst->field = src->field; \ 1390 } \ 1391 1392 SET_FIELD(secure_channel); 1393 SET_FIELD(ana_state); 1394 /* We should not remove this statement, but need to update the assert statement 1395 * if we add 
a new field, and also add a corresponding SET_FIELD statement. */ 1396 SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_listener_opts) == 16, "Incorrect size"); 1397 1398 #undef SET_FIELD 1399 #undef FIELD_OK 1400 1401 return 0; 1402 } 1403 1404 static void 1405 _nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem, 1406 struct spdk_nvme_transport_id *trid, 1407 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, 1408 void *cb_arg, struct spdk_nvmf_listener_opts *opts) 1409 { 1410 struct spdk_nvmf_transport *transport; 1411 struct spdk_nvmf_subsystem_listener *listener; 1412 struct spdk_nvmf_listener *tr_listener; 1413 uint32_t i; 1414 uint32_t id; 1415 int rc = 0; 1416 1417 assert(cb_fn != NULL); 1418 1419 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 1420 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 1421 cb_fn(cb_arg, -EAGAIN); 1422 return; 1423 } 1424 1425 if (nvmf_subsystem_find_listener(subsystem, trid)) { 1426 /* Listener already exists in this subsystem */ 1427 cb_fn(cb_arg, 0); 1428 return; 1429 } 1430 1431 transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, trid->trstring); 1432 if (!transport) { 1433 SPDK_ERRLOG("Unable to find %s transport. 
The transport must be created first also make sure it is properly registered.\n", 1434 trid->trstring); 1435 cb_fn(cb_arg, -EINVAL); 1436 return; 1437 } 1438 1439 tr_listener = nvmf_transport_find_listener(transport, trid); 1440 if (!tr_listener) { 1441 SPDK_ERRLOG("Cannot find transport listener for %s\n", trid->traddr); 1442 cb_fn(cb_arg, -EINVAL); 1443 return; 1444 } 1445 1446 listener = calloc(1, sizeof(*listener)); 1447 if (!listener) { 1448 cb_fn(cb_arg, -ENOMEM); 1449 return; 1450 } 1451 1452 listener->trid = &tr_listener->trid; 1453 listener->transport = transport; 1454 listener->cb_fn = cb_fn; 1455 listener->cb_arg = cb_arg; 1456 listener->subsystem = subsystem; 1457 listener->ana_state = calloc(subsystem->max_nsid, sizeof(enum spdk_nvme_ana_state)); 1458 if (!listener->ana_state) { 1459 free(listener); 1460 cb_fn(cb_arg, -ENOMEM); 1461 return; 1462 } 1463 1464 spdk_nvmf_subsystem_listener_opts_init(&listener->opts, sizeof(listener->opts)); 1465 if (opts != NULL) { 1466 rc = listener_opts_copy(opts, &listener->opts); 1467 if (rc) { 1468 SPDK_ERRLOG("Unable to copy listener options\n"); 1469 free(listener->ana_state); 1470 free(listener); 1471 cb_fn(cb_arg, -EINVAL); 1472 return; 1473 } 1474 } 1475 1476 id = spdk_bit_array_find_first_clear(subsystem->used_listener_ids, 0); 1477 if (id == UINT32_MAX) { 1478 SPDK_ERRLOG("Cannot add any more listeners\n"); 1479 free(listener->ana_state); 1480 free(listener); 1481 cb_fn(cb_arg, -EINVAL); 1482 return; 1483 } 1484 1485 spdk_bit_array_set(subsystem->used_listener_ids, id); 1486 listener->id = id; 1487 1488 for (i = 0; i < subsystem->max_nsid; i++) { 1489 listener->ana_state[i] = listener->opts.ana_state; 1490 } 1491 1492 if (transport->ops->listen_associate != NULL) { 1493 rc = transport->ops->listen_associate(transport, subsystem, trid); 1494 } 1495 1496 SPDK_DTRACE_PROBE4(nvmf_subsystem_add_listener, subsystem->subnqn, listener->trid->trtype, 1497 listener->trid->traddr, listener->trid->trsvcid); 1498 1499 
_nvmf_subsystem_add_listener_done(listener, rc); 1500 } 1501 1502 void 1503 spdk_nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem, 1504 struct spdk_nvme_transport_id *trid, 1505 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, 1506 void *cb_arg) 1507 { 1508 _nvmf_subsystem_add_listener(subsystem, trid, cb_fn, cb_arg, NULL); 1509 } 1510 1511 void 1512 spdk_nvmf_subsystem_add_listener_ext(struct spdk_nvmf_subsystem *subsystem, 1513 struct spdk_nvme_transport_id *trid, 1514 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, 1515 void *cb_arg, struct spdk_nvmf_listener_opts *opts) 1516 { 1517 _nvmf_subsystem_add_listener(subsystem, trid, cb_fn, cb_arg, opts); 1518 } 1519 1520 int 1521 spdk_nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem, 1522 const struct spdk_nvme_transport_id *trid) 1523 { 1524 struct spdk_nvmf_subsystem_listener *listener; 1525 1526 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 1527 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 1528 return -EAGAIN; 1529 } 1530 1531 listener = nvmf_subsystem_find_listener(subsystem, trid); 1532 if (listener == NULL) { 1533 return -ENOENT; 1534 } 1535 1536 SPDK_DTRACE_PROBE4(nvmf_subsystem_remove_listener, subsystem->subnqn, listener->trid->trtype, 1537 listener->trid->traddr, listener->trid->trsvcid); 1538 1539 _nvmf_subsystem_remove_listener(subsystem, listener, false); 1540 1541 return 0; 1542 } 1543 1544 void 1545 nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem, 1546 bool stop) 1547 { 1548 struct spdk_nvmf_subsystem_listener *listener, *listener_tmp; 1549 1550 TAILQ_FOREACH_SAFE(listener, &subsystem->listeners, link, listener_tmp) { 1551 _nvmf_subsystem_remove_listener(subsystem, listener, stop); 1552 } 1553 } 1554 1555 bool 1556 spdk_nvmf_subsystem_listener_allowed(struct spdk_nvmf_subsystem *subsystem, 1557 const struct spdk_nvme_transport_id *trid) 1558 { 1559 struct spdk_nvmf_subsystem_listener *listener; 1560 1561 TAILQ_FOREACH(listener, 
		      &subsystem->listeners, link) {
		if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) {
			return true;
		}
	}

	/* Deprecated escape hatch: the discovery subsystem accepts connections on
	 * any listener, even ones never added to it. */
	if (!strcmp(subsystem->subnqn, SPDK_NVMF_DISCOVERY_NQN)) {
		SPDK_WARNLOG("Allowing connection to discovery subsystem on %s/%s/%s, "
			     "even though this listener was not added to the discovery "
			     "subsystem. This behavior is deprecated and will be removed "
			     "in a future release.\n",
			     spdk_nvme_transport_id_trtype_str(trid->trtype), trid->traddr, trid->trsvcid);
		return true;
	}

	return false;
}

/* Return the first listener attached to the subsystem, or NULL. */
struct spdk_nvmf_subsystem_listener *
spdk_nvmf_subsystem_get_first_listener(struct spdk_nvmf_subsystem *subsystem)
{
	return TAILQ_FIRST(&subsystem->listeners);
}

/* Return the listener following prev_listener, or NULL at the end. */
struct spdk_nvmf_subsystem_listener *
spdk_nvmf_subsystem_get_next_listener(struct spdk_nvmf_subsystem *subsystem,
				      struct spdk_nvmf_subsystem_listener *prev_listener)
{
	return TAILQ_NEXT(prev_listener, link);
}

/* Accessor for a listener's transport ID. */
const struct spdk_nvme_transport_id *
spdk_nvmf_subsystem_listener_get_trid(struct spdk_nvmf_subsystem_listener *listener)
{
	return listener->trid;
}

/* Set whether connections are accepted on any listener (not just ones added
 * to this subsystem).  Note: unlike the host flag, this is written without
 * taking the subsystem mutex. */
void
spdk_nvmf_subsystem_allow_any_listener(struct spdk_nvmf_subsystem *subsystem,
				       bool allow_any_listener)
{
	subsystem->flags.allow_any_listener = allow_any_listener;
}

/* Return the allow-any-listener flag. */
bool
spdk_nvmf_subsystem_any_listener_allowed(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->flags.allow_any_listener;
}

/* Context carried through the per-poll-group namespace update iteration. */
struct subsystem_update_ns_ctx {
	struct spdk_nvmf_subsystem *subsystem;

	spdk_nvmf_subsystem_state_change_done cb_fn;
	void *cb_arg;
};

/* Completion for the for-each-channel namespace update: report the aggregated
 * status to the caller and release the context. */
static void
subsystem_update_ns_done(struct spdk_io_channel_iter *i, int status)
{
	struct subsystem_update_ns_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status);
1625 } 1626 free(ctx); 1627 } 1628 1629 static void 1630 subsystem_update_ns_on_pg(struct spdk_io_channel_iter *i) 1631 { 1632 int rc; 1633 struct subsystem_update_ns_ctx *ctx; 1634 struct spdk_nvmf_poll_group *group; 1635 struct spdk_nvmf_subsystem *subsystem; 1636 1637 ctx = spdk_io_channel_iter_get_ctx(i); 1638 group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i)); 1639 subsystem = ctx->subsystem; 1640 1641 rc = nvmf_poll_group_update_subsystem(group, subsystem); 1642 spdk_for_each_channel_continue(i, rc); 1643 } 1644 1645 static int 1646 nvmf_subsystem_update_ns(struct spdk_nvmf_subsystem *subsystem, 1647 spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg) 1648 { 1649 struct subsystem_update_ns_ctx *ctx; 1650 1651 ctx = calloc(1, sizeof(*ctx)); 1652 if (ctx == NULL) { 1653 SPDK_ERRLOG("Can't alloc subsystem poll group update context\n"); 1654 return -ENOMEM; 1655 } 1656 ctx->subsystem = subsystem; 1657 ctx->cb_fn = cb_fn; 1658 ctx->cb_arg = cb_arg; 1659 1660 spdk_for_each_channel(subsystem->tgt, 1661 subsystem_update_ns_on_pg, 1662 ctx, 1663 subsystem_update_ns_done); 1664 return 0; 1665 } 1666 1667 static void 1668 nvmf_subsystem_ns_changed(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid) 1669 { 1670 struct spdk_nvmf_ctrlr *ctrlr; 1671 1672 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 1673 if (nvmf_ctrlr_ns_is_visible(ctrlr, nsid)) { 1674 nvmf_ctrlr_ns_changed(ctrlr, nsid); 1675 } 1676 } 1677 } 1678 1679 static uint32_t nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns); 1680 1681 int 1682 spdk_nvmf_subsystem_remove_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid) 1683 { 1684 struct spdk_nvmf_transport *transport; 1685 struct spdk_nvmf_ns *ns; 1686 struct spdk_nvmf_host *host, *tmp; 1687 struct spdk_nvmf_ctrlr *ctrlr; 1688 1689 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 1690 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 1691 assert(false); 1692 return -1; 1693 } 1694 1695 if (nsid == 0 
|| nsid > subsystem->max_nsid) { 1696 return -1; 1697 } 1698 1699 ns = subsystem->ns[nsid - 1]; 1700 if (!ns) { 1701 return -1; 1702 } 1703 1704 subsystem->ns[nsid - 1] = NULL; 1705 1706 assert(ns->anagrpid - 1 < subsystem->max_nsid); 1707 assert(subsystem->ana_group[ns->anagrpid - 1] > 0); 1708 1709 subsystem->ana_group[ns->anagrpid - 1]--; 1710 1711 TAILQ_FOREACH_SAFE(host, &ns->hosts, link, tmp) { 1712 nvmf_ns_remove_host(ns, host); 1713 } 1714 1715 free(ns->ptpl_file); 1716 nvmf_ns_reservation_clear_all_registrants(ns); 1717 spdk_bdev_module_release_bdev(ns->bdev); 1718 spdk_bdev_close(ns->desc); 1719 free(ns); 1720 1721 if (subsystem->fdp_supported && !spdk_nvmf_subsystem_get_first_ns(subsystem)) { 1722 subsystem->fdp_supported = false; 1723 SPDK_DEBUGLOG(nvmf, "Subsystem with id: %u doesn't have FDP capability.\n", 1724 subsystem->id); 1725 } 1726 1727 for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport; 1728 transport = spdk_nvmf_transport_get_next(transport)) { 1729 if (transport->ops->subsystem_remove_ns) { 1730 transport->ops->subsystem_remove_ns(transport, subsystem, nsid); 1731 } 1732 } 1733 1734 nvmf_subsystem_ns_changed(subsystem, nsid); 1735 1736 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 1737 spdk_bit_array_clear(ctrlr->visible_ns, nsid - 1); 1738 } 1739 1740 return 0; 1741 } 1742 1743 struct subsystem_ns_change_ctx { 1744 struct spdk_nvmf_subsystem *subsystem; 1745 spdk_nvmf_subsystem_state_change_done cb_fn; 1746 uint32_t nsid; 1747 }; 1748 1749 static void 1750 _nvmf_ns_hot_remove(struct spdk_nvmf_subsystem *subsystem, 1751 void *cb_arg, int status) 1752 { 1753 struct subsystem_ns_change_ctx *ctx = cb_arg; 1754 int rc; 1755 1756 rc = spdk_nvmf_subsystem_remove_ns(subsystem, ctx->nsid); 1757 if (rc != 0) { 1758 SPDK_ERRLOG("Failed to make changes to NVME-oF subsystem with id: %u\n", subsystem->id); 1759 } 1760 1761 rc = spdk_nvmf_subsystem_resume(subsystem, NULL, NULL); 1762 if (rc != 0) { 1763 SPDK_ERRLOG("Failed to 
resume NVME-oF subsystem with id: %u\n", subsystem->id); 1764 } 1765 1766 free(ctx); 1767 } 1768 1769 static void 1770 nvmf_ns_change_msg(void *ns_ctx) 1771 { 1772 struct subsystem_ns_change_ctx *ctx = ns_ctx; 1773 int rc; 1774 1775 SPDK_DTRACE_PROBE2(nvmf_ns_change, ctx->nsid, ctx->subsystem->subnqn); 1776 1777 rc = spdk_nvmf_subsystem_pause(ctx->subsystem, ctx->nsid, ctx->cb_fn, ctx); 1778 if (rc) { 1779 if (rc == -EBUSY) { 1780 /* Try again, this is not a permanent situation. */ 1781 spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ctx); 1782 } else { 1783 free(ctx); 1784 SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n"); 1785 } 1786 } 1787 } 1788 1789 static void 1790 nvmf_ns_hot_remove(void *remove_ctx) 1791 { 1792 struct spdk_nvmf_ns *ns = remove_ctx; 1793 struct subsystem_ns_change_ctx *ns_ctx; 1794 int rc; 1795 1796 /* We have to allocate a new context because this op 1797 * is asynchronous and we could lose the ns in the middle. 1798 */ 1799 ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx)); 1800 if (!ns_ctx) { 1801 SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n"); 1802 return; 1803 } 1804 1805 ns_ctx->subsystem = ns->subsystem; 1806 ns_ctx->nsid = ns->opts.nsid; 1807 ns_ctx->cb_fn = _nvmf_ns_hot_remove; 1808 1809 rc = spdk_nvmf_subsystem_pause(ns->subsystem, ns_ctx->nsid, _nvmf_ns_hot_remove, ns_ctx); 1810 if (rc) { 1811 if (rc == -EBUSY) { 1812 /* Try again, this is not a permanent situation. 
*/ 1813 spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx); 1814 } else { 1815 SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n"); 1816 free(ns_ctx); 1817 } 1818 } 1819 } 1820 1821 static void 1822 _nvmf_ns_resize(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status) 1823 { 1824 struct subsystem_ns_change_ctx *ctx = cb_arg; 1825 1826 nvmf_subsystem_ns_changed(subsystem, ctx->nsid); 1827 if (spdk_nvmf_subsystem_resume(subsystem, NULL, NULL) != 0) { 1828 SPDK_ERRLOG("Failed to resume NVME-oF subsystem with id: %u\n", subsystem->id); 1829 } 1830 1831 free(ctx); 1832 } 1833 1834 static void 1835 nvmf_ns_resize(void *event_ctx) 1836 { 1837 struct spdk_nvmf_ns *ns = event_ctx; 1838 struct subsystem_ns_change_ctx *ns_ctx; 1839 int rc; 1840 1841 /* We have to allocate a new context because this op 1842 * is asynchronous and we could lose the ns in the middle. 1843 */ 1844 ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx)); 1845 if (!ns_ctx) { 1846 SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n"); 1847 return; 1848 } 1849 1850 ns_ctx->subsystem = ns->subsystem; 1851 ns_ctx->nsid = ns->opts.nsid; 1852 ns_ctx->cb_fn = _nvmf_ns_resize; 1853 1854 /* Specify 0 for the nsid here, because we do not need to pause the namespace. 1855 * Namespaces can only be resized bigger, so there is no need to quiesce I/O. 1856 */ 1857 rc = spdk_nvmf_subsystem_pause(ns->subsystem, 0, _nvmf_ns_resize, ns_ctx); 1858 if (rc) { 1859 if (rc == -EBUSY) { 1860 /* Try again, this is not a permanent situation. 
*/ 1861 spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx); 1862 } else { 1863 SPDK_ERRLOG("Unable to pause subsystem to process namespace resize!\n"); 1864 free(ns_ctx); 1865 } 1866 } 1867 } 1868 1869 static void 1870 nvmf_ns_event(enum spdk_bdev_event_type type, 1871 struct spdk_bdev *bdev, 1872 void *event_ctx) 1873 { 1874 SPDK_DEBUGLOG(nvmf, "Bdev event: type %d, name %s, subsystem_id %d, ns_id %d\n", 1875 type, 1876 spdk_bdev_get_name(bdev), 1877 ((struct spdk_nvmf_ns *)event_ctx)->subsystem->id, 1878 ((struct spdk_nvmf_ns *)event_ctx)->nsid); 1879 1880 switch (type) { 1881 case SPDK_BDEV_EVENT_REMOVE: 1882 nvmf_ns_hot_remove(event_ctx); 1883 break; 1884 case SPDK_BDEV_EVENT_RESIZE: 1885 nvmf_ns_resize(event_ctx); 1886 break; 1887 default: 1888 SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type); 1889 break; 1890 } 1891 } 1892 1893 void 1894 spdk_nvmf_ns_opts_get_defaults(struct spdk_nvmf_ns_opts *opts, size_t opts_size) 1895 { 1896 if (!opts) { 1897 SPDK_ERRLOG("opts should not be NULL.\n"); 1898 return; 1899 } 1900 1901 if (!opts_size) { 1902 SPDK_ERRLOG("opts_size should not be zero.\n"); 1903 return; 1904 } 1905 1906 memset(opts, 0, opts_size); 1907 opts->opts_size = opts_size; 1908 1909 #define FIELD_OK(field) \ 1910 offsetof(struct spdk_nvmf_ns_opts, field) + sizeof(opts->field) <= opts_size 1911 1912 #define SET_FIELD(field, value) \ 1913 if (FIELD_OK(field)) { \ 1914 opts->field = value; \ 1915 } \ 1916 1917 /* All current fields are set to 0 by default. 
 */
	SET_FIELD(nsid, 0);
	if (FIELD_OK(nguid)) {
		memset(opts->nguid, 0, sizeof(opts->nguid));
	}
	if (FIELD_OK(eui64)) {
		memset(opts->eui64, 0, sizeof(opts->eui64));
	}
	if (FIELD_OK(uuid)) {
		spdk_uuid_set_null(&opts->uuid);
	}
	SET_FIELD(anagrpid, 0);
	SET_FIELD(transport_specific, NULL);

#undef FIELD_OK
#undef SET_FIELD
}

/* Copy user-supplied namespace options into `opts`, honoring
 * user_opts->opts_size so only fields present in the caller's (possibly
 * older) struct are read.
 * NOTE(review): the opts_size parameter itself is not consulted here — the
 * copy is bounded solely by user_opts->opts_size; confirm callers pass
 * matching values. */
static void
nvmf_ns_opts_copy(struct spdk_nvmf_ns_opts *opts,
		  const struct spdk_nvmf_ns_opts *user_opts,
		  size_t opts_size)
{
#define FIELD_OK(field) \
	offsetof(struct spdk_nvmf_ns_opts, field) + sizeof(opts->field) <= user_opts->opts_size

#define SET_FIELD(field) \
	if (FIELD_OK(field)) { \
		opts->field = user_opts->field; \
	} \

	SET_FIELD(nsid);
	if (FIELD_OK(nguid)) {
		memcpy(opts->nguid, user_opts->nguid, sizeof(opts->nguid));
	}
	if (FIELD_OK(eui64)) {
		memcpy(opts->eui64, user_opts->eui64, sizeof(opts->eui64));
	}
	if (FIELD_OK(uuid)) {
		spdk_uuid_copy(&opts->uuid, &user_opts->uuid);
	}
	SET_FIELD(anagrpid);
	SET_FIELD(no_auto_visible);
	SET_FIELD(transport_specific);

	opts->opts_size = user_opts->opts_size;

	/* We should not remove this statement, but need to update the assert statement
	 * if we add a new field, and also add a corresponding SET_FIELD statement.
	 */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_ns_opts) == 72, "Incorrect size");

#undef FIELD_OK
#undef SET_FIELD
}

/* Dummy bdev module used to claim bdevs.
*/ 1974 static struct spdk_bdev_module ns_bdev_module = { 1975 .name = "NVMe-oF Target", 1976 }; 1977 1978 static int nvmf_ns_reservation_update(const struct spdk_nvmf_ns *ns, 1979 const struct spdk_nvmf_reservation_info *info); 1980 static int nvmf_ns_reservation_load(const struct spdk_nvmf_ns *ns, 1981 struct spdk_nvmf_reservation_info *info); 1982 static int nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, 1983 struct spdk_nvmf_reservation_info *info); 1984 1985 bool 1986 nvmf_subsystem_zone_append_supported(struct spdk_nvmf_subsystem *subsystem) 1987 { 1988 struct spdk_nvmf_ns *ns; 1989 1990 for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); 1991 ns != NULL; 1992 ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) { 1993 if (spdk_bdev_is_zoned(ns->bdev) && 1994 spdk_bdev_io_type_supported(ns->bdev, SPDK_BDEV_IO_TYPE_ZONE_APPEND)) { 1995 return true; 1996 } 1997 } 1998 1999 return false; 2000 } 2001 2002 uint32_t 2003 spdk_nvmf_subsystem_add_ns_ext(struct spdk_nvmf_subsystem *subsystem, const char *bdev_name, 2004 const struct spdk_nvmf_ns_opts *user_opts, size_t opts_size, 2005 const char *ptpl_file) 2006 { 2007 struct spdk_nvmf_transport *transport; 2008 struct spdk_nvmf_ns_opts opts; 2009 struct spdk_nvmf_ns *ns, *first_ns; 2010 struct spdk_nvmf_ctrlr *ctrlr; 2011 struct spdk_nvmf_reservation_info info = {0}; 2012 int rc; 2013 bool zone_append_supported; 2014 uint64_t max_zone_append_size_kib; 2015 2016 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 2017 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 2018 return 0; 2019 } 2020 2021 spdk_nvmf_ns_opts_get_defaults(&opts, sizeof(opts)); 2022 if (user_opts) { 2023 nvmf_ns_opts_copy(&opts, user_opts, opts_size); 2024 } 2025 2026 if (opts.nsid == SPDK_NVME_GLOBAL_NS_TAG) { 2027 SPDK_ERRLOG("Invalid NSID %" PRIu32 "\n", opts.nsid); 2028 return 0; 2029 } 2030 2031 if (opts.nsid == 0) { 2032 /* 2033 * NSID not specified - find a free index. 
2034 * 2035 * If no free slots are found, return error. 2036 */ 2037 for (opts.nsid = 1; opts.nsid <= subsystem->max_nsid; opts.nsid++) { 2038 if (_nvmf_subsystem_get_ns(subsystem, opts.nsid) == NULL) { 2039 break; 2040 } 2041 } 2042 if (opts.nsid > subsystem->max_nsid) { 2043 SPDK_ERRLOG("No free namespace slot available in the subsystem\n"); 2044 return 0; 2045 } 2046 } 2047 2048 if (opts.nsid > subsystem->max_nsid) { 2049 SPDK_ERRLOG("NSID greater than maximum not allowed\n"); 2050 return 0; 2051 } 2052 2053 if (_nvmf_subsystem_get_ns(subsystem, opts.nsid)) { 2054 SPDK_ERRLOG("Requested NSID %" PRIu32 " already in use\n", opts.nsid); 2055 return 0; 2056 } 2057 2058 if (opts.anagrpid == 0) { 2059 opts.anagrpid = opts.nsid; 2060 } 2061 2062 if (opts.anagrpid > subsystem->max_nsid) { 2063 SPDK_ERRLOG("ANAGRPID greater than maximum NSID not allowed\n"); 2064 return 0; 2065 } 2066 2067 ns = calloc(1, sizeof(*ns)); 2068 if (ns == NULL) { 2069 SPDK_ERRLOG("Namespace allocation failed\n"); 2070 return 0; 2071 } 2072 2073 TAILQ_INIT(&ns->hosts); 2074 ns->always_visible = !opts.no_auto_visible; 2075 if (ns->always_visible) { 2076 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 2077 spdk_bit_array_set(ctrlr->visible_ns, opts.nsid - 1); 2078 } 2079 } 2080 2081 rc = spdk_bdev_open_ext(bdev_name, true, nvmf_ns_event, ns, &ns->desc); 2082 if (rc != 0) { 2083 SPDK_ERRLOG("Subsystem %s: bdev %s cannot be opened, error=%d\n", 2084 subsystem->subnqn, bdev_name, rc); 2085 free(ns); 2086 return 0; 2087 } 2088 2089 ns->bdev = spdk_bdev_desc_get_bdev(ns->desc); 2090 2091 if (spdk_bdev_get_md_size(ns->bdev) != 0) { 2092 if (!spdk_bdev_is_md_interleaved(ns->bdev)) { 2093 SPDK_ERRLOG("Can't attach bdev with separate metadata.\n"); 2094 spdk_bdev_close(ns->desc); 2095 free(ns); 2096 return 0; 2097 } 2098 2099 if (spdk_bdev_get_md_size(ns->bdev) > SPDK_BDEV_MAX_INTERLEAVED_MD_SIZE) { 2100 SPDK_ERRLOG("Maximum supported interleaved md size %u, current md size %u\n", 2101 
SPDK_BDEV_MAX_INTERLEAVED_MD_SIZE, spdk_bdev_get_md_size(ns->bdev)); 2102 spdk_bdev_close(ns->desc); 2103 free(ns); 2104 return 0; 2105 } 2106 } 2107 2108 rc = spdk_bdev_module_claim_bdev(ns->bdev, ns->desc, &ns_bdev_module); 2109 if (rc != 0) { 2110 spdk_bdev_close(ns->desc); 2111 free(ns); 2112 return 0; 2113 } 2114 2115 /* Cache the zcopy capability of the bdev device */ 2116 ns->zcopy = spdk_bdev_io_type_supported(ns->bdev, SPDK_BDEV_IO_TYPE_ZCOPY); 2117 2118 if (spdk_uuid_is_null(&opts.uuid)) { 2119 opts.uuid = *spdk_bdev_get_uuid(ns->bdev); 2120 } 2121 2122 /* if nguid descriptor is supported by bdev module (nvme) then uuid = nguid */ 2123 if (spdk_mem_all_zero(opts.nguid, sizeof(opts.nguid))) { 2124 SPDK_STATIC_ASSERT(sizeof(opts.nguid) == sizeof(opts.uuid), "size mismatch"); 2125 memcpy(opts.nguid, spdk_bdev_get_uuid(ns->bdev), sizeof(opts.nguid)); 2126 } 2127 2128 if (spdk_bdev_is_zoned(ns->bdev)) { 2129 SPDK_DEBUGLOG(nvmf, "The added namespace is backed by a zoned block device.\n"); 2130 ns->csi = SPDK_NVME_CSI_ZNS; 2131 2132 zone_append_supported = spdk_bdev_io_type_supported(ns->bdev, 2133 SPDK_BDEV_IO_TYPE_ZONE_APPEND); 2134 max_zone_append_size_kib = spdk_bdev_get_max_zone_append_size( 2135 ns->bdev) * spdk_bdev_get_block_size(ns->bdev); 2136 2137 if (_nvmf_subsystem_get_first_zoned_ns(subsystem) != NULL && 2138 (nvmf_subsystem_zone_append_supported(subsystem) != zone_append_supported || 2139 subsystem->max_zone_append_size_kib != max_zone_append_size_kib)) { 2140 SPDK_ERRLOG("Namespaces with different zone append support or different zone append size are not allowed.\n"); 2141 goto err; 2142 } 2143 2144 subsystem->max_zone_append_size_kib = max_zone_append_size_kib; 2145 } 2146 2147 first_ns = spdk_nvmf_subsystem_get_first_ns(subsystem); 2148 if (!first_ns) { 2149 if (spdk_bdev_get_nvme_ctratt(ns->bdev).bits.fdps) { 2150 SPDK_DEBUGLOG(nvmf, "Subsystem with id: %u has FDP capability.\n", 2151 subsystem->id); 2152 subsystem->fdp_supported = true; 2153 
} 2154 } else { 2155 if (spdk_bdev_get_nvme_ctratt(first_ns->bdev).bits.fdps != 2156 spdk_bdev_get_nvme_ctratt(ns->bdev).bits.fdps) { 2157 SPDK_ERRLOG("Subsystem with id: %u can%s FDP namespace.\n", subsystem->id, 2158 spdk_bdev_get_nvme_ctratt(first_ns->bdev).bits.fdps ? " only add" : "not add"); 2159 goto err; 2160 } 2161 } 2162 2163 ns->opts = opts; 2164 ns->subsystem = subsystem; 2165 subsystem->ns[opts.nsid - 1] = ns; 2166 ns->nsid = opts.nsid; 2167 ns->anagrpid = opts.anagrpid; 2168 subsystem->ana_group[ns->anagrpid - 1]++; 2169 TAILQ_INIT(&ns->registrants); 2170 if (ptpl_file) { 2171 ns->ptpl_file = strdup(ptpl_file); 2172 if (!ns->ptpl_file) { 2173 SPDK_ERRLOG("Namespace ns->ptpl_file allocation failed\n"); 2174 goto err; 2175 } 2176 } 2177 2178 if (nvmf_ns_is_ptpl_capable(ns)) { 2179 rc = nvmf_ns_reservation_load(ns, &info); 2180 if (rc) { 2181 SPDK_ERRLOG("Subsystem load reservation failed\n"); 2182 goto err; 2183 } 2184 2185 rc = nvmf_ns_reservation_restore(ns, &info); 2186 if (rc) { 2187 SPDK_ERRLOG("Subsystem restore reservation failed\n"); 2188 goto err; 2189 } 2190 } 2191 2192 for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport; 2193 transport = spdk_nvmf_transport_get_next(transport)) { 2194 if (transport->ops->subsystem_add_ns) { 2195 rc = transport->ops->subsystem_add_ns(transport, subsystem, ns); 2196 if (rc) { 2197 SPDK_ERRLOG("Namespace attachment is not allowed by %s transport\n", transport->ops->name); 2198 nvmf_ns_reservation_clear_all_registrants(ns); 2199 goto err; 2200 } 2201 } 2202 } 2203 2204 /* JSON value obj is freed before sending the response. Set NULL to prevent usage of dangling pointer. 
*/ 2205 ns->opts.transport_specific = NULL; 2206 2207 SPDK_DEBUGLOG(nvmf, "Subsystem %s: bdev %s assigned nsid %" PRIu32 "\n", 2208 spdk_nvmf_subsystem_get_nqn(subsystem), 2209 bdev_name, 2210 opts.nsid); 2211 2212 nvmf_subsystem_ns_changed(subsystem, opts.nsid); 2213 2214 SPDK_DTRACE_PROBE2(nvmf_subsystem_add_ns, subsystem->subnqn, ns->nsid); 2215 2216 return opts.nsid; 2217 err: 2218 subsystem->ns[opts.nsid - 1] = NULL; 2219 spdk_bdev_module_release_bdev(ns->bdev); 2220 spdk_bdev_close(ns->desc); 2221 free(ns->ptpl_file); 2222 free(ns); 2223 2224 return 0; 2225 } 2226 2227 static uint32_t 2228 nvmf_subsystem_get_next_allocated_nsid(struct spdk_nvmf_subsystem *subsystem, 2229 uint32_t prev_nsid) 2230 { 2231 uint32_t nsid; 2232 2233 if (prev_nsid >= subsystem->max_nsid) { 2234 return 0; 2235 } 2236 2237 for (nsid = prev_nsid + 1; nsid <= subsystem->max_nsid; nsid++) { 2238 if (subsystem->ns[nsid - 1]) { 2239 return nsid; 2240 } 2241 } 2242 2243 return 0; 2244 } 2245 2246 struct spdk_nvmf_ns * 2247 spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem) 2248 { 2249 uint32_t first_nsid; 2250 2251 first_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, 0); 2252 return _nvmf_subsystem_get_ns(subsystem, first_nsid); 2253 } 2254 2255 struct spdk_nvmf_ns * 2256 spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, 2257 struct spdk_nvmf_ns *prev_ns) 2258 { 2259 uint32_t next_nsid; 2260 2261 next_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, prev_ns->opts.nsid); 2262 return _nvmf_subsystem_get_ns(subsystem, next_nsid); 2263 } 2264 2265 struct spdk_nvmf_ns * 2266 spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid) 2267 { 2268 return _nvmf_subsystem_get_ns(subsystem, nsid); 2269 } 2270 2271 uint32_t 2272 spdk_nvmf_ns_get_id(const struct spdk_nvmf_ns *ns) 2273 { 2274 return ns->opts.nsid; 2275 } 2276 2277 struct spdk_bdev * 2278 spdk_nvmf_ns_get_bdev(struct spdk_nvmf_ns *ns) 2279 { 2280 return 
ns->bdev; 2281 } 2282 2283 void 2284 spdk_nvmf_ns_get_opts(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_ns_opts *opts, 2285 size_t opts_size) 2286 { 2287 memset(opts, 0, opts_size); 2288 memcpy(opts, &ns->opts, spdk_min(sizeof(ns->opts), opts_size)); 2289 } 2290 2291 const char * 2292 spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem) 2293 { 2294 return subsystem->sn; 2295 } 2296 2297 int 2298 spdk_nvmf_subsystem_set_sn(struct spdk_nvmf_subsystem *subsystem, const char *sn) 2299 { 2300 size_t len, max_len; 2301 2302 max_len = sizeof(subsystem->sn) - 1; 2303 len = strlen(sn); 2304 if (len > max_len) { 2305 SPDK_DEBUGLOG(nvmf, "Invalid sn \"%s\": length %zu > max %zu\n", 2306 sn, len, max_len); 2307 return -1; 2308 } 2309 2310 if (!nvmf_valid_ascii_string(sn, len)) { 2311 SPDK_DEBUGLOG(nvmf, "Non-ASCII sn\n"); 2312 SPDK_LOGDUMP(nvmf, "sn", sn, len); 2313 return -1; 2314 } 2315 2316 snprintf(subsystem->sn, sizeof(subsystem->sn), "%s", sn); 2317 2318 return 0; 2319 } 2320 2321 const char * 2322 spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem) 2323 { 2324 return subsystem->mn; 2325 } 2326 2327 int 2328 spdk_nvmf_subsystem_set_mn(struct spdk_nvmf_subsystem *subsystem, const char *mn) 2329 { 2330 size_t len, max_len; 2331 2332 if (mn == NULL) { 2333 mn = MODEL_NUMBER_DEFAULT; 2334 } 2335 max_len = sizeof(subsystem->mn) - 1; 2336 len = strlen(mn); 2337 if (len > max_len) { 2338 SPDK_DEBUGLOG(nvmf, "Invalid mn \"%s\": length %zu > max %zu\n", 2339 mn, len, max_len); 2340 return -1; 2341 } 2342 2343 if (!nvmf_valid_ascii_string(mn, len)) { 2344 SPDK_DEBUGLOG(nvmf, "Non-ASCII mn\n"); 2345 SPDK_LOGDUMP(nvmf, "mn", mn, len); 2346 return -1; 2347 } 2348 2349 snprintf(subsystem->mn, sizeof(subsystem->mn), "%s", mn); 2350 2351 return 0; 2352 } 2353 2354 const char * 2355 spdk_nvmf_subsystem_get_nqn(const struct spdk_nvmf_subsystem *subsystem) 2356 { 2357 return subsystem->subnqn; 2358 } 2359 2360 /* We have to use the typedef in the 
function declaration to appease astyle. */ 2361 typedef enum spdk_nvmf_subtype spdk_nvmf_subtype_t; 2362 2363 spdk_nvmf_subtype_t 2364 spdk_nvmf_subsystem_get_type(struct spdk_nvmf_subsystem *subsystem) 2365 { 2366 return subsystem->subtype; 2367 } 2368 2369 uint32_t 2370 spdk_nvmf_subsystem_get_max_nsid(struct spdk_nvmf_subsystem *subsystem) 2371 { 2372 return subsystem->max_nsid; 2373 } 2374 2375 int 2376 spdk_nvmf_subsystem_set_cntlid_range(struct spdk_nvmf_subsystem *subsystem, 2377 uint16_t min_cntlid, uint16_t max_cntlid) 2378 { 2379 if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) { 2380 return -EAGAIN; 2381 } 2382 2383 if (min_cntlid > max_cntlid) { 2384 return -EINVAL; 2385 } 2386 /* The spec reserves cntlid values in the range FFF0h to FFFFh. */ 2387 if (min_cntlid < NVMF_MIN_CNTLID || min_cntlid > NVMF_MAX_CNTLID || 2388 max_cntlid < NVMF_MIN_CNTLID || max_cntlid > NVMF_MAX_CNTLID) { 2389 return -EINVAL; 2390 } 2391 subsystem->min_cntlid = min_cntlid; 2392 subsystem->max_cntlid = max_cntlid; 2393 if (subsystem->next_cntlid < min_cntlid || subsystem->next_cntlid > max_cntlid - 1) { 2394 subsystem->next_cntlid = min_cntlid - 1; 2395 } 2396 2397 return 0; 2398 } 2399 2400 static uint16_t 2401 nvmf_subsystem_gen_cntlid(struct spdk_nvmf_subsystem *subsystem) 2402 { 2403 int count; 2404 2405 /* 2406 * In the worst case, we might have to try all CNTLID values between min_cntlid and max_cntlid 2407 * before we find one that is unused (or find that all values are in use). 2408 */ 2409 for (count = 0; count < subsystem->max_cntlid - subsystem->min_cntlid + 1; count++) { 2410 subsystem->next_cntlid++; 2411 if (subsystem->next_cntlid > subsystem->max_cntlid) { 2412 subsystem->next_cntlid = subsystem->min_cntlid; 2413 } 2414 2415 /* Check if a controller with this cntlid currently exists. 
*/ 2416 if (nvmf_subsystem_get_ctrlr(subsystem, subsystem->next_cntlid) == NULL) { 2417 /* Found unused cntlid */ 2418 return subsystem->next_cntlid; 2419 } 2420 } 2421 2422 /* All valid cntlid values are in use. */ 2423 return 0xFFFF; 2424 } 2425 2426 int 2427 nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr) 2428 { 2429 2430 if (ctrlr->dynamic_ctrlr) { 2431 ctrlr->cntlid = nvmf_subsystem_gen_cntlid(subsystem); 2432 if (ctrlr->cntlid == 0xFFFF) { 2433 /* Unable to get a cntlid */ 2434 SPDK_ERRLOG("Reached max simultaneous ctrlrs\n"); 2435 return -EBUSY; 2436 } 2437 } else if (nvmf_subsystem_get_ctrlr(subsystem, ctrlr->cntlid) != NULL) { 2438 SPDK_ERRLOG("Ctrlr with cntlid %u already exist\n", ctrlr->cntlid); 2439 return -EEXIST; 2440 } 2441 2442 TAILQ_INSERT_TAIL(&subsystem->ctrlrs, ctrlr, link); 2443 2444 SPDK_DTRACE_PROBE3(nvmf_subsystem_add_ctrlr, subsystem->subnqn, ctrlr, ctrlr->hostnqn); 2445 2446 return 0; 2447 } 2448 2449 void 2450 nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem, 2451 struct spdk_nvmf_ctrlr *ctrlr) 2452 { 2453 SPDK_DTRACE_PROBE3(nvmf_subsystem_remove_ctrlr, subsystem->subnqn, ctrlr, ctrlr->hostnqn); 2454 2455 assert(spdk_get_thread() == subsystem->thread); 2456 assert(subsystem == ctrlr->subsys); 2457 SPDK_DEBUGLOG(nvmf, "remove ctrlr %p id 0x%x from subsys %p %s\n", ctrlr, ctrlr->cntlid, subsystem, 2458 subsystem->subnqn); 2459 TAILQ_REMOVE(&subsystem->ctrlrs, ctrlr, link); 2460 } 2461 2462 struct spdk_nvmf_ctrlr * 2463 nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid) 2464 { 2465 struct spdk_nvmf_ctrlr *ctrlr; 2466 2467 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 2468 if (ctrlr->cntlid == cntlid) { 2469 return ctrlr; 2470 } 2471 } 2472 2473 return NULL; 2474 } 2475 2476 uint32_t 2477 spdk_nvmf_subsystem_get_max_namespaces(const struct spdk_nvmf_subsystem *subsystem) 2478 { 2479 return subsystem->max_nsid; 2480 } 2481 2482 uint16_t 2483 
spdk_nvmf_subsystem_get_min_cntlid(const struct spdk_nvmf_subsystem *subsystem) 2484 { 2485 return subsystem->min_cntlid; 2486 } 2487 2488 uint16_t 2489 spdk_nvmf_subsystem_get_max_cntlid(const struct spdk_nvmf_subsystem *subsystem) 2490 { 2491 return subsystem->max_cntlid; 2492 } 2493 2494 struct _nvmf_ns_registrant { 2495 uint64_t rkey; 2496 char *host_uuid; 2497 }; 2498 2499 struct _nvmf_ns_registrants { 2500 size_t num_regs; 2501 struct _nvmf_ns_registrant reg[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2502 }; 2503 2504 struct _nvmf_ns_reservation { 2505 bool ptpl_activated; 2506 enum spdk_nvme_reservation_type rtype; 2507 uint64_t crkey; 2508 char *bdev_uuid; 2509 char *holder_uuid; 2510 struct _nvmf_ns_registrants regs; 2511 }; 2512 2513 static const struct spdk_json_object_decoder nvmf_ns_pr_reg_decoders[] = { 2514 {"rkey", offsetof(struct _nvmf_ns_registrant, rkey), spdk_json_decode_uint64}, 2515 {"host_uuid", offsetof(struct _nvmf_ns_registrant, host_uuid), spdk_json_decode_string}, 2516 }; 2517 2518 static int 2519 nvmf_decode_ns_pr_reg(const struct spdk_json_val *val, void *out) 2520 { 2521 struct _nvmf_ns_registrant *reg = out; 2522 2523 return spdk_json_decode_object(val, nvmf_ns_pr_reg_decoders, 2524 SPDK_COUNTOF(nvmf_ns_pr_reg_decoders), reg); 2525 } 2526 2527 static int 2528 nvmf_decode_ns_pr_regs(const struct spdk_json_val *val, void *out) 2529 { 2530 struct _nvmf_ns_registrants *regs = out; 2531 2532 return spdk_json_decode_array(val, nvmf_decode_ns_pr_reg, regs->reg, 2533 SPDK_NVMF_MAX_NUM_REGISTRANTS, ®s->num_regs, 2534 sizeof(struct _nvmf_ns_registrant)); 2535 } 2536 2537 static const struct spdk_json_object_decoder nvmf_ns_pr_decoders[] = { 2538 {"ptpl", offsetof(struct _nvmf_ns_reservation, ptpl_activated), spdk_json_decode_bool, true}, 2539 {"rtype", offsetof(struct _nvmf_ns_reservation, rtype), spdk_json_decode_uint32, true}, 2540 {"crkey", offsetof(struct _nvmf_ns_reservation, crkey), spdk_json_decode_uint64, true}, 2541 {"bdev_uuid", offsetof(struct 
_nvmf_ns_reservation, bdev_uuid), spdk_json_decode_string}, 2542 {"holder_uuid", offsetof(struct _nvmf_ns_reservation, holder_uuid), spdk_json_decode_string, true}, 2543 {"registrants", offsetof(struct _nvmf_ns_reservation, regs), nvmf_decode_ns_pr_regs}, 2544 }; 2545 2546 static int 2547 nvmf_ns_reservation_load_json(const struct spdk_nvmf_ns *ns, 2548 struct spdk_nvmf_reservation_info *info) 2549 { 2550 size_t json_size; 2551 ssize_t values_cnt, rc; 2552 void *json = NULL, *end; 2553 struct spdk_json_val *values = NULL; 2554 struct _nvmf_ns_reservation res = {}; 2555 const char *file = ns->ptpl_file; 2556 uint32_t i; 2557 2558 /* Load all persist file contents into a local buffer */ 2559 json = spdk_posix_file_load_from_name(file, &json_size); 2560 if (!json) { 2561 SPDK_ERRLOG("Load persit file %s failed\n", file); 2562 return -ENOMEM; 2563 } 2564 2565 rc = spdk_json_parse(json, json_size, NULL, 0, &end, 0); 2566 if (rc < 0) { 2567 SPDK_NOTICELOG("Parsing JSON configuration failed (%zd)\n", rc); 2568 goto exit; 2569 } 2570 2571 values_cnt = rc; 2572 values = calloc(values_cnt, sizeof(struct spdk_json_val)); 2573 if (values == NULL) { 2574 goto exit; 2575 } 2576 2577 rc = spdk_json_parse(json, json_size, values, values_cnt, &end, 0); 2578 if (rc != values_cnt) { 2579 SPDK_ERRLOG("Parsing JSON configuration failed (%zd)\n", rc); 2580 goto exit; 2581 } 2582 2583 /* Decode json */ 2584 if (spdk_json_decode_object(values, nvmf_ns_pr_decoders, 2585 SPDK_COUNTOF(nvmf_ns_pr_decoders), 2586 &res)) { 2587 SPDK_ERRLOG("Invalid objects in the persist file %s\n", file); 2588 rc = -EINVAL; 2589 goto exit; 2590 } 2591 2592 if (res.regs.num_regs > SPDK_NVMF_MAX_NUM_REGISTRANTS) { 2593 SPDK_ERRLOG("Can only support up to %u registrants\n", SPDK_NVMF_MAX_NUM_REGISTRANTS); 2594 rc = -ERANGE; 2595 goto exit; 2596 } 2597 2598 rc = 0; 2599 info->ptpl_activated = res.ptpl_activated; 2600 info->rtype = res.rtype; 2601 info->crkey = res.crkey; 2602 snprintf(info->bdev_uuid, 
sizeof(info->bdev_uuid), "%s", res.bdev_uuid); 2603 snprintf(info->holder_uuid, sizeof(info->holder_uuid), "%s", res.holder_uuid); 2604 info->num_regs = res.regs.num_regs; 2605 for (i = 0; i < res.regs.num_regs; i++) { 2606 info->registrants[i].rkey = res.regs.reg[i].rkey; 2607 snprintf(info->registrants[i].host_uuid, sizeof(info->registrants[i].host_uuid), "%s", 2608 res.regs.reg[i].host_uuid); 2609 } 2610 2611 exit: 2612 free(json); 2613 free(values); 2614 free(res.bdev_uuid); 2615 free(res.holder_uuid); 2616 for (i = 0; i < res.regs.num_regs; i++) { 2617 free(res.regs.reg[i].host_uuid); 2618 } 2619 2620 return rc; 2621 } 2622 2623 static bool nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns); 2624 2625 static int 2626 nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info) 2627 { 2628 uint32_t i; 2629 struct spdk_nvmf_registrant *reg, *holder = NULL; 2630 struct spdk_uuid bdev_uuid, holder_uuid; 2631 bool rkey_flag = false; 2632 2633 SPDK_DEBUGLOG(nvmf, "NSID %u, PTPL %u, Number of registrants %u\n", 2634 ns->nsid, info->ptpl_activated, info->num_regs); 2635 2636 /* it's not an error */ 2637 if (!info->ptpl_activated || !info->num_regs) { 2638 return 0; 2639 } 2640 2641 /* Check info->crkey exist or not in info->registrants[i].rkey */ 2642 for (i = 0; i < info->num_regs; i++) { 2643 if (info->crkey == info->registrants[i].rkey) { 2644 rkey_flag = true; 2645 } 2646 } 2647 if (!rkey_flag && info->crkey != 0) { 2648 return -EINVAL; 2649 } 2650 2651 spdk_uuid_parse(&bdev_uuid, info->bdev_uuid); 2652 if (spdk_uuid_compare(&bdev_uuid, spdk_bdev_get_uuid(ns->bdev))) { 2653 SPDK_ERRLOG("Existing bdev UUID is not same with configuration file\n"); 2654 return -EINVAL; 2655 } 2656 2657 ns->crkey = info->crkey; 2658 ns->rtype = info->rtype; 2659 ns->ptpl_activated = info->ptpl_activated; 2660 spdk_uuid_parse(&holder_uuid, info->holder_uuid); 2661 2662 SPDK_DEBUGLOG(nvmf, "Bdev UUID %s\n", info->bdev_uuid); 2663 if 
(info->rtype) { 2664 SPDK_DEBUGLOG(nvmf, "Holder UUID %s, RTYPE %u, RKEY 0x%"PRIx64"\n", 2665 info->holder_uuid, info->rtype, info->crkey); 2666 } 2667 2668 for (i = 0; i < info->num_regs; i++) { 2669 reg = calloc(1, sizeof(*reg)); 2670 if (!reg) { 2671 return -ENOMEM; 2672 } 2673 spdk_uuid_parse(®->hostid, info->registrants[i].host_uuid); 2674 reg->rkey = info->registrants[i].rkey; 2675 TAILQ_INSERT_TAIL(&ns->registrants, reg, link); 2676 if (info->crkey != 0 && !spdk_uuid_compare(&holder_uuid, ®->hostid)) { 2677 holder = reg; 2678 } 2679 SPDK_DEBUGLOG(nvmf, "Registrant RKEY 0x%"PRIx64", Host UUID %s\n", 2680 info->registrants[i].rkey, info->registrants[i].host_uuid); 2681 } 2682 2683 if (nvmf_ns_reservation_all_registrants_type(ns)) { 2684 ns->holder = TAILQ_FIRST(&ns->registrants); 2685 } else { 2686 ns->holder = holder; 2687 } 2688 2689 return 0; 2690 } 2691 2692 static int 2693 nvmf_ns_json_write_cb(void *cb_ctx, const void *data, size_t size) 2694 { 2695 char *file = cb_ctx; 2696 size_t rc; 2697 FILE *fd; 2698 2699 fd = fopen(file, "w"); 2700 if (!fd) { 2701 SPDK_ERRLOG("Can't open file %s for write\n", file); 2702 return -ENOENT; 2703 } 2704 rc = fwrite(data, 1, size, fd); 2705 fclose(fd); 2706 2707 return rc == size ? 
0 : -1; 2708 } 2709 2710 static int 2711 nvmf_ns_reservation_update_json(const struct spdk_nvmf_ns *ns, 2712 const struct spdk_nvmf_reservation_info *info) 2713 { 2714 const char *file = ns->ptpl_file; 2715 struct spdk_json_write_ctx *w; 2716 uint32_t i; 2717 int rc = 0; 2718 2719 w = spdk_json_write_begin(nvmf_ns_json_write_cb, (void *)file, 0); 2720 if (w == NULL) { 2721 return -ENOMEM; 2722 } 2723 /* clear the configuration file */ 2724 if (!info->ptpl_activated) { 2725 goto exit; 2726 } 2727 2728 spdk_json_write_object_begin(w); 2729 spdk_json_write_named_bool(w, "ptpl", info->ptpl_activated); 2730 spdk_json_write_named_uint32(w, "rtype", info->rtype); 2731 spdk_json_write_named_uint64(w, "crkey", info->crkey); 2732 spdk_json_write_named_string(w, "bdev_uuid", info->bdev_uuid); 2733 spdk_json_write_named_string(w, "holder_uuid", info->holder_uuid); 2734 2735 spdk_json_write_named_array_begin(w, "registrants"); 2736 for (i = 0; i < info->num_regs; i++) { 2737 spdk_json_write_object_begin(w); 2738 spdk_json_write_named_uint64(w, "rkey", info->registrants[i].rkey); 2739 spdk_json_write_named_string(w, "host_uuid", info->registrants[i].host_uuid); 2740 spdk_json_write_object_end(w); 2741 } 2742 spdk_json_write_array_end(w); 2743 spdk_json_write_object_end(w); 2744 2745 exit: 2746 rc = spdk_json_write_end(w); 2747 return rc; 2748 } 2749 2750 static int 2751 nvmf_ns_update_reservation_info(struct spdk_nvmf_ns *ns) 2752 { 2753 struct spdk_nvmf_reservation_info info; 2754 struct spdk_nvmf_registrant *reg, *tmp; 2755 uint32_t i = 0; 2756 2757 assert(ns != NULL); 2758 2759 if (!ns->bdev || !nvmf_ns_is_ptpl_capable(ns)) { 2760 return 0; 2761 } 2762 2763 memset(&info, 0, sizeof(info)); 2764 spdk_uuid_fmt_lower(info.bdev_uuid, sizeof(info.bdev_uuid), spdk_bdev_get_uuid(ns->bdev)); 2765 2766 if (ns->rtype) { 2767 info.rtype = ns->rtype; 2768 info.crkey = ns->crkey; 2769 if (!nvmf_ns_reservation_all_registrants_type(ns)) { 2770 assert(ns->holder != NULL); 2771 
spdk_uuid_fmt_lower(info.holder_uuid, sizeof(info.holder_uuid), &ns->holder->hostid); 2772 } 2773 } 2774 2775 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2776 spdk_uuid_fmt_lower(info.registrants[i].host_uuid, sizeof(info.registrants[i].host_uuid), 2777 ®->hostid); 2778 info.registrants[i++].rkey = reg->rkey; 2779 } 2780 2781 info.num_regs = i; 2782 info.ptpl_activated = ns->ptpl_activated; 2783 2784 return nvmf_ns_reservation_update(ns, &info); 2785 } 2786 2787 static struct spdk_nvmf_registrant * 2788 nvmf_ns_reservation_get_registrant(struct spdk_nvmf_ns *ns, 2789 struct spdk_uuid *uuid) 2790 { 2791 struct spdk_nvmf_registrant *reg, *tmp; 2792 2793 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2794 if (!spdk_uuid_compare(®->hostid, uuid)) { 2795 return reg; 2796 } 2797 } 2798 2799 return NULL; 2800 } 2801 2802 /* Generate reservation notice log to registered HostID controllers */ 2803 static void 2804 nvmf_subsystem_gen_ctrlr_notification(struct spdk_nvmf_subsystem *subsystem, 2805 struct spdk_nvmf_ns *ns, 2806 struct spdk_uuid *hostid_list, 2807 uint32_t num_hostid, 2808 enum spdk_nvme_reservation_notification_log_page_type type) 2809 { 2810 struct spdk_nvmf_ctrlr *ctrlr; 2811 uint32_t i; 2812 2813 for (i = 0; i < num_hostid; i++) { 2814 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 2815 if (!spdk_uuid_compare(&ctrlr->hostid, &hostid_list[i])) { 2816 nvmf_ctrlr_reservation_notice_log(ctrlr, ns, type); 2817 } 2818 } 2819 } 2820 } 2821 2822 /* Get all registrants' hostid other than the controller who issued the command */ 2823 static uint32_t 2824 nvmf_ns_reservation_get_all_other_hostid(struct spdk_nvmf_ns *ns, 2825 struct spdk_uuid *hostid_list, 2826 uint32_t max_num_hostid, 2827 struct spdk_uuid *current_hostid) 2828 { 2829 struct spdk_nvmf_registrant *reg, *tmp; 2830 uint32_t num_hostid = 0; 2831 2832 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2833 if (spdk_uuid_compare(®->hostid, current_hostid)) { 2834 if (num_hostid == 
max_num_hostid) { 2835 assert(false); 2836 return max_num_hostid; 2837 } 2838 hostid_list[num_hostid++] = reg->hostid; 2839 } 2840 } 2841 2842 return num_hostid; 2843 } 2844 2845 /* Calculate the unregistered HostID list according to list 2846 * prior to execute preempt command and list after executing 2847 * preempt command. 2848 */ 2849 static uint32_t 2850 nvmf_ns_reservation_get_unregistered_hostid(struct spdk_uuid *old_hostid_list, 2851 uint32_t old_num_hostid, 2852 struct spdk_uuid *remaining_hostid_list, 2853 uint32_t remaining_num_hostid) 2854 { 2855 struct spdk_uuid temp_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 2856 uint32_t i, j, num_hostid = 0; 2857 bool found; 2858 2859 if (!remaining_num_hostid) { 2860 return old_num_hostid; 2861 } 2862 2863 for (i = 0; i < old_num_hostid; i++) { 2864 found = false; 2865 for (j = 0; j < remaining_num_hostid; j++) { 2866 if (!spdk_uuid_compare(&old_hostid_list[i], &remaining_hostid_list[j])) { 2867 found = true; 2868 break; 2869 } 2870 } 2871 if (!found) { 2872 spdk_uuid_copy(&temp_hostid_list[num_hostid++], &old_hostid_list[i]); 2873 } 2874 } 2875 2876 if (num_hostid) { 2877 memcpy(old_hostid_list, temp_hostid_list, sizeof(struct spdk_uuid) * num_hostid); 2878 } 2879 2880 return num_hostid; 2881 } 2882 2883 /* current reservation type is all registrants or not */ 2884 static bool 2885 nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns) 2886 { 2887 return (ns->rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS || 2888 ns->rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS); 2889 } 2890 2891 /* current registrant is reservation holder or not */ 2892 static bool 2893 nvmf_ns_reservation_registrant_is_holder(struct spdk_nvmf_ns *ns, 2894 struct spdk_nvmf_registrant *reg) 2895 { 2896 if (!reg) { 2897 return false; 2898 } 2899 2900 if (nvmf_ns_reservation_all_registrants_type(ns)) { 2901 return true; 2902 } 2903 2904 return (ns->holder == reg); 2905 } 2906 2907 static int 2908 
nvmf_ns_reservation_add_registrant(struct spdk_nvmf_ns *ns, 2909 struct spdk_nvmf_ctrlr *ctrlr, 2910 uint64_t nrkey) 2911 { 2912 struct spdk_nvmf_registrant *reg; 2913 2914 reg = calloc(1, sizeof(*reg)); 2915 if (!reg) { 2916 return -ENOMEM; 2917 } 2918 2919 reg->rkey = nrkey; 2920 /* set hostid for the registrant */ 2921 spdk_uuid_copy(®->hostid, &ctrlr->hostid); 2922 TAILQ_INSERT_TAIL(&ns->registrants, reg, link); 2923 ns->gen++; 2924 2925 return 0; 2926 } 2927 2928 static void 2929 nvmf_ns_reservation_release_reservation(struct spdk_nvmf_ns *ns) 2930 { 2931 ns->rtype = 0; 2932 ns->crkey = 0; 2933 ns->holder = NULL; 2934 } 2935 2936 /* release the reservation if the last registrant was removed */ 2937 static void 2938 nvmf_ns_reservation_check_release_on_remove_registrant(struct spdk_nvmf_ns *ns, 2939 struct spdk_nvmf_registrant *reg) 2940 { 2941 struct spdk_nvmf_registrant *next_reg; 2942 2943 /* no reservation holder */ 2944 if (!ns->holder) { 2945 assert(ns->rtype == 0); 2946 return; 2947 } 2948 2949 next_reg = TAILQ_FIRST(&ns->registrants); 2950 if (next_reg && nvmf_ns_reservation_all_registrants_type(ns)) { 2951 /* the next valid registrant is the new holder now */ 2952 ns->holder = next_reg; 2953 } else if (nvmf_ns_reservation_registrant_is_holder(ns, reg)) { 2954 /* release the reservation */ 2955 nvmf_ns_reservation_release_reservation(ns); 2956 } 2957 } 2958 2959 static void 2960 nvmf_ns_reservation_remove_registrant(struct spdk_nvmf_ns *ns, 2961 struct spdk_nvmf_registrant *reg) 2962 { 2963 TAILQ_REMOVE(&ns->registrants, reg, link); 2964 nvmf_ns_reservation_check_release_on_remove_registrant(ns, reg); 2965 free(reg); 2966 ns->gen++; 2967 return; 2968 } 2969 2970 static uint32_t 2971 nvmf_ns_reservation_remove_registrants_by_key(struct spdk_nvmf_ns *ns, 2972 uint64_t rkey) 2973 { 2974 struct spdk_nvmf_registrant *reg, *tmp; 2975 uint32_t count = 0; 2976 2977 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 2978 if (reg->rkey == rkey) { 2979 
nvmf_ns_reservation_remove_registrant(ns, reg); 2980 count++; 2981 } 2982 } 2983 return count; 2984 } 2985 2986 static uint32_t 2987 nvmf_ns_reservation_remove_all_other_registrants(struct spdk_nvmf_ns *ns, 2988 struct spdk_nvmf_registrant *reg) 2989 { 2990 struct spdk_nvmf_registrant *reg_tmp, *reg_tmp2; 2991 uint32_t count = 0; 2992 2993 TAILQ_FOREACH_SAFE(reg_tmp, &ns->registrants, link, reg_tmp2) { 2994 if (reg_tmp != reg) { 2995 nvmf_ns_reservation_remove_registrant(ns, reg_tmp); 2996 count++; 2997 } 2998 } 2999 return count; 3000 } 3001 3002 static uint32_t 3003 nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns) 3004 { 3005 struct spdk_nvmf_registrant *reg, *reg_tmp; 3006 uint32_t count = 0; 3007 3008 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, reg_tmp) { 3009 nvmf_ns_reservation_remove_registrant(ns, reg); 3010 count++; 3011 } 3012 return count; 3013 } 3014 3015 static void 3016 nvmf_ns_reservation_acquire_reservation(struct spdk_nvmf_ns *ns, uint64_t rkey, 3017 enum spdk_nvme_reservation_type rtype, 3018 struct spdk_nvmf_registrant *holder) 3019 { 3020 ns->rtype = rtype; 3021 ns->crkey = rkey; 3022 assert(ns->holder == NULL); 3023 ns->holder = holder; 3024 } 3025 3026 static bool 3027 nvmf_ns_reservation_register(struct spdk_nvmf_ns *ns, 3028 struct spdk_nvmf_ctrlr *ctrlr, 3029 struct spdk_nvmf_request *req) 3030 { 3031 struct spdk_nvme_reservation_register_data key = { 0 }; 3032 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 3033 uint8_t rrega, iekey, cptpl, rtype; 3034 struct spdk_nvmf_registrant *reg; 3035 uint8_t status = SPDK_NVME_SC_SUCCESS; 3036 bool update_sgroup = false; 3037 struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 3038 uint32_t num_hostid = 0; 3039 int rc; 3040 3041 rrega = cmd->cdw10_bits.resv_register.rrega; 3042 iekey = cmd->cdw10_bits.resv_register.iekey; 3043 cptpl = cmd->cdw10_bits.resv_register.cptpl; 3044 3045 if (req->iovcnt > 0 && req->length >= sizeof(key)) { 3046 struct spdk_iov_xfer ix; 3047 
spdk_iov_xfer_init(&ix, req->iov, req->iovcnt); 3048 spdk_iov_xfer_to_buf(&ix, &key, sizeof(key)); 3049 } else { 3050 SPDK_ERRLOG("No key provided. Failing request.\n"); 3051 status = SPDK_NVME_SC_INVALID_FIELD; 3052 goto exit; 3053 } 3054 3055 SPDK_DEBUGLOG(nvmf, "REGISTER: RREGA %u, IEKEY %u, CPTPL %u, " 3056 "NRKEY 0x%"PRIx64", NRKEY 0x%"PRIx64"\n", 3057 rrega, iekey, cptpl, key.crkey, key.nrkey); 3058 3059 if (cptpl == SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON) { 3060 /* Ture to OFF state, and need to be updated in the configuration file */ 3061 if (ns->ptpl_activated) { 3062 ns->ptpl_activated = 0; 3063 update_sgroup = true; 3064 } 3065 } else if (cptpl == SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS) { 3066 if (!nvmf_ns_is_ptpl_capable(ns)) { 3067 status = SPDK_NVME_SC_INVALID_FIELD; 3068 goto exit; 3069 } else if (ns->ptpl_activated == 0) { 3070 ns->ptpl_activated = 1; 3071 update_sgroup = true; 3072 } 3073 } 3074 3075 /* current Host Identifier has registrant or not */ 3076 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid); 3077 3078 switch (rrega) { 3079 case SPDK_NVME_RESERVE_REGISTER_KEY: 3080 if (!reg) { 3081 /* register new controller */ 3082 if (key.nrkey == 0) { 3083 SPDK_ERRLOG("Can't register zeroed new key\n"); 3084 status = SPDK_NVME_SC_INVALID_FIELD; 3085 goto exit; 3086 } 3087 rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey); 3088 if (rc < 0) { 3089 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 3090 goto exit; 3091 } 3092 update_sgroup = true; 3093 } else { 3094 /* register with same key is not an error */ 3095 if (reg->rkey != key.nrkey) { 3096 SPDK_ERRLOG("The same host already register a " 3097 "key with 0x%"PRIx64"\n", 3098 reg->rkey); 3099 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3100 goto exit; 3101 } 3102 } 3103 break; 3104 case SPDK_NVME_RESERVE_UNREGISTER_KEY: 3105 if (!reg || (!iekey && reg->rkey != key.crkey)) { 3106 SPDK_ERRLOG("No registrant or current key doesn't match " 3107 "with existing registrant key\n"); 
3108 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3109 goto exit; 3110 } 3111 3112 rtype = ns->rtype; 3113 num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list, 3114 SPDK_NVMF_MAX_NUM_REGISTRANTS, 3115 &ctrlr->hostid); 3116 3117 nvmf_ns_reservation_remove_registrant(ns, reg); 3118 3119 if (!ns->rtype && num_hostid && (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY || 3120 rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY)) { 3121 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 3122 hostid_list, 3123 num_hostid, 3124 SPDK_NVME_RESERVATION_RELEASED); 3125 } 3126 update_sgroup = true; 3127 break; 3128 case SPDK_NVME_RESERVE_REPLACE_KEY: 3129 if (key.nrkey == 0) { 3130 SPDK_ERRLOG("Can't register zeroed new key\n"); 3131 status = SPDK_NVME_SC_INVALID_FIELD; 3132 goto exit; 3133 } 3134 /* Registrant exists */ 3135 if (reg) { 3136 if (!iekey && reg->rkey != key.crkey) { 3137 SPDK_ERRLOG("Current key doesn't match " 3138 "existing registrant key\n"); 3139 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3140 goto exit; 3141 } 3142 if (reg->rkey == key.nrkey) { 3143 goto exit; 3144 } 3145 reg->rkey = key.nrkey; 3146 } else if (iekey) { /* No registrant but IEKEY is set */ 3147 /* new registrant */ 3148 rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey); 3149 if (rc < 0) { 3150 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 3151 goto exit; 3152 } 3153 } else { /* No registrant */ 3154 SPDK_ERRLOG("No registrant\n"); 3155 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3156 goto exit; 3157 3158 } 3159 update_sgroup = true; 3160 break; 3161 default: 3162 status = SPDK_NVME_SC_INVALID_FIELD; 3163 goto exit; 3164 } 3165 3166 exit: 3167 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3168 req->rsp->nvme_cpl.status.sc = status; 3169 return update_sgroup; 3170 } 3171 3172 static bool 3173 nvmf_ns_reservation_acquire(struct spdk_nvmf_ns *ns, 3174 struct spdk_nvmf_ctrlr *ctrlr, 3175 struct spdk_nvmf_request *req) 3176 { 3177 struct 
spdk_nvme_reservation_acquire_data key = { 0 }; 3178 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 3179 uint8_t racqa, iekey, rtype; 3180 struct spdk_nvmf_registrant *reg; 3181 bool all_regs = false; 3182 uint32_t count = 0; 3183 bool update_sgroup = true; 3184 struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 3185 uint32_t num_hostid = 0; 3186 struct spdk_uuid new_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS]; 3187 uint32_t new_num_hostid = 0; 3188 bool reservation_released = false; 3189 uint8_t status = SPDK_NVME_SC_SUCCESS; 3190 3191 racqa = cmd->cdw10_bits.resv_acquire.racqa; 3192 iekey = cmd->cdw10_bits.resv_acquire.iekey; 3193 rtype = cmd->cdw10_bits.resv_acquire.rtype; 3194 3195 if (req->iovcnt > 0 && req->length >= sizeof(key)) { 3196 struct spdk_iov_xfer ix; 3197 spdk_iov_xfer_init(&ix, req->iov, req->iovcnt); 3198 spdk_iov_xfer_to_buf(&ix, &key, sizeof(key)); 3199 } else { 3200 SPDK_ERRLOG("No key provided. Failing request.\n"); 3201 status = SPDK_NVME_SC_INVALID_FIELD; 3202 goto exit; 3203 } 3204 3205 SPDK_DEBUGLOG(nvmf, "ACQUIRE: RACQA %u, IEKEY %u, RTYPE %u, " 3206 "NRKEY 0x%"PRIx64", PRKEY 0x%"PRIx64"\n", 3207 racqa, iekey, rtype, key.crkey, key.prkey); 3208 3209 if (iekey || rtype > SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) { 3210 SPDK_ERRLOG("Ignore existing key field set to 1\n"); 3211 status = SPDK_NVME_SC_INVALID_FIELD; 3212 update_sgroup = false; 3213 goto exit; 3214 } 3215 3216 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid); 3217 /* must be registrant and CRKEY must match */ 3218 if (!reg || reg->rkey != key.crkey) { 3219 SPDK_ERRLOG("No registrant or current key doesn't match " 3220 "with existing registrant key\n"); 3221 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3222 update_sgroup = false; 3223 goto exit; 3224 } 3225 3226 all_regs = nvmf_ns_reservation_all_registrants_type(ns); 3227 3228 switch (racqa) { 3229 case SPDK_NVME_RESERVE_ACQUIRE: 3230 /* it's not an error for the holder to acquire same reservation type 
again */ 3231 if (nvmf_ns_reservation_registrant_is_holder(ns, reg) && ns->rtype == rtype) { 3232 /* do nothing */ 3233 update_sgroup = false; 3234 } else if (ns->holder == NULL) { 3235 /* first time to acquire the reservation */ 3236 nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg); 3237 } else { 3238 SPDK_ERRLOG("Invalid rtype or current registrant is not holder\n"); 3239 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3240 update_sgroup = false; 3241 goto exit; 3242 } 3243 break; 3244 case SPDK_NVME_RESERVE_PREEMPT: 3245 /* no reservation holder */ 3246 if (!ns->holder) { 3247 /* unregister with PRKEY */ 3248 nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey); 3249 break; 3250 } 3251 num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list, 3252 SPDK_NVMF_MAX_NUM_REGISTRANTS, 3253 &ctrlr->hostid); 3254 3255 /* only 1 reservation holder and reservation key is valid */ 3256 if (!all_regs) { 3257 /* preempt itself */ 3258 if (nvmf_ns_reservation_registrant_is_holder(ns, reg) && 3259 ns->crkey == key.prkey) { 3260 ns->rtype = rtype; 3261 reservation_released = true; 3262 break; 3263 } 3264 3265 if (ns->crkey == key.prkey) { 3266 nvmf_ns_reservation_remove_registrant(ns, ns->holder); 3267 nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg); 3268 reservation_released = true; 3269 } else if (key.prkey != 0) { 3270 nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey); 3271 } else { 3272 /* PRKEY is zero */ 3273 SPDK_ERRLOG("Current PRKEY is zero\n"); 3274 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3275 update_sgroup = false; 3276 goto exit; 3277 } 3278 } else { 3279 /* release all other registrants except for the current one */ 3280 if (key.prkey == 0) { 3281 nvmf_ns_reservation_remove_all_other_registrants(ns, reg); 3282 assert(ns->holder == reg); 3283 } else { 3284 count = nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey); 3285 if (count == 0) { 3286 SPDK_ERRLOG("PRKEY doesn't match any 
registrant\n"); 3287 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3288 update_sgroup = false; 3289 goto exit; 3290 } 3291 } 3292 } 3293 break; 3294 default: 3295 status = SPDK_NVME_SC_INVALID_FIELD; 3296 update_sgroup = false; 3297 break; 3298 } 3299 3300 exit: 3301 if (update_sgroup && racqa == SPDK_NVME_RESERVE_PREEMPT) { 3302 new_num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, new_hostid_list, 3303 SPDK_NVMF_MAX_NUM_REGISTRANTS, 3304 &ctrlr->hostid); 3305 /* Preempt notification occurs on the unregistered controllers 3306 * other than the controller who issued the command. 3307 */ 3308 num_hostid = nvmf_ns_reservation_get_unregistered_hostid(hostid_list, 3309 num_hostid, 3310 new_hostid_list, 3311 new_num_hostid); 3312 if (num_hostid) { 3313 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 3314 hostid_list, 3315 num_hostid, 3316 SPDK_NVME_REGISTRATION_PREEMPTED); 3317 3318 } 3319 /* Reservation released notification occurs on the 3320 * controllers which are the remaining registrants other than 3321 * the controller who issued the command. 
		 */
		if (reservation_released && new_num_hostid) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      new_hostid_list,
							      new_num_hostid,
							      SPDK_NVME_RESERVATION_RELEASED);

		}
	}
	/* Complete the command; update_sgroup tells the caller whether the
	 * subsystem poll groups must be resynced with the new reservation state.
	 */
	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->rsp->nvme_cpl.status.sc = status;
	return update_sgroup;
}

/*
 * Handle a Reservation Release command on namespace 'ns' issued by the host
 * that owns 'ctrlr'.
 *
 * Returns true when the namespace reservation state changed and the
 * subsystem poll groups must be updated; false otherwise.  The NVMe
 * completion status is written into req->rsp before returning.
 */
static bool
nvmf_ns_reservation_release(struct spdk_nvmf_ns *ns,
			    struct spdk_nvmf_ctrlr *ctrlr,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	uint8_t rrela, iekey, rtype;
	struct spdk_nvmf_registrant *reg;
	uint64_t crkey = 0;
	uint8_t status = SPDK_NVME_SC_SUCCESS;
	bool update_sgroup = true;
	struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint32_t num_hostid = 0;

	/* Decode cdw10: release action, ignore-existing-key flag, reservation type */
	rrela = cmd->cdw10_bits.resv_release.rrela;
	iekey = cmd->cdw10_bits.resv_release.iekey;
	rtype = cmd->cdw10_bits.resv_release.rtype;

	/* The 8-byte Current Reservation Key (CRKEY) must be supplied in the
	 * data buffer; without it the command cannot be validated.
	 */
	if (req->iovcnt > 0 && req->length >= sizeof(crkey)) {
		struct spdk_iov_xfer ix;
		spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
		spdk_iov_xfer_to_buf(&ix, &crkey, sizeof(crkey));
	} else {
		SPDK_ERRLOG("No key provided. Failing request.\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		goto exit;
	}

	SPDK_DEBUGLOG(nvmf, "RELEASE: RRELA %u, IEKEY %u, RTYPE %u, "
		      "CRKEY 0x%"PRIx64"\n", rrela, iekey, rtype, crkey);

	/* IEKEY must be cleared for Release per the spec; reject if set */
	if (iekey) {
		SPDK_ERRLOG("Ignore existing key field set to 1\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		goto exit;
	}

	/* Only a current registrant whose key matches CRKEY may release */
	reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid);
	if (!reg || reg->rkey != crkey) {
		SPDK_ERRLOG("No registrant or current key doesn't match "
			    "with existing registrant key\n");
		status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		update_sgroup = false;
		goto exit;
	}

	/* Snapshot the other registered host IDs before mutating state, so
	 * notifications can be sent to everyone except the issuing host.
	 */
	num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
			SPDK_NVMF_MAX_NUM_REGISTRANTS,
			&ctrlr->hostid);

	switch (rrela) {
	case SPDK_NVME_RESERVE_RELEASE:
		if (!ns->holder) {
			/* No reservation held: nothing to release, not an error */
			SPDK_DEBUGLOG(nvmf, "RELEASE: no holder\n");
			update_sgroup = false;
			goto exit;
		}
		if (ns->rtype != rtype) {
			SPDK_ERRLOG("Type doesn't match\n");
			status = SPDK_NVME_SC_INVALID_FIELD;
			update_sgroup = false;
			goto exit;
		}
		if (!nvmf_ns_reservation_registrant_is_holder(ns, reg)) {
			/* not the reservation holder, this isn't an error */
			update_sgroup = false;
			goto exit;
		}

		/* Capture the type before it is cleared by the release */
		rtype = ns->rtype;
		nvmf_ns_reservation_release_reservation(ns);

		/* Write Exclusive / Exclusive Access types do not generate a
		 * Reservation Released notification per the spec.
		 */
		if (num_hostid && rtype != SPDK_NVME_RESERVE_WRITE_EXCLUSIVE &&
		    rtype != SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_RESERVATION_RELEASED);
		}
		break;
	case SPDK_NVME_RESERVE_CLEAR:
		/* Clear: drop the reservation and every registrant, then notify
		 * the other (now preempted) hosts.
		 */
		nvmf_ns_reservation_clear_all_registrants(ns);
		if (num_hostid) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_RESERVATION_PREEMPTED);
		}
		break;
default: 3427 status = SPDK_NVME_SC_INVALID_FIELD; 3428 update_sgroup = false; 3429 goto exit; 3430 } 3431 3432 exit: 3433 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3434 req->rsp->nvme_cpl.status.sc = status; 3435 return update_sgroup; 3436 } 3437 3438 static void 3439 nvmf_ns_reservation_report(struct spdk_nvmf_ns *ns, 3440 struct spdk_nvmf_ctrlr *ctrlr, 3441 struct spdk_nvmf_request *req) 3442 { 3443 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 3444 struct spdk_nvmf_registrant *reg, *tmp; 3445 struct spdk_nvme_reservation_status_extended_data status_data = { 0 }; 3446 struct spdk_iov_xfer ix; 3447 uint32_t transfer_len; 3448 uint32_t regctl = 0; 3449 uint8_t status = SPDK_NVME_SC_SUCCESS; 3450 3451 if (req->iovcnt == 0) { 3452 SPDK_ERRLOG("No data transfer specified for request. " 3453 " Unable to transfer back response.\n"); 3454 status = SPDK_NVME_SC_INVALID_FIELD; 3455 goto exit; 3456 } 3457 3458 if (!cmd->cdw11_bits.resv_report.eds) { 3459 SPDK_ERRLOG("NVMeoF uses extended controller data structure, " 3460 "please set EDS bit in cdw11 and try again\n"); 3461 status = SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT; 3462 goto exit; 3463 } 3464 3465 /* Number of Dwords of the Reservation Status data structure to transfer */ 3466 transfer_len = (cmd->cdw10 + 1) * sizeof(uint32_t); 3467 3468 if (transfer_len < sizeof(struct spdk_nvme_reservation_status_extended_data)) { 3469 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 3470 goto exit; 3471 } 3472 3473 spdk_iov_xfer_init(&ix, req->iov, req->iovcnt); 3474 3475 status_data.data.gen = ns->gen; 3476 status_data.data.rtype = ns->rtype; 3477 status_data.data.ptpls = ns->ptpl_activated; 3478 3479 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 3480 regctl++; 3481 } 3482 3483 /* 3484 * We report the number of registrants as per the spec here, even if 3485 * the iov isn't big enough to contain them all. 
In that case, the 3486 * spdk_iov_xfer_from_buf() won't actually copy any of the remaining 3487 * data; as it keeps track of the iov cursor itself, it's simplest to 3488 * just walk the entire list anyway. 3489 */ 3490 status_data.data.regctl = regctl; 3491 3492 spdk_iov_xfer_from_buf(&ix, &status_data, sizeof(status_data)); 3493 3494 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 3495 struct spdk_nvme_registered_ctrlr_extended_data ctrlr_data = { 0 }; 3496 3497 /* Set to 0xffffh for dynamic controller */ 3498 ctrlr_data.cntlid = 0xffff; 3499 ctrlr_data.rcsts.status = (ns->holder == reg) ? true : false; 3500 ctrlr_data.rkey = reg->rkey; 3501 spdk_uuid_copy((struct spdk_uuid *)ctrlr_data.hostid, ®->hostid); 3502 3503 spdk_iov_xfer_from_buf(&ix, &ctrlr_data, sizeof(ctrlr_data)); 3504 } 3505 3506 exit: 3507 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3508 req->rsp->nvme_cpl.status.sc = status; 3509 return; 3510 } 3511 3512 static void 3513 nvmf_ns_reservation_complete(void *ctx) 3514 { 3515 struct spdk_nvmf_request *req = ctx; 3516 3517 spdk_nvmf_request_complete(req); 3518 } 3519 3520 static void 3521 _nvmf_ns_reservation_update_done(struct spdk_nvmf_subsystem *subsystem, 3522 void *cb_arg, int status) 3523 { 3524 struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)cb_arg; 3525 struct spdk_nvmf_poll_group *group = req->qpair->group; 3526 3527 spdk_thread_send_msg(group->thread, nvmf_ns_reservation_complete, req); 3528 } 3529 3530 void 3531 nvmf_ns_reservation_request(void *ctx) 3532 { 3533 struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)ctx; 3534 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 3535 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 3536 uint32_t nsid; 3537 struct spdk_nvmf_ns *ns; 3538 bool update_sgroup = false; 3539 int status = 0; 3540 3541 nsid = cmd->nsid; 3542 ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid); 3543 assert(ns != NULL); 3544 3545 switch (cmd->opc) { 3546 case 
 SPDK_NVME_OPC_RESERVATION_REGISTER:
		update_sgroup = nvmf_ns_reservation_register(ns, ctrlr, req);
		break;
	case SPDK_NVME_OPC_RESERVATION_ACQUIRE:
		update_sgroup = nvmf_ns_reservation_acquire(ns, ctrlr, req);
		break;
	case SPDK_NVME_OPC_RESERVATION_RELEASE:
		update_sgroup = nvmf_ns_reservation_release(ns, ctrlr, req);
		break;
	case SPDK_NVME_OPC_RESERVATION_REPORT:
		/* Report never changes reservation state, so no sgroup update */
		nvmf_ns_reservation_report(ns, ctrlr, req);
		break;
	default:
		break;
	}

	/* update reservation information to subsystem's poll group */
	if (update_sgroup) {
		/* Persist-through-power-loss: write the new state out when PTPL
		 * is active, or on Register (which may toggle PTPL itself).
		 */
		if (ns->ptpl_activated || cmd->opc == SPDK_NVME_OPC_RESERVATION_REGISTER) {
			if (nvmf_ns_update_reservation_info(ns) != 0) {
				req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			}
		}
		status = nvmf_subsystem_update_ns(ctrlr->subsys, _nvmf_ns_reservation_update_done, req);
		if (status == 0) {
			/* Completion is delivered asynchronously by the callback */
			return;
		}
	}

	/* No poll-group update needed (or it failed to start): complete now */
	_nvmf_ns_reservation_update_done(ctrlr->subsys, req, status);
}

/* Default PTPL-capability check: capable iff a persistence file is configured. */
static bool
nvmf_ns_is_ptpl_capable_json(const struct spdk_nvmf_ns *ns)
{
	return ns->ptpl_file != NULL;
}

/* Pluggable reservation persistence ops; defaults to the JSON-file backend.
 * Replaceable via spdk_nvmf_set_custom_ns_reservation_ops().
 */
static struct spdk_nvmf_ns_reservation_ops g_reservation_ops = {
	.is_ptpl_capable = nvmf_ns_is_ptpl_capable_json,
	.update = nvmf_ns_reservation_update_json,
	.load = nvmf_ns_reservation_load_json,
};

/* True if 'ns' supports persist-through-power-loss reservations. */
bool
nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns)
{
	return g_reservation_ops.is_ptpl_capable(ns);
}

/* Persist the given reservation state for 'ns' via the configured backend. */
static int
nvmf_ns_reservation_update(const struct spdk_nvmf_ns *ns,
			   const struct spdk_nvmf_reservation_info *info)
{
	return g_reservation_ops.update(ns, info);
}

/* Load previously persisted reservation state for 'ns' into 'info'. */
static int
nvmf_ns_reservation_load(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info)
{
	return g_reservation_ops.load(ns, info);
}

/* Replace the reservation persistence backend with caller-provided ops. */
void
spdk_nvmf_set_custom_ns_reservation_ops(const
 struct spdk_nvmf_ns_reservation_ops *ops)
{
	g_reservation_ops = *ops;
}

/*
 * Enable or disable ANA reporting for the subsystem.  Only allowed while the
 * subsystem is inactive; returns -EAGAIN otherwise.
 */
int
spdk_nvmf_subsystem_set_ana_reporting(struct spdk_nvmf_subsystem *subsystem,
				      bool ana_reporting)
{
	if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) {
		return -EAGAIN;
	}

	subsystem->flags.ana_reporting = ana_reporting;

	return 0;
}

/* Whether ANA reporting is currently enabled for the subsystem. */
bool
spdk_nvmf_subsystem_get_ana_reporting(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->flags.ana_reporting;
}

/* Context carried through the per-poll-group ANA change notification pass. */
struct subsystem_listener_update_ctx {
	struct spdk_nvmf_subsystem_listener *listener;

	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
	void *cb_arg;
};

/* Final step of the channel iteration: invoke the user callback and free ctx. */
static void
subsystem_listener_update_done(struct spdk_io_channel_iter *i, int status)
{
	struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->cb_arg, status);
	}
	free(ctx);
}

/*
 * Per-poll-group step: raise an ANA change async event on every controller
 * of this listener whose admin qpair lives on the current poll group/thread.
 */
static void
subsystem_listener_update_on_pg(struct spdk_io_channel_iter *i)
{
	struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_nvmf_subsystem_listener *listener;
	struct spdk_nvmf_poll_group *group;
	struct spdk_nvmf_ctrlr *ctrlr;

	listener = ctx->listener;
	group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i));

	TAILQ_FOREACH(ctrlr, &listener->subsystem->ctrlrs, link) {
		/* Only touch controllers owned by this thread */
		if (ctrlr->thread != spdk_get_thread()) {
			continue;
		}

		if (ctrlr->admin_qpair && ctrlr->admin_qpair->group == group && ctrlr->listener == listener) {
			nvmf_ctrlr_async_event_ana_change_notice(ctrlr);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

/*
 * Set the ANA state of the listener identified by 'trid' for ANA group
 * 'anagrpid' (0 means all groups), then notify controllers on every poll
 * group.  'cb_fn' is always invoked, with 0 or a negative errno.  The
 * subsystem must be inactive or paused.
 */
void
spdk_nvmf_subsystem_set_ana_state(struct spdk_nvmf_subsystem *subsystem,
				  const struct spdk_nvme_transport_id *trid,
				  enum spdk_nvme_ana_state ana_state, uint32_t anagrpid,
				  spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, void *cb_arg)
{
	struct spdk_nvmf_subsystem_listener *listener;
	struct subsystem_listener_update_ctx *ctx;
	uint32_t i;

	assert(cb_fn != NULL);
	assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	       subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED);

	if (!subsystem->flags.ana_reporting) {
		SPDK_ERRLOG("ANA reporting is disabled\n");
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	/* ANA Change state is not used, ANA Persistent Loss state
	 * is not supported yet.
	 */
	if (!(ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE ||
	      ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE ||
	      ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE)) {
		SPDK_ERRLOG("ANA state %d is not supported\n", ana_state);
		cb_fn(cb_arg, -ENOTSUP);
		return;
	}

	/* ANA group IDs range 1..max_nsid (one group per possible namespace) */
	if (anagrpid > subsystem->max_nsid) {
		SPDK_ERRLOG("ANA group ID %" PRIu32 " is more than maximum\n", anagrpid);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	listener = nvmf_subsystem_find_listener(subsystem, trid);
	if (!listener) {
		SPDK_ERRLOG("Unable to find listener.\n");
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	/* Single-group request already in the desired state: nothing to do */
	if (anagrpid != 0 && listener->ana_state[anagrpid - 1] == ana_state) {
		cb_fn(cb_arg, 0);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		SPDK_ERRLOG("Unable to allocate context\n");
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Apply to the requested group, or to every group when anagrpid == 0 */
	for (i = 1; i <= subsystem->max_nsid; i++) {
		if (anagrpid == 0 || i == anagrpid) {
			listener->ana_state[i - 1] = ana_state;
		}
	}
	listener->ana_state_change_count++;

	ctx->listener = listener;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* Fan the notification out to every poll group; ctx is freed in done */
	spdk_for_each_channel(subsystem->tgt,
			      subsystem_listener_update_on_pg,
			      ctx,
			      subsystem_listener_update_done);
}

int
spdk_nvmf_subsystem_get_ana_state(struct spdk_nvmf_subsystem *subsystem,
				  const struct spdk_nvme_transport_id *trid,
				  uint32_t anagrpid,
				  enum spdk_nvme_ana_state *ana_state)
{
	assert(ana_state != NULL);

	struct spdk_nvmf_subsystem_listener *listener;

	if (!subsystem->flags.ana_reporting) {
		SPDK_ERRLOG("ANA reporting is disabled\n");
		return -EINVAL;
	}

	/* Valid group IDs are 1..max_nsid; anagrpid is unsigned, so the
	 * "<= 0" comparison only catches 0.
	 */
	if (anagrpid <= 0 || anagrpid > subsystem->max_nsid) {
		SPDK_ERRLOG("ANA group ID %" PRIu32 " is invalid\n", anagrpid);
		return -EINVAL;
	}

	listener = nvmf_subsystem_find_listener(subsystem, trid);
	if (!listener) {
		SPDK_ERRLOG("Unable to find listener.\n");
		return -EINVAL;
	}

	/* ana_state[] is indexed by group ID - 1 */
	*ana_state = listener->ana_state[anagrpid - 1];
	return 0;
}

/* True for both the current (unified) and legacy discovery subsystem subtypes. */
bool
spdk_nvmf_subsystem_is_discovery(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT ||
	       subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY;
}

/* True if 'nqn' is exactly the well-known discovery NQN. */
bool
nvmf_nqn_is_discovery(const char *nqn)
{
	return strcmp(nqn, SPDK_NVMF_DISCOVERY_NQN) == 0;
}