1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2016 Intel Corporation. All rights reserved. 3 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 9 #include "nvmf_internal.h" 10 #include "transport.h" 11 12 #include "spdk/assert.h" 13 #include "spdk/likely.h" 14 #include "spdk/string.h" 15 #include "spdk/trace.h" 16 #include "spdk/nvmf_spec.h" 17 #include "spdk/uuid.h" 18 #include "spdk/json.h" 19 #include "spdk/file.h" 20 #include "spdk/bit_array.h" 21 #include "spdk/bdev.h" 22 23 #define __SPDK_BDEV_MODULE_ONLY 24 #include "spdk/bdev_module.h" 25 #include "spdk/log.h" 26 #include "spdk_internal/utf.h" 27 #include "spdk_internal/usdt.h" 28 29 #define MODEL_NUMBER_DEFAULT "SPDK bdev Controller" 30 #define NVMF_SUBSYSTEM_DEFAULT_NAMESPACES 32 31 32 /* 33 * States for parsing valid domains in NQNs according to RFC 1034 34 */ 35 enum spdk_nvmf_nqn_domain_states { 36 /* First character of a domain must be a letter */ 37 SPDK_NVMF_DOMAIN_ACCEPT_LETTER = 0, 38 39 /* Subsequent characters can be any of letter, digit, or hyphen */ 40 SPDK_NVMF_DOMAIN_ACCEPT_LDH = 1, 41 42 /* A domain label must end with either a letter or digit */ 43 SPDK_NVMF_DOMAIN_ACCEPT_ANY = 2 44 }; 45 46 static int _nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem); 47 48 /* Returns true if is a valid ASCII string as defined by the NVMe spec */ 49 static bool 50 nvmf_valid_ascii_string(const void *buf, size_t size) 51 { 52 const uint8_t *str = buf; 53 size_t i; 54 55 for (i = 0; i < size; i++) { 56 if (str[i] < 0x20 || str[i] > 0x7E) { 57 return false; 58 } 59 } 60 61 return true; 62 } 63 64 bool 65 nvmf_nqn_is_valid(const char *nqn) 66 { 67 size_t len; 68 struct spdk_uuid uuid_value; 69 uint32_t i; 70 int bytes_consumed; 71 uint32_t domain_label_length; 72 char *reverse_domain_end; 73 uint32_t reverse_domain_end_index; 74 enum 
spdk_nvmf_nqn_domain_states domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER; 75 76 /* Check for length requirements */ 77 len = strlen(nqn); 78 if (len > SPDK_NVMF_NQN_MAX_LEN) { 79 SPDK_ERRLOG("Invalid NQN \"%s\": length %zu > max %d\n", nqn, len, SPDK_NVMF_NQN_MAX_LEN); 80 return false; 81 } 82 83 /* The nqn must be at least as long as SPDK_NVMF_NQN_MIN_LEN to contain the necessary prefix. */ 84 if (len < SPDK_NVMF_NQN_MIN_LEN) { 85 SPDK_ERRLOG("Invalid NQN \"%s\": length %zu < min %d\n", nqn, len, SPDK_NVMF_NQN_MIN_LEN); 86 return false; 87 } 88 89 /* Check for discovery controller nqn */ 90 if (!strcmp(nqn, SPDK_NVMF_DISCOVERY_NQN)) { 91 return true; 92 } 93 94 /* Check for equality with the generic nqn structure of the form "nqn.2014-08.org.nvmexpress:uuid:11111111-2222-3333-4444-555555555555" */ 95 if (!strncmp(nqn, SPDK_NVMF_NQN_UUID_PRE, SPDK_NVMF_NQN_UUID_PRE_LEN)) { 96 if (len != SPDK_NVMF_NQN_UUID_PRE_LEN + SPDK_NVMF_UUID_STRING_LEN) { 97 SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not the correct length\n", nqn); 98 return false; 99 } 100 101 if (spdk_uuid_parse(&uuid_value, &nqn[SPDK_NVMF_NQN_UUID_PRE_LEN])) { 102 SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not formatted correctly\n", nqn); 103 return false; 104 } 105 return true; 106 } 107 108 /* If the nqn does not match the uuid structure, the next several checks validate the form "nqn.yyyy-mm.reverse.domain:user-string" */ 109 110 if (strncmp(nqn, "nqn.", 4) != 0) { 111 SPDK_ERRLOG("Invalid NQN \"%s\": NQN must begin with \"nqn.\".\n", nqn); 112 return false; 113 } 114 115 /* Check for yyyy-mm. 
*/ 116 if (!(isdigit(nqn[4]) && isdigit(nqn[5]) && isdigit(nqn[6]) && isdigit(nqn[7]) && 117 nqn[8] == '-' && isdigit(nqn[9]) && isdigit(nqn[10]) && nqn[11] == '.')) { 118 SPDK_ERRLOG("Invalid date code in NQN \"%s\"\n", nqn); 119 return false; 120 } 121 122 reverse_domain_end = strchr(nqn, ':'); 123 if (reverse_domain_end != NULL && (reverse_domain_end_index = reverse_domain_end - nqn) < len - 1) { 124 } else { 125 SPDK_ERRLOG("Invalid NQN \"%s\". NQN must contain user specified name with a ':' as a prefix.\n", 126 nqn); 127 return false; 128 } 129 130 /* Check for valid reverse domain */ 131 domain_label_length = 0; 132 for (i = 12; i < reverse_domain_end_index; i++) { 133 if (domain_label_length > SPDK_DOMAIN_LABEL_MAX_LEN) { 134 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". At least one Label is too long.\n", nqn); 135 return false; 136 } 137 138 switch (domain_state) { 139 140 case SPDK_NVMF_DOMAIN_ACCEPT_LETTER: { 141 if (isalpha(nqn[i])) { 142 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 143 domain_label_length++; 144 break; 145 } else { 146 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must start with a letter.\n", nqn); 147 return false; 148 } 149 } 150 151 case SPDK_NVMF_DOMAIN_ACCEPT_LDH: { 152 if (isalpha(nqn[i]) || isdigit(nqn[i])) { 153 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 154 domain_label_length++; 155 break; 156 } else if (nqn[i] == '-') { 157 if (i == reverse_domain_end_index - 1) { 158 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 159 nqn); 160 return false; 161 } 162 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH; 163 domain_label_length++; 164 break; 165 } else if (nqn[i] == '.') { 166 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 167 nqn); 168 return false; 169 } else { 170 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". 
Label names must contain only [a-z,A-Z,0-9,'-','.'].\n", 171 nqn); 172 return false; 173 } 174 } 175 176 case SPDK_NVMF_DOMAIN_ACCEPT_ANY: { 177 if (isalpha(nqn[i]) || isdigit(nqn[i])) { 178 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY; 179 domain_label_length++; 180 break; 181 } else if (nqn[i] == '-') { 182 if (i == reverse_domain_end_index - 1) { 183 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n", 184 nqn); 185 return false; 186 } 187 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH; 188 domain_label_length++; 189 break; 190 } else if (nqn[i] == '.') { 191 domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER; 192 domain_label_length = 0; 193 break; 194 } else { 195 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only [a-z,A-Z,0-9,'-','.'].\n", 196 nqn); 197 return false; 198 } 199 } 200 } 201 } 202 203 i = reverse_domain_end_index + 1; 204 while (i < len) { 205 bytes_consumed = utf8_valid(&nqn[i], &nqn[len]); 206 if (bytes_consumed <= 0) { 207 SPDK_ERRLOG("Invalid domain name in NQN \"%s\". 
Label names must contain only valid utf-8.\n", nqn); 208 return false; 209 } 210 211 i += bytes_consumed; 212 } 213 return true; 214 } 215 216 static void subsystem_state_change_on_pg(struct spdk_io_channel_iter *i); 217 218 struct spdk_nvmf_subsystem * 219 spdk_nvmf_subsystem_create(struct spdk_nvmf_tgt *tgt, 220 const char *nqn, 221 enum spdk_nvmf_subtype type, 222 uint32_t num_ns) 223 { 224 struct spdk_nvmf_subsystem *subsystem; 225 uint32_t sid; 226 227 if (spdk_nvmf_tgt_find_subsystem(tgt, nqn)) { 228 SPDK_ERRLOG("Subsystem NQN '%s' already exists\n", nqn); 229 return NULL; 230 } 231 232 if (!nvmf_nqn_is_valid(nqn)) { 233 SPDK_ERRLOG("Subsystem NQN '%s' is invalid\n", nqn); 234 return NULL; 235 } 236 237 if (type == SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT || 238 type == SPDK_NVMF_SUBTYPE_DISCOVERY) { 239 if (num_ns != 0) { 240 SPDK_ERRLOG("Discovery subsystem cannot have namespaces.\n"); 241 return NULL; 242 } 243 } else if (num_ns == 0) { 244 num_ns = NVMF_SUBSYSTEM_DEFAULT_NAMESPACES; 245 } 246 247 /* Find a free subsystem id (sid) */ 248 sid = spdk_bit_array_find_first_clear(tgt->subsystem_ids, 0); 249 if (sid == UINT32_MAX) { 250 SPDK_ERRLOG("No free subsystem IDs are available for subsystem creation\n"); 251 return NULL; 252 } 253 subsystem = calloc(1, sizeof(struct spdk_nvmf_subsystem)); 254 if (subsystem == NULL) { 255 SPDK_ERRLOG("Subsystem memory allocation failed\n"); 256 return NULL; 257 } 258 259 subsystem->thread = spdk_get_thread(); 260 subsystem->state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 261 subsystem->tgt = tgt; 262 subsystem->id = sid; 263 subsystem->subtype = type; 264 subsystem->max_nsid = num_ns; 265 subsystem->next_cntlid = 0; 266 subsystem->min_cntlid = NVMF_MIN_CNTLID; 267 subsystem->max_cntlid = NVMF_MAX_CNTLID; 268 snprintf(subsystem->subnqn, sizeof(subsystem->subnqn), "%s", nqn); 269 pthread_mutex_init(&subsystem->mutex, NULL); 270 TAILQ_INIT(&subsystem->listeners); 271 TAILQ_INIT(&subsystem->hosts); 272 TAILQ_INIT(&subsystem->ctrlrs); 273 
TAILQ_INIT(&subsystem->state_changes); 274 subsystem->used_listener_ids = spdk_bit_array_create(NVMF_MAX_LISTENERS_PER_SUBSYSTEM); 275 if (subsystem->used_listener_ids == NULL) { 276 pthread_mutex_destroy(&subsystem->mutex); 277 free(subsystem); 278 SPDK_ERRLOG("Listener id array memory allocation failed\n"); 279 return NULL; 280 } 281 282 if (num_ns != 0) { 283 subsystem->ns = calloc(num_ns, sizeof(struct spdk_nvmf_ns *)); 284 if (subsystem->ns == NULL) { 285 SPDK_ERRLOG("Namespace memory allocation failed\n"); 286 pthread_mutex_destroy(&subsystem->mutex); 287 spdk_bit_array_free(&subsystem->used_listener_ids); 288 free(subsystem); 289 return NULL; 290 } 291 subsystem->ana_group = calloc(num_ns, sizeof(uint32_t)); 292 if (subsystem->ana_group == NULL) { 293 SPDK_ERRLOG("ANA group memory allocation failed\n"); 294 pthread_mutex_destroy(&subsystem->mutex); 295 free(subsystem->ns); 296 spdk_bit_array_free(&subsystem->used_listener_ids); 297 free(subsystem); 298 return NULL; 299 } 300 } 301 302 memset(subsystem->sn, '0', sizeof(subsystem->sn) - 1); 303 subsystem->sn[sizeof(subsystem->sn) - 1] = '\0'; 304 305 snprintf(subsystem->mn, sizeof(subsystem->mn), "%s", 306 MODEL_NUMBER_DEFAULT); 307 308 spdk_bit_array_set(tgt->subsystem_ids, sid); 309 RB_INSERT(subsystem_tree, &tgt->subsystems, subsystem); 310 311 SPDK_DTRACE_PROBE1(nvmf_subsystem_create, subsystem->subnqn); 312 313 return subsystem; 314 } 315 316 /* Must hold subsystem->mutex while calling this function */ 317 static void 318 nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_host *host) 319 { 320 TAILQ_REMOVE(&subsystem->hosts, host, link); 321 free(host); 322 } 323 324 static void 325 _nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem, 326 struct spdk_nvmf_subsystem_listener *listener, 327 bool stop) 328 { 329 struct spdk_nvmf_transport *transport; 330 struct spdk_nvmf_ctrlr *ctrlr; 331 332 if (stop) { 333 transport = 
spdk_nvmf_tgt_get_transport(subsystem->tgt, listener->trid->trstring); 334 if (transport != NULL) { 335 spdk_nvmf_transport_stop_listen(transport, listener->trid); 336 } 337 } 338 339 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) { 340 if (ctrlr->listener == listener) { 341 ctrlr->listener = NULL; 342 } 343 } 344 345 TAILQ_REMOVE(&subsystem->listeners, listener, link); 346 nvmf_update_discovery_log(listener->subsystem->tgt, NULL); 347 free(listener->ana_state); 348 spdk_bit_array_clear(subsystem->used_listener_ids, listener->id); 349 free(listener); 350 } 351 352 static void 353 _nvmf_subsystem_destroy_msg(void *cb_arg) 354 { 355 struct spdk_nvmf_subsystem *subsystem = cb_arg; 356 357 _nvmf_subsystem_destroy(subsystem); 358 } 359 360 static int 361 _nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem) 362 { 363 struct nvmf_subsystem_state_change_ctx *ctx; 364 struct spdk_nvmf_ns *ns; 365 nvmf_subsystem_destroy_cb async_destroy_cb = NULL; 366 void *async_destroy_cb_arg = NULL; 367 int rc; 368 369 if (!TAILQ_EMPTY(&subsystem->ctrlrs)) { 370 SPDK_DEBUGLOG(nvmf, "subsystem %p %s has active controllers\n", subsystem, subsystem->subnqn); 371 subsystem->async_destroy = true; 372 rc = spdk_thread_send_msg(subsystem->thread, _nvmf_subsystem_destroy_msg, subsystem); 373 if (rc) { 374 SPDK_ERRLOG("Failed to send thread msg, rc %d\n", rc); 375 assert(0); 376 return rc; 377 } 378 return -EINPROGRESS; 379 } 380 381 ns = spdk_nvmf_subsystem_get_first_ns(subsystem); 382 while (ns != NULL) { 383 struct spdk_nvmf_ns *next_ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns); 384 385 spdk_nvmf_subsystem_remove_ns(subsystem, ns->opts.nsid); 386 ns = next_ns; 387 } 388 389 while ((ctx = TAILQ_FIRST(&subsystem->state_changes))) { 390 SPDK_WARNLOG("subsystem %s has pending state change requests\n", subsystem->subnqn); 391 TAILQ_REMOVE(&subsystem->state_changes, ctx, link); 392 if (ctx->cb_fn != NULL) { 393 ctx->cb_fn(subsystem, ctx->cb_arg, -ECANCELED); 394 } 395 free(ctx); 396 
} 397 398 free(subsystem->ns); 399 free(subsystem->ana_group); 400 401 RB_REMOVE(subsystem_tree, &subsystem->tgt->subsystems, subsystem); 402 assert(spdk_bit_array_get(subsystem->tgt->subsystem_ids, subsystem->id) == true); 403 spdk_bit_array_clear(subsystem->tgt->subsystem_ids, subsystem->id); 404 405 pthread_mutex_destroy(&subsystem->mutex); 406 407 spdk_bit_array_free(&subsystem->used_listener_ids); 408 409 if (subsystem->async_destroy) { 410 async_destroy_cb = subsystem->async_destroy_cb; 411 async_destroy_cb_arg = subsystem->async_destroy_cb_arg; 412 } 413 414 free(subsystem); 415 416 if (async_destroy_cb) { 417 async_destroy_cb(async_destroy_cb_arg); 418 } 419 420 return 0; 421 } 422 423 static struct spdk_nvmf_ns * 424 _nvmf_subsystem_get_first_zoned_ns(struct spdk_nvmf_subsystem *subsystem) 425 { 426 struct spdk_nvmf_ns *ns = spdk_nvmf_subsystem_get_first_ns(subsystem); 427 while (ns != NULL) { 428 if (ns->csi == SPDK_NVME_CSI_ZNS) { 429 return ns; 430 } 431 ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns); 432 } 433 return NULL; 434 } 435 436 int 437 spdk_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem, nvmf_subsystem_destroy_cb cpl_cb, 438 void *cpl_cb_arg) 439 { 440 struct spdk_nvmf_host *host, *host_tmp; 441 struct spdk_nvmf_transport *transport; 442 443 if (!subsystem) { 444 return -EINVAL; 445 } 446 447 SPDK_DTRACE_PROBE1(nvmf_subsystem_destroy, subsystem->subnqn); 448 449 assert(spdk_get_thread() == subsystem->thread); 450 451 if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) { 452 SPDK_ERRLOG("Subsystem can only be destroyed in inactive state, %s state %d\n", 453 subsystem->subnqn, subsystem->state); 454 return -EAGAIN; 455 } 456 if (subsystem->destroying) { 457 SPDK_ERRLOG("Subsystem destruction is already started\n"); 458 assert(0); 459 return -EALREADY; 460 } 461 462 subsystem->destroying = true; 463 464 SPDK_DEBUGLOG(nvmf, "subsystem is %p %s\n", subsystem, subsystem->subnqn); 465 466 
nvmf_subsystem_remove_all_listeners(subsystem, false); 467 468 pthread_mutex_lock(&subsystem->mutex); 469 470 TAILQ_FOREACH_SAFE(host, &subsystem->hosts, link, host_tmp) { 471 for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport; 472 transport = spdk_nvmf_transport_get_next(transport)) { 473 if (transport->ops->subsystem_remove_host) { 474 transport->ops->subsystem_remove_host(transport, subsystem, host->nqn); 475 } 476 } 477 nvmf_subsystem_remove_host(subsystem, host); 478 } 479 480 pthread_mutex_unlock(&subsystem->mutex); 481 482 subsystem->async_destroy_cb = cpl_cb; 483 subsystem->async_destroy_cb_arg = cpl_cb_arg; 484 485 return _nvmf_subsystem_destroy(subsystem); 486 } 487 488 /* we have to use the typedef in the function declaration to appease astyle. */ 489 typedef enum spdk_nvmf_subsystem_state spdk_nvmf_subsystem_state_t; 490 491 static spdk_nvmf_subsystem_state_t 492 nvmf_subsystem_get_intermediate_state(enum spdk_nvmf_subsystem_state current_state, 493 enum spdk_nvmf_subsystem_state requested_state) 494 { 495 switch (requested_state) { 496 case SPDK_NVMF_SUBSYSTEM_INACTIVE: 497 return SPDK_NVMF_SUBSYSTEM_DEACTIVATING; 498 case SPDK_NVMF_SUBSYSTEM_ACTIVE: 499 if (current_state == SPDK_NVMF_SUBSYSTEM_PAUSED) { 500 return SPDK_NVMF_SUBSYSTEM_RESUMING; 501 } else { 502 return SPDK_NVMF_SUBSYSTEM_ACTIVATING; 503 } 504 case SPDK_NVMF_SUBSYSTEM_PAUSED: 505 return SPDK_NVMF_SUBSYSTEM_PAUSING; 506 default: 507 assert(false); 508 return SPDK_NVMF_SUBSYSTEM_NUM_STATES; 509 } 510 } 511 512 static int 513 nvmf_subsystem_set_state(struct spdk_nvmf_subsystem *subsystem, 514 enum spdk_nvmf_subsystem_state state) 515 { 516 enum spdk_nvmf_subsystem_state actual_old_state, expected_old_state; 517 bool exchanged; 518 519 switch (state) { 520 case SPDK_NVMF_SUBSYSTEM_INACTIVE: 521 expected_old_state = SPDK_NVMF_SUBSYSTEM_DEACTIVATING; 522 break; 523 case SPDK_NVMF_SUBSYSTEM_ACTIVATING: 524 expected_old_state = SPDK_NVMF_SUBSYSTEM_INACTIVE; 525 break; 526 
case SPDK_NVMF_SUBSYSTEM_ACTIVE: 527 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING; 528 break; 529 case SPDK_NVMF_SUBSYSTEM_PAUSING: 530 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 531 break; 532 case SPDK_NVMF_SUBSYSTEM_PAUSED: 533 expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSING; 534 break; 535 case SPDK_NVMF_SUBSYSTEM_RESUMING: 536 expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSED; 537 break; 538 case SPDK_NVMF_SUBSYSTEM_DEACTIVATING: 539 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 540 break; 541 default: 542 assert(false); 543 return -1; 544 } 545 546 actual_old_state = expected_old_state; 547 exchanged = __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false, 548 __ATOMIC_RELAXED, __ATOMIC_RELAXED); 549 if (spdk_unlikely(exchanged == false)) { 550 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING && 551 state == SPDK_NVMF_SUBSYSTEM_ACTIVE) { 552 expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING; 553 } 554 /* This is for the case when activating the subsystem fails. */ 555 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_ACTIVATING && 556 state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING) { 557 expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING; 558 } 559 /* This is for the case when resuming the subsystem fails. 
*/ 560 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING && 561 state == SPDK_NVMF_SUBSYSTEM_PAUSING) { 562 expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING; 563 } 564 /* This is for the case when stopping paused subsystem */ 565 if (actual_old_state == SPDK_NVMF_SUBSYSTEM_PAUSED && 566 state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING) { 567 expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSED; 568 } 569 actual_old_state = expected_old_state; 570 __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false, 571 __ATOMIC_RELAXED, __ATOMIC_RELAXED); 572 } 573 assert(actual_old_state == expected_old_state); 574 return actual_old_state - expected_old_state; 575 } 576 577 static void nvmf_subsystem_do_state_change(struct nvmf_subsystem_state_change_ctx *ctx); 578 579 static void 580 _nvmf_subsystem_state_change_complete(void *_ctx) 581 { 582 struct nvmf_subsystem_state_change_ctx *next, *ctx = _ctx; 583 struct spdk_nvmf_subsystem *subsystem = ctx->subsystem; 584 585 pthread_mutex_lock(&subsystem->mutex); 586 assert(TAILQ_FIRST(&subsystem->state_changes) == ctx); 587 TAILQ_REMOVE(&subsystem->state_changes, ctx, link); 588 next = TAILQ_FIRST(&subsystem->state_changes); 589 pthread_mutex_unlock(&subsystem->mutex); 590 591 if (ctx->cb_fn != NULL) { 592 ctx->cb_fn(subsystem, ctx->cb_arg, ctx->status); 593 } 594 free(ctx); 595 596 if (next != NULL) { 597 nvmf_subsystem_do_state_change(next); 598 } 599 } 600 601 static void 602 nvmf_subsystem_state_change_complete(struct nvmf_subsystem_state_change_ctx *ctx, int status) 603 { 604 ctx->status = status; 605 spdk_thread_exec_msg(ctx->thread, _nvmf_subsystem_state_change_complete, ctx); 606 } 607 608 static void 609 subsystem_state_change_revert_done(struct spdk_io_channel_iter *i, int status) 610 { 611 struct nvmf_subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 612 613 /* Nothing to be done here if the state setting fails, we are just screwed. 
*/ 614 if (nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state)) { 615 SPDK_ERRLOG("Unable to revert the subsystem state after operation failure.\n"); 616 } 617 618 /* return a failure here. This function only exists in an error path. */ 619 nvmf_subsystem_state_change_complete(ctx, -1); 620 } 621 622 static void 623 subsystem_state_change_done(struct spdk_io_channel_iter *i, int status) 624 { 625 struct nvmf_subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 626 enum spdk_nvmf_subsystem_state intermediate_state; 627 628 SPDK_DTRACE_PROBE4(nvmf_subsystem_change_state_done, ctx->subsystem->subnqn, 629 ctx->requested_state, ctx->original_state, status); 630 631 if (status == 0) { 632 status = nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state); 633 if (status) { 634 status = -1; 635 } 636 } 637 638 if (status) { 639 intermediate_state = nvmf_subsystem_get_intermediate_state(ctx->requested_state, 640 ctx->original_state); 641 assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES); 642 643 if (nvmf_subsystem_set_state(ctx->subsystem, intermediate_state)) { 644 goto out; 645 } 646 ctx->requested_state = ctx->original_state; 647 spdk_for_each_channel(ctx->subsystem->tgt, 648 subsystem_state_change_on_pg, 649 ctx, 650 subsystem_state_change_revert_done); 651 return; 652 } 653 654 out: 655 nvmf_subsystem_state_change_complete(ctx, status); 656 } 657 658 static void 659 subsystem_state_change_continue(void *ctx, int status) 660 { 661 struct spdk_io_channel_iter *i = ctx; 662 struct nvmf_subsystem_state_change_ctx *_ctx __attribute__((unused)); 663 664 _ctx = spdk_io_channel_iter_get_ctx(i); 665 SPDK_DTRACE_PROBE3(nvmf_pg_change_state_done, _ctx->subsystem->subnqn, 666 _ctx->requested_state, spdk_thread_get_id(spdk_get_thread())); 667 668 spdk_for_each_channel_continue(i, status); 669 } 670 671 static void 672 subsystem_state_change_on_pg(struct spdk_io_channel_iter *i) 673 { 674 struct nvmf_subsystem_state_change_ctx *ctx; 675 
struct spdk_io_channel *ch; 676 struct spdk_nvmf_poll_group *group; 677 678 ctx = spdk_io_channel_iter_get_ctx(i); 679 ch = spdk_io_channel_iter_get_channel(i); 680 group = spdk_io_channel_get_ctx(ch); 681 682 SPDK_DTRACE_PROBE3(nvmf_pg_change_state, ctx->subsystem->subnqn, 683 ctx->requested_state, spdk_thread_get_id(spdk_get_thread())); 684 switch (ctx->requested_state) { 685 case SPDK_NVMF_SUBSYSTEM_INACTIVE: 686 nvmf_poll_group_remove_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i); 687 break; 688 case SPDK_NVMF_SUBSYSTEM_ACTIVE: 689 if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVATING) { 690 nvmf_poll_group_add_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i); 691 } else if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_RESUMING) { 692 nvmf_poll_group_resume_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i); 693 } 694 break; 695 case SPDK_NVMF_SUBSYSTEM_PAUSED: 696 nvmf_poll_group_pause_subsystem(group, ctx->subsystem, ctx->nsid, subsystem_state_change_continue, 697 i); 698 break; 699 default: 700 assert(false); 701 break; 702 } 703 } 704 705 static void 706 nvmf_subsystem_do_state_change(struct nvmf_subsystem_state_change_ctx *ctx) 707 { 708 struct spdk_nvmf_subsystem *subsystem = ctx->subsystem; 709 enum spdk_nvmf_subsystem_state intermediate_state; 710 int rc; 711 712 SPDK_DTRACE_PROBE3(nvmf_subsystem_change_state, subsystem->subnqn, 713 ctx->requested_state, subsystem->state); 714 715 /* If we are already in the requested state, just call the callback immediately. 
*/ 716 if (subsystem->state == ctx->requested_state) { 717 nvmf_subsystem_state_change_complete(ctx, 0); 718 return; 719 } 720 721 intermediate_state = nvmf_subsystem_get_intermediate_state(subsystem->state, 722 ctx->requested_state); 723 assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES); 724 725 ctx->original_state = subsystem->state; 726 rc = nvmf_subsystem_set_state(subsystem, intermediate_state); 727 if (rc) { 728 nvmf_subsystem_state_change_complete(ctx, -1); 729 return; 730 } 731 732 spdk_for_each_channel(subsystem->tgt, 733 subsystem_state_change_on_pg, 734 ctx, 735 subsystem_state_change_done); 736 } 737 738 739 static int 740 nvmf_subsystem_state_change(struct spdk_nvmf_subsystem *subsystem, 741 uint32_t nsid, 742 enum spdk_nvmf_subsystem_state requested_state, 743 spdk_nvmf_subsystem_state_change_done cb_fn, 744 void *cb_arg) 745 { 746 struct nvmf_subsystem_state_change_ctx *ctx; 747 struct spdk_thread *thread; 748 749 thread = spdk_get_thread(); 750 if (thread == NULL) { 751 return -EINVAL; 752 } 753 754 ctx = calloc(1, sizeof(*ctx)); 755 if (!ctx) { 756 return -ENOMEM; 757 } 758 759 ctx->subsystem = subsystem; 760 ctx->nsid = nsid; 761 ctx->requested_state = requested_state; 762 ctx->cb_fn = cb_fn; 763 ctx->cb_arg = cb_arg; 764 ctx->thread = thread; 765 766 pthread_mutex_lock(&subsystem->mutex); 767 TAILQ_INSERT_TAIL(&subsystem->state_changes, ctx, link); 768 if (ctx != TAILQ_FIRST(&subsystem->state_changes)) { 769 pthread_mutex_unlock(&subsystem->mutex); 770 return 0; 771 } 772 pthread_mutex_unlock(&subsystem->mutex); 773 774 nvmf_subsystem_do_state_change(ctx); 775 776 return 0; 777 } 778 779 int 780 spdk_nvmf_subsystem_start(struct spdk_nvmf_subsystem *subsystem, 781 spdk_nvmf_subsystem_state_change_done cb_fn, 782 void *cb_arg) 783 { 784 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg); 785 } 786 787 int 788 spdk_nvmf_subsystem_stop(struct spdk_nvmf_subsystem *subsystem, 789 
spdk_nvmf_subsystem_state_change_done cb_fn, 790 void *cb_arg) 791 { 792 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_INACTIVE, cb_fn, cb_arg); 793 } 794 795 int 796 spdk_nvmf_subsystem_pause(struct spdk_nvmf_subsystem *subsystem, 797 uint32_t nsid, 798 spdk_nvmf_subsystem_state_change_done cb_fn, 799 void *cb_arg) 800 { 801 return nvmf_subsystem_state_change(subsystem, nsid, SPDK_NVMF_SUBSYSTEM_PAUSED, cb_fn, cb_arg); 802 } 803 804 int 805 spdk_nvmf_subsystem_resume(struct spdk_nvmf_subsystem *subsystem, 806 spdk_nvmf_subsystem_state_change_done cb_fn, 807 void *cb_arg) 808 { 809 return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg); 810 } 811 812 struct spdk_nvmf_subsystem * 813 spdk_nvmf_subsystem_get_first(struct spdk_nvmf_tgt *tgt) 814 { 815 return RB_MIN(subsystem_tree, &tgt->subsystems); 816 } 817 818 struct spdk_nvmf_subsystem * 819 spdk_nvmf_subsystem_get_next(struct spdk_nvmf_subsystem *subsystem) 820 { 821 if (!subsystem) { 822 return NULL; 823 } 824 825 return RB_NEXT(subsystem_tree, &tgt->subsystems, subsystem); 826 } 827 828 static int 829 nvmf_ns_add_host(struct spdk_nvmf_ns *ns, const char *hostnqn) 830 { 831 struct spdk_nvmf_host *host; 832 833 host = calloc(1, sizeof(*host)); 834 if (!host) { 835 return -ENOMEM; 836 } 837 snprintf(host->nqn, sizeof(host->nqn), "%s", hostnqn); 838 TAILQ_INSERT_HEAD(&ns->hosts, host, link); 839 return 0; 840 } 841 842 static void 843 nvmf_ns_remove_host(struct spdk_nvmf_ns *ns, struct spdk_nvmf_host *host) 844 { 845 TAILQ_REMOVE(&ns->hosts, host, link); 846 free(host); 847 } 848 849 static void 850 _async_event_ns_notice(void *_ctrlr) 851 { 852 struct spdk_nvmf_ctrlr *ctrlr = _ctrlr; 853 854 nvmf_ctrlr_async_event_ns_notice(ctrlr); 855 } 856 857 static void 858 send_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr) 859 { 860 spdk_thread_send_msg(ctrlr->thread, _async_event_ns_notice, ctrlr); 861 } 862 863 static int 864 nvmf_ns_visible(struct 
spdk_nvmf_subsystem *subsystem, 865 uint32_t nsid, 866 const char *hostnqn, 867 bool visible) 868 { 869 struct spdk_nvmf_ns *ns; 870 struct spdk_nvmf_ctrlr *ctrlr; 871 struct spdk_nvmf_host *host; 872 int rc; 873 874 if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 875 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) { 876 assert(false); 877 return -1; 878 } 879 880 if (hostnqn == NULL || !nvmf_nqn_is_valid(hostnqn)) { 881 return -EINVAL; 882 } 883 884 if (nsid == 0 || nsid > subsystem->max_nsid) { 885 return -EINVAL; 886 } 887 888 ns = subsystem->ns[nsid - 1]; 889 if (!ns) { 890 return -ENOENT; 891 } 892 893 if (ns->always_visible) { 894 /* No individual host control */ 895 return -EPERM; 896 } 897 898 /* Save host info to use for any future controllers. */ 899 host = nvmf_ns_find_host(ns, hostnqn); 900 if (visible && host == NULL) { 901 rc = nvmf_ns_add_host(ns, hostnqn); 902 if (rc) { 903 return rc; 904 } 905 } else if (!visible && host != NULL) { 906 nvmf_ns_remove_host(ns, host); 907 } 908 909 /* Also apply to existing controllers. 
 */
	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
		/* Skip controllers belonging to other hosts, and controllers whose
		 * visibility bit already matches the requested state. */
		if (strcmp(hostnqn, ctrlr->hostnqn) ||
		    spdk_bit_array_get(ctrlr->visible_ns, nsid - 1) == visible) {
			continue;
		}
		if (visible) {
			spdk_bit_array_set(ctrlr->visible_ns, nsid - 1);
		} else {
			spdk_bit_array_clear(ctrlr->visible_ns, nsid - 1);
		}
		/* Tell the host that its namespace list changed. */
		send_async_event_ns_notice(ctrlr);
		nvmf_ctrlr_ns_changed(ctrlr, nsid);
	}

	return 0;
}

/* Make namespace `nsid` visible to the controllers of `hostnqn`.
 * `flags` is accepted for API compatibility but is not read here. */
int
spdk_nvmf_ns_add_host(struct spdk_nvmf_subsystem *subsystem,
		      uint32_t nsid,
		      const char *hostnqn,
		      uint32_t flags)
{
	SPDK_DTRACE_PROBE4(spdk_nvmf_ns_add_host,
			   subsystem->subnqn,
			   nsid,
			   hostnqn,
			   flags);
	return nvmf_ns_visible(subsystem, nsid, hostnqn, true);
}

/* Hide namespace `nsid` from the controllers of `hostnqn`.
 * `flags` is accepted for API compatibility but is not read here. */
int
spdk_nvmf_ns_remove_host(struct spdk_nvmf_subsystem *subsystem,
			 uint32_t nsid,
			 const char *hostnqn,
			 uint32_t flags)
{
	SPDK_DTRACE_PROBE4(spdk_nvmf_ns_remove_host,
			   subsystem->subnqn,
			   nsid,
			   hostnqn,
			   flags);
	return nvmf_ns_visible(subsystem, nsid, hostnqn, false);
}

/* Must hold subsystem->mutex while calling this function.
 * Linear scan of the allowed-host list; returns NULL when not found. */
static struct spdk_nvmf_host *
nvmf_subsystem_find_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	struct spdk_nvmf_host *host = NULL;

	TAILQ_FOREACH(host, &subsystem->hosts, link) {
		if (strcmp(hostnqn, host->nqn) == 0) {
			return host;
		}
	}

	return NULL;
}

/* Allow `hostnqn` to connect to this subsystem. Idempotent: adding an
 * already-allowed host returns 0. `params` is transport-specific JSON
 * forwarded to each transport's subsystem_add_host callback. */
int
spdk_nvmf_subsystem_add_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn,
			     const struct spdk_json_val *params)
{
	struct spdk_nvmf_host *host;
	struct spdk_nvmf_transport *transport;
	int rc;

	if (!nvmf_nqn_is_valid(hostnqn)) {
		return -EINVAL;
	}

	pthread_mutex_lock(&subsystem->mutex);

	if (nvmf_subsystem_find_host(subsystem, hostnqn)) {
		/* This subsystem already allows the specified host.
		 */
		pthread_mutex_unlock(&subsystem->mutex);
		return 0;
	}

	host = calloc(1, sizeof(*host));
	if (!host) {
		pthread_mutex_unlock(&subsystem->mutex);
		return -ENOMEM;
	}

	snprintf(host->nqn, sizeof(host->nqn), "%s", hostnqn);

	SPDK_DTRACE_PROBE2(nvmf_subsystem_add_host, subsystem->subnqn, host->nqn);

	TAILQ_INSERT_HEAD(&subsystem->hosts, host, link);

	/* Only regenerate the discovery log if someone could actually discover
	 * this subsystem (i.e. it has at least one listener). */
	if (!TAILQ_EMPTY(&subsystem->listeners)) {
		nvmf_update_discovery_log(subsystem->tgt, hostnqn);
	}

	for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
	     transport = spdk_nvmf_transport_get_next(transport)) {
		if (transport->ops->subsystem_add_host) {
			rc = transport->ops->subsystem_add_host(transport, subsystem, hostnqn, params);
			if (rc) {
				SPDK_ERRLOG("Unable to add host to %s transport\n", transport->ops->name);
				/* Remove this host from all transports we've managed to add it to. */
				pthread_mutex_unlock(&subsystem->mutex);
				spdk_nvmf_subsystem_remove_host(subsystem, hostnqn);
				return rc;
			}
		}
	}

	pthread_mutex_unlock(&subsystem->mutex);

	return 0;
}

/* Disallow `hostnqn` from connecting to this subsystem. Returns -ENOENT
 * if the host was not in the allowed list. Note: this does not disconnect
 * existing connections — see spdk_nvmf_subsystem_disconnect_host(). */
int
spdk_nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	struct spdk_nvmf_host *host;
	struct spdk_nvmf_transport *transport;

	pthread_mutex_lock(&subsystem->mutex);

	host = nvmf_subsystem_find_host(subsystem, hostnqn);
	if (host == NULL) {
		pthread_mutex_unlock(&subsystem->mutex);
		return -ENOENT;
	}

	SPDK_DTRACE_PROBE2(nvmf_subsystem_remove_host, subsystem->subnqn, host->nqn);

	/* Internal helper (distinct from this public function despite the
	 * similar name): unlinks and frees the host entry. */
	nvmf_subsystem_remove_host(subsystem, host);

	if (!TAILQ_EMPTY(&subsystem->listeners)) {
		nvmf_update_discovery_log(subsystem->tgt, hostnqn);
	}

	for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
	     transport = spdk_nvmf_transport_get_next(transport)) {
		if (transport->ops->subsystem_remove_host) {
			transport->ops->subsystem_remove_host(transport, subsystem, hostnqn);
		}
	}

	pthread_mutex_unlock(&subsystem->mutex);

	return 0;
}

/* Context carried through the spdk_for_each_channel() iteration that
 * disconnects all of a host's queue pairs across poll groups. */
struct nvmf_subsystem_disconnect_host_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	char *hostnqn;
	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
	void *cb_arg;
};

/* Completion for the per-channel disconnect iteration: invoke the user's
 * callback (if any) and release the context. */
static void
nvmf_subsystem_disconnect_host_fini(struct spdk_io_channel_iter *i, int status)
{
	struct nvmf_subsystem_disconnect_host_ctx *ctx;

	ctx = spdk_io_channel_iter_get_ctx(i);

	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->cb_arg, status);
	}
	free(ctx->hostnqn);
	free(ctx);
}

/* Runs on each poll group: disconnect every qpair of this subsystem whose
 * controller hostnqn matches the target host. */
static void
nvmf_subsystem_disconnect_qpairs_by_host(struct spdk_io_channel_iter *i)
{
	struct nvmf_subsystem_disconnect_host_ctx *ctx;
	struct spdk_nvmf_poll_group *group;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_qpair *qpair, *tmp_qpair;
	struct spdk_nvmf_ctrlr *ctrlr;

	ctx = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	/* SAFE iteration: disconnect may unlink the qpair from the list. */
	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) {
		ctrlr = qpair->ctrlr;

		if (ctrlr == NULL || ctrlr->subsys != ctx->subsystem) {
			continue;
		}

		if (strncmp(ctrlr->hostnqn, ctx->hostnqn, sizeof(ctrlr->hostnqn)) == 0) {
			/* Right now this does not wait for the queue pairs to actually disconnect.
			 */
			spdk_nvmf_qpair_disconnect(qpair);
		}
	}
	spdk_for_each_channel_continue(i, 0);
}

/* Asynchronously disconnect every queue pair belonging to `hostnqn` on this
 * subsystem, across all poll groups. `cb_fn` fires once iteration finishes;
 * note the qpair disconnects themselves may still be in flight at that point
 * (see comment in nvmf_subsystem_disconnect_qpairs_by_host). */
int
spdk_nvmf_subsystem_disconnect_host(struct spdk_nvmf_subsystem *subsystem,
				    const char *hostnqn,
				    spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
				    void *cb_arg)
{
	struct nvmf_subsystem_disconnect_host_ctx *ctx;

	ctx = calloc(1, sizeof(struct nvmf_subsystem_disconnect_host_ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	ctx->hostnqn = strdup(hostnqn);
	if (ctx->hostnqn == NULL) {
		free(ctx);
		return -ENOMEM;
	}

	ctx->subsystem = subsystem;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(subsystem->tgt, nvmf_subsystem_disconnect_qpairs_by_host, ctx,
			      nvmf_subsystem_disconnect_host_fini);

	return 0;
}

/* Set whether any host may connect regardless of the allowed-host list,
 * and refresh the discovery log if the subsystem is discoverable. */
int
spdk_nvmf_subsystem_set_allow_any_host(struct spdk_nvmf_subsystem *subsystem, bool allow_any_host)
{
	pthread_mutex_lock(&subsystem->mutex);
	subsystem->flags.allow_any_host = allow_any_host;
	if (!TAILQ_EMPTY(&subsystem->listeners)) {
		nvmf_update_discovery_log(subsystem->tgt, NULL);
	}
	pthread_mutex_unlock(&subsystem->mutex);

	return 0;
}

/* Read the allow_any_host flag under the subsystem mutex. */
bool
spdk_nvmf_subsystem_get_allow_any_host(const struct spdk_nvmf_subsystem *subsystem)
{
	bool allow_any_host;
	struct spdk_nvmf_subsystem *sub;

	/* Technically, taking the mutex modifies data in the subsystem. But the const
	 * is still important to convey that this doesn't mutate any other data. Cast
	 * it away to work around this.
	 */
	sub = (struct spdk_nvmf_subsystem *)subsystem;

	pthread_mutex_lock(&sub->mutex);
	allow_any_host = sub->flags.allow_any_host;
	pthread_mutex_unlock(&sub->mutex);

	return allow_any_host;
}

/* True if `hostnqn` may connect: either allow_any_host is set or the host
 * is on the allowed list. A NULL hostnqn is never allowed. */
bool
spdk_nvmf_subsystem_host_allowed(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
{
	bool allowed;

	if (!hostnqn) {
		return false;
	}

	pthread_mutex_lock(&subsystem->mutex);

	if (subsystem->flags.allow_any_host) {
		pthread_mutex_unlock(&subsystem->mutex);
		return true;
	}

	allowed = nvmf_subsystem_find_host(subsystem, hostnqn) != NULL;
	pthread_mutex_unlock(&subsystem->mutex);

	return allowed;
}

/* Iterator: first entry of the allowed-host list (NULL if empty). */
struct spdk_nvmf_host *
spdk_nvmf_subsystem_get_first_host(struct spdk_nvmf_subsystem *subsystem)
{
	return TAILQ_FIRST(&subsystem->hosts);
}


/* Iterator: entry after `prev_host` (NULL at end of list). */
struct spdk_nvmf_host *
spdk_nvmf_subsystem_get_next_host(struct spdk_nvmf_subsystem *subsystem,
				  struct spdk_nvmf_host *prev_host)
{
	return TAILQ_NEXT(prev_host, link);
}

/* Accessor for a host entry's NQN string. */
const char *
spdk_nvmf_host_get_nqn(const struct spdk_nvmf_host *host)
{
	return host->nqn;
}

/* Find this subsystem's listener matching `trid`, or NULL. */
struct spdk_nvmf_subsystem_listener *
nvmf_subsystem_find_listener(struct spdk_nvmf_subsystem *subsystem,
			     const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_subsystem_listener *listener;

	TAILQ_FOREACH(listener, &subsystem->listeners, link) {
		if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) {
			return listener;
		}
	}

	return NULL;
}

/**
 * Function to be called once the target is listening.
 *
 * \param ctx Context argument passed to this function.
 * \param status 0 if it completed successfully, or negative errno if it failed.
 */
static void
_nvmf_subsystem_add_listener_done(void *ctx, int status)
{
	struct spdk_nvmf_subsystem_listener *listener = ctx;

	if (status) {
		/* On failure the listener was never linked in, so free it here. */
		listener->cb_fn(listener->cb_arg, status);
		free(listener);
		return;
	}

	TAILQ_INSERT_HEAD(&listener->subsystem->listeners, listener, link);
	nvmf_update_discovery_log(listener->subsystem->tgt, NULL);
	listener->cb_fn(listener->cb_arg, status);
}

/* Initialize `opts` (of caller-declared size `size`) to defaults. Fields
 * beyond `size` are untouched, allowing older callers with smaller structs. */
void
spdk_nvmf_subsystem_listener_opts_init(struct spdk_nvmf_listener_opts *opts, size_t size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL\n");
		assert(false);
		return;
	}
	if (size == 0) {
		SPDK_ERRLOG("size should not be zero\n");
		assert(false);
		return;
	}

	memset(opts, 0, size);
	opts->opts_size = size;

/* FIELD_OK: true if `field` lies entirely within the caller's struct size. */
#define FIELD_OK(field) \
	offsetof(struct spdk_nvmf_listener_opts, field) + sizeof(opts->field) <= size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(secure_channel, false);
	SET_FIELD(ana_state, SPDK_NVME_ANA_OPTIMIZED_STATE);

#undef FIELD_OK
#undef SET_FIELD
}

/* Copy user-supplied listener opts into `dst`, honoring src->opts_size so
 * that older (smaller) caller structs copy only the fields they contain. */
static int
listener_opts_copy(struct spdk_nvmf_listener_opts *src, struct spdk_nvmf_listener_opts *dst)
{
	if (src->opts_size == 0) {
		SPDK_ERRLOG("source structure size should not be zero\n");
		assert(false);
		return -EINVAL;
	}

	memset(dst, 0, sizeof(*dst));
	dst->opts_size = src->opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_nvmf_listener_opts, field) + sizeof(src->field) <= src->opts_size

#define SET_FIELD(field) \
	if (FIELD_OK(field)) { \
		dst->field = src->field; \
	} \

	SET_FIELD(secure_channel);
	SET_FIELD(ana_state);
	/* We should not remove this statement, but need to update the assert statement
	 * if we add
	 * a new field, and also add a corresponding SET_FIELD statement. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_listener_opts) == 16, "Incorrect size");

#undef SET_FIELD
#undef FIELD_OK

	return 0;
}

/* Attach an existing transport listener to this subsystem. The transport and
 * its listener must already exist (spdk_nvmf_tgt_listen_ext). Always reports
 * completion through cb_fn; on success the listener is linked into the
 * subsystem and the discovery log refreshed (via the done callback). */
static void
_nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem,
			     struct spdk_nvme_transport_id *trid,
			     spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
			     void *cb_arg, struct spdk_nvmf_listener_opts *opts)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_subsystem_listener *listener;
	struct spdk_nvmf_listener *tr_listener;
	uint32_t i;
	uint32_t id;
	int rc = 0;

	assert(cb_fn != NULL);

	/* Listeners may only change while the subsystem is inactive or paused. */
	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		cb_fn(cb_arg, -EAGAIN);
		return;
	}

	if (nvmf_subsystem_find_listener(subsystem, trid)) {
		/* Listener already exists in this subsystem */
		cb_fn(cb_arg, 0);
		return;
	}

	transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, trid->trstring);
	if (!transport) {
		SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n",
			    trid->trstring);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	tr_listener = nvmf_transport_find_listener(transport, trid);
	if (!tr_listener) {
		SPDK_ERRLOG("Cannot find transport listener for %s\n", trid->traddr);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	listener = calloc(1, sizeof(*listener));
	if (!listener) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Borrow the transport listener's trid rather than copying it. */
	listener->trid = &tr_listener->trid;
	listener->transport = transport;
	listener->cb_fn = cb_fn;
	listener->cb_arg = cb_arg;
	listener->subsystem = subsystem;
	/* One ANA state slot per possible namespace. */
	listener->ana_state = calloc(subsystem->max_nsid, sizeof(enum spdk_nvme_ana_state));
	if (!listener->ana_state) {
		free(listener);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	spdk_nvmf_subsystem_listener_opts_init(&listener->opts, sizeof(listener->opts));
	if (opts != NULL) {
		rc = listener_opts_copy(opts, &listener->opts);
		if (rc) {
			SPDK_ERRLOG("Unable to copy listener options\n");
			free(listener->ana_state);
			free(listener);
			cb_fn(cb_arg, -EINVAL);
			return;
		}
	}

	/* Allocate a unique listener id from the subsystem-wide bitmap. */
	id = spdk_bit_array_find_first_clear(subsystem->used_listener_ids, 0);
	if (id == UINT32_MAX) {
		SPDK_ERRLOG("Cannot add any more listeners\n");
		free(listener->ana_state);
		free(listener);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	spdk_bit_array_set(subsystem->used_listener_ids, id);
	listener->id = id;

	for (i = 0; i < subsystem->max_nsid; i++) {
		listener->ana_state[i] = listener->opts.ana_state;
	}

	if (transport->ops->listen_associate != NULL) {
		rc = transport->ops->listen_associate(transport, subsystem, trid);
	}

	SPDK_DTRACE_PROBE4(nvmf_subsystem_add_listener, subsystem->subnqn, listener->trid->trtype,
			   listener->trid->traddr, listener->trid->trsvcid);

	/* The done callback links the listener in on success, or frees it on
	 * failure, and always invokes cb_fn. */
	_nvmf_subsystem_add_listener_done(listener, rc);
}

/* Public wrapper: add a listener with default options. */
void
spdk_nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem,
				 struct spdk_nvme_transport_id *trid,
				 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
				 void *cb_arg)
{
	_nvmf_subsystem_add_listener(subsystem, trid, cb_fn, cb_arg, NULL);
}

/* Public wrapper: add a listener with caller-provided options. */
void
spdk_nvmf_subsystem_add_listener_ext(struct spdk_nvmf_subsystem *subsystem,
				     struct spdk_nvme_transport_id *trid,
				     spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
				     void *cb_arg, struct spdk_nvmf_listener_opts *opts)
{
	_nvmf_subsystem_add_listener(subsystem, trid, cb_fn, cb_arg, opts);
}

/* Detach the listener matching `trid` from this subsystem. The subsystem
 * must be inactive or paused. Returns -ENOENT if no such listener. */
int
spdk_nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem,
				    const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_subsystem_listener *listener;

	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		return -EAGAIN;
	}

	listener = nvmf_subsystem_find_listener(subsystem, trid);
	if (listener == NULL) {
		return -ENOENT;
	}

	SPDK_DTRACE_PROBE4(nvmf_subsystem_remove_listener, subsystem->subnqn, listener->trid->trtype,
			   listener->trid->traddr, listener->trid->trsvcid);

	_nvmf_subsystem_remove_listener(subsystem, listener, false);

	return 0;
}

/* Remove every listener from the subsystem; `stop` is forwarded to the
 * per-listener removal helper. */
void
nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem,
				    bool stop)
{
	struct spdk_nvmf_subsystem_listener *listener, *listener_tmp;

	TAILQ_FOREACH_SAFE(listener, &subsystem->listeners, link, listener_tmp) {
		_nvmf_subsystem_remove_listener(subsystem, listener, stop);
	}
}

/* True if a connection arriving on `trid` is acceptable for this subsystem:
 * either a matching listener exists, or (deprecated) the subsystem is the
 * discovery subsystem. */
bool
spdk_nvmf_subsystem_listener_allowed(struct spdk_nvmf_subsystem *subsystem,
				     const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_subsystem_listener *listener;

	TAILQ_FOREACH(listener,
		      &subsystem->listeners, link) {
		if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) {
			return true;
		}
	}

	if (!strcmp(subsystem->subnqn, SPDK_NVMF_DISCOVERY_NQN)) {
		SPDK_WARNLOG("Allowing connection to discovery subsystem on %s/%s/%s, "
			     "even though this listener was not added to the discovery "
			     "subsystem. This behavior is deprecated and will be removed "
			     "in a future release.\n",
			     spdk_nvme_transport_id_trtype_str(trid->trtype), trid->traddr, trid->trsvcid);
		return true;
	}

	return false;
}

/* Iterator: first listener of the subsystem (NULL if none). */
struct spdk_nvmf_subsystem_listener *
spdk_nvmf_subsystem_get_first_listener(struct spdk_nvmf_subsystem *subsystem)
{
	return TAILQ_FIRST(&subsystem->listeners);
}

/* Iterator: listener after `prev_listener` (NULL at end). */
struct spdk_nvmf_subsystem_listener *
spdk_nvmf_subsystem_get_next_listener(struct spdk_nvmf_subsystem *subsystem,
				      struct spdk_nvmf_subsystem_listener *prev_listener)
{
	return TAILQ_NEXT(prev_listener, link);
}

/* Accessor for a listener's transport id. */
const struct spdk_nvme_transport_id *
spdk_nvmf_subsystem_listener_get_trid(struct spdk_nvmf_subsystem_listener *listener)
{
	return listener->trid;
}

/* Set whether connections are accepted on any listener (no mutex taken;
 * NOTE(review): unlike allow_any_host this flag is written unlocked —
 * presumably callers serialize externally; confirm before relying on it). */
void
spdk_nvmf_subsystem_allow_any_listener(struct spdk_nvmf_subsystem *subsystem,
				       bool allow_any_listener)
{
	subsystem->flags.allow_any_listener = allow_any_listener;
}

/* Read the allow_any_listener flag. */
bool
spdk_nvmf_subsystem_any_listener_allowed(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->flags.allow_any_listener;
}

/* Context for propagating a namespace-list change to every poll group. */
struct subsystem_update_ns_ctx {
	struct spdk_nvmf_subsystem *subsystem;

	spdk_nvmf_subsystem_state_change_done cb_fn;
	void *cb_arg;
};

/* Completion of the per-poll-group update: report status and free ctx. */
static void
subsystem_update_ns_done(struct spdk_io_channel_iter *i, int status)
{
	struct subsystem_update_ns_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status);
	}
	free(ctx);
}

/* Runs on each poll group: resync the group's view of the subsystem's
 * namespaces; a non-zero rc aborts the channel iteration. */
static void
subsystem_update_ns_on_pg(struct spdk_io_channel_iter *i)
{
	int rc;
	struct subsystem_update_ns_ctx *ctx;
	struct spdk_nvmf_poll_group *group;
	struct spdk_nvmf_subsystem *subsystem;

	ctx = spdk_io_channel_iter_get_ctx(i);
	group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i));
	subsystem = ctx->subsystem;

	rc = nvmf_poll_group_update_subsystem(group, subsystem);
	spdk_for_each_channel_continue(i, rc);
}

/* Kick off the asynchronous per-poll-group namespace update. Returns
 * -ENOMEM if the iteration context cannot be allocated. */
static int
nvmf_subsystem_update_ns(struct spdk_nvmf_subsystem *subsystem,
			 spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg)
{
	struct subsystem_update_ns_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		SPDK_ERRLOG("Can't alloc subsystem poll group update context\n");
		return -ENOMEM;
	}
	ctx->subsystem = subsystem;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(subsystem->tgt,
			      subsystem_update_ns_on_pg,
			      ctx,
			      subsystem_update_ns_done);
	return 0;
}

/* Notify every controller that can see `nsid` that the namespace changed. */
static void
nvmf_subsystem_ns_changed(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	struct spdk_nvmf_ctrlr *ctrlr;

	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
		if (nvmf_ctrlr_ns_is_visible(ctrlr, nsid)) {
			nvmf_ctrlr_ns_changed(ctrlr, nsid);
		}
	}
}

static uint32_t nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns);

/* Remove namespace `nsid` from the subsystem, releasing its bdev claim,
 * reservations and per-host visibility state. Subsystem must be inactive
 * or paused. Returns 0 on success, -1 on invalid nsid/state. */
int
spdk_nvmf_subsystem_remove_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_host *host, *tmp;
	struct spdk_nvmf_ctrlr *ctrlr;

	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		assert(false);
		return -1;
	}

	if (nsid == 0
	    || nsid > subsystem->max_nsid) {
		return -1;
	}

	ns = subsystem->ns[nsid - 1];
	if (!ns) {
		return -1;
	}

	subsystem->ns[nsid - 1] = NULL;

	assert(ns->anagrpid - 1 < subsystem->max_nsid);
	assert(subsystem->ana_group[ns->anagrpid - 1] > 0);

	/* Drop this namespace's membership in its ANA group. */
	subsystem->ana_group[ns->anagrpid - 1]--;

	TAILQ_FOREACH_SAFE(host, &ns->hosts, link, tmp) {
		nvmf_ns_remove_host(ns, host);
	}

	free(ns->ptpl_file);
	nvmf_ns_reservation_clear_all_registrants(ns);
	spdk_bdev_module_release_bdev(ns->bdev);
	spdk_bdev_close(ns->desc);
	free(ns);

	/* If this was the last namespace, the subsystem no longer supports FDP. */
	if (subsystem->fdp_supported && !spdk_nvmf_subsystem_get_first_ns(subsystem)) {
		subsystem->fdp_supported = false;
		SPDK_DEBUGLOG(nvmf, "Subsystem with id: %u doesn't have FDP capability.\n",
			      subsystem->id);
	}

	for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
	     transport = spdk_nvmf_transport_get_next(transport)) {
		if (transport->ops->subsystem_remove_ns) {
			transport->ops->subsystem_remove_ns(transport, subsystem, nsid);
		}
	}

	nvmf_subsystem_ns_changed(subsystem, nsid);

	/* Clear visibility of the removed nsid on every controller. */
	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
		spdk_bit_array_clear(ctrlr->visible_ns, nsid - 1);
	}

	return 0;
}

/* Context for pause -> (remove|resize) -> resume sequences triggered by
 * bdev events; allocated per event because the ns may vanish mid-flight. */
struct subsystem_ns_change_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	spdk_nvmf_subsystem_state_change_done cb_fn;
	uint32_t nsid;
};

/* Pause-complete callback for hot remove: remove the ns, then resume. */
static void
_nvmf_ns_hot_remove(struct spdk_nvmf_subsystem *subsystem,
		    void *cb_arg, int status)
{
	struct subsystem_ns_change_ctx *ctx = cb_arg;
	int rc;

	rc = spdk_nvmf_subsystem_remove_ns(subsystem, ctx->nsid);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to make changes to NVME-oF subsystem with id: %u\n", subsystem->id);
	}

	rc = spdk_nvmf_subsystem_resume(subsystem, NULL, NULL);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to resume NVME-oF subsystem with id: %u\n", subsystem->id);
	}

	free(ctx);
}

/* Retry helper: re-attempt the subsystem pause for a pending ns change.
 * Re-posts itself while the subsystem is busy (-EBUSY). */
static void
nvmf_ns_change_msg(void *ns_ctx)
{
	struct subsystem_ns_change_ctx *ctx = ns_ctx;
	int rc;

	SPDK_DTRACE_PROBE2(nvmf_ns_change, ctx->nsid, ctx->subsystem->subnqn);

	rc = spdk_nvmf_subsystem_pause(ctx->subsystem, ctx->nsid, ctx->cb_fn, ctx);
	if (rc) {
		if (rc == -EBUSY) {
			/* Try again, this is not a permanent situation. */
			spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ctx);
		} else {
			free(ctx);
			SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n");
		}
	}
}

/* Bdev REMOVE event handler: pause the subsystem (quiescing the nsid),
 * then remove the namespace and resume. */
static void
nvmf_ns_hot_remove(void *remove_ctx)
{
	struct spdk_nvmf_ns *ns = remove_ctx;
	struct subsystem_ns_change_ctx *ns_ctx;
	int rc;

	/* We have to allocate a new context because this op
	 * is asynchronous and we could lose the ns in the middle.
	 */
	ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx));
	if (!ns_ctx) {
		SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n");
		return;
	}

	ns_ctx->subsystem = ns->subsystem;
	ns_ctx->nsid = ns->opts.nsid;
	ns_ctx->cb_fn = _nvmf_ns_hot_remove;

	rc = spdk_nvmf_subsystem_pause(ns->subsystem, ns_ctx->nsid, _nvmf_ns_hot_remove, ns_ctx);
	if (rc) {
		if (rc == -EBUSY) {
			/* Try again, this is not a permanent situation. */
			spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx);
		} else {
			SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n");
			free(ns_ctx);
		}
	}
}

/* Pause-complete callback for resize: broadcast the ns change and resume. */
static void
_nvmf_ns_resize(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	struct subsystem_ns_change_ctx *ctx = cb_arg;

	nvmf_subsystem_ns_changed(subsystem, ctx->nsid);
	if (spdk_nvmf_subsystem_resume(subsystem, NULL, NULL) != 0) {
		SPDK_ERRLOG("Failed to resume NVME-oF subsystem with id: %u\n", subsystem->id);
	}

	free(ctx);
}

/* Bdev RESIZE event handler: pause (without quiescing any nsid) and notify.
 * NOTE(review): the -EBUSY retry goes through nvmf_ns_change_msg, which
 * pauses with ctx->nsid (non-zero) rather than the 0 used below — confirm
 * this asymmetry is intentional. */
static void
nvmf_ns_resize(void *event_ctx)
{
	struct spdk_nvmf_ns *ns = event_ctx;
	struct subsystem_ns_change_ctx *ns_ctx;
	int rc;

	/* We have to allocate a new context because this op
	 * is asynchronous and we could lose the ns in the middle.
	 */
	ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx));
	if (!ns_ctx) {
		SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n");
		return;
	}

	ns_ctx->subsystem = ns->subsystem;
	ns_ctx->nsid = ns->opts.nsid;
	ns_ctx->cb_fn = _nvmf_ns_resize;

	/* Specify 0 for the nsid here, because we do not need to pause the namespace.
	 * Namespaces can only be resized bigger, so there is no need to quiesce I/O.
	 */
	rc = spdk_nvmf_subsystem_pause(ns->subsystem, 0, _nvmf_ns_resize, ns_ctx);
	if (rc) {
		if (rc == -EBUSY) {
			/* Try again, this is not a permanent situation.
			 */
			spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx);
		} else {
			SPDK_ERRLOG("Unable to pause subsystem to process namespace resize!\n");
			free(ns_ctx);
		}
	}
}

/* Dispatch bdev events (hot remove, resize) for a namespace's backing bdev.
 * `event_ctx` is the struct spdk_nvmf_ns registered at open time. */
static void
nvmf_ns_event(enum spdk_bdev_event_type type,
	      struct spdk_bdev *bdev,
	      void *event_ctx)
{
	SPDK_DEBUGLOG(nvmf, "Bdev event: type %d, name %s, subsystem_id %d, ns_id %d\n",
		      type,
		      spdk_bdev_get_name(bdev),
		      ((struct spdk_nvmf_ns *)event_ctx)->subsystem->id,
		      ((struct spdk_nvmf_ns *)event_ctx)->nsid);

	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		nvmf_ns_hot_remove(event_ctx);
		break;
	case SPDK_BDEV_EVENT_RESIZE:
		nvmf_ns_resize(event_ctx);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

/* Initialize namespace options of caller-declared size to defaults; only
 * fields that fit within `opts_size` are touched (ABI compatibility). */
void
spdk_nvmf_ns_opts_get_defaults(struct spdk_nvmf_ns_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

/* FIELD_OK: true if `field` lies entirely within the caller's struct size. */
#define FIELD_OK(field) \
	offsetof(struct spdk_nvmf_ns_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	/* All current fields are set to 0 by default. */
	SET_FIELD(nsid, 0);
	if (FIELD_OK(nguid)) {
		memset(opts->nguid, 0, sizeof(opts->nguid));
	}
	if (FIELD_OK(eui64)) {
		memset(opts->eui64, 0, sizeof(opts->eui64));
	}
	if (FIELD_OK(uuid)) {
		spdk_uuid_set_null(&opts->uuid);
	}
	SET_FIELD(anagrpid, 0);
	SET_FIELD(transport_specific, NULL);

#undef FIELD_OK
#undef SET_FIELD
}

/* Copy user-supplied ns options into `opts`, honoring the user's declared
 * opts_size so older (smaller) caller structs copy only what they contain. */
static void
nvmf_ns_opts_copy(struct spdk_nvmf_ns_opts *opts,
		  const struct spdk_nvmf_ns_opts *user_opts,
		  size_t opts_size)
{
#define FIELD_OK(field) \
	offsetof(struct spdk_nvmf_ns_opts, field) + sizeof(opts->field) <= user_opts->opts_size

#define SET_FIELD(field) \
	if (FIELD_OK(field)) { \
		opts->field = user_opts->field; \
	} \

	SET_FIELD(nsid);
	if (FIELD_OK(nguid)) {
		memcpy(opts->nguid, user_opts->nguid, sizeof(opts->nguid));
	}
	if (FIELD_OK(eui64)) {
		memcpy(opts->eui64, user_opts->eui64, sizeof(opts->eui64));
	}
	if (FIELD_OK(uuid)) {
		spdk_uuid_copy(&opts->uuid, &user_opts->uuid);
	}
	SET_FIELD(anagrpid);
	SET_FIELD(no_auto_visible);
	SET_FIELD(transport_specific);

	opts->opts_size = user_opts->opts_size;

	/* We should not remove this statement, but need to update the assert statement
	 * if we add a new field, and also add a corresponding SET_FIELD statement.
	 */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_ns_opts) == 72, "Incorrect size");

#undef FIELD_OK
#undef SET_FIELD
}

/* Dummy bdev module used to claim bdevs.
 */
static struct spdk_bdev_module ns_bdev_module = {
	.name	= "NVMe-oF Target",
};

/* Reservation persistence helpers defined later in this file. */
static int nvmf_ns_reservation_update(const struct spdk_nvmf_ns *ns,
				      const struct spdk_nvmf_reservation_info *info);
static int nvmf_ns_reservation_load(const struct spdk_nvmf_ns *ns,
				    struct spdk_nvmf_reservation_info *info);
static int nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns,
				       struct spdk_nvmf_reservation_info *info);

/* Add a namespace backed by `bdev_name` to the subsystem.
 *
 * The subsystem must be inactive or paused. If user_opts->nsid is 0, the
 * first free nsid is picked. `ptpl_file` optionally enables Persist Through
 * Power Loss reservation state. Returns the assigned nsid, or 0 on any
 * failure (this function reports errors by returning an invalid nsid, not
 * a negative errno). */
uint32_t
spdk_nvmf_subsystem_add_ns_ext(struct spdk_nvmf_subsystem *subsystem, const char *bdev_name,
			       const struct spdk_nvmf_ns_opts *user_opts, size_t opts_size,
			       const char *ptpl_file)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_ns_opts opts;
	struct spdk_nvmf_ns *ns, *first_ns;
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_reservation_info info = {0};
	int rc;
	bool zone_append_supported;
	uint64_t max_zone_append_size_kib;

	if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
	      subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
		return 0;
	}

	spdk_nvmf_ns_opts_get_defaults(&opts, sizeof(opts));
	if (user_opts) {
		nvmf_ns_opts_copy(&opts, user_opts, opts_size);
	}

	if (opts.nsid == SPDK_NVME_GLOBAL_NS_TAG) {
		SPDK_ERRLOG("Invalid NSID %" PRIu32 "\n", opts.nsid);
		return 0;
	}

	if (opts.nsid == 0) {
		/*
		 * NSID not specified - find a free index.
		 *
		 * If no free slots are found, opts.nsid will be subsystem->max_nsid + 1, which will
		 * expand max_nsid if possible.
		 */
		for (opts.nsid = 1; opts.nsid <= subsystem->max_nsid; opts.nsid++) {
			if (_nvmf_subsystem_get_ns(subsystem, opts.nsid) == NULL) {
				break;
			}
		}
	}

	if (_nvmf_subsystem_get_ns(subsystem, opts.nsid)) {
		SPDK_ERRLOG("Requested NSID %" PRIu32 " already in use\n", opts.nsid);
		return 0;
	}

	if (opts.nsid > subsystem->max_nsid) {
		SPDK_ERRLOG("NSID greater than maximum not allowed\n");
		return 0;
	}

	/* An ANA group id of 0 means "use the nsid as the group". */
	if (opts.anagrpid == 0) {
		opts.anagrpid = opts.nsid;
	}

	if (opts.anagrpid > subsystem->max_nsid) {
		SPDK_ERRLOG("ANAGRPID greater than maximum NSID not allowed\n");
		return 0;
	}

	ns = calloc(1, sizeof(*ns));
	if (ns == NULL) {
		SPDK_ERRLOG("Namespace allocation failed\n");
		return 0;
	}

	TAILQ_INIT(&ns->hosts);
	ns->always_visible = !opts.no_auto_visible;
	if (ns->always_visible) {
		/* Auto-visible: mark this nsid visible on every existing controller. */
		TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
			spdk_bit_array_set(ctrlr->visible_ns, opts.nsid - 1);
		}
	}

	rc = spdk_bdev_open_ext(bdev_name, true, nvmf_ns_event, ns, &ns->desc);
	if (rc != 0) {
		SPDK_ERRLOG("Subsystem %s: bdev %s cannot be opened, error=%d\n",
			    subsystem->subnqn, bdev_name, rc);
		free(ns);
		return 0;
	}

	ns->bdev = spdk_bdev_desc_get_bdev(ns->desc);

	/* Only interleaved metadata within a size limit is supported. */
	if (spdk_bdev_get_md_size(ns->bdev) != 0) {
		if (!spdk_bdev_is_md_interleaved(ns->bdev)) {
			SPDK_ERRLOG("Can't attach bdev with separate metadata.\n");
			spdk_bdev_close(ns->desc);
			free(ns);
			return 0;
		}

		if (spdk_bdev_get_md_size(ns->bdev) > SPDK_BDEV_MAX_INTERLEAVED_MD_SIZE) {
			SPDK_ERRLOG("Maximum supported interleaved md size %u, current md size %u\n",
				    SPDK_BDEV_MAX_INTERLEAVED_MD_SIZE, spdk_bdev_get_md_size(ns->bdev));
			spdk_bdev_close(ns->desc);
			free(ns);
			return 0;
		}
	}

	/* Claim the bdev exclusively for the NVMe-oF target. */
	rc = spdk_bdev_module_claim_bdev(ns->bdev, ns->desc, &ns_bdev_module);
	if (rc != 0) {
		spdk_bdev_close(ns->desc);
		free(ns);
		return 0;
	}

	/* Cache the zcopy capability of the bdev device */
	ns->zcopy = spdk_bdev_io_type_supported(ns->bdev, SPDK_BDEV_IO_TYPE_ZCOPY);

	if (spdk_uuid_is_null(&opts.uuid)) {
		opts.uuid = *spdk_bdev_get_uuid(ns->bdev);
	}

	/* if nguid descriptor is supported by bdev module (nvme) then uuid = nguid */
	if (spdk_mem_all_zero(opts.nguid, sizeof(opts.nguid))) {
		SPDK_STATIC_ASSERT(sizeof(opts.nguid) == sizeof(opts.uuid), "size mismatch");
		memcpy(opts.nguid, spdk_bdev_get_uuid(ns->bdev), sizeof(opts.nguid));
	}

	if (spdk_bdev_is_zoned(ns->bdev)) {
		SPDK_DEBUGLOG(nvmf, "The added namespace is backed by a zoned block device.\n");
		ns->csi = SPDK_NVME_CSI_ZNS;

		zone_append_supported = spdk_bdev_io_type_supported(ns->bdev,
				        SPDK_BDEV_IO_TYPE_ZONE_APPEND);
		max_zone_append_size_kib = spdk_bdev_get_max_zone_append_size(
						   ns->bdev) * spdk_bdev_get_block_size(ns->bdev);

		/* All zoned namespaces in one subsystem must report identical
		 * zone-append capabilities. */
		if (_nvmf_subsystem_get_first_zoned_ns(subsystem) != NULL &&
		    (subsystem->zone_append_supported != zone_append_supported ||
		     subsystem->max_zone_append_size_kib != max_zone_append_size_kib)) {
			SPDK_ERRLOG("Namespaces with different zone append support or different zone append size are not allowed.\n");
			goto err;
		}

		subsystem->zone_append_supported = zone_append_supported;
		subsystem->max_zone_append_size_kib = max_zone_append_size_kib;
	}

	/* FDP support must be uniform: the first namespace fixes it for the
	 * subsystem; later namespaces must match. */
	first_ns = spdk_nvmf_subsystem_get_first_ns(subsystem);
	if (!first_ns) {
		if (spdk_bdev_get_nvme_ctratt(ns->bdev).bits.fdps) {
			SPDK_DEBUGLOG(nvmf, "Subsystem with id: %u has FDP capability.\n",
				      subsystem->id);
			subsystem->fdp_supported = true;
		}
	} else {
		if (spdk_bdev_get_nvme_ctratt(first_ns->bdev).bits.fdps !=
		    spdk_bdev_get_nvme_ctratt(ns->bdev).bits.fdps) {
			SPDK_ERRLOG("Subsystem with id: %u can%s FDP namespace.\n", subsystem->id,
				    spdk_bdev_get_nvme_ctratt(first_ns->bdev).bits.fdps ? " only add" : "not add");
			goto err;
		}
	}

	ns->opts = opts;
	ns->subsystem = subsystem;
	subsystem->ns[opts.nsid - 1] = ns;
	ns->nsid = opts.nsid;
	ns->anagrpid = opts.anagrpid;
	subsystem->ana_group[ns->anagrpid - 1]++;
	TAILQ_INIT(&ns->registrants);
	if (ptpl_file) {
		ns->ptpl_file = strdup(ptpl_file);
		if (!ns->ptpl_file) {
			SPDK_ERRLOG("Namespace ns->ptpl_file allocation failed\n");
			goto err;
		}
	}

	/* Restore persisted reservation state, if this namespace supports it. */
	if (nvmf_ns_is_ptpl_capable(ns)) {
		rc = nvmf_ns_reservation_load(ns, &info);
		if (rc) {
			SPDK_ERRLOG("Subsystem load reservation failed\n");
			goto err;
		}

		rc = nvmf_ns_reservation_restore(ns, &info);
		if (rc) {
			SPDK_ERRLOG("Subsystem restore reservation failed\n");
			goto err;
		}
	}

	for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
	     transport = spdk_nvmf_transport_get_next(transport)) {
		if (transport->ops->subsystem_add_ns) {
			rc = transport->ops->subsystem_add_ns(transport, subsystem, ns);
			if (rc) {
				SPDK_ERRLOG("Namespace attachment is not allowed by %s transport\n", transport->ops->name);
				nvmf_ns_reservation_clear_all_registrants(ns);
				goto err;
			}
		}
	}

	/* JSON value obj is freed before sending the response. Set NULL to prevent usage of dangling pointer. */
	ns->opts.transport_specific = NULL;

	SPDK_DEBUGLOG(nvmf, "Subsystem %s: bdev %s assigned nsid %" PRIu32 "\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem),
		      bdev_name,
		      opts.nsid);

	nvmf_subsystem_ns_changed(subsystem, opts.nsid);

	SPDK_DTRACE_PROBE2(nvmf_subsystem_add_ns, subsystem->subnqn, ns->nsid);

	return opts.nsid;
err:
	/* Unified failure path once the bdev has been claimed: undo the slot
	 * assignment (harmless no-op if not yet assigned), release the claim
	 * and free everything owned by the namespace. */
	subsystem->ns[opts.nsid - 1] = NULL;
	spdk_bdev_module_release_bdev(ns->bdev);
	spdk_bdev_close(ns->desc);
	free(ns->ptpl_file);
	free(ns);

	return 0;
}

/* Return the first allocated nsid strictly greater than `prev_nsid`,
 * or 0 when there are no more. */
static uint32_t
nvmf_subsystem_get_next_allocated_nsid(struct spdk_nvmf_subsystem *subsystem,
				       uint32_t prev_nsid)
{
	uint32_t nsid;

	if (prev_nsid >= subsystem->max_nsid) {
		return 0;
	}

	for (nsid = prev_nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return nsid;
		}
	}

	return 0;
}

/* Iterator: first allocated namespace of the subsystem (NULL if none). */
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	uint32_t first_nsid;

	first_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, 0);
	return _nvmf_subsystem_get_ns(subsystem, first_nsid);
}

/* Iterator: allocated namespace after `prev_ns` (NULL at end). */
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t next_nsid;

	next_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, prev_ns->opts.nsid);
	return _nvmf_subsystem_get_ns(subsystem, next_nsid);
}

/* Look up a namespace by nsid (NULL if not allocated). */
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	return _nvmf_subsystem_get_ns(subsystem, nsid);
}

/* Accessor: a namespace's nsid. */
uint32_t
spdk_nvmf_ns_get_id(const struct spdk_nvmf_ns *ns)
{
	return ns->opts.nsid;
}

/* Accessor: a namespace's backing bdev. */
struct spdk_bdev *
spdk_nvmf_ns_get_bdev(struct spdk_nvmf_ns *ns)
{
	return
ns->bdev; 2169 } 2170 2171 void 2172 spdk_nvmf_ns_get_opts(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_ns_opts *opts, 2173 size_t opts_size) 2174 { 2175 memset(opts, 0, opts_size); 2176 memcpy(opts, &ns->opts, spdk_min(sizeof(ns->opts), opts_size)); 2177 } 2178 2179 const char * 2180 spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem) 2181 { 2182 return subsystem->sn; 2183 } 2184 2185 int 2186 spdk_nvmf_subsystem_set_sn(struct spdk_nvmf_subsystem *subsystem, const char *sn) 2187 { 2188 size_t len, max_len; 2189 2190 max_len = sizeof(subsystem->sn) - 1; 2191 len = strlen(sn); 2192 if (len > max_len) { 2193 SPDK_DEBUGLOG(nvmf, "Invalid sn \"%s\": length %zu > max %zu\n", 2194 sn, len, max_len); 2195 return -1; 2196 } 2197 2198 if (!nvmf_valid_ascii_string(sn, len)) { 2199 SPDK_DEBUGLOG(nvmf, "Non-ASCII sn\n"); 2200 SPDK_LOGDUMP(nvmf, "sn", sn, len); 2201 return -1; 2202 } 2203 2204 snprintf(subsystem->sn, sizeof(subsystem->sn), "%s", sn); 2205 2206 return 0; 2207 } 2208 2209 const char * 2210 spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem) 2211 { 2212 return subsystem->mn; 2213 } 2214 2215 int 2216 spdk_nvmf_subsystem_set_mn(struct spdk_nvmf_subsystem *subsystem, const char *mn) 2217 { 2218 size_t len, max_len; 2219 2220 if (mn == NULL) { 2221 mn = MODEL_NUMBER_DEFAULT; 2222 } 2223 max_len = sizeof(subsystem->mn) - 1; 2224 len = strlen(mn); 2225 if (len > max_len) { 2226 SPDK_DEBUGLOG(nvmf, "Invalid mn \"%s\": length %zu > max %zu\n", 2227 mn, len, max_len); 2228 return -1; 2229 } 2230 2231 if (!nvmf_valid_ascii_string(mn, len)) { 2232 SPDK_DEBUGLOG(nvmf, "Non-ASCII mn\n"); 2233 SPDK_LOGDUMP(nvmf, "mn", mn, len); 2234 return -1; 2235 } 2236 2237 snprintf(subsystem->mn, sizeof(subsystem->mn), "%s", mn); 2238 2239 return 0; 2240 } 2241 2242 const char * 2243 spdk_nvmf_subsystem_get_nqn(const struct spdk_nvmf_subsystem *subsystem) 2244 { 2245 return subsystem->subnqn; 2246 } 2247 2248 /* We have to use the typedef in the 
function declaration to appease astyle. */
typedef enum spdk_nvmf_subtype spdk_nvmf_subtype_t;

spdk_nvmf_subtype_t
spdk_nvmf_subsystem_get_type(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->subtype;
}

uint32_t
spdk_nvmf_subsystem_get_max_nsid(struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->max_nsid;
}

/* Configure the [min_cntlid, max_cntlid] range used for dynamic controller
 * IDs. Only permitted while the subsystem is INACTIVE. Returns 0 on success,
 * -EAGAIN if the subsystem is not inactive, -EINVAL for an invalid range. */
int
nvmf_subsystem_set_cntlid_range(struct spdk_nvmf_subsystem *subsystem,
				uint16_t min_cntlid, uint16_t max_cntlid)
{
	if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) {
		return -EAGAIN;
	}

	if (min_cntlid > max_cntlid) {
		return -EINVAL;
	}
	/* The spec reserves cntlid values in the range FFF0h to FFFFh. */
	if (min_cntlid < NVMF_MIN_CNTLID || min_cntlid > NVMF_MAX_CNTLID ||
	    max_cntlid < NVMF_MIN_CNTLID || max_cntlid > NVMF_MAX_CNTLID) {
		return -EINVAL;
	}
	subsystem->min_cntlid = min_cntlid;
	subsystem->max_cntlid = max_cntlid;
	/* next_cntlid is pre-incremented before use, so min_cntlid - 1 makes
	 * the next generated ID equal to min_cntlid. */
	if (subsystem->next_cntlid < min_cntlid || subsystem->next_cntlid > max_cntlid - 1) {
		subsystem->next_cntlid = min_cntlid - 1;
	}

	return 0;
}

/* Allocate the next unused controller ID, scanning at most one full cycle of
 * the configured range. Returns 0xFFFF when every cntlid is in use. */
static uint16_t
nvmf_subsystem_gen_cntlid(struct spdk_nvmf_subsystem *subsystem)
{
	int count;

	/*
	 * In the worst case, we might have to try all CNTLID values between min_cntlid and max_cntlid
	 * before we find one that is unused (or find that all values are in use).
	 */
	for (count = 0; count < subsystem->max_cntlid - subsystem->min_cntlid + 1; count++) {
		subsystem->next_cntlid++;
		if (subsystem->next_cntlid > subsystem->max_cntlid) {
			subsystem->next_cntlid = subsystem->min_cntlid;
		}

		/* Check if a controller with this cntlid currently exists.
		 */
		if (nvmf_subsystem_get_ctrlr(subsystem, subsystem->next_cntlid) == NULL) {
			/* Found unused cntlid */
			return subsystem->next_cntlid;
		}
	}

	/* All valid cntlid values are in use. */
	return 0xFFFF;
}

/* Attach a controller to the subsystem. Dynamic controllers get a generated
 * cntlid; static ones must not collide with an existing one. Returns 0,
 * -EBUSY (no free cntlid) or -EEXIST (duplicate cntlid). */
int
nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr)
{

	if (ctrlr->dynamic_ctrlr) {
		ctrlr->cntlid = nvmf_subsystem_gen_cntlid(subsystem);
		if (ctrlr->cntlid == 0xFFFF) {
			/* Unable to get a cntlid */
			SPDK_ERRLOG("Reached max simultaneous ctrlrs\n");
			return -EBUSY;
		}
	} else if (nvmf_subsystem_get_ctrlr(subsystem, ctrlr->cntlid) != NULL) {
		SPDK_ERRLOG("Ctrlr with cntlid %u already exist\n", ctrlr->cntlid);
		return -EEXIST;
	}

	TAILQ_INSERT_TAIL(&subsystem->ctrlrs, ctrlr, link);

	SPDK_DTRACE_PROBE3(nvmf_subsystem_add_ctrlr, subsystem->subnqn, ctrlr, ctrlr->hostnqn);

	return 0;
}

/* Detach a controller from the subsystem; must run on the subsystem thread. */
void
nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
			    struct spdk_nvmf_ctrlr *ctrlr)
{
	SPDK_DTRACE_PROBE3(nvmf_subsystem_remove_ctrlr, subsystem->subnqn, ctrlr, ctrlr->hostnqn);

	assert(spdk_get_thread() == subsystem->thread);
	assert(subsystem == ctrlr->subsys);
	SPDK_DEBUGLOG(nvmf, "remove ctrlr %p id 0x%x from subsys %p %s\n", ctrlr, ctrlr->cntlid, subsystem,
		      subsystem->subnqn);
	TAILQ_REMOVE(&subsystem->ctrlrs, ctrlr, link);
}

/* Linear search for a controller by cntlid; NULL if not found. */
struct spdk_nvmf_ctrlr *
nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid)
{
	struct spdk_nvmf_ctrlr *ctrlr;

	TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
		if (ctrlr->cntlid == cntlid) {
			return ctrlr;
		}
	}

	return NULL;
}

uint32_t
spdk_nvmf_subsystem_get_max_namespaces(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->max_nsid;
}

uint16_t
spdk_nvmf_subsystem_get_min_cntlid(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->min_cntlid;
}

uint16_t
spdk_nvmf_subsystem_get_max_cntlid(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->max_cntlid;
}

/* Scratch structures used only while decoding the reservation
 * persist-through-power-loss (PTPL) JSON file. */
struct _nvmf_ns_registrant {
	uint64_t rkey;
	char *host_uuid;
};

struct _nvmf_ns_registrants {
	size_t num_regs;
	struct _nvmf_ns_registrant reg[SPDK_NVMF_MAX_NUM_REGISTRANTS];
};

struct _nvmf_ns_reservation {
	bool ptpl_activated;
	enum spdk_nvme_reservation_type rtype;
	uint64_t crkey;
	char *bdev_uuid;
	char *holder_uuid;
	struct _nvmf_ns_registrants regs;
};

static const struct spdk_json_object_decoder nvmf_ns_pr_reg_decoders[] = {
	{"rkey", offsetof(struct _nvmf_ns_registrant, rkey), spdk_json_decode_uint64},
	{"host_uuid", offsetof(struct _nvmf_ns_registrant, host_uuid), spdk_json_decode_string},
};

/* Decode one registrant object from the PTPL JSON. */
static int
nvmf_decode_ns_pr_reg(const struct spdk_json_val *val, void *out)
{
	struct _nvmf_ns_registrant *reg = out;

	return spdk_json_decode_object(val, nvmf_ns_pr_reg_decoders,
				       SPDK_COUNTOF(nvmf_ns_pr_reg_decoders), reg);
}

/* Decode the "registrants" array (at most SPDK_NVMF_MAX_NUM_REGISTRANTS). */
static int
nvmf_decode_ns_pr_regs(const struct spdk_json_val *val, void *out)
{
	struct _nvmf_ns_registrants *regs = out;

	return spdk_json_decode_array(val, nvmf_decode_ns_pr_reg, regs->reg,
				      SPDK_NVMF_MAX_NUM_REGISTRANTS, &regs->num_regs,
				      sizeof(struct _nvmf_ns_registrant));
}

static const struct spdk_json_object_decoder nvmf_ns_pr_decoders[] = {
	{"ptpl", offsetof(struct _nvmf_ns_reservation, ptpl_activated), spdk_json_decode_bool, true},
	{"rtype", offsetof(struct _nvmf_ns_reservation, rtype), spdk_json_decode_uint32, true},
	{"crkey", offsetof(struct _nvmf_ns_reservation, crkey), spdk_json_decode_uint64, true},
	{"bdev_uuid", offsetof(struct
_nvmf_ns_reservation, bdev_uuid), spdk_json_decode_string}, 2430 {"holder_uuid", offsetof(struct _nvmf_ns_reservation, holder_uuid), spdk_json_decode_string, true}, 2431 {"registrants", offsetof(struct _nvmf_ns_reservation, regs), nvmf_decode_ns_pr_regs}, 2432 }; 2433 2434 static int 2435 nvmf_ns_reservation_load_json(const struct spdk_nvmf_ns *ns, 2436 struct spdk_nvmf_reservation_info *info) 2437 { 2438 size_t json_size; 2439 ssize_t values_cnt, rc; 2440 void *json = NULL, *end; 2441 struct spdk_json_val *values = NULL; 2442 struct _nvmf_ns_reservation res = {}; 2443 const char *file = ns->ptpl_file; 2444 uint32_t i; 2445 2446 /* Load all persist file contents into a local buffer */ 2447 json = spdk_posix_file_load_from_name(file, &json_size); 2448 if (!json) { 2449 SPDK_ERRLOG("Load persit file %s failed\n", file); 2450 return -ENOMEM; 2451 } 2452 2453 rc = spdk_json_parse(json, json_size, NULL, 0, &end, 0); 2454 if (rc < 0) { 2455 SPDK_NOTICELOG("Parsing JSON configuration failed (%zd)\n", rc); 2456 goto exit; 2457 } 2458 2459 values_cnt = rc; 2460 values = calloc(values_cnt, sizeof(struct spdk_json_val)); 2461 if (values == NULL) { 2462 goto exit; 2463 } 2464 2465 rc = spdk_json_parse(json, json_size, values, values_cnt, &end, 0); 2466 if (rc != values_cnt) { 2467 SPDK_ERRLOG("Parsing JSON configuration failed (%zd)\n", rc); 2468 goto exit; 2469 } 2470 2471 /* Decode json */ 2472 if (spdk_json_decode_object(values, nvmf_ns_pr_decoders, 2473 SPDK_COUNTOF(nvmf_ns_pr_decoders), 2474 &res)) { 2475 SPDK_ERRLOG("Invalid objects in the persist file %s\n", file); 2476 rc = -EINVAL; 2477 goto exit; 2478 } 2479 2480 if (res.regs.num_regs > SPDK_NVMF_MAX_NUM_REGISTRANTS) { 2481 SPDK_ERRLOG("Can only support up to %u registrants\n", SPDK_NVMF_MAX_NUM_REGISTRANTS); 2482 rc = -ERANGE; 2483 goto exit; 2484 } 2485 2486 rc = 0; 2487 info->ptpl_activated = res.ptpl_activated; 2488 info->rtype = res.rtype; 2489 info->crkey = res.crkey; 2490 snprintf(info->bdev_uuid, 
 sizeof(info->bdev_uuid), "%s", res.bdev_uuid);
	snprintf(info->holder_uuid, sizeof(info->holder_uuid), "%s", res.holder_uuid);
	info->num_regs = res.regs.num_regs;
	for (i = 0; i < res.regs.num_regs; i++) {
		info->registrants[i].rkey = res.regs.reg[i].rkey;
		snprintf(info->registrants[i].host_uuid, sizeof(info->registrants[i].host_uuid), "%s",
			 res.regs.reg[i].host_uuid);
	}

exit:
	/* Free the raw JSON buffer, the parsed value array and every string
	 * the decoder allocated, on both success and failure paths. */
	free(json);
	free(values);
	free(res.bdev_uuid);
	free(res.holder_uuid);
	for (i = 0; i < res.regs.num_regs; i++) {
		free(res.regs.reg[i].host_uuid);
	}

	return rc;
}

static bool nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns);

/* Rebuild the in-memory reservation state of ns from persisted info.
 * Returns 0 on success (including "nothing to restore"), negative errno on
 * inconsistent data or allocation failure. */
static int
nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info)
{
	uint32_t i;
	struct spdk_nvmf_registrant *reg, *holder = NULL;
	struct spdk_uuid bdev_uuid, holder_uuid;
	bool rkey_flag = false;

	SPDK_DEBUGLOG(nvmf, "NSID %u, PTPL %u, Number of registrants %u\n",
		      ns->nsid, info->ptpl_activated, info->num_regs);

	/* it's not an error */
	if (!info->ptpl_activated || !info->num_regs) {
		return 0;
	}

	/* Check info->crkey exist or not in info->registrants[i].rkey */
	for (i = 0; i < info->num_regs; i++) {
		if (info->crkey == info->registrants[i].rkey) {
			rkey_flag = true;
		}
	}
	if (!rkey_flag && info->crkey != 0) {
		return -EINVAL;
	}

	/* The persisted state must belong to this bdev. */
	spdk_uuid_parse(&bdev_uuid, info->bdev_uuid);
	if (spdk_uuid_compare(&bdev_uuid, spdk_bdev_get_uuid(ns->bdev))) {
		SPDK_ERRLOG("Existing bdev UUID is not same with configuration file\n");
		return -EINVAL;
	}

	ns->crkey = info->crkey;
	ns->rtype = info->rtype;
	ns->ptpl_activated = info->ptpl_activated;
	spdk_uuid_parse(&holder_uuid, info->holder_uuid);

	SPDK_DEBUGLOG(nvmf, "Bdev UUID %s\n", info->bdev_uuid);
	if (info->rtype) {
		SPDK_DEBUGLOG(nvmf, "Holder UUID %s, RTYPE %u, RKEY 0x%"PRIx64"\n",
			      info->holder_uuid, info->rtype, info->crkey);
	}

	/* Recreate each registrant; remember which one is the holder. */
	for (i = 0; i < info->num_regs; i++) {
		reg = calloc(1, sizeof(*reg));
		if (!reg) {
			return -ENOMEM;
		}
		spdk_uuid_parse(&reg->hostid, info->registrants[i].host_uuid);
		reg->rkey = info->registrants[i].rkey;
		TAILQ_INSERT_TAIL(&ns->registrants, reg, link);
		if (info->crkey != 0 && !spdk_uuid_compare(&holder_uuid, &reg->hostid)) {
			holder = reg;
		}
		SPDK_DEBUGLOG(nvmf, "Registrant RKEY 0x%"PRIx64", Host UUID %s\n",
			      info->registrants[i].rkey, info->registrants[i].host_uuid);
	}

	/* For all-registrants reservation types any registrant is a holder;
	 * pick the first one. Otherwise use the matched holder (may be NULL). */
	if (nvmf_ns_reservation_all_registrants_type(ns)) {
		ns->holder = TAILQ_FIRST(&ns->registrants);
	} else {
		ns->holder = holder;
	}

	return 0;
}

/* spdk_json write callback: dump the serialized buffer to the PTPL file.
 * Returns 0 on full write, -ENOENT if the file can't be opened, -1 on a
 * short write. NOTE(review): the fclose() result is not checked, so a flush
 * error (e.g. disk full) could go unnoticed. */
static int
nvmf_ns_json_write_cb(void *cb_ctx, const void *data, size_t size)
{
	char *file = cb_ctx;
	size_t rc;
	FILE *fd;

	fd = fopen(file, "w");
	if (!fd) {
		SPDK_ERRLOG("Can't open file %s for write\n", file);
		return -ENOENT;
	}
	rc = fwrite(data, 1, size, fd);
	fclose(fd);

	return rc == size ? 0 : -1;
}

/* Serialize info to the namespace's PTPL file as JSON. When PTPL is not
 * activated the file is truncated to empty instead of written. */
static int
nvmf_ns_reservation_update_json(const struct spdk_nvmf_ns *ns,
				const struct spdk_nvmf_reservation_info *info)
{
	const char *file = ns->ptpl_file;
	struct spdk_json_write_ctx *w;
	uint32_t i;
	int rc = 0;

	w = spdk_json_write_begin(nvmf_ns_json_write_cb, (void *)file, 0);
	if (w == NULL) {
		return -ENOMEM;
	}
	/* clear the configuration file */
	if (!info->ptpl_activated) {
		goto exit;
	}

	spdk_json_write_object_begin(w);
	spdk_json_write_named_bool(w, "ptpl", info->ptpl_activated);
	spdk_json_write_named_uint32(w, "rtype", info->rtype);
	spdk_json_write_named_uint64(w, "crkey", info->crkey);
	spdk_json_write_named_string(w, "bdev_uuid", info->bdev_uuid);
	spdk_json_write_named_string(w, "holder_uuid", info->holder_uuid);

	spdk_json_write_named_array_begin(w, "registrants");
	for (i = 0; i < info->num_regs; i++) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_uint64(w, "rkey", info->registrants[i].rkey);
		spdk_json_write_named_string(w, "host_uuid", info->registrants[i].host_uuid);
		spdk_json_write_object_end(w);
	}
	spdk_json_write_array_end(w);
	spdk_json_write_object_end(w);

exit:
	/* spdk_json_write_end flushes through nvmf_ns_json_write_cb. */
	rc = spdk_json_write_end(w);
	return rc;
}

/* Snapshot the namespace's current reservation state into an info struct and
 * persist it. No-op (returns 0) for namespaces without PTPL support. */
static int
nvmf_ns_update_reservation_info(struct spdk_nvmf_ns *ns)
{
	struct spdk_nvmf_reservation_info info;
	struct spdk_nvmf_registrant *reg, *tmp;
	uint32_t i = 0;

	assert(ns != NULL);

	if (!ns->bdev || !nvmf_ns_is_ptpl_capable(ns)) {
		return 0;
	}

	memset(&info, 0, sizeof(info));
	spdk_uuid_fmt_lower(info.bdev_uuid, sizeof(info.bdev_uuid), spdk_bdev_get_uuid(ns->bdev));

	if (ns->rtype) {
		info.rtype = ns->rtype;
		info.crkey = ns->crkey;
		if (!nvmf_ns_reservation_all_registrants_type(ns)) {
			assert(ns->holder != NULL);
 spdk_uuid_fmt_lower(info.holder_uuid, sizeof(info.holder_uuid), &ns->holder->hostid);
		}
	}

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		spdk_uuid_fmt_lower(info.registrants[i].host_uuid, sizeof(info.registrants[i].host_uuid),
				    &reg->hostid);
		info.registrants[i++].rkey = reg->rkey;
	}

	info.num_regs = i;
	info.ptpl_activated = ns->ptpl_activated;

	return nvmf_ns_reservation_update(ns, &info);
}

/* Find the registrant whose host ID equals uuid, or NULL if none. */
static struct spdk_nvmf_registrant *
nvmf_ns_reservation_get_registrant(struct spdk_nvmf_ns *ns,
				   struct spdk_uuid *uuid)
{
	struct spdk_nvmf_registrant *reg, *tmp;

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		if (!spdk_uuid_compare(&reg->hostid, uuid)) {
			return reg;
		}
	}

	return NULL;
}

/* Generate reservation notice log to registered HostID controllers */
static void
nvmf_subsystem_gen_ctrlr_notification(struct spdk_nvmf_subsystem *subsystem,
				      struct spdk_nvmf_ns *ns,
				      struct spdk_uuid *hostid_list,
				      uint32_t num_hostid,
				      enum spdk_nvme_reservation_notification_log_page_type type)
{
	struct spdk_nvmf_ctrlr *ctrlr;
	uint32_t i;

	/* A host may have several controllers; notify every controller whose
	 * hostid appears in the list. */
	for (i = 0; i < num_hostid; i++) {
		TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
			if (!spdk_uuid_compare(&ctrlr->hostid, &hostid_list[i])) {
				nvmf_ctrlr_reservation_notice_log(ctrlr, ns, type);
			}
		}
	}
}

/* Get all registrants' hostid other than the controller who issued the command */
static uint32_t
nvmf_ns_reservation_get_all_other_hostid(struct spdk_nvmf_ns *ns,
		struct spdk_uuid *hostid_list,
		uint32_t max_num_hostid,
		struct spdk_uuid *current_hostid)
{
	struct spdk_nvmf_registrant *reg, *tmp;
	uint32_t num_hostid = 0;

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		if (spdk_uuid_compare(&reg->hostid, current_hostid)) {
			if (num_hostid == max_num_hostid) {
				/* Callers size the buffer for every possible
				 * registrant, so overflow means an internal bug. */
				assert(false);
				return max_num_hostid;
			}
			hostid_list[num_hostid++] = reg->hostid;
		}
	}

	return num_hostid;
}

/* Calculate the unregistered HostID list according to list
 * prior to execute preempt command and list after executing
 * preempt command.
 */
static uint32_t
nvmf_ns_reservation_get_unregistered_hostid(struct spdk_uuid *old_hostid_list,
		uint32_t old_num_hostid,
		struct spdk_uuid *remaining_hostid_list,
		uint32_t remaining_num_hostid)
{
	struct spdk_uuid temp_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint32_t i, j, num_hostid = 0;
	bool found;

	if (!remaining_num_hostid) {
		return old_num_hostid;
	}

	/* Compact into old_hostid_list only the entries that are no longer
	 * present in remaining_hostid_list. */
	for (i = 0; i < old_num_hostid; i++) {
		found = false;
		for (j = 0; j < remaining_num_hostid; j++) {
			if (!spdk_uuid_compare(&old_hostid_list[i], &remaining_hostid_list[j])) {
				found = true;
				break;
			}
		}
		if (!found) {
			spdk_uuid_copy(&temp_hostid_list[num_hostid++], &old_hostid_list[i]);
		}
	}

	if (num_hostid) {
		memcpy(old_hostid_list, temp_hostid_list, sizeof(struct spdk_uuid) * num_hostid);
	}

	return num_hostid;
}

/* current reservation type is all registrants or not */
static bool
nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns)
{
	return (ns->rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS ||
		ns->rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}

/* current registrant is reservation holder or not */
static bool
nvmf_ns_reservation_registrant_is_holder(struct spdk_nvmf_ns *ns,
		struct spdk_nvmf_registrant *reg)
{
	if (!reg) {
		return false;
	}

	/* For all-registrants types every registrant is a holder. */
	if (nvmf_ns_reservation_all_registrants_type(ns)) {
		return true;
	}

	return (ns->holder == reg);
}

static int
nvmf_ns_reservation_add_registrant(struct spdk_nvmf_ns *ns,
				   struct spdk_nvmf_ctrlr *ctrlr,
				   uint64_t nrkey)
{
	struct spdk_nvmf_registrant *reg;

	reg = calloc(1, sizeof(*reg));
	if (!reg) {
		return -ENOMEM;
	}

	reg->rkey = nrkey;
	/* set hostid for the registrant */
	spdk_uuid_copy(&reg->hostid, &ctrlr->hostid);
	TAILQ_INSERT_TAIL(&ns->registrants, reg, link);
	/* Any registrant change bumps the reservation generation counter. */
	ns->gen++;

	return 0;
}

/* Clear the reservation itself; registrants are left untouched. */
static void
nvmf_ns_reservation_release_reservation(struct spdk_nvmf_ns *ns)
{
	ns->rtype = 0;
	ns->crkey = 0;
	ns->holder = NULL;
}

/* release the reservation if the last registrant was removed */
static void
nvmf_ns_reservation_check_release_on_remove_registrant(struct spdk_nvmf_ns *ns,
		struct spdk_nvmf_registrant *reg)
{
	struct spdk_nvmf_registrant *next_reg;

	/* no reservation holder */
	if (!ns->holder) {
		assert(ns->rtype == 0);
		return;
	}

	/* reg has already been unlinked from ns->registrants at this point. */
	next_reg = TAILQ_FIRST(&ns->registrants);
	if (next_reg && nvmf_ns_reservation_all_registrants_type(ns)) {
		/* the next valid registrant is the new holder now */
		ns->holder = next_reg;
	} else if (nvmf_ns_reservation_registrant_is_holder(ns, reg)) {
		/* release the reservation */
		nvmf_ns_reservation_release_reservation(ns);
	}
}

/* Unlink and free one registrant, updating the holder and generation count. */
static void
nvmf_ns_reservation_remove_registrant(struct spdk_nvmf_ns *ns,
				      struct spdk_nvmf_registrant *reg)
{
	TAILQ_REMOVE(&ns->registrants, reg, link);
	nvmf_ns_reservation_check_release_on_remove_registrant(ns, reg);
	free(reg);
	ns->gen++;
	return;
}

/* Remove every registrant whose key matches rkey; returns the count removed. */
static uint32_t
nvmf_ns_reservation_remove_registrants_by_key(struct spdk_nvmf_ns *ns,
		uint64_t rkey)
{
	struct spdk_nvmf_registrant *reg, *tmp;
	uint32_t count = 0;

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		if (reg->rkey == rkey) {
			nvmf_ns_reservation_remove_registrant(ns, reg);
			count++;
		}
	}
	return count;
}

/* Remove every registrant except reg; returns the count removed. */
static uint32_t
nvmf_ns_reservation_remove_all_other_registrants(struct spdk_nvmf_ns *ns,
		struct spdk_nvmf_registrant *reg)
{
	struct spdk_nvmf_registrant *reg_tmp, *reg_tmp2;
	uint32_t count = 0;

	TAILQ_FOREACH_SAFE(reg_tmp, &ns->registrants, link, reg_tmp2) {
		if (reg_tmp != reg) {
			nvmf_ns_reservation_remove_registrant(ns, reg_tmp);
			count++;
		}
	}
	return count;
}

/* Remove all registrants; returns the count removed. */
static uint32_t
nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns)
{
	struct spdk_nvmf_registrant *reg, *reg_tmp;
	uint32_t count = 0;

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, reg_tmp) {
		nvmf_ns_reservation_remove_registrant(ns, reg);
		count++;
	}
	return count;
}

/* Record holder as the reservation owner with the given key and type. */
static void
nvmf_ns_reservation_acquire_reservation(struct spdk_nvmf_ns *ns, uint64_t rkey,
					enum spdk_nvme_reservation_type rtype,
					struct spdk_nvmf_registrant *holder)
{
	ns->rtype = rtype;
	ns->crkey = rkey;
	assert(ns->holder == NULL);
	ns->holder = holder;
}

/* Handle a Reservation Register command. Returns true when the poll groups
 * need a reservation-state update; the completion status is written to req. */
static bool
nvmf_ns_reservation_register(struct spdk_nvmf_ns *ns,
			     struct spdk_nvmf_ctrlr *ctrlr,
			     struct spdk_nvmf_request *req)
{
	struct spdk_nvme_reservation_register_data key = { 0 };
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	uint8_t rrega, iekey, cptpl, rtype;
	struct spdk_nvmf_registrant *reg;
	uint8_t status = SPDK_NVME_SC_SUCCESS;
	bool update_sgroup = false;
	struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint32_t num_hostid = 0;
	int rc;

	rrega = cmd->cdw10_bits.resv_register.rrega;
	iekey = cmd->cdw10_bits.resv_register.iekey;
	cptpl = cmd->cdw10_bits.resv_register.cptpl;

	if (req->iovcnt > 0 && req->length >= sizeof(key)) {
		struct spdk_iov_xfer ix;
spdk_iov_xfer_init(&ix, req->iov, req->iovcnt); 2936 spdk_iov_xfer_to_buf(&ix, &key, sizeof(key)); 2937 } else { 2938 SPDK_ERRLOG("No key provided. Failing request.\n"); 2939 status = SPDK_NVME_SC_INVALID_FIELD; 2940 goto exit; 2941 } 2942 2943 SPDK_DEBUGLOG(nvmf, "REGISTER: RREGA %u, IEKEY %u, CPTPL %u, " 2944 "NRKEY 0x%"PRIx64", NRKEY 0x%"PRIx64"\n", 2945 rrega, iekey, cptpl, key.crkey, key.nrkey); 2946 2947 if (cptpl == SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON) { 2948 /* Ture to OFF state, and need to be updated in the configuration file */ 2949 if (ns->ptpl_activated) { 2950 ns->ptpl_activated = 0; 2951 update_sgroup = true; 2952 } 2953 } else if (cptpl == SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS) { 2954 if (!nvmf_ns_is_ptpl_capable(ns)) { 2955 status = SPDK_NVME_SC_INVALID_FIELD; 2956 goto exit; 2957 } else if (ns->ptpl_activated == 0) { 2958 ns->ptpl_activated = 1; 2959 update_sgroup = true; 2960 } 2961 } 2962 2963 /* current Host Identifier has registrant or not */ 2964 reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid); 2965 2966 switch (rrega) { 2967 case SPDK_NVME_RESERVE_REGISTER_KEY: 2968 if (!reg) { 2969 /* register new controller */ 2970 if (key.nrkey == 0) { 2971 SPDK_ERRLOG("Can't register zeroed new key\n"); 2972 status = SPDK_NVME_SC_INVALID_FIELD; 2973 goto exit; 2974 } 2975 rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey); 2976 if (rc < 0) { 2977 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2978 goto exit; 2979 } 2980 update_sgroup = true; 2981 } else { 2982 /* register with same key is not an error */ 2983 if (reg->rkey != key.nrkey) { 2984 SPDK_ERRLOG("The same host already register a " 2985 "key with 0x%"PRIx64"\n", 2986 reg->rkey); 2987 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2988 goto exit; 2989 } 2990 } 2991 break; 2992 case SPDK_NVME_RESERVE_UNREGISTER_KEY: 2993 if (!reg || (!iekey && reg->rkey != key.crkey)) { 2994 SPDK_ERRLOG("No registrant or current key doesn't match " 2995 "with existing registrant key\n"); 
2996 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 2997 goto exit; 2998 } 2999 3000 rtype = ns->rtype; 3001 num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list, 3002 SPDK_NVMF_MAX_NUM_REGISTRANTS, 3003 &ctrlr->hostid); 3004 3005 nvmf_ns_reservation_remove_registrant(ns, reg); 3006 3007 if (!ns->rtype && num_hostid && (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY || 3008 rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY)) { 3009 nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns, 3010 hostid_list, 3011 num_hostid, 3012 SPDK_NVME_RESERVATION_RELEASED); 3013 } 3014 update_sgroup = true; 3015 break; 3016 case SPDK_NVME_RESERVE_REPLACE_KEY: 3017 if (key.nrkey == 0) { 3018 SPDK_ERRLOG("Can't register zeroed new key\n"); 3019 status = SPDK_NVME_SC_INVALID_FIELD; 3020 goto exit; 3021 } 3022 /* Registrant exists */ 3023 if (reg) { 3024 if (!iekey && reg->rkey != key.crkey) { 3025 SPDK_ERRLOG("Current key doesn't match " 3026 "existing registrant key\n"); 3027 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3028 goto exit; 3029 } 3030 if (reg->rkey == key.nrkey) { 3031 goto exit; 3032 } 3033 reg->rkey = key.nrkey; 3034 } else if (iekey) { /* No registrant but IEKEY is set */ 3035 /* new registrant */ 3036 rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey); 3037 if (rc < 0) { 3038 status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 3039 goto exit; 3040 } 3041 } else { /* No registrant */ 3042 SPDK_ERRLOG("No registrant\n"); 3043 status = SPDK_NVME_SC_RESERVATION_CONFLICT; 3044 goto exit; 3045 3046 } 3047 update_sgroup = true; 3048 break; 3049 default: 3050 status = SPDK_NVME_SC_INVALID_FIELD; 3051 goto exit; 3052 } 3053 3054 exit: 3055 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3056 req->rsp->nvme_cpl.status.sc = status; 3057 return update_sgroup; 3058 } 3059 3060 static bool 3061 nvmf_ns_reservation_acquire(struct spdk_nvmf_ns *ns, 3062 struct spdk_nvmf_ctrlr *ctrlr, 3063 struct spdk_nvmf_request *req) 3064 { 3065 struct 
 spdk_nvme_reservation_acquire_data key = { 0 };
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	uint8_t racqa, iekey, rtype;
	struct spdk_nvmf_registrant *reg;
	bool all_regs = false;
	uint32_t count = 0;
	bool update_sgroup = true;
	struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint32_t num_hostid = 0;
	struct spdk_uuid new_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint32_t new_num_hostid = 0;
	bool reservation_released = false;
	uint8_t status = SPDK_NVME_SC_SUCCESS;

	racqa = cmd->cdw10_bits.resv_acquire.racqa;
	iekey = cmd->cdw10_bits.resv_acquire.iekey;
	rtype = cmd->cdw10_bits.resv_acquire.rtype;

	if (req->iovcnt > 0 && req->length >= sizeof(key)) {
		struct spdk_iov_xfer ix;
		spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
		spdk_iov_xfer_to_buf(&ix, &key, sizeof(key));
	} else {
		SPDK_ERRLOG("No key provided. Failing request.\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		goto exit;
	}

	/* NOTE(review): the first value logged is key.crkey although the label
	 * reads "NRKEY" — label looks wrong, but left untouched here. */
	SPDK_DEBUGLOG(nvmf, "ACQUIRE: RACQA %u, IEKEY %u, RTYPE %u, "
		      "NRKEY 0x%"PRIx64", PRKEY 0x%"PRIx64"\n",
		      racqa, iekey, rtype, key.crkey, key.prkey);

	if (iekey || rtype > SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) {
		SPDK_ERRLOG("Ignore existing key field set to 1\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		goto exit;
	}

	reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid);
	/* must be registrant and CRKEY must match */
	if (!reg || reg->rkey != key.crkey) {
		SPDK_ERRLOG("No registrant or current key doesn't match "
			    "with existing registrant key\n");
		status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		update_sgroup = false;
		goto exit;
	}

	all_regs = nvmf_ns_reservation_all_registrants_type(ns);

	switch (racqa) {
	case SPDK_NVME_RESERVE_ACQUIRE:
		/* it's not an error for the holder to acquire same reservation type again */
		if (nvmf_ns_reservation_registrant_is_holder(ns, reg) && ns->rtype == rtype) {
			/* do nothing */
			update_sgroup = false;
		} else if (ns->holder == NULL) {
			/* first time to acquire the reservation */
			nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg);
		} else {
			SPDK_ERRLOG("Invalid rtype or current registrant is not holder\n");
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
			update_sgroup = false;
			goto exit;
		}
		break;
	case SPDK_NVME_RESERVE_PREEMPT:
		/* no reservation holder */
		if (!ns->holder) {
			/* unregister with PRKEY */
			nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
			break;
		}
		/* Snapshot the other registrants before any removal so the
		 * preempt notifications can be computed in the exit path. */
		num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
				SPDK_NVMF_MAX_NUM_REGISTRANTS,
				&ctrlr->hostid);

		/* only 1 reservation holder and reservation key is valid */
		if (!all_regs) {
			/* preempt itself */
			if (nvmf_ns_reservation_registrant_is_holder(ns, reg) &&
			    ns->crkey == key.prkey) {
				ns->rtype = rtype;
				reservation_released = true;
				break;
			}

			if (ns->crkey == key.prkey) {
				nvmf_ns_reservation_remove_registrant(ns, ns->holder);
				nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg);
				reservation_released = true;
			} else if (key.prkey != 0) {
				nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
			} else {
				/* PRKEY is zero */
				SPDK_ERRLOG("Current PRKEY is zero\n");
				status = SPDK_NVME_SC_RESERVATION_CONFLICT;
				update_sgroup = false;
				goto exit;
			}
		} else {
			/* release all other registrants except for the current one */
			if (key.prkey == 0) {
				nvmf_ns_reservation_remove_all_other_registrants(ns, reg);
				assert(ns->holder == reg);
			} else {
				count = nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
				if (count == 0) {
					SPDK_ERRLOG("PRKEY doesn't match any registrant\n");
					status = SPDK_NVME_SC_RESERVATION_CONFLICT;
					update_sgroup = false;
					goto exit;
				}
			}
		}
		break;
	default:
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		break;
	}

exit:
	if (update_sgroup && racqa == SPDK_NVME_RESERVE_PREEMPT) {
		new_num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, new_hostid_list,
				 SPDK_NVMF_MAX_NUM_REGISTRANTS,
				 &ctrlr->hostid);
		/* Preempt notification occurs on the unregistered controllers
		 * other than the controller who issued the command.
		 */
		num_hostid = nvmf_ns_reservation_get_unregistered_hostid(hostid_list,
				num_hostid,
				new_hostid_list,
				new_num_hostid);
		if (num_hostid) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_REGISTRATION_PREEMPTED);

		}
		/* Reservation released notification occurs on the
		 * controllers which are the remaining registrants other than
		 * the controller who issued the command.
		 */
		if (reservation_released && new_num_hostid) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      new_hostid_list,
							      new_num_hostid,
							      SPDK_NVME_RESERVATION_RELEASED);

		}
	}
	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->rsp->nvme_cpl.status.sc = status;
	return update_sgroup;
}

/* Handle a Reservation Release command. Returns true when the poll groups
 * need a reservation-state update; the completion status is written to req. */
static bool
nvmf_ns_reservation_release(struct spdk_nvmf_ns *ns,
			    struct spdk_nvmf_ctrlr *ctrlr,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	uint8_t rrela, iekey, rtype;
	struct spdk_nvmf_registrant *reg;
	uint64_t crkey = 0;
	uint8_t status = SPDK_NVME_SC_SUCCESS;
	bool update_sgroup = true;
	struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint32_t num_hostid = 0;

	rrela = cmd->cdw10_bits.resv_release.rrela;
	iekey = cmd->cdw10_bits.resv_release.iekey;
	rtype = cmd->cdw10_bits.resv_release.rtype;

	if (req->iovcnt > 0 && req->length >= sizeof(crkey)) {
		struct spdk_iov_xfer ix;
		spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
		spdk_iov_xfer_to_buf(&ix, &crkey, sizeof(crkey));
	} else {
		SPDK_ERRLOG("No key provided. 
Failing request.\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		goto exit;
	}

	SPDK_DEBUGLOG(nvmf, "RELEASE: RRELA %u, IEKEY %u, RTYPE %u, "
		      "CRKEY 0x%"PRIx64"\n", rrela, iekey, rtype, crkey);

	if (iekey) {
		SPDK_ERRLOG("Ignore existing key field set to 1\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		goto exit;
	}

	/* Caller must be a registrant and CRKEY must match. */
	reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid);
	if (!reg || reg->rkey != crkey) {
		SPDK_ERRLOG("No registrant or current key doesn't match "
			    "with existing registrant key\n");
		status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		update_sgroup = false;
		goto exit;
	}

	num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
			SPDK_NVMF_MAX_NUM_REGISTRANTS,
			&ctrlr->hostid);

	switch (rrela) {
	case SPDK_NVME_RESERVE_RELEASE:
		if (!ns->holder) {
			SPDK_DEBUGLOG(nvmf, "RELEASE: no holder\n");
			update_sgroup = false;
			goto exit;
		}
		if (ns->rtype != rtype) {
			SPDK_ERRLOG("Type doesn't match\n");
			status = SPDK_NVME_SC_INVALID_FIELD;
			update_sgroup = false;
			goto exit;
		}
		if (!nvmf_ns_reservation_registrant_is_holder(ns, reg)) {
			/* not the reservation holder, this isn't an error */
			update_sgroup = false;
			goto exit;
		}

		rtype = ns->rtype;
		nvmf_ns_reservation_release_reservation(ns);

		/* Write-exclusive and exclusive-access types generate no
		 * release notification. */
		if (num_hostid && rtype != SPDK_NVME_RESERVE_WRITE_EXCLUSIVE &&
		    rtype != SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_RESERVATION_RELEASED);
		}
		break;
	case SPDK_NVME_RESERVE_CLEAR:
		nvmf_ns_reservation_clear_all_registrants(ns);
		if (num_hostid) {
			nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
							      hostid_list,
							      num_hostid,
							      SPDK_NVME_RESERVATION_PREEMPTED);
		}
		break;
	default:
		status = SPDK_NVME_SC_INVALID_FIELD;
		update_sgroup = false;
		goto exit;
	}

exit:
	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->rsp->nvme_cpl.status.sc = status;
	return update_sgroup;
}

/* Handle a Reservation Report command: copy the extended reservation status
 * data structure, plus one extended controller entry per registrant, into the
 * request's iovec. The completion status is written to req. */
static void
nvmf_ns_reservation_report(struct spdk_nvmf_ns *ns,
			   struct spdk_nvmf_ctrlr *ctrlr,
			   struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvmf_registrant *reg, *tmp;
	struct spdk_nvme_reservation_status_extended_data status_data = { 0 };
	struct spdk_iov_xfer ix;
	uint32_t transfer_len;
	uint32_t regctl = 0;
	uint8_t status = SPDK_NVME_SC_SUCCESS;

	if (req->iovcnt == 0) {
		SPDK_ERRLOG("No data transfer specified for request. "
			    " Unable to transfer back response.\n");
		status = SPDK_NVME_SC_INVALID_FIELD;
		goto exit;
	}

	if (!cmd->cdw11_bits.resv_report.eds) {
		SPDK_ERRLOG("NVMeoF uses extended controller data structure, "
			    "please set EDS bit in cdw11 and try again\n");
		status = SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT;
		goto exit;
	}

	/* Number of Dwords of the Reservation Status data structure to transfer */
	transfer_len = (cmd->cdw10 + 1) * sizeof(uint32_t);

	if (transfer_len < sizeof(struct spdk_nvme_reservation_status_extended_data)) {
		status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		goto exit;
	}

	spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);

	status_data.data.gen = ns->gen;
	status_data.data.rtype = ns->rtype;
	status_data.data.ptpls = ns->ptpl_activated;

	TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
		regctl++;
	}

	/*
	 * We report the number of registrants as per the spec here, even if
	 * the iov isn't big enough to contain them all.
In that case, the 3374 * spdk_iov_xfer_from_buf() won't actually copy any of the remaining 3375 * data; as it keeps track of the iov cursor itself, it's simplest to 3376 * just walk the entire list anyway. 3377 */ 3378 status_data.data.regctl = regctl; 3379 3380 spdk_iov_xfer_from_buf(&ix, &status_data, sizeof(status_data)); 3381 3382 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) { 3383 struct spdk_nvme_registered_ctrlr_extended_data ctrlr_data = { 0 }; 3384 3385 /* Set to 0xffffh for dynamic controller */ 3386 ctrlr_data.cntlid = 0xffff; 3387 ctrlr_data.rcsts.status = (ns->holder == reg) ? true : false; 3388 ctrlr_data.rkey = reg->rkey; 3389 spdk_uuid_copy((struct spdk_uuid *)ctrlr_data.hostid, ®->hostid); 3390 3391 spdk_iov_xfer_from_buf(&ix, &ctrlr_data, sizeof(ctrlr_data)); 3392 } 3393 3394 exit: 3395 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3396 req->rsp->nvme_cpl.status.sc = status; 3397 return; 3398 } 3399 3400 static void 3401 nvmf_ns_reservation_complete(void *ctx) 3402 { 3403 struct spdk_nvmf_request *req = ctx; 3404 3405 spdk_nvmf_request_complete(req); 3406 } 3407 3408 static void 3409 _nvmf_ns_reservation_update_done(struct spdk_nvmf_subsystem *subsystem, 3410 void *cb_arg, int status) 3411 { 3412 struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)cb_arg; 3413 struct spdk_nvmf_poll_group *group = req->qpair->group; 3414 3415 spdk_thread_send_msg(group->thread, nvmf_ns_reservation_complete, req); 3416 } 3417 3418 void 3419 nvmf_ns_reservation_request(void *ctx) 3420 { 3421 struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)ctx; 3422 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 3423 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 3424 uint32_t nsid; 3425 struct spdk_nvmf_ns *ns; 3426 bool update_sgroup = false; 3427 int status = 0; 3428 3429 nsid = cmd->nsid; 3430 ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid); 3431 assert(ns != NULL); 3432 3433 switch (cmd->opc) { 3434 case 
SPDK_NVME_OPC_RESERVATION_REGISTER: 3435 update_sgroup = nvmf_ns_reservation_register(ns, ctrlr, req); 3436 break; 3437 case SPDK_NVME_OPC_RESERVATION_ACQUIRE: 3438 update_sgroup = nvmf_ns_reservation_acquire(ns, ctrlr, req); 3439 break; 3440 case SPDK_NVME_OPC_RESERVATION_RELEASE: 3441 update_sgroup = nvmf_ns_reservation_release(ns, ctrlr, req); 3442 break; 3443 case SPDK_NVME_OPC_RESERVATION_REPORT: 3444 nvmf_ns_reservation_report(ns, ctrlr, req); 3445 break; 3446 default: 3447 break; 3448 } 3449 3450 /* update reservation information to subsystem's poll group */ 3451 if (update_sgroup) { 3452 if (ns->ptpl_activated || cmd->opc == SPDK_NVME_OPC_RESERVATION_REGISTER) { 3453 if (nvmf_ns_update_reservation_info(ns) != 0) { 3454 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 3455 } 3456 } 3457 status = nvmf_subsystem_update_ns(ctrlr->subsys, _nvmf_ns_reservation_update_done, req); 3458 if (status == 0) { 3459 return; 3460 } 3461 } 3462 3463 _nvmf_ns_reservation_update_done(ctrlr->subsys, req, status); 3464 } 3465 3466 static bool 3467 nvmf_ns_is_ptpl_capable_json(const struct spdk_nvmf_ns *ns) 3468 { 3469 return ns->ptpl_file != NULL; 3470 } 3471 3472 static struct spdk_nvmf_ns_reservation_ops g_reservation_ops = { 3473 .is_ptpl_capable = nvmf_ns_is_ptpl_capable_json, 3474 .update = nvmf_ns_reservation_update_json, 3475 .load = nvmf_ns_reservation_load_json, 3476 }; 3477 3478 bool 3479 nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns) 3480 { 3481 return g_reservation_ops.is_ptpl_capable(ns); 3482 } 3483 3484 static int 3485 nvmf_ns_reservation_update(const struct spdk_nvmf_ns *ns, 3486 const struct spdk_nvmf_reservation_info *info) 3487 { 3488 return g_reservation_ops.update(ns, info); 3489 } 3490 3491 static int 3492 nvmf_ns_reservation_load(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info) 3493 { 3494 return g_reservation_ops.load(ns, info); 3495 } 3496 3497 void 3498 spdk_nvmf_set_custom_ns_reservation_ops(const 
struct spdk_nvmf_ns_reservation_ops *ops) 3499 { 3500 g_reservation_ops = *ops; 3501 } 3502 3503 int 3504 spdk_nvmf_subsystem_set_ana_reporting(struct spdk_nvmf_subsystem *subsystem, 3505 bool ana_reporting) 3506 { 3507 if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) { 3508 return -EAGAIN; 3509 } 3510 3511 subsystem->flags.ana_reporting = ana_reporting; 3512 3513 return 0; 3514 } 3515 3516 bool 3517 spdk_nvmf_subsystem_get_ana_reporting(struct spdk_nvmf_subsystem *subsystem) 3518 { 3519 return subsystem->flags.ana_reporting; 3520 } 3521 3522 struct subsystem_listener_update_ctx { 3523 struct spdk_nvmf_subsystem_listener *listener; 3524 3525 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn; 3526 void *cb_arg; 3527 }; 3528 3529 static void 3530 subsystem_listener_update_done(struct spdk_io_channel_iter *i, int status) 3531 { 3532 struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 3533 3534 if (ctx->cb_fn) { 3535 ctx->cb_fn(ctx->cb_arg, status); 3536 } 3537 free(ctx); 3538 } 3539 3540 static void 3541 subsystem_listener_update_on_pg(struct spdk_io_channel_iter *i) 3542 { 3543 struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 3544 struct spdk_nvmf_subsystem_listener *listener; 3545 struct spdk_nvmf_poll_group *group; 3546 struct spdk_nvmf_ctrlr *ctrlr; 3547 3548 listener = ctx->listener; 3549 group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i)); 3550 3551 TAILQ_FOREACH(ctrlr, &listener->subsystem->ctrlrs, link) { 3552 if (ctrlr->thread != spdk_get_thread()) { 3553 continue; 3554 } 3555 3556 if (ctrlr->admin_qpair && ctrlr->admin_qpair->group == group && ctrlr->listener == listener) { 3557 nvmf_ctrlr_async_event_ana_change_notice(ctrlr); 3558 } 3559 } 3560 3561 spdk_for_each_channel_continue(i, 0); 3562 } 3563 3564 void 3565 spdk_nvmf_subsystem_set_ana_state(struct spdk_nvmf_subsystem *subsystem, 3566 const struct spdk_nvme_transport_id *trid, 3567 enum spdk_nvme_ana_state ana_state, uint32_t 
anagrpid, 3568 spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, void *cb_arg) 3569 { 3570 struct spdk_nvmf_subsystem_listener *listener; 3571 struct subsystem_listener_update_ctx *ctx; 3572 uint32_t i; 3573 3574 assert(cb_fn != NULL); 3575 assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE || 3576 subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED); 3577 3578 if (!subsystem->flags.ana_reporting) { 3579 SPDK_ERRLOG("ANA reporting is disabled\n"); 3580 cb_fn(cb_arg, -EINVAL); 3581 return; 3582 } 3583 3584 /* ANA Change state is not used, ANA Persistent Loss state 3585 * is not supported yet. 3586 */ 3587 if (!(ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE || 3588 ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE || 3589 ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE)) { 3590 SPDK_ERRLOG("ANA state %d is not supported\n", ana_state); 3591 cb_fn(cb_arg, -ENOTSUP); 3592 return; 3593 } 3594 3595 if (anagrpid > subsystem->max_nsid) { 3596 SPDK_ERRLOG("ANA group ID %" PRIu32 " is more than maximum\n", anagrpid); 3597 cb_fn(cb_arg, -EINVAL); 3598 return; 3599 } 3600 3601 listener = nvmf_subsystem_find_listener(subsystem, trid); 3602 if (!listener) { 3603 SPDK_ERRLOG("Unable to find listener.\n"); 3604 cb_fn(cb_arg, -EINVAL); 3605 return; 3606 } 3607 3608 if (anagrpid != 0 && listener->ana_state[anagrpid - 1] == ana_state) { 3609 cb_fn(cb_arg, 0); 3610 return; 3611 } 3612 3613 ctx = calloc(1, sizeof(*ctx)); 3614 if (!ctx) { 3615 SPDK_ERRLOG("Unable to allocate context\n"); 3616 cb_fn(cb_arg, -ENOMEM); 3617 return; 3618 } 3619 3620 for (i = 1; i <= subsystem->max_nsid; i++) { 3621 if (anagrpid == 0 || i == anagrpid) { 3622 listener->ana_state[i - 1] = ana_state; 3623 } 3624 } 3625 listener->ana_state_change_count++; 3626 3627 ctx->listener = listener; 3628 ctx->cb_fn = cb_fn; 3629 ctx->cb_arg = cb_arg; 3630 3631 spdk_for_each_channel(subsystem->tgt, 3632 subsystem_listener_update_on_pg, 3633 ctx, 3634 subsystem_listener_update_done); 3635 } 3636 3637 int 3638 
spdk_nvmf_subsystem_get_ana_state(struct spdk_nvmf_subsystem *subsystem, 3639 const struct spdk_nvme_transport_id *trid, 3640 uint32_t anagrpid, 3641 enum spdk_nvme_ana_state *ana_state) 3642 { 3643 assert(ana_state != NULL); 3644 3645 struct spdk_nvmf_subsystem_listener *listener; 3646 3647 if (!subsystem->flags.ana_reporting) { 3648 SPDK_ERRLOG("ANA reporting is disabled\n"); 3649 return -EINVAL; 3650 } 3651 3652 if (anagrpid <= 0 || anagrpid > subsystem->max_nsid) { 3653 SPDK_ERRLOG("ANA group ID %" PRIu32 " is invalid\n", anagrpid); 3654 return -EINVAL; 3655 } 3656 3657 listener = nvmf_subsystem_find_listener(subsystem, trid); 3658 if (!listener) { 3659 SPDK_ERRLOG("Unable to find listener.\n"); 3660 return -EINVAL; 3661 } 3662 3663 *ana_state = listener->ana_state[anagrpid - 1]; 3664 return 0; 3665 } 3666 3667 bool 3668 spdk_nvmf_subsystem_is_discovery(struct spdk_nvmf_subsystem *subsystem) 3669 { 3670 return subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT || 3671 subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY; 3672 } 3673 3674 bool 3675 nvmf_nqn_is_discovery(const char *nqn) 3676 { 3677 return strcmp(nqn, SPDK_NVMF_DISCOVERY_NQN) == 0; 3678 } 3679