/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2015 Intel Corporation. All rights reserved.
 * Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
 */

#include "nvme_internal.h"

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->nsdata;
}

/**
 * Update Namespace flags based on Identify Controller
 * and Identify Namespace. This can also be used for
 * Namespace Attribute Notice events and Namespace
 * operations such as Attach/Detach.
 */
void
nvme_ns_set_identify_data(struct spdk_nvme_ns *ns)
{
	struct spdk_nvme_ns_data *nsdata;
	struct spdk_nvme_nvm_ns_data *nsdata_nvm;
	uint32_t format_index;

	nsdata = _nvme_ns_get_data(ns);
	nsdata_nvm = ns->nsdata_nvm;

	ns->flags = 0x0000;
	format_index = spdk_nvme_ns_get_format_index(nsdata);

	ns->sector_size = 1 << nsdata->lbaf[format_index].lbads;
	ns->extended_lba_size = ns->sector_size;

	ns->md_size = nsdata->lbaf[format_index].ms;
	if (nsdata->flbas.extended) {
		ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED;
		ns->extended_lba_size += ns->md_size;
	}

	ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
	ns->sectors_per_max_io_no_md = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->sector_size;
	if (ns->ctrlr->quirks & NVME_QUIRK_MDTS_EXCLUDE_MD) {
		ns->sectors_per_max_io = ns->sectors_per_max_io_no_md;
	}

	if (nsdata->noiob) {
		ns->sectors_per_stripe = nsdata->noiob;
		SPDK_DEBUGLOG(nvme, "ns %u optimal IO boundary %" PRIu32 " blocks\n",
			      ns->id, ns->sectors_per_stripe);
	} else if (ns->ctrlr->quirks & NVME_INTEL_QUIRK_STRIPING &&
		   ns->ctrlr->cdata.vs[3] != 0) {
		ns->sectors_per_stripe = (1ULL << ns->ctrlr->cdata.vs[3]) * ns->ctrlr->min_page_size /
					 ns->sector_size;
		SPDK_DEBUGLOG(nvme, "ns %u stripe size quirk %" PRIu32 " blocks\n",
			      ns->id, ns->sectors_per_stripe);
	} else {
		ns->sectors_per_stripe = 0;
	}

	if (ns->ctrlr->cdata.oncs.dsm) {
		ns->flags |= SPDK_NVME_NS_DEALLOCATE_SUPPORTED;
	}

	if (ns->ctrlr->cdata.oncs.compare) {
		ns->flags |= SPDK_NVME_NS_COMPARE_SUPPORTED;
	}

	if (ns->ctrlr->cdata.vwc.present) {
		ns->flags |= SPDK_NVME_NS_FLUSH_SUPPORTED;
	}

	if (ns->ctrlr->cdata.oncs.write_zeroes) {
		ns->flags |= SPDK_NVME_NS_WRITE_ZEROES_SUPPORTED;
	}

	if (ns->ctrlr->cdata.oncs.write_unc) {
		ns->flags |= SPDK_NVME_NS_WRITE_UNCORRECTABLE_SUPPORTED;
	}

	if (nsdata->nsrescap.raw) {
		ns->flags |= SPDK_NVME_NS_RESERVATION_SUPPORTED;
	}

	ns->pi_type = SPDK_NVME_FMT_NVM_PROTECTION_DISABLE;
	if (nsdata->lbaf[format_index].ms && nsdata->dps.pit) {
		ns->flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
		ns->pi_type = nsdata->dps.pit;
		if (nsdata_nvm != NULL && ns->ctrlr->cdata.ctratt.bits.elbas) {
			/* We may have nsdata_nvm for other purposes but
			 * the elbaf array is only valid when elbas is 1.
			 */
			ns->pi_format = nsdata_nvm->elbaf[format_index].pif;
		} else {
			ns->pi_format = SPDK_NVME_16B_GUARD_PI;
		}
	}
}

static int
nvme_ctrlr_identify_ns(struct spdk_nvme_ns *ns)
{
	struct nvme_completion_poll_status *status;
	struct spdk_nvme_ns_data *nsdata;
	int rc;

	status = calloc(1, sizeof(*status));
	if (!status) {
		SPDK_ERRLOG("Failed to allocate status tracker\n");
		return -ENOMEM;
	}

	nsdata = _nvme_ns_get_data(ns);
	rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id, 0,
				     nsdata, sizeof(*nsdata),
				     nvme_completion_poll_cb, status);
	if (rc != 0) {
		free(status);
		return rc;
	}

	if (nvme_wait_for_completion_robust_lock(ns->ctrlr->adminq, status,
			&ns->ctrlr->ctrlr_lock)) {
		if (!status->timed_out) {
			free(status);
		}
		/* This can occur if the namespace is not active. Simply zero the
		 * namespace data and continue. */
		nvme_ns_destruct(ns);
		return 0;
	}
	free(status);

	nvme_ns_set_identify_data(ns);

	return 0;
}

static int
nvme_ctrlr_identify_ns_zns_specific(struct spdk_nvme_ns *ns)
{
	struct nvme_completion_poll_status *status;
	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
	struct spdk_nvme_zns_ns_data *nsdata_zns;
	int rc;

	nvme_ns_free_zns_specific_data(ns);

	nsdata_zns = spdk_zmalloc(sizeof(*nsdata_zns), 64, NULL, SPDK_ENV_NUMA_ID_ANY,
				  SPDK_MALLOC_SHARE);
	if (!nsdata_zns) {
		return -ENOMEM;
	}

	status = calloc(1, sizeof(*status));
	if (!status) {
		SPDK_ERRLOG("Failed to allocate status tracker\n");
		spdk_free(nsdata_zns);
		return -ENOMEM;
	}

	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_NS_IOCS, 0, ns->id, ns->csi,
				     nsdata_zns, sizeof(*nsdata_zns),
				     nvme_completion_poll_cb, status);
	if (rc != 0) {
		spdk_free(nsdata_zns);
		free(status);
		return rc;
	}

	if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
		SPDK_ERRLOG("Failed to retrieve Identify IOCS Specific Namespace Data Structure\n");
		spdk_free(nsdata_zns);
		if (!status->timed_out) {
			free(status);
		}
		return -ENXIO;
	}
	free(status);
	ns->nsdata_zns = nsdata_zns;

	return 0;
}

static int
nvme_ctrlr_identify_ns_nvm_specific(struct spdk_nvme_ns *ns)
{
	struct nvme_completion_poll_status *status;
	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
	struct spdk_nvme_nvm_ns_data *nsdata_nvm;
	int rc;

	/* Free any previously cached NVM-specific data before replacing it. */
	nvme_ns_free_nvm_specific_data(ns);

	nsdata_nvm = spdk_zmalloc(sizeof(*nsdata_nvm), 64, NULL, SPDK_ENV_NUMA_ID_ANY,
				  SPDK_MALLOC_SHARE);
	if (!nsdata_nvm) {
		return -ENOMEM;
	}

	status = calloc(1, sizeof(*status));
	if (!status) {
		SPDK_ERRLOG("Failed to allocate status tracker\n");
		spdk_free(nsdata_nvm);
		return -ENOMEM;
	}

	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_NS_IOCS, 0, ns->id, ns->csi,
				     nsdata_nvm, sizeof(*nsdata_nvm),
				     nvme_completion_poll_cb, status);
	if (rc != 0) {
		spdk_free(nsdata_nvm);
		free(status);
		return rc;
	}

	if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
		SPDK_ERRLOG("Failed to retrieve Identify IOCS Specific Namespace Data Structure\n");
		spdk_free(nsdata_nvm);
		if (!status->timed_out) {
			free(status);
		}
		return -ENXIO;
	}
	free(status);
	ns->nsdata_nvm = nsdata_nvm;

	return 0;
}

static int
nvme_ctrlr_identify_ns_iocs_specific(struct spdk_nvme_ns *ns)
{
	switch (ns->csi) {
	case SPDK_NVME_CSI_ZNS:
		return nvme_ctrlr_identify_ns_zns_specific(ns);
	case SPDK_NVME_CSI_NVM:
		if (ns->ctrlr->cdata.ctratt.bits.elbas) {
			return nvme_ctrlr_identify_ns_nvm_specific(ns);
		}
	/* fallthrough */
	default:
		/*
		 * This switch must handle all cases for which
		 * nvme_ns_has_supported_iocs_specific_data() returns true,
		 * other cases should never happen.
		 */
		assert(0);
	}

	return -EINVAL;
}

static int
nvme_ctrlr_identify_id_desc(struct spdk_nvme_ns *ns)
{
	struct nvme_completion_poll_status *status;
	int rc;

	memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));

	if ((ns->ctrlr->vs.raw < SPDK_NVME_VERSION(1, 3, 0) &&
	     !(ns->ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS)) ||
	    (ns->ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		SPDK_DEBUGLOG(nvme, "Version < 1.3; not attempting to retrieve NS ID Descriptor List\n");
		return 0;
	}

	status = calloc(1, sizeof(*status));
	if (!status) {
		SPDK_ERRLOG("Failed to allocate status tracker\n");
		return -ENOMEM;
	}

	SPDK_DEBUGLOG(nvme, "Attempting to retrieve NS ID Descriptor List\n");
	rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST, 0, ns->id,
				     0, ns->id_desc_list, sizeof(ns->id_desc_list),
				     nvme_completion_poll_cb, status);
	if (rc < 0) {
		free(status);
		return rc;
	}

	rc = nvme_wait_for_completion_robust_lock(ns->ctrlr->adminq, status, &ns->ctrlr->ctrlr_lock);
	if (rc != 0) {
		SPDK_WARNLOG("Failed to retrieve NS ID Descriptor List\n");
		memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
	}

	if (!status->timed_out) {
		free(status);
	}

	nvme_ns_set_id_desc_list_data(ns);

	return rc;
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

bool
spdk_nvme_ns_is_active(struct spdk_nvme_ns *ns)
{
	const struct spdk_nvme_ns_data *nsdata = NULL;

	/*
	 * According to the spec, a valid NS has a non-zero id.
	 */
	if (ns->id == 0) {
		return false;
	}

	nsdata = _nvme_ns_get_data(ns);

	/*
	 * According to the spec, Identify Namespace will return a zero-filled structure for
	 * inactive namespace IDs.
	 * Check NCAP since it must be nonzero for an active namespace.
	 */
	return nsdata->ncap != 0;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

uint32_t
spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr->max_xfer_size;
}

uint32_t
spdk_nvme_ns_get_sector_size(struct spdk_nvme_ns *ns)
{
	return ns->sector_size;
}

uint32_t
spdk_nvme_ns_get_extended_sector_size(struct spdk_nvme_ns *ns)
{
	return ns->extended_lba_size;
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

uint64_t
spdk_nvme_ns_get_size(struct spdk_nvme_ns *ns)
{
	return spdk_nvme_ns_get_num_sectors(ns) * spdk_nvme_ns_get_sector_size(ns);
}

uint32_t
spdk_nvme_ns_get_flags(struct spdk_nvme_ns *ns)
{
	return ns->flags;
}

enum spdk_nvme_pi_type
spdk_nvme_ns_get_pi_type(struct spdk_nvme_ns *ns) {
	return ns->pi_type;
}

enum spdk_nvme_pi_format
spdk_nvme_ns_get_pi_format(struct spdk_nvme_ns *ns) {
	return ns->pi_format;
}

bool
spdk_nvme_ns_supports_extended_lba(struct spdk_nvme_ns *ns)
{
	return (ns->flags & SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED) ? true : false;
}

bool
spdk_nvme_ns_supports_compare(struct spdk_nvme_ns *ns)
{
	return (ns->flags & SPDK_NVME_NS_COMPARE_SUPPORTED) ? true : false;
}

uint32_t
spdk_nvme_ns_get_md_size(struct spdk_nvme_ns *ns)
{
	return ns->md_size;
}

uint32_t
spdk_nvme_ns_get_format_index(const struct spdk_nvme_ns_data *nsdata)
{
	if (nsdata->nlbaf < 16) {
		return nsdata->flbas.format;
	} else {
		return ((nsdata->flbas.msb_format << 4) + nsdata->flbas.format);
	}
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

const struct spdk_nvme_nvm_ns_data *
spdk_nvme_nvm_ns_get_data(struct spdk_nvme_ns *ns)
{
	return ns->nsdata_nvm;
}

/* We have to use the typedef in the function declaration to appease astyle. */
typedef enum spdk_nvme_dealloc_logical_block_read_value
spdk_nvme_dealloc_logical_block_read_value_t;

spdk_nvme_dealloc_logical_block_read_value_t
spdk_nvme_ns_get_dealloc_logical_block_read_value(
	struct spdk_nvme_ns *ns)
{
	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
	const struct spdk_nvme_ns_data *data = spdk_nvme_ns_get_data(ns);

	if (ctrlr->quirks & NVME_QUIRK_READ_ZERO_AFTER_DEALLOCATE) {
		return SPDK_NVME_DEALLOC_READ_00;
	} else {
		return data->dlfeat.bits.read_value;
	}
}

uint32_t
spdk_nvme_ns_get_optimal_io_boundary(struct spdk_nvme_ns *ns)
{
	return ns->sectors_per_stripe;
}

static const void *
nvme_ns_find_id_desc(const struct spdk_nvme_ns *ns, enum spdk_nvme_nidt type, size_t *length)
{
	const struct spdk_nvme_ns_id_desc *desc;
	size_t offset;

	offset = 0;
	while (offset + 4 < sizeof(ns->id_desc_list)) {
		desc = (const struct spdk_nvme_ns_id_desc *)&ns->id_desc_list[offset];

		if (desc->nidl == 0) {
			/* End of list */
			return NULL;
		}

		/*
		 * Check if this descriptor fits within the list.
		 * 4 is the fixed-size descriptor header (not counted in NIDL).
		 */
		if (offset + desc->nidl + 4 > sizeof(ns->id_desc_list)) {
			/* Descriptor longer than remaining space in list (invalid) */
			return NULL;
		}

		if (desc->nidt == type) {
			*length = desc->nidl;
			return &desc->nid[0];
		}

		offset += 4 + desc->nidl;
	}

	return NULL;
}

const uint8_t *
spdk_nvme_ns_get_nguid(const struct spdk_nvme_ns *ns)
{
	const uint8_t *nguid;
	size_t size;

	nguid = nvme_ns_find_id_desc(ns, SPDK_NVME_NIDT_NGUID, &size);
	if (nguid && size != SPDK_SIZEOF_MEMBER(struct spdk_nvme_ns_data, nguid)) {
		SPDK_WARNLOG("Invalid NIDT_NGUID descriptor length reported: %zu (expected: %zu)\n",
			     size, SPDK_SIZEOF_MEMBER(struct spdk_nvme_ns_data, nguid));
		return NULL;
	}

	return nguid;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	const struct spdk_uuid *uuid;
	size_t uuid_size;

	uuid = nvme_ns_find_id_desc(ns, SPDK_NVME_NIDT_UUID, &uuid_size);
	if (uuid && uuid_size != sizeof(*uuid)) {
		SPDK_WARNLOG("Invalid NIDT_UUID descriptor length reported: %zu (expected: %zu)\n",
			     uuid_size, sizeof(*uuid));
		return NULL;
	}

	return uuid;
}

static enum spdk_nvme_csi
nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	const uint8_t *csi;
	size_t csi_size;

	csi = nvme_ns_find_id_desc(ns, SPDK_NVME_NIDT_CSI, &csi_size);
	if (csi && csi_size != sizeof(*csi)) {
		SPDK_WARNLOG("Invalid NIDT_CSI descriptor length reported: %zu (expected: %zu)\n",
			     csi_size, sizeof(*csi));
		return SPDK_NVME_CSI_NVM;
	}
	if (!csi) {
		if (ns->ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS) {
			SPDK_WARNLOG("CSI not reported for NSID: %" PRIu32 "\n", ns->id);
		}
		return SPDK_NVME_CSI_NVM;
	}

	return *csi;
}

void
nvme_ns_set_id_desc_list_data(struct spdk_nvme_ns *ns)
{
	ns->csi = nvme_ns_get_csi(ns);
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}

void
nvme_ns_free_zns_specific_data(struct spdk_nvme_ns *ns)
{
	if (!ns->id) {
		return;
	}

	if (ns->nsdata_zns) {
		spdk_free(ns->nsdata_zns);
		ns->nsdata_zns = NULL;
	}
}

void
nvme_ns_free_nvm_specific_data(struct spdk_nvme_ns *ns)
{
	if (!ns->id) {
		return;
	}

	if (ns->nsdata_nvm) {
		spdk_free(ns->nsdata_nvm);
		ns->nsdata_nvm = NULL;
	}
}

void
nvme_ns_free_iocs_specific_data(struct spdk_nvme_ns *ns)
{
	nvme_ns_free_zns_specific_data(ns);
	nvme_ns_free_nvm_specific_data(ns);
}

bool
nvme_ns_has_supported_iocs_specific_data(struct spdk_nvme_ns *ns)
{
	switch (ns->csi) {
	case SPDK_NVME_CSI_NVM:
		if (ns->ctrlr->cdata.ctratt.bits.elbas) {
			return true;
		}

		return false;
	case SPDK_NVME_CSI_ZNS:
		return true;
	default:
		SPDK_WARNLOG("Unsupported CSI: %u for NSID: %u\n", ns->csi, ns->id);
		return false;
	}
}

uint32_t
spdk_nvme_ns_get_ana_group_id(const struct spdk_nvme_ns *ns)
{
	return ns->ana_group_id;
}

enum spdk_nvme_ana_state
spdk_nvme_ns_get_ana_state(const struct spdk_nvme_ns *ns) {
	return ns->ana_state;
}

int
nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
		  struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;

	assert(id > 0);

	ns->ctrlr = ctrlr;
	ns->id = id;
	/* This will be overwritten when reading ANA log page. */
	ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	rc = nvme_ctrlr_identify_ns(ns);
	if (rc != 0) {
		return rc;
	}

	/* skip Identify NS ID Descriptor List for inactive NS */
	if (!spdk_nvme_ns_is_active(ns)) {
		return 0;
	}

	rc = nvme_ctrlr_identify_id_desc(ns);
	if (rc != 0) {
		return rc;
	}

	if (nvme_ctrlr_multi_iocs_enabled(ctrlr) &&
	    nvme_ns_has_supported_iocs_specific_data(ns)) {
		rc = nvme_ctrlr_identify_ns_iocs_specific(ns);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

void
nvme_ns_destruct(struct spdk_nvme_ns *ns)
{
	struct spdk_nvme_ns_data *nsdata;

	if (!ns->id) {
		return;
	}

	nsdata = _nvme_ns_get_data(ns);
	memset(nsdata, 0, sizeof(*nsdata));
	memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
	nvme_ns_free_iocs_specific_data(ns);
	ns->sector_size = 0;
	ns->extended_lba_size = 0;
	ns->md_size = 0;
	ns->pi_type = 0;
	ns->sectors_per_max_io = 0;
	ns->sectors_per_max_io_no_md = 0;
	ns->sectors_per_stripe = 0;
	ns->flags = 0;
	ns->csi = SPDK_NVME_CSI_NVM;
}
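
/*
 * Illustrative usage sketch, kept out of the build by a hypothetical macro
 * guard: shows how a caller might consume the cached Identify data exposed by
 * the public accessors above after nvme_ns_construct() has populated it.  The
 * guard and function name below are examples only, not part of the library.
 */
#ifdef NVME_NS_USAGE_EXAMPLE
static void
nvme_ns_example_print_geometry(struct spdk_nvme_ns *ns)
{
	/* All values come from the fields set by nvme_ns_set_identify_data(). */
	printf("nsid %" PRIu32 ": %" PRIu32 "-byte sectors, %" PRIu32 "-byte metadata, "
	       "%" PRIu64 " sectors total, PI type %d\n",
	       spdk_nvme_ns_get_id(ns),
	       spdk_nvme_ns_get_sector_size(ns),
	       spdk_nvme_ns_get_md_size(ns),
	       spdk_nvme_ns_get_num_sectors(ns),
	       spdk_nvme_ns_get_pi_type(ns));
}
#endif /* NVME_NS_USAGE_EXAMPLE */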