/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2015 Intel Corporation. All rights reserved.
 *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
 */

#include "nvme_internal.h"

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->nsdata;
}

/**
 * Update Namespace flags based on Identify Controller
 * and Identify Namespace.  This can also be used for
 * Namespace Attribute Notice events and Namespace
 * operations such as Attach/Detach.
 */
void
nvme_ns_set_identify_data(struct spdk_nvme_ns *ns)
{
	struct spdk_nvme_ns_data	*nsdata;
	uint32_t			format_index;

	nsdata = _nvme_ns_get_data(ns);

	ns->flags = 0x0000;
	format_index = spdk_nvme_ns_get_format_index(nsdata);

	ns->sector_size = 1 << nsdata->lbaf[format_index].lbads;
	ns->extended_lba_size = ns->sector_size;

	ns->md_size = nsdata->lbaf[format_index].ms;
	if (nsdata->flbas.extended) {
		ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED;
		ns->extended_lba_size += ns->md_size;
	}

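	/*
	 * Illustrative example (assumed format values, not taken from this file):
	 * with LBADS = 9 (512-byte sectors), MS = 8 and extended LBAs in use,
	 * extended_lba_size is 520 bytes, so a 128 KiB max transfer fits
	 * 128 KiB / 520 = 252 sectors per I/O, versus 128 KiB / 512 = 256
	 * sectors when metadata is excluded.
	 */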
	ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
	ns->sectors_per_max_io_no_md = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->sector_size;
	if (ns->ctrlr->quirks & NVME_QUIRK_MDTS_EXCLUDE_MD) {
		ns->sectors_per_max_io = ns->sectors_per_max_io_no_md;
	}

	if (nsdata->noiob) {
		ns->sectors_per_stripe = nsdata->noiob;
		SPDK_DEBUGLOG(nvme, "ns %u optimal IO boundary %" PRIu32 " blocks\n",
			      ns->id, ns->sectors_per_stripe);
	} else if (ns->ctrlr->quirks & NVME_INTEL_QUIRK_STRIPING &&
		   ns->ctrlr->cdata.vs[3] != 0) {
		ns->sectors_per_stripe = (1ULL << ns->ctrlr->cdata.vs[3]) * ns->ctrlr->min_page_size /
					 ns->sector_size;
		SPDK_DEBUGLOG(nvme, "ns %u stripe size quirk %" PRIu32 " blocks\n",
			      ns->id, ns->sectors_per_stripe);
	} else {
		ns->sectors_per_stripe = 0;
	}

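	/*
	 * Most of the flags below reflect controller-level capabilities (ONCS and
	 * VWC from Identify Controller); reservation support and the PI settings
	 * come from the namespace data itself.
	 */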
	if (ns->ctrlr->cdata.oncs.dsm) {
		ns->flags |= SPDK_NVME_NS_DEALLOCATE_SUPPORTED;
	}

	if (ns->ctrlr->cdata.oncs.compare) {
		ns->flags |= SPDK_NVME_NS_COMPARE_SUPPORTED;
	}

	if (ns->ctrlr->cdata.vwc.present) {
		ns->flags |= SPDK_NVME_NS_FLUSH_SUPPORTED;
	}

	if (ns->ctrlr->cdata.oncs.write_zeroes) {
		ns->flags |= SPDK_NVME_NS_WRITE_ZEROES_SUPPORTED;
	}

	if (ns->ctrlr->cdata.oncs.write_unc) {
		ns->flags |= SPDK_NVME_NS_WRITE_UNCORRECTABLE_SUPPORTED;
	}

	if (nsdata->nsrescap.raw) {
		ns->flags |= SPDK_NVME_NS_RESERVATION_SUPPORTED;
	}

	ns->pi_type = SPDK_NVME_FMT_NVM_PROTECTION_DISABLE;
	if (nsdata->lbaf[format_index].ms && nsdata->dps.pit) {
		ns->flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
		ns->pi_type = nsdata->dps.pit;
	}
}

static int
nvme_ctrlr_identify_ns(struct spdk_nvme_ns *ns)
{
	struct nvme_completion_poll_status	*status;
	struct spdk_nvme_ns_data		*nsdata;
	int					rc;

	status = calloc(1, sizeof(*status));
	if (!status) {
		SPDK_ERRLOG("Failed to allocate status tracker\n");
		return -ENOMEM;
	}

	nsdata = _nvme_ns_get_data(ns);
	rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id, 0,
				     nsdata, sizeof(*nsdata),
				     nvme_completion_poll_cb, status);
	if (rc != 0) {
		free(status);
		return rc;
	}

	if (nvme_wait_for_completion_robust_lock(ns->ctrlr->adminq, status,
			&ns->ctrlr->ctrlr_lock)) {
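		/*
		 * On timeout, ownership of the status object passes to the
		 * completion callback, which frees it when the command
		 * eventually completes, so only free it here for other failures.
		 */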
		if (!status->timed_out) {
			free(status);
		}
		/* This can occur if the namespace is not active. Simply zero the
		 * namespace data and continue. */
		nvme_ns_destruct(ns);
		return 0;
	}
	free(status);

	nvme_ns_set_identify_data(ns);

	return 0;
}

static int
nvme_ctrlr_identify_ns_iocs_specific(struct spdk_nvme_ns *ns)
{
	struct nvme_completion_poll_status *status;
	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
	struct spdk_nvme_zns_ns_data *nsdata_zns;
	int rc;

	switch (ns->csi) {
	case SPDK_NVME_CSI_ZNS:
		break;
	default:
		/*
		 * This switch must handle all cases for which
		 * nvme_ns_has_supported_iocs_specific_data() returns true;
		 * other cases should never happen.
		 */
		assert(0);
	}

	nvme_ns_free_zns_specific_data(ns);

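	/*
	 * Allocate the ZNS namespace data with spdk_zmalloc() so the buffer is
	 * suitable as a DMA target for the Identify command issued below.
	 */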
	nsdata_zns = spdk_zmalloc(sizeof(*nsdata_zns), 64, NULL, SPDK_ENV_SOCKET_ID_ANY,
				  SPDK_MALLOC_SHARE);
	if (!nsdata_zns) {
		return -ENOMEM;
	}

	status = calloc(1, sizeof(*status));
	if (!status) {
		SPDK_ERRLOG("Failed to allocate status tracker\n");
		spdk_free(nsdata_zns);
		return -ENOMEM;
	}

	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_NS_IOCS, 0, ns->id, ns->csi,
				     nsdata_zns, sizeof(*nsdata_zns),
				     nvme_completion_poll_cb, status);
	if (rc != 0) {
		spdk_free(nsdata_zns);
		free(status);
		return rc;
	}

	if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
		SPDK_ERRLOG("Failed to retrieve Identify IOCS Specific Namespace Data Structure\n");
		spdk_free(nsdata_zns);
		if (!status->timed_out) {
			free(status);
		}
		return -ENXIO;
	}
	free(status);
	ns->nsdata_zns = nsdata_zns;

	return 0;
}

static int
nvme_ctrlr_identify_id_desc(struct spdk_nvme_ns *ns)
{
	struct nvme_completion_poll_status      *status;
	int                                     rc;

	memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));

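	/*
	 * The NS Identification Descriptor List (CNS 03h) was added in NVMe 1.3,
	 * so skip it on older controllers unless they advertise I/O command set
	 * support.
	 */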
	if ((ns->ctrlr->vs.raw < SPDK_NVME_VERSION(1, 3, 0) &&
	     !(ns->ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS)) ||
	    (ns->ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		SPDK_DEBUGLOG(nvme, "Version < 1.3; not attempting to retrieve NS ID Descriptor List\n");
		return 0;
	}

	status = calloc(1, sizeof(*status));
	if (!status) {
		SPDK_ERRLOG("Failed to allocate status tracker\n");
		return -ENOMEM;
	}

	SPDK_DEBUGLOG(nvme, "Attempting to retrieve NS ID Descriptor List\n");
	rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST, 0, ns->id,
				     0, ns->id_desc_list, sizeof(ns->id_desc_list),
				     nvme_completion_poll_cb, status);
	if (rc < 0) {
		free(status);
		return rc;
	}

	rc = nvme_wait_for_completion_robust_lock(ns->ctrlr->adminq, status, &ns->ctrlr->ctrlr_lock);
	if (rc != 0) {
		SPDK_WARNLOG("Failed to retrieve NS ID Descriptor List\n");
		memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
	}

	if (!status->timed_out) {
		free(status);
	}

	nvme_ns_set_id_desc_list_data(ns);

	return rc;
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

bool
spdk_nvme_ns_is_active(struct spdk_nvme_ns *ns)
{
	const struct spdk_nvme_ns_data *nsdata = NULL;

	/*
	 * According to the spec, a valid NS has a non-zero ID.
	 */
	if (ns->id == 0) {
		return false;
	}

	nsdata = _nvme_ns_get_data(ns);

	/*
	 * According to the spec, Identify Namespace will return a zero-filled structure for
	 *  inactive namespace IDs.
	 * Check NCAP since it must be nonzero for an active namespace.
	 */
	return nsdata->ncap != 0;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

uint32_t
spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr->max_xfer_size;
}

uint32_t
spdk_nvme_ns_get_sector_size(struct spdk_nvme_ns *ns)
{
	return ns->sector_size;
}

uint32_t
spdk_nvme_ns_get_extended_sector_size(struct spdk_nvme_ns *ns)
{
	return ns->extended_lba_size;
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

uint64_t
spdk_nvme_ns_get_size(struct spdk_nvme_ns *ns)
{
	return spdk_nvme_ns_get_num_sectors(ns) * spdk_nvme_ns_get_sector_size(ns);
}

uint32_t
spdk_nvme_ns_get_flags(struct spdk_nvme_ns *ns)
{
	return ns->flags;
}

enum spdk_nvme_pi_type
spdk_nvme_ns_get_pi_type(struct spdk_nvme_ns *ns) {
	return ns->pi_type;
}

bool
spdk_nvme_ns_supports_extended_lba(struct spdk_nvme_ns *ns)
{
	return (ns->flags & SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED) ? true : false;
}

bool
spdk_nvme_ns_supports_compare(struct spdk_nvme_ns *ns)
{
	return (ns->flags & SPDK_NVME_NS_COMPARE_SUPPORTED) ? true : false;
}

uint32_t
spdk_nvme_ns_get_md_size(struct spdk_nvme_ns *ns)
{
	return ns->md_size;
}

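/*
 * Note: with more than 16 LBA formats (NLBAF >= 16, introduced in NVMe 2.0),
 * the format index is the 6-bit value (msb_format << 4) + format, e.g.
 * msb_format = 1 and format = 2 select LBA format 18.
 */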
uint32_t
spdk_nvme_ns_get_format_index(const struct spdk_nvme_ns_data *nsdata)
{
	if (nsdata->nlbaf < 16) {
		return nsdata->flbas.format;
	} else {
		return ((nsdata->flbas.msb_format << 4) + nsdata->flbas.format);
	}
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

/* We have to use the typedef in the function declaration to appease astyle. */
typedef enum spdk_nvme_dealloc_logical_block_read_value
spdk_nvme_dealloc_logical_block_read_value_t;

spdk_nvme_dealloc_logical_block_read_value_t
spdk_nvme_ns_get_dealloc_logical_block_read_value(
	struct spdk_nvme_ns *ns)
{
	struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
	const struct spdk_nvme_ns_data *data = spdk_nvme_ns_get_data(ns);

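	/* The quirk overrides whatever DLFEAT reports for deallocated blocks. */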
	if (ctrlr->quirks & NVME_QUIRK_READ_ZERO_AFTER_DEALLOCATE) {
		return SPDK_NVME_DEALLOC_READ_00;
	} else {
		return data->dlfeat.bits.read_value;
	}
}

uint32_t
spdk_nvme_ns_get_optimal_io_boundary(struct spdk_nvme_ns *ns)
{
	return ns->sectors_per_stripe;
}

static const void *
nvme_ns_find_id_desc(const struct spdk_nvme_ns *ns, enum spdk_nvme_nidt type, size_t *length)
{
	const struct spdk_nvme_ns_id_desc *desc;
	size_t offset;

	offset = 0;
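	/*
	 * Each descriptor is a 4-byte header (NIDT, NIDL and two reserved bytes)
	 * followed by NIDL bytes of identifier; walk the list until a zero NIDL
	 * marks the end or the requested type is found.
	 */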
	while (offset + 4 < sizeof(ns->id_desc_list)) {
		desc = (const struct spdk_nvme_ns_id_desc *)&ns->id_desc_list[offset];

		if (desc->nidl == 0) {
			/* End of list */
			return NULL;
		}

		/*
		 * Check if this descriptor fits within the list.
		 * 4 is the fixed-size descriptor header (not counted in NIDL).
		 */
		if (offset + desc->nidl + 4 > sizeof(ns->id_desc_list)) {
			/* Descriptor longer than remaining space in list (invalid) */
			return NULL;
		}

		if (desc->nidt == type) {
			*length = desc->nidl;
			return &desc->nid[0];
		}

		offset += 4 + desc->nidl;
	}

	return NULL;
}

const uint8_t *
spdk_nvme_ns_get_nguid(const struct spdk_nvme_ns *ns)
{
	const uint8_t *nguid;
	size_t size;

	nguid = nvme_ns_find_id_desc(ns, SPDK_NVME_NIDT_NGUID, &size);
	if (nguid && size != SPDK_SIZEOF_MEMBER(struct spdk_nvme_ns_data, nguid)) {
		SPDK_WARNLOG("Invalid NIDT_NGUID descriptor length reported: %zu (expected: %zu)\n",
			     size, SPDK_SIZEOF_MEMBER(struct spdk_nvme_ns_data, nguid));
		return NULL;
	}

	return nguid;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	const struct spdk_uuid *uuid;
	size_t uuid_size;

	uuid = nvme_ns_find_id_desc(ns, SPDK_NVME_NIDT_UUID, &uuid_size);
	if (uuid && uuid_size != sizeof(*uuid)) {
		SPDK_WARNLOG("Invalid NIDT_UUID descriptor length reported: %zu (expected: %zu)\n",
			     uuid_size, sizeof(*uuid));
		return NULL;
	}

	return uuid;
}

static enum spdk_nvme_csi
nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	const uint8_t *csi;
	size_t csi_size;

	csi = nvme_ns_find_id_desc(ns, SPDK_NVME_NIDT_CSI, &csi_size);
	if (csi && csi_size != sizeof(*csi)) {
		SPDK_WARNLOG("Invalid NIDT_CSI descriptor length reported: %zu (expected: %zu)\n",
			     csi_size, sizeof(*csi));
		return SPDK_NVME_CSI_NVM;
	}
	if (!csi) {
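		/*
		 * A missing CSI descriptor implies the NVM command set; only warn
		 * when the controller claims support for multiple command sets.
		 */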
		if (ns->ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS) {
			SPDK_WARNLOG("CSI not reported for NSID: %" PRIu32 "\n", ns->id);
		}
		return SPDK_NVME_CSI_NVM;
	}

	return *csi;
}

void
nvme_ns_set_id_desc_list_data(struct spdk_nvme_ns *ns)
{
	ns->csi = nvme_ns_get_csi(ns);
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}

void
nvme_ns_free_zns_specific_data(struct spdk_nvme_ns *ns)
{
	if (!ns->id) {
		return;
	}

	if (ns->nsdata_zns) {
		spdk_free(ns->nsdata_zns);
		ns->nsdata_zns = NULL;
	}
}

void
nvme_ns_free_iocs_specific_data(struct spdk_nvme_ns *ns)
{
	nvme_ns_free_zns_specific_data(ns);
}

bool
nvme_ns_has_supported_iocs_specific_data(struct spdk_nvme_ns *ns)
{
	switch (ns->csi) {
	case SPDK_NVME_CSI_NVM:
		/*
		 * NVM Command Set Specific Identify Namespace data structure
		 * is currently all-zeroes, reserved for future use.
		 */
		return false;
	case SPDK_NVME_CSI_ZNS:
		return true;
	default:
		SPDK_WARNLOG("Unsupported CSI: %u for NSID: %u\n", ns->csi, ns->id);
		return false;
	}
}

uint32_t
spdk_nvme_ns_get_ana_group_id(const struct spdk_nvme_ns *ns)
{
	return ns->ana_group_id;
}

enum spdk_nvme_ana_state
spdk_nvme_ns_get_ana_state(const struct spdk_nvme_ns *ns) {
	return ns->ana_state;
}

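/*
 * Initialize a namespace object: issue Identify Namespace, then (for active
 * namespaces) retrieve the NS ID Descriptor List, and finally the I/O command
 * set specific namespace data when multiple command sets are enabled.
 */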
int
nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
		  struct spdk_nvme_ctrlr *ctrlr)
{
	int	rc;

	assert(id > 0);

	ns->ctrlr = ctrlr;
	ns->id = id;
	/* This will be overwritten when the ANA log page is read. */
	ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	rc = nvme_ctrlr_identify_ns(ns);
	if (rc != 0) {
		return rc;
	}

	/* Skip the Identify NS ID Descriptor List for an inactive NS. */
	if (!spdk_nvme_ns_is_active(ns)) {
		return 0;
	}

	rc = nvme_ctrlr_identify_id_desc(ns);
	if (rc != 0) {
		return rc;
	}

	if (nvme_ctrlr_multi_iocs_enabled(ctrlr) &&
	    nvme_ns_has_supported_iocs_specific_data(ns)) {
		rc = nvme_ctrlr_identify_ns_iocs_specific(ns);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

void
nvme_ns_destruct(struct spdk_nvme_ns *ns)
{
	struct spdk_nvme_ns_data *nsdata;

	if (!ns->id) {
		return;
	}

	nsdata = _nvme_ns_get_data(ns);
	memset(nsdata, 0, sizeof(*nsdata));
	memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
	nvme_ns_free_iocs_specific_data(ns);
	ns->sector_size = 0;
	ns->extended_lba_size = 0;
	ns->md_size = 0;
	ns->pi_type = 0;
	ns->sectors_per_max_io = 0;
	ns->sectors_per_max_io_no_md = 0;
	ns->sectors_per_stripe = 0;
	ns->flags = 0;
	ns->csi = SPDK_NVME_CSI_NVM;
}
577