xref: /spdk/lib/nvme/nvme_internal.h (revision ceefb46358f845d6c4e9f9e18d02ec20a7098ff7)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2015 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #ifndef __NVME_INTERNAL_H__
8 #define __NVME_INTERNAL_H__
9 
10 #include "spdk/config.h"
11 #include "spdk/likely.h"
12 #include "spdk/stdinc.h"
13 
14 #include "spdk/nvme.h"
15 
16 #if defined(__i386__) || defined(__x86_64__)
17 #include <x86intrin.h>
18 #endif
19 
20 #include "spdk/queue.h"
21 #include "spdk/barrier.h"
22 #include "spdk/bit_array.h"
23 #include "spdk/mmio.h"
24 #include "spdk/pci_ids.h"
25 #include "spdk/util.h"
26 #include "spdk/memory.h"
27 #include "spdk/nvme_intel.h"
28 #include "spdk/nvmf_spec.h"
29 #include "spdk/tree.h"
30 #include "spdk/uuid.h"
31 
32 #include "spdk_internal/assert.h"
33 #include "spdk/log.h"
34 
35 extern pid_t g_spdk_nvme_pid;
36 
37 extern struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts;
38 
39 /*
40  * Some Intel devices support vendor-unique read latency log page even
41  * though the log page directory says otherwise.
42  */
43 #define NVME_INTEL_QUIRK_READ_LATENCY 0x1
44 
45 /*
46  * Some Intel devices support vendor-unique write latency log page even
47  * though the log page directory says otherwise.
48  */
49 #define NVME_INTEL_QUIRK_WRITE_LATENCY 0x2
50 
51 /*
52  * The controller needs a delay before the host starts checking the device
53  * readiness, which is done by reading the NVME_CSTS_RDY bit.
54  */
55 #define NVME_QUIRK_DELAY_BEFORE_CHK_RDY	0x4
56 
57 /*
58  * The controller performs best when I/O is split on particular
59  * LBA boundaries.
60  */
61 #define NVME_INTEL_QUIRK_STRIPING 0x8
62 
63 /*
64  * The controller needs a delay after allocating an I/O queue pair
65  * before it is ready to accept I/O commands.
66  */
67 #define NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC 0x10
68 
69 /*
70  * Earlier NVMe devices do not indicate whether unmapped blocks
71  * will read all zeroes or not. This define indicates that the
72  * device does in fact read all zeroes after an unmap event.
73  */
74 #define NVME_QUIRK_READ_ZERO_AFTER_DEALLOCATE 0x20
75 
76 /*
77  * The controller doesn't handle Identify value others than 0 or 1 correctly.
78  */
79 #define NVME_QUIRK_IDENTIFY_CNS 0x40
80 
81 /*
82  * The controller supports the Open Channel command set if an additional
83  * condition is met, e.g. the first byte (value 0x1) in the vendor specific
84  * bits of the namespace identify structure is set.
85  */
86 #define NVME_QUIRK_OCSSD 0x80
87 
88 /*
89  * The controller has an Intel vendor ID but does not support Intel vendor-specific
90  * log pages.  This is primarily for QEMU emulated SSDs which report an Intel vendor
91  * ID but do not support these log pages.
92  */
93 #define NVME_INTEL_QUIRK_NO_LOG_PAGES 0x100
94 
95 /*
96  * The controller does not set SHST_COMPLETE in a reasonable amount of time.  This
97  * is primarily seen in virtual VMware NVMe SSDs.  This quirk merely adds an additional
98  * error message noting that on VMware NVMe SSDs, the shutdown timeout may be expected.
99  */
100 #define NVME_QUIRK_SHST_COMPLETE 0x200
101 
102 /*
103  * The controller requires an extra delay before starting the initialization process
104  * during attach.
105  */
106 #define NVME_QUIRK_DELAY_BEFORE_INIT 0x400
107 
108 /*
109  * Some SSDs exhibit poor performance with the default SPDK NVMe IO queue size.
110  * This quirk will increase the default to 1024 which matches other operating
111  * systems, at the cost of some extra memory usage.  Users can still override
112  * the increased default by changing the spdk_nvme_io_qpair_opts when allocating
113  * a new queue pair.
114  */
115 #define NVME_QUIRK_MINIMUM_IO_QUEUE_SIZE 0x800
116 
117 /**
118  * The maximum access width to PCI memory space is 8 bytes; don't use AVX2 or
119  * SSE instructions to optimize memory accesses (memcpy or memset) larger than
120  * 8 bytes.
121  */
122 #define NVME_QUIRK_MAXIMUM_PCI_ACCESS_WIDTH 0x1000
123 
124 /**
125  * The SSD does not support OPAL even though it sets the security bit in OACS.
126  */
127 #define NVME_QUIRK_OACS_SECURITY 0x2000
128 
129 /**
130  * Intel P55XX SSDs don't support the Dataset Management command with SGL format,
131  * so use PRP with the DSM command.
132  */
133 #define NVME_QUIRK_NO_SGL_FOR_DSM 0x4000
134 
135 /**
136  * Maximum Data Transfer Size (MDTS) excludes interleaved metadata.
137  */
138 #define NVME_QUIRK_MDTS_EXCLUDE_MD 0x8000
139 
140 /**
141  * Force not to use SGL even if the controller reports that it can
142  * support it.
143  */
144 #define NVME_QUIRK_NOT_USE_SGL 0x10000
145 
146 /*
147  * Some SSDs require the admin submission queue size to be an even
148  * 4KiB multiple.
149  */
150 #define NVME_QUIRK_MINIMUM_ADMIN_QUEUE_SIZE 0x20000
151 
152 #define NVME_MAX_ASYNC_EVENTS	(8)
153 
154 #define NVME_MAX_ADMIN_TIMEOUT_IN_SECS	(30)
155 
156 /* Maximum log page size to fetch for AERs. */
157 #define NVME_MAX_AER_LOG_SIZE		(4096)
158 
159 /*
160  * NVME_MAX_IO_QUEUES in nvme_spec.h defines the 64K spec-limit, but this
161  *  define specifies the maximum number of queues this driver will actually
162  *  try to configure, if available.
163  */
164 #define DEFAULT_MAX_IO_QUEUES		(1024)
165 #define DEFAULT_ADMIN_QUEUE_SIZE	(32)
166 #define DEFAULT_IO_QUEUE_SIZE		(256)
167 #define DEFAULT_IO_QUEUE_SIZE_FOR_QUIRK	(1024) /* Matches Linux kernel driver */
168 
169 #define DEFAULT_IO_QUEUE_REQUESTS	(512)
170 
171 #define SPDK_NVME_DEFAULT_RETRY_COUNT	(4)
172 
173 #define SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED	(0)
174 #define SPDK_NVME_DEFAULT_TRANSPORT_ACK_TIMEOUT	SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED
175 
176 #define SPDK_NVME_TRANSPORT_TOS_DISABLED	(0)
177 
178 #define MIN_KEEP_ALIVE_TIMEOUT_IN_MS	(10000)
179 
180 /* We want to fit submission and completion rings each in a single 2MB
181  * hugepage to ensure physical address contiguity.
182  */
183 #define MAX_IO_QUEUE_ENTRIES		(VALUE_2MB / spdk_max( \
184 						sizeof(struct spdk_nvme_cmd), \
185 						sizeof(struct spdk_nvme_cpl)))
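/*
 * Worked example (illustrative): with the standard NVMe entry sizes of
 * 64 bytes for struct spdk_nvme_cmd and 16 bytes for struct spdk_nvme_cpl,
 * this evaluates to
 *
 *     MAX_IO_QUEUE_ENTRIES = VALUE_2MB / spdk_max(64, 16)
 *                          = (2 * 1024 * 1024) / 64
 *                          = 32768
 *
 * i.e. the larger (submission) entry size bounds how many entries fit in a
 * single 2MB hugepage.
 */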
186 
187 /* Default timeout for fabrics connect commands. */
188 #ifdef DEBUG
189 #define NVME_FABRIC_CONNECT_COMMAND_TIMEOUT 0
190 #else
191 /* 500 millisecond timeout. */
192 #define NVME_FABRIC_CONNECT_COMMAND_TIMEOUT 500000
193 #endif
194 
195 /* This value indicates that a read from a PCIe register is invalid. This can happen when a device is no longer present. */
196 #define SPDK_NVME_INVALID_REGISTER_VALUE 0xFFFFFFFFu
197 
198 enum nvme_payload_type {
199 	NVME_PAYLOAD_TYPE_INVALID = 0,
200 
201 	/** nvme_request::payload.contig_or_cb_arg points to a contiguous buffer for this request */
202 	NVME_PAYLOAD_TYPE_CONTIG,
203 
204 	/** nvme_request::payload describes an SGL via reset_sgl_fn/next_sge_fn for this request */
205 	NVME_PAYLOAD_TYPE_SGL,
206 };
207 
208 /** Boot partition write states */
209 enum nvme_bp_write_state {
210 	SPDK_NVME_BP_WS_DOWNLOADING	= 0x0,
211 	SPDK_NVME_BP_WS_DOWNLOADED	= 0x1,
212 	SPDK_NVME_BP_WS_REPLACE		= 0x2,
213 	SPDK_NVME_BP_WS_ACTIVATE	= 0x3,
214 };
215 
216 /**
217  * Descriptor for a request data payload.
218  */
219 struct nvme_payload {
220 	/**
221 	 * Functions for retrieving physical addresses for scattered payloads.
222 	 */
223 	spdk_nvme_req_reset_sgl_cb reset_sgl_fn;
224 	spdk_nvme_req_next_sge_cb next_sge_fn;
225 
226 	/**
227 	 * Extended IO options passed by the user
228 	 */
229 	struct spdk_nvme_ns_cmd_ext_io_opts *opts;
230 	/**
231 	 * If reset_sgl_fn == NULL, this is a contig payload, and contig_or_cb_arg contains the
232 	 * virtual memory address of a single virtually contiguous buffer.
233 	 *
234 	 * If reset_sgl_fn != NULL, this is a SGL payload, and contig_or_cb_arg contains the
235 	 * cb_arg that will be passed to the SGL callback functions.
236 	 */
237 	void *contig_or_cb_arg;
238 
239 	/** Virtual memory address of a single virtually contiguous metadata buffer */
240 	void *md;
241 };
242 
243 #define NVME_PAYLOAD_CONTIG(contig_, md_) \
244 	(struct nvme_payload) { \
245 		.reset_sgl_fn = NULL, \
246 		.next_sge_fn = NULL, \
247 		.contig_or_cb_arg = (contig_), \
248 		.md = (md_), \
249 	}
250 
251 #define NVME_PAYLOAD_SGL(reset_sgl_fn_, next_sge_fn_, cb_arg_, md_) \
252 	(struct nvme_payload) { \
253 		.reset_sgl_fn = (reset_sgl_fn_), \
254 		.next_sge_fn = (next_sge_fn_), \
255 		.contig_or_cb_arg = (cb_arg_), \
256 		.md = (md_), \
257 	}
258 
259 static inline enum nvme_payload_type
260 nvme_payload_type(const struct nvme_payload *payload) {
261 	return payload->reset_sgl_fn ? NVME_PAYLOAD_TYPE_SGL : NVME_PAYLOAD_TYPE_CONTIG;
262 }
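/*
 * Illustrative usage (a sketch, not part of the driver): building both
 * payload flavors and classifying them.  The buffer and callback names are
 * hypothetical.
 *
 *     struct nvme_payload contig = NVME_PAYLOAD_CONTIG(data_buf, md_buf);
 *     struct nvme_payload sgl = NVME_PAYLOAD_SGL(my_reset_sgl_cb, my_next_sge_cb,
 *                                                my_cb_arg, md_buf);
 *
 *     assert(nvme_payload_type(&contig) == NVME_PAYLOAD_TYPE_CONTIG);
 *     assert(nvme_payload_type(&sgl) == NVME_PAYLOAD_TYPE_SGL);
 */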
263 
264 struct nvme_error_cmd {
265 	bool				do_not_submit;
266 	uint64_t			timeout_tsc;
267 	uint32_t			err_count;
268 	uint8_t				opc;
269 	struct spdk_nvme_status		status;
270 	TAILQ_ENTRY(nvme_error_cmd)	link;
271 };
272 
273 struct nvme_request {
274 	struct spdk_nvme_cmd		cmd;
275 
276 	uint8_t				retries;
277 
278 	uint8_t				timed_out : 1;
279 
280 	/**
281 	 * True if the request is in the queued_req list.
282 	 */
283 	uint8_t				queued : 1;
284 	uint8_t				reserved : 6;
285 
286 	/**
287 	 * Number of child requests still outstanding for this
288 	 *  request which was split into multiple child requests.
289 	 */
290 	uint16_t			num_children;
291 
292 	/**
293 	 * Offset in bytes from the beginning of payload for this request.
294 	 * This is used for I/O commands that are split into multiple requests.
295 	 */
296 	uint32_t			payload_offset;
297 	uint32_t			md_offset;
298 
299 	uint32_t			payload_size;
300 
301 	/**
302 	 * Timeout ticks for error injection requests; can be extended in the future
303 	 * to support a per-request timeout feature.
304 	 */
305 	uint64_t			timeout_tsc;
306 
307 	/**
308 	 * Data payload for this request's command.
309 	 */
310 	struct nvme_payload		payload;
311 
312 	spdk_nvme_cmd_cb		cb_fn;
313 	void				*cb_arg;
314 	STAILQ_ENTRY(nvme_request)	stailq;
315 
316 	struct spdk_nvme_qpair		*qpair;
317 
318 	/*
319 	 * The value of spdk_get_ticks() when the request was submitted to the hardware.
320 	 * Only set if ctrlr->timeout_enabled is true.
321 	 */
322 	uint64_t			submit_tick;
323 
324 	/**
325 	 * The active admin request can be moved to a per-process pending
326 	 *  list based on the saved pid to tell which process it belongs
327 	 *  to. The cpl saves the original completion information, which
328 	 *  is used in the completion callback.
329 	 * NOTE: the two fields below are only used for admin requests.
330 	 */
331 	pid_t				pid;
332 	struct spdk_nvme_cpl		cpl;
333 
334 	uint32_t			md_size;
335 
336 	/**
337 	 * The following members should not be reordered with members
338 	 *  above.  These members are only needed when splitting
339 	 *  requests, which is done rarely, and the driver is careful
340 	 *  to not touch the following fields until a split operation is
341 	 *  needed, to avoid touching an extra cacheline.
342 	 */
343 
344 	/**
345 	 * Points to the outstanding child requests for a parent request.
346 	 *  Only valid if a request was split into multiple child
347 	 *  requests, and is not initialized for non-split requests.
348 	 */
349 	TAILQ_HEAD(, nvme_request)	children;
350 
351 	/**
352 	 * Linked-list pointers for a child request in its parent's list.
353 	 */
354 	TAILQ_ENTRY(nvme_request)	child_tailq;
355 
356 	/**
357 	 * Points to a parent request if part of a split request,
358 	 *   NULL otherwise.
359 	 */
360 	struct nvme_request		*parent;
361 
362 	/**
363 	 * Completion status for a parent request.  Initialized to all 0's
364 	 *  (SUCCESS) before child requests are submitted.  If a child
365 	 *  request completes with error, the error status is copied here,
366 	 *  to ensure that the parent request is also completed with error
367 	 *  status once all child requests are completed.
368 	 */
369 	struct spdk_nvme_cpl		parent_status;
370 
371 	/**
372 	 * The user_cb_fn and user_cb_arg fields are used for holding the original
373 	 * callback data when using nvme_allocate_request_user_copy.
374 	 */
375 	spdk_nvme_cmd_cb		user_cb_fn;
376 	void				*user_cb_arg;
377 	void				*user_buffer;
378 };
379 
380 struct nvme_completion_poll_status {
381 	struct spdk_nvme_cpl	cpl;
382 	uint64_t		timeout_tsc;
383 	/**
384 	 * DMA buffer retained throughout the duration of the command.  It'll be released
385 	 * automatically if the command times out; otherwise the user is responsible for freeing it.
386 	 */
387 	void			*dma_data;
388 	bool			done;
389 	/* This flag indicates that the request has timed out and the memory
390 	   must be freed in a completion callback. */
391 	bool			timed_out;
392 };
393 
394 struct nvme_async_event_request {
395 	struct spdk_nvme_ctrlr		*ctrlr;
396 	struct nvme_request		*req;
397 	struct spdk_nvme_cpl		cpl;
398 };
399 
400 enum nvme_qpair_state {
401 	NVME_QPAIR_DISCONNECTED,
402 	NVME_QPAIR_DISCONNECTING,
403 	NVME_QPAIR_CONNECTING,
404 	NVME_QPAIR_CONNECTED,
405 	NVME_QPAIR_ENABLING,
406 	NVME_QPAIR_ENABLED,
407 	NVME_QPAIR_DESTROYING,
408 };
409 
410 struct spdk_nvme_qpair {
411 	struct spdk_nvme_ctrlr			*ctrlr;
412 
413 	uint16_t				id;
414 
415 	uint8_t					qprio;
416 
417 	uint8_t					state : 3;
418 
419 	uint8_t					async: 1;
420 
421 	uint8_t					is_new_qpair: 1;
422 
423 	/*
424 	 * Members for handling IO qpair deletion inside of a completion context.
425 	 * These are specifically defined as single bits, so that they do not
426 	 *  push this data structure out to another cacheline.
427 	 */
428 	uint8_t					in_completion_context : 1;
429 	uint8_t					delete_after_completion_context: 1;
430 
431 	/*
432 	 * Set when no deletion notification is needed. For example, the process
433 	 * which allocated this qpair exited unexpectedly.
434 	 */
435 	uint8_t					no_deletion_notification_needed: 1;
436 
437 	uint8_t					last_fuse: 2;
438 
439 	uint8_t					transport_failure_reason: 2;
440 	uint8_t					last_transport_failure_reason: 2;
441 
442 	uint8_t					abort_dnr: 1;
443 
444 	enum spdk_nvme_transport_type		trtype;
445 
446 	uint32_t				num_outstanding_reqs;
447 
448 	/* request object used only for this qpair's FABRICS/CONNECT command (if needed) */
449 	struct nvme_request			*reserved_req;
450 
451 	STAILQ_HEAD(, nvme_request)		free_req;
452 	STAILQ_HEAD(, nvme_request)		queued_req;
453 
454 	/* List entry for spdk_nvme_transport_poll_group::qpairs */
455 	STAILQ_ENTRY(spdk_nvme_qpair)		poll_group_stailq;
456 
457 	/** Commands whose opcode is in this list will return an error */
458 	TAILQ_HEAD(, nvme_error_cmd)		err_cmd_head;
459 	/** Requests in this list will return an error */
460 	STAILQ_HEAD(, nvme_request)		err_req_head;
461 
462 	struct spdk_nvme_ctrlr_process		*active_proc;
463 
464 	struct spdk_nvme_transport_poll_group	*poll_group;
465 
466 	void					*poll_group_tailq_head;
467 
468 	const struct spdk_nvme_transport	*transport;
469 
470 	/* Entries below here are not touched in the main I/O path. */
471 
472 	struct nvme_completion_poll_status	*poll_status;
473 
474 	/* List entry for spdk_nvme_ctrlr::active_io_qpairs */
475 	TAILQ_ENTRY(spdk_nvme_qpair)		tailq;
476 
477 	/* List entry for spdk_nvme_ctrlr_process::allocated_io_qpairs */
478 	TAILQ_ENTRY(spdk_nvme_qpair)		per_process_tailq;
479 
480 	STAILQ_HEAD(, nvme_request)		aborting_queued_req;
481 
482 	void					*req_buf;
483 };
484 
485 struct spdk_nvme_poll_group {
486 	void						*ctx;
487 	struct spdk_nvme_accel_fn_table			accel_fn_table;
488 	STAILQ_HEAD(, spdk_nvme_transport_poll_group)	tgroups;
489 };
490 
491 struct spdk_nvme_transport_poll_group {
492 	struct spdk_nvme_poll_group			*group;
493 	const struct spdk_nvme_transport		*transport;
494 	STAILQ_HEAD(, spdk_nvme_qpair)			connected_qpairs;
495 	STAILQ_HEAD(, spdk_nvme_qpair)			disconnected_qpairs;
496 	STAILQ_ENTRY(spdk_nvme_transport_poll_group)	link;
497 };
498 
499 struct spdk_nvme_ns {
500 	struct spdk_nvme_ctrlr		*ctrlr;
501 	uint32_t			sector_size;
502 
503 	/*
504 	 * Size of data transferred as part of each block,
505 	 * including metadata if FLBAS indicates the metadata is transferred
506 	 * as part of the data buffer at the end of each LBA.
507 	 */
508 	uint32_t			extended_lba_size;
509 
510 	uint32_t			md_size;
511 	uint32_t			pi_type;
512 	uint32_t			sectors_per_max_io;
513 	uint32_t			sectors_per_max_io_no_md;
514 	uint32_t			sectors_per_stripe;
515 	uint32_t			id;
516 	uint16_t			flags;
517 	bool				active;
518 
519 	/* Command Set Identifier */
520 	enum spdk_nvme_csi		csi;
521 
522 	/* Namespace Identification Descriptor List (CNS = 03h) */
523 	uint8_t				id_desc_list[4096];
524 
525 	uint32_t			ana_group_id;
526 	enum spdk_nvme_ana_state	ana_state;
527 
528 	/* Identify Namespace data. */
529 	struct spdk_nvme_ns_data	nsdata;
530 
531 	/* Zoned Namespace Command Set Specific Identify Namespace data. */
532 	struct spdk_nvme_zns_ns_data	*nsdata_zns;
533 
534 	RB_ENTRY(spdk_nvme_ns)		node;
535 };
536 
537 /**
538  * State of struct spdk_nvme_ctrlr (in particular, during initialization).
539  */
540 enum nvme_ctrlr_state {
541 	/**
542 	 * Wait before initializing the controller.
543 	 */
544 	NVME_CTRLR_STATE_INIT_DELAY,
545 
546 	/**
547 	 * Connect the admin queue.
548 	 */
549 	NVME_CTRLR_STATE_CONNECT_ADMINQ,
550 
551 	/**
552 	 * Controller has not started initialized yet.
553 	 */
554 	NVME_CTRLR_STATE_INIT = NVME_CTRLR_STATE_CONNECT_ADMINQ,
555 
556 	/**
557 	 * Waiting for admin queue to connect.
558 	 */
559 	NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ,
560 
561 	/**
562 	 * Read Version (VS) register.
563 	 */
564 	NVME_CTRLR_STATE_READ_VS,
565 
566 	/**
567 	 * Waiting for Version (VS) register to be read.
568 	 */
569 	NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS,
570 
571 	/**
572 	 * Read Capabilities (CAP) register.
573 	 */
574 	NVME_CTRLR_STATE_READ_CAP,
575 
576 	/**
577 	 * Waiting for Capabilities (CAP) register to be read.
578 	 */
579 	NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP,
580 
581 	/**
582 	 * Check EN to prepare for controller initialization.
583 	 */
584 	NVME_CTRLR_STATE_CHECK_EN,
585 
586 	/**
587 	 * Waiting for CC to be read as part of EN check.
588 	 */
589 	NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC,
590 
591 	/**
592 	 * Waiting for CSTS.RDY to transition from 0 to 1 so that CC.EN may be set to 0.
593 	 */
594 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
595 
596 	/**
597 	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 1.
598 	 */
599 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
600 
601 	/**
602 	 * Disabling the controller by setting CC.EN to 0.
603 	 */
604 	NVME_CTRLR_STATE_SET_EN_0,
605 
606 	/**
607 	 * Waiting for the CC register to be read as part of disabling the controller.
608 	 */
609 	NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC,
610 
611 	/**
612 	 * Waiting for CSTS.RDY to transition from 1 to 0 so that CC.EN may be set to 1.
613 	 */
614 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
615 
616 	/**
617 	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 0.
618 	 */
619 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS,
620 
621 	/**
622 	 * The controller is disabled. (CC.EN and CSTS.RDY are 0.)
623 	 */
624 	NVME_CTRLR_STATE_DISABLED,
625 
626 	/**
627 	 * Enable the controller by writing CC.EN to 1
628 	 */
629 	NVME_CTRLR_STATE_ENABLE,
630 
631 	/**
632 	 * Waiting for CC register to be written as part of enabling the controller.
633 	 */
634 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC,
635 
636 	/**
637 	 * Waiting for CSTS.RDY to transition from 0 to 1 after enabling the controller.
638 	 */
639 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
640 
641 	/**
642 	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 1.
643 	 */
644 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
645 
646 	/**
647 	 * Reset the Admin queue of the controller.
648 	 */
649 	NVME_CTRLR_STATE_RESET_ADMIN_QUEUE,
650 
651 	/**
652 	 * Identify Controller command will be sent to the controller.
653 	 */
654 	NVME_CTRLR_STATE_IDENTIFY,
655 
656 	/**
657 	 * Waiting for Identify Controller command to be completed.
658 	 */
659 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
660 
661 	/**
662 	 * Configure AER of the controller.
663 	 */
664 	NVME_CTRLR_STATE_CONFIGURE_AER,
665 
666 	/**
667 	 * Waiting for the Configure AER to be completed.
668 	 */
669 	NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
670 
671 	/**
672 	 * Set Keep Alive Timeout of the controller.
673 	 */
674 	NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
675 
676 	/**
677 	 * Waiting for Set Keep Alive Timeout to be completed.
678 	 */
679 	NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
680 
681 	/**
682 	 * Get Identify I/O Command Set Specific Controller data structure.
683 	 */
684 	NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
685 
686 	/**
687 	 * Waiting for Identify I/O Command Set Specific Controller command to be completed.
688 	 */
689 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC,
690 
691 	/**
692 	 * Get Commands Supported and Effects log page for the Zoned Namespace Command Set.
693 	 */
694 	NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG,
695 
696 	/**
697 	 * Waiting for the Get Log Page command to be completed.
698 	 */
699 	NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG,
700 
701 	/**
702 	 * Set Number of Queues of the controller.
703 	 */
704 	NVME_CTRLR_STATE_SET_NUM_QUEUES,
705 
706 	/**
707 	 * Waiting for Set Num of Queues command to be completed.
708 	 */
709 	NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
710 
711 	/**
712 	 * Get active Namespace list of the controller.
713 	 */
714 	NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
715 
716 	/**
717 	 * Waiting for the Identify Active Namespace commands to be completed.
718 	 */
719 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS,
720 
721 	/**
722 	 * Get Identify Namespace Data structure for each NS.
723 	 */
724 	NVME_CTRLR_STATE_IDENTIFY_NS,
725 
726 	/**
727 	 * Waiting for the Identify Namespace commands to be completed.
728 	 */
729 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
730 
731 	/**
732 	 * Get Identify Namespace Identification Descriptors.
733 	 */
734 	NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
735 
736 	/**
737 	 * Get Identify I/O Command Set Specific Namespace data structure for each NS.
738 	 */
739 	NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
740 
741 	/**
742 	 * Waiting for the Identify I/O Command Set Specific Namespace commands to be completed.
743 	 */
744 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,
745 
746 	/**
747 	 * Waiting for the Identify Namespace Identification
748 	 * Descriptors to be completed.
749 	 */
750 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
751 
752 	/**
753 	 * Set supported log pages of the controller.
754 	 */
755 	NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
756 
757 	/**
758 	 * Set supported log pages of the Intel controller.
759 	 */
760 	NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES,
761 
762 	/**
763 	 * Waiting for the supported log pages of the Intel controller to be set.
764 	 */
765 	NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES,
766 
767 	/**
768 	 * Set supported features of the controller.
769 	 */
770 	NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
771 
772 	/**
773 	 * Set Doorbell Buffer Config of the controller.
774 	 */
775 	NVME_CTRLR_STATE_SET_DB_BUF_CFG,
776 
777 	/**
778 	 * Waiting for Doorbell Buffer Config to be completed.
779 	 */
780 	NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
781 
782 	/**
783 	 * Set Host ID of the controller.
784 	 */
785 	NVME_CTRLR_STATE_SET_HOST_ID,
786 
787 	/**
788 	 * Waiting for Set Host ID to be completed.
789 	 */
790 	NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
791 
792 	/**
793 	 * Let transport layer do its part of initialization.
794 	 */
795 	NVME_CTRLR_STATE_TRANSPORT_READY,
796 
797 	/**
798 	 * Controller initialization has completed and the controller is ready.
799 	 */
800 	NVME_CTRLR_STATE_READY,
801 
802 	/**
803 	 * Controller initialization has an error.
804 	 */
805 	NVME_CTRLR_STATE_ERROR
806 };
807 
808 #define NVME_TIMEOUT_INFINITE		0
809 #define NVME_TIMEOUT_KEEP_EXISTING	UINT64_MAX
810 
811 struct spdk_nvme_ctrlr_aer_completion_list {
812 	struct spdk_nvme_cpl	cpl;
813 	STAILQ_ENTRY(spdk_nvme_ctrlr_aer_completion_list) link;
814 };
815 
816 /*
817  * Used to track properties for all processes accessing the controller.
818  */
819 struct spdk_nvme_ctrlr_process {
820 	/** Whether it is the primary process  */
821 	bool						is_primary;
822 
823 	/** Process ID */
824 	pid_t						pid;
825 
826 	/** Active admin requests to be completed */
827 	STAILQ_HEAD(, nvme_request)			active_reqs;
828 
829 	TAILQ_ENTRY(spdk_nvme_ctrlr_process)		tailq;
830 
831 	/** Per process PCI device handle */
832 	struct spdk_pci_device				*devhandle;
833 
834 	/** Reference count to track the number of attachments to this controller. */
835 	int						ref;
836 
837 	/** Allocated IO qpairs */
838 	TAILQ_HEAD(, spdk_nvme_qpair)			allocated_io_qpairs;
839 
840 	spdk_nvme_aer_cb				aer_cb_fn;
841 	void						*aer_cb_arg;
842 
843 	/**
844 	 * A function pointer to the timeout callback function
845 	 */
846 	spdk_nvme_timeout_cb		timeout_cb_fn;
847 	void				*timeout_cb_arg;
848 	/** separate timeout values for io vs. admin reqs */
849 	uint64_t			timeout_io_ticks;
850 	uint64_t			timeout_admin_ticks;
851 
852 	/** List to publish AENs to all procs in multiprocess setup */
853 	STAILQ_HEAD(, spdk_nvme_ctrlr_aer_completion_list)      async_events;
854 };
855 
856 struct nvme_register_completion {
857 	struct spdk_nvme_cpl			cpl;
858 	uint64_t				value;
859 	spdk_nvme_reg_cb			cb_fn;
860 	void					*cb_ctx;
861 	STAILQ_ENTRY(nvme_register_completion)	stailq;
862 	pid_t					pid;
863 };
864 
865 struct spdk_nvme_ctrlr {
866 	/* Hot data (accessed in I/O path) starts here. */
867 
868 	/* Tree of namespaces */
869 	RB_HEAD(nvme_ns_tree, spdk_nvme_ns)	ns;
870 
871 	/* The number of active namespaces */
872 	uint32_t			active_ns_count;
873 
874 	bool				is_removed;
875 
876 	bool				is_resetting;
877 
878 	bool				is_failed;
879 
880 	bool				is_destructed;
881 
882 	bool				timeout_enabled;
883 
884 	/* The application is preparing to reset the controller.  Transports
885 	 * can use this to skip unnecessary parts of the qpair deletion process,
886 	 * for example the DELETE_SQ/CQ commands.
887 	 */
888 	bool				prepare_for_reset;
889 
890 	bool				is_disconnecting;
891 
892 	uint16_t			max_sges;
893 
894 	uint16_t			cntlid;
895 
896 	/** Controller support flags */
897 	uint64_t			flags;
898 
899 	/** NVMEoF in-capsule data size in bytes */
900 	uint32_t			ioccsz_bytes;
901 
902 	/** NVMEoF in-capsule data offset in 16 byte units */
903 	uint16_t			icdoff;
904 
905 	/* Cold data (not accessed in normal I/O path) is after this point. */
906 
907 	struct spdk_nvme_transport_id	trid;
908 
909 	union spdk_nvme_cap_register	cap;
910 	union spdk_nvme_vs_register	vs;
911 
912 	int				state;
913 	uint64_t			state_timeout_tsc;
914 
915 	uint64_t			next_keep_alive_tick;
916 	uint64_t			keep_alive_interval_ticks;
917 
918 	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
919 
920 	/** All the log pages supported */
921 	bool				log_page_supported[256];
922 
923 	/** All the features supported */
924 	bool				feature_supported[256];
925 
926 	/** maximum i/o size in bytes */
927 	uint32_t			max_xfer_size;
928 
929 	/** minimum page size supported by this controller in bytes */
930 	uint32_t			min_page_size;
931 
932 	/** selected memory page size for this controller in bytes */
933 	uint32_t			page_size;
934 
935 	uint32_t			num_aers;
936 	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];
937 
938 	/** guards access to the controller itself, including admin queues */
939 	pthread_mutex_t			ctrlr_lock;
940 
941 	struct spdk_nvme_qpair		*adminq;
942 
943 	/** shadow doorbell buffer */
944 	uint32_t			*shadow_doorbell;
945 	/** eventidx buffer */
946 	uint32_t			*eventidx;
947 
948 	/**
949 	 * Identify Controller data.
950 	 */
951 	struct spdk_nvme_ctrlr_data	cdata;
952 
953 	/**
954 	 * Zoned Namespace Command Set Specific Identify Controller data.
955 	 */
956 	struct spdk_nvme_zns_ctrlr_data	*cdata_zns;
957 
958 	struct spdk_bit_array		*free_io_qids;
959 	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
960 
961 	struct spdk_nvme_ctrlr_opts	opts;
962 
963 	uint64_t			quirks;
964 
965 	/* Extra sleep time during controller initialization */
966 	uint64_t			sleep_timeout_tsc;
967 
968 	/** Track all the processes that manage this controller */
969 	TAILQ_HEAD(, spdk_nvme_ctrlr_process)	active_procs;
970 
971 
972 	STAILQ_HEAD(, nvme_request)	queued_aborts;
973 	uint32_t			outstanding_aborts;
974 
975 	/* CB to notify the user when the ctrlr is removed/failed. */
976 	spdk_nvme_remove_cb			remove_cb;
977 	void					*cb_ctx;
978 
979 	struct spdk_nvme_qpair		*external_io_msgs_qpair;
980 	pthread_mutex_t			external_io_msgs_lock;
981 	struct spdk_ring		*external_io_msgs;
982 
983 	STAILQ_HEAD(, nvme_io_msg_producer) io_producers;
984 
985 	struct spdk_nvme_ana_page		*ana_log_page;
986 	struct spdk_nvme_ana_group_descriptor	*copied_ana_desc;
987 	uint32_t				ana_log_page_size;
988 
989 	/* scratchpad pointer that can be used to send data between two NVME_CTRLR_STATEs */
990 	void				*tmp_ptr;
991 
992 	/* maximum zone append size in bytes */
993 	uint32_t			max_zone_append_size;
994 
995 	/* PMR size in bytes */
996 	uint64_t			pmr_size;
997 
998 	/* Boot Partition Info */
999 	enum nvme_bp_write_state	bp_ws;
1000 	uint32_t			bpid;
1001 	spdk_nvme_cmd_cb		bp_write_cb_fn;
1002 	void				*bp_write_cb_arg;
1003 
1004 	/* Firmware Download */
1005 	void				*fw_payload;
1006 	unsigned int			fw_size_remaining;
1007 	unsigned int			fw_offset;
1008 	unsigned int			fw_transfer_size;
1009 
1010 	/* Completed register operations */
1011 	STAILQ_HEAD(, nvme_register_completion)	register_operations;
1012 
1013 	union spdk_nvme_cc_register		process_init_cc;
1014 };
1015 
1016 struct spdk_nvme_probe_ctx {
1017 	struct spdk_nvme_transport_id		trid;
1018 	void					*cb_ctx;
1019 	spdk_nvme_probe_cb			probe_cb;
1020 	spdk_nvme_attach_cb			attach_cb;
1021 	spdk_nvme_remove_cb			remove_cb;
1022 	TAILQ_HEAD(, spdk_nvme_ctrlr)		init_ctrlrs;
1023 };
1024 
1025 typedef void (*nvme_ctrlr_detach_cb)(struct spdk_nvme_ctrlr *ctrlr);
1026 
1027 enum nvme_ctrlr_detach_state {
1028 	NVME_CTRLR_DETACH_SET_CC,
1029 	NVME_CTRLR_DETACH_CHECK_CSTS,
1030 	NVME_CTRLR_DETACH_GET_CSTS,
1031 	NVME_CTRLR_DETACH_GET_CSTS_DONE,
1032 };
1033 
1034 struct nvme_ctrlr_detach_ctx {
1035 	struct spdk_nvme_ctrlr			*ctrlr;
1036 	nvme_ctrlr_detach_cb			cb_fn;
1037 	uint64_t				shutdown_start_tsc;
1038 	uint32_t				shutdown_timeout_ms;
1039 	bool					shutdown_complete;
1040 	enum nvme_ctrlr_detach_state		state;
1041 	union spdk_nvme_csts_register		csts;
1042 	TAILQ_ENTRY(nvme_ctrlr_detach_ctx)	link;
1043 };
1044 
1045 struct spdk_nvme_detach_ctx {
1046 	TAILQ_HEAD(, nvme_ctrlr_detach_ctx)	head;
1047 };
1048 
1049 struct nvme_driver {
1050 	pthread_mutex_t			lock;
1051 
1052 	/** Multi-process shared attached controller list */
1053 	TAILQ_HEAD(, spdk_nvme_ctrlr)	shared_attached_ctrlrs;
1054 
1055 	bool				initialized;
1056 	struct spdk_uuid		default_extended_host_id;
1057 
1058 	/** netlink socket fd for hotplug messages */
1059 	int				hotplug_fd;
1060 };
1061 
1062 extern struct nvme_driver *g_spdk_nvme_driver;
1063 
1064 int nvme_driver_init(void);
1065 
1066 #define nvme_delay		usleep
1067 
1068 static inline bool
1069 nvme_qpair_is_admin_queue(struct spdk_nvme_qpair *qpair)
1070 {
1071 	return qpair->id == 0;
1072 }
1073 
1074 static inline bool
1075 nvme_qpair_is_io_queue(struct spdk_nvme_qpair *qpair)
1076 {
1077 	return qpair->id != 0;
1078 }
1079 
1080 static inline int
1081 nvme_robust_mutex_lock(pthread_mutex_t *mtx)
1082 {
1083 	int rc = pthread_mutex_lock(mtx);
1084 
1085 #ifndef __FreeBSD__
1086 	if (rc == EOWNERDEAD) {
1087 		rc = pthread_mutex_consistent(mtx);
1088 	}
1089 #endif
1090 
1091 	return rc;
1092 }
1093 
1094 static inline int
1095 nvme_robust_mutex_unlock(pthread_mutex_t *mtx)
1096 {
1097 	return pthread_mutex_unlock(mtx);
1098 }
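/*
 * Illustrative usage (a sketch, not part of the driver): the per-controller
 * lock is typically taken through these wrappers so that a mutex abandoned
 * by a dead process can be made consistent again:
 *
 *     nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
 *     ... access ctrlr->adminq or other shared controller state ...
 *     nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 */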
1099 
1100 /* Poll group management functions. */
1101 int nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
1102 int nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1103 
1104 /* Admin functions */
1105 int	nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr,
1106 				uint8_t cns, uint16_t cntid, uint32_t nsid,
1107 				uint8_t csi, void *payload, size_t payload_size,
1108 				spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1109 int	nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
1110 				      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn,
1111 				      void *cb_arg);
1112 int	nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
1113 				      spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1114 int	nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
1115 		union spdk_nvme_feat_async_event_configuration config,
1116 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1117 int	nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
1118 				   spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1119 int	nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1120 				 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1121 int	nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1122 				 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1123 int	nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
1124 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1125 int	nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr,
1126 		uint64_t prp1, uint64_t prp2,
1127 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1128 int	nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
1129 				 void *cb_arg);
1130 int	nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1131 			      struct spdk_nvme_format *format, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1132 int	nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
1133 				 const struct spdk_nvme_fw_commit *fw_commit,
1134 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1135 int	nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
1136 		uint32_t size, uint32_t offset, void *payload,
1137 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1138 int	nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1139 				struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
1140 				spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1141 void	nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl);
1142 int	nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
1143 				 struct nvme_completion_poll_status *status);
1144 int	nvme_wait_for_completion_robust_lock(struct spdk_nvme_qpair *qpair,
1145 		struct nvme_completion_poll_status *status,
1146 		pthread_mutex_t *robust_mutex);
1147 int	nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
1148 		struct nvme_completion_poll_status *status,
1149 		uint64_t timeout_in_usecs);
1150 int	nvme_wait_for_completion_robust_lock_timeout(struct spdk_nvme_qpair *qpair,
1151 		struct nvme_completion_poll_status *status,
1152 		pthread_mutex_t *robust_mutex,
1153 		uint64_t timeout_in_usecs);
1154 int	nvme_wait_for_completion_robust_lock_timeout_poll(struct spdk_nvme_qpair *qpair,
1155 		struct nvme_completion_poll_status *status,
1156 		pthread_mutex_t *robust_mutex);
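/*
 * Illustrative admin-command pattern (a sketch, not part of the driver):
 * nvme_completion_poll_cb() records the completion into a
 * nvme_completion_poll_status object that nvme_wait_for_completion() polls
 * on.  Error handling is abbreviated and the local names (rc, cdata) are
 * hypothetical.
 *
 *     struct nvme_completion_poll_status *status;
 *
 *     status = calloc(1, sizeof(*status));
 *     if (status == NULL) {
 *             return -ENOMEM;
 *     }
 *     rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0, 0,
 *                                  &cdata, sizeof(cdata),
 *                                  nvme_completion_poll_cb, status);
 *     if (rc == 0) {
 *             rc = nvme_wait_for_completion(ctrlr->adminq, status);
 *     }
 *     if (!status->timed_out) {
 *             free(status);
 *     }
 */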
1157 
1158 struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr,
1159 		pid_t pid);
1160 struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr);
1161 int	nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle);
1162 void	nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr);
1163 struct spdk_pci_device *nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr);
1164 
1165 int	nvme_ctrlr_probe(const struct spdk_nvme_transport_id *trid,
1166 			 struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle);
1167 
1168 int	nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr);
1169 void	nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr);
1170 void	nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1171 void	nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr,
1172 				  struct nvme_ctrlr_detach_ctx *ctx);
1173 int	nvme_ctrlr_destruct_poll_async(struct spdk_nvme_ctrlr *ctrlr,
1174 				       struct nvme_ctrlr_detach_ctx *ctx);
1175 void	nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove);
1176 int	nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr);
1177 void	nvme_ctrlr_disable(struct spdk_nvme_ctrlr *ctrlr);
1178 int	nvme_ctrlr_disable_poll(struct spdk_nvme_ctrlr *ctrlr);
1179 void	nvme_ctrlr_connected(struct spdk_nvme_probe_ctx *probe_ctx,
1180 			     struct spdk_nvme_ctrlr *ctrlr);
1181 
1182 int	nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
1183 					struct nvme_request *req);
1184 int	nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap);
1185 int	nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs);
1186 int	nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz);
1187 int	nvme_ctrlr_get_pmrcap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_pmrcap_register *pmrcap);
1188 int	nvme_ctrlr_get_bpinfo(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bpinfo_register *bpinfo);
1189 int	nvme_ctrlr_set_bprsel(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bprsel_register *bprsel);
1190 int	nvme_ctrlr_set_bpmbl(struct spdk_nvme_ctrlr *ctrlr, uint64_t bpmbl_value);
1191 bool	nvme_ctrlr_multi_iocs_enabled(struct spdk_nvme_ctrlr *ctrlr);
1192 void    nvme_ctrlr_process_async_event(struct spdk_nvme_ctrlr *ctrlr,
1193 				       const struct spdk_nvme_cpl *cpl);
1194 void nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1195 void nvme_ctrlr_complete_queued_async_events(struct spdk_nvme_ctrlr *ctrlr);
1196 void nvme_ctrlr_abort_queued_aborts(struct spdk_nvme_ctrlr *ctrlr);
1197 int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
1198 		    struct spdk_nvme_ctrlr *ctrlr,
1199 		    enum spdk_nvme_qprio qprio,
1200 		    uint32_t num_requests, bool async);
1201 void	nvme_qpair_deinit(struct spdk_nvme_qpair *qpair);
1202 void	nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair);
1203 int	nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair,
1204 				  struct nvme_request *req);
1205 void	nvme_qpair_abort_all_queued_reqs(struct spdk_nvme_qpair *qpair);
1206 uint32_t nvme_qpair_abort_queued_reqs_with_cbarg(struct spdk_nvme_qpair *qpair, void *cmd_cb_arg);
1207 void	nvme_qpair_abort_queued_reqs(struct spdk_nvme_qpair *qpair);
1208 void	nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_requests);
1209 int	nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr);
1210 void	nvme_ns_set_identify_data(struct spdk_nvme_ns *ns);
1211 void	nvme_ns_set_id_desc_list_data(struct spdk_nvme_ns *ns);
1212 void	nvme_ns_free_zns_specific_data(struct spdk_nvme_ns *ns);
1213 void	nvme_ns_free_iocs_specific_data(struct spdk_nvme_ns *ns);
1214 bool	nvme_ns_has_supported_iocs_specific_data(struct spdk_nvme_ns *ns);
1215 int	nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
1216 			  struct spdk_nvme_ctrlr *ctrlr);
1217 void	nvme_ns_destruct(struct spdk_nvme_ns *ns);
1218 int	nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1219 					void *buffer, void *metadata, uint64_t zslba,
1220 					uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1221 					uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag);
1222 int nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1223 				     uint64_t zslba, uint32_t lba_count,
1224 				     spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
1225 				     spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1226 				     spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
1227 				     uint16_t apptag_mask, uint16_t apptag);
1228 
1229 int	nvme_fabric_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
1230 int	nvme_fabric_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
1231 int	nvme_fabric_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
1232 int	nvme_fabric_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
1233 int	nvme_fabric_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1234 		uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1235 int	nvme_fabric_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1236 		uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1237 int	nvme_fabric_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1238 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1239 int	nvme_fabric_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1240 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1241 int	nvme_fabric_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
1242 int	nvme_fabric_ctrlr_discover(struct spdk_nvme_ctrlr *ctrlr,
1243 				   struct spdk_nvme_probe_ctx *probe_ctx);
1244 int	nvme_fabric_qpair_connect(struct spdk_nvme_qpair *qpair, uint32_t num_entries);
1245 int	nvme_fabric_qpair_connect_async(struct spdk_nvme_qpair *qpair, uint32_t num_entries);
1246 int	nvme_fabric_qpair_connect_poll(struct spdk_nvme_qpair *qpair);
1247 
1248 typedef int (*spdk_nvme_parse_ana_log_page_cb)(
1249 	const struct spdk_nvme_ana_group_descriptor *desc, void *cb_arg);
1250 int	nvme_ctrlr_parse_ana_log_page(struct spdk_nvme_ctrlr *ctrlr,
1251 				      spdk_nvme_parse_ana_log_page_cb cb_fn, void *cb_arg);
1252 
1253 #define NVME_INIT_REQUEST(req, _cb_fn, _cb_arg, _payload, _payload_size, _md_size)	\
1254 	do {						\
1255 		req->cb_fn = _cb_fn;			\
1256 		req->cb_arg = _cb_arg;			\
1257 		req->payload = _payload;		\
1258 		req->payload_size = _payload_size;	\
1259 		req->md_size = _md_size;		\
1260 		req->pid = g_spdk_nvme_pid;		\
1261 		req->submit_tick = 0;			\
1262 	} while (0);
1263 
1264 static inline struct nvme_request *
1265 nvme_allocate_request(struct spdk_nvme_qpair *qpair,
1266 		      const struct nvme_payload *payload, uint32_t payload_size, uint32_t md_size,
1267 		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1268 {
1269 	struct nvme_request *req;
1270 
1271 	req = STAILQ_FIRST(&qpair->free_req);
1272 	if (req == NULL) {
1273 		return req;
1274 	}
1275 
1276 	STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);
1277 	qpair->num_outstanding_reqs++;
1278 
1279 	/*
1280 	 * Only memset/zero fields that need it.  All other fields
1281 	 *  will be initialized appropriately either later in this
1282 	 *  function, or before they are needed later in the
1283 	 *  submission path.  For example, the children
1284 	 *  TAILQ_ENTRY and following members are
1285 	 *  only used as part of I/O splitting so we avoid
1286 	 *  memsetting them until it is actually needed.
1287 	 *  They will be initialized in nvme_request_add_child()
1288 	 *  if the request is split.
1289 	 */
1290 	memset(req, 0, offsetof(struct nvme_request, payload_size));
1291 
1292 	NVME_INIT_REQUEST(req, cb_fn, cb_arg, *payload, payload_size, md_size);
1293 
1294 	return req;
1295 }
1296 
1297 static inline struct nvme_request *
1298 nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair,
1299 			     void *buffer, uint32_t payload_size,
1300 			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1301 {
1302 	struct nvme_payload payload;
1303 
1304 	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
1305 
1306 	return nvme_allocate_request(qpair, &payload, payload_size, 0, cb_fn, cb_arg);
1307 }
1308 
1309 static inline struct nvme_request *
1310 nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1311 {
1312 	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
1313 }
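/*
 * Illustrative allocation path (a sketch, not part of the driver): a typical
 * command helper allocates a request from the qpair, fills in the command,
 * and hands it to nvme_qpair_submit_request().  The buffer, size, and
 * callback names are hypothetical.
 *
 *     struct nvme_request *req;
 *
 *     req = nvme_allocate_request_contig(qpair, buffer, payload_size,
 *                                        cb_fn, cb_arg);
 *     if (req == NULL) {
 *             return -ENOMEM;
 *     }
 *     req->cmd.opc = SPDK_NVME_OPC_READ;
 *     req->cmd.nsid = ns->id;
 *     return nvme_qpair_submit_request(qpair, req);
 */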
1314 
1315 struct nvme_request *nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
1316 		void *buffer, uint32_t payload_size,
1317 		spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller);
1318 
1319 static inline void
1320 nvme_complete_request(spdk_nvme_cmd_cb cb_fn, void *cb_arg, struct spdk_nvme_qpair *qpair,
1321 		      struct nvme_request *req, struct spdk_nvme_cpl *cpl)
1322 {
1323 	struct spdk_nvme_cpl            err_cpl;
1324 	struct nvme_error_cmd           *cmd;
1325 
1326 	/* Error injection at the completion path;
1327 	 * only inject for successfully completed commands.
1328 	 */
1329 	if (spdk_unlikely(!TAILQ_EMPTY(&qpair->err_cmd_head) &&
1330 			  !spdk_nvme_cpl_is_error(cpl))) {
1331 		TAILQ_FOREACH(cmd, &qpair->err_cmd_head, link) {
1332 
1333 			if (cmd->do_not_submit) {
1334 				continue;
1335 			}
1336 
1337 			if ((cmd->opc == req->cmd.opc) && cmd->err_count) {
1338 
1339 				err_cpl = *cpl;
1340 				err_cpl.status.sct = cmd->status.sct;
1341 				err_cpl.status.sc = cmd->status.sc;
1342 
1343 				cpl = &err_cpl;
1344 				cmd->err_count--;
1345 				break;
1346 			}
1347 		}
1348 	}
1349 
1350 	if (cb_fn) {
1351 		cb_fn(cb_arg, cpl);
1352 	}
1353 }
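/*
 * Illustrative error-injection setup (a sketch, not part of the driver): the
 * err_cmd_head entries consulted above are typically populated through
 * spdk_nvme_qpair_add_cmd_error_injection() (declared in spdk/nvme.h), e.g.
 * a call along these lines fails the next two READ commands on a qpair:
 *
 *     spdk_nvme_qpair_add_cmd_error_injection(ctrlr, qpair,
 *                                             SPDK_NVME_OPC_READ,
 *                                             false, 0, 2,
 *                                             SPDK_NVME_SCT_GENERIC,
 *                                             SPDK_NVME_SC_INVALID_FIELD);
 */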
1354 
1355 static inline void
1356 nvme_free_request(struct nvme_request *req)
1357 {
1358 	assert(req != NULL);
1359 	assert(req->num_children == 0);
1360 	assert(req->qpair != NULL);
1361 
1362 	/* The reserved_req does not go in the free_req STAILQ - it is
1363 	 * saved only for use with a FABRICS/CONNECT command.
1364 	 */
1365 	if (spdk_likely(req->qpair->reserved_req != req)) {
1366 		STAILQ_INSERT_HEAD(&req->qpair->free_req, req, stailq);
1367 
1368 		assert(req->qpair->num_outstanding_reqs > 0);
1369 		req->qpair->num_outstanding_reqs--;
1370 	}
1371 }
1372 
1373 static inline void
1374 nvme_qpair_set_state(struct spdk_nvme_qpair *qpair, enum nvme_qpair_state state)
1375 {
1376 	qpair->state = state;
1377 	if (state == NVME_QPAIR_ENABLED) {
1378 		qpair->is_new_qpair = false;
1379 	}
1380 }
1381 
1382 static inline enum nvme_qpair_state
1383 nvme_qpair_get_state(struct spdk_nvme_qpair *qpair) {
1384 	return qpair->state;
1385 }
1386 
1387 static inline void
1388 nvme_qpair_free_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
1389 {
1390 	assert(req != NULL);
1391 	assert(req->num_children == 0);
1392 
1393 	STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
1394 
1395 	assert(req->qpair->num_outstanding_reqs > 0);
1396 	req->qpair->num_outstanding_reqs--;
1397 }
1398 
1399 static inline void
1400 nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child)
1401 {
1402 	assert(parent != NULL);
1403 	assert(child != NULL);
1404 	assert(child->parent == parent);
1405 	assert(parent->num_children != 0);
1406 
1407 	parent->num_children--;
1408 	child->parent = NULL;
1409 	TAILQ_REMOVE(&parent->children, child, child_tailq);
1410 }
1411 
1412 static inline void
1413 nvme_cb_complete_child(void *child_arg, const struct spdk_nvme_cpl *cpl)
1414 {
1415 	struct nvme_request *child = child_arg;
1416 	struct nvme_request *parent = child->parent;
1417 
1418 	nvme_request_remove_child(parent, child);
1419 
1420 	if (spdk_nvme_cpl_is_error(cpl)) {
1421 		memcpy(&parent->parent_status, cpl, sizeof(*cpl));
1422 	}
1423 
1424 	if (parent->num_children == 0) {
1425 		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
1426 				      parent, &parent->parent_status);
1427 		nvme_free_request(parent);
1428 	}
1429 }
1430 
1431 static inline void
1432 nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
1433 {
1434 	assert(parent->num_children != UINT16_MAX);
1435 
1436 	if (parent->num_children == 0) {
1437 		/*
1438 		 * Defer initialization of the children TAILQ since it falls
1439 		 *  on a separate cacheline.  This ensures we do not touch this
1440 		 *  cacheline except on request splitting cases, which are
1441 		 *  relatively rare.
1442 		 */
1443 		TAILQ_INIT(&parent->children);
1444 		parent->parent = NULL;
1445 		memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));
1446 	}
1447 
1448 	parent->num_children++;
1449 	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
1450 	child->parent = parent;
1451 	child->cb_fn = nvme_cb_complete_child;
1452 	child->cb_arg = child;
1453 }
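/*
 * Illustrative splitting pattern (a sketch, not part of the driver): when an
 * I/O exceeds the transfer limits, it is split by attaching child requests
 * to a parent request, each child covering a slice of the payload.  The
 * slice names are hypothetical.
 *
 *     parent = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
 *     child = nvme_allocate_request(qpair, &payload, slice_size, 0, NULL, NULL);
 *     child->payload_offset = slice_offset;
 *     nvme_request_add_child(parent, child);
 *     ... repeat for the remaining slices and submit each child; the parent
 *     completes through nvme_cb_complete_child() once num_children drops to
 *     zero ...
 */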
1454 
1455 static inline void
1456 nvme_request_free_children(struct nvme_request *req)
1457 {
1458 	struct nvme_request *child, *tmp;
1459 
1460 	if (req->num_children == 0) {
1461 		return;
1462 	}
1463 
1464 	/* free all child nvme_request */
1465 	TAILQ_FOREACH_SAFE(child, &req->children, child_tailq, tmp) {
1466 		nvme_request_remove_child(req, child);
1467 		nvme_request_free_children(child);
1468 		nvme_free_request(child);
1469 	}
1470 }
1471 
1472 int	nvme_request_check_timeout(struct nvme_request *req, uint16_t cid,
1473 				   struct spdk_nvme_ctrlr_process *active_proc, uint64_t now_tick);
1474 uint64_t nvme_get_quirks(const struct spdk_pci_id *id);
1475 
1476 int	nvme_robust_mutex_init_shared(pthread_mutex_t *mtx);
1477 int	nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx);
1478 
1479 bool	nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl);
1480 
1481 struct spdk_nvme_ctrlr *nvme_get_ctrlr_by_trid_unsafe(
1482 	const struct spdk_nvme_transport_id *trid);
1483 
1484 const struct spdk_nvme_transport *nvme_get_transport(const char *transport_name);
1485 const struct spdk_nvme_transport *nvme_get_first_transport(void);
1486 const struct spdk_nvme_transport *nvme_get_next_transport(const struct spdk_nvme_transport
1487 		*transport);
1488 void  nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr);
1489 
1490 /* Transport specific functions */
1491 struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
1492 		const struct spdk_nvme_ctrlr_opts *opts,
1493 		void *devhandle);
1494 int nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1495 int nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
1496 int nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr);
1497 int nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr);
1498 int nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
1499 int nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
1500 int nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
1501 int nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
1502 int nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1503 		uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1504 int nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1505 		uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1506 int nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1507 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1508 int nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1509 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1510 uint32_t nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr);
1511 uint16_t nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);
1512 struct spdk_nvme_qpair *nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1513 		uint16_t qid, const struct spdk_nvme_io_qpair_opts *opts);
1514 int nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr);
1515 void *nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
1516 int nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr);
1517 int nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr);
1518 int nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr);
1519 void *nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
1520 int nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr);
1521 void nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1522 		struct spdk_nvme_qpair *qpair);
1523 int nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1524 				       struct spdk_nvme_qpair *qpair);
1525 void nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1526 		struct spdk_nvme_qpair *qpair);
1527 void nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair);
1528 int nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
1529 		struct spdk_memory_domain **domains, int array_size);
1530 void nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair);
1531 int nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair);
1532 int nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req);
1533 int32_t nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair,
1534 		uint32_t max_completions);
1535 void nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair);
1536 int nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
1537 		int (*iter_fn)(struct nvme_request *req, void *arg),
1538 		void *arg);
1539 
1540 struct spdk_nvme_transport_poll_group *nvme_transport_poll_group_create(
1541 	const struct spdk_nvme_transport *transport);
1542 struct spdk_nvme_transport_poll_group *nvme_transport_qpair_get_optimal_poll_group(
1543 	const struct spdk_nvme_transport *transport,
1544 	struct spdk_nvme_qpair *qpair);
1545 int nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
1546 				  struct spdk_nvme_qpair *qpair);
1547 int nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
1548 				     struct spdk_nvme_qpair *qpair);
1549 int nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1550 int nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
1551 int64_t nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
1552 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
1553 int nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup);
1554 int nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
1555 					struct spdk_nvme_transport_poll_group_stat **stats);
1556 void nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
1557 		struct spdk_nvme_transport_poll_group_stat *stats);
1558 enum spdk_nvme_transport_type nvme_transport_get_trtype(const struct spdk_nvme_transport
1559 		*transport);
1560 /*
1561  * The ref-related functions below must be called with the global
1562  *  driver lock held to handle the multi-process case.
1563  *  Within these functions, the per-controller ctrlr_lock is also
1564  *  acquired to handle the multi-thread case.
1565  */
1566 void	nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr);
1567 void	nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr);
1568 int	nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr);
1569 
1570 static inline bool
1571 _is_page_aligned(uint64_t address, uint64_t page_size)
1572 {
1573 	return (address & (page_size - 1)) == 0;
1574 }
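/*
 * Example (illustrative): with a 4KiB page size,
 *
 *     _is_page_aligned(0x201000, 0x1000) == true
 *     _is_page_aligned(0x201010, 0x1000) == false
 */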
1575 
1576 #endif /* __NVME_INTERNAL_H__ */
1577