xref: /spdk/lib/nvme/nvme_internal.h (revision 7b8c7efe8fe5cbfb09d5ebff2fbad7ce49c7504d)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2015 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #ifndef __NVME_INTERNAL_H__
8 #define __NVME_INTERNAL_H__
9 
10 #include "spdk/config.h"
11 #include "spdk/likely.h"
12 #include "spdk/stdinc.h"
13 
14 #include "spdk/nvme.h"
15 
16 #if defined(__i386__) || defined(__x86_64__)
17 #include <x86intrin.h>
18 #endif
19 
20 #include "spdk/queue.h"
21 #include "spdk/barrier.h"
22 #include "spdk/bit_array.h"
23 #include "spdk/mmio.h"
24 #include "spdk/pci_ids.h"
25 #include "spdk/util.h"
26 #include "spdk/memory.h"
27 #include "spdk/nvme_intel.h"
28 #include "spdk/nvmf_spec.h"
29 #include "spdk/tree.h"
30 #include "spdk/uuid.h"
31 
32 #include "spdk_internal/assert.h"
33 #include "spdk/log.h"
34 
35 extern pid_t g_spdk_nvme_pid;
36 
37 extern struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts;
38 
39 /*
40  * Some Intel devices support vendor-unique read latency log page even
41  * though the log page directory says otherwise.
42  */
43 #define NVME_INTEL_QUIRK_READ_LATENCY 0x1
44 
45 /*
46  * Some Intel devices support vendor-unique write latency log page even
47  * though the log page directory says otherwise.
48  */
49 #define NVME_INTEL_QUIRK_WRITE_LATENCY 0x2
50 
51 /*
52  *   The controller needs a delay before the host starts checking device
53  * readiness, which is done by reading the NVME_CSTS_RDY bit.
54  */
55 #define NVME_QUIRK_DELAY_BEFORE_CHK_RDY	0x4
56 
57 /*
58  * The controller performs best when I/O is split on particular
59  * LBA boundaries.
60  */
61 #define NVME_INTEL_QUIRK_STRIPING 0x8
62 
63 /*
64  * The controller needs a delay after allocating an I/O queue pair
65  * before it is ready to accept I/O commands.
66  */
67 #define NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC 0x10
68 
69 /*
70  * Earlier NVMe devices do not indicate whether unmapped blocks
71  * will read all zeroes or not. This define indicates that the
72  * device does in fact read all zeroes after an unmap event.
73  */
74 #define NVME_QUIRK_READ_ZERO_AFTER_DEALLOCATE 0x20
75 
76 /*
77  * The controller doesn't handle Identify CNS values other than 0 or 1 correctly.
78  */
79 #define NVME_QUIRK_IDENTIFY_CNS 0x40
80 
81 /*
82  * The controller supports the Open Channel command set if an additional
83  * condition is met, e.g. the first byte (value 0x1) in the vendor specific
84  * bits of the namespace identify structure is set.
85  */
86 #define NVME_QUIRK_OCSSD 0x80
87 
88 /*
89  * The controller has an Intel vendor ID but does not support Intel vendor-specific
90  * log pages.  This is primarily for QEMU emulated SSDs which report an Intel vendor
91  * ID but do not support these log pages.
92  */
93 #define NVME_INTEL_QUIRK_NO_LOG_PAGES 0x100
94 
95 /*
96  * The controller does not set SHST_COMPLETE in a reasonable amount of time.  This
97  * is primarily seen in virtual VMware NVMe SSDs.  This quirk merely adds an additional
98  * error message noting that on VMware NVMe SSDs, the shutdown timeout may be expected.
99  */
100 #define NVME_QUIRK_SHST_COMPLETE 0x200
101 
102 /*
103  * The controller requires an extra delay before starting the initialization process
104  * during attach.
105  */
106 #define NVME_QUIRK_DELAY_BEFORE_INIT 0x400
107 
108 /*
109  * Some SSDs exhibit poor performance with the default SPDK NVMe IO queue size.
110  * This quirk will increase the default to 1024, which matches other operating
111  * systems, at the cost of some extra memory usage.  Users can still override
112  * the increased default by changing the spdk_nvme_io_qpair_opts when allocating
113  * a new queue pair.
114  */
115 #define NVME_QUIRK_MINIMUM_IO_QUEUE_SIZE 0x800
116 
117 /**
118  * The maximum access width to PCI memory space is 8 bytes; don't use AVX2 or
119  * SSE instructions to optimize memory accesses (memcpy or memset) larger than
120  * 8 bytes.
121  */
122 #define NVME_QUIRK_MAXIMUM_PCI_ACCESS_WIDTH 0x1000
123 
124 /**
125  * The SSD does not support OPAL even though it sets the security bit in OACS.
126  */
127 #define NVME_QUIRK_OACS_SECURITY 0x2000
128 
129 /**
130  * Intel P55XX SSDs do not support the Dataset Management command with SGL format,
131  * so use PRP with the DSM command.
132  */
133 #define NVME_QUIRK_NO_SGL_FOR_DSM 0x4000
134 
135 /**
136  * Maximum Data Transfer Size (MDTS) excludes interleaved metadata.
137  */
138 #define NVME_QUIRK_MDTS_EXCLUDE_MD 0x8000
139 
140 /**
141  * Force not to use SGLs even if the controller reports that it
142  * supports them.
143  */
144 #define NVME_QUIRK_NOT_USE_SGL 0x10000
145 
146 /*
147  * Some SSDs require the admin submission queue size to be an even
148  * multiple of 4 KiB.
149  */
150 #define NVME_QUIRK_MINIMUM_ADMIN_QUEUE_SIZE 0x20000
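
/*
 * Illustrative sketch (not part of this header's API): the quirk values above
 * are bit flags stored in spdk_nvme_ctrlr::quirks, so a typical check in the
 * driver looks like:
 *
 *     if (ctrlr->quirks & NVME_QUIRK_MDTS_EXCLUDE_MD) {
 *             // exclude interleaved metadata when computing the max transfer size
 *     }
 *
 * The exact call sites depend on the controller/transport code using the quirk.
 */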
151 
152 #define NVME_MAX_ASYNC_EVENTS	(8)
153 
154 #define NVME_MAX_ADMIN_TIMEOUT_IN_SECS	(30)
155 
156 /* Maximum log page size to fetch for AERs. */
157 #define NVME_MAX_AER_LOG_SIZE		(4096)
158 
159 /*
160  * NVME_MAX_IO_QUEUES in nvme_spec.h defines the 64K spec-limit, but this
161  *  define specifies the maximum number of queues this driver will actually
162  *  try to configure, if available.
163  */
164 #define DEFAULT_MAX_IO_QUEUES		(1024)
165 #define DEFAULT_ADMIN_QUEUE_SIZE	(32)
166 #define DEFAULT_IO_QUEUE_SIZE		(256)
167 #define DEFAULT_IO_QUEUE_SIZE_FOR_QUIRK	(1024) /* Matches Linux kernel driver */
168 
169 #define DEFAULT_IO_QUEUE_REQUESTS	(512)
170 
171 #define SPDK_NVME_DEFAULT_RETRY_COUNT	(4)
172 
173 #define SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED	(0)
174 #define SPDK_NVME_DEFAULT_TRANSPORT_ACK_TIMEOUT	SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED
175 
176 #define SPDK_NVME_TRANSPORT_TOS_DISABLED	(0)
177 
178 #define MIN_KEEP_ALIVE_TIMEOUT_IN_MS	(10000)
179 
180 /* We want to fit submission and completion rings each in a single 2MB
181  * hugepage to ensure physical address contiguity.
182  */
183 #define MAX_IO_QUEUE_ENTRIES		(VALUE_2MB / spdk_max( \
184 						sizeof(struct spdk_nvme_cmd), \
185 						sizeof(struct spdk_nvme_cpl)))
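
/*
 * Worked example: a submission queue entry (struct spdk_nvme_cmd) is 64 bytes
 * and a completion queue entry (struct spdk_nvme_cpl) is 16 bytes, so the
 * larger of the two gives MAX_IO_QUEUE_ENTRIES = 2 MiB / 64 B = 32768 entries.
 */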
186 
187 /* Default timeout for fabrics connect commands. */
188 #ifdef DEBUG
189 #define NVME_FABRIC_CONNECT_COMMAND_TIMEOUT 0
190 #else
191 /* 500 millisecond timeout, expressed in microseconds. */
192 #define NVME_FABRIC_CONNECT_COMMAND_TIMEOUT 500000
193 #endif
194 
195 /* This value indicates that a read from a PCIe register is invalid. This can happen when a device is no longer present. */
196 #define SPDK_NVME_INVALID_REGISTER_VALUE 0xFFFFFFFFu
197 
198 enum nvme_payload_type {
199 	NVME_PAYLOAD_TYPE_INVALID = 0,
200 
201 	/** nvme_request::u.payload.contig_buffer is valid for this request */
202 	NVME_PAYLOAD_TYPE_CONTIG,
203 
204 	/** nvme_request::u.sgl is valid for this request */
205 	NVME_PAYLOAD_TYPE_SGL,
206 };
207 
208 /** Boot partition write states */
209 enum nvme_bp_write_state {
210 	SPDK_NVME_BP_WS_DOWNLOADING	= 0x0,
211 	SPDK_NVME_BP_WS_DOWNLOADED	= 0x1,
212 	SPDK_NVME_BP_WS_REPLACE		= 0x2,
213 	SPDK_NVME_BP_WS_ACTIVATE	= 0x3,
214 };
215 
216 /**
217  * Descriptor for a request data payload.
218  */
219 struct nvme_payload {
220 	/**
221 	 * Functions for retrieving physical addresses for scattered payloads.
222 	 */
223 	spdk_nvme_req_reset_sgl_cb reset_sgl_fn;
224 	spdk_nvme_req_next_sge_cb next_sge_fn;
225 
226 	/**
227 	 * Extended IO options passed by the user
228 	 */
229 	struct spdk_nvme_ns_cmd_ext_io_opts *opts;
230 	/**
231 	 * If reset_sgl_fn == NULL, this is a contig payload, and contig_or_cb_arg contains the
232 	 * virtual memory address of a single virtually contiguous buffer.
233 	 *
234 	 * If reset_sgl_fn != NULL, this is a SGL payload, and contig_or_cb_arg contains the
235 	 * cb_arg that will be passed to the SGL callback functions.
236 	 */
237 	void *contig_or_cb_arg;
238 
239 	/** Virtual memory address of a single virtually contiguous metadata buffer */
240 	void *md;
241 };
242 
243 #define NVME_PAYLOAD_CONTIG(contig_, md_) \
244 	(struct nvme_payload) { \
245 		.reset_sgl_fn = NULL, \
246 		.next_sge_fn = NULL, \
247 		.contig_or_cb_arg = (contig_), \
248 		.md = (md_), \
249 	}
250 
251 #define NVME_PAYLOAD_SGL(reset_sgl_fn_, next_sge_fn_, cb_arg_, md_) \
252 	(struct nvme_payload) { \
253 		.reset_sgl_fn = (reset_sgl_fn_), \
254 		.next_sge_fn = (next_sge_fn_), \
255 		.contig_or_cb_arg = (cb_arg_), \
256 		.md = (md_), \
257 	}
258 
259 static inline enum nvme_payload_type
260 nvme_payload_type(const struct nvme_payload *payload) {
261 	return payload->reset_sgl_fn ? NVME_PAYLOAD_TYPE_SGL : NVME_PAYLOAD_TYPE_CONTIG;
262 }
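
/*
 * Illustrative sketch (data_buf is a hypothetical caller-provided buffer):
 * building a contiguous payload descriptor and checking its type:
 *
 *     struct nvme_payload payload = NVME_PAYLOAD_CONTIG(data_buf, NULL);
 *
 *     assert(nvme_payload_type(&payload) == NVME_PAYLOAD_TYPE_CONTIG);
 *
 * Because the type is inferred from reset_sgl_fn, an SGL payload must always
 * supply a non-NULL reset_sgl_fn through NVME_PAYLOAD_SGL().
 */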
263 
264 struct nvme_error_cmd {
265 	bool				do_not_submit;
266 	uint64_t			timeout_tsc;
267 	uint32_t			err_count;
268 	uint8_t				opc;
269 	struct spdk_nvme_status		status;
270 	TAILQ_ENTRY(nvme_error_cmd)	link;
271 };
272 
273 struct nvme_request {
274 	struct spdk_nvme_cmd		cmd;
275 
276 	uint8_t				retries;
277 
278 	uint8_t				timed_out : 1;
279 
280 	/**
281 	 * True if the request is in the queued_req list.
282 	 */
283 	uint8_t				queued : 1;
284 	uint8_t				reserved : 6;
285 
286 	/**
287 	 * Number of child requests still outstanding for this
288 	 *  request, which was split into multiple child requests.
289 	 */
290 	uint16_t			num_children;
291 
292 	/**
293 	 * Offset in bytes from the beginning of payload for this request.
294 	 * This is used for I/O commands that are split into multiple requests.
295 	 */
296 	uint32_t			payload_offset;
297 	uint32_t			md_offset;
298 
299 	uint32_t			payload_size;
300 
301 	/**
302 	 * Timeout ticks for error injection requests; can be extended in the future
303 	 * to support a per-request timeout feature.
304 	 */
305 	uint64_t			timeout_tsc;
306 
307 	/**
308 	 * Data payload for this request's command.
309 	 */
310 	struct nvme_payload		payload;
311 
312 	spdk_nvme_cmd_cb		cb_fn;
313 	void				*cb_arg;
314 	STAILQ_ENTRY(nvme_request)	stailq;
315 
316 	struct spdk_nvme_qpair		*qpair;
317 
318 	/*
319 	 * The value of spdk_get_ticks() when the request was submitted to the hardware.
320 	 * Only set if ctrlr->timeout_enabled is true.
321 	 */
322 	uint64_t			submit_tick;
323 
324 	/**
325 	 * The active admin request can be moved to a per-process pending
326 	 *  list based on the saved pid to tell which process it belongs
327 	 *  to. The cpl saves the original completion information, which
328 	 *  is used in the completion callback.
329 	 * NOTE: the two fields below are only used for admin requests.
330 	 */
331 	pid_t				pid;
332 	struct spdk_nvme_cpl		cpl;
333 
334 	uint32_t			md_size;
335 
336 	/**
337 	 * The following members should not be reordered with members
338 	 *  above.  These members are only needed when splitting
339 	 *  requests, which is done rarely, and the driver is careful
340 	 *  to not touch the following fields until a split operation is
341 	 *  needed, to avoid touching an extra cacheline.
342 	 */
343 
344 	/**
345 	 * Points to the outstanding child requests for a parent request.
346 	 *  Only valid if a request was split into multiple child
347 	 *  requests, and is not initialized for non-split requests.
348 	 */
349 	TAILQ_HEAD(, nvme_request)	children;
350 
351 	/**
352 	 * Linked-list pointers for a child request in its parent's list.
353 	 */
354 	TAILQ_ENTRY(nvme_request)	child_tailq;
355 
356 	/**
357 	 * Points to a parent request if part of a split request,
358 	 *   NULL otherwise.
359 	 */
360 	struct nvme_request		*parent;
361 
362 	/**
363 	 * Completion status for a parent request.  Initialized to all 0's
364 	 *  (SUCCESS) before child requests are submitted.  If a child
365 	 *  request completes with error, the error status is copied here,
366 	 *  to ensure that the parent request is also completed with error
367 	 *  status once all child requests are completed.
368 	 */
369 	struct spdk_nvme_cpl		parent_status;
370 
371 	/**
372 	 * The user_cb_fn and user_cb_arg fields are used for holding the original
373 	 * callback data when using nvme_allocate_request_user_copy.
374 	 */
375 	spdk_nvme_cmd_cb		user_cb_fn;
376 	void				*user_cb_arg;
377 	void				*user_buffer;
378 
379 	/** Sequence of accel operations associated with this request */
380 	void				*accel_sequence;
381 };
382 
383 struct nvme_completion_poll_status {
384 	struct spdk_nvme_cpl	cpl;
385 	uint64_t		timeout_tsc;
386 	/**
387 	 * DMA buffer retained throughout the duration of the command.  It'll be released
388 	 * automatically if the command times out, otherwise the user is responsible for freeing it.
389 	 */
390 	void			*dma_data;
391 	bool			done;
392 	/* This flag indicates that the request has timed out and the memory
393 	   must be freed in a completion callback */
394 	bool			timed_out;
395 };
396 
397 struct nvme_async_event_request {
398 	struct spdk_nvme_ctrlr		*ctrlr;
399 	struct nvme_request		*req;
400 	struct spdk_nvme_cpl		cpl;
401 };
402 
403 enum nvme_qpair_state {
404 	NVME_QPAIR_DISCONNECTED,
405 	NVME_QPAIR_DISCONNECTING,
406 	NVME_QPAIR_CONNECTING,
407 	NVME_QPAIR_CONNECTED,
408 	NVME_QPAIR_ENABLING,
409 	NVME_QPAIR_ENABLED,
410 	NVME_QPAIR_DESTROYING,
411 };
412 
413 enum nvme_qpair_connect_state {
414 	NVME_QPAIR_CONNECT_STATE_CONNECTING,
415 	NVME_QPAIR_CONNECT_STATE_AUTHENTICATING,
416 	NVME_QPAIR_CONNECT_STATE_CONNECTED,
417 	NVME_QPAIR_CONNECT_STATE_FAILED,
418 };
419 
420 enum nvme_qpair_auth_state {
421 	NVME_QPAIR_AUTH_STATE_NEGOTIATE,
422 	NVME_QPAIR_AUTH_STATE_AWAIT_NEGOTIATE,
423 	NVME_QPAIR_AUTH_STATE_AWAIT_CHALLENGE,
424 	NVME_QPAIR_AUTH_STATE_AWAIT_REPLY,
425 	NVME_QPAIR_AUTH_STATE_AWAIT_SUCCESS1,
426 	NVME_QPAIR_AUTH_STATE_AWAIT_SUCCESS2,
427 	NVME_QPAIR_AUTH_STATE_AWAIT_FAILURE2,
428 	NVME_QPAIR_AUTH_STATE_DONE,
429 };
430 
431 /* Authentication transaction required (authreq.atr) */
432 #define NVME_QPAIR_AUTH_FLAG_ATR	(1 << 0)
433 /* Authentication and secure channel required (authreq.ascr) */
434 #define NVME_QPAIR_AUTH_FLAG_ASCR	(1 << 1)
435 
436 /* Maximum size of a digest */
437 #define NVME_AUTH_DIGEST_MAX_SIZE	64
438 
439 struct nvme_auth {
440 	/* State of the authentication */
441 	enum nvme_qpair_auth_state	state;
442 	/* Status of the authentication */
443 	int				status;
444 	/* Transaction ID */
445 	uint16_t			tid;
446 	/* Flags */
447 	uint32_t			flags;
448 	/* Selected hash function */
449 	uint8_t				hash;
450 	/* Buffer used for controller challenge */
451 	uint8_t				challenge[NVME_AUTH_DIGEST_MAX_SIZE];
452 };
453 
454 struct spdk_nvme_qpair {
455 	struct spdk_nvme_ctrlr			*ctrlr;
456 
457 	uint16_t				id;
458 
459 	uint8_t					qprio: 2;
460 
461 	uint8_t					state: 3;
462 
463 	uint8_t					async: 1;
464 
465 	uint8_t					is_new_qpair: 1;
466 
467 	uint8_t					abort_dnr: 1;
468 	/*
469 	 * Members for handling IO qpair deletion inside of a completion context.
470 	 * These are specifically defined as single bits, so that they do not
471 	 *  push this data structure out to another cacheline.
472 	 */
473 	uint8_t					in_completion_context: 1;
474 	uint8_t					delete_after_completion_context: 1;
475 
476 	/*
477 	 * Set when no deletion notification is needed. For example, the process
478 	 * which allocated this qpair exited unexpectedly.
479 	 */
480 	uint8_t					no_deletion_notification_needed: 1;
481 
482 	uint8_t					last_fuse: 2;
483 
484 	uint8_t					transport_failure_reason: 3;
485 	uint8_t					last_transport_failure_reason: 3;
486 
487 	/* The user is destroying qpair */
488 	uint8_t					destroy_in_progress: 1;
489 
490 	/* Number of I/Os outstanding at the transport level */
491 	uint16_t				queue_depth;
492 
493 	enum spdk_nvme_transport_type		trtype;
494 
495 	uint32_t				num_outstanding_reqs;
496 
497 	/* request object used only for this qpair's FABRICS/CONNECT command (if needed) */
498 	struct nvme_request			*reserved_req;
499 
500 	STAILQ_HEAD(, nvme_request)		free_req;
501 	STAILQ_HEAD(, nvme_request)		queued_req;
502 
503 	/* List entry for spdk_nvme_transport_poll_group::qpairs */
504 	STAILQ_ENTRY(spdk_nvme_qpair)		poll_group_stailq;
505 
506 	/** Commands whose opcode is in this list will return an error */
507 	TAILQ_HEAD(, nvme_error_cmd)		err_cmd_head;
508 	/** Requests in this list will return an error */
509 	STAILQ_HEAD(, nvme_request)		err_req_head;
510 
511 	struct spdk_nvme_ctrlr_process		*active_proc;
512 
513 	struct spdk_nvme_transport_poll_group	*poll_group;
514 
515 	void					*poll_group_tailq_head;
516 
517 	const struct spdk_nvme_transport	*transport;
518 
519 	/* Entries below here are not touched in the main I/O path. */
520 
521 	struct nvme_completion_poll_status	*poll_status;
522 	enum nvme_qpair_connect_state		connect_state;
523 
524 	/* List entry for spdk_nvme_ctrlr::active_io_qpairs */
525 	TAILQ_ENTRY(spdk_nvme_qpair)		tailq;
526 
527 	/* List entry for spdk_nvme_ctrlr_process::allocated_io_qpairs */
528 	TAILQ_ENTRY(spdk_nvme_qpair)		per_process_tailq;
529 
530 	STAILQ_HEAD(, nvme_request)		aborting_queued_req;
531 
532 	void					*req_buf;
533 
534 	/* In-band authentication state */
535 	struct nvme_auth			auth;
536 };
537 
538 struct spdk_nvme_poll_group {
539 	void						*ctx;
540 	struct spdk_nvme_accel_fn_table			accel_fn_table;
541 	STAILQ_HEAD(, spdk_nvme_transport_poll_group)	tgroups;
542 	bool						in_process_completions;
543 };
544 
545 struct spdk_nvme_transport_poll_group {
546 	struct spdk_nvme_poll_group			*group;
547 	const struct spdk_nvme_transport		*transport;
548 	STAILQ_HEAD(, spdk_nvme_qpair)			connected_qpairs;
549 	STAILQ_HEAD(, spdk_nvme_qpair)			disconnected_qpairs;
550 	STAILQ_ENTRY(spdk_nvme_transport_poll_group)	link;
551 	uint32_t					num_connected_qpairs;
552 };
553 
554 struct spdk_nvme_ns {
555 	struct spdk_nvme_ctrlr		*ctrlr;
556 	uint32_t			sector_size;
557 
558 	/*
559 	 * Size of data transferred as part of each block,
560 	 * including metadata if FLBAS indicates the metadata is transferred
561 	 * as part of the data buffer at the end of each LBA.
562 	 */
563 	uint32_t			extended_lba_size;
564 
565 	uint32_t			md_size;
566 	uint32_t			pi_type;
567 	uint32_t			sectors_per_max_io;
568 	uint32_t			sectors_per_max_io_no_md;
569 	uint32_t			sectors_per_stripe;
570 	uint32_t			id;
571 	uint16_t			flags;
572 	bool				active;
573 
574 	/* Command Set Identifier */
575 	enum spdk_nvme_csi		csi;
576 
577 	/* Namespace Identification Descriptor List (CNS = 03h) */
578 	uint8_t				id_desc_list[4096];
579 
580 	uint32_t			ana_group_id;
581 	enum spdk_nvme_ana_state	ana_state;
582 
583 	/* Identify Namespace data. */
584 	struct spdk_nvme_ns_data	nsdata;
585 
586 	/* Zoned Namespace Command Set Specific Identify Namespace data. */
587 	struct spdk_nvme_zns_ns_data	*nsdata_zns;
588 
589 	RB_ENTRY(spdk_nvme_ns)		node;
590 };
591 
592 /**
593  * State of struct spdk_nvme_ctrlr (in particular, during initialization).
594  */
595 enum nvme_ctrlr_state {
596 	/**
597 	 * Wait before initializing the controller.
598 	 */
599 	NVME_CTRLR_STATE_INIT_DELAY,
600 
601 	/**
602 	 * Connect the admin queue.
603 	 */
604 	NVME_CTRLR_STATE_CONNECT_ADMINQ,
605 
606 	/**
607 	 * Controller initialization has not started yet.
608 	 */
609 	NVME_CTRLR_STATE_INIT = NVME_CTRLR_STATE_CONNECT_ADMINQ,
610 
611 	/**
612 	 * Waiting for admin queue to connect.
613 	 */
614 	NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ,
615 
616 	/**
617 	 * Read Version (VS) register.
618 	 */
619 	NVME_CTRLR_STATE_READ_VS,
620 
621 	/**
622 	 * Waiting for Version (VS) register to be read.
623 	 */
624 	NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS,
625 
626 	/**
627 	 * Read Capabilities (CAP) register.
628 	 */
629 	NVME_CTRLR_STATE_READ_CAP,
630 
631 	/**
632 	 * Waiting for Capabilities (CAP) register to be read.
633 	 */
634 	NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP,
635 
636 	/**
637 	 * Check EN to prepare for controller initialization.
638 	 */
639 	NVME_CTRLR_STATE_CHECK_EN,
640 
641 	/**
642 	 * Waiting for CC to be read as part of EN check.
643 	 */
644 	NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC,
645 
646 	/**
647 	 * Waiting for CSTS.RDY to transition from 0 to 1 so that CC.EN may be set to 0.
648 	 */
649 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
650 
651 	/**
652 	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 1.
653 	 */
654 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
655 
656 	/**
657 	 * Disabling the controller by setting CC.EN to 0.
658 	 */
659 	NVME_CTRLR_STATE_SET_EN_0,
660 
661 	/**
662 	 * Waiting for the CC register to be read as part of disabling the controller.
663 	 */
664 	NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC,
665 
666 	/**
667 	 * Waiting for CSTS.RDY to transition from 1 to 0 so that CC.EN may be set to 1.
668 	 */
669 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
670 
671 	/**
672 	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 0.
673 	 */
674 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS,
675 
676 	/**
677 	 * The controller is disabled. (CC.EN and CSTS.RDY are 0.)
678 	 */
679 	NVME_CTRLR_STATE_DISABLED,
680 
681 	/**
682 	 * Enable the controller by writing CC.EN to 1
683 	 */
684 	NVME_CTRLR_STATE_ENABLE,
685 
686 	/**
687 	 * Waiting for CC register to be written as part of enabling the controller.
688 	 */
689 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC,
690 
691 	/**
692 	 * Waiting for CSTS.RDY to transition from 0 to 1 after enabling the controller.
693 	 */
694 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
695 
696 	/**
697 	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 1.
698 	 */
699 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
700 
701 	/**
702 	 * Reset the Admin queue of the controller.
703 	 */
704 	NVME_CTRLR_STATE_RESET_ADMIN_QUEUE,
705 
706 	/**
707 	 * Identify Controller command will be sent to the controller.
708 	 */
709 	NVME_CTRLR_STATE_IDENTIFY,
710 
711 	/**
712 	 * Waiting for the Identify Controller command to be completed.
713 	 */
714 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
715 
716 	/**
717 	 * Configure AER of the controller.
718 	 */
719 	NVME_CTRLR_STATE_CONFIGURE_AER,
720 
721 	/**
722 	 * Waiting for the Configure AER to be completed.
723 	 */
724 	NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
725 
726 	/**
727 	 * Set Keep Alive Timeout of the controller.
728 	 */
729 	NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
730 
731 	/**
732 	 * Waiting for Set Keep Alive Timeout to be completed.
733 	 */
734 	NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
735 
736 	/**
737 	 * Get Identify I/O Command Set Specific Controller data structure.
738 	 */
739 	NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
740 
741 	/**
742 	 * Waiting for Identify I/O Command Set Specific Controller command to be completed.
743 	 */
744 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC,
745 
746 	/**
747 	 * Get Commands Supported and Effects log page for the Zoned Namespace Command Set.
748 	 */
749 	NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG,
750 
751 	/**
752 	 * Waiting for the Get Log Page command to be completed.
753 	 */
754 	NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG,
755 
756 	/**
757 	 * Set Number of Queues of the controller.
758 	 */
759 	NVME_CTRLR_STATE_SET_NUM_QUEUES,
760 
761 	/**
762 	 * Waiting for Set Num of Queues command to be completed.
763 	 */
764 	NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
765 
766 	/**
767 	 * Get active Namespace list of the controller.
768 	 */
769 	NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
770 
771 	/**
772 	 * Waiting for the Identify Active Namespace commands to be completed.
773 	 */
774 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS,
775 
776 	/**
777 	 * Get Identify Namespace Data structure for each NS.
778 	 */
779 	NVME_CTRLR_STATE_IDENTIFY_NS,
780 
781 	/**
782 	 * Waiting for the Identify Namespace commands to be completed.
783 	 */
784 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
785 
786 	/**
787 	 * Get Identify Namespace Identification Descriptors.
788 	 */
789 	NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
790 
791 	/**
792 	 * Get Identify I/O Command Set Specific Namespace data structure for each NS.
793 	 */
794 	NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
795 
796 	/**
797 	 * Waiting for the Identify I/O Command Set Specific Namespace commands to be completed.
798 	 */
799 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,
800 
801 	/**
802 	 * Waiting for the Identify Namespace Identification
803 	 * Descriptors to be completed.
804 	 */
805 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
806 
807 	/**
808 	 * Set supported log pages of the controller.
809 	 */
810 	NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
811 
812 	/**
813 	 * Set supported log pages of INTEL controller.
814 	 */
815 	NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES,
816 
817 	/**
818 	 * Waiting for supported log pages of INTEL controller.
819 	 */
820 	NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES,
821 
822 	/**
823 	 * Set supported features of the controller.
824 	 */
825 	NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
826 
827 	/**
828 	 * Set Doorbell Buffer Config of the controller.
829 	 */
830 	NVME_CTRLR_STATE_SET_DB_BUF_CFG,
831 
832 	/**
833 	 * Waiting for Doorbell Buffer Config to be completed.
834 	 */
835 	NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
836 
837 	/**
838 	 * Set Host ID of the controller.
839 	 */
840 	NVME_CTRLR_STATE_SET_HOST_ID,
841 
842 	/**
843 	 * Waiting for Set Host ID to be completed.
844 	 */
845 	NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
846 
847 	/**
848 	 * Let transport layer do its part of initialization.
849 	 */
850 	NVME_CTRLR_STATE_TRANSPORT_READY,
851 
852 	/**
853 	 * Controller initialization has completed and the controller is ready.
854 	 */
855 	NVME_CTRLR_STATE_READY,
856 
857 	/**
858 	 * Controller initialization has an error.
859 	 */
860 	NVME_CTRLR_STATE_ERROR,
861 
862 	/**
863 	 * Admin qpair was disconnected, controller needs to be re-initialized
864 	 */
865 	NVME_CTRLR_STATE_DISCONNECTED,
866 };
867 
868 #define NVME_TIMEOUT_INFINITE		0
869 #define NVME_TIMEOUT_KEEP_EXISTING	UINT64_MAX
870 
871 struct spdk_nvme_ctrlr_aer_completion_list {
872 	struct spdk_nvme_cpl	cpl;
873 	STAILQ_ENTRY(spdk_nvme_ctrlr_aer_completion_list) link;
874 };
875 
876 /*
877  * Used to track properties for all processes accessing the controller.
878  */
879 struct spdk_nvme_ctrlr_process {
880 	/** Whether it is the primary process  */
881 	bool						is_primary;
882 
883 	/** Process ID */
884 	pid_t						pid;
885 
886 	/** Active admin requests to be completed */
887 	STAILQ_HEAD(, nvme_request)			active_reqs;
888 
889 	TAILQ_ENTRY(spdk_nvme_ctrlr_process)		tailq;
890 
891 	/** Per process PCI device handle */
892 	struct spdk_pci_device				*devhandle;
893 
894 	/** Reference count to track the number of attachments to this controller. */
895 	int						ref;
896 
897 	/** Allocated IO qpairs */
898 	TAILQ_HEAD(, spdk_nvme_qpair)			allocated_io_qpairs;
899 
900 	spdk_nvme_aer_cb				aer_cb_fn;
901 	void						*aer_cb_arg;
902 
903 	/**
904 	 * A function pointer to timeout callback function
905 	 */
906 	spdk_nvme_timeout_cb		timeout_cb_fn;
907 	void				*timeout_cb_arg;
908 	/** separate timeout values for io vs. admin reqs */
909 	uint64_t			timeout_io_ticks;
910 	uint64_t			timeout_admin_ticks;
911 
912 	/** List to publish AENs to all procs in multiprocess setup */
913 	STAILQ_HEAD(, spdk_nvme_ctrlr_aer_completion_list)      async_events;
914 };
915 
916 struct nvme_register_completion {
917 	struct spdk_nvme_cpl			cpl;
918 	uint64_t				value;
919 	spdk_nvme_reg_cb			cb_fn;
920 	void					*cb_ctx;
921 	STAILQ_ENTRY(nvme_register_completion)	stailq;
922 	pid_t					pid;
923 };
924 
925 struct spdk_nvme_ctrlr {
926 	/* Hot data (accessed in I/O path) starts here. */
927 
928 	/* Tree of namespaces */
929 	RB_HEAD(nvme_ns_tree, spdk_nvme_ns)	ns;
930 
931 	/* The number of active namespaces */
932 	uint32_t			active_ns_count;
933 
934 	bool				is_removed;
935 
936 	bool				is_resetting;
937 
938 	bool				is_failed;
939 
940 	bool				is_destructed;
941 
942 	bool				timeout_enabled;
943 
944 	/* The application is preparing to reset the controller.  Transports
945 	 * can use this to skip unnecessary parts of the qpair deletion process,
946 	 * for example the DELETE_SQ/CQ commands.
947 	 */
948 	bool				prepare_for_reset;
949 
950 	bool				is_disconnecting;
951 
952 	bool				needs_io_msg_update;
953 
954 	uint16_t			max_sges;
955 
956 	uint16_t			cntlid;
957 
958 	/** Controller support flags */
959 	uint64_t			flags;
960 
961 	/** NVMEoF in-capsule data size in bytes */
962 	uint32_t			ioccsz_bytes;
963 
964 	/** NVMEoF in-capsule data offset in 16 byte units */
965 	uint16_t			icdoff;
966 
967 	/* Cold data (not accessed in normal I/O path) is after this point. */
968 
969 	struct spdk_nvme_transport_id	trid;
970 
971 	union spdk_nvme_cap_register	cap;
972 	union spdk_nvme_vs_register	vs;
973 
974 	int				state;
975 	uint64_t			state_timeout_tsc;
976 
977 	uint64_t			next_keep_alive_tick;
978 	uint64_t			keep_alive_interval_ticks;
979 
980 	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
981 
982 	/** All the log pages supported */
983 	bool				log_page_supported[256];
984 
985 	/** All the features supported */
986 	bool				feature_supported[256];
987 
988 	/** maximum i/o size in bytes */
989 	uint32_t			max_xfer_size;
990 
991 	/** minimum page size supported by this controller in bytes */
992 	uint32_t			min_page_size;
993 
994 	/** selected memory page size for this controller in bytes */
995 	uint32_t			page_size;
996 
997 	uint32_t			num_aers;
998 	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];
999 
1000 	/** guards access to the controller itself, including admin queues */
1001 	pthread_mutex_t			ctrlr_lock;
1002 
1003 	struct spdk_nvme_qpair		*adminq;
1004 
1005 	/** shadow doorbell buffer */
1006 	uint32_t			*shadow_doorbell;
1007 	/** eventidx buffer */
1008 	uint32_t			*eventidx;
1009 
1010 	/**
1011 	 * Identify Controller data.
1012 	 */
1013 	struct spdk_nvme_ctrlr_data	cdata;
1014 
1015 	/**
1016 	 * Zoned Namespace Command Set Specific Identify Controller data.
1017 	 */
1018 	struct spdk_nvme_zns_ctrlr_data	*cdata_zns;
1019 
1020 	struct spdk_bit_array		*free_io_qids;
1021 	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
1022 
1023 	struct spdk_nvme_ctrlr_opts	opts;
1024 
1025 	uint64_t			quirks;
1026 
1027 	/* Extra sleep time during controller initialization */
1028 	uint64_t			sleep_timeout_tsc;
1029 
1030 	/** Track all the processes that manage this controller */
1031 	TAILQ_HEAD(, spdk_nvme_ctrlr_process)	active_procs;
1032 
1033 
1034 	STAILQ_HEAD(, nvme_request)	queued_aborts;
1035 	uint32_t			outstanding_aborts;
1036 
1037 	/* CB to notify the user when the ctrlr is removed/failed. */
1038 	spdk_nvme_remove_cb			remove_cb;
1039 	void					*cb_ctx;
1040 
1041 	struct spdk_nvme_qpair		*external_io_msgs_qpair;
1042 	pthread_mutex_t			external_io_msgs_lock;
1043 	struct spdk_ring		*external_io_msgs;
1044 
1045 	STAILQ_HEAD(, nvme_io_msg_producer) io_producers;
1046 
1047 	struct spdk_nvme_ana_page		*ana_log_page;
1048 	struct spdk_nvme_ana_group_descriptor	*copied_ana_desc;
1049 	uint32_t				ana_log_page_size;
1050 
1051 	/* scratchpad pointer that can be used to send data between two NVME_CTRLR_STATEs */
1052 	void				*tmp_ptr;
1053 
1054 	/* maximum zone append size in bytes */
1055 	uint32_t			max_zone_append_size;
1056 
1057 	/* PMR size in bytes */
1058 	uint64_t			pmr_size;
1059 
1060 	/* Boot Partition Info */
1061 	enum nvme_bp_write_state	bp_ws;
1062 	uint32_t			bpid;
1063 	spdk_nvme_cmd_cb		bp_write_cb_fn;
1064 	void				*bp_write_cb_arg;
1065 
1066 	/* Firmware Download */
1067 	void				*fw_payload;
1068 	unsigned int			fw_size_remaining;
1069 	unsigned int			fw_offset;
1070 	unsigned int			fw_transfer_size;
1071 
1072 	/* Completed register operations */
1073 	STAILQ_HEAD(, nvme_register_completion)	register_operations;
1074 
1075 	union spdk_nvme_cc_register		process_init_cc;
1076 
1077 	/* Authentication transaction ID */
1078 	uint16_t				auth_tid;
1079 	/* Authentication sequence number */
1080 	uint32_t				auth_seqnum;
1081 };
1082 
1083 struct spdk_nvme_probe_ctx {
1084 	struct spdk_nvme_transport_id		trid;
1085 	void					*cb_ctx;
1086 	spdk_nvme_probe_cb			probe_cb;
1087 	spdk_nvme_attach_cb			attach_cb;
1088 	spdk_nvme_remove_cb			remove_cb;
1089 	TAILQ_HEAD(, spdk_nvme_ctrlr)		init_ctrlrs;
1090 };
1091 
1092 typedef void (*nvme_ctrlr_detach_cb)(struct spdk_nvme_ctrlr *ctrlr);
1093 
1094 enum nvme_ctrlr_detach_state {
1095 	NVME_CTRLR_DETACH_SET_CC,
1096 	NVME_CTRLR_DETACH_CHECK_CSTS,
1097 	NVME_CTRLR_DETACH_GET_CSTS,
1098 	NVME_CTRLR_DETACH_GET_CSTS_DONE,
1099 };
1100 
1101 struct nvme_ctrlr_detach_ctx {
1102 	struct spdk_nvme_ctrlr			*ctrlr;
1103 	nvme_ctrlr_detach_cb			cb_fn;
1104 	uint64_t				shutdown_start_tsc;
1105 	uint32_t				shutdown_timeout_ms;
1106 	bool					shutdown_complete;
1107 	enum nvme_ctrlr_detach_state		state;
1108 	union spdk_nvme_csts_register		csts;
1109 	TAILQ_ENTRY(nvme_ctrlr_detach_ctx)	link;
1110 };
1111 
1112 struct spdk_nvme_detach_ctx {
1113 	TAILQ_HEAD(, nvme_ctrlr_detach_ctx)	head;
1114 };
1115 
1116 struct nvme_driver {
1117 	pthread_mutex_t			lock;
1118 
1119 	/** Multi-process shared attached controller list */
1120 	TAILQ_HEAD(, spdk_nvme_ctrlr)	shared_attached_ctrlrs;
1121 
1122 	bool				initialized;
1123 	struct spdk_uuid		default_extended_host_id;
1124 
1125 	/** netlink socket fd for hotplug messages */
1126 	int				hotplug_fd;
1127 };
1128 
1129 #define nvme_ns_cmd_get_ext_io_opt(opts, field, defval) \
1130        ((opts) != NULL && offsetof(struct spdk_nvme_ns_cmd_ext_io_opts, field) + \
1131         sizeof((opts)->field) <= (opts)->size ? (opts)->field : (defval))
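
/*
 * Illustrative sketch (assumes the caller passes a structure with an io_flags
 * field, as spdk_nvme_ns_cmd_ext_io_opts does): read an extended option with
 * a default when the caller's structure is too small to contain the field:
 *
 *     uint32_t flags = nvme_ns_cmd_get_ext_io_opt(opts, io_flags, 0);
 *
 * The offsetof()/sizeof() comparison against opts->size keeps the macro from
 * reading past the end of a smaller structure built against an older ABI.
 */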
1132 
1133 extern struct nvme_driver *g_spdk_nvme_driver;
1134 
1135 int nvme_driver_init(void);
1136 
1137 #define nvme_delay		usleep
1138 
1139 static inline bool
1140 nvme_qpair_is_admin_queue(struct spdk_nvme_qpair *qpair)
1141 {
1142 	return qpair->id == 0;
1143 }
1144 
1145 static inline bool
1146 nvme_qpair_is_io_queue(struct spdk_nvme_qpair *qpair)
1147 {
1148 	return qpair->id != 0;
1149 }
1150 
1151 static inline int
1152 nvme_robust_mutex_lock(pthread_mutex_t *mtx)
1153 {
1154 	int rc = pthread_mutex_lock(mtx);
1155 
1156 #ifndef __FreeBSD__
1157 	if (rc == EOWNERDEAD) {
1158 		rc = pthread_mutex_consistent(mtx);
1159 	}
1160 #endif
1161 
1162 	return rc;
1163 }
1164 
1165 static inline int
1166 nvme_robust_mutex_unlock(pthread_mutex_t *mtx)
1167 {
1168 	return pthread_mutex_unlock(mtx);
1169 }
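
/*
 * Illustrative sketch: these wrappers are intended for process-shared robust
 * mutexes (e.g. ctrlr->ctrlr_lock), where a previous owner may have died while
 * holding the lock:
 *
 *     nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
 *     ... access shared controller state, e.g. the admin queue ...
 *     nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 *
 * On EOWNERDEAD, pthread_mutex_consistent() marks the mutex usable again so
 * surviving processes are not permanently blocked.
 */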
1170 
1171 /* Poll group management functions. */
1172 int nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
1173 int nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1174 
1175 /* Admin functions */
1176 int	nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr,
1177 				uint8_t cns, uint16_t cntid, uint32_t nsid,
1178 				uint8_t csi, void *payload, size_t payload_size,
1179 				spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1180 int	nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
1181 				      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn,
1182 				      void *cb_arg);
1183 int	nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
1184 				      spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1185 int	nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
1186 		union spdk_nvme_feat_async_event_configuration config,
1187 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1188 int	nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
1189 				   spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1190 int	nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1191 				 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1192 int	nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1193 				 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1194 int	nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
1195 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1196 int	nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr,
1197 		uint64_t prp1, uint64_t prp2,
1198 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1199 int	nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
1200 				 void *cb_arg);
1201 int	nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1202 			      struct spdk_nvme_format *format, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1203 int	nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
1204 				 const struct spdk_nvme_fw_commit *fw_commit,
1205 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1206 int	nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
1207 		uint32_t size, uint32_t offset, void *payload,
1208 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1209 int	nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1210 				struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
1211 				spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1212 void	nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl);
1213 int	nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
1214 				 struct nvme_completion_poll_status *status);
1215 int	nvme_wait_for_completion_robust_lock(struct spdk_nvme_qpair *qpair,
1216 		struct nvme_completion_poll_status *status,
1217 		pthread_mutex_t *robust_mutex);
1218 int	nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
1219 		struct nvme_completion_poll_status *status,
1220 		uint64_t timeout_in_usecs);
1221 int	nvme_wait_for_completion_robust_lock_timeout(struct spdk_nvme_qpair *qpair,
1222 		struct nvme_completion_poll_status *status,
1223 		pthread_mutex_t *robust_mutex,
1224 		uint64_t timeout_in_usecs);
1225 int	nvme_wait_for_completion_robust_lock_timeout_poll(struct spdk_nvme_qpair *qpair,
1226 		struct nvme_completion_poll_status *status,
1227 		pthread_mutex_t *robust_mutex);
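
/*
 * Illustrative sketch (hypothetical admin command; buf is a caller-allocated
 * identify buffer) of the synchronous pattern built on nvme_completion_poll_cb()
 * and the wait helpers above:
 *
 *     struct nvme_completion_poll_status *status = calloc(1, sizeof(*status));
 *     int rc;
 *
 *     rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0, 0,
 *                                  buf, sizeof(struct spdk_nvme_ctrlr_data),
 *                                  nvme_completion_poll_cb, status);
 *     if (rc == 0) {
 *             rc = nvme_wait_for_completion(ctrlr->adminq, status);
 *     }
 *     if (!status->timed_out) {
 *             free(status);
 *     }
 *
 * nvme_completion_poll_cb() records the completion and sets status->done; per
 * the timed_out flag documented above, a timed-out status object is instead
 * freed from the completion callback when the late completion finally arrives.
 */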
1228 
1229 struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr,
1230 		pid_t pid);
1231 struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr);
1232 int	nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle);
1233 void	nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr);
1234 struct spdk_pci_device *nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr);
1235 
1236 int	nvme_ctrlr_probe(const struct spdk_nvme_transport_id *trid,
1237 			 struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle);
1238 
1239 int	nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr);
1240 void	nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr);
1241 void	nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1242 void	nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr,
1243 				  struct nvme_ctrlr_detach_ctx *ctx);
1244 int	nvme_ctrlr_destruct_poll_async(struct spdk_nvme_ctrlr *ctrlr,
1245 				       struct nvme_ctrlr_detach_ctx *ctx);
1246 void	nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove);
1247 int	nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr);
1248 void	nvme_ctrlr_disable(struct spdk_nvme_ctrlr *ctrlr);
1249 int	nvme_ctrlr_disable_poll(struct spdk_nvme_ctrlr *ctrlr);
1250 void	nvme_ctrlr_connected(struct spdk_nvme_probe_ctx *probe_ctx,
1251 			     struct spdk_nvme_ctrlr *ctrlr);
1252 
1253 int	nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
1254 					struct nvme_request *req);
1255 int	nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap);
1256 int	nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs);
1257 int	nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz);
1258 int	nvme_ctrlr_get_pmrcap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_pmrcap_register *pmrcap);
1259 int	nvme_ctrlr_get_bpinfo(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bpinfo_register *bpinfo);
1260 int	nvme_ctrlr_set_bprsel(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bprsel_register *bprsel);
1261 int	nvme_ctrlr_set_bpmbl(struct spdk_nvme_ctrlr *ctrlr, uint64_t bpmbl_value);
1262 bool	nvme_ctrlr_multi_iocs_enabled(struct spdk_nvme_ctrlr *ctrlr);
1263 void nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1264 void nvme_ctrlr_abort_queued_aborts(struct spdk_nvme_ctrlr *ctrlr);
1265 int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
1266 		    struct spdk_nvme_ctrlr *ctrlr,
1267 		    enum spdk_nvme_qprio qprio,
1268 		    uint32_t num_requests, bool async);
1269 void	nvme_qpair_deinit(struct spdk_nvme_qpair *qpair);
1270 void	nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair);
1271 int	nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair,
1272 				  struct nvme_request *req);
1273 void	nvme_qpair_abort_all_queued_reqs(struct spdk_nvme_qpair *qpair);
1274 uint32_t nvme_qpair_abort_queued_reqs_with_cbarg(struct spdk_nvme_qpair *qpair, void *cmd_cb_arg);
1275 void	nvme_qpair_abort_queued_reqs(struct spdk_nvme_qpair *qpair);
1276 void	nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_requests);
1277 int	nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr);
1278 void	nvme_ns_set_identify_data(struct spdk_nvme_ns *ns);
1279 void	nvme_ns_set_id_desc_list_data(struct spdk_nvme_ns *ns);
1280 void	nvme_ns_free_zns_specific_data(struct spdk_nvme_ns *ns);
1281 void	nvme_ns_free_iocs_specific_data(struct spdk_nvme_ns *ns);
1282 bool	nvme_ns_has_supported_iocs_specific_data(struct spdk_nvme_ns *ns);
1283 int	nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
1284 			  struct spdk_nvme_ctrlr *ctrlr);
1285 void	nvme_ns_destruct(struct spdk_nvme_ns *ns);
1286 int	nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1287 					void *buffer, void *metadata, uint64_t zslba,
1288 					uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1289 					uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag);
1290 int nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1291 				     uint64_t zslba, uint32_t lba_count,
1292 				     spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
1293 				     spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1294 				     spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
1295 				     uint16_t apptag_mask, uint16_t apptag);
1296 
1297 int	nvme_fabric_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
1298 int	nvme_fabric_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
1299 int	nvme_fabric_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
1300 int	nvme_fabric_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
1301 int	nvme_fabric_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1302 		uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1303 int	nvme_fabric_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1304 		uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1305 int	nvme_fabric_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1306 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1307 int	nvme_fabric_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1308 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1309 int	nvme_fabric_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
1310 int	nvme_fabric_ctrlr_discover(struct spdk_nvme_ctrlr *ctrlr,
1311 				   struct spdk_nvme_probe_ctx *probe_ctx);
1312 int	nvme_fabric_qpair_connect(struct spdk_nvme_qpair *qpair, uint32_t num_entries);
1313 int	nvme_fabric_qpair_connect_async(struct spdk_nvme_qpair *qpair, uint32_t num_entries);
1314 int	nvme_fabric_qpair_connect_poll(struct spdk_nvme_qpair *qpair);
1315 int	nvme_fabric_qpair_authenticate_async(struct spdk_nvme_qpair *qpair);
1316 int	nvme_fabric_qpair_authenticate_poll(struct spdk_nvme_qpair *qpair);
1317 
1318 typedef int (*spdk_nvme_parse_ana_log_page_cb)(
1319 	const struct spdk_nvme_ana_group_descriptor *desc, void *cb_arg);
1320 int	nvme_ctrlr_parse_ana_log_page(struct spdk_nvme_ctrlr *ctrlr,
1321 				      spdk_nvme_parse_ana_log_page_cb cb_fn, void *cb_arg);
1322 
1323 static inline void
1324 nvme_request_clear(struct nvme_request *req)
1325 {
1326 	/*
1327 	 * Only memset/zero fields that need it.  All other fields
1328 	 *  will be initialized appropriately either later in this
1329 	 *  function, or before they are needed later in the
1330 	 *  submission path.  For example, the children
1331 	 *  TAILQ_ENTRY and following members are
1332 	 *  only used as part of I/O splitting so we avoid
1333 	 *  memsetting them until it is actually needed.
1334 	 *  They will be initialized in nvme_request_add_child()
1335 	 *  if the request is split.
1336 	 */
1337 	memset(req, 0, offsetof(struct nvme_request, payload_size));
1338 }
1339 
1340 #define NVME_INIT_REQUEST(req, _cb_fn, _cb_arg, _payload, _payload_size, _md_size)	\
1341 	do {						\
1342 		nvme_request_clear(req);		\
1343 		req->cb_fn = _cb_fn;			\
1344 		req->cb_arg = _cb_arg;			\
1345 		req->payload = _payload;		\
1346 		req->payload_size = _payload_size;	\
1347 		req->md_size = _md_size;		\
1348 		req->pid = g_spdk_nvme_pid;		\
1349 		req->submit_tick = 0;			\
1350 		req->accel_sequence = NULL;		\
1351 	} while (0);
1352 
1353 static inline struct nvme_request *
1354 nvme_allocate_request(struct spdk_nvme_qpair *qpair,
1355 		      const struct nvme_payload *payload, uint32_t payload_size, uint32_t md_size,
1356 		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1357 {
1358 	struct nvme_request *req;
1359 
1360 	req = STAILQ_FIRST(&qpair->free_req);
1361 	if (req == NULL) {
1362 		return req;
1363 	}
1364 
1365 	STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);
1366 	qpair->num_outstanding_reqs++;
1367 
1368 	NVME_INIT_REQUEST(req, cb_fn, cb_arg, *payload, payload_size, md_size);
1369 
1370 	return req;
1371 }
1372 
1373 static inline struct nvme_request *
1374 nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair,
1375 			     void *buffer, uint32_t payload_size,
1376 			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1377 {
1378 	struct nvme_payload payload;
1379 
1380 	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
1381 
1382 	return nvme_allocate_request(qpair, &payload, payload_size, 0, cb_fn, cb_arg);
1383 }
1384 
1385 static inline struct nvme_request *
1386 nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1387 {
1388 	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
1389 }
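
/*
 * Illustrative sketch (modeled on how simple commands are built in this
 * driver; my_cb/my_cb_arg are hypothetical): allocate a request without a
 * data payload, fill in the command, and hand it to the qpair:
 *
 *     struct nvme_request *req;
 *
 *     req = nvme_allocate_request_null(qpair, my_cb, my_cb_arg);
 *     if (req == NULL) {
 *             return -ENOMEM;
 *     }
 *     req->cmd.opc = SPDK_NVME_OPC_FLUSH;
 *     req->cmd.nsid = ns->id;
 *     return nvme_qpair_submit_request(qpair, req);
 */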
1390 
1391 struct nvme_request *nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
1392 		void *buffer, uint32_t payload_size,
1393 		spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller);
1394 
1395 static inline void
1396 _nvme_free_request(struct nvme_request *req, struct spdk_nvme_qpair *qpair)
1397 {
1398 	assert(req != NULL);
1399 	assert(req->num_children == 0);
1400 	assert(qpair != NULL);
1401 
1402 	/* The reserved_req does not go in the free_req STAILQ - it is
1403 	 * saved only for use with a FABRICS/CONNECT command.
1404 	 */
1405 	if (spdk_likely(qpair->reserved_req != req)) {
1406 		STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
1407 
1408 		assert(qpair->num_outstanding_reqs > 0);
1409 		qpair->num_outstanding_reqs--;
1410 	}
1411 }
1412 
1413 static inline void
1414 nvme_free_request(struct nvme_request *req)
1415 {
1416 	_nvme_free_request(req, req->qpair);
1417 }
1418 
1419 static inline void
1420 nvme_complete_request(spdk_nvme_cmd_cb cb_fn, void *cb_arg, struct spdk_nvme_qpair *qpair,
1421 		      struct nvme_request *req, struct spdk_nvme_cpl *cpl)
1422 {
1423 	struct spdk_nvme_cpl            err_cpl;
1424 	struct nvme_error_cmd           *cmd;
1425 
1426 	if (spdk_unlikely(req->accel_sequence != NULL)) {
1427 		struct spdk_nvme_poll_group *pg = qpair->poll_group->group;
1428 
1429 		/* Transports are required to execute the sequence and clear req->accel_sequence.
1430 		 * If it's left non-NULL, it must mean the request has failed. */
1431 		assert(spdk_nvme_cpl_is_error(cpl));
1432 		pg->accel_fn_table.abort_sequence(req->accel_sequence);
1433 		req->accel_sequence = NULL;
1434 	}
1435 
1436 	/* Error injection at the completion path;
1437 	 * only inject for successfully completed commands
1438 	 */
1439 	if (spdk_unlikely(!TAILQ_EMPTY(&qpair->err_cmd_head) &&
1440 			  !spdk_nvme_cpl_is_error(cpl))) {
1441 		TAILQ_FOREACH(cmd, &qpair->err_cmd_head, link) {
1442 
1443 			if (cmd->do_not_submit) {
1444 				continue;
1445 			}
1446 
1447 			if ((cmd->opc == req->cmd.opc) && cmd->err_count) {
1448 
1449 				err_cpl = *cpl;
1450 				err_cpl.status.sct = cmd->status.sct;
1451 				err_cpl.status.sc = cmd->status.sc;
1452 
1453 				cpl = &err_cpl;
1454 				cmd->err_count--;
1455 				break;
1456 			}
1457 		}
1458 	}
1459 
1460 	/* For PCIe completions, we want to avoid touching the req itself to avoid
1461 	 * dependencies on loading those cachelines. So call the internal helper
1462 	 * function, using the qpair that was passed by the caller instead
1463 	 * of getting it from the req.
1464 	 */
1465 	_nvme_free_request(req, qpair);
1466 
1467 	if (spdk_likely(cb_fn)) {
1468 		cb_fn(cb_arg, cpl);
1469 	}
1470 }
1471 
1472 static inline void
1473 nvme_cleanup_user_req(struct nvme_request *req)
1474 {
1475 	if (req->user_buffer && req->payload_size) {
1476 		spdk_free(req->payload.contig_or_cb_arg);
1477 		req->user_buffer = NULL;
1478 	}
1479 
1480 	req->user_cb_arg = NULL;
1481 	req->user_cb_fn = NULL;
1482 }
1483 
1484 static inline void
1485 nvme_qpair_set_state(struct spdk_nvme_qpair *qpair, enum nvme_qpair_state state)
1486 {
1487 	qpair->state = state;
1488 	if (state == NVME_QPAIR_ENABLED) {
1489 		qpair->is_new_qpair = false;
1490 	}
1491 }
1492 
1493 static inline enum nvme_qpair_state
1494 nvme_qpair_get_state(struct spdk_nvme_qpair *qpair) {
1495 	return qpair->state;
1496 }
1497 
1498 static inline void
1499 nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child)
1500 {
1501 	assert(parent != NULL);
1502 	assert(child != NULL);
1503 	assert(child->parent == parent);
1504 	assert(parent->num_children != 0);
1505 
1506 	parent->num_children--;
1507 	child->parent = NULL;
1508 	TAILQ_REMOVE(&parent->children, child, child_tailq);
1509 }
1510 
1511 static inline void
1512 nvme_cb_complete_child(void *child_arg, const struct spdk_nvme_cpl *cpl)
1513 {
1514 	struct nvme_request *child = child_arg;
1515 	struct nvme_request *parent = child->parent;
1516 
1517 	nvme_request_remove_child(parent, child);
1518 
1519 	if (spdk_nvme_cpl_is_error(cpl)) {
1520 		memcpy(&parent->parent_status, cpl, sizeof(*cpl));
1521 	}
1522 
1523 	if (parent->num_children == 0) {
1524 		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
1525 				      parent, &parent->parent_status);
1526 	}
1527 }
1528 
1529 static inline void
1530 nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
1531 {
1532 	assert(parent->num_children != UINT16_MAX);
1533 
1534 	if (parent->num_children == 0) {
1535 		/*
1536 		 * Defer initialization of the children TAILQ since it falls
1537 		 *  on a separate cacheline.  This ensures we do not touch this
1538 		 *  cacheline except on request splitting cases, which are
1539 		 *  relatively rare.
1540 		 */
1541 		TAILQ_INIT(&parent->children);
1542 		parent->parent = NULL;
1543 		memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));
1544 	}
1545 
1546 	parent->num_children++;
1547 	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
1548 	child->parent = parent;
1549 	child->cb_fn = nvme_cb_complete_child;
1550 	child->cb_arg = child;
1551 }
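
/*
 * Simplified sketch of the split-request bookkeeping done by the helpers
 * above (the actual splitting logic lives in the ns_cmd/qpair code):
 *
 *     nvme_request_add_child(parent, child);  // redirects child->cb_fn to
 *                                             // nvme_cb_complete_child()
 *
 * Each completing child decrements parent->num_children and, on error, copies
 * its status into parent->parent_status; the parent's callback is invoked from
 * nvme_cb_complete_child() once the count reaches zero.
 */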
1552 
1553 static inline void
1554 nvme_request_free_children(struct nvme_request *req)
1555 {
1556 	struct nvme_request *child, *tmp;
1557 
1558 	if (req->num_children == 0) {
1559 		return;
1560 	}
1561 
1562 	/* free all child nvme_request */
1563 	TAILQ_FOREACH_SAFE(child, &req->children, child_tailq, tmp) {
1564 		nvme_request_remove_child(req, child);
1565 		nvme_request_free_children(child);
1566 		nvme_free_request(child);
1567 	}
1568 }
1569 
1570 int	nvme_request_check_timeout(struct nvme_request *req, uint16_t cid,
1571 				   struct spdk_nvme_ctrlr_process *active_proc, uint64_t now_tick);
1572 uint64_t nvme_get_quirks(const struct spdk_pci_id *id);
1573 
1574 int	nvme_robust_mutex_init_shared(pthread_mutex_t *mtx);
1575 int	nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx);
1576 
1577 bool	nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl);
1578 
1579 struct spdk_nvme_ctrlr *nvme_get_ctrlr_by_trid_unsafe(
1580 	const struct spdk_nvme_transport_id *trid);
1581 
1582 const struct spdk_nvme_transport *nvme_get_transport(const char *transport_name);
1583 const struct spdk_nvme_transport *nvme_get_first_transport(void);
1584 const struct spdk_nvme_transport *nvme_get_next_transport(const struct spdk_nvme_transport
1585 		*transport);
1586 void  nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr);
1587 
1588 /* Transport specific functions */
1589 struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
1590 		const struct spdk_nvme_ctrlr_opts *opts,
1591 		void *devhandle);
1592 int nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1593 int nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
1594 int nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr);
1595 int nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr);
1596 int nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
1597 int nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
1598 int nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
1599 int nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
1600 int nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1601 		uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1602 int nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1603 		uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1604 int nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1605 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1606 int nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1607 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1608 uint32_t nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr);
1609 uint16_t nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);
1610 struct spdk_nvme_qpair *nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1611 		uint16_t qid, const struct spdk_nvme_io_qpair_opts *opts);
1612 int nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr);
1613 void *nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
1614 int nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr);
1615 int nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr);
1616 int nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr);
1617 void *nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
1618 int nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr);
1619 void nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1620 		struct spdk_nvme_qpair *qpair);
1621 int nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1622 				       struct spdk_nvme_qpair *qpair);
1623 void nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1624 		struct spdk_nvme_qpair *qpair);
1625 void nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair);
1626 int nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
1627 		struct spdk_memory_domain **domains, int array_size);
1628 void nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair);
1629 int nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair);
1630 int nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req);
1631 int32_t nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair,
1632 		uint32_t max_completions);
1633 void nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair);
1634 int nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
1635 		int (*iter_fn)(struct nvme_request *req, void *arg),
1636 		void *arg);
1637 
1638 struct spdk_nvme_transport_poll_group *nvme_transport_poll_group_create(
1639 	const struct spdk_nvme_transport *transport);
1640 struct spdk_nvme_transport_poll_group *nvme_transport_qpair_get_optimal_poll_group(
1641 	const struct spdk_nvme_transport *transport,
1642 	struct spdk_nvme_qpair *qpair);
1643 int nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
1644 				  struct spdk_nvme_qpair *qpair);
1645 int nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
1646 				     struct spdk_nvme_qpair *qpair);
1647 int nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1648 int nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
1649 int64_t nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
1650 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
1651 int nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup);
1652 int nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
1653 					struct spdk_nvme_transport_poll_group_stat **stats);
1654 void nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
1655 		struct spdk_nvme_transport_poll_group_stat *stats);
1656 enum spdk_nvme_transport_type nvme_transport_get_trtype(const struct spdk_nvme_transport
1657 		*transport);
1658 /*
1659  * The ref-related functions below must be called with the global
1660  *  driver lock held to handle the multi-process case.
1661  *  Within these functions, the per-ctrlr ctrlr_lock is also
1662  *  acquired to handle the multi-thread case.
1663  */
1664 void	nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr);
1665 void	nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr);
1666 int	nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr);
1667 
1668 int	nvme_ctrlr_reinitialize_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
1669 int	nvme_parse_addr(struct sockaddr_storage *sa, int family,
1670 			const char *addr, const char *service, long int *port);
1671 
1672 static inline bool
1673 _is_page_aligned(uint64_t address, uint64_t page_size)
1674 {
1675 	return (address & (page_size - 1)) == 0;
1676 }
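
/*
 * Example: with page_size == 4096 (a power of two, as assumed here),
 * _is_page_aligned(0x201000, 4096) is true because the low 12 bits of the
 * address are zero, while _is_page_aligned(0x201010, 4096) is false.
 */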
1677 
1678 #endif /* __NVME_INTERNAL_H__ */
1679