xref: /spdk/lib/nvme/nvme_internal.h (revision 7025ceb9c119a6da0b6ee2013b6ae94b51fac2df)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2015 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #ifndef __NVME_INTERNAL_H__
8 #define __NVME_INTERNAL_H__
9 
10 #include "spdk/config.h"
11 #include "spdk/likely.h"
12 #include "spdk/stdinc.h"
13 
14 #include "spdk/nvme.h"
15 
16 #if defined(__i386__) || defined(__x86_64__)
17 #include <x86intrin.h>
18 #endif
19 
20 #include "spdk/queue.h"
21 #include "spdk/barrier.h"
22 #include "spdk/bit_array.h"
23 #include "spdk/mmio.h"
24 #include "spdk/pci_ids.h"
25 #include "spdk/util.h"
26 #include "spdk/memory.h"
27 #include "spdk/nvme_intel.h"
28 #include "spdk/nvmf_spec.h"
29 #include "spdk/tree.h"
30 #include "spdk/uuid.h"
31 
32 #include "spdk_internal/assert.h"
33 #include "spdk/log.h"
34 
35 extern pid_t g_spdk_nvme_pid;
36 
37 extern struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts;
38 
39 /*
40  * Some Intel devices support vendor-unique read latency log page even
41  * though the log page directory says otherwise.
42  */
43 #define NVME_INTEL_QUIRK_READ_LATENCY 0x1
44 
45 /*
46  * Some Intel devices support vendor-unique write latency log page even
47  * though the log page directory says otherwise.
48  */
49 #define NVME_INTEL_QUIRK_WRITE_LATENCY 0x2
50 
51 /*
52  * The controller needs a delay before the host starts checking device
53  * readiness, which is done by reading the NVME_CSTS_RDY bit.
54  */
55 #define NVME_QUIRK_DELAY_BEFORE_CHK_RDY	0x4
56 
57 /*
58  * The controller performs best when I/O is split on particular
59  * LBA boundaries.
60  */
61 #define NVME_INTEL_QUIRK_STRIPING 0x8
62 
63 /*
64  * The controller needs a delay after allocating an I/O queue pair
65  * before it is ready to accept I/O commands.
66  */
67 #define NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC 0x10
68 
69 /*
70  * Earlier NVMe devices do not indicate whether unmapped blocks
71  * will read all zeroes or not. This define indicates that the
72  * device does in fact read all zeroes after an unmap event
73  * device does in fact read all zeroes after an unmap event.
74 #define NVME_QUIRK_READ_ZERO_AFTER_DEALLOCATE 0x20
75 
76 /*
77  * The controller doesn't handle Identify value others than 0 or 1 correctly.
78  * The controller doesn't handle Identify CNS values other than 0 or 1 correctly.
79 #define NVME_QUIRK_IDENTIFY_CNS 0x40
80 
81 /*
82  * The controller supports the Open Channel command set if an additional
83  * condition is met, e.g. the first byte (value 0x1) in the vendor specific
84  * bits of the namespace identify structure is set.
85  */
86 #define NVME_QUIRK_OCSSD 0x80
87 
88 /*
89  * The controller has an Intel vendor ID but does not support Intel vendor-specific
90  * log pages.  This is primarily for QEMU emulated SSDs which report an Intel vendor
91  * ID but do not support these log pages.
92  */
93 #define NVME_INTEL_QUIRK_NO_LOG_PAGES 0x100
94 
95 /*
96  * The controller does not set SHST_COMPLETE in a reasonable amount of time.  This
97  * is primarily seen in virtual VMWare NVMe SSDs.  This quirk merely adds an additional
98  * error message noting that on VMWare NVMe SSDs, a shutdown timeout may be expected.
99  */
100 #define NVME_QUIRK_SHST_COMPLETE 0x200
101 
102 /*
103  * The controller requires an extra delay before starting the initialization process
104  * during attach.
105  */
106 #define NVME_QUIRK_DELAY_BEFORE_INIT 0x400
107 
108 /*
109  * Some SSDs exhibit poor performance with the default SPDK NVMe IO queue size.
110  * This quirk will increase the default to 1024 which matches other operating
111  * systems, at the cost of some extra memory usage.  Users can still override
112  * the increased default by changing the spdk_nvme_io_qpair_opts when allocating
113  * a new queue pair.
114  */
115 #define NVME_QUIRK_MINIMUM_IO_QUEUE_SIZE 0x800
116 
117 /**
118  * The maximum access width to PCI memory space is 8 bytes; don't use AVX2 or
119  * SSE instructions to optimize memory accesses (memcpy or memset) larger than
120  * 8 bytes.
121  */
122 #define NVME_QUIRK_MAXIMUM_PCI_ACCESS_WIDTH 0x1000
123 
124 /**
125  * The SSD does not support OPAL even though it sets the security bit in OACS.
126  */
127 #define NVME_QUIRK_OACS_SECURITY 0x2000
128 
129 /**
130  * Intel P55XX SSDs do not support the Dataset Management command with the SGL format,
131  * so use PRPs with DSM commands.
132  */
133 #define NVME_QUIRK_NO_SGL_FOR_DSM 0x4000
134 
135 /**
136  * Maximum Data Transfer Size (MDTS) excludes interleaved metadata.
137  */
138 #define NVME_QUIRK_MDTS_EXCLUDE_MD 0x8000
139 
140 /**
141  * Force the driver not to use SGLs even if the controller reports that it
142  * supports them.
143  */
144 #define NVME_QUIRK_NOT_USE_SGL 0x10000
145 
146 /*
147  * Some SSDs require the admin submission queue size to be an even
148  * multiple of 4KiB.
149  */
150 #define NVME_QUIRK_MINIMUM_ADMIN_QUEUE_SIZE 0x20000
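
/*
 * Illustrative note (not part of the driver): quirks are plain bit flags, so a
 * device's quirk set is the bitwise OR of the defines above, looked up by PCI ID
 * via nvme_get_quirks().  For example (hypothetical combination):
 *
 *	uint64_t quirks = NVME_INTEL_QUIRK_STRIPING | NVME_QUIRK_READ_ZERO_AFTER_DEALLOCATE;
 *
 *	if (quirks & NVME_INTEL_QUIRK_STRIPING) {
 *		... split I/O on the controller's preferred LBA boundaries ...
 *	}
 */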
151 
152 #define NVME_MAX_ASYNC_EVENTS	(8)
153 
154 #define NVME_MAX_ADMIN_TIMEOUT_IN_SECS	(30)
155 
156 /* Maximum log page size to fetch for AERs. */
157 #define NVME_MAX_AER_LOG_SIZE		(4096)
158 
159 /*
160  * NVME_MAX_IO_QUEUES in nvme_spec.h defines the 64K spec-limit, but this
161  *  define specifies the maximum number of queues this driver will actually
162  *  try to configure, if available.
163  */
164 #define DEFAULT_MAX_IO_QUEUES		(1024)
165 #define DEFAULT_ADMIN_QUEUE_SIZE	(32)
166 #define DEFAULT_IO_QUEUE_SIZE		(256)
167 #define DEFAULT_IO_QUEUE_SIZE_FOR_QUIRK	(1024) /* Matches Linux kernel driver */
168 
169 #define DEFAULT_IO_QUEUE_REQUESTS	(512)
170 
171 #define SPDK_NVME_DEFAULT_RETRY_COUNT	(4)
172 
173 #define SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED	(0)
174 #define SPDK_NVME_DEFAULT_TRANSPORT_ACK_TIMEOUT	SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED
175 
176 #define SPDK_NVME_TRANSPORT_TOS_DISABLED	(0)
177 
178 #define MIN_KEEP_ALIVE_TIMEOUT_IN_MS	(10000)
179 
180 /* We want to fit submission and completion rings each in a single 2MB
181  * hugepage to ensure physical address contiguity.
182  */
183 #define MAX_IO_QUEUE_ENTRIES		(VALUE_2MB / spdk_max( \
184 						sizeof(struct spdk_nvme_cmd), \
185 						sizeof(struct spdk_nvme_cpl)))
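
/*
 * Worked example (informational): with the standard NVMe entry sizes of a 64-byte
 * submission queue entry and a 16-byte completion queue entry, the larger of the
 * two is 64 bytes, so MAX_IO_QUEUE_ENTRIES evaluates to VALUE_2MB / 64 = 32768
 * entries per ring.
 */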
186 
187 /* Default timeout for fabrics connect commands. */
188 #ifdef DEBUG
189 #define NVME_FABRIC_CONNECT_COMMAND_TIMEOUT 0
190 #else
191 /* 500 millisecond timeout. */
192 #define NVME_FABRIC_CONNECT_COMMAND_TIMEOUT 500000
193 #endif
194 
195 /* This value indicates that a read from a PCIe register is invalid. This can happen when a device is no longer present. */
196 #define SPDK_NVME_INVALID_REGISTER_VALUE 0xFFFFFFFFu
197 
198 enum nvme_payload_type {
199 	NVME_PAYLOAD_TYPE_INVALID = 0,
200 
201 	/** nvme_request::u.payload.contig_buffer is valid for this request */
202 	NVME_PAYLOAD_TYPE_CONTIG,
203 
204 	/** nvme_request::u.sgl is valid for this request */
205 	NVME_PAYLOAD_TYPE_SGL,
206 };
207 
208 /** Boot partition write states */
209 enum nvme_bp_write_state {
210 	SPDK_NVME_BP_WS_DOWNLOADING	= 0x0,
211 	SPDK_NVME_BP_WS_DOWNLOADED	= 0x1,
212 	SPDK_NVME_BP_WS_REPLACE		= 0x2,
213 	SPDK_NVME_BP_WS_ACTIVATE	= 0x3,
214 };
215 
216 /**
217  * Descriptor for a request data payload.
218  */
219 struct nvme_payload {
220 	/**
221 	 * Functions for retrieving physical addresses for scattered payloads.
222 	 */
223 	spdk_nvme_req_reset_sgl_cb reset_sgl_fn;
224 	spdk_nvme_req_next_sge_cb next_sge_fn;
225 
226 	/**
227 	 * Extended IO options passed by the user
228 	 */
229 	struct spdk_nvme_ns_cmd_ext_io_opts *opts;
230 	/**
231 	 * If reset_sgl_fn == NULL, this is a contig payload, and contig_or_cb_arg contains the
232 	 * virtual memory address of a single virtually contiguous buffer.
233 	 *
234 	 * If reset_sgl_fn != NULL, this is a SGL payload, and contig_or_cb_arg contains the
235 	 * cb_arg that will be passed to the SGL callback functions.
236 	 */
237 	void *contig_or_cb_arg;
238 
239 	/** Virtual memory address of a single virtually contiguous metadata buffer */
240 	void *md;
241 };
242 
243 #define NVME_PAYLOAD_CONTIG(contig_, md_) \
244 	(struct nvme_payload) { \
245 		.reset_sgl_fn = NULL, \
246 		.next_sge_fn = NULL, \
247 		.contig_or_cb_arg = (contig_), \
248 		.md = (md_), \
249 	}
250 
251 #define NVME_PAYLOAD_SGL(reset_sgl_fn_, next_sge_fn_, cb_arg_, md_) \
252 	(struct nvme_payload) { \
253 		.reset_sgl_fn = (reset_sgl_fn_), \
254 		.next_sge_fn = (next_sge_fn_), \
255 		.contig_or_cb_arg = (cb_arg_), \
256 		.md = (md_), \
257 	}
258 
259 static inline enum nvme_payload_type
260 nvme_payload_type(const struct nvme_payload *payload) {
261 	return payload->reset_sgl_fn ? NVME_PAYLOAD_TYPE_SGL : NVME_PAYLOAD_TYPE_CONTIG;
262 }
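
/*
 * Usage sketch (illustrative only; the buffer and callback names are hypothetical):
 * the same nvme_payload structure describes either a contiguous buffer or an SGL,
 * and nvme_payload_type() distinguishes the two by whether reset_sgl_fn is set.
 *
 *	struct nvme_payload contig = NVME_PAYLOAD_CONTIG(data_buf, md_buf);
 *	assert(nvme_payload_type(&contig) == NVME_PAYLOAD_TYPE_CONTIG);
 *
 *	struct nvme_payload sgl = NVME_PAYLOAD_SGL(my_reset_sgl_cb, my_next_sge_cb,
 *						   sgl_cb_arg, NULL);
 *	assert(nvme_payload_type(&sgl) == NVME_PAYLOAD_TYPE_SGL);
 */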
263 
264 struct nvme_error_cmd {
265 	bool				do_not_submit;
266 	uint64_t			timeout_tsc;
267 	uint32_t			err_count;
268 	uint8_t				opc;
269 	struct spdk_nvme_status		status;
270 	TAILQ_ENTRY(nvme_error_cmd)	link;
271 };
272 
273 struct nvme_request {
274 	struct spdk_nvme_cmd		cmd;
275 
276 	uint8_t				retries;
277 
278 	uint8_t				timed_out : 1;
279 
280 	/**
281 	 * True if the request is in the queued_req list.
282 	 */
283 	uint8_t				queued : 1;
284 	uint8_t				reserved : 6;
285 
286 	/**
287 	 * Number of child requests still outstanding for this
288 	 *  request, which was split into multiple child requests.
289 	 */
290 	uint16_t			num_children;
291 
292 	/**
293 	 * Offset in bytes from the beginning of payload for this request.
294 	 * This is used for I/O commands that are split into multiple requests.
295 	 */
296 	uint32_t			payload_offset;
297 	uint32_t			md_offset;
298 
299 	uint32_t			payload_size;
300 
301 	/**
302 	 * Timeout ticks for error injection requests; this can be extended in the future
303 	 * to support a per-request timeout feature.
304 	 */
305 	uint64_t			timeout_tsc;
306 
307 	/**
308 	 * Data payload for this request's command.
309 	 */
310 	struct nvme_payload		payload;
311 
312 	spdk_nvme_cmd_cb		cb_fn;
313 	void				*cb_arg;
314 	STAILQ_ENTRY(nvme_request)	stailq;
315 
316 	struct spdk_nvme_qpair		*qpair;
317 
318 	/*
319 	 * The value of spdk_get_ticks() when the request was submitted to the hardware.
320 	 * Only set if ctrlr->timeout_enabled is true.
321 	 */
322 	uint64_t			submit_tick;
323 
324 	/**
325 	 * The active admin request can be moved to a per process pending
326 	 *  list based on the saved pid to tell which process it belongs
327 	 *  to. The cpl saves the original completion information which
328 	 *  is used in the completion callback.
329 	 * NOTE: the two fields below are only used for admin requests.
330 	 */
331 	pid_t				pid;
332 	struct spdk_nvme_cpl		cpl;
333 
334 	uint32_t			md_size;
335 
336 	/**
337 	 * The following members should not be reordered with members
338 	 *  above.  These members are only needed when splitting
339 	 *  requests which is done rarely, and the driver is careful
340 	 *  to not touch the following fields until a split operation is
341 	 *  needed, to avoid touching an extra cacheline.
342 	 */
343 
344 	/**
345 	 * Points to the outstanding child requests for a parent request.
346 	 *  Only valid if a request was split into multiple child
347 	 *  requests, and is not initialized for non-split requests.
348 	 */
349 	TAILQ_HEAD(, nvme_request)	children;
350 
351 	/**
352 	 * Linked-list pointers for a child request in its parent's list.
353 	 */
354 	TAILQ_ENTRY(nvme_request)	child_tailq;
355 
356 	/**
357 	 * Points to a parent request if part of a split request,
358 	 *   NULL otherwise.
359 	 */
360 	struct nvme_request		*parent;
361 
362 	/**
363 	 * Completion status for a parent request.  Initialized to all 0's
364 	 *  (SUCCESS) before child requests are submitted.  If a child
365 	 *  request completes with error, the error status is copied here,
366 	 *  to ensure that the parent request is also completed with error
367 	 *  status once all child requests are completed.
368 	 */
369 	struct spdk_nvme_cpl		parent_status;
370 
371 	/**
372 	 * The user_cb_fn and user_cb_arg fields are used for holding the original
373 	 * callback data when using nvme_allocate_request_user_copy.
374 	 */
375 	spdk_nvme_cmd_cb		user_cb_fn;
376 	void				*user_cb_arg;
377 	void				*user_buffer;
378 
379 	/** Sequence of accel operations associated with this request */
380 	void				*accel_sequence;
381 };
382 
383 struct nvme_completion_poll_status {
384 	struct spdk_nvme_cpl	cpl;
385 	uint64_t		timeout_tsc;
386 	/**
387 	 * DMA buffer retained throughout the duration of the command.  It'll be released
388 	 * automatically if the command times out, otherwise the user is responsible for freeing it.
389 	 */
390 	void			*dma_data;
391 	bool			done;
392 	/* This flag indicates that the request has timed out and the memory
393 	   must be freed in a completion callback */
394 	bool			timed_out;
395 };
396 
397 struct nvme_async_event_request {
398 	struct spdk_nvme_ctrlr		*ctrlr;
399 	struct nvme_request		*req;
400 	struct spdk_nvme_cpl		cpl;
401 };
402 
403 enum nvme_qpair_state {
404 	NVME_QPAIR_DISCONNECTED,
405 	NVME_QPAIR_DISCONNECTING,
406 	NVME_QPAIR_CONNECTING,
407 	NVME_QPAIR_CONNECTED,
408 	NVME_QPAIR_ENABLING,
409 	NVME_QPAIR_ENABLED,
410 	NVME_QPAIR_DESTROYING,
411 };
412 
413 enum nvme_qpair_connect_state {
414 	NVME_QPAIR_CONNECT_STATE_CONNECTING,
415 	NVME_QPAIR_CONNECT_STATE_AUTHENTICATING,
416 	NVME_QPAIR_CONNECT_STATE_CONNECTED,
417 	NVME_QPAIR_CONNECT_STATE_FAILED,
418 };
419 
420 enum nvme_qpair_auth_state {
421 	NVME_QPAIR_AUTH_STATE_NEGOTIATE,
422 	NVME_QPAIR_AUTH_STATE_AWAIT_NEGOTIATE,
423 	NVME_QPAIR_AUTH_STATE_AWAIT_CHALLENGE,
424 	NVME_QPAIR_AUTH_STATE_AWAIT_REPLY,
425 	NVME_QPAIR_AUTH_STATE_AWAIT_SUCCESS1,
426 	NVME_QPAIR_AUTH_STATE_AWAIT_FAILURE2,
427 	NVME_QPAIR_AUTH_STATE_DONE,
428 };
429 
430 /* Authentication transaction required (authreq.atr) */
431 #define NVME_QPAIR_AUTH_FLAG_ATR	(1 << 0)
432 /* Authentication and secure channel required (authreq.ascr) */
433 #define NVME_QPAIR_AUTH_FLAG_ASCR	(1 << 1)
434 
435 struct nvme_auth {
436 	/* State of the authentication */
437 	enum nvme_qpair_auth_state	state;
438 	/* Status of the authentication */
439 	int				status;
440 	/* Transaction ID */
441 	uint16_t			tid;
442 	/* Flags */
443 	uint32_t			flags;
444 };
445 
446 struct spdk_nvme_qpair {
447 	struct spdk_nvme_ctrlr			*ctrlr;
448 
449 	uint16_t				id;
450 
451 	uint8_t					qprio: 2;
452 
453 	uint8_t					state: 3;
454 
455 	uint8_t					async: 1;
456 
457 	uint8_t					is_new_qpair: 1;
458 
459 	uint8_t					abort_dnr: 1;
460 	/*
461 	 * Members for handling IO qpair deletion inside of a completion context.
462 	 * These are specifically defined as single bits, so that they do not
463 	 *  push this data structure out to another cacheline.
464 	 */
465 	uint8_t					in_completion_context: 1;
466 	uint8_t					delete_after_completion_context: 1;
467 
468 	/*
469 	 * Set when no deletion notification is needed. For example, the process
470 	 * which allocated this qpair exited unexpectedly.
471 	 */
472 	uint8_t					no_deletion_notification_needed: 1;
473 
474 	uint8_t					last_fuse: 2;
475 
476 	uint8_t					transport_failure_reason: 3;
477 	uint8_t					last_transport_failure_reason: 3;
478 
479 	/* The user is destroying qpair */
480 	uint8_t					destroy_in_progress: 1;
481 
482 	enum spdk_nvme_transport_type		trtype;
483 
484 	uint32_t				num_outstanding_reqs;
485 
486 	/* request object used only for this qpair's FABRICS/CONNECT command (if needed) */
487 	struct nvme_request			*reserved_req;
488 
489 	STAILQ_HEAD(, nvme_request)		free_req;
490 	STAILQ_HEAD(, nvme_request)		queued_req;
491 
492 	/* List entry for spdk_nvme_transport_poll_group::qpairs */
493 	STAILQ_ENTRY(spdk_nvme_qpair)		poll_group_stailq;
494 
495 	/** Commands whose opcode is in this list will return an error */
496 	TAILQ_HEAD(, nvme_error_cmd)		err_cmd_head;
497 	/** Requests in this list will return an error */
498 	STAILQ_HEAD(, nvme_request)		err_req_head;
499 
500 	struct spdk_nvme_ctrlr_process		*active_proc;
501 
502 	struct spdk_nvme_transport_poll_group	*poll_group;
503 
504 	void					*poll_group_tailq_head;
505 
506 	const struct spdk_nvme_transport	*transport;
507 
508 	/* Entries below here are not touched in the main I/O path. */
509 
510 	struct nvme_completion_poll_status	*poll_status;
511 	enum nvme_qpair_connect_state		connect_state;
512 
513 	/* List entry for spdk_nvme_ctrlr::active_io_qpairs */
514 	TAILQ_ENTRY(spdk_nvme_qpair)		tailq;
515 
516 	/* List entry for spdk_nvme_ctrlr_process::allocated_io_qpairs */
517 	TAILQ_ENTRY(spdk_nvme_qpair)		per_process_tailq;
518 
519 	STAILQ_HEAD(, nvme_request)		aborting_queued_req;
520 
521 	void					*req_buf;
522 
523 	/* In-band authentication state */
524 	struct nvme_auth			auth;
525 };
526 
527 struct spdk_nvme_poll_group {
528 	void						*ctx;
529 	struct spdk_nvme_accel_fn_table			accel_fn_table;
530 	STAILQ_HEAD(, spdk_nvme_transport_poll_group)	tgroups;
531 	bool						in_process_completions;
532 };
533 
534 struct spdk_nvme_transport_poll_group {
535 	struct spdk_nvme_poll_group			*group;
536 	const struct spdk_nvme_transport		*transport;
537 	STAILQ_HEAD(, spdk_nvme_qpair)			connected_qpairs;
538 	STAILQ_HEAD(, spdk_nvme_qpair)			disconnected_qpairs;
539 	STAILQ_ENTRY(spdk_nvme_transport_poll_group)	link;
540 	uint32_t					num_connected_qpairs;
541 };
542 
543 struct spdk_nvme_ns {
544 	struct spdk_nvme_ctrlr		*ctrlr;
545 	uint32_t			sector_size;
546 
547 	/*
548 	 * Size of data transferred as part of each block,
549 	 * including metadata if FLBAS indicates the metadata is transferred
550 	 * as part of the data buffer at the end of each LBA.
551 	 */
552 	uint32_t			extended_lba_size;
553 
554 	uint32_t			md_size;
555 	uint32_t			pi_type;
556 	uint32_t			sectors_per_max_io;
557 	uint32_t			sectors_per_max_io_no_md;
558 	uint32_t			sectors_per_stripe;
559 	uint32_t			id;
560 	uint16_t			flags;
561 	bool				active;
562 
563 	/* Command Set Identifier */
564 	enum spdk_nvme_csi		csi;
565 
566 	/* Namespace Identification Descriptor List (CNS = 03h) */
567 	uint8_t				id_desc_list[4096];
568 
569 	uint32_t			ana_group_id;
570 	enum spdk_nvme_ana_state	ana_state;
571 
572 	/* Identify Namespace data. */
573 	struct spdk_nvme_ns_data	nsdata;
574 
575 	/* Zoned Namespace Command Set Specific Identify Namespace data. */
576 	struct spdk_nvme_zns_ns_data	*nsdata_zns;
577 
578 	RB_ENTRY(spdk_nvme_ns)		node;
579 };
580 
581 /**
582  * State of struct spdk_nvme_ctrlr (in particular, during initialization).
583  */
584 enum nvme_ctrlr_state {
585 	/**
586 	 * Wait before initializing the controller.
587 	 */
588 	NVME_CTRLR_STATE_INIT_DELAY,
589 
590 	/**
591 	 * Connect the admin queue.
592 	 */
593 	NVME_CTRLR_STATE_CONNECT_ADMINQ,
594 
595 	/**
596 	 * Controller initialization has not started yet.
597 	 */
598 	NVME_CTRLR_STATE_INIT = NVME_CTRLR_STATE_CONNECT_ADMINQ,
599 
600 	/**
601 	 * Waiting for admin queue to connect.
602 	 */
603 	NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ,
604 
605 	/**
606 	 * Read Version (VS) register.
607 	 */
608 	NVME_CTRLR_STATE_READ_VS,
609 
610 	/**
611 	 * Waiting for Version (VS) register to be read.
612 	 */
613 	NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS,
614 
615 	/**
616 	 * Read Capabilities (CAP) register.
617 	 */
618 	NVME_CTRLR_STATE_READ_CAP,
619 
620 	/**
621 	 * Waiting for Capabilities (CAP) register to be read.
622 	 */
623 	NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP,
624 
625 	/**
626 	 * Check EN to prepare for controller initialization.
627 	 */
628 	NVME_CTRLR_STATE_CHECK_EN,
629 
630 	/**
631 	 * Waiting for CC to be read as part of EN check.
632 	 */
633 	NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC,
634 
635 	/**
636 	 * Waiting for CSTS.RDY to transition from 0 to 1 so that CC.EN may be set to 0.
637 	 */
638 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
639 
640 	/**
641 	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 1.
642 	 */
643 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
644 
645 	/**
646 	 * Disabling the controller by setting CC.EN to 0.
647 	 */
648 	NVME_CTRLR_STATE_SET_EN_0,
649 
650 	/**
651 	 * Waiting for the CC register to be read as part of disabling the controller.
652 	 */
653 	NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC,
654 
655 	/**
656 	 * Waiting for CSTS.RDY to transition from 1 to 0 so that CC.EN may be set to 1.
657 	 */
658 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
659 
660 	/**
661 	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 0.
662 	 */
663 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS,
664 
665 	/**
666 	 * The controller is disabled. (CC.EN and CSTS.RDY are 0.)
667 	 */
668 	NVME_CTRLR_STATE_DISABLED,
669 
670 	/**
671 	 * Enable the controller by writing CC.EN to 1
672 	 */
673 	NVME_CTRLR_STATE_ENABLE,
674 
675 	/**
676 	 * Waiting for CC register to be written as part of enabling the controller.
677 	 */
678 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC,
679 
680 	/**
681 	 * Waiting for CSTS.RDY to transition from 0 to 1 after enabling the controller.
682 	 */
683 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
684 
685 	/**
686 	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 1.
687 	 */
688 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
689 
690 	/**
691 	 * Reset the Admin queue of the controller.
692 	 */
693 	NVME_CTRLR_STATE_RESET_ADMIN_QUEUE,
694 
695 	/**
696 	 * Identify Controller command will be sent to the controller.
697 	 */
698 	NVME_CTRLR_STATE_IDENTIFY,
699 
700 	/**
701 	 * Waiting for the Identify Controller command to be completed.
702 	 */
703 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
704 
705 	/**
706 	 * Configure AER of the controller.
707 	 */
708 	NVME_CTRLR_STATE_CONFIGURE_AER,
709 
710 	/**
711 	 * Waiting for the Configure AER to be completed.
712 	 */
713 	NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
714 
715 	/**
716 	 * Set Keep Alive Timeout of the controller.
717 	 */
718 	NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
719 
720 	/**
721 	 * Waiting for Set Keep Alive Timeout to be completed.
722 	 */
723 	NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
724 
725 	/**
726 	 * Get Identify I/O Command Set Specific Controller data structure.
727 	 */
728 	NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
729 
730 	/**
731 	 * Waiting for Identify I/O Command Set Specific Controller command to be completed.
732 	 */
733 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC,
734 
735 	/**
736 	 * Get Commands Supported and Effects log page for the Zoned Namespace Command Set.
737 	 */
738 	NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG,
739 
740 	/**
741 	 * Waiting for the Get Log Page command to be completed.
742 	 */
743 	NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG,
744 
745 	/**
746 	 * Set Number of Queues of the controller.
747 	 */
748 	NVME_CTRLR_STATE_SET_NUM_QUEUES,
749 
750 	/**
751 	 * Waiting for Set Num of Queues command to be completed.
752 	 */
753 	NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
754 
755 	/**
756 	 * Get active Namespace list of the controller.
757 	 */
758 	NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
759 
760 	/**
761 	 * Waiting for the Identify Active Namespace commands to be completed.
762 	 */
763 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS,
764 
765 	/**
766 	 * Get Identify Namespace Data structure for each NS.
767 	 */
768 	NVME_CTRLR_STATE_IDENTIFY_NS,
769 
770 	/**
771 	 * Waiting for the Identify Namespace commands to be completed.
772 	 */
773 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
774 
775 	/**
776 	 * Get Identify Namespace Identification Descriptors.
777 	 */
778 	NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
779 
780 	/**
781 	 * Get Identify I/O Command Set Specific Namespace data structure for each NS.
782 	 */
783 	NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
784 
785 	/**
786 	 * Waiting for the Identify I/O Command Set Specific Namespace commands to be completed.
787 	 */
788 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,
789 
790 	/**
791 	 * Waiting for the Identify Namespace Identification
792 	 * Descriptors to be completed.
793 	 */
794 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
795 
796 	/**
797 	 * Set supported log pages of the controller.
798 	 */
799 	NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
800 
801 	/**
802 	 * Set supported log pages of INTEL controller.
803 	 */
804 	NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES,
805 
806 	/**
807 	 * Waiting for supported log pages of INTEL controller.
808 	 */
809 	NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES,
810 
811 	/**
812 	 * Set supported features of the controller.
813 	 */
814 	NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
815 
816 	/**
817 	 * Set Doorbell Buffer Config of the controller.
818 	 */
819 	NVME_CTRLR_STATE_SET_DB_BUF_CFG,
820 
821 	/**
822 	 * Waiting for Doorbell Buffer Config to be completed.
823 	 */
824 	NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
825 
826 	/**
827 	 * Set Host ID of the controller.
828 	 */
829 	NVME_CTRLR_STATE_SET_HOST_ID,
830 
831 	/**
832 	 * Waiting for Set Host ID to be completed.
833 	 */
834 	NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
835 
836 	/**
837 	 * Let transport layer do its part of initialization.
838 	 */
839 	NVME_CTRLR_STATE_TRANSPORT_READY,
840 
841 	/**
842 	 * Controller initialization has completed and the controller is ready.
843 	 */
844 	NVME_CTRLR_STATE_READY,
845 
846 	/**
847 	 * Controller initialization has an error.
848 	 */
849 	NVME_CTRLR_STATE_ERROR,
850 
851 	/**
852 	 * Admin qpair was disconnected, controller needs to be re-initialized
853 	 */
854 	NVME_CTRLR_STATE_DISCONNECTED,
855 };
856 
857 #define NVME_TIMEOUT_INFINITE		0
858 #define NVME_TIMEOUT_KEEP_EXISTING	UINT64_MAX
859 
860 struct spdk_nvme_ctrlr_aer_completion_list {
861 	struct spdk_nvme_cpl	cpl;
862 	STAILQ_ENTRY(spdk_nvme_ctrlr_aer_completion_list) link;
863 };
864 
865 /*
866  * Used to track properties for all processes accessing the controller.
867  */
868 struct spdk_nvme_ctrlr_process {
869 	/** Whether it is the primary process  */
870 	bool						is_primary;
871 
872 	/** Process ID */
873 	pid_t						pid;
874 
875 	/** Active admin requests to be completed */
876 	STAILQ_HEAD(, nvme_request)			active_reqs;
877 
878 	TAILQ_ENTRY(spdk_nvme_ctrlr_process)		tailq;
879 
880 	/** Per process PCI device handle */
881 	struct spdk_pci_device				*devhandle;
882 
883 	/** Reference count to track the number of attachments to this controller. */
884 	int						ref;
885 
886 	/** Allocated IO qpairs */
887 	TAILQ_HEAD(, spdk_nvme_qpair)			allocated_io_qpairs;
888 
889 	spdk_nvme_aer_cb				aer_cb_fn;
890 	void						*aer_cb_arg;
891 
892 	/**
893 	 * A function pointer to timeout callback function
894 	 */
895 	spdk_nvme_timeout_cb		timeout_cb_fn;
896 	void				*timeout_cb_arg;
897 	/** separate timeout values for io vs. admin reqs */
898 	uint64_t			timeout_io_ticks;
899 	uint64_t			timeout_admin_ticks;
900 
901 	/** List to publish AENs to all procs in multiprocess setup */
902 	STAILQ_HEAD(, spdk_nvme_ctrlr_aer_completion_list)      async_events;
903 };
904 
905 struct nvme_register_completion {
906 	struct spdk_nvme_cpl			cpl;
907 	uint64_t				value;
908 	spdk_nvme_reg_cb			cb_fn;
909 	void					*cb_ctx;
910 	STAILQ_ENTRY(nvme_register_completion)	stailq;
911 	pid_t					pid;
912 };
913 
914 struct spdk_nvme_ctrlr {
915 	/* Hot data (accessed in I/O path) starts here. */
916 
917 	/* Tree of namespaces */
918 	RB_HEAD(nvme_ns_tree, spdk_nvme_ns)	ns;
919 
920 	/* The number of active namespaces */
921 	uint32_t			active_ns_count;
922 
923 	bool				is_removed;
924 
925 	bool				is_resetting;
926 
927 	bool				is_failed;
928 
929 	bool				is_destructed;
930 
931 	bool				timeout_enabled;
932 
933 	/* The application is preparing to reset the controller.  Transports
934 	 * can use this to skip unnecessary parts of the qpair deletion process,
935 	 * for example the DELETE_SQ/CQ commands.
936 	 */
937 	bool				prepare_for_reset;
938 
939 	bool				is_disconnecting;
940 
941 	bool				needs_io_msg_update;
942 
943 	uint16_t			max_sges;
944 
945 	uint16_t			cntlid;
946 
947 	/** Controller support flags */
948 	uint64_t			flags;
949 
950 	/** NVMEoF in-capsule data size in bytes */
951 	uint32_t			ioccsz_bytes;
952 
953 	/** NVMEoF in-capsule data offset in 16 byte units */
954 	uint16_t			icdoff;
955 
956 	/* Cold data (not accessed in normal I/O path) is after this point. */
957 
958 	struct spdk_nvme_transport_id	trid;
959 
960 	union spdk_nvme_cap_register	cap;
961 	union spdk_nvme_vs_register	vs;
962 
963 	int				state;
964 	uint64_t			state_timeout_tsc;
965 
966 	uint64_t			next_keep_alive_tick;
967 	uint64_t			keep_alive_interval_ticks;
968 
969 	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
970 
971 	/** All the log pages supported */
972 	bool				log_page_supported[256];
973 
974 	/** All the features supported */
975 	bool				feature_supported[256];
976 
977 	/** maximum i/o size in bytes */
978 	uint32_t			max_xfer_size;
979 
980 	/** minimum page size supported by this controller in bytes */
981 	uint32_t			min_page_size;
982 
983 	/** selected memory page size for this controller in bytes */
984 	uint32_t			page_size;
985 
986 	uint32_t			num_aers;
987 	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];
988 
989 	/** guards access to the controller itself, including admin queues */
990 	pthread_mutex_t			ctrlr_lock;
991 
992 	struct spdk_nvme_qpair		*adminq;
993 
994 	/** shadow doorbell buffer */
995 	uint32_t			*shadow_doorbell;
996 	/** eventidx buffer */
997 	uint32_t			*eventidx;
998 
999 	/**
1000 	 * Identify Controller data.
1001 	 */
1002 	struct spdk_nvme_ctrlr_data	cdata;
1003 
1004 	/**
1005 	 * Zoned Namespace Command Set Specific Identify Controller data.
1006 	 */
1007 	struct spdk_nvme_zns_ctrlr_data	*cdata_zns;
1008 
1009 	struct spdk_bit_array		*free_io_qids;
1010 	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
1011 
1012 	struct spdk_nvme_ctrlr_opts	opts;
1013 
1014 	uint64_t			quirks;
1015 
1016 	/* Extra sleep time during controller initialization */
1017 	uint64_t			sleep_timeout_tsc;
1018 
1019 	/** Track all the processes that manage this controller */
1020 	TAILQ_HEAD(, spdk_nvme_ctrlr_process)	active_procs;
1021 
1022 
1023 	STAILQ_HEAD(, nvme_request)	queued_aborts;
1024 	uint32_t			outstanding_aborts;
1025 
1026 	/* CB to notify the user when the ctrlr is removed/failed. */
1027 	spdk_nvme_remove_cb			remove_cb;
1028 	void					*cb_ctx;
1029 
1030 	struct spdk_nvme_qpair		*external_io_msgs_qpair;
1031 	pthread_mutex_t			external_io_msgs_lock;
1032 	struct spdk_ring		*external_io_msgs;
1033 
1034 	STAILQ_HEAD(, nvme_io_msg_producer) io_producers;
1035 
1036 	struct spdk_nvme_ana_page		*ana_log_page;
1037 	struct spdk_nvme_ana_group_descriptor	*copied_ana_desc;
1038 	uint32_t				ana_log_page_size;
1039 
1040 	/* scratchpad pointer that can be used to send data between two NVME_CTRLR_STATEs */
1041 	void				*tmp_ptr;
1042 
1043 	/* maximum zone append size in bytes */
1044 	uint32_t			max_zone_append_size;
1045 
1046 	/* PMR size in bytes */
1047 	uint64_t			pmr_size;
1048 
1049 	/* Boot Partition Info */
1050 	enum nvme_bp_write_state	bp_ws;
1051 	uint32_t			bpid;
1052 	spdk_nvme_cmd_cb		bp_write_cb_fn;
1053 	void				*bp_write_cb_arg;
1054 
1055 	/* Firmware Download */
1056 	void				*fw_payload;
1057 	unsigned int			fw_size_remaining;
1058 	unsigned int			fw_offset;
1059 	unsigned int			fw_transfer_size;
1060 
1061 	/* Completed register operations */
1062 	STAILQ_HEAD(, nvme_register_completion)	register_operations;
1063 
1064 	union spdk_nvme_cc_register		process_init_cc;
1065 
1066 	/* Authentication transaction ID */
1067 	uint16_t				auth_tid;
1068 };
1069 
1070 struct spdk_nvme_probe_ctx {
1071 	struct spdk_nvme_transport_id		trid;
1072 	void					*cb_ctx;
1073 	spdk_nvme_probe_cb			probe_cb;
1074 	spdk_nvme_attach_cb			attach_cb;
1075 	spdk_nvme_remove_cb			remove_cb;
1076 	TAILQ_HEAD(, spdk_nvme_ctrlr)		init_ctrlrs;
1077 };
1078 
1079 typedef void (*nvme_ctrlr_detach_cb)(struct spdk_nvme_ctrlr *ctrlr);
1080 
1081 enum nvme_ctrlr_detach_state {
1082 	NVME_CTRLR_DETACH_SET_CC,
1083 	NVME_CTRLR_DETACH_CHECK_CSTS,
1084 	NVME_CTRLR_DETACH_GET_CSTS,
1085 	NVME_CTRLR_DETACH_GET_CSTS_DONE,
1086 };
1087 
1088 struct nvme_ctrlr_detach_ctx {
1089 	struct spdk_nvme_ctrlr			*ctrlr;
1090 	nvme_ctrlr_detach_cb			cb_fn;
1091 	uint64_t				shutdown_start_tsc;
1092 	uint32_t				shutdown_timeout_ms;
1093 	bool					shutdown_complete;
1094 	enum nvme_ctrlr_detach_state		state;
1095 	union spdk_nvme_csts_register		csts;
1096 	TAILQ_ENTRY(nvme_ctrlr_detach_ctx)	link;
1097 };
1098 
1099 struct spdk_nvme_detach_ctx {
1100 	TAILQ_HEAD(, nvme_ctrlr_detach_ctx)	head;
1101 };
1102 
1103 struct nvme_driver {
1104 	pthread_mutex_t			lock;
1105 
1106 	/** Multi-process shared attached controller list */
1107 	TAILQ_HEAD(, spdk_nvme_ctrlr)	shared_attached_ctrlrs;
1108 
1109 	bool				initialized;
1110 	struct spdk_uuid		default_extended_host_id;
1111 
1112 	/** netlink socket fd for hotplug messages */
1113 	int				hotplug_fd;
1114 };
1115 
1116 #define nvme_ns_cmd_get_ext_io_opt(opts, field, defval) \
1117        ((opts) != NULL && offsetof(struct spdk_nvme_ns_cmd_ext_io_opts, field) + \
1118         sizeof((opts)->field) <= (opts)->size ? (opts)->field : (defval))
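
/*
 * Usage sketch (illustrative only; the field and default shown are just examples):
 * the macro dereferences a field only when the caller-provided opts structure is
 * large enough to contain it (offset + size <= opts->size), so callers compiled
 * against an older, smaller spdk_nvme_ns_cmd_ext_io_opts transparently get the
 * default value for fields they do not know about.
 *
 *	uint16_t apptag = nvme_ns_cmd_get_ext_io_opt(opts, apptag, 0);
 */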
1119 
1120 extern struct nvme_driver *g_spdk_nvme_driver;
1121 
1122 int nvme_driver_init(void);
1123 
1124 #define nvme_delay		usleep
1125 
1126 static inline bool
1127 nvme_qpair_is_admin_queue(struct spdk_nvme_qpair *qpair)
1128 {
1129 	return qpair->id == 0;
1130 }
1131 
1132 static inline bool
1133 nvme_qpair_is_io_queue(struct spdk_nvme_qpair *qpair)
1134 {
1135 	return qpair->id != 0;
1136 }
1137 
1138 static inline int
1139 nvme_robust_mutex_lock(pthread_mutex_t *mtx)
1140 {
1141 	int rc = pthread_mutex_lock(mtx);
1142 
1143 #ifndef __FreeBSD__
1144 	if (rc == EOWNERDEAD) {
1145 		rc = pthread_mutex_consistent(mtx);
1146 	}
1147 #endif
1148 
1149 	return rc;
1150 }
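
/*
 * Usage sketch (illustrative only): robust mutexes let a surviving process recover
 * a lock whose previous owner died while holding it.  In that case
 * pthread_mutex_lock() returns EOWNERDEAD and the wrapper above marks the mutex
 * consistent again before returning.
 *
 *	if (nvme_robust_mutex_lock(&ctrlr->ctrlr_lock) == 0) {
 *		... critical section ...
 *		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 *	}
 */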
1151 
1152 static inline int
1153 nvme_robust_mutex_unlock(pthread_mutex_t *mtx)
1154 {
1155 	return pthread_mutex_unlock(mtx);
1156 }
1157 
1158 /* Poll group management functions. */
1159 int nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
1160 int nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1161 
1162 /* Admin functions */
1163 int	nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr,
1164 				uint8_t cns, uint16_t cntid, uint32_t nsid,
1165 				uint8_t csi, void *payload, size_t payload_size,
1166 				spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1167 int	nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
1168 				      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn,
1169 				      void *cb_arg);
1170 int	nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
1171 				      spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1172 int	nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
1173 		union spdk_nvme_feat_async_event_configuration config,
1174 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1175 int	nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
1176 				   spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1177 int	nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1178 				 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1179 int	nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1180 				 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1181 int	nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
1182 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1183 int	nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr,
1184 		uint64_t prp1, uint64_t prp2,
1185 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1186 int	nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
1187 				 void *cb_arg);
1188 int	nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1189 			      struct spdk_nvme_format *format, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1190 int	nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
1191 				 const struct spdk_nvme_fw_commit *fw_commit,
1192 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1193 int	nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
1194 		uint32_t size, uint32_t offset, void *payload,
1195 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1196 int	nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1197 				struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
1198 				spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1199 void	nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl);
1200 int	nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
1201 				 struct nvme_completion_poll_status *status);
1202 int	nvme_wait_for_completion_robust_lock(struct spdk_nvme_qpair *qpair,
1203 		struct nvme_completion_poll_status *status,
1204 		pthread_mutex_t *robust_mutex);
1205 int	nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
1206 		struct nvme_completion_poll_status *status,
1207 		uint64_t timeout_in_usecs);
1208 int	nvme_wait_for_completion_robust_lock_timeout(struct spdk_nvme_qpair *qpair,
1209 		struct nvme_completion_poll_status *status,
1210 		pthread_mutex_t *robust_mutex,
1211 		uint64_t timeout_in_usecs);
1212 int	nvme_wait_for_completion_robust_lock_timeout_poll(struct spdk_nvme_qpair *qpair,
1213 		struct nvme_completion_poll_status *status,
1214 		pthread_mutex_t *robust_mutex);
1215 
1216 struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr,
1217 		pid_t pid);
1218 struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr);
1219 int	nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle);
1220 void	nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr);
1221 struct spdk_pci_device *nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr);
1222 
1223 int	nvme_ctrlr_probe(const struct spdk_nvme_transport_id *trid,
1224 			 struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle);
1225 
1226 int	nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr);
1227 void	nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr);
1228 void	nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1229 void	nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr,
1230 				  struct nvme_ctrlr_detach_ctx *ctx);
1231 int	nvme_ctrlr_destruct_poll_async(struct spdk_nvme_ctrlr *ctrlr,
1232 				       struct nvme_ctrlr_detach_ctx *ctx);
1233 void	nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove);
1234 int	nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr);
1235 void	nvme_ctrlr_disable(struct spdk_nvme_ctrlr *ctrlr);
1236 int	nvme_ctrlr_disable_poll(struct spdk_nvme_ctrlr *ctrlr);
1237 void	nvme_ctrlr_connected(struct spdk_nvme_probe_ctx *probe_ctx,
1238 			     struct spdk_nvme_ctrlr *ctrlr);
1239 
1240 int	nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
1241 					struct nvme_request *req);
1242 int	nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap);
1243 int	nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs);
1244 int	nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz);
1245 int	nvme_ctrlr_get_pmrcap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_pmrcap_register *pmrcap);
1246 int	nvme_ctrlr_get_bpinfo(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bpinfo_register *bpinfo);
1247 int	nvme_ctrlr_set_bprsel(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bprsel_register *bprsel);
1248 int	nvme_ctrlr_set_bpmbl(struct spdk_nvme_ctrlr *ctrlr, uint64_t bpmbl_value);
1249 bool	nvme_ctrlr_multi_iocs_enabled(struct spdk_nvme_ctrlr *ctrlr);
1250 void    nvme_ctrlr_process_async_event(struct spdk_nvme_ctrlr *ctrlr,
1251 				       const struct spdk_nvme_cpl *cpl);
1252 void nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1253 void nvme_ctrlr_complete_queued_async_events(struct spdk_nvme_ctrlr *ctrlr);
1254 void nvme_ctrlr_abort_queued_aborts(struct spdk_nvme_ctrlr *ctrlr);
1255 int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
1256 		    struct spdk_nvme_ctrlr *ctrlr,
1257 		    enum spdk_nvme_qprio qprio,
1258 		    uint32_t num_requests, bool async);
1259 void	nvme_qpair_deinit(struct spdk_nvme_qpair *qpair);
1260 void	nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair);
1261 int	nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair,
1262 				  struct nvme_request *req);
1263 void	nvme_qpair_abort_all_queued_reqs(struct spdk_nvme_qpair *qpair);
1264 uint32_t nvme_qpair_abort_queued_reqs_with_cbarg(struct spdk_nvme_qpair *qpair, void *cmd_cb_arg);
1265 void	nvme_qpair_abort_queued_reqs(struct spdk_nvme_qpair *qpair);
1266 void	nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_requests);
1267 int	nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr);
1268 void	nvme_ns_set_identify_data(struct spdk_nvme_ns *ns);
1269 void	nvme_ns_set_id_desc_list_data(struct spdk_nvme_ns *ns);
1270 void	nvme_ns_free_zns_specific_data(struct spdk_nvme_ns *ns);
1271 void	nvme_ns_free_iocs_specific_data(struct spdk_nvme_ns *ns);
1272 bool	nvme_ns_has_supported_iocs_specific_data(struct spdk_nvme_ns *ns);
1273 int	nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
1274 			  struct spdk_nvme_ctrlr *ctrlr);
1275 void	nvme_ns_destruct(struct spdk_nvme_ns *ns);
1276 int	nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1277 					void *buffer, void *metadata, uint64_t zslba,
1278 					uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1279 					uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag);
1280 int nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1281 				     uint64_t zslba, uint32_t lba_count,
1282 				     spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
1283 				     spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1284 				     spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
1285 				     uint16_t apptag_mask, uint16_t apptag);
1286 
1287 int	nvme_fabric_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
1288 int	nvme_fabric_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
1289 int	nvme_fabric_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
1290 int	nvme_fabric_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
1291 int	nvme_fabric_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1292 		uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1293 int	nvme_fabric_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1294 		uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1295 int	nvme_fabric_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1296 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1297 int	nvme_fabric_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1298 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1299 int	nvme_fabric_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
1300 int	nvme_fabric_ctrlr_discover(struct spdk_nvme_ctrlr *ctrlr,
1301 				   struct spdk_nvme_probe_ctx *probe_ctx);
1302 int	nvme_fabric_qpair_connect(struct spdk_nvme_qpair *qpair, uint32_t num_entries);
1303 int	nvme_fabric_qpair_connect_async(struct spdk_nvme_qpair *qpair, uint32_t num_entries);
1304 int	nvme_fabric_qpair_connect_poll(struct spdk_nvme_qpair *qpair);
1305 int	nvme_fabric_qpair_authenticate_async(struct spdk_nvme_qpair *qpair);
1306 int	nvme_fabric_qpair_authenticate_poll(struct spdk_nvme_qpair *qpair);
1307 
1308 typedef int (*spdk_nvme_parse_ana_log_page_cb)(
1309 	const struct spdk_nvme_ana_group_descriptor *desc, void *cb_arg);
1310 int	nvme_ctrlr_parse_ana_log_page(struct spdk_nvme_ctrlr *ctrlr,
1311 				      spdk_nvme_parse_ana_log_page_cb cb_fn, void *cb_arg);
1312 
1313 static inline void
1314 nvme_request_clear(struct nvme_request *req)
1315 {
1316 	/*
1317 	 * Only memset/zero fields that need it.  All other fields
1318 	 *  will be initialized appropriately either later in this
1319 	 *  function, or before they are needed later in the
1320 	 *  submission path.  For example, the children
1321 	 *  TAILQ_ENTRY and following members are
1322 	 *  only used as part of I/O splitting so we avoid
1323 	 *  memsetting them until it is actually needed.
1324 	 *  They will be initialized in nvme_request_add_child()
1325 	 *  if the request is split.
1326 	 */
1327 	memset(req, 0, offsetof(struct nvme_request, payload_size));
1328 }
1329 
1330 #define NVME_INIT_REQUEST(req, _cb_fn, _cb_arg, _payload, _payload_size, _md_size)	\
1331 	do {						\
1332 		nvme_request_clear(req);		\
1333 		req->cb_fn = _cb_fn;			\
1334 		req->cb_arg = _cb_arg;			\
1335 		req->payload = _payload;		\
1336 		req->payload_size = _payload_size;	\
1337 		req->md_size = _md_size;		\
1338 		req->pid = g_spdk_nvme_pid;		\
1339 		req->submit_tick = 0;			\
1340 		req->accel_sequence = NULL;		\
1341 	} while (0);
1342 
1343 static inline struct nvme_request *
1344 nvme_allocate_request(struct spdk_nvme_qpair *qpair,
1345 		      const struct nvme_payload *payload, uint32_t payload_size, uint32_t md_size,
1346 		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1347 {
1348 	struct nvme_request *req;
1349 
1350 	req = STAILQ_FIRST(&qpair->free_req);
1351 	if (req == NULL) {
1352 		return req;
1353 	}
1354 
1355 	STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);
1356 	qpair->num_outstanding_reqs++;
1357 
1358 	NVME_INIT_REQUEST(req, cb_fn, cb_arg, *payload, payload_size, md_size);
1359 
1360 	return req;
1361 }
1362 
1363 static inline struct nvme_request *
1364 nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair,
1365 			     void *buffer, uint32_t payload_size,
1366 			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1367 {
1368 	struct nvme_payload payload;
1369 
1370 	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
1371 
1372 	return nvme_allocate_request(qpair, &payload, payload_size, 0, cb_fn, cb_arg);
1373 }
1374 
1375 static inline struct nvme_request *
1376 nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1377 {
1378 	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
1379 }
1380 
1381 struct nvme_request *nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
1382 		void *buffer, uint32_t payload_size,
1383 		spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller);
1384 
1385 static inline void
1386 _nvme_free_request(struct nvme_request *req, struct spdk_nvme_qpair *qpair)
1387 {
1388 	assert(req != NULL);
1389 	assert(req->num_children == 0);
1390 	assert(qpair != NULL);
1391 
1392 	/* The reserved_req does not go in the free_req STAILQ - it is
1393 	 * saved only for use with a FABRICS/CONNECT command.
1394 	 */
1395 	if (spdk_likely(qpair->reserved_req != req)) {
1396 		STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
1397 
1398 		assert(qpair->num_outstanding_reqs > 0);
1399 		qpair->num_outstanding_reqs--;
1400 	}
1401 }
1402 
1403 static inline void
1404 nvme_free_request(struct nvme_request *req)
1405 {
1406 	_nvme_free_request(req, req->qpair);
1407 }
1408 
1409 static inline void
1410 nvme_complete_request(spdk_nvme_cmd_cb cb_fn, void *cb_arg, struct spdk_nvme_qpair *qpair,
1411 		      struct nvme_request *req, struct spdk_nvme_cpl *cpl)
1412 {
1413 	struct spdk_nvme_cpl            err_cpl;
1414 	struct nvme_error_cmd           *cmd;
1415 
1416 	if (spdk_unlikely(req->accel_sequence != NULL)) {
1417 		struct spdk_nvme_poll_group *pg = qpair->poll_group->group;
1418 
1419 		/* Transports are required to execute the sequence and clear req->accel_sequence.
1420 		 * If it's left non-NULL, it must mean the request has failed. */
1421 		assert(spdk_nvme_cpl_is_error(cpl));
1422 		pg->accel_fn_table.abort_sequence(req->accel_sequence);
1423 		req->accel_sequence = NULL;
1424 	}
1425 
1426 	/* error injection at completion path,
1427 	 * only inject for successfully completed commands
1428 	 */
1429 	if (spdk_unlikely(!TAILQ_EMPTY(&qpair->err_cmd_head) &&
1430 			  !spdk_nvme_cpl_is_error(cpl))) {
1431 		TAILQ_FOREACH(cmd, &qpair->err_cmd_head, link) {
1432 
1433 			if (cmd->do_not_submit) {
1434 				continue;
1435 			}
1436 
1437 			if ((cmd->opc == req->cmd.opc) && cmd->err_count) {
1438 
1439 				err_cpl = *cpl;
1440 				err_cpl.status.sct = cmd->status.sct;
1441 				err_cpl.status.sc = cmd->status.sc;
1442 
1443 				cpl = &err_cpl;
1444 				cmd->err_count--;
1445 				break;
1446 			}
1447 		}
1448 	}
1449 
1450 	/* For PCIe completions, we want to avoid touching the req itself to avoid
1451 	 * dependencies on loading those cachelines. So call the internal helper
1452 	 * function using the qpair that was passed by the caller, instead
1453 	 * of getting it from the req.
1454 	 */
1455 	_nvme_free_request(req, qpair);
1456 
1457 	if (spdk_likely(cb_fn)) {
1458 		cb_fn(cb_arg, cpl);
1459 	}
1460 }
1461 
1462 static inline void
1463 nvme_cleanup_user_req(struct nvme_request *req)
1464 {
1465 	if (req->user_buffer && req->payload_size) {
1466 		spdk_free(req->payload.contig_or_cb_arg);
1467 		req->user_buffer = NULL;
1468 	}
1469 
1470 	req->user_cb_arg = NULL;
1471 	req->user_cb_fn = NULL;
1472 }
1473 
1474 static inline void
1475 nvme_qpair_set_state(struct spdk_nvme_qpair *qpair, enum nvme_qpair_state state)
1476 {
1477 	qpair->state = state;
1478 	if (state == NVME_QPAIR_ENABLED) {
1479 		qpair->is_new_qpair = false;
1480 	}
1481 }
1482 
1483 static inline enum nvme_qpair_state
1484 nvme_qpair_get_state(struct spdk_nvme_qpair *qpair) {
1485 	return qpair->state;
1486 }
1487 
1488 static inline void
1489 nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child)
1490 {
1491 	assert(parent != NULL);
1492 	assert(child != NULL);
1493 	assert(child->parent == parent);
1494 	assert(parent->num_children != 0);
1495 
1496 	parent->num_children--;
1497 	child->parent = NULL;
1498 	TAILQ_REMOVE(&parent->children, child, child_tailq);
1499 }
1500 
1501 static inline void
1502 nvme_cb_complete_child(void *child_arg, const struct spdk_nvme_cpl *cpl)
1503 {
1504 	struct nvme_request *child = child_arg;
1505 	struct nvme_request *parent = child->parent;
1506 
1507 	nvme_request_remove_child(parent, child);
1508 
1509 	if (spdk_nvme_cpl_is_error(cpl)) {
1510 		memcpy(&parent->parent_status, cpl, sizeof(*cpl));
1511 	}
1512 
1513 	if (parent->num_children == 0) {
1514 		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
1515 				      parent, &parent->parent_status);
1516 	}
1517 }
1518 
1519 static inline void
1520 nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
1521 {
1522 	assert(parent->num_children != UINT16_MAX);
1523 
1524 	if (parent->num_children == 0) {
1525 		/*
1526 		 * Defer initialization of the children TAILQ since it falls
1527 		 *  on a separate cacheline.  This ensures we do not touch this
1528 		 *  cacheline except on request splitting cases, which are
1529 		 *  relatively rare.
1530 		 */
1531 		TAILQ_INIT(&parent->children);
1532 		parent->parent = NULL;
1533 		memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));
1534 	}
1535 
1536 	parent->num_children++;
1537 	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
1538 	child->parent = parent;
1539 	child->cb_fn = nvme_cb_complete_child;
1540 	child->cb_arg = child;
1541 }
1542 
1543 static inline void
1544 nvme_request_free_children(struct nvme_request *req)
1545 {
1546 	struct nvme_request *child, *tmp;
1547 
1548 	if (req->num_children == 0) {
1549 		return;
1550 	}
1551 
1552 	/* free all child nvme_request */
1553 	TAILQ_FOREACH_SAFE(child, &req->children, child_tailq, tmp) {
1554 		nvme_request_remove_child(req, child);
1555 		nvme_request_free_children(child);
1556 		nvme_free_request(child);
1557 	}
1558 }
1559 
1560 int	nvme_request_check_timeout(struct nvme_request *req, uint16_t cid,
1561 				   struct spdk_nvme_ctrlr_process *active_proc, uint64_t now_tick);
1562 uint64_t nvme_get_quirks(const struct spdk_pci_id *id);
1563 
1564 int	nvme_robust_mutex_init_shared(pthread_mutex_t *mtx);
1565 int	nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx);
1566 
1567 bool	nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl);
1568 
1569 struct spdk_nvme_ctrlr *nvme_get_ctrlr_by_trid_unsafe(
1570 	const struct spdk_nvme_transport_id *trid);
1571 
1572 const struct spdk_nvme_transport *nvme_get_transport(const char *transport_name);
1573 const struct spdk_nvme_transport *nvme_get_first_transport(void);
1574 const struct spdk_nvme_transport *nvme_get_next_transport(const struct spdk_nvme_transport
1575 		*transport);
1576 void  nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr);
1577 
1578 /* Transport specific functions */
1579 struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
1580 		const struct spdk_nvme_ctrlr_opts *opts,
1581 		void *devhandle);
1582 int nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1583 int nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
1584 int nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr);
1585 int nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr);
1586 int nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
1587 int nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
1588 int nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
1589 int nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
1590 int nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1591 		uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1592 int nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1593 		uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1594 int nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1595 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1596 int nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1597 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1598 uint32_t nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr);
1599 uint16_t nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);
1600 struct spdk_nvme_qpair *nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1601 		uint16_t qid, const struct spdk_nvme_io_qpair_opts *opts);
1602 int nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr);
1603 void *nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
1604 int nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr);
1605 int nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr);
1606 int nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr);
1607 void *nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
1608 int nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr);
1609 void nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1610 		struct spdk_nvme_qpair *qpair);
1611 int nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1612 				       struct spdk_nvme_qpair *qpair);
1613 void nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1614 		struct spdk_nvme_qpair *qpair);
1615 void nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair);
1616 int nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
1617 		struct spdk_memory_domain **domains, int array_size);
1618 void nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair);
1619 int nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair);
1620 int nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req);
1621 int32_t nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair,
1622 		uint32_t max_completions);
1623 void nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair);
1624 int nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
1625 		int (*iter_fn)(struct nvme_request *req, void *arg),
1626 		void *arg);
1627 
1628 struct spdk_nvme_transport_poll_group *nvme_transport_poll_group_create(
1629 	const struct spdk_nvme_transport *transport);
1630 struct spdk_nvme_transport_poll_group *nvme_transport_qpair_get_optimal_poll_group(
1631 	const struct spdk_nvme_transport *transport,
1632 	struct spdk_nvme_qpair *qpair);
1633 int nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
1634 				  struct spdk_nvme_qpair *qpair);
1635 int nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
1636 				     struct spdk_nvme_qpair *qpair);
1637 int nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1638 int nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
1639 int64_t nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
1640 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
1641 int nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup);
1642 int nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
1643 					struct spdk_nvme_transport_poll_group_stat **stats);
1644 void nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
1645 		struct spdk_nvme_transport_poll_group_stat *stats);
1646 enum spdk_nvme_transport_type nvme_transport_get_trtype(const struct spdk_nvme_transport
1647 		*transport);
1648 /*
1649  * The ref-related functions below must be called with the global
1650  *  driver lock held to handle the multi-process case.
1651  *  Within these functions, the per-ctrlr ctrlr_lock is also
1652  *  acquired to handle the multi-thread case.
1653  */
1654 void	nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr);
1655 void	nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr);
1656 int	nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr);
1657 
1658 int	nvme_ctrlr_reinitialize_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
1659 int	nvme_parse_addr(struct sockaddr_storage *sa, int family,
1660 			const char *addr, const char *service, long int *port);
1661 
1662 static inline bool
1663 _is_page_aligned(uint64_t address, uint64_t page_size)
1664 {
1665 	return (address & (page_size - 1)) == 0;
1666 }
1667 
1668 #endif /* __NVME_INTERNAL_H__ */
1669