xref: /spdk/lib/nvme/nvme_internal.h (revision 1a9ed697f0c1696ba6b5819e27e68a3fbbf3b223)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation. All rights reserved.
5  *   Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #ifndef __NVME_INTERNAL_H__
35 #define __NVME_INTERNAL_H__
36 
37 #include "spdk/config.h"
38 #include "spdk/likely.h"
39 #include "spdk/stdinc.h"
40 
41 #include "spdk/nvme.h"
42 
43 #if defined(__i386__) || defined(__x86_64__)
44 #include <x86intrin.h>
45 #endif
46 
47 #include "spdk/queue.h"
48 #include "spdk/barrier.h"
49 #include "spdk/bit_array.h"
50 #include "spdk/mmio.h"
51 #include "spdk/pci_ids.h"
52 #include "spdk/util.h"
53 #include "spdk/memory.h"
54 #include "spdk/nvme_intel.h"
55 #include "spdk/nvmf_spec.h"
56 #include "spdk/uuid.h"
57 
58 #include "spdk_internal/assert.h"
59 #include "spdk_internal/log.h"
60 
61 extern pid_t g_spdk_nvme_pid;
62 
63 /*
64  * Some Intel devices support vendor-unique read latency log page even
65  * though the log page directory says otherwise.
66  */
67 #define NVME_INTEL_QUIRK_READ_LATENCY 0x1
68 
69 /*
70  * Some Intel devices support vendor-unique write latency log page even
71  * though the log page directory says otherwise.
72  */
73 #define NVME_INTEL_QUIRK_WRITE_LATENCY 0x2
74 
75 /*
76  * The controller needs a delay before the host starts checking device
77  * readiness, which is done by reading the NVME_CSTS_RDY bit.
78  */
79 #define NVME_QUIRK_DELAY_BEFORE_CHK_RDY	0x4
80 
81 /*
82  * The controller performs best when I/O is split on particular
83  * LBA boundaries.
84  */
85 #define NVME_INTEL_QUIRK_STRIPING 0x8
86 
87 /*
88  * The controller needs a delay after allocating an I/O queue pair
89  * before it is ready to accept I/O commands.
90  */
91 #define NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC 0x10
92 
93 /*
94  * Earlier NVMe devices do not indicate whether unmapped blocks
95  * will read all zeroes or not. This define indicates that the
96  * device does in fact read all zeroes after an unmap event.
97  */
98 #define NVME_QUIRK_READ_ZERO_AFTER_DEALLOCATE 0x20
99 
100 /*
101  * The controller doesn't handle Identify CNS values other than 0 or 1 correctly.
102  */
103 #define NVME_QUIRK_IDENTIFY_CNS 0x40
104 
105 /*
106  * The controller supports the Open Channel command set if an additional
107  * condition is met, e.g. the first byte of the vendor specific area in the
108  * namespace identify structure is set to 0x1.
109  */
110 #define NVME_QUIRK_OCSSD 0x80
111 
112 /*
113  * The controller has an Intel vendor ID but does not support Intel vendor-specific
114  * log pages.  This is primarily for QEMU emulated SSDs which report an Intel vendor
115  * ID but do not support these log pages.
116  */
117 #define NVME_INTEL_QUIRK_NO_LOG_PAGES 0x100
118 
119 /*
120  * The controller does not set SHST_COMPLETE in a reasonable amount of time.  This
121  * is primarily seen in virtual VMware NVMe SSDs.  This quirk merely adds an additional
122  * error message noting that on VMware NVMe SSDs the shutdown timeout may be expected.
123  */
124 #define NVME_QUIRK_SHST_COMPLETE 0x200
125 
126 /*
127  * The controller requires an extra delay before starting the initialization process
128  * during attach.
129  */
130 #define NVME_QUIRK_DELAY_BEFORE_INIT 0x400
131 
132 /*
133  * Some SSDs exhibit poor performance with the default SPDK NVMe IO queue size.
134  * This quirk will increase the default to 1024, which matches other operating
135  * systems, at the cost of some extra memory usage.  Users can still override
136  * the increased default by changing the spdk_nvme_io_qpair_opts when allocating
137  * a new queue pair.
138  */
139 #define NVME_QUIRK_MINIMUM_IO_QUEUE_SIZE 0x800
140 
141 /**
142  * The maximum access width to PCI memory space is 8 bytes; don't use AVX2 or
143  * SSE instructions to optimize memory accesses (memcpy or memset) larger than
144  * 8 bytes.
145  */
146 #define NVME_QUIRK_MAXIMUM_PCI_ACCESS_WIDTH 0x1000
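
/*
 * The quirk values above are single-bit flags, so a device entry can OR
 * several of them together and the driver tests them individually against
 * spdk_nvme_ctrlr::quirks.  Illustrative sketch only (the real per-device
 * lookup is nvme_get_quirks(), declared below):
 *
 *	uint64_t quirks = NVME_INTEL_QUIRK_READ_LATENCY |
 *			  NVME_INTEL_QUIRK_WRITE_LATENCY;
 *
 *	if (quirks & NVME_INTEL_QUIRK_READ_LATENCY) {
 *		// fetch the vendor-unique read latency log page
 *	}
 */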
147 
148 #define NVME_MAX_ASYNC_EVENTS	(8)
149 
150 #define NVME_MAX_ADMIN_TIMEOUT_IN_SECS	(30)
151 
152 /* Maximum log page size to fetch for AERs. */
153 #define NVME_MAX_AER_LOG_SIZE		(4096)
154 
155 /*
156  * NVME_MAX_IO_QUEUES in nvme_spec.h defines the 64K spec-limit, but this
157  *  define specifies the maximum number of queues this driver will actually
158  *  try to configure, if available.
159  */
160 #define DEFAULT_MAX_IO_QUEUES		(1024)
161 #define DEFAULT_ADMIN_QUEUE_SIZE	(32)
162 #define DEFAULT_IO_QUEUE_SIZE		(256)
163 #define DEFAULT_IO_QUEUE_SIZE_FOR_QUIRK	(1024) /* Matches Linux kernel driver */
164 
165 #define DEFAULT_IO_QUEUE_REQUESTS	(512)
166 
167 #define SPDK_NVME_DEFAULT_RETRY_COUNT	(4)
168 
169 #define SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED	(0)
170 #define SPDK_NVME_DEFAULT_TRANSPORT_ACK_TIMEOUT	SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED
171 
172 #define MIN_KEEP_ALIVE_TIMEOUT_IN_MS	(10000)
173 
174 /* We want to fit submission and completion rings each in a single 2MB
175  * hugepage to ensure physical address contiguity.
176  */
177 #define MAX_IO_QUEUE_ENTRIES		(VALUE_2MB / spdk_max( \
178 						sizeof(struct spdk_nvme_cmd), \
179 						sizeof(struct spdk_nvme_cpl)))
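
/*
 * For reference, with the spec-defined entry sizes (64-byte submission queue
 * entries, 16-byte completion queue entries) this works out to
 * 2 MiB / 64 B = 32768 entries per I/O queue.
 */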
180 
181 enum nvme_payload_type {
182 	NVME_PAYLOAD_TYPE_INVALID = 0,
183 
184 	/** nvme_request::payload.contig_or_cb_arg is valid for this request */
185 	NVME_PAYLOAD_TYPE_CONTIG,
186 
187 	/** nvme_request::payload SGL callbacks (reset_sgl_fn/next_sge_fn) are valid for this request */
188 	NVME_PAYLOAD_TYPE_SGL,
189 };
190 
191 /**
192  * Descriptor for a request data payload.
193  */
194 struct nvme_payload {
195 	/**
196 	 * Functions for retrieving physical addresses for scattered payloads.
197 	 */
198 	spdk_nvme_req_reset_sgl_cb reset_sgl_fn;
199 	spdk_nvme_req_next_sge_cb next_sge_fn;
200 
201 	/**
202 	 * If reset_sgl_fn == NULL, this is a contig payload, and contig_or_cb_arg contains the
203 	 * virtual memory address of a single virtually contiguous buffer.
204 	 *
205 	 * If reset_sgl_fn != NULL, this is a SGL payload, and contig_or_cb_arg contains the
206 	 * cb_arg that will be passed to the SGL callback functions.
207 	 */
208 	void *contig_or_cb_arg;
209 
210 	/** Virtual memory address of a single virtually contiguous metadata buffer */
211 	void *md;
212 };
213 
214 #define NVME_PAYLOAD_CONTIG(contig_, md_) \
215 	(struct nvme_payload) { \
216 		.reset_sgl_fn = NULL, \
217 		.next_sge_fn = NULL, \
218 		.contig_or_cb_arg = (contig_), \
219 		.md = (md_), \
220 	}
221 
222 #define NVME_PAYLOAD_SGL(reset_sgl_fn_, next_sge_fn_, cb_arg_, md_) \
223 	(struct nvme_payload) { \
224 		.reset_sgl_fn = (reset_sgl_fn_), \
225 		.next_sge_fn = (next_sge_fn_), \
226 		.contig_or_cb_arg = (cb_arg_), \
227 		.md = (md_), \
228 	}
229 
230 static inline enum nvme_payload_type
231 nvme_payload_type(const struct nvme_payload *payload) {
232 	return payload->reset_sgl_fn ? NVME_PAYLOAD_TYPE_SGL : NVME_PAYLOAD_TYPE_CONTIG;
233 }
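
/*
 * Usage sketch (illustrative only): building the two payload flavors and
 * checking their type.  "buf", "reset_cb", "next_cb" and "ctx" are
 * hypothetical names, not part of the driver.
 *
 *	struct nvme_payload contig = NVME_PAYLOAD_CONTIG(buf, NULL);
 *	struct nvme_payload sgl = NVME_PAYLOAD_SGL(reset_cb, next_cb, ctx, NULL);
 *
 *	assert(nvme_payload_type(&contig) == NVME_PAYLOAD_TYPE_CONTIG);
 *	assert(nvme_payload_type(&sgl) == NVME_PAYLOAD_TYPE_SGL);
 */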
234 
235 struct nvme_error_cmd {
236 	bool				do_not_submit;
237 	uint64_t			timeout_tsc;
238 	uint32_t			err_count;
239 	uint8_t				opc;
240 	struct spdk_nvme_status		status;
241 	TAILQ_ENTRY(nvme_error_cmd)	link;
242 };
243 
244 struct nvme_request {
245 	struct spdk_nvme_cmd		cmd;
246 
247 	uint8_t				retries;
248 
249 	uint8_t				timed_out : 1;
250 
251 	/**
252 	 * True if the request is in the queued_req list.
253 	 */
254 	uint8_t				queued : 1;
255 	uint8_t				reserved : 6;
256 
257 	/**
258 	 * Number of child requests still outstanding for this
259 	 *  request which was split into multiple child requests.
260 	 */
261 	uint16_t			num_children;
262 
263 	/**
264 	 * Offset in bytes from the beginning of payload for this request.
265 	 * This is used for I/O commands that are split into multiple requests.
266 	 */
267 	uint32_t			payload_offset;
268 	uint32_t			md_offset;
269 
270 	uint32_t			payload_size;
271 
272 	/**
273 	 * Timeout ticks for error injection requests, can be extended in future
274 	 * to support per-request timeout feature.
275 	 */
276 	uint64_t			timeout_tsc;
277 
278 	/**
279 	 * Data payload for this request's command.
280 	 */
281 	struct nvme_payload		payload;
282 
283 	spdk_nvme_cmd_cb		cb_fn;
284 	void				*cb_arg;
285 	STAILQ_ENTRY(nvme_request)	stailq;
286 
287 	struct spdk_nvme_qpair		*qpair;
288 
289 	/*
290 	 * The value of spdk_get_ticks() when the request was submitted to the hardware.
291 	 * Only set if ctrlr->timeout_enabled is true.
292 	 */
293 	uint64_t			submit_tick;
294 
295 	/**
296 	 * The active admin request can be moved to a per process pending
297 	 *  list based on the saved pid to tell which process it belongs
298 	 *  to. The cpl saves the original completion information which
299 	 *  is used in the completion callback.
300 	 * NOTE: the two fields below are only used for admin requests.
301 	 */
302 	pid_t				pid;
303 	struct spdk_nvme_cpl		cpl;
304 
305 	uint32_t			md_size;
306 
307 	/**
308 	 * The following members should not be reordered with members
309 	 *  above.  These members are only needed when splitting
310 	 *  requests which is done rarely, and the driver is careful
311 	 *  to not touch the following fields until a split operation is
312 	 *  needed, to avoid touching an extra cacheline.
313 	 */
314 
315 	/**
316 	 * Points to the outstanding child requests for a parent request.
317 	 *  Only valid if a request was split into multiple child
318 	 *  requests, and is not initialized for non-split requests.
319 	 */
320 	TAILQ_HEAD(, nvme_request)	children;
321 
322 	/**
323 	 * Linked-list pointers for a child request in its parent's list.
324 	 */
325 	TAILQ_ENTRY(nvme_request)	child_tailq;
326 
327 	/**
328 	 * Points to a parent request if part of a split request,
329 	 *   NULL otherwise.
330 	 */
331 	struct nvme_request		*parent;
332 
333 	/**
334 	 * Completion status for a parent request.  Initialized to all 0's
335 	 *  (SUCCESS) before child requests are submitted.  If a child
336 	 *  request completes with error, the error status is copied here,
337 	 *  to ensure that the parent request is also completed with error
338 	 *  status once all child requests are completed.
339 	 */
340 	struct spdk_nvme_cpl		parent_status;
341 
342 	/**
343 	 * The user_cb_fn and user_cb_arg fields are used for holding the original
344 	 * callback data when using nvme_allocate_request_user_copy.
345 	 */
346 	spdk_nvme_cmd_cb		user_cb_fn;
347 	void				*user_cb_arg;
348 	void				*user_buffer;
349 };
350 
351 struct nvme_completion_poll_status {
352 	struct spdk_nvme_cpl	cpl;
353 	bool			done;
354 	/* This flag indicates that the request has timed out and the memory
355 	   must be freed in a completion callback. */
356 	bool			timed_out;
357 };
358 
359 struct nvme_async_event_request {
360 	struct spdk_nvme_ctrlr		*ctrlr;
361 	struct nvme_request		*req;
362 	struct spdk_nvme_cpl		cpl;
363 };
364 
365 enum nvme_qpair_state {
366 	NVME_QPAIR_DISCONNECTED,
367 	NVME_QPAIR_DISCONNECTING,
368 	NVME_QPAIR_CONNECTING,
369 	NVME_QPAIR_CONNECTED,
370 	NVME_QPAIR_ENABLING,
371 	NVME_QPAIR_ENABLED,
372 	NVME_QPAIR_DESTROYING,
373 };
374 
375 struct spdk_nvme_qpair {
376 	struct spdk_nvme_ctrlr			*ctrlr;
377 
378 	uint16_t				id;
379 
380 	uint8_t					qprio;
381 
382 	uint8_t					state : 3;
383 
384 	/*
385 	 * Members for handling IO qpair deletion inside of a completion context.
386 	 * These are specifically defined as single bits, so that they do not
387 	 *  push this data structure out to another cacheline.
388 	 */
389 	uint8_t					in_completion_context : 1;
390 	uint8_t					delete_after_completion_context: 1;
391 
392 	/*
393 	 * Set when no deletion notification is needed. For example, the process
394 	 * which allocated this qpair exited unexpectedly.
395 	 */
396 	uint8_t					no_deletion_notification_needed: 1;
397 
398 	uint8_t					first_fused_submitted: 1;
399 
400 	enum spdk_nvme_transport_type		trtype;
401 
402 	STAILQ_HEAD(, nvme_request)		free_req;
403 	STAILQ_HEAD(, nvme_request)		queued_req;
404 
405 	/* List entry for spdk_nvme_transport_poll_group::qpairs */
406 	STAILQ_ENTRY(spdk_nvme_qpair)		poll_group_stailq;
407 
408 	/** Commands whose opcode is in this list will return an error */
409 	TAILQ_HEAD(, nvme_error_cmd)		err_cmd_head;
410 	/** Requests in this list will return an error */
411 	STAILQ_HEAD(, nvme_request)		err_req_head;
412 
413 	/* List entry for spdk_nvme_ctrlr::active_io_qpairs */
414 	TAILQ_ENTRY(spdk_nvme_qpair)		tailq;
415 
416 	/* List entry for spdk_nvme_ctrlr_process::allocated_io_qpairs */
417 	TAILQ_ENTRY(spdk_nvme_qpair)		per_process_tailq;
418 
419 	struct spdk_nvme_ctrlr_process		*active_proc;
420 
421 	struct spdk_nvme_transport_poll_group	*poll_group;
422 
423 	void					*poll_group_tailq_head;
424 
425 	void					*req_buf;
426 
427 	const struct spdk_nvme_transport	*transport;
428 
429 	uint8_t					transport_failure_reason: 2;
430 };
431 
432 struct spdk_nvme_poll_group {
433 	void						*ctx;
434 	STAILQ_HEAD(, spdk_nvme_transport_poll_group)	tgroups;
435 };
436 
437 struct spdk_nvme_transport_poll_group {
438 	struct spdk_nvme_poll_group			*group;
439 	const struct spdk_nvme_transport		*transport;
440 	STAILQ_HEAD(, spdk_nvme_qpair)			connected_qpairs;
441 	STAILQ_HEAD(, spdk_nvme_qpair)			disconnected_qpairs;
442 	STAILQ_ENTRY(spdk_nvme_transport_poll_group)	link;
443 	bool						in_completion_context;
444 	uint64_t					num_qpairs_to_delete;
445 };
446 
447 struct spdk_nvme_ns {
448 	struct spdk_nvme_ctrlr		*ctrlr;
449 	uint32_t			sector_size;
450 
451 	/*
452 	 * Size of data transferred as part of each block,
453 	 * including metadata if FLBAS indicates the metadata is transferred
454 	 * as part of the data buffer at the end of each LBA.
455 	 */
456 	uint32_t			extended_lba_size;
457 
458 	uint32_t			md_size;
459 	uint32_t			pi_type;
460 	uint32_t			sectors_per_max_io;
461 	uint32_t			sectors_per_stripe;
462 	uint32_t			id;
463 	uint16_t			flags;
464 
465 	/* Namespace Identification Descriptor List (CNS = 03h) */
466 	uint8_t				id_desc_list[4096];
467 };
468 
469 /**
470  * State of struct spdk_nvme_ctrlr (in particular, during initialization).
471  */
472 enum nvme_ctrlr_state {
473 	/**
474 	 * Wait before initializing the controller.
475 	 */
476 	NVME_CTRLR_STATE_INIT_DELAY,
477 
478 	/**
479 	 * Controller has not been initialized yet.
480 	 */
481 	NVME_CTRLR_STATE_INIT,
482 
483 	/**
484 	 * Waiting for CSTS.RDY to transition from 0 to 1 so that CC.EN may be set to 0.
485 	 */
486 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
487 
488 	/**
489 	 * Waiting for CSTS.RDY to transition from 1 to 0 so that CC.EN may be set to 1.
490 	 */
491 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
492 
493 	/**
494 	 * Enable the controller by writing CC.EN to 1
495 	 */
496 	NVME_CTRLR_STATE_ENABLE,
497 
498 	/**
499 	 * Waiting for CSTS.RDY to transition from 0 to 1 after enabling the controller.
500 	 */
501 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
502 
503 	/**
504 	 * Reset the Admin queue of the controller.
505 	 */
506 	NVME_CTRLR_STATE_RESET_ADMIN_QUEUE,
507 
508 	/**
509 	 * Identify Controller command will be sent to the controller.
510 	 */
511 	NVME_CTRLR_STATE_IDENTIFY,
512 
513 	/**
514 	 * Waiting for the Identify Controller command to be completed.
515 	 */
516 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
517 
518 	/**
519 	 * Set Number of Queues of the controller.
520 	 */
521 	NVME_CTRLR_STATE_SET_NUM_QUEUES,
522 
523 	/**
524 	 * Waiting for the Set Number of Queues command to be completed.
525 	 */
526 	NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
527 
528 	/**
529 	 * Construct Namespace data structures of the controller.
530 	 */
531 	NVME_CTRLR_STATE_CONSTRUCT_NS,
532 
533 	/**
534 	 * Get active Namespace list of the controller.
535 	 */
536 	NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
537 
538 	/**
539 	 * Waiting for the Identify Active Namespace commands to be completed.
540 	 */
541 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS,
542 
543 	/**
544 	 * Get Identify Namespace Data structure for each NS.
545 	 */
546 	NVME_CTRLR_STATE_IDENTIFY_NS,
547 
548 	/**
549 	 * Waiting for the Identify Namespace commands to be completed.
550 	 */
551 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
552 
553 	/**
554 	 * Get Identify Namespace Identification Descriptors.
555 	 */
556 	NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
557 
558 	/**
559 	 * Waiting for the Identify Namespace Identification
560 	 * Descriptors to be completed.
561 	 */
562 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
563 
564 	/**
565 	 * Configure AER of the controller.
566 	 */
567 	NVME_CTRLR_STATE_CONFIGURE_AER,
568 
569 	/**
570 	 * Waiting for the Configure AER to be completed.
571 	 */
572 	NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
573 
574 	/**
575 	 * Set supported log pages of the controller.
576 	 */
577 	NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
578 
579 	/**
580 	 * Set supported features of the controller.
581 	 */
582 	NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
583 
584 	/**
585 	 * Set Doorbell Buffer Config of the controller.
586 	 */
587 	NVME_CTRLR_STATE_SET_DB_BUF_CFG,
588 
589 	/**
590 	 * Waiting for Doorbell Buffer Config to be completed.
591 	 */
592 	NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
593 
594 	/**
595 	 * Set Keep Alive Timeout of the controller.
596 	 */
597 	NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
598 
599 	/**
600 	 * Waiting for Set Keep Alive Timeout to be completed.
601 	 */
602 	NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
603 
604 	/**
605 	 * Set Host ID of the controller.
606 	 */
607 	NVME_CTRLR_STATE_SET_HOST_ID,
608 
609 	/**
610 	 * Waiting for Set Host ID to be completed.
611 	 */
612 	NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
613 
614 	/**
615 	 * Controller initialization has completed and the controller is ready.
616 	 */
617 	NVME_CTRLR_STATE_READY,
618 
619 	/**
620 	 * Controller initialization has encountered an error.
621 	 */
622 	NVME_CTRLR_STATE_ERROR
623 };
624 
625 #define NVME_TIMEOUT_INFINITE	0
626 
627 /*
628  * Used to track properties for all processes accessing the controller.
629  */
630 struct spdk_nvme_ctrlr_process {
631 	/** Whether it is the primary process  */
632 	bool						is_primary;
633 
634 	/** Process ID */
635 	pid_t						pid;
636 
637 	/** Active admin requests to be completed */
638 	STAILQ_HEAD(, nvme_request)			active_reqs;
639 
640 	TAILQ_ENTRY(spdk_nvme_ctrlr_process)		tailq;
641 
642 	/** Per process PCI device handle */
643 	struct spdk_pci_device				*devhandle;
644 
645 	/** Reference count to track the number of attachments to this controller. */
646 	int						ref;
647 
648 	/** Allocated IO qpairs */
649 	TAILQ_HEAD(, spdk_nvme_qpair)			allocated_io_qpairs;
650 
651 	spdk_nvme_aer_cb				aer_cb_fn;
652 	void						*aer_cb_arg;
653 
654 	/**
655 	 * A function pointer to the timeout callback function
656 	 */
657 	spdk_nvme_timeout_cb		timeout_cb_fn;
658 	void				*timeout_cb_arg;
659 	uint64_t			timeout_ticks;
660 };
661 
662 /*
663  * One of these per allocated PCI device.
664  */
665 struct spdk_nvme_ctrlr {
666 	/* Hot data (accessed in I/O path) starts here. */
667 
668 	/** Array of namespaces indexed by nsid - 1 */
669 	struct spdk_nvme_ns		*ns;
670 
671 	uint32_t			num_ns;
672 
673 	bool				is_removed;
674 
675 	bool				is_resetting;
676 
677 	bool				is_failed;
678 
679 	bool				is_destructed;
680 
681 	bool				timeout_enabled;
682 
683 	uint16_t			max_sges;
684 
685 	uint16_t			cntlid;
686 
687 	/** Controller support flags */
688 	uint64_t			flags;
689 
690 	/** NVMEoF in-capsule data size in bytes */
691 	uint32_t			ioccsz_bytes;
692 
693 	/** NVMEoF in-capsule data offset in 16 byte units */
694 	uint16_t			icdoff;
695 
696 	/* Cold data (not accessed in normal I/O path) is after this point. */
697 
698 	struct spdk_nvme_transport_id	trid;
699 
700 	union spdk_nvme_cap_register	cap;
701 	union spdk_nvme_vs_register	vs;
702 
703 	enum nvme_ctrlr_state		state;
704 	uint64_t			state_timeout_tsc;
705 
706 	uint64_t			next_keep_alive_tick;
707 	uint64_t			keep_alive_interval_ticks;
708 
709 	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
710 
711 	/** All the log pages supported */
712 	bool				log_page_supported[256];
713 
714 	/** All the features supported */
715 	bool				feature_supported[256];
716 
717 	/** maximum I/O size in bytes */
718 	uint32_t			max_xfer_size;
719 
720 	/** minimum page size supported by this controller in bytes */
721 	uint32_t			min_page_size;
722 
723 	/** selected memory page size for this controller in bytes */
724 	uint32_t			page_size;
725 
726 	uint32_t			num_aers;
727 	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];
728 
729 	/** guards access to the controller itself, including admin queues */
730 	pthread_mutex_t			ctrlr_lock;
731 
732 	struct spdk_nvme_qpair		*adminq;
733 
734 	/** shadow doorbell buffer */
735 	uint32_t			*shadow_doorbell;
736 	/** eventidx buffer */
737 	uint32_t			*eventidx;
738 
739 	/**
740 	 * Identify Controller data.
741 	 */
742 	struct spdk_nvme_ctrlr_data	cdata;
743 
744 	/**
745 	 * Keep track of active namespaces
746 	 */
747 	uint32_t			*active_ns_list;
748 
749 	/**
750 	 * Array of Identify Namespace data.
751 	 *
752 	 * Stored separately from ns since nsdata should not normally be accessed during I/O.
753 	 */
754 	struct spdk_nvme_ns_data	*nsdata;
755 
756 	struct spdk_bit_array		*free_io_qids;
757 	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
758 
759 	struct spdk_nvme_ctrlr_opts	opts;
760 
761 	uint64_t			quirks;
762 
763 	/* Extra sleep time during controller initialization */
764 	uint64_t			sleep_timeout_tsc;
765 
766 	/** Track all the processes that manage this controller */
767 	TAILQ_HEAD(, spdk_nvme_ctrlr_process)	active_procs;
768 
769 
770 	STAILQ_HEAD(, nvme_request)	queued_aborts;
771 	uint32_t			outstanding_aborts;
772 
773 	/* CB to notify the user when the ctrlr is removed/failed. */
774 	spdk_nvme_remove_cb			remove_cb;
775 	void					*cb_ctx;
776 
777 	struct spdk_nvme_qpair		*external_io_msgs_qpair;
778 	pthread_mutex_t			external_io_msgs_lock;
779 	struct spdk_ring		*external_io_msgs;
780 
781 	STAILQ_HEAD(, nvme_io_msg_producer) io_producers;
782 };
783 
784 struct spdk_nvme_probe_ctx {
785 	struct spdk_nvme_transport_id		trid;
786 	void					*cb_ctx;
787 	spdk_nvme_probe_cb			probe_cb;
788 	spdk_nvme_attach_cb			attach_cb;
789 	spdk_nvme_remove_cb			remove_cb;
790 	TAILQ_HEAD(, spdk_nvme_ctrlr)		init_ctrlrs;
791 };
792 
793 struct nvme_driver {
794 	pthread_mutex_t			lock;
795 
796 	/** Multi-process shared attached controller list */
797 	TAILQ_HEAD(, spdk_nvme_ctrlr)	shared_attached_ctrlrs;
798 
799 	bool				initialized;
800 	struct spdk_uuid		default_extended_host_id;
801 };
802 
803 extern struct nvme_driver *g_spdk_nvme_driver;
804 
805 int nvme_driver_init(void);
806 
807 #define nvme_delay		usleep
808 
809 static inline bool
810 nvme_qpair_is_admin_queue(struct spdk_nvme_qpair *qpair)
811 {
812 	return qpair->id == 0;
813 }
814 
815 static inline bool
816 nvme_qpair_is_io_queue(struct spdk_nvme_qpair *qpair)
817 {
818 	return qpair->id != 0;
819 }
820 
821 static inline int
822 nvme_robust_mutex_lock(pthread_mutex_t *mtx)
823 {
824 	int rc = pthread_mutex_lock(mtx);
825 
826 #ifndef __FreeBSD__
827 	if (rc == EOWNERDEAD) {
828 		rc = pthread_mutex_consistent(mtx);
829 	}
830 #endif
831 
832 	return rc;
833 }
834 
835 static inline int
836 nvme_robust_mutex_unlock(pthread_mutex_t *mtx)
837 {
838 	return pthread_mutex_unlock(mtx);
839 }
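
/*
 * The EOWNERDEAD handling above only matters for a robust, process-shared
 * mutex.  Minimal initialization sketch using standard pthreads (the
 * driver's own helper is nvme_robust_mutex_init_shared(), declared below):
 *
 *	pthread_mutexattr_t attr;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
 *	pthread_mutex_init(mtx, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */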
840 
841 /* Poll group management functions. */
842 int nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
843 int nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
844 
845 /* Admin functions */
846 int	nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr,
847 				uint8_t cns, uint16_t cntid, uint32_t nsid,
848 				void *payload, size_t payload_size,
849 				spdk_nvme_cmd_cb cb_fn, void *cb_arg);
850 int	nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
851 				      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn,
852 				      void *cb_arg);
853 int	nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
854 				      spdk_nvme_cmd_cb cb_fn, void *cb_arg);
855 int	nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
856 		union spdk_nvme_feat_async_event_configuration config,
857 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
858 int	nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
859 				   spdk_nvme_cmd_cb cb_fn, void *cb_arg);
860 int	nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
861 				 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
862 int	nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
863 				 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
864 int	nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
865 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg);
866 int	nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr,
867 		uint64_t prp1, uint64_t prp2,
868 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
869 int	nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
870 				 void *cb_arg);
871 int	nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
872 			      struct spdk_nvme_format *format, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
873 int	nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
874 				 const struct spdk_nvme_fw_commit *fw_commit,
875 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg);
876 int	nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
877 		uint32_t size, uint32_t offset, void *payload,
878 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
879 int	nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
880 				struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
881 				spdk_nvme_cmd_cb cb_fn, void *cb_arg);
882 void	nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl);
883 int	spdk_nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
884 				      struct nvme_completion_poll_status *status);
885 int	spdk_nvme_wait_for_completion_robust_lock(struct spdk_nvme_qpair *qpair,
886 		struct nvme_completion_poll_status *status,
887 		pthread_mutex_t *robust_mutex);
888 int	spdk_nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
889 		struct nvme_completion_poll_status *status,
890 		uint64_t timeout_in_secs);
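
/*
 * Typical synchronous admin-command idiom built on the helpers above
 * (abbreviated, illustrative sketch; "payload" is a caller-provided
 * spdk_nvme_ctrlr_data buffer):
 *
 *	struct nvme_completion_poll_status *status = calloc(1, sizeof(*status));
 *
 *	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
 *				     payload, sizeof(struct spdk_nvme_ctrlr_data),
 *				     nvme_completion_poll_cb, status);
 *	if (rc == 0) {
 *		rc = spdk_nvme_wait_for_completion(ctrlr->adminq, status);
 *	}
 *	if (!status->timed_out) {
 *		free(status);
 *	}
 */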
891 
892 struct spdk_nvme_ctrlr_process *spdk_nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr,
893 		pid_t pid);
894 struct spdk_nvme_ctrlr_process *spdk_nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr);
895 int	nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle);
896 void	nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr);
897 struct spdk_pci_device *nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr);
898 
899 int	nvme_ctrlr_probe(const struct spdk_nvme_transport_id *trid,
900 			 struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle);
901 
902 int	nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr);
903 void	nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr);
904 void	nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
905 void	nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove);
906 int	nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr);
907 int	nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr);
908 void	nvme_ctrlr_connected(struct spdk_nvme_probe_ctx *probe_ctx,
909 			     struct spdk_nvme_ctrlr *ctrlr);
910 
911 int	nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
912 					struct nvme_request *req);
913 int	nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap);
914 int	nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs);
915 int	nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz);
916 void	nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_register *cap,
917 			    const union spdk_nvme_vs_register *vs);
918 void nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair);
919 int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
920 		    struct spdk_nvme_ctrlr *ctrlr,
921 		    enum spdk_nvme_qprio qprio,
922 		    uint32_t num_requests);
923 void	nvme_qpair_deinit(struct spdk_nvme_qpair *qpair);
924 void	nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair);
925 int	nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair,
926 				  struct nvme_request *req);
927 void	nvme_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
928 
929 int	nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr);
930 void	nvme_ns_set_identify_data(struct spdk_nvme_ns *ns);
931 int	nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
932 			  struct spdk_nvme_ctrlr *ctrlr);
933 void	nvme_ns_destruct(struct spdk_nvme_ns *ns);
934 int	nvme_ns_update(struct spdk_nvme_ns *ns);
935 
936 int	nvme_fabric_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
937 int	nvme_fabric_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
938 int	nvme_fabric_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
939 int	nvme_fabric_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
940 int	nvme_fabric_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
941 int	nvme_fabric_ctrlr_discover(struct spdk_nvme_ctrlr *ctrlr,
942 				   struct spdk_nvme_probe_ctx *probe_ctx);
943 int	nvme_fabric_qpair_connect(struct spdk_nvme_qpair *qpair, uint32_t num_entries);
944 
945 static inline struct nvme_request *
946 nvme_allocate_request(struct spdk_nvme_qpair *qpair,
947 		      const struct nvme_payload *payload, uint32_t payload_size, uint32_t md_size,
948 		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
949 {
950 	struct nvme_request *req;
951 
952 	req = STAILQ_FIRST(&qpair->free_req);
953 	if (req == NULL) {
954 		return req;
955 	}
956 
957 	STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);
958 
959 	/*
960 	 * Only memset/zero fields that need it.  All other fields
961 	 *  will be initialized appropriately either later in this
962 	 *  function, or before they are needed later in the
963 	 *  submission path.  For example, the children
964 	 *  TAILQ_ENTRY and following members are
965 	 *  only used as part of I/O splitting so we avoid
966 	 *  memsetting them until it is actually needed.
967 	 *  They will be initialized in nvme_request_add_child()
968 	 *  if the request is split.
969 	 */
970 	memset(req, 0, offsetof(struct nvme_request, payload_size));
971 
972 	req->cb_fn = cb_fn;
973 	req->cb_arg = cb_arg;
974 	req->payload = *payload;
975 	req->payload_size = payload_size;
976 	req->md_size = md_size;
977 	req->pid = g_spdk_nvme_pid;
978 	req->submit_tick = 0;
979 
980 	return req;
981 }
982 
983 static inline struct nvme_request *
984 nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair,
985 			     void *buffer, uint32_t payload_size,
986 			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
987 {
988 	struct nvme_payload payload;
989 
990 	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
991 
992 	return nvme_allocate_request(qpair, &payload, payload_size, 0, cb_fn, cb_arg);
993 }
994 
995 static inline struct nvme_request *
996 nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
997 {
998 	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
999 }
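
/*
 * Putting the allocation helpers together: an abbreviated, illustrative
 * flow for issuing a command ("buf", "len" and "done_cb" are hypothetical):
 *
 *	struct nvme_request *req;
 *
 *	req = nvme_allocate_request_contig(qpair, buf, len, done_cb, cb_arg);
 *	if (req == NULL) {
 *		return -ENOMEM;
 *	}
 *	req->cmd.opc = SPDK_NVME_OPC_READ;
 *	req->cmd.nsid = nsid;
 *	// ... fill in the remaining command dwords ...
 *	rc = nvme_qpair_submit_request(qpair, req);
 */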
1000 
1001 struct nvme_request *nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
1002 		void *buffer, uint32_t payload_size,
1003 		spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller);
1004 
1005 static inline void
1006 nvme_complete_request(spdk_nvme_cmd_cb cb_fn, void *cb_arg, struct spdk_nvme_qpair *qpair,
1007 		      struct nvme_request *req, struct spdk_nvme_cpl *cpl)
1008 {
1009 	struct spdk_nvme_cpl            err_cpl;
1010 	struct nvme_error_cmd           *cmd;
1011 
1012 	/* Error injection at the completion path;
1013 	 * only inject errors for successfully completed commands.
1014 	 */
1015 	if (spdk_unlikely(!TAILQ_EMPTY(&qpair->err_cmd_head) &&
1016 			  !spdk_nvme_cpl_is_error(cpl))) {
1017 		TAILQ_FOREACH(cmd, &qpair->err_cmd_head, link) {
1018 
1019 			if (cmd->do_not_submit) {
1020 				continue;
1021 			}
1022 
1023 			if ((cmd->opc == req->cmd.opc) && cmd->err_count) {
1024 
1025 				err_cpl = *cpl;
1026 				err_cpl.status.sct = cmd->status.sct;
1027 				err_cpl.status.sc = cmd->status.sc;
1028 
1029 				cpl = &err_cpl;
1030 				cmd->err_count--;
1031 				break;
1032 			}
1033 		}
1034 	}
1035 
1036 	if (cb_fn) {
1037 		cb_fn(cb_arg, cpl);
1038 	}
1039 }
1040 
1041 static inline void
1042 nvme_free_request(struct nvme_request *req)
1043 {
1044 	assert(req != NULL);
1045 	assert(req->num_children == 0);
1046 	assert(req->qpair != NULL);
1047 
1048 	STAILQ_INSERT_HEAD(&req->qpair->free_req, req, stailq);
1049 }
1050 
1051 static inline void
1052 nvme_qpair_set_state(struct spdk_nvme_qpair *qpair, enum nvme_qpair_state state)
1053 {
1054 	qpair->state = state;
1055 }
1056 
1057 static inline enum nvme_qpair_state
1058 nvme_qpair_get_state(struct spdk_nvme_qpair *qpair) {
1059 	return qpair->state;
1060 }
1061 
1062 static inline void
1063 nvme_qpair_free_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
1064 {
1065 	assert(req != NULL);
1066 	assert(req->num_children == 0);
1067 
1068 	STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
1069 }
1070 
1071 static inline void
1072 nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child)
1073 {
1074 	assert(parent != NULL);
1075 	assert(child != NULL);
1076 	assert(child->parent == parent);
1077 	assert(parent->num_children != 0);
1078 
1079 	parent->num_children--;
1080 	child->parent = NULL;
1081 	TAILQ_REMOVE(&parent->children, child, child_tailq);
1082 }
1083 
1084 static inline void
1085 nvme_cb_complete_child(void *child_arg, const struct spdk_nvme_cpl *cpl)
1086 {
1087 	struct nvme_request *child = child_arg;
1088 	struct nvme_request *parent = child->parent;
1089 
1090 	nvme_request_remove_child(parent, child);
1091 
1092 	if (spdk_nvme_cpl_is_error(cpl)) {
1093 		memcpy(&parent->parent_status, cpl, sizeof(*cpl));
1094 	}
1095 
1096 	if (parent->num_children == 0) {
1097 		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
1098 				      parent, &parent->parent_status);
1099 		nvme_free_request(parent);
1100 	}
1101 }
1102 
1103 static inline void
1104 nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
1105 {
1106 	assert(parent->num_children != UINT16_MAX);
1107 
1108 	if (parent->num_children == 0) {
1109 		/*
1110 		 * Defer initialization of the children TAILQ since it falls
1111 		 *  on a separate cacheline.  This ensures we do not touch this
1112 		 *  cacheline except on request splitting cases, which are
1113 		 *  relatively rare.
1114 		 */
1115 		TAILQ_INIT(&parent->children);
1116 		parent->parent = NULL;
1117 		memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));
1118 	}
1119 
1120 	parent->num_children++;
1121 	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
1122 	child->parent = parent;
1123 	child->cb_fn = nvme_cb_complete_child;
1124 	child->cb_arg = child;
1125 }
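
/*
 * Splitting sketch (illustrative only; the real splitting logic lives in the
 * I/O submission path): a parent request is allocated first, each child is
 * attached with nvme_request_add_child(), and the parent completes through
 * nvme_cb_complete_child() once the last child finishes.  "num_chunks",
 * "buf", "off" and "chunk_len" are hypothetical.
 *
 *	parent = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
 *	for (i = 0; i < num_chunks; i++) {
 *		child = nvme_allocate_request_contig(qpair, buf + off, chunk_len,
 *						     NULL, NULL);
 *		nvme_request_add_child(parent, child);
 *	}
 */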
1126 
1127 static inline void
1128 nvme_request_free_children(struct nvme_request *req)
1129 {
1130 	struct nvme_request *child, *tmp;
1131 
1132 	if (req->num_children == 0) {
1133 		return;
1134 	}
1135 
1136 	/* free all child nvme_request */
1137 	TAILQ_FOREACH_SAFE(child, &req->children, child_tailq, tmp) {
1138 		nvme_request_remove_child(req, child);
1139 		nvme_request_free_children(child);
1140 		nvme_free_request(child);
1141 	}
1142 }
1143 
1144 int	nvme_request_check_timeout(struct nvme_request *req, uint16_t cid,
1145 				   struct spdk_nvme_ctrlr_process *active_proc, uint64_t now_tick);
1146 uint64_t nvme_get_quirks(const struct spdk_pci_id *id);
1147 
1148 int	nvme_robust_mutex_init_shared(pthread_mutex_t *mtx);
1149 int	nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx);
1150 
1151 bool	nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl);
1152 
1153 struct spdk_nvme_ctrlr *nvme_get_ctrlr_by_trid_unsafe(
1154 	const struct spdk_nvme_transport_id *trid);
1155 
1156 const struct spdk_nvme_transport *nvme_get_transport(const char *transport_name);
1157 const struct spdk_nvme_transport *nvme_get_first_transport(void);
1158 const struct spdk_nvme_transport *nvme_get_next_transport(const struct spdk_nvme_transport
1159 		*transport);
1160 
1161 /* Transport specific functions */
1162 struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
1163 		const struct spdk_nvme_ctrlr_opts *opts,
1164 		void *devhandle);
1165 int nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1166 int nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
1167 int nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr);
1168 int nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
1169 int nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
1170 int nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
1171 int nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
1172 uint32_t nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr);
1173 uint16_t nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);
1174 struct spdk_nvme_qpair *nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1175 		uint16_t qid, const struct spdk_nvme_io_qpair_opts *opts);
1176 int nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr);
1177 void *nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
1178 int nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr);
1179 int nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1180 		struct spdk_nvme_qpair *qpair);
1181 int nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1182 				       struct spdk_nvme_qpair *qpair);
1183 void nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1184 		struct spdk_nvme_qpair *qpair);
1185 void nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
1186 int nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair);
1187 int nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req);
1188 int32_t nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair,
1189 		uint32_t max_completions);
1190 void nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair);
1191 struct spdk_nvme_transport_poll_group *nvme_transport_poll_group_create(
1192 	const struct spdk_nvme_transport *transport);
1193 int nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
1194 				  struct spdk_nvme_qpair *qpair);
1195 int nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
1196 				     struct spdk_nvme_qpair *qpair);
1197 int nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1198 int nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
1199 int64_t nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
1200 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
1201 int nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup);
1202 /*
1203  * The ref-related functions below must be called with the global
1204  *  driver lock held to handle the multi-process case.
1205  *  Within these functions, the per-controller ctrlr_lock is also
1206  *  acquired to handle the multi-thread case.
1207  */
1208 void	nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr);
1209 void	nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr);
1210 int	nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr);
1211 
1212 static inline bool
1213 _is_page_aligned(uint64_t address, uint64_t page_size)
1214 {
1215 	return (address & (page_size - 1)) == 0;
1216 }
1217 
1218 #endif /* __NVME_INTERNAL_H__ */
1219