xref: /spdk/lib/nvme/nvme_internal.h (revision b30d57cdad6d2bc75cc1e4e2ebbcebcb0d98dcfa)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation. All rights reserved.
5  *   Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #ifndef __NVME_INTERNAL_H__
35 #define __NVME_INTERNAL_H__
36 
37 #include "spdk/config.h"
38 #include "spdk/likely.h"
39 #include "spdk/stdinc.h"
40 
41 #include "spdk/nvme.h"
42 
43 #if defined(__i386__) || defined(__x86_64__)
44 #include <x86intrin.h>
45 #endif
46 
47 #include "spdk/queue.h"
48 #include "spdk/barrier.h"
49 #include "spdk/bit_array.h"
50 #include "spdk/mmio.h"
51 #include "spdk/pci_ids.h"
52 #include "spdk/util.h"
53 #include "spdk/memory.h"
54 #include "spdk/nvme_intel.h"
55 #include "spdk/nvmf_spec.h"
56 #include "spdk/uuid.h"
57 
58 #include "spdk_internal/assert.h"
59 #include "spdk/log.h"
60 
61 extern pid_t g_spdk_nvme_pid;
62 
63 /*
64  * Some Intel devices support vendor-unique read latency log page even
65  * though the log page directory says otherwise.
66  */
67 #define NVME_INTEL_QUIRK_READ_LATENCY 0x1
68 
69 /*
70  * Some Intel devices support vendor-unique write latency log page even
71  * though the log page directory says otherwise.
72  */
73 #define NVME_INTEL_QUIRK_WRITE_LATENCY 0x2
74 
75 /*
76  * The controller needs a delay before it starts checking the device
77  * readiness, which is done by reading the NVME_CSTS_RDY bit.
78  */
79 #define NVME_QUIRK_DELAY_BEFORE_CHK_RDY	0x4
80 
81 /*
82  * The controller performs best when I/O is split on particular
83  * LBA boundaries.
84  */
85 #define NVME_INTEL_QUIRK_STRIPING 0x8
86 
87 /*
88  * The controller needs a delay after allocating an I/O queue pair
89  * before it is ready to accept I/O commands.
90  */
91 #define NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC 0x10
92 
93 /*
94  * Earlier NVMe devices do not indicate whether unmapped blocks
95  * will read all zeroes or not. This define indicates that the
96  * device does in fact read all zeroes after an unmap event.
97  */
98 #define NVME_QUIRK_READ_ZERO_AFTER_DEALLOCATE 0x20
99 
100 /*
101  * The controller doesn't handle Identify CNS values other than 0 or 1 correctly.
102  */
103 #define NVME_QUIRK_IDENTIFY_CNS 0x40
104 
105 /*
106  * The controller supports the Open Channel command set if an additional
107  * condition is met, e.g. the first byte of the vendor specific region
108  * of the namespace identify structure is set to 0x1.
109  */
110 #define NVME_QUIRK_OCSSD 0x80
111 
112 /*
113  * The controller has an Intel vendor ID but does not support Intel vendor-specific
114  * log pages.  This is primarily for QEMU emulated SSDs which report an Intel vendor
115  * ID but do not support these log pages.
116  */
117 #define NVME_INTEL_QUIRK_NO_LOG_PAGES 0x100
118 
119 /*
120  * The controller does not set SHST_COMPLETE in a reasonable amount of time.  This
121  * is primarily seen in virtual VMware NVMe SSDs.  This quirk merely adds an additional
122  * error message noting that on VMware NVMe SSDs the shutdown timeout may be expected.
123  */
124 #define NVME_QUIRK_SHST_COMPLETE 0x200
125 
126 /*
127  * The controller requires an extra delay before starting the initialization process
128  * during attach.
129  */
130 #define NVME_QUIRK_DELAY_BEFORE_INIT 0x400
131 
132 /*
133  * Some SSDs exhibit poor performance with the default SPDK NVMe IO queue size.
134  * This quirk will increase the default to 1024, which matches other operating
135  * systems, at the cost of some extra memory usage.  Users can still override
136  * the increased default by changing the spdk_nvme_io_qpair_opts when allocating
137  * a new queue pair.
138  */
139 #define NVME_QUIRK_MINIMUM_IO_QUEUE_SIZE 0x800
140 
141 /**
142  * The maximum access width to PCI memory space is 8 bytes; don't use AVX2 or
143  * SSE instructions to optimize memory accesses (memcpy or memset) larger than
144  * 8 bytes.
145  */
146 #define NVME_QUIRK_MAXIMUM_PCI_ACCESS_WIDTH 0x1000
147 
148 /**
149  * The SSD does not support OPAL even though it sets the security bit in OACS.
150  */
151 #define NVME_QUIRK_OACS_SECURITY 0x2000
152 
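/*
 * Quirks are bit flags stored in spdk_nvme_ctrlr::quirks (populated via
 * nvme_get_quirks() from the PCI ID), so driver code can test one with a
 * simple mask.  Illustrative sketch only; the delay value is hypothetical:
 *
 *	if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) {
 *		nvme_delay(2000);
 *	}
 */
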
153 #define NVME_MAX_ASYNC_EVENTS	(8)
154 
155 #define NVME_MAX_ADMIN_TIMEOUT_IN_SECS	(30)
156 
157 /* Maximum log page size to fetch for AERs. */
158 #define NVME_MAX_AER_LOG_SIZE		(4096)
159 
160 /*
161  * NVME_MAX_IO_QUEUES in nvme_spec.h defines the 64K spec-limit, but this
162  *  define specifies the maximum number of queues this driver will actually
163  *  try to configure, if available.
164  */
165 #define DEFAULT_MAX_IO_QUEUES		(1024)
166 #define DEFAULT_ADMIN_QUEUE_SIZE	(32)
167 #define DEFAULT_IO_QUEUE_SIZE		(256)
168 #define DEFAULT_IO_QUEUE_SIZE_FOR_QUIRK	(1024) /* Matches Linux kernel driver */
169 
170 #define DEFAULT_IO_QUEUE_REQUESTS	(512)
171 
172 #define SPDK_NVME_DEFAULT_RETRY_COUNT	(4)
173 
174 #define SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED	(0)
175 #define SPDK_NVME_DEFAULT_TRANSPORT_ACK_TIMEOUT	SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED
176 
177 #define MIN_KEEP_ALIVE_TIMEOUT_IN_MS	(10000)
178 
179 /* We want to fit submission and completion rings each in a single 2MB
180  * hugepage to ensure physical address contiguity.
181  */
182 #define MAX_IO_QUEUE_ENTRIES		(VALUE_2MB / spdk_max( \
183 						sizeof(struct spdk_nvme_cmd), \
184 						sizeof(struct spdk_nvme_cpl)))
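
/*
 * For example, with the 64-byte submission queue entry (struct spdk_nvme_cmd)
 * and 16-byte completion queue entry (struct spdk_nvme_cpl) defined by the
 * NVMe spec, spdk_max() selects 64 bytes, so MAX_IO_QUEUE_ENTRIES works out
 * to 2MB / 64B = 32768 entries.
 */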
185 
186 /* Default timeout for fabrics connect commands. */
187 #ifdef DEBUG
188 #define NVME_FABRIC_CONNECT_COMMAND_TIMEOUT 0
189 #else
190 /* 500 millisecond timeout. */
191 #define NVME_FABRIC_CONNECT_COMMAND_TIMEOUT 500000
192 #endif
193 
194 /* This value indicates that a read from a PCIe register is invalid. This can happen when a device is no longer present */
195 #define SPDK_NVME_INVALID_REGISTER_VALUE 0xFFFFFFFFu
196 
197 enum nvme_payload_type {
198 	NVME_PAYLOAD_TYPE_INVALID = 0,
199 
200 	/** nvme_request::u.payload.contig_buffer is valid for this request */
201 	NVME_PAYLOAD_TYPE_CONTIG,
202 
203 	/** nvme_request::u.sgl is valid for this request */
204 	NVME_PAYLOAD_TYPE_SGL,
205 };
206 
207 /**
208  * Descriptor for a request data payload.
209  */
210 struct nvme_payload {
211 	/**
212 	 * Functions for retrieving physical addresses for scattered payloads.
213 	 */
214 	spdk_nvme_req_reset_sgl_cb reset_sgl_fn;
215 	spdk_nvme_req_next_sge_cb next_sge_fn;
216 
217 	/**
218 	 * If reset_sgl_fn == NULL, this is a contig payload, and contig_or_cb_arg contains the
219 	 * virtual memory address of a single virtually contiguous buffer.
220 	 *
221 	 * If reset_sgl_fn != NULL, this is a SGL payload, and contig_or_cb_arg contains the
222 	 * cb_arg that will be passed to the SGL callback functions.
223 	 */
224 	void *contig_or_cb_arg;
225 
226 	/** Virtual memory address of a single virtually contiguous metadata buffer */
227 	void *md;
228 };
229 
230 #define NVME_PAYLOAD_CONTIG(contig_, md_) \
231 	(struct nvme_payload) { \
232 		.reset_sgl_fn = NULL, \
233 		.next_sge_fn = NULL, \
234 		.contig_or_cb_arg = (contig_), \
235 		.md = (md_), \
236 	}
237 
238 #define NVME_PAYLOAD_SGL(reset_sgl_fn_, next_sge_fn_, cb_arg_, md_) \
239 	(struct nvme_payload) { \
240 		.reset_sgl_fn = (reset_sgl_fn_), \
241 		.next_sge_fn = (next_sge_fn_), \
242 		.contig_or_cb_arg = (cb_arg_), \
243 		.md = (md_), \
244 	}
245 
246 static inline enum nvme_payload_type
247 nvme_payload_type(const struct nvme_payload *payload) {
248 	return payload->reset_sgl_fn ? NVME_PAYLOAD_TYPE_SGL : NVME_PAYLOAD_TYPE_CONTIG;
249 }
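
/*
 * Illustrative sketch (buffer and callback names are hypothetical) of the two
 * payload flavors and how nvme_payload_type() tells them apart:
 *
 *	struct nvme_payload contig = NVME_PAYLOAD_CONTIG(data_buf, md_buf);
 *	struct nvme_payload sgl = NVME_PAYLOAD_SGL(my_reset_sgl_fn,
 *						   my_next_sge_fn, my_cb_arg, NULL);
 *
 *	nvme_payload_type(&contig);	// NVME_PAYLOAD_TYPE_CONTIG (reset_sgl_fn == NULL)
 *	nvme_payload_type(&sgl);	// NVME_PAYLOAD_TYPE_SGL (reset_sgl_fn != NULL)
 */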
250 
251 struct nvme_error_cmd {
252 	bool				do_not_submit;
253 	uint64_t			timeout_tsc;
254 	uint32_t			err_count;
255 	uint8_t				opc;
256 	struct spdk_nvme_status		status;
257 	TAILQ_ENTRY(nvme_error_cmd)	link;
258 };
259 
260 struct nvme_request {
261 	struct spdk_nvme_cmd		cmd;
262 
263 	uint8_t				retries;
264 
265 	uint8_t				timed_out : 1;
266 
267 	/**
268 	 * True if the request is in the queued_req list.
269 	 */
270 	uint8_t				queued : 1;
271 	uint8_t				reserved : 6;
272 
273 	/**
274 	 * Number of children requests still outstanding for this
275 	 *  request which was split into multiple child requests.
276 	 */
277 	uint16_t			num_children;
278 
279 	/**
280 	 * Offset in bytes from the beginning of payload for this request.
281 	 * This is used for I/O commands that are split into multiple requests.
282 	 */
283 	uint32_t			payload_offset;
284 	uint32_t			md_offset;
285 
286 	uint32_t			payload_size;
287 
288 	/**
289 	 * Timeout ticks for error injection requests; this can be extended in the
290 	 * future to support a per-request timeout feature.
291 	 */
292 	uint64_t			timeout_tsc;
293 
294 	/**
295 	 * Data payload for this request's command.
296 	 */
297 	struct nvme_payload		payload;
298 
299 	spdk_nvme_cmd_cb		cb_fn;
300 	void				*cb_arg;
301 	STAILQ_ENTRY(nvme_request)	stailq;
302 
303 	struct spdk_nvme_qpair		*qpair;
304 
305 	/*
306 	 * The value of spdk_get_ticks() when the request was submitted to the hardware.
307 	 * Only set if ctrlr->timeout_enabled is true.
308 	 */
309 	uint64_t			submit_tick;
310 
311 	/**
312 	 * The active admin request can be moved to a per process pending
313 	 *  list based on the saved pid to tell which process it belongs
314 	 *  to. The cpl saves the original completion information which
315 	 *  is used in the completion callback.
316 	 * NOTE: the two fields below are only used for admin requests.
317 	 */
318 	pid_t				pid;
319 	struct spdk_nvme_cpl		cpl;
320 
321 	uint32_t			md_size;
322 
323 	/**
324 	 * The following members should not be reordered with members
325 	 *  above.  These members are only needed when splitting
326 	 *  requests which is done rarely, and the driver is careful
327 	 *  to not touch the following fields until a split operation is
328 	 *  needed, to avoid touching an extra cacheline.
329 	 */
330 
331 	/**
332 	 * Points to the outstanding child requests for a parent request.
333 	 *  Only valid if a request was split into multiple children
334 	 *  requests, and is not initialized for non-split requests.
335 	 */
336 	TAILQ_HEAD(, nvme_request)	children;
337 
338 	/**
339 	 * Linked-list pointers for a child request in its parent's list.
340 	 */
341 	TAILQ_ENTRY(nvme_request)	child_tailq;
342 
343 	/**
344 	 * Points to a parent request if part of a split request,
345 	 *   NULL otherwise.
346 	 */
347 	struct nvme_request		*parent;
348 
349 	/**
350 	 * Completion status for a parent request.  Initialized to all 0's
351 	 *  (SUCCESS) before child requests are submitted.  If a child
352 	 *  request completes with error, the error status is copied here,
353 	 *  to ensure that the parent request is also completed with error
354 	 *  status once all child requests are completed.
355 	 */
356 	struct spdk_nvme_cpl		parent_status;
357 
358 	/**
359 	 * The user_cb_fn and user_cb_arg fields are used for holding the original
360 	 * callback data when using nvme_allocate_request_user_copy.
361 	 */
362 	spdk_nvme_cmd_cb		user_cb_fn;
363 	void				*user_cb_arg;
364 	void				*user_buffer;
365 };
366 
367 struct nvme_completion_poll_status {
368 	struct spdk_nvme_cpl	cpl;
369 	bool			done;
370 	/* This flag indicates that the request has timed out and the memory
371 	   must be freed in a completion callback */
372 	bool			timed_out;
373 };
374 
375 struct nvme_async_event_request {
376 	struct spdk_nvme_ctrlr		*ctrlr;
377 	struct nvme_request		*req;
378 	struct spdk_nvme_cpl		cpl;
379 };
380 
381 enum nvme_qpair_state {
382 	NVME_QPAIR_DISCONNECTED,
383 	NVME_QPAIR_DISCONNECTING,
384 	NVME_QPAIR_CONNECTING,
385 	NVME_QPAIR_CONNECTED,
386 	NVME_QPAIR_ENABLING,
387 	NVME_QPAIR_ENABLED,
388 	NVME_QPAIR_DESTROYING,
389 };
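
/*
 * Rough lifecycle (a sketch, not a normative state machine): a qpair starts
 * DISCONNECTED, moves through CONNECTING to CONNECTED when the transport
 * connect succeeds, then through ENABLING to ENABLED once it is ready to
 * accept I/O; teardown goes through DISCONNECTING back to DISCONNECTED, and
 * DESTROYING is used while the qpair is being freed.
 */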
390 
391 struct spdk_nvme_qpair {
392 	struct spdk_nvme_ctrlr			*ctrlr;
393 
394 	uint16_t				id;
395 
396 	uint8_t					qprio;
397 
398 	uint8_t					state : 3;
399 
400 	/*
401 	 * Members for handling IO qpair deletion inside of a completion context.
402 	 * These are specifically defined as single bits, so that they do not
403 	 *  push this data structure out to another cacheline.
404 	 */
405 	uint8_t					in_completion_context : 1;
406 	uint8_t					delete_after_completion_context: 1;
407 
408 	/*
409 	 * Set when no deletion notification is needed. For example, the process
410 	 * which allocated this qpair exited unexpectedly.
411 	 */
412 	uint8_t					no_deletion_notification_needed: 1;
413 
414 	uint8_t					first_fused_submitted: 1;
415 
416 	enum spdk_nvme_transport_type		trtype;
417 
418 	STAILQ_HEAD(, nvme_request)		free_req;
419 	STAILQ_HEAD(, nvme_request)		queued_req;
420 	STAILQ_HEAD(, nvme_request)		aborting_queued_req;
421 
422 	/* List entry for spdk_nvme_transport_poll_group::qpairs */
423 	STAILQ_ENTRY(spdk_nvme_qpair)		poll_group_stailq;
424 
425 	/** Commands whose opcode is in this list will return an error */
426 	TAILQ_HEAD(, nvme_error_cmd)		err_cmd_head;
427 	/** Requests in this list will return an error */
428 	STAILQ_HEAD(, nvme_request)		err_req_head;
429 
430 	/* List entry for spdk_nvme_ctrlr::active_io_qpairs */
431 	TAILQ_ENTRY(spdk_nvme_qpair)		tailq;
432 
433 	/* List entry for spdk_nvme_ctrlr_process::allocated_io_qpairs */
434 	TAILQ_ENTRY(spdk_nvme_qpair)		per_process_tailq;
435 
436 	struct spdk_nvme_ctrlr_process		*active_proc;
437 
438 	struct spdk_nvme_transport_poll_group	*poll_group;
439 
440 	void					*poll_group_tailq_head;
441 
442 	void					*req_buf;
443 
444 	const struct spdk_nvme_transport	*transport;
445 
446 	uint8_t					transport_failure_reason: 2;
447 };
448 
449 struct spdk_nvme_poll_group {
450 	void						*ctx;
451 	STAILQ_HEAD(, spdk_nvme_transport_poll_group)	tgroups;
452 };
453 
454 struct spdk_nvme_transport_poll_group {
455 	struct spdk_nvme_poll_group			*group;
456 	const struct spdk_nvme_transport		*transport;
457 	STAILQ_HEAD(, spdk_nvme_qpair)			connected_qpairs;
458 	STAILQ_HEAD(, spdk_nvme_qpair)			disconnected_qpairs;
459 	STAILQ_ENTRY(spdk_nvme_transport_poll_group)	link;
460 	bool						in_completion_context;
461 	uint64_t					num_qpairs_to_delete;
462 };
463 
464 struct spdk_nvme_ns {
465 	struct spdk_nvme_ctrlr		*ctrlr;
466 	uint32_t			sector_size;
467 
468 	/*
469 	 * Size of data transferred as part of each block,
470 	 * including metadata if FLBAS indicates the metadata is transferred
471 	 * as part of the data buffer at the end of each LBA.
472 	 */
473 	uint32_t			extended_lba_size;
474 
475 	uint32_t			md_size;
476 	uint32_t			pi_type;
477 	uint32_t			sectors_per_max_io;
478 	uint32_t			sectors_per_stripe;
479 	uint32_t			id;
480 	uint16_t			flags;
481 
482 	/* Command Set Identifier */
483 	enum spdk_nvme_csi		csi;
484 
485 	/* Namespace Identification Descriptor List (CNS = 03h) */
486 	uint8_t				id_desc_list[4096];
487 
488 	uint32_t			ana_group_id;
489 	enum spdk_nvme_ana_state	ana_state;
490 };
491 
492 /**
493  * State of struct spdk_nvme_ctrlr (in particular, during initialization).
494  */
495 enum nvme_ctrlr_state {
496 	/**
497 	 * Wait before initializing the controller.
498 	 */
499 	NVME_CTRLR_STATE_INIT_DELAY,
500 
501 	/**
502 	 * Controller has not been initialized yet.
503 	 */
504 	NVME_CTRLR_STATE_INIT,
505 
506 	/**
507 	 * Waiting for CSTS.RDY to transition from 0 to 1 so that CC.EN may be set to 0.
508 	 */
509 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
510 
511 	/**
512 	 * Waiting for CSTS.RDY to transition from 1 to 0 so that CC.EN may be set to 1.
513 	 */
514 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
515 
516 	/**
517 	 * Enable the controller by writing CC.EN to 1
518 	 */
519 	NVME_CTRLR_STATE_ENABLE,
520 
521 	/**
522 	 * Waiting for CSTS.RDY to transition from 0 to 1 after enabling the controller.
523 	 */
524 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
525 
526 	/**
527 	 * Reset the Admin queue of the controller.
528 	 */
529 	NVME_CTRLR_STATE_RESET_ADMIN_QUEUE,
530 
531 	/**
532 	 * Identify Controller command will be sent to the controller.
533 	 */
534 	NVME_CTRLR_STATE_IDENTIFY,
535 
536 	/**
537 	 * Waiting for the Identify Controller command to be completed.
538 	 */
539 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
540 
541 	/**
542 	 * Get Identify I/O Command Set Specific Controller data structure.
543 	 */
544 	NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
545 
546 	/**
547 	 * Waiting for Identify I/O Command Set Specific Controller command to be completed.
548 	 */
549 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC,
550 
551 	/**
552 	 * Get Commands Supported and Effects log page for the Zoned Namespace Command Set.
553 	 */
554 	NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG,
555 
556 	/**
557 	 * Waiting for the Get Log Page command to be completed.
558 	 */
559 	NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG,
560 
561 	/**
562 	 * Set Number of Queues of the controller.
563 	 */
564 	NVME_CTRLR_STATE_SET_NUM_QUEUES,
565 
566 	/**
567 	 * Waiting for the Set Number of Queues command to be completed.
568 	 */
569 	NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
570 
571 	/**
572 	 * Construct Namespace data structures of the controller.
573 	 */
574 	NVME_CTRLR_STATE_CONSTRUCT_NS,
575 
576 	/**
577 	 * Get active Namespace list of the controller.
578 	 */
579 	NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
580 
581 	/**
582 	 * Waiting for the Identify Active Namespace commands to be completed.
583 	 */
584 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS,
585 
586 	/**
587 	 * Get Identify Namespace Data structure for each NS.
588 	 */
589 	NVME_CTRLR_STATE_IDENTIFY_NS,
590 
591 	/**
592 	 * Waiting for the Identify Namespace commands to be completed.
593 	 */
594 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
595 
596 	/**
597 	 * Get Identify Namespace Identification Descriptors.
598 	 */
599 	NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
600 
601 	/**
602 	 * Get Identify I/O Command Set Specific Namespace data structure for each NS.
603 	 */
604 	NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
605 
606 	/**
607 	 * Waiting for the Identify I/O Command Set Specific Namespace commands to be completed.
608 	 */
609 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,
610 
611 	/**
612 	 * Waiting for the Identify Namespace Identification
613 	 * Descriptors to be completed.
614 	 */
615 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
616 
617 	/**
618 	 * Configure AER of the controller.
619 	 */
620 	NVME_CTRLR_STATE_CONFIGURE_AER,
621 
622 	/**
623 	 * Waiting for the Configure AER to be completed.
624 	 */
625 	NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
626 
627 	/**
628 	 * Set supported log pages of the controller.
629 	 */
630 	NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
631 
632 	/**
633 	 * Set supported features of the controller.
634 	 */
635 	NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
636 
637 	/**
638 	 * Set Doorbell Buffer Config of the controller.
639 	 */
640 	NVME_CTRLR_STATE_SET_DB_BUF_CFG,
641 
642 	/**
643 	 * Waiting for Doorbell Buffer Config to be completed.
644 	 */
645 	NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
646 
647 	/**
648 	 * Set Keep Alive Timeout of the controller.
649 	 */
650 	NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
651 
652 	/**
653 	 * Waiting for Set Keep Alive Timeout to be completed.
654 	 */
655 	NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
656 
657 	/**
658 	 * Set Host ID of the controller.
659 	 */
660 	NVME_CTRLR_STATE_SET_HOST_ID,
661 
662 	/**
663 	 * Waiting for Set Host ID to be completed.
664 	 */
665 	NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
666 
667 	/**
668 	 * Controller initialization has completed and the controller is ready.
669 	 */
670 	NVME_CTRLR_STATE_READY,
671 
672 	/**
673 	 * Controller initialization has encountered an error.
674 	 */
675 	NVME_CTRLR_STATE_ERROR
676 };
677 
678 #define NVME_TIMEOUT_INFINITE	0
679 
680 /*
681  * Used to track properties for all processes accessing the controller.
682  */
683 struct spdk_nvme_ctrlr_process {
684 	/** Whether it is the primary process  */
685 	bool						is_primary;
686 
687 	/** Process ID */
688 	pid_t						pid;
689 
690 	/** Active admin requests to be completed */
691 	STAILQ_HEAD(, nvme_request)			active_reqs;
692 
693 	TAILQ_ENTRY(spdk_nvme_ctrlr_process)		tailq;
694 
695 	/** Per process PCI device handle */
696 	struct spdk_pci_device				*devhandle;
697 
698 	/** Reference count to track the number of attachments to this controller. */
699 	int						ref;
700 
701 	/** Allocated IO qpairs */
702 	TAILQ_HEAD(, spdk_nvme_qpair)			allocated_io_qpairs;
703 
704 	spdk_nvme_aer_cb				aer_cb_fn;
705 	void						*aer_cb_arg;
706 
707 	/**
708 	 * A function pointer to timeout callback function
709 	 */
710 	spdk_nvme_timeout_cb		timeout_cb_fn;
711 	void				*timeout_cb_arg;
712 	uint64_t			timeout_ticks;
713 };
714 
715 /*
716  * One of these per allocated PCI device.
717  */
718 struct spdk_nvme_ctrlr {
719 	/* Hot data (accessed in I/O path) starts here. */
720 
721 	/** Array of namespaces indexed by nsid - 1 */
722 	struct spdk_nvme_ns		*ns;
723 
724 	uint32_t			num_ns;
725 
726 	bool				is_removed;
727 
728 	bool				is_resetting;
729 
730 	bool				is_failed;
731 
732 	bool				is_destructed;
733 
734 	bool				timeout_enabled;
735 
736 	uint16_t			max_sges;
737 
738 	uint16_t			cntlid;
739 
740 	/** Controller support flags */
741 	uint64_t			flags;
742 
743 	/** NVMEoF in-capsule data size in bytes */
744 	uint32_t			ioccsz_bytes;
745 
746 	/** NVMEoF in-capsule data offset in 16 byte units */
747 	uint16_t			icdoff;
748 
749 	/* Cold data (not accessed in normal I/O path) is after this point. */
750 
751 	struct spdk_nvme_transport_id	trid;
752 
753 	union spdk_nvme_cap_register	cap;
754 	union spdk_nvme_vs_register	vs;
755 
756 	int				state;
757 	uint64_t			state_timeout_tsc;
758 
759 	uint64_t			next_keep_alive_tick;
760 	uint64_t			keep_alive_interval_ticks;
761 
762 	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
763 
764 	/** All the log pages supported */
765 	bool				log_page_supported[256];
766 
767 	/** All the features supported */
768 	bool				feature_supported[256];
769 
770 	/** maximum i/o size in bytes */
771 	uint32_t			max_xfer_size;
772 
773 	/** minimum page size supported by this controller in bytes */
774 	uint32_t			min_page_size;
775 
776 	/** selected memory page size for this controller in bytes */
777 	uint32_t			page_size;
778 
779 	uint32_t			num_aers;
780 	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];
781 
782 	/** guards access to the controller itself, including admin queues */
783 	pthread_mutex_t			ctrlr_lock;
784 
785 	struct spdk_nvme_qpair		*adminq;
786 
787 	/** shadow doorbell buffer */
788 	uint32_t			*shadow_doorbell;
789 	/** eventidx buffer */
790 	uint32_t			*eventidx;
791 
792 	/**
793 	 * Identify Controller data.
794 	 */
795 	struct spdk_nvme_ctrlr_data	cdata;
796 
797 	/**
798 	 * Zoned Namespace Command Set Specific Identify Controller data.
799 	 */
800 	struct spdk_nvme_zns_ctrlr_data	*cdata_zns;
801 
802 	/**
803 	 * Keep track of active namespaces
804 	 */
805 	uint32_t			*active_ns_list;
806 
807 	/**
808 	 * Array of Identify Namespace data.
809 	 *
810 	 * Stored separately from ns since nsdata should not normally be accessed during I/O.
811 	 */
812 	struct spdk_nvme_ns_data	*nsdata;
813 
814 	/**
815 	 * Array of pointers to Zoned Namespace Command Set Specific Identify Namespace data.
816 	 */
817 	struct spdk_nvme_zns_ns_data	**nsdata_zns;
818 
819 	struct spdk_bit_array		*free_io_qids;
820 	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
821 
822 	struct spdk_nvme_ctrlr_opts	opts;
823 
824 	uint64_t			quirks;
825 
826 	/* Extra sleep time during controller initialization */
827 	uint64_t			sleep_timeout_tsc;
828 
829 	/** Track all the processes that manage this controller */
830 	TAILQ_HEAD(, spdk_nvme_ctrlr_process)	active_procs;
831 
832 
833 	STAILQ_HEAD(, nvme_request)	queued_aborts;
834 	uint32_t			outstanding_aborts;
835 
836 	/* CB to notify the user when the ctrlr is removed/failed. */
837 	spdk_nvme_remove_cb			remove_cb;
838 	void					*cb_ctx;
839 
840 	struct spdk_nvme_qpair		*external_io_msgs_qpair;
841 	pthread_mutex_t			external_io_msgs_lock;
842 	struct spdk_ring		*external_io_msgs;
843 
844 	STAILQ_HEAD(, nvme_io_msg_producer) io_producers;
845 
846 	struct spdk_nvme_ana_page	*ana_log_page;
847 	uint32_t			ana_log_page_size;
848 
849 	/* scratchpad pointer that can be used to send data between two NVME_CTRLR_STATEs */
850 	void				*tmp_ptr;
851 
852 	/* maximum zone append size in bytes */
853 	uint32_t			max_zone_append_size;
854 };
855 
856 struct spdk_nvme_probe_ctx {
857 	struct spdk_nvme_transport_id		trid;
858 	void					*cb_ctx;
859 	spdk_nvme_probe_cb			probe_cb;
860 	spdk_nvme_attach_cb			attach_cb;
861 	spdk_nvme_remove_cb			remove_cb;
862 	TAILQ_HEAD(, spdk_nvme_ctrlr)		init_ctrlrs;
863 };
864 
865 typedef void (*nvme_ctrlr_detach_cb)(struct spdk_nvme_ctrlr *ctrlr);
866 
867 struct nvme_ctrlr_detach_ctx {
868 	struct spdk_nvme_ctrlr			*ctrlr;
869 	nvme_ctrlr_detach_cb			cb_fn;
870 	uint64_t				shutdown_start_tsc;
871 	uint32_t				shutdown_timeout_ms;
872 	bool					shutdown_complete;
873 	TAILQ_ENTRY(nvme_ctrlr_detach_ctx)	link;
874 };
875 
876 struct spdk_nvme_detach_ctx {
877 	TAILQ_HEAD(, nvme_ctrlr_detach_ctx)	head;
878 	bool					polling_started;
879 };
880 
881 struct nvme_driver {
882 	pthread_mutex_t			lock;
883 
884 	/** Multi-process shared attached controller list */
885 	TAILQ_HEAD(, spdk_nvme_ctrlr)	shared_attached_ctrlrs;
886 
887 	bool				initialized;
888 	struct spdk_uuid		default_extended_host_id;
889 
890 	/** netlink socket fd for hotplug messages */
891 	int				hotplug_fd;
892 };
893 
894 extern struct nvme_driver *g_spdk_nvme_driver;
895 
896 int nvme_driver_init(void);
897 
898 #define nvme_delay		usleep
899 
900 static inline bool
901 nvme_qpair_is_admin_queue(struct spdk_nvme_qpair *qpair)
902 {
903 	return qpair->id == 0;
904 }
905 
906 static inline bool
907 nvme_qpair_is_io_queue(struct spdk_nvme_qpair *qpair)
908 {
909 	return qpair->id != 0;
910 }
911 
912 static inline int
913 nvme_robust_mutex_lock(pthread_mutex_t *mtx)
914 {
915 	int rc = pthread_mutex_lock(mtx);
916 
917 #ifndef __FreeBSD__
918 	if (rc == EOWNERDEAD) {
919 		rc = pthread_mutex_consistent(mtx);
920 	}
921 #endif
922 
923 	return rc;
924 }
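
/*
 * Example use (a sketch): ctrlr_lock is initialized as a robust,
 * process-shared mutex, so if the owning process dies while holding it, the
 * next locker sees EOWNERDEAD and pthread_mutex_consistent() makes the mutex
 * usable again instead of deadlocking.
 *
 *	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
 *	... access the controller ...
 *	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 */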
925 
926 static inline int
927 nvme_robust_mutex_unlock(pthread_mutex_t *mtx)
928 {
929 	return pthread_mutex_unlock(mtx);
930 }
931 
932 /* Poll group management functions. */
933 int nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
934 int nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
935 
936 /* Admin functions */
937 int	nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr,
938 				uint8_t cns, uint16_t cntid, uint32_t nsid,
939 				uint8_t csi, void *payload, size_t payload_size,
940 				spdk_nvme_cmd_cb cb_fn, void *cb_arg);
941 int	nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
942 				      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn,
943 				      void *cb_arg);
944 int	nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
945 				      spdk_nvme_cmd_cb cb_fn, void *cb_arg);
946 int	nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
947 		union spdk_nvme_feat_async_event_configuration config,
948 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
949 int	nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
950 				   spdk_nvme_cmd_cb cb_fn, void *cb_arg);
951 int	nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
952 				 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
953 int	nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
954 				 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
955 int	nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
956 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg);
957 int	nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr,
958 		uint64_t prp1, uint64_t prp2,
959 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
960 int	nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
961 				 void *cb_arg);
962 int	nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
963 			      struct spdk_nvme_format *format, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
964 int	nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
965 				 const struct spdk_nvme_fw_commit *fw_commit,
966 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg);
967 int	nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
968 		uint32_t size, uint32_t offset, void *payload,
969 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
970 int	nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
971 				struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
972 				spdk_nvme_cmd_cb cb_fn, void *cb_arg);
973 void	nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl);
974 int	nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
975 				 struct nvme_completion_poll_status *status);
976 int	nvme_wait_for_completion_robust_lock(struct spdk_nvme_qpair *qpair,
977 		struct nvme_completion_poll_status *status,
978 		pthread_mutex_t *robust_mutex);
979 int	nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
980 		struct nvme_completion_poll_status *status,
981 		uint64_t timeout_in_usecs);
982 int	nvme_wait_for_completion_robust_lock_timeout(struct spdk_nvme_qpair *qpair,
983 		struct nvme_completion_poll_status *status,
984 		pthread_mutex_t *robust_mutex,
985 		uint64_t timeout_in_usecs);
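
/*
 * Typical synchronous admin command pattern built on these helpers (a sketch,
 * not copied from any particular caller):
 *
 *	struct nvme_completion_poll_status *status;
 *
 *	status = calloc(1, sizeof(*status));
 *	rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, num_queues,
 *					   nvme_completion_poll_cb, status);
 *	if (rc == 0) {
 *		rc = nvme_wait_for_completion(ctrlr->adminq, status);
 *	}
 *	if (!status->timed_out) {
 *		free(status);
 *	}
 */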
986 
987 struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr,
988 		pid_t pid);
989 struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr);
990 int	nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle);
991 void	nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr);
992 struct spdk_pci_device *nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr);
993 
994 int	nvme_ctrlr_probe(const struct spdk_nvme_transport_id *trid,
995 			 struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle);
996 
997 int	nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr);
998 void	nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr);
999 void	nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1000 void	nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr,
1001 				  struct nvme_ctrlr_detach_ctx *ctx);
1002 int	nvme_ctrlr_destruct_poll_async(struct spdk_nvme_ctrlr *ctrlr,
1003 				       struct nvme_ctrlr_detach_ctx *ctx);
1004 void	nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove);
1005 int	nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr);
1006 int	nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr);
1007 void	nvme_ctrlr_connected(struct spdk_nvme_probe_ctx *probe_ctx,
1008 			     struct spdk_nvme_ctrlr *ctrlr);
1009 
1010 int	nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
1011 					struct nvme_request *req);
1012 int	nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap);
1013 int	nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs);
1014 int	nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz);
1015 bool	nvme_ctrlr_multi_iocs_enabled(struct spdk_nvme_ctrlr *ctrlr);
1016 void	nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_register *cap,
1017 			    const union spdk_nvme_vs_register *vs);
1018 void nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1019 int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
1020 		    struct spdk_nvme_ctrlr *ctrlr,
1021 		    enum spdk_nvme_qprio qprio,
1022 		    uint32_t num_requests);
1023 void	nvme_qpair_deinit(struct spdk_nvme_qpair *qpair);
1024 void	nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair);
1025 int	nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair,
1026 				  struct nvme_request *req);
1027 void	nvme_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
1028 uint32_t nvme_qpair_abort_queued_reqs(struct spdk_nvme_qpair *qpair, void *cmd_cb_arg);
1029 void	nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_requests);
1030 
1031 int	nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr);
1032 void	nvme_ns_set_identify_data(struct spdk_nvme_ns *ns);
1033 void	nvme_ns_set_id_desc_list_data(struct spdk_nvme_ns *ns);
1034 void	nvme_ns_free_zns_specific_data(struct spdk_nvme_ns *ns);
1035 void	nvme_ns_free_iocs_specific_data(struct spdk_nvme_ns *ns);
1036 bool	nvme_ns_has_supported_iocs_specific_data(struct spdk_nvme_ns *ns);
1037 int	nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
1038 			  struct spdk_nvme_ctrlr *ctrlr);
1039 void	nvme_ns_destruct(struct spdk_nvme_ns *ns);
1040 int	nvme_ns_update(struct spdk_nvme_ns *ns);
1041 
1042 int	nvme_fabric_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
1043 int	nvme_fabric_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
1044 int	nvme_fabric_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
1045 int	nvme_fabric_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
1046 int	nvme_fabric_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
1047 int	nvme_fabric_ctrlr_discover(struct spdk_nvme_ctrlr *ctrlr,
1048 				   struct spdk_nvme_probe_ctx *probe_ctx);
1049 int	nvme_fabric_qpair_connect(struct spdk_nvme_qpair *qpair, uint32_t num_entries);
1050 
1051 typedef int (*spdk_nvme_parse_ana_log_page_cb)(
1052 	const struct spdk_nvme_ana_group_descriptor *desc, void *cb_arg);
1053 int	nvme_ctrlr_parse_ana_log_page(struct spdk_nvme_ctrlr *ctrlr,
1054 				      spdk_nvme_parse_ana_log_page_cb cb_fn, void *cb_arg);
1055 
1056 static inline struct nvme_request *
1057 nvme_allocate_request(struct spdk_nvme_qpair *qpair,
1058 		      const struct nvme_payload *payload, uint32_t payload_size, uint32_t md_size,
1059 		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1060 {
1061 	struct nvme_request *req;
1062 
1063 	req = STAILQ_FIRST(&qpair->free_req);
1064 	if (req == NULL) {
1065 		return req;
1066 	}
1067 
1068 	STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);
1069 
1070 	/*
1071 	 * Only memset/zero fields that need it.  All other fields
1072 	 *  will be initialized appropriately either later in this
1073 	 *  function, or before they are needed later in the
1074 	 *  submission path.  For example, the children
1075 	 *  TAILQ_ENTRY and following members are
1076 	 *  only used as part of I/O splitting so we avoid
1077 	 *  memsetting them until it is actually needed.
1078 	 *  They will be initialized in nvme_request_add_child()
1079 	 *  if the request is split.
1080 	 */
1081 	memset(req, 0, offsetof(struct nvme_request, payload_size));
1082 
1083 	req->cb_fn = cb_fn;
1084 	req->cb_arg = cb_arg;
1085 	req->payload = *payload;
1086 	req->payload_size = payload_size;
1087 	req->md_size = md_size;
1088 	req->pid = g_spdk_nvme_pid;
1089 	req->submit_tick = 0;
1090 
1091 	return req;
1092 }
1093 
1094 static inline struct nvme_request *
1095 nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair,
1096 			     void *buffer, uint32_t payload_size,
1097 			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1098 {
1099 	struct nvme_payload payload;
1100 
1101 	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
1102 
1103 	return nvme_allocate_request(qpair, &payload, payload_size, 0, cb_fn, cb_arg);
1104 }
1105 
1106 static inline struct nvme_request *
1107 nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1108 {
1109 	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
1110 }
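
/*
 * Illustrative sketch of the allocation helpers (the opcode and error
 * handling are only an example):
 *
 *	struct nvme_request *req;
 *
 *	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
 *	if (req == NULL) {
 *		return -ENOMEM;
 *	}
 *	req->cmd.opc = SPDK_NVME_OPC_KEEP_ALIVE;
 *	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
 */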
1111 
1112 struct nvme_request *nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
1113 		void *buffer, uint32_t payload_size,
1114 		spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller);
1115 
1116 static inline void
1117 nvme_complete_request(spdk_nvme_cmd_cb cb_fn, void *cb_arg, struct spdk_nvme_qpair *qpair,
1118 		      struct nvme_request *req, struct spdk_nvme_cpl *cpl)
1119 {
1120 	struct spdk_nvme_cpl            err_cpl;
1121 	struct nvme_error_cmd           *cmd;
1122 
1123 	/* Error injection at the completion path;
1124 	 * only inject for successfully completed commands.
1125 	 */
1126 	if (spdk_unlikely(!TAILQ_EMPTY(&qpair->err_cmd_head) &&
1127 			  !spdk_nvme_cpl_is_error(cpl))) {
1128 		TAILQ_FOREACH(cmd, &qpair->err_cmd_head, link) {
1129 
1130 			if (cmd->do_not_submit) {
1131 				continue;
1132 			}
1133 
1134 			if ((cmd->opc == req->cmd.opc) && cmd->err_count) {
1135 
1136 				err_cpl = *cpl;
1137 				err_cpl.status.sct = cmd->status.sct;
1138 				err_cpl.status.sc = cmd->status.sc;
1139 
1140 				cpl = &err_cpl;
1141 				cmd->err_count--;
1142 				break;
1143 			}
1144 		}
1145 	}
1146 
1147 	if (cb_fn) {
1148 		cb_fn(cb_arg, cpl);
1149 	}
1150 }
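
/*
 * Error-injection sketch (hypothetical values): an entry queued on
 * spdk_nvme_qpair::err_cmd_head causes the next err_count successful
 * completions of the matching opcode to report the injected status instead:
 *
 *	cmd->opc = SPDK_NVME_OPC_READ;
 *	cmd->err_count = 1;
 *	cmd->status.sct = SPDK_NVME_SCT_GENERIC;
 *	cmd->status.sc = SPDK_NVME_SC_INVALID_FIELD;
 *	TAILQ_INSERT_TAIL(&qpair->err_cmd_head, cmd, link);
 */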
1151 
1152 static inline void
1153 nvme_free_request(struct nvme_request *req)
1154 {
1155 	assert(req != NULL);
1156 	assert(req->num_children == 0);
1157 	assert(req->qpair != NULL);
1158 
1159 	STAILQ_INSERT_HEAD(&req->qpair->free_req, req, stailq);
1160 }
1161 
1162 static inline void
1163 nvme_qpair_set_state(struct spdk_nvme_qpair *qpair, enum nvme_qpair_state state)
1164 {
1165 	qpair->state = state;
1166 }
1167 
1168 static inline enum nvme_qpair_state
1169 nvme_qpair_get_state(struct spdk_nvme_qpair *qpair) {
1170 	return qpair->state;
1171 }
1172 
1173 static inline void
1174 nvme_qpair_free_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
1175 {
1176 	assert(req != NULL);
1177 	assert(req->num_children == 0);
1178 
1179 	STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
1180 }
1181 
1182 static inline void
1183 nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child)
1184 {
1185 	assert(parent != NULL);
1186 	assert(child != NULL);
1187 	assert(child->parent == parent);
1188 	assert(parent->num_children != 0);
1189 
1190 	parent->num_children--;
1191 	child->parent = NULL;
1192 	TAILQ_REMOVE(&parent->children, child, child_tailq);
1193 }
1194 
1195 static inline void
1196 nvme_cb_complete_child(void *child_arg, const struct spdk_nvme_cpl *cpl)
1197 {
1198 	struct nvme_request *child = child_arg;
1199 	struct nvme_request *parent = child->parent;
1200 
1201 	nvme_request_remove_child(parent, child);
1202 
1203 	if (spdk_nvme_cpl_is_error(cpl)) {
1204 		memcpy(&parent->parent_status, cpl, sizeof(*cpl));
1205 	}
1206 
1207 	if (parent->num_children == 0) {
1208 		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
1209 				      parent, &parent->parent_status);
1210 		nvme_free_request(parent);
1211 	}
1212 }
1213 
1214 static inline void
1215 nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
1216 {
1217 	assert(parent->num_children != UINT16_MAX);
1218 
1219 	if (parent->num_children == 0) {
1220 		/*
1221 		 * Defer initialization of the children TAILQ since it falls
1222 		 *  on a separate cacheline.  This ensures we do not touch this
1223 		 *  cacheline except on request splitting cases, which are
1224 		 *  relatively rare.
1225 		 */
1226 		TAILQ_INIT(&parent->children);
1227 		parent->parent = NULL;
1228 		memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));
1229 	}
1230 
1231 	parent->num_children++;
1232 	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
1233 	child->parent = parent;
1234 	child->cb_fn = nvme_cb_complete_child;
1235 	child->cb_arg = child;
1236 }
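
/*
 * Splitting sketch (hypothetical sizes, shown only to illustrate the
 * parent/child linkage): nvme_request_add_child() points each child's
 * callback at nvme_cb_complete_child(), so the parent's callback fires only
 * after every child has completed.
 *
 *	for (i = 0; i < nchunks; i++) {
 *		child = ...allocate a request for chunk i...;
 *		nvme_request_add_child(parent, child);
 *		nvme_qpair_submit_request(qpair, child);
 *	}
 */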
1237 
1238 static inline void
1239 nvme_request_free_children(struct nvme_request *req)
1240 {
1241 	struct nvme_request *child, *tmp;
1242 
1243 	if (req->num_children == 0) {
1244 		return;
1245 	}
1246 
1247 	/* free all child nvme_request */
1248 	TAILQ_FOREACH_SAFE(child, &req->children, child_tailq, tmp) {
1249 		nvme_request_remove_child(req, child);
1250 		nvme_request_free_children(child);
1251 		nvme_free_request(child);
1252 	}
1253 }
1254 
1255 int	nvme_request_check_timeout(struct nvme_request *req, uint16_t cid,
1256 				   struct spdk_nvme_ctrlr_process *active_proc, uint64_t now_tick);
1257 uint64_t nvme_get_quirks(const struct spdk_pci_id *id);
1258 
1259 int	nvme_robust_mutex_init_shared(pthread_mutex_t *mtx);
1260 int	nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx);
1261 
1262 bool	nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl);
1263 
1264 struct spdk_nvme_ctrlr *nvme_get_ctrlr_by_trid_unsafe(
1265 	const struct spdk_nvme_transport_id *trid);
1266 
1267 const struct spdk_nvme_transport *nvme_get_transport(const char *transport_name);
1268 const struct spdk_nvme_transport *nvme_get_first_transport(void);
1269 const struct spdk_nvme_transport *nvme_get_next_transport(const struct spdk_nvme_transport
1270 		*transport);
1271 
1272 /* Transport specific functions */
1273 struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
1274 		const struct spdk_nvme_ctrlr_opts *opts,
1275 		void *devhandle);
1276 int nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1277 int nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
1278 int nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr);
1279 int nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
1280 int nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
1281 int nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
1282 int nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
1283 uint32_t nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr);
1284 uint16_t nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);
1285 struct spdk_nvme_qpair *nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1286 		uint16_t qid, const struct spdk_nvme_io_qpair_opts *opts);
1287 int nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr);
1288 void *nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
1289 int nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr);
1290 int nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1291 		struct spdk_nvme_qpair *qpair);
1292 int nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1293 				       struct spdk_nvme_qpair *qpair);
1294 void nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1295 		struct spdk_nvme_qpair *qpair);
1296 void nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
1297 int nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair);
1298 int nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req);
1299 int32_t nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair,
1300 		uint32_t max_completions);
1301 void nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair);
1302 int nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
1303 		int (*iter_fn)(struct nvme_request *req, void *arg),
1304 		void *arg);
1305 
1306 struct spdk_nvme_transport_poll_group *nvme_transport_poll_group_create(
1307 	const struct spdk_nvme_transport *transport);
1308 int nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
1309 				  struct spdk_nvme_qpair *qpair);
1310 int nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
1311 				     struct spdk_nvme_qpair *qpair);
1312 int nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1313 int nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
1314 int64_t nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
1315 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
1316 int nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup);
1317 /*
1318  * The ref-related functions below must be called with the global
1319  *  driver lock held to handle the multi-process case.
1320  *  Within these functions, the per-ctrlr ctrlr_lock is also
1321  *  acquired to handle the multi-thread case.
1322  */
1323 void	nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr);
1324 void	nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr);
1325 int	nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr);
1326 
1327 static inline bool
1328 _is_page_aligned(uint64_t address, uint64_t page_size)
1329 {
1330 	return (address & (page_size - 1)) == 0;
1331 }
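
/*
 * The bitmask test assumes page_size is a power of two, e.g.
 * _is_page_aligned(0x200000, 0x1000) is true while
 * _is_page_aligned(0x200010, 0x1000) is false.
 */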
1332 
1333 #endif /* __NVME_INTERNAL_H__ */
1334