1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation. All rights reserved.
3  *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #ifndef __NVME_INTERNAL_H__
8 #define __NVME_INTERNAL_H__
9 
10 #include "spdk/config.h"
11 #include "spdk/likely.h"
12 #include "spdk/stdinc.h"
13 
14 #include "spdk/nvme.h"
15 
16 #if defined(__i386__) || defined(__x86_64__)
17 #include <x86intrin.h>
18 #endif
19 
20 #include "spdk/queue.h"
21 #include "spdk/barrier.h"
22 #include "spdk/bit_array.h"
23 #include "spdk/mmio.h"
24 #include "spdk/pci_ids.h"
25 #include "spdk/util.h"
26 #include "spdk/memory.h"
27 #include "spdk/nvme_intel.h"
28 #include "spdk/nvmf_spec.h"
29 #include "spdk/tree.h"
30 #include "spdk/uuid.h"
31 
32 #include "spdk_internal/assert.h"
33 #include "spdk/log.h"
34 
35 extern pid_t g_spdk_nvme_pid;
36 
37 /*
38  * Some Intel devices support vendor-unique read latency log page even
39  * though the log page directory says otherwise.
40  */
41 #define NVME_INTEL_QUIRK_READ_LATENCY 0x1
42 
43 /*
44  * Some Intel devices support vendor-unique write latency log page even
45  * though the log page directory says otherwise.
46  */
47 #define NVME_INTEL_QUIRK_WRITE_LATENCY 0x2
48 
49 /*
50  * The controller needs a delay before it starts checking device
51  * readiness, which is done by reading the NVME_CSTS_RDY bit.
52  */
53 #define NVME_QUIRK_DELAY_BEFORE_CHK_RDY	0x4
54 
55 /*
56  * The controller performs best when I/O is split on particular
57  * LBA boundaries.
58  */
59 #define NVME_INTEL_QUIRK_STRIPING 0x8
60 
61 /*
62  * The controller needs a delay after allocating an I/O queue pair
63  * before it is ready to accept I/O commands.
64  */
65 #define NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC 0x10
66 
67 /*
68  * Earlier NVMe devices do not indicate whether unmapped blocks
69  * will read all zeroes or not. This define indicates that the
70  * device does in fact read all zeroes after an unmap event
71  */
72 #define NVME_QUIRK_READ_ZERO_AFTER_DEALLOCATE 0x20
73 
74 /*
75  * The controller doesn't handle Identify value others than 0 or 1 correctly.
76  */
77 #define NVME_QUIRK_IDENTIFY_CNS 0x40
78 
79 /*
80  * The controller supports Open Channel command set if matching additional
81  * condition, like the first byte (value 0x1) in the vendor specific
82  * bits of the namespace identify structure is set.
83  */
84 #define NVME_QUIRK_OCSSD 0x80
85 
86 /*
87  * The controller has an Intel vendor ID but does not support Intel vendor-specific
88  * log pages.  This is primarily for QEMU emulated SSDs which report an Intel vendor
89  * ID but do not support these log pages.
90  */
91 #define NVME_INTEL_QUIRK_NO_LOG_PAGES 0x100
92 
93 /*
94  * The controller does not set SHST_COMPLETE in a reasonable amount of time.  This
95  * is primarily seen in virtual VMware NVMe SSDs.  This quirk merely adds an
96  * informational message noting that the shutdown timeout may be expected on VMware NVMe SSDs.
97  */
98 #define NVME_QUIRK_SHST_COMPLETE 0x200
99 
100 /*
101  * The controller requires an extra delay before starting the initialization process
102  * during attach.
103  */
104 #define NVME_QUIRK_DELAY_BEFORE_INIT 0x400
105 
106 /*
107  * Some SSDs exhibit poor performance with the default SPDK NVMe IO queue size.
108  * This quirk will increase the default to 1024 which matches other operating
109  * systems, at the cost of some extra memory usage.  Users can still override
110  * the increased default by changing the spdk_nvme_io_qpair_opts when allocating
111  * a new queue pair.
112  */
113 #define NVME_QUIRK_MINIMUM_IO_QUEUE_SIZE 0x800
114 
115 /**
116  * The maximum access width to PCI memory space is 8 bytes; don't use AVX2 or
117  * SSE instructions to optimize memory accesses (memcpy or memset) larger than
118  * 8 bytes.
119  */
120 #define NVME_QUIRK_MAXIMUM_PCI_ACCESS_WIDTH 0x1000
121 
122 /**
123  * The SSD does not support OPAL even though it sets the security bit in OACS.
124  */
125 #define NVME_QUIRK_OACS_SECURITY 0x2000
126 
127 /**
128  * Intel P55XX SSDs do not support the Dataset Management command with SGL format,
129  * so use PRP for DSM commands.
130  */
131 #define NVME_QUIRK_NO_SGL_FOR_DSM 0x4000
132 
133 /**
134  * Maximum Data Transfer Size(MDTS) excludes interleaved metadata.
135  */
136 #define NVME_QUIRK_MDTS_EXCLUDE_MD 0x8000
137 
138 /**
139  * Force the driver not to use SGL even if the controller reports that it
140  * supports it.
141  */
142 #define NVME_QUIRK_NOT_USE_SGL 0x10000
143 
144 /*
145  * Some SSDs require the admin submission queue size to be an even
146  * multiple of 4KiB.
147  */
148 #define NVME_QUIRK_MINIMUM_ADMIN_QUEUE_SIZE 0x20000
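
/*
 * Illustrative sketch: the quirk defines above are bit flags that are OR'd
 * together per device and stored in spdk_nvme_ctrlr::quirks, so callers test
 * them with a bitwise AND, e.g.
 *
 *   if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) {
 *           // insert the extra delay before polling CSTS.RDY
 *   }
 *
 * The per-device mapping itself is resolved via nvme_get_quirks(), declared
 * later in this header.
 */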
149 
150 #define NVME_MAX_ASYNC_EVENTS	(8)
151 
152 #define NVME_MAX_ADMIN_TIMEOUT_IN_SECS	(30)
153 
154 /* Maximum log page size to fetch for AERs. */
155 #define NVME_MAX_AER_LOG_SIZE		(4096)
156 
157 /*
158  * NVME_MAX_IO_QUEUES in nvme_spec.h defines the 64K spec-limit, but this
159  *  define specifies the maximum number of queues this driver will actually
160  *  try to configure, if available.
161  */
162 #define DEFAULT_MAX_IO_QUEUES		(1024)
163 #define DEFAULT_ADMIN_QUEUE_SIZE	(32)
164 #define DEFAULT_IO_QUEUE_SIZE		(256)
165 #define DEFAULT_IO_QUEUE_SIZE_FOR_QUIRK	(1024) /* Matches Linux kernel driver */
166 
167 #define DEFAULT_IO_QUEUE_REQUESTS	(512)
168 
169 #define SPDK_NVME_DEFAULT_RETRY_COUNT	(4)
170 
171 #define SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED	(0)
172 #define SPDK_NVME_DEFAULT_TRANSPORT_ACK_TIMEOUT	SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED
173 
174 #define MIN_KEEP_ALIVE_TIMEOUT_IN_MS	(10000)
175 
176 /* We want to fit submission and completion rings each in a single 2MB
177  * hugepage to ensure physical address contiguity.
178  */
179 #define MAX_IO_QUEUE_ENTRIES		(VALUE_2MB / spdk_max( \
180 						sizeof(struct spdk_nvme_cmd), \
181 						sizeof(struct spdk_nvme_cpl)))
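
/*
 * Worked example (assuming the standard 64-byte submission queue entry and
 * 16-byte completion queue entry): spdk_max() picks
 * sizeof(struct spdk_nvme_cmd) == 64, so
 * MAX_IO_QUEUE_ENTRIES == 2 MiB / 64 B == 32768 entries.
 */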
182 
183 /* Default timeout for fabrics connect commands. */
184 #ifdef DEBUG
185 #define NVME_FABRIC_CONNECT_COMMAND_TIMEOUT 0
186 #else
187 /* 500 millisecond timeout. */
188 #define NVME_FABRIC_CONNECT_COMMAND_TIMEOUT 500000
189 #endif
190 
191 /* This value indicates that a read from a PCIe register is invalid. This can happen when a device is no longer present. */
192 #define SPDK_NVME_INVALID_REGISTER_VALUE 0xFFFFFFFFu
193 
194 enum nvme_payload_type {
195 	NVME_PAYLOAD_TYPE_INVALID = 0,
196 
197 	/** nvme_request::u.payload.contig_buffer is valid for this request */
198 	NVME_PAYLOAD_TYPE_CONTIG,
199 
200 	/** nvme_request::u.sgl is valid for this request */
201 	NVME_PAYLOAD_TYPE_SGL,
202 };
203 
204 /** Boot partition write states */
205 enum nvme_bp_write_state {
206 	SPDK_NVME_BP_WS_DOWNLOADING	= 0x0,
207 	SPDK_NVME_BP_WS_DOWNLOADED	= 0x1,
208 	SPDK_NVME_BP_WS_REPLACE		= 0x2,
209 	SPDK_NVME_BP_WS_ACTIVATE	= 0x3,
210 };
211 
212 /**
213  * Descriptor for a request data payload.
214  */
215 struct nvme_payload {
216 	/**
217 	 * Functions for retrieving physical addresses for scattered payloads.
218 	 */
219 	spdk_nvme_req_reset_sgl_cb reset_sgl_fn;
220 	spdk_nvme_req_next_sge_cb next_sge_fn;
221 
222 	/**
223 	 * Extended IO options passed by the user
224 	 */
225 	struct spdk_nvme_ns_cmd_ext_io_opts *opts;
226 	/**
227 	 * If reset_sgl_fn == NULL, this is a contig payload, and contig_or_cb_arg contains the
228 	 * virtual memory address of a single virtually contiguous buffer.
229 	 *
230 	 * If reset_sgl_fn != NULL, this is a SGL payload, and contig_or_cb_arg contains the
231 	 * cb_arg that will be passed to the SGL callback functions.
232 	 */
233 	void *contig_or_cb_arg;
234 
235 	/** Virtual memory address of a single virtually contiguous metadata buffer */
236 	void *md;
237 };
238 
239 #define NVME_PAYLOAD_CONTIG(contig_, md_) \
240 	(struct nvme_payload) { \
241 		.reset_sgl_fn = NULL, \
242 		.next_sge_fn = NULL, \
243 		.contig_or_cb_arg = (contig_), \
244 		.md = (md_), \
245 	}
246 
247 #define NVME_PAYLOAD_SGL(reset_sgl_fn_, next_sge_fn_, cb_arg_, md_) \
248 	(struct nvme_payload) { \
249 		.reset_sgl_fn = (reset_sgl_fn_), \
250 		.next_sge_fn = (next_sge_fn_), \
251 		.contig_or_cb_arg = (cb_arg_), \
252 		.md = (md_), \
253 	}
254 
255 static inline enum nvme_payload_type
256 nvme_payload_type(const struct nvme_payload *payload) {
257 	return payload->reset_sgl_fn ? NVME_PAYLOAD_TYPE_SGL : NVME_PAYLOAD_TYPE_CONTIG;
258 }
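
/*
 * Usage sketch (illustrative): a contiguous payload is built with
 * NVME_PAYLOAD_CONTIG() and is classified by nvme_payload_type() based on
 * whether reset_sgl_fn is set, e.g. with hypothetical buffers data_buf/md_buf:
 *
 *   struct nvme_payload p = NVME_PAYLOAD_CONTIG(data_buf, md_buf);
 *   assert(nvme_payload_type(&p) == NVME_PAYLOAD_TYPE_CONTIG);
 */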
259 
260 struct nvme_error_cmd {
261 	bool				do_not_submit;
262 	uint64_t			timeout_tsc;
263 	uint32_t			err_count;
264 	uint8_t				opc;
265 	struct spdk_nvme_status		status;
266 	TAILQ_ENTRY(nvme_error_cmd)	link;
267 };
268 
269 struct nvme_request {
270 	struct spdk_nvme_cmd		cmd;
271 
272 	uint8_t				retries;
273 
274 	uint8_t				timed_out : 1;
275 
276 	/**
277 	 * True if the request is in the queued_req list.
278 	 */
279 	uint8_t				queued : 1;
280 	uint8_t				reserved : 6;
281 
282 	/**
283 	 * Number of child requests still outstanding for this
284 	 *  request, which was split into multiple child requests.
285 	 */
286 	uint16_t			num_children;
287 
288 	/**
289 	 * Offset in bytes from the beginning of payload for this request.
290 	 * This is used for I/O commands that are split into multiple requests.
291 	 */
292 	uint32_t			payload_offset;
293 	uint32_t			md_offset;
294 
295 	uint32_t			payload_size;
296 
297 	/**
298 	 * Timeout ticks for error injection requests, can be extended in future
299 	 * to support per-request timeout feature.
300 	 */
301 	uint64_t			timeout_tsc;
302 
303 	/**
304 	 * Data payload for this request's command.
305 	 */
306 	struct nvme_payload		payload;
307 
308 	spdk_nvme_cmd_cb		cb_fn;
309 	void				*cb_arg;
310 	STAILQ_ENTRY(nvme_request)	stailq;
311 
312 	struct spdk_nvme_qpair		*qpair;
313 
314 	/*
315 	 * The value of spdk_get_ticks() when the request was submitted to the hardware.
316 	 * Only set if ctrlr->timeout_enabled is true.
317 	 */
318 	uint64_t			submit_tick;
319 
320 	/**
321 	 * The active admin request can be moved to a per process pending
322 	 *  list based on the saved pid to tell which process it belongs
323 	 *  to. The cpl saves the original completion information which
324 	 *  is used in the completion callback.
325 	 * NOTE: the two fields below are only used for admin requests.
326 	 */
327 	pid_t				pid;
328 	struct spdk_nvme_cpl		cpl;
329 
330 	uint32_t			md_size;
331 
332 	/**
333 	 * The following members should not be reordered with members
334 	 *  above.  These members are only needed when splitting
335 	 *  requests which is done rarely, and the driver is careful
336 	 *  to not touch the following fields until a split operation is
337 	 *  needed, to avoid touching an extra cacheline.
338 	 */
339 
340 	/**
341 	 * Points to the outstanding child requests for a parent request.
342 	 *  Only valid if a request was split into multiple children
343 	 *  requests, and is not initialized for non-split requests.
344 	 */
345 	TAILQ_HEAD(, nvme_request)	children;
346 
347 	/**
348 	 * Linked-list pointers for a child request in its parent's list.
349 	 */
350 	TAILQ_ENTRY(nvme_request)	child_tailq;
351 
352 	/**
353 	 * Points to a parent request if part of a split request,
354 	 *   NULL otherwise.
355 	 */
356 	struct nvme_request		*parent;
357 
358 	/**
359 	 * Completion status for a parent request.  Initialized to all 0's
360 	 *  (SUCCESS) before child requests are submitted.  If a child
361 	 *  request completes with error, the error status is copied here,
362 	 *  to ensure that the parent request is also completed with error
363 	 *  status once all child requests are completed.
364 	 */
365 	struct spdk_nvme_cpl		parent_status;
366 
367 	/**
368 	 * The user_cb_fn and user_cb_arg fields are used for holding the original
369 	 * callback data when using nvme_allocate_request_user_copy.
370 	 */
371 	spdk_nvme_cmd_cb		user_cb_fn;
372 	void				*user_cb_arg;
373 	void				*user_buffer;
374 };
375 
376 struct nvme_completion_poll_status {
377 	struct spdk_nvme_cpl	cpl;
378 	uint64_t		timeout_tsc;
379 	/**
380 	 * DMA buffer retained throughout the duration of the command.  It'll be released
381 	 * automatically if the command times out, otherwise the user is responsible for freeing it.
382 	 */
383 	void			*dma_data;
384 	bool			done;
385 	/* This flag indicates that the request has timed out and the memory
386 	   must be freed in a completion callback */
387 	bool			timed_out;
388 };
389 
390 struct nvme_async_event_request {
391 	struct spdk_nvme_ctrlr		*ctrlr;
392 	struct nvme_request		*req;
393 	struct spdk_nvme_cpl		cpl;
394 };
395 
396 enum nvme_qpair_state {
397 	NVME_QPAIR_DISCONNECTED,
398 	NVME_QPAIR_DISCONNECTING,
399 	NVME_QPAIR_CONNECTING,
400 	NVME_QPAIR_CONNECTED,
401 	NVME_QPAIR_ENABLING,
402 	NVME_QPAIR_ENABLED,
403 	NVME_QPAIR_DESTROYING,
404 };
405 
406 struct spdk_nvme_qpair {
407 	struct spdk_nvme_ctrlr			*ctrlr;
408 
409 	uint16_t				id;
410 
411 	uint8_t					qprio;
412 
413 	uint8_t					state : 3;
414 
415 	uint8_t					async: 1;
416 
417 	uint8_t					is_new_qpair: 1;
418 
419 	/*
420 	 * Members for handling IO qpair deletion inside of a completion context.
421 	 * These are specifically defined as single bits, so that they do not
422 	 *  push this data structure out to another cacheline.
423 	 */
424 	uint8_t					in_completion_context : 1;
425 	uint8_t					delete_after_completion_context: 1;
426 
427 	/*
428 	 * Set when no deletion notification is needed. For example, the process
429 	 * which allocated this qpair exited unexpectedly.
430 	 */
431 	uint8_t					no_deletion_notification_needed: 1;
432 
433 	uint8_t					last_fuse: 2;
434 
435 	uint8_t					transport_failure_reason: 2;
436 	uint8_t					last_transport_failure_reason: 2;
437 
438 	enum spdk_nvme_transport_type		trtype;
439 
440 	/* request object used only for this qpair's FABRICS/CONNECT command (if needed) */
441 	struct nvme_request			*reserved_req;
442 
443 	STAILQ_HEAD(, nvme_request)		free_req;
444 	STAILQ_HEAD(, nvme_request)		queued_req;
445 
446 	/* List entry for spdk_nvme_transport_poll_group::qpairs */
447 	STAILQ_ENTRY(spdk_nvme_qpair)		poll_group_stailq;
448 
449 	/** Commands whose opcode is in this list will return an error */
450 	TAILQ_HEAD(, nvme_error_cmd)		err_cmd_head;
451 	/** Requests in this list will return an error */
452 	STAILQ_HEAD(, nvme_request)		err_req_head;
453 
454 	struct spdk_nvme_ctrlr_process		*active_proc;
455 
456 	struct spdk_nvme_transport_poll_group	*poll_group;
457 
458 	void					*poll_group_tailq_head;
459 
460 	const struct spdk_nvme_transport	*transport;
461 
462 	/* Entries below here are not touched in the main I/O path. */
463 
464 	struct nvme_completion_poll_status	*poll_status;
465 
466 	/* List entry for spdk_nvme_ctrlr::active_io_qpairs */
467 	TAILQ_ENTRY(spdk_nvme_qpair)		tailq;
468 
469 	/* List entry for spdk_nvme_ctrlr_process::allocated_io_qpairs */
470 	TAILQ_ENTRY(spdk_nvme_qpair)		per_process_tailq;
471 
472 	STAILQ_HEAD(, nvme_request)		aborting_queued_req;
473 
474 	void					*req_buf;
475 };
476 
477 struct spdk_nvme_poll_group {
478 	void						*ctx;
479 	struct spdk_nvme_accel_fn_table			accel_fn_table;
480 	STAILQ_HEAD(, spdk_nvme_transport_poll_group)	tgroups;
481 };
482 
483 struct spdk_nvme_transport_poll_group {
484 	struct spdk_nvme_poll_group			*group;
485 	const struct spdk_nvme_transport		*transport;
486 	STAILQ_HEAD(, spdk_nvme_qpair)			connected_qpairs;
487 	STAILQ_HEAD(, spdk_nvme_qpair)			disconnected_qpairs;
488 	STAILQ_ENTRY(spdk_nvme_transport_poll_group)	link;
489 };
490 
491 struct spdk_nvme_ns {
492 	struct spdk_nvme_ctrlr		*ctrlr;
493 	uint32_t			sector_size;
494 
495 	/*
496 	 * Size of data transferred as part of each block,
497 	 * including metadata if FLBAS indicates the metadata is transferred
498 	 * as part of the data buffer at the end of each LBA.
499 	 */
500 	uint32_t			extended_lba_size;
501 
502 	uint32_t			md_size;
503 	uint32_t			pi_type;
504 	uint32_t			sectors_per_max_io;
505 	uint32_t			sectors_per_max_io_no_md;
506 	uint32_t			sectors_per_stripe;
507 	uint32_t			id;
508 	uint16_t			flags;
509 	bool				active;
510 
511 	/* Command Set Identifier */
512 	enum spdk_nvme_csi		csi;
513 
514 	/* Namespace Identification Descriptor List (CNS = 03h) */
515 	uint8_t				id_desc_list[4096];
516 
517 	uint32_t			ana_group_id;
518 	enum spdk_nvme_ana_state	ana_state;
519 
520 	/* Identify Namespace data. */
521 	struct spdk_nvme_ns_data	nsdata;
522 
523 	/* Zoned Namespace Command Set Specific Identify Namespace data. */
524 	struct spdk_nvme_zns_ns_data	*nsdata_zns;
525 
526 	RB_ENTRY(spdk_nvme_ns)		node;
527 };
528 
529 /**
530  * State of struct spdk_nvme_ctrlr (in particular, during initialization).
531  */
532 enum nvme_ctrlr_state {
533 	/**
534 	 * Wait before initializing the controller.
535 	 */
536 	NVME_CTRLR_STATE_INIT_DELAY,
537 
538 	/**
539 	 * Connect the admin queue.
540 	 */
541 	NVME_CTRLR_STATE_CONNECT_ADMINQ,
542 
543 	/**
544 	 * Controller has not started initialization yet.
545 	 */
546 	NVME_CTRLR_STATE_INIT = NVME_CTRLR_STATE_CONNECT_ADMINQ,
547 
548 	/**
549 	 * Waiting for admin queue to connect.
550 	 */
551 	NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ,
552 
553 	/**
554 	 * Read Version (VS) register.
555 	 */
556 	NVME_CTRLR_STATE_READ_VS,
557 
558 	/**
559 	 * Waiting for Version (VS) register to be read.
560 	 */
561 	NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS,
562 
563 	/**
564 	 * Read Capabilities (CAP) register.
565 	 */
566 	NVME_CTRLR_STATE_READ_CAP,
567 
568 	/**
569 	 * Waiting for Capabilities (CAP) register to be read.
570 	 */
571 	NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP,
572 
573 	/**
574 	 * Check EN to prepare for controller initialization.
575 	 */
576 	NVME_CTRLR_STATE_CHECK_EN,
577 
578 	/**
579 	 * Waiting for CC to be read as part of EN check.
580 	 */
581 	NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC,
582 
583 	/**
584 	 * Waiting for CSTS.RDY to transition from 0 to 1 so that CC.EN may be set to 0.
585 	 */
586 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
587 
588 	/**
589 	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 1.
590 	 */
591 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
592 
593 	/**
594 	 * Disabling the controller by setting CC.EN to 0.
595 	 */
596 	NVME_CTRLR_STATE_SET_EN_0,
597 
598 	/**
599 	 * Waiting for the CC register to be read as part of disabling the controller.
600 	 */
601 	NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC,
602 
603 	/**
604 	 * Waiting for CSTS.RDY to transition from 1 to 0 so that CC.EN may be set to 1.
605 	 */
606 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
607 
608 	/**
609 	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 0.
610 	 */
611 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS,
612 
613 	/**
614 	 * The controller is disabled. (CC.EN and CSTS.RDY are 0.)
615 	 */
616 	NVME_CTRLR_STATE_DISABLED,
617 
618 	/**
619 	 * Enable the controller by writing CC.EN to 1
620 	 */
621 	NVME_CTRLR_STATE_ENABLE,
622 
623 	/**
624 	 * Waiting for CC register to be written as part of enabling the controller.
625 	 */
626 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC,
627 
628 	/**
629 	 * Waiting for CSTS.RDY to transition from 0 to 1 after enabling the controller.
630 	 */
631 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
632 
633 	/**
634 	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 1.
635 	 */
636 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
637 
638 	/**
639 	 * Reset the Admin queue of the controller.
640 	 */
641 	NVME_CTRLR_STATE_RESET_ADMIN_QUEUE,
642 
643 	/**
644 	 * Identify Controller command will be sent to the controller.
645 	 */
646 	NVME_CTRLR_STATE_IDENTIFY,
647 
648 	/**
649 	 * Waiting for the Identify Controller command to be completed.
650 	 */
651 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
652 
653 	/**
654 	 * Configure AER of the controller.
655 	 */
656 	NVME_CTRLR_STATE_CONFIGURE_AER,
657 
658 	/**
659 	 * Waiting for the Configure AER to be completed.
660 	 */
661 	NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
662 
663 	/**
664 	 * Set Keep Alive Timeout of the controller.
665 	 */
666 	NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
667 
668 	/**
669 	 * Waiting for Set Keep Alive Timeout to be completed.
670 	 */
671 	NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
672 
673 	/**
674 	 * Get Identify I/O Command Set Specific Controller data structure.
675 	 */
676 	NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
677 
678 	/**
679 	 * Waiting for Identify I/O Command Set Specific Controller command to be completed.
680 	 */
681 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC,
682 
683 	/**
684 	 * Get Commands Supported and Effects log page for the Zoned Namespace Command Set.
685 	 */
686 	NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG,
687 
688 	/**
689 	 * Waiting for the Get Log Page command to be completed.
690 	 */
691 	NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG,
692 
693 	/**
694 	 * Set Number of Queues of the controller.
695 	 */
696 	NVME_CTRLR_STATE_SET_NUM_QUEUES,
697 
698 	/**
699 	 * Waiting for Set Num of Queues command to be completed.
700 	 */
701 	NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
702 
703 	/**
704 	 * Get active Namespace list of the controller.
705 	 */
706 	NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
707 
708 	/**
709 	 * Waiting for the Identify Active Namespace commands to be completed.
710 	 */
711 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS,
712 
713 	/**
714 	 * Get Identify Namespace Data structure for each NS.
715 	 */
716 	NVME_CTRLR_STATE_IDENTIFY_NS,
717 
718 	/**
719 	 * Waiting for the Identify Namespace commands to be completed.
720 	 */
721 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
722 
723 	/**
724 	 * Get Identify Namespace Identification Descriptors.
725 	 */
726 	NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
727 
728 	/**
729 	 * Get Identify I/O Command Set Specific Namespace data structure for each NS.
730 	 */
731 	NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
732 
733 	/**
734 	 * Waiting for the Identify I/O Command Set Specific Namespace commands to be completed.
735 	 */
736 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,
737 
738 	/**
739 	 * Waiting for the Identify Namespace Identification
740 	 * Descriptors to be completed.
741 	 */
742 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
743 
744 	/**
745 	 * Set supported log pages of the controller.
746 	 */
747 	NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
748 
749 	/**
750 	 * Set supported log pages of INTEL controller.
751 	 */
752 	NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES,
753 
754 	/**
755 	 * Waiting for supported log pages of INTEL controller.
756 	 */
757 	NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES,
758 
759 	/**
760 	 * Set supported features of the controller.
761 	 */
762 	NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
763 
764 	/**
765 	 * Set Doorbell Buffer Config of the controller.
766 	 */
767 	NVME_CTRLR_STATE_SET_DB_BUF_CFG,
768 
769 	/**
770 	 * Waiting for Doorbell Buffer Config to be completed.
771 	 */
772 	NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
773 
774 	/**
775 	 * Set Host ID of the controller.
776 	 */
777 	NVME_CTRLR_STATE_SET_HOST_ID,
778 
779 	/**
780 	 * Waiting for Set Host ID to be completed.
781 	 */
782 	NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
783 
784 	/**
785 	 * Let transport layer do its part of initialization.
786 	 */
787 	NVME_CTRLR_STATE_TRANSPORT_READY,
788 
789 	/**
790 	 * Controller initialization has completed and the controller is ready.
791 	 */
792 	NVME_CTRLR_STATE_READY,
793 
794 	/**
795 	 * Controller initialization has an error.
796 	 */
797 	NVME_CTRLR_STATE_ERROR
798 };
799 
800 #define NVME_TIMEOUT_INFINITE		0
801 #define NVME_TIMEOUT_KEEP_EXISTING	UINT64_MAX
802 
803 struct spdk_nvme_ctrlr_aer_completion_list {
804 	struct spdk_nvme_cpl	cpl;
805 	STAILQ_ENTRY(spdk_nvme_ctrlr_aer_completion_list) link;
806 };
807 
808 /*
809  * Used to track properties for all processes accessing the controller.
810  */
811 struct spdk_nvme_ctrlr_process {
812 	/** Whether it is the primary process  */
813 	bool						is_primary;
814 
815 	/** Process ID */
816 	pid_t						pid;
817 
818 	/** Active admin requests to be completed */
819 	STAILQ_HEAD(, nvme_request)			active_reqs;
820 
821 	TAILQ_ENTRY(spdk_nvme_ctrlr_process)		tailq;
822 
823 	/** Per process PCI device handle */
824 	struct spdk_pci_device				*devhandle;
825 
826 	/** Reference count to track the number of attachments to this controller. */
827 	int						ref;
828 
829 	/** Allocated IO qpairs */
830 	TAILQ_HEAD(, spdk_nvme_qpair)			allocated_io_qpairs;
831 
832 	spdk_nvme_aer_cb				aer_cb_fn;
833 	void						*aer_cb_arg;
834 
835 	/**
836 	 * Function pointer to the timeout callback function.
837 	 */
838 	spdk_nvme_timeout_cb		timeout_cb_fn;
839 	void				*timeout_cb_arg;
840 	/** separate timeout values for io vs. admin reqs */
841 	uint64_t			timeout_io_ticks;
842 	uint64_t			timeout_admin_ticks;
843 
844 	/** List to publish AENs to all procs in multiprocess setup */
845 	STAILQ_HEAD(, spdk_nvme_ctrlr_aer_completion_list)      async_events;
846 };
847 
848 struct nvme_register_completion {
849 	struct spdk_nvme_cpl			cpl;
850 	uint64_t				value;
851 	spdk_nvme_reg_cb			cb_fn;
852 	void					*cb_ctx;
853 	STAILQ_ENTRY(nvme_register_completion)	stailq;
854 	pid_t					pid;
855 };
856 
857 struct spdk_nvme_ctrlr {
858 	/* Hot data (accessed in I/O path) starts here. */
859 
860 	/* Tree of namespaces */
861 	RB_HEAD(nvme_ns_tree, spdk_nvme_ns)	ns;
862 
863 	/* The number of active namespaces */
864 	uint32_t			active_ns_count;
865 
866 	bool				is_removed;
867 
868 	bool				is_resetting;
869 
870 	bool				is_failed;
871 
872 	bool				is_destructed;
873 
874 	bool				timeout_enabled;
875 
876 	/* The application is preparing to reset the controller.  Transports
877 	 * can use this to skip unnecessary parts of the qpair deletion process,
878 	 * for example the DELETE_SQ/CQ commands.
879 	 */
880 	bool				prepare_for_reset;
881 
882 	bool				is_disconnecting;
883 
884 	uint16_t			max_sges;
885 
886 	uint16_t			cntlid;
887 
888 	/** Controller support flags */
889 	uint64_t			flags;
890 
891 	/** NVMEoF in-capsule data size in bytes */
892 	uint32_t			ioccsz_bytes;
893 
894 	/** NVMEoF in-capsule data offset in 16 byte units */
895 	uint16_t			icdoff;
896 
897 	/* Cold data (not accessed in normal I/O path) is after this point. */
898 
899 	struct spdk_nvme_transport_id	trid;
900 
901 	union spdk_nvme_cap_register	cap;
902 	union spdk_nvme_vs_register	vs;
903 
904 	int				state;
905 	uint64_t			state_timeout_tsc;
906 
907 	uint64_t			next_keep_alive_tick;
908 	uint64_t			keep_alive_interval_ticks;
909 
910 	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
911 
912 	/** All the log pages supported */
913 	bool				log_page_supported[256];
914 
915 	/** All the features supported */
916 	bool				feature_supported[256];
917 
918 	/** maximum i/o size in bytes */
919 	uint32_t			max_xfer_size;
920 
921 	/** minimum page size supported by this controller in bytes */
922 	uint32_t			min_page_size;
923 
924 	/** selected memory page size for this controller in bytes */
925 	uint32_t			page_size;
926 
927 	uint32_t			num_aers;
928 	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];
929 
930 	/** guards access to the controller itself, including admin queues */
931 	pthread_mutex_t			ctrlr_lock;
932 
933 	struct spdk_nvme_qpair		*adminq;
934 
935 	/** shadow doorbell buffer */
936 	uint32_t			*shadow_doorbell;
937 	/** eventidx buffer */
938 	uint32_t			*eventidx;
939 
940 	/**
941 	 * Identify Controller data.
942 	 */
943 	struct spdk_nvme_ctrlr_data	cdata;
944 
945 	/**
946 	 * Zoned Namespace Command Set Specific Identify Controller data.
947 	 */
948 	struct spdk_nvme_zns_ctrlr_data	*cdata_zns;
949 
950 	struct spdk_bit_array		*free_io_qids;
951 	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
952 
953 	struct spdk_nvme_ctrlr_opts	opts;
954 
955 	uint64_t			quirks;
956 
957 	/* Extra sleep time during controller initialization */
958 	uint64_t			sleep_timeout_tsc;
959 
960 	/** Track all the processes that manage this controller */
961 	TAILQ_HEAD(, spdk_nvme_ctrlr_process)	active_procs;
962 
963 
964 	STAILQ_HEAD(, nvme_request)	queued_aborts;
965 	uint32_t			outstanding_aborts;
966 
967 	/* CB to notify the user when the ctrlr is removed/failed. */
968 	spdk_nvme_remove_cb			remove_cb;
969 	void					*cb_ctx;
970 
971 	struct spdk_nvme_qpair		*external_io_msgs_qpair;
972 	pthread_mutex_t			external_io_msgs_lock;
973 	struct spdk_ring		*external_io_msgs;
974 
975 	STAILQ_HEAD(, nvme_io_msg_producer) io_producers;
976 
977 	struct spdk_nvme_ana_page		*ana_log_page;
978 	struct spdk_nvme_ana_group_descriptor	*copied_ana_desc;
979 	uint32_t				ana_log_page_size;
980 
981 	/* scratchpad pointer that can be used to send data between two NVME_CTRLR_STATEs */
982 	void				*tmp_ptr;
983 
984 	/* maximum zone append size in bytes */
985 	uint32_t			max_zone_append_size;
986 
987 	/* PMR size in bytes */
988 	uint64_t			pmr_size;
989 
990 	/* Boot Partition Info */
991 	enum nvme_bp_write_state	bp_ws;
992 	uint32_t			bpid;
993 	spdk_nvme_cmd_cb		bp_write_cb_fn;
994 	void				*bp_write_cb_arg;
995 
996 	/* Firmware Download */
997 	void				*fw_payload;
998 	unsigned int			fw_size_remaining;
999 	unsigned int			fw_offset;
1000 	unsigned int			fw_transfer_size;
1001 
1002 	/* Completed register operations */
1003 	STAILQ_HEAD(, nvme_register_completion)	register_operations;
1004 
1005 	union spdk_nvme_cc_register		process_init_cc;
1006 };
1007 
1008 struct spdk_nvme_probe_ctx {
1009 	struct spdk_nvme_transport_id		trid;
1010 	void					*cb_ctx;
1011 	spdk_nvme_probe_cb			probe_cb;
1012 	spdk_nvme_attach_cb			attach_cb;
1013 	spdk_nvme_remove_cb			remove_cb;
1014 	TAILQ_HEAD(, spdk_nvme_ctrlr)		init_ctrlrs;
1015 };
1016 
1017 typedef void (*nvme_ctrlr_detach_cb)(struct spdk_nvme_ctrlr *ctrlr);
1018 
1019 enum nvme_ctrlr_detach_state {
1020 	NVME_CTRLR_DETACH_SET_CC,
1021 	NVME_CTRLR_DETACH_CHECK_CSTS,
1022 	NVME_CTRLR_DETACH_GET_CSTS,
1023 	NVME_CTRLR_DETACH_GET_CSTS_DONE,
1024 };
1025 
1026 struct nvme_ctrlr_detach_ctx {
1027 	struct spdk_nvme_ctrlr			*ctrlr;
1028 	nvme_ctrlr_detach_cb			cb_fn;
1029 	uint64_t				shutdown_start_tsc;
1030 	uint32_t				shutdown_timeout_ms;
1031 	bool					shutdown_complete;
1032 	enum nvme_ctrlr_detach_state		state;
1033 	union spdk_nvme_csts_register		csts;
1034 	TAILQ_ENTRY(nvme_ctrlr_detach_ctx)	link;
1035 };
1036 
1037 struct spdk_nvme_detach_ctx {
1038 	TAILQ_HEAD(, nvme_ctrlr_detach_ctx)	head;
1039 };
1040 
1041 struct nvme_driver {
1042 	pthread_mutex_t			lock;
1043 
1044 	/** Multi-process shared attached controller list */
1045 	TAILQ_HEAD(, spdk_nvme_ctrlr)	shared_attached_ctrlrs;
1046 
1047 	bool				initialized;
1048 	struct spdk_uuid		default_extended_host_id;
1049 
1050 	/** netlink socket fd for hotplug messages */
1051 	int				hotplug_fd;
1052 };
1053 
1054 extern struct nvme_driver *g_spdk_nvme_driver;
1055 
1056 int nvme_driver_init(void);
1057 
1058 #define nvme_delay		usleep
1059 
1060 static inline bool
1061 nvme_qpair_is_admin_queue(struct spdk_nvme_qpair *qpair)
1062 {
1063 	return qpair->id == 0;
1064 }
1065 
1066 static inline bool
1067 nvme_qpair_is_io_queue(struct spdk_nvme_qpair *qpair)
1068 {
1069 	return qpair->id != 0;
1070 }
1071 
1072 static inline int
1073 nvme_robust_mutex_lock(pthread_mutex_t *mtx)
1074 {
1075 	int rc = pthread_mutex_lock(mtx);
1076 
1077 #ifndef __FreeBSD__
1078 	if (rc == EOWNERDEAD) {
1079 		rc = pthread_mutex_consistent(mtx);
1080 	}
1081 #endif
1082 
1083 	return rc;
1084 }
1085 
1086 static inline int
1087 nvme_robust_mutex_unlock(pthread_mutex_t *mtx)
1088 {
1089 	return pthread_mutex_unlock(mtx);
1090 }
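
/*
 * Illustrative note: these wrappers are intended for robust, process-shared
 * mutexes (see nvme_robust_mutex_init_shared() below).  If the owning process
 * dies while holding the lock, pthread_mutex_lock() returns EOWNERDEAD and
 * pthread_mutex_consistent() makes the mutex usable again, e.g.
 *
 *   nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
 *   ... critical section ...
 *   nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 */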
1091 
1092 /* Poll group management functions. */
1093 int nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
1094 int nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1095 
1096 /* Admin functions */
1097 int	nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr,
1098 				uint8_t cns, uint16_t cntid, uint32_t nsid,
1099 				uint8_t csi, void *payload, size_t payload_size,
1100 				spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1101 int	nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
1102 				      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn,
1103 				      void *cb_arg);
1104 int	nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
1105 				      spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1106 int	nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
1107 		union spdk_nvme_feat_async_event_configuration config,
1108 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1109 int	nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
1110 				   spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1111 int	nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1112 				 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1113 int	nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1114 				 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1115 int	nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
1116 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1117 int	nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr,
1118 		uint64_t prp1, uint64_t prp2,
1119 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1120 int	nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
1121 				 void *cb_arg);
1122 int	nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1123 			      struct spdk_nvme_format *format, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1124 int	nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
1125 				 const struct spdk_nvme_fw_commit *fw_commit,
1126 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1127 int	nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
1128 		uint32_t size, uint32_t offset, void *payload,
1129 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1130 int	nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1131 				struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
1132 				spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1133 void	nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl);
1134 int	nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
1135 				 struct nvme_completion_poll_status *status);
1136 int	nvme_wait_for_completion_robust_lock(struct spdk_nvme_qpair *qpair,
1137 		struct nvme_completion_poll_status *status,
1138 		pthread_mutex_t *robust_mutex);
1139 int	nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
1140 		struct nvme_completion_poll_status *status,
1141 		uint64_t timeout_in_usecs);
1142 int	nvme_wait_for_completion_robust_lock_timeout(struct spdk_nvme_qpair *qpair,
1143 		struct nvme_completion_poll_status *status,
1144 		pthread_mutex_t *robust_mutex,
1145 		uint64_t timeout_in_usecs);
1146 int	nvme_wait_for_completion_robust_lock_timeout_poll(struct spdk_nvme_qpair *qpair,
1147 		struct nvme_completion_poll_status *status,
1148 		pthread_mutex_t *robust_mutex);
1149 
1150 struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr,
1151 		pid_t pid);
1152 struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr);
1153 int	nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle);
1154 void	nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr);
1155 struct spdk_pci_device *nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr);
1156 
1157 int	nvme_ctrlr_probe(const struct spdk_nvme_transport_id *trid,
1158 			 struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle);
1159 
1160 int	nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr);
1161 void	nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr);
1162 void	nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1163 void	nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr,
1164 				  struct nvme_ctrlr_detach_ctx *ctx);
1165 int	nvme_ctrlr_destruct_poll_async(struct spdk_nvme_ctrlr *ctrlr,
1166 				       struct nvme_ctrlr_detach_ctx *ctx);
1167 void	nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove);
1168 int	nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr);
1169 void	nvme_ctrlr_disable(struct spdk_nvme_ctrlr *ctrlr);
1170 int	nvme_ctrlr_disable_poll(struct spdk_nvme_ctrlr *ctrlr);
1171 void	nvme_ctrlr_connected(struct spdk_nvme_probe_ctx *probe_ctx,
1172 			     struct spdk_nvme_ctrlr *ctrlr);
1173 
1174 int	nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
1175 					struct nvme_request *req);
1176 int	nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap);
1177 int	nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs);
1178 int	nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz);
1179 int	nvme_ctrlr_get_pmrcap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_pmrcap_register *pmrcap);
1180 int	nvme_ctrlr_get_bpinfo(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bpinfo_register *bpinfo);
1181 int	nvme_ctrlr_set_bprsel(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bprsel_register *bprsel);
1182 int	nvme_ctrlr_set_bpmbl(struct spdk_nvme_ctrlr *ctrlr, uint64_t bpmbl_value);
1183 bool	nvme_ctrlr_multi_iocs_enabled(struct spdk_nvme_ctrlr *ctrlr);
1184 void    nvme_ctrlr_process_async_event(struct spdk_nvme_ctrlr *ctrlr,
1185 				       const struct spdk_nvme_cpl *cpl);
1186 void nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1187 void nvme_ctrlr_complete_queued_async_events(struct spdk_nvme_ctrlr *ctrlr);
1188 void nvme_ctrlr_abort_queued_aborts(struct spdk_nvme_ctrlr *ctrlr);
1189 int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
1190 		    struct spdk_nvme_ctrlr *ctrlr,
1191 		    enum spdk_nvme_qprio qprio,
1192 		    uint32_t num_requests, bool async);
1193 void	nvme_qpair_deinit(struct spdk_nvme_qpair *qpair);
1194 void	nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair);
1195 int	nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair,
1196 				  struct nvme_request *req);
1197 void	nvme_qpair_abort_all_queued_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
1198 uint32_t nvme_qpair_abort_queued_reqs_with_cbarg(struct spdk_nvme_qpair *qpair, void *cmd_cb_arg);
1199 void	nvme_qpair_abort_queued_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
1200 void	nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_requests);
1201 int	nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr);
1202 void	nvme_ns_set_identify_data(struct spdk_nvme_ns *ns);
1203 void	nvme_ns_set_id_desc_list_data(struct spdk_nvme_ns *ns);
1204 void	nvme_ns_free_zns_specific_data(struct spdk_nvme_ns *ns);
1205 void	nvme_ns_free_iocs_specific_data(struct spdk_nvme_ns *ns);
1206 bool	nvme_ns_has_supported_iocs_specific_data(struct spdk_nvme_ns *ns);
1207 int	nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
1208 			  struct spdk_nvme_ctrlr *ctrlr);
1209 void	nvme_ns_destruct(struct spdk_nvme_ns *ns);
1210 int	nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1211 					void *buffer, void *metadata, uint64_t zslba,
1212 					uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1213 					uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag);
1214 int nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1215 				     uint64_t zslba, uint32_t lba_count,
1216 				     spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
1217 				     spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1218 				     spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
1219 				     uint16_t apptag_mask, uint16_t apptag);
1220 
1221 int	nvme_fabric_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
1222 int	nvme_fabric_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
1223 int	nvme_fabric_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
1224 int	nvme_fabric_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
1225 int	nvme_fabric_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1226 		uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1227 int	nvme_fabric_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1228 		uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1229 int	nvme_fabric_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1230 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1231 int	nvme_fabric_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1232 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1233 int	nvme_fabric_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
1234 int	nvme_fabric_ctrlr_discover(struct spdk_nvme_ctrlr *ctrlr,
1235 				   struct spdk_nvme_probe_ctx *probe_ctx);
1236 int	nvme_fabric_qpair_connect(struct spdk_nvme_qpair *qpair, uint32_t num_entries);
1237 int	nvme_fabric_qpair_connect_async(struct spdk_nvme_qpair *qpair, uint32_t num_entries);
1238 int	nvme_fabric_qpair_connect_poll(struct spdk_nvme_qpair *qpair);
1239 
1240 typedef int (*spdk_nvme_parse_ana_log_page_cb)(
1241 	const struct spdk_nvme_ana_group_descriptor *desc, void *cb_arg);
1242 int	nvme_ctrlr_parse_ana_log_page(struct spdk_nvme_ctrlr *ctrlr,
1243 				      spdk_nvme_parse_ana_log_page_cb cb_fn, void *cb_arg);
1244 
1245 #define NVME_INIT_REQUEST(req, _cb_fn, _cb_arg, _payload, _payload_size, _md_size)	\
1246 	do {						\
1247 		req->cb_fn = _cb_fn;			\
1248 		req->cb_arg = _cb_arg;			\
1249 		req->payload = _payload;		\
1250 		req->payload_size = _payload_size;	\
1251 		req->md_size = _md_size;		\
1252 		req->pid = g_spdk_nvme_pid;		\
1253 		req->submit_tick = 0;			\
1254 	} while (0);
1255 
1256 static inline struct nvme_request *
1257 nvme_allocate_request(struct spdk_nvme_qpair *qpair,
1258 		      const struct nvme_payload *payload, uint32_t payload_size, uint32_t md_size,
1259 		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1260 {
1261 	struct nvme_request *req;
1262 
1263 	req = STAILQ_FIRST(&qpair->free_req);
1264 	if (req == NULL) {
1265 		return req;
1266 	}
1267 
1268 	STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);
1269 
1270 	/*
1271 	 * Only memset/zero fields that need it.  All other fields
1272 	 *  will be initialized appropriately either later in this
1273 	 *  function, or before they are needed later in the
1274 	 *  submission path.  For example, the children
1275 	 *  TAILQ_ENTRY and following members are
1276 	 *  only used as part of I/O splitting so we avoid
1277 	 *  memsetting them until it is actually needed.
1278 	 *  They will be initialized in nvme_request_add_child()
1279 	 *  if the request is split.
1280 	 */
1281 	memset(req, 0, offsetof(struct nvme_request, payload_size));
1282 
1283 	NVME_INIT_REQUEST(req, cb_fn, cb_arg, *payload, payload_size, md_size);
1284 
1285 	return req;
1286 }
1287 
1288 static inline struct nvme_request *
1289 nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair,
1290 			     void *buffer, uint32_t payload_size,
1291 			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1292 {
1293 	struct nvme_payload payload;
1294 
1295 	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
1296 
1297 	return nvme_allocate_request(qpair, &payload, payload_size, 0, cb_fn, cb_arg);
1298 }
1299 
1300 static inline struct nvme_request *
1301 nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1302 {
1303 	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
1304 }
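
/*
 * Usage sketch (illustrative, callback names hypothetical): requests are
 * pulled from the qpair's free_req list, filled in, and submitted, e.g.
 *
 *   struct nvme_request *req;
 *
 *   req = nvme_allocate_request_contig(qpair, buf, len, my_cb, my_ctx);
 *   if (req == NULL) {
 *           return -EAGAIN;  // free_req list exhausted
 *   }
 *   // fill in req->cmd (opcode, nsid, command-specific dwords), then:
 *   rc = nvme_qpair_submit_request(qpair, req);
 */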
1305 
1306 struct nvme_request *nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
1307 		void *buffer, uint32_t payload_size,
1308 		spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller);
1309 
1310 static inline void
1311 nvme_complete_request(spdk_nvme_cmd_cb cb_fn, void *cb_arg, struct spdk_nvme_qpair *qpair,
1312 		      struct nvme_request *req, struct spdk_nvme_cpl *cpl)
1313 {
1314 	struct spdk_nvme_cpl            err_cpl;
1315 	struct nvme_error_cmd           *cmd;
1316 
1317 	/* error injection at completion path,
1318 	 * only inject for successful completed commands
1319 	 */
1320 	if (spdk_unlikely(!TAILQ_EMPTY(&qpair->err_cmd_head) &&
1321 			  !spdk_nvme_cpl_is_error(cpl))) {
1322 		TAILQ_FOREACH(cmd, &qpair->err_cmd_head, link) {
1323 
1324 			if (cmd->do_not_submit) {
1325 				continue;
1326 			}
1327 
1328 			if ((cmd->opc == req->cmd.opc) && cmd->err_count) {
1329 
1330 				err_cpl = *cpl;
1331 				err_cpl.status.sct = cmd->status.sct;
1332 				err_cpl.status.sc = cmd->status.sc;
1333 
1334 				cpl = &err_cpl;
1335 				cmd->err_count--;
1336 				break;
1337 			}
1338 		}
1339 	}
1340 
1341 	if (cb_fn) {
1342 		cb_fn(cb_arg, cpl);
1343 	}
1344 }
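
/*
 * Error-injection sketch (illustrative): entries queued on
 * qpair->err_cmd_head (struct nvme_error_cmd) override the status of an
 * otherwise successful completion whose opcode matches, up to err_count
 * times, before the user callback sees it; e.g. an entry with
 * opc == SPDK_NVME_OPC_READ and an error sct/sc makes the next matching
 * read complete with that injected status.
 */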
1345 
1346 static inline void
1347 nvme_free_request(struct nvme_request *req)
1348 {
1349 	assert(req != NULL);
1350 	assert(req->num_children == 0);
1351 	assert(req->qpair != NULL);
1352 
1353 	/* The reserved_req does not go in the free_req STAILQ - it is
1354 	 * saved only for use with a FABRICS/CONNECT command.
1355 	 */
1356 	if (spdk_likely(req->qpair->reserved_req != req)) {
1357 		STAILQ_INSERT_HEAD(&req->qpair->free_req, req, stailq);
1358 	}
1359 }
1360 
1361 static inline void
1362 nvme_qpair_set_state(struct spdk_nvme_qpair *qpair, enum nvme_qpair_state state)
1363 {
1364 	qpair->state = state;
1365 	if (state == NVME_QPAIR_ENABLED) {
1366 		qpair->is_new_qpair = false;
1367 	}
1368 }
1369 
1370 static inline enum nvme_qpair_state
1371 nvme_qpair_get_state(struct spdk_nvme_qpair *qpair) {
1372 	return qpair->state;
1373 }
1374 
1375 static inline void
1376 nvme_qpair_free_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
1377 {
1378 	assert(req != NULL);
1379 	assert(req->num_children == 0);
1380 
1381 	STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
1382 }
1383 
1384 static inline void
1385 nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child)
1386 {
1387 	assert(parent != NULL);
1388 	assert(child != NULL);
1389 	assert(child->parent == parent);
1390 	assert(parent->num_children != 0);
1391 
1392 	parent->num_children--;
1393 	child->parent = NULL;
1394 	TAILQ_REMOVE(&parent->children, child, child_tailq);
1395 }
1396 
1397 static inline void
1398 nvme_cb_complete_child(void *child_arg, const struct spdk_nvme_cpl *cpl)
1399 {
1400 	struct nvme_request *child = child_arg;
1401 	struct nvme_request *parent = child->parent;
1402 
1403 	nvme_request_remove_child(parent, child);
1404 
1405 	if (spdk_nvme_cpl_is_error(cpl)) {
1406 		memcpy(&parent->parent_status, cpl, sizeof(*cpl));
1407 	}
1408 
1409 	if (parent->num_children == 0) {
1410 		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
1411 				      parent, &parent->parent_status);
1412 		nvme_free_request(parent);
1413 	}
1414 }
1415 
1416 static inline void
1417 nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
1418 {
1419 	assert(parent->num_children != UINT16_MAX);
1420 
1421 	if (parent->num_children == 0) {
1422 		/*
1423 		 * Defer initialization of the children TAILQ since it falls
1424 		 *  on a separate cacheline.  This ensures we do not touch this
1425 		 *  cacheline except on request splitting cases, which are
1426 		 *  relatively rare.
1427 		 */
1428 		TAILQ_INIT(&parent->children);
1429 		parent->parent = NULL;
1430 		memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));
1431 	}
1432 
1433 	parent->num_children++;
1434 	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
1435 	child->parent = parent;
1436 	child->cb_fn = nvme_cb_complete_child;
1437 	child->cb_arg = child;
1438 }
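
/*
 * Splitting sketch (illustrative): when an I/O exceeds the transfer or stripe
 * limits, the driver allocates one child request per piece and links it to
 * the parent with nvme_request_add_child().  Each child completes through
 * nvme_cb_complete_child(), which copies any error into parent_status and
 * completes the parent once num_children drops back to zero.
 */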
1439 
1440 static inline void
1441 nvme_request_free_children(struct nvme_request *req)
1442 {
1443 	struct nvme_request *child, *tmp;
1444 
1445 	if (req->num_children == 0) {
1446 		return;
1447 	}
1448 
1449 	/* free all child nvme_request */
1450 	TAILQ_FOREACH_SAFE(child, &req->children, child_tailq, tmp) {
1451 		nvme_request_remove_child(req, child);
1452 		nvme_request_free_children(child);
1453 		nvme_free_request(child);
1454 	}
1455 }
1456 
1457 int	nvme_request_check_timeout(struct nvme_request *req, uint16_t cid,
1458 				   struct spdk_nvme_ctrlr_process *active_proc, uint64_t now_tick);
1459 uint64_t nvme_get_quirks(const struct spdk_pci_id *id);
1460 
1461 int	nvme_robust_mutex_init_shared(pthread_mutex_t *mtx);
1462 int	nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx);
1463 
1464 bool	nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl);
1465 
1466 struct spdk_nvme_ctrlr *nvme_get_ctrlr_by_trid_unsafe(
1467 	const struct spdk_nvme_transport_id *trid);
1468 
1469 const struct spdk_nvme_transport *nvme_get_transport(const char *transport_name);
1470 const struct spdk_nvme_transport *nvme_get_first_transport(void);
1471 const struct spdk_nvme_transport *nvme_get_next_transport(const struct spdk_nvme_transport
1472 		*transport);
1473 void  nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr);
1474 
1475 /* Transport specific functions */
1476 struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
1477 		const struct spdk_nvme_ctrlr_opts *opts,
1478 		void *devhandle);
1479 int nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1480 int nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
1481 int nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr);
1482 int nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr);
1483 int nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
1484 int nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
1485 int nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
1486 int nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
1487 int nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1488 		uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1489 int nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1490 		uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1491 int nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1492 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1493 int nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1494 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1495 uint32_t nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr);
1496 uint16_t nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);
1497 struct spdk_nvme_qpair *nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1498 		uint16_t qid, const struct spdk_nvme_io_qpair_opts *opts);
1499 int nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr);
1500 void *nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
1501 int nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr);
1502 int nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr);
1503 int nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr);
1504 void *nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
1505 int nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr);
1506 void nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1507 		struct spdk_nvme_qpair *qpair);
1508 int nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1509 				       struct spdk_nvme_qpair *qpair);
1510 void nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1511 		struct spdk_nvme_qpair *qpair);
1512 void nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair);
1513 int nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
1514 		struct spdk_memory_domain **domains, int array_size);
1515 void nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
1516 int nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair);
1517 int nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req);
1518 int32_t nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair,
1519 		uint32_t max_completions);
1520 void nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair);
1521 int nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
1522 		int (*iter_fn)(struct nvme_request *req, void *arg),
1523 		void *arg);
1524 
1525 struct spdk_nvme_transport_poll_group *nvme_transport_poll_group_create(
1526 	const struct spdk_nvme_transport *transport);
1527 struct spdk_nvme_transport_poll_group *nvme_transport_qpair_get_optimal_poll_group(
1528 	const struct spdk_nvme_transport *transport,
1529 	struct spdk_nvme_qpair *qpair);
1530 int nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
1531 				  struct spdk_nvme_qpair *qpair);
1532 int nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
1533 				     struct spdk_nvme_qpair *qpair);
1534 int nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1535 int nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
1536 int64_t nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
1537 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
1538 int nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup);
1539 int nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
1540 					struct spdk_nvme_transport_poll_group_stat **stats);
1541 void nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
1542 		struct spdk_nvme_transport_poll_group_stat *stats);
1543 enum spdk_nvme_transport_type nvme_transport_get_trtype(const struct spdk_nvme_transport
1544 		*transport);
1545 /*
1546  * The ref-related functions below must be called with the global
1547  *  driver lock held to handle the multi-process case.
1548  *  Within these functions, the per-ctrlr ctrlr_lock is also
1549  *  acquired to handle the multi-thread case.
1550  */
1551 void	nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr);
1552 void	nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr);
1553 int	nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr);
1554 
1555 static inline bool
1556 _is_page_aligned(uint64_t address, uint64_t page_size)
1557 {
1558 	return (address & (page_size - 1)) == 0;
1559 }
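
/* e.g. _is_page_aligned(0x201000, 0x1000) is true, while _is_page_aligned(0x201200, 0x1000) is false. */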
1560 
1561 #endif /* __NVME_INTERNAL_H__ */
1562