xref: /spdk/lib/nvme/nvme_internal.h (revision 1ae735a5d13f736acb1895cd8146266345791321)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2015 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #ifndef __NVME_INTERNAL_H__
8 #define __NVME_INTERNAL_H__
9 
10 #include "spdk/config.h"
11 #include "spdk/likely.h"
12 #include "spdk/stdinc.h"
13 
14 #include "spdk/nvme.h"
15 
16 #if defined(__i386__) || defined(__x86_64__)
17 #include <x86intrin.h>
18 #endif
19 
20 #include "spdk/queue.h"
21 #include "spdk/barrier.h"
22 #include "spdk/bit_array.h"
23 #include "spdk/mmio.h"
24 #include "spdk/pci_ids.h"
25 #include "spdk/util.h"
26 #include "spdk/memory.h"
27 #include "spdk/nvme_intel.h"
28 #include "spdk/nvmf_spec.h"
29 #include "spdk/tree.h"
30 #include "spdk/uuid.h"
31 #include "spdk/fd_group.h"
32 
33 #include "spdk_internal/assert.h"
34 #include "spdk/log.h"
35 
36 extern pid_t g_spdk_nvme_pid;
37 
38 extern struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts;
39 
40 /*
41  * Some Intel devices support vendor-unique read latency log page even
42  * though the log page directory says otherwise.
43  */
44 #define NVME_INTEL_QUIRK_READ_LATENCY 0x1
45 
46 /*
47  * Some Intel devices support vendor-unique write latency log page even
48  * though the log page directory says otherwise.
49  */
50 #define NVME_INTEL_QUIRK_WRITE_LATENCY 0x2
51 
52 /*
53  * The controller needs a delay before it starts checking the device
54  * readiness, which is done by reading the NVME_CSTS_RDY bit.
55  */
56 #define NVME_QUIRK_DELAY_BEFORE_CHK_RDY	0x4
57 
58 /*
59  * The controller performs best when I/O is split on particular
60  * LBA boundaries.
61  */
62 #define NVME_INTEL_QUIRK_STRIPING 0x8
63 
64 /*
65  * The controller needs a delay after allocating an I/O queue pair
66  * before it is ready to accept I/O commands.
67  */
68 #define NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC 0x10
69 
70 /*
71  * Earlier NVMe devices do not indicate whether unmapped blocks
72  * will read all zeroes or not. This define indicates that the
73  * device does in fact read all zeroes after an unmap event
74  */
75 #define NVME_QUIRK_READ_ZERO_AFTER_DEALLOCATE 0x20
76 
77 /*
78  * The controller doesn't handle Identify CNS values other than 0 or 1 correctly.
79  */
80 #define NVME_QUIRK_IDENTIFY_CNS 0x40
81 
82 /*
83  * The controller supports the Open Channel command set if an additional
84  * condition matches, e.g. the first byte (value 0x1) in the vendor specific
85  * bits of the namespace identify structure is set.
86  */
87 #define NVME_QUIRK_OCSSD 0x80
88 
89 /*
90  * The controller has an Intel vendor ID but does not support Intel vendor-specific
91  * log pages.  This is primarily for QEMU emulated SSDs which report an Intel vendor
92  * ID but do not support these log pages.
93  */
94 #define NVME_INTEL_QUIRK_NO_LOG_PAGES 0x100
95 
96 /*
97  * The controller does not set SHST_COMPLETE in a reasonable amount of time.  This
98  * is primarily seen in virtual VMware NVMe SSDs.  This quirk merely adds an additional
99  * error message noting that on VMware NVMe SSDs, the shutdown timeout may be expected.
100  */
101 #define NVME_QUIRK_SHST_COMPLETE 0x200
102 
103 /*
104  * The controller requires an extra delay before starting the initialization process
105  * during attach.
106  */
107 #define NVME_QUIRK_DELAY_BEFORE_INIT 0x400
108 
109 /*
110  * Some SSDs exhibit poor performance with the default SPDK NVMe IO queue size.
111  * This quirk will increase the default to 1024 which matches other operating
112  * systems, at the cost of some extra memory usage.  Users can still override
113  * the increased default by changing the spdk_nvme_io_qpair_opts when allocating
114  * a new queue pair.
115  */
116 #define NVME_QUIRK_MINIMUM_IO_QUEUE_SIZE 0x800
117 
118 /**
119  * The maximum access width to PCI memory space is 8 bytes; don't use AVX2 or
120  * SSE instructions to optimize memory accesses (memcpy or memset) larger than
121  * 8 bytes.
122  */
123 #define NVME_QUIRK_MAXIMUM_PCI_ACCESS_WIDTH 0x1000
124 
125 /**
126  * The SSD does not support OPAL even though it sets the security bit in OACS.
127  */
128 #define NVME_QUIRK_OACS_SECURITY 0x2000
129 
130 /**
131  * Intel P55XX SSDs don't support the Dataset Management command with SGL format,
132  * so use PRP with the DSM command.
133  */
134 #define NVME_QUIRK_NO_SGL_FOR_DSM 0x4000
135 
136 /**
137  * Maximum Data Transfer Size (MDTS) excludes interleaved metadata.
138  */
139 #define NVME_QUIRK_MDTS_EXCLUDE_MD 0x8000
140 
141 /**
142  * Force not to use SGL even if the controller reports that it can
143  * support it.
144  */
145 #define NVME_QUIRK_NOT_USE_SGL 0x10000
146 
147 /*
148  * Some SSDs require the admin submission queue size to be an even
149  * multiple of 4 KiB.
150  */
151 #define NVME_QUIRK_MINIMUM_ADMIN_QUEUE_SIZE 0x20000
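
/*
 * All of the quirk values above are bit flags.  nvme_get_quirks() returns the
 * bitwise OR of every quirk matching a device's PCI ID, and the result is
 * stored in spdk_nvme_ctrlr::quirks.  Illustrative check (sketch only,
 * delay_us is a hypothetical value):
 *
 *	if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) {
 *		nvme_delay(delay_us);
 *	}
 */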
152 
153 #define NVME_MAX_ASYNC_EVENTS	(8)
154 
155 #define NVME_MAX_ADMIN_TIMEOUT_IN_SECS	(30)
156 
157 /* Maximum log page size to fetch for AERs. */
158 #define NVME_MAX_AER_LOG_SIZE		(4096)
159 
160 /*
161  * NVME_MAX_IO_QUEUES in nvme_spec.h defines the 64K spec-limit, but this
162  *  define specifies the maximum number of queues this driver will actually
163  *  try to configure, if available.
164  */
165 #define DEFAULT_MAX_IO_QUEUES		(1024)
166 #define MAX_IO_QUEUES_WITH_INTERRUPTS	(256)
167 #define DEFAULT_ADMIN_QUEUE_SIZE	(32)
168 #define DEFAULT_IO_QUEUE_SIZE		(256)
169 #define DEFAULT_IO_QUEUE_SIZE_FOR_QUIRK	(1024) /* Matches Linux kernel driver */
170 
171 #define DEFAULT_IO_QUEUE_REQUESTS	(512)
172 
173 #define SPDK_NVME_DEFAULT_RETRY_COUNT	(4)
174 
175 #define SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED	(0)
176 #define SPDK_NVME_DEFAULT_TRANSPORT_ACK_TIMEOUT	SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED
177 
178 #define SPDK_NVME_TRANSPORT_TOS_DISABLED	(0)
179 
180 #define MIN_KEEP_ALIVE_TIMEOUT_IN_MS	(10000)
181 
182 /* We want to fit submission and completion rings each in a single 2MB
183  * hugepage to ensure physical address contiguity.
184  */
185 #define MAX_IO_QUEUE_ENTRIES		(VALUE_2MB / spdk_max( \
186 						sizeof(struct spdk_nvme_cmd), \
187 						sizeof(struct spdk_nvme_cpl)))
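
/*
 * For reference: the NVMe spec fixes submission queue entries at 64 bytes and
 * completion queue entries at 16 bytes, so with 2 MiB hugepages this evaluates
 * to 2 MiB / 64 = 32768 entries.
 */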
188 
189 /* Default timeout for fabrics connect commands. */
190 #ifdef DEBUG
191 #define NVME_FABRIC_CONNECT_COMMAND_TIMEOUT 0
192 #else
193 /* 500 millisecond timeout. */
194 #define NVME_FABRIC_CONNECT_COMMAND_TIMEOUT 500000
195 #endif
196 
197 /* This value indicates that a read from a PCIe register is invalid. This can happen when a device is no longer present. */
198 #define SPDK_NVME_INVALID_REGISTER_VALUE 0xFFFFFFFFu
199 
200 enum nvme_payload_type {
201 	NVME_PAYLOAD_TYPE_INVALID = 0,
202 
203 	/** nvme_request::u.payload.contig_buffer is valid for this request */
204 	NVME_PAYLOAD_TYPE_CONTIG,
205 
206 	/** nvme_request::u.sgl is valid for this request */
207 	NVME_PAYLOAD_TYPE_SGL,
208 };
209 
210 /** Boot partition write states */
211 enum nvme_bp_write_state {
212 	SPDK_NVME_BP_WS_DOWNLOADING	= 0x0,
213 	SPDK_NVME_BP_WS_DOWNLOADED	= 0x1,
214 	SPDK_NVME_BP_WS_REPLACE		= 0x2,
215 	SPDK_NVME_BP_WS_ACTIVATE	= 0x3,
216 };
217 
218 /**
219  * Descriptor for a request data payload.
220  */
221 struct nvme_payload {
222 	/**
223 	 * Functions for retrieving physical addresses for scattered payloads.
224 	 */
225 	spdk_nvme_req_reset_sgl_cb reset_sgl_fn;
226 	spdk_nvme_req_next_sge_cb next_sge_fn;
227 
228 	/**
229 	 * Extended IO options passed by the user
230 	 */
231 	struct spdk_nvme_ns_cmd_ext_io_opts *opts;
232 	/**
233 	 * If reset_sgl_fn == NULL, this is a contig payload, and contig_or_cb_arg contains the
234 	 * virtual memory address of a single virtually contiguous buffer.
235 	 *
236 	 * If reset_sgl_fn != NULL, this is a SGL payload, and contig_or_cb_arg contains the
237 	 * cb_arg that will be passed to the SGL callback functions.
238 	 */
239 	void *contig_or_cb_arg;
240 
241 	/** Virtual memory address of a single virtually contiguous metadata buffer */
242 	void *md;
243 };
244 
245 #define NVME_PAYLOAD_CONTIG(contig_, md_) \
246 	(struct nvme_payload) { \
247 		.reset_sgl_fn = NULL, \
248 		.next_sge_fn = NULL, \
249 		.contig_or_cb_arg = (contig_), \
250 		.md = (md_), \
251 	}
252 
253 #define NVME_PAYLOAD_SGL(reset_sgl_fn_, next_sge_fn_, cb_arg_, md_) \
254 	(struct nvme_payload) { \
255 		.reset_sgl_fn = (reset_sgl_fn_), \
256 		.next_sge_fn = (next_sge_fn_), \
257 		.contig_or_cb_arg = (cb_arg_), \
258 		.md = (md_), \
259 	}
260 
261 static inline enum nvme_payload_type
262 nvme_payload_type(const struct nvme_payload *payload) {
263 	return payload->reset_sgl_fn ? NVME_PAYLOAD_TYPE_SGL : NVME_PAYLOAD_TYPE_CONTIG;
264 }
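
/*
 * Illustrative use of the payload helpers above (sketch only; buf is a
 * caller-provided data buffer):
 *
 *	struct nvme_payload payload = NVME_PAYLOAD_CONTIG(buf, NULL);
 *
 *	assert(nvme_payload_type(&payload) == NVME_PAYLOAD_TYPE_CONTIG);
 *
 * An SGL payload instead carries the caller's reset_sgl/next_sge callbacks and
 * their cb_arg, so nvme_payload_type() reports NVME_PAYLOAD_TYPE_SGL.
 */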
265 
266 struct nvme_error_cmd {
267 	bool				do_not_submit;
268 	uint64_t			timeout_tsc;
269 	uint32_t			err_count;
270 	uint8_t				opc;
271 	struct spdk_nvme_status		status;
272 	TAILQ_ENTRY(nvme_error_cmd)	link;
273 };
274 
275 struct nvme_request {
276 	struct spdk_nvme_cmd		cmd;
277 
278 	uint8_t				retries;
279 
280 	uint8_t				timed_out : 1;
281 
282 	/**
283 	 * True if the request is in the queued_req list.
284 	 */
285 	uint8_t				queued : 1;
286 	uint8_t				reserved : 6;
287 
288 	/**
289 	 * Number of child requests still outstanding for this
290 	 *  request, which was split into multiple child requests.
291 	 */
292 	uint16_t			num_children;
293 
294 	/**
295 	 * Offset in bytes from the beginning of payload for this request.
296 	 * This is used for I/O commands that are split into multiple requests.
297 	 */
298 	uint32_t			payload_offset;
299 	uint32_t			md_offset;
300 
301 	uint32_t			payload_size;
302 
303 	/**
304 	 * Timeout ticks for error injection requests, can be extended in future
305 	 * to support per-request timeout feature.
306 	 */
307 	uint64_t			timeout_tsc;
308 
309 	/**
310 	 * Data payload for this request's command.
311 	 */
312 	struct nvme_payload		payload;
313 
314 	spdk_nvme_cmd_cb		cb_fn;
315 	void				*cb_arg;
316 	STAILQ_ENTRY(nvme_request)	stailq;
317 
318 	struct spdk_nvme_qpair		*qpair;
319 
320 	/*
321 	 * The value of spdk_get_ticks() when the request was submitted to the hardware.
322 	 * Only set if ctrlr->timeout_enabled is true.
323 	 */
324 	uint64_t			submit_tick;
325 
326 	/**
327 	 * The active admin request can be moved to a per process pending
328 	 *  list based on the saved pid to tell which process it belongs
329 	 *  to. The cpl saves the original completion information which
330 	 *  is used in the completion callback.
331 	 * NOTE: the two fields below are only used for admin requests.
332 	 */
333 	pid_t				pid;
334 	struct spdk_nvme_cpl		cpl;
335 
336 	uint32_t			md_size;
337 
338 	/**
339 	 * The following members should not be reordered with members
340 	 *  above.  These members are only needed when splitting
341 	 *  requests which is done rarely, and the driver is careful
342 	 *  requests, which is done rarely, and the driver is careful
343 	 *  needed, to avoid touching an extra cacheline.
344 	 */
345 
346 	/**
347 	 * Points to the outstanding child requests for a parent request.
348 	 *  Only valid if a request was split into multiple child
349 	 *  requests, and is not initialized for non-split requests.
350 	 */
351 	TAILQ_HEAD(, nvme_request)	children;
352 
353 	/**
354 	 * Linked-list pointers for a child request in its parent's list.
355 	 */
356 	TAILQ_ENTRY(nvme_request)	child_tailq;
357 
358 	/**
359 	 * Points to a parent request if part of a split request,
360 	 *   NULL otherwise.
361 	 */
362 	struct nvme_request		*parent;
363 
364 	/**
365 	 * Completion status for a parent request.  Initialized to all 0's
366 	 *  (SUCCESS) before child requests are submitted.  If a child
367 	 *  request completes with error, the error status is copied here,
368 	 *  to ensure that the parent request is also completed with error
369 	 *  status once all child requests are completed.
370 	 */
371 	struct spdk_nvme_cpl		parent_status;
372 
373 	/**
374 	 * The user_cb_fn and user_cb_arg fields are used for holding the original
375 	 * callback data when using nvme_allocate_request_user_copy.
376 	 */
377 	spdk_nvme_cmd_cb		user_cb_fn;
378 	void				*user_cb_arg;
379 	void				*user_buffer;
380 
381 	/** Sequence of accel operations associated with this request */
382 	void				*accel_sequence;
383 };
384 
385 struct nvme_completion_poll_status {
386 	struct spdk_nvme_cpl	cpl;
387 	uint64_t		timeout_tsc;
388 	/**
389 	 * DMA buffer retained throughout the duration of the command.  It'll be released
390 	 * automatically if the command times out, otherwise the user is responsible for freeing it.
391 	 */
392 	void			*dma_data;
393 	bool			done;
394 	/* This flag indicates that the request has timed out and the memory
395 	   must be freed in a completion callback */
396 	bool			timed_out;
397 };
398 
399 struct nvme_async_event_request {
400 	struct spdk_nvme_ctrlr		*ctrlr;
401 	struct nvme_request		*req;
402 	struct spdk_nvme_cpl		cpl;
403 };
404 
405 enum nvme_qpair_state {
406 	NVME_QPAIR_DISCONNECTED,
407 	NVME_QPAIR_DISCONNECTING,
408 	NVME_QPAIR_CONNECTING,
409 	NVME_QPAIR_CONNECTED,
410 	NVME_QPAIR_ENABLING,
411 	NVME_QPAIR_ENABLED,
412 	NVME_QPAIR_DESTROYING,
413 };
414 
415 enum nvme_qpair_auth_state {
416 	NVME_QPAIR_AUTH_STATE_NEGOTIATE,
417 	NVME_QPAIR_AUTH_STATE_AWAIT_NEGOTIATE,
418 	NVME_QPAIR_AUTH_STATE_AWAIT_CHALLENGE,
419 	NVME_QPAIR_AUTH_STATE_AWAIT_REPLY,
420 	NVME_QPAIR_AUTH_STATE_AWAIT_SUCCESS1,
421 	NVME_QPAIR_AUTH_STATE_AWAIT_SUCCESS2,
422 	NVME_QPAIR_AUTH_STATE_AWAIT_FAILURE2,
423 	NVME_QPAIR_AUTH_STATE_DONE,
424 };
425 
426 /* Authentication transaction required (authreq.atr) */
427 #define NVME_QPAIR_AUTH_FLAG_ATR	(1 << 0)
428 /* Authentication and secure channel required (authreq.ascr) */
429 #define NVME_QPAIR_AUTH_FLAG_ASCR	(1 << 1)
430 
431 /* Maximum size of a digest */
432 #define NVME_AUTH_DIGEST_MAX_SIZE	64
433 
434 struct nvme_auth {
435 	/* State of the authentication */
436 	enum nvme_qpair_auth_state	state;
437 	/* Status of the authentication */
438 	int				status;
439 	/* Transaction ID */
440 	uint16_t			tid;
441 	/* Flags */
442 	uint32_t			flags;
443 	/* Selected hash function */
444 	uint8_t				hash;
445 	/* Buffer used for controller challenge */
446 	uint8_t				challenge[NVME_AUTH_DIGEST_MAX_SIZE];
447 	/* User's auth cb fn/ctx */
448 	spdk_nvme_authenticate_cb	cb_fn;
449 	void				*cb_ctx;
450 };
451 
452 struct spdk_nvme_qpair {
453 	struct spdk_nvme_ctrlr			*ctrlr;
454 
455 	uint16_t				id;
456 
457 	uint8_t					qprio: 2;
458 
459 	uint8_t					state: 3;
460 
461 	uint8_t					async: 1;
462 
463 	uint8_t					is_new_qpair: 1;
464 
465 	uint8_t					abort_dnr: 1;
466 	/*
467 	 * Members for handling IO qpair deletion inside of a completion context.
468 	 * These are specifically defined as single bits, so that they do not
469 	 *  push this data structure out to another cacheline.
470 	 */
471 	uint8_t					in_completion_context: 1;
472 	uint8_t					delete_after_completion_context: 1;
473 
474 	/*
475 	 * Set when no deletion notification is needed. For example, the process
476 	 * which allocated this qpair exited unexpectedly.
477 	 */
478 	uint8_t					no_deletion_notification_needed: 1;
479 
480 	uint8_t					last_fuse: 2;
481 
482 	uint8_t					transport_failure_reason: 3;
483 	uint8_t					last_transport_failure_reason: 3;
484 
485 	/* The user is destroying qpair */
486 	uint8_t					destroy_in_progress: 1;
487 
488 	/* Number of IO outstanding at transport level */
489 	uint16_t				queue_depth;
490 
491 	enum spdk_nvme_transport_type		trtype;
492 
493 	uint32_t				num_outstanding_reqs;
494 
495 	/* request object used only for this qpair's FABRICS/CONNECT command (if needed) */
496 	struct nvme_request			*reserved_req;
497 
498 	STAILQ_HEAD(, nvme_request)		free_req;
499 	STAILQ_HEAD(, nvme_request)		queued_req;
500 
501 	/* List entry for spdk_nvme_transport_poll_group::qpairs */
502 	STAILQ_ENTRY(spdk_nvme_qpair)		poll_group_stailq;
503 
504 	/** Commands whose opcode is in this list will return an error */
505 	TAILQ_HEAD(, nvme_error_cmd)		err_cmd_head;
506 	/** Requests in this list will return an error */
507 	STAILQ_HEAD(, nvme_request)		err_req_head;
508 
509 	struct spdk_nvme_ctrlr_process		*active_proc;
510 
511 	struct spdk_nvme_transport_poll_group	*poll_group;
512 
513 	void					*poll_group_tailq_head;
514 
515 	const struct spdk_nvme_transport	*transport;
516 
517 	/* Entries below here are not touched in the main I/O path. */
518 
519 	struct nvme_completion_poll_status	*poll_status;
520 
521 	/* List entry for spdk_nvme_ctrlr::active_io_qpairs */
522 	TAILQ_ENTRY(spdk_nvme_qpair)		tailq;
523 
524 	/* List entry for spdk_nvme_ctrlr_process::allocated_io_qpairs */
525 	TAILQ_ENTRY(spdk_nvme_qpair)		per_process_tailq;
526 
527 	STAILQ_HEAD(, nvme_request)		aborting_queued_req;
528 
529 	void					*req_buf;
530 
531 	/* In-band authentication state */
532 	struct nvme_auth			auth;
533 };
534 
535 struct spdk_nvme_poll_group {
536 	void						*ctx;
537 	struct spdk_nvme_accel_fn_table			accel_fn_table;
538 	STAILQ_HEAD(, spdk_nvme_transport_poll_group)	tgroups;
539 	bool						in_process_completions;
540 	bool						enable_interrupts;
541 	bool						enable_interrupts_is_valid;
542 	int						disconnect_qpair_fd;
543 	struct spdk_fd_group				*fgrp;
544 	struct {
545 		spdk_nvme_poll_group_interrupt_cb	cb_fn;
546 		void					*cb_ctx;
547 	} interrupt;
548 };
549 
550 struct spdk_nvme_transport_poll_group {
551 	struct spdk_nvme_poll_group			*group;
552 	const struct spdk_nvme_transport		*transport;
553 	STAILQ_HEAD(, spdk_nvme_qpair)			connected_qpairs;
554 	STAILQ_HEAD(, spdk_nvme_qpair)			disconnected_qpairs;
555 	STAILQ_ENTRY(spdk_nvme_transport_poll_group)	link;
556 	uint32_t					num_connected_qpairs;
557 };
558 
559 struct spdk_nvme_ns {
560 	struct spdk_nvme_ctrlr		*ctrlr;
561 	uint32_t			sector_size;
562 
563 	/*
564 	 * Size of data transferred as part of each block,
565 	 * including metadata if FLBAS indicates the metadata is transferred
566 	 * as part of the data buffer at the end of each LBA.
567 	 */
568 	uint32_t			extended_lba_size;
569 
570 	uint32_t			md_size;
571 	uint32_t			pi_type;
572 	uint32_t			pi_format;
573 	uint32_t			sectors_per_max_io;
574 	uint32_t			sectors_per_max_io_no_md;
575 	uint32_t			sectors_per_stripe;
576 	uint32_t			id;
577 	uint16_t			flags;
578 	bool				active;
579 
580 	/* Command Set Identifier */
581 	enum spdk_nvme_csi		csi;
582 
583 	/* Namespace Identification Descriptor List (CNS = 03h) */
584 	uint8_t				id_desc_list[4096];
585 
586 	uint32_t			ana_group_id;
587 	enum spdk_nvme_ana_state	ana_state;
588 
589 	/* Identify Namespace data. */
590 	struct spdk_nvme_ns_data	nsdata;
591 
592 	/* Zoned Namespace Command Set Specific Identify Namespace data. */
593 	struct spdk_nvme_zns_ns_data	*nsdata_zns;
594 
595 	struct spdk_nvme_nvm_ns_data	*nsdata_nvm;
596 
597 	RB_ENTRY(spdk_nvme_ns)		node;
598 };
599 
600 #define CTRLR_STRING(ctrlr) \
601 	(spdk_nvme_trtype_is_fabrics(ctrlr->trid.trtype) ? \
602 	ctrlr->trid.subnqn : ctrlr->trid.traddr)
603 
604 #define NVME_CTRLR_ERRLOG(ctrlr, format, ...) \
605 	SPDK_ERRLOG("[%s, %u] " format, CTRLR_STRING(ctrlr), ctrlr->cntlid, ##__VA_ARGS__);
606 
607 #define NVME_CTRLR_WARNLOG(ctrlr, format, ...) \
608 	SPDK_WARNLOG("[%s, %u] " format, CTRLR_STRING(ctrlr), ctrlr->cntlid, ##__VA_ARGS__);
609 
610 #define NVME_CTRLR_NOTICELOG(ctrlr, format, ...) \
611 	SPDK_NOTICELOG("[%s, %u] " format, CTRLR_STRING(ctrlr), ctrlr->cntlid, ##__VA_ARGS__);
612 
613 #define NVME_CTRLR_INFOLOG(ctrlr, format, ...) \
614 	SPDK_INFOLOG(nvme, "[%s, %u] " format, CTRLR_STRING(ctrlr), ctrlr->cntlid, ##__VA_ARGS__);
615 
616 #ifdef DEBUG
617 #define NVME_CTRLR_DEBUGLOG(ctrlr, format, ...) \
618 	SPDK_DEBUGLOG(nvme, "[%s, %u] " format, CTRLR_STRING(ctrlr), ctrlr->cntlid, ##__VA_ARGS__);
619 #else
620 #define NVME_CTRLR_DEBUGLOG(ctrlr, ...) do { } while (0)
621 #endif
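
/*
 * Example use of the logging macros above (sketch): each message is prefixed
 * with the subsystem NQN (fabrics) or transport address (PCIe) plus the
 * controller ID, e.g.
 *
 *	NVME_CTRLR_ERRLOG(ctrlr, "failed to set number of queues: %d\n", rc);
 *
 * might print "[0000:5e:00.0, 0] failed to set number of queues: -6".
 */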
622 
623 /**
624  * State of struct spdk_nvme_ctrlr (in particular, during initialization).
625  */
626 enum nvme_ctrlr_state {
627 	/**
628 	 * Wait before initializing the controller.
629 	 */
630 	NVME_CTRLR_STATE_INIT_DELAY,
631 
632 	/**
633 	 * Connect the admin queue.
634 	 */
635 	NVME_CTRLR_STATE_CONNECT_ADMINQ,
636 
637 	/**
638 	 * Controller has not started initializing yet.
639 	 */
640 	NVME_CTRLR_STATE_INIT = NVME_CTRLR_STATE_CONNECT_ADMINQ,
641 
642 	/**
643 	 * Waiting for admin queue to connect.
644 	 */
645 	NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ,
646 
647 	/**
648 	 * Read Version (VS) register.
649 	 */
650 	NVME_CTRLR_STATE_READ_VS,
651 
652 	/**
653 	 * Waiting for Version (VS) register to be read.
654 	 */
655 	NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS,
656 
657 	/**
658 	 * Read Capabilities (CAP) register.
659 	 */
660 	NVME_CTRLR_STATE_READ_CAP,
661 
662 	/**
663 	 * Waiting for Capabilities (CAP) register to be read.
664 	 */
665 	NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP,
666 
667 	/**
668 	 * Check EN to prepare for controller initialization.
669 	 */
670 	NVME_CTRLR_STATE_CHECK_EN,
671 
672 	/**
673 	 * Waiting for CC to be read as part of EN check.
674 	 */
675 	NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC,
676 
677 	/**
678 	 * Waiting for CSTS.RDY to transition from 0 to 1 so that CC.EN may be set to 0.
679 	 */
680 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
681 
682 	/**
683 	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 1.
684 	 */
685 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
686 
687 	/**
688 	 * Disabling the controller by setting CC.EN to 0.
689 	 */
690 	NVME_CTRLR_STATE_SET_EN_0,
691 
692 	/**
693 	 * Waiting for the CC register to be read as part of disabling the controller.
694 	 */
695 	NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC,
696 
697 	/**
698 	 * Waiting for CSTS.RDY to transition from 1 to 0 so that CC.EN may be set to 1.
699 	 */
700 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
701 
702 	/**
703 	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 0.
704 	 */
705 	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS,
706 
707 	/**
708 	 * The controller is disabled. (CC.EN and CSTS.RDY are 0.)
709 	 */
710 	NVME_CTRLR_STATE_DISABLED,
711 
712 	/**
713 	 * Enable the controller by writing CC.EN to 1
714 	 */
715 	NVME_CTRLR_STATE_ENABLE,
716 
717 	/**
718 	 * Waiting for CC register to be written as part of enabling the controller.
719 	 */
720 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC,
721 
722 	/**
723 	 * Waiting for CSTS.RDY to transition from 0 to 1 after enabling the controller.
724 	 */
725 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
726 
727 	/**
728 	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 1.
729 	 */
730 	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
731 
732 	/**
733 	 * Reset the Admin queue of the controller.
734 	 */
735 	NVME_CTRLR_STATE_RESET_ADMIN_QUEUE,
736 
737 	/**
738 	 * Identify Controller command will be sent to the controller.
739 	 */
740 	NVME_CTRLR_STATE_IDENTIFY,
741 
742 	/**
743 	 * Waiting for Identify Controller command to be completed.
744 	 */
745 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
746 
747 	/**
748 	 * Configure AER of the controller.
749 	 */
750 	NVME_CTRLR_STATE_CONFIGURE_AER,
751 
752 	/**
753 	 * Waiting for the Configure AER to be completed.
754 	 */
755 	NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
756 
757 	/**
758 	 * Set Keep Alive Timeout of the controller.
759 	 */
760 	NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
761 
762 	/**
763 	 * Waiting for Set Keep Alive Timeout to be completed.
764 	 */
765 	NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
766 
767 	/**
768 	 * Get Identify I/O Command Set Specific Controller data structure.
769 	 */
770 	NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
771 
772 	/**
773 	 * Waiting for Identify I/O Command Set Specific Controller command to be completed.
774 	 */
775 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC,
776 
777 	/**
778 	 * Get Commands Supported and Effects log page for the Zoned Namespace Command Set.
779 	 */
780 	NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG,
781 
782 	/**
783 	 * Waiting for the Get Log Page command to be completed.
784 	 */
785 	NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG,
786 
787 	/**
788 	 * Set Number of Queues of the controller.
789 	 */
790 	NVME_CTRLR_STATE_SET_NUM_QUEUES,
791 
792 	/**
793 	 * Waiting for Set Number of Queues command to be completed.
794 	 */
795 	NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
796 
797 	/**
798 	 * Get active Namespace list of the controller.
799 	 */
800 	NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
801 
802 	/**
803 	 * Waiting for the Identify Active Namespace commands to be completed.
804 	 */
805 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS,
806 
807 	/**
808 	 * Get Identify Namespace Data structure for each NS.
809 	 */
810 	NVME_CTRLR_STATE_IDENTIFY_NS,
811 
812 	/**
813 	 * Waiting for the Identify Namespace commands to be completed.
814 	 */
815 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
816 
817 	/**
818 	 * Get Identify Namespace Identification Descriptors.
819 	 */
820 	NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
821 
822 	/**
823 	 * Get Identify I/O Command Set Specific Namespace data structure for each NS.
824 	 */
825 	NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
826 
827 	/**
828 	 * Waiting for the Identify I/O Command Set Specific Namespace commands to be completed.
829 	 */
830 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,
831 
832 	/**
833 	 * Waiting for the Identify Namespace Identification
834 	 * Descriptors to be completed.
835 	 */
836 	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
837 
838 	/**
839 	 * Set supported log pages of the controller.
840 	 */
841 	NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
842 
843 	/**
844 	 * Set supported log pages of INTEL controller.
845 	 */
846 	NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES,
847 
848 	/**
849 	 * Waiting for supported log pages of INTEL controller.
850 	 */
851 	NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES,
852 
853 	/**
854 	 * Set supported features of the controller.
855 	 */
856 	NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
857 
858 	/**
859 	 * Set the Host Behavior Support feature of the controller.
860 	 */
861 	NVME_CTRLR_STATE_SET_HOST_FEATURE,
862 
863 	/**
864 	 * Waiting for the Host Behavior Support feature of the controller.
865 	 */
866 	NVME_CTRLR_STATE_WAIT_FOR_SET_HOST_FEATURE,
867 
868 	/**
869 	 * Set Doorbell Buffer Config of the controller.
870 	 */
871 	NVME_CTRLR_STATE_SET_DB_BUF_CFG,
872 
873 	/**
874 	 * Waiting for Doorbell Buffer Config to be completed.
875 	 */
876 	NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
877 
878 	/**
879 	 * Set Host ID of the controller.
880 	 */
881 	NVME_CTRLR_STATE_SET_HOST_ID,
882 
883 	/**
884 	 * Waiting for Set Host ID to be completed.
885 	 */
886 	NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
887 
888 	/**
889 	 * Let transport layer do its part of initialization.
890 	 */
891 	NVME_CTRLR_STATE_TRANSPORT_READY,
892 
893 	/**
894 	 * Controller initialization has completed and the controller is ready.
895 	 */
896 	NVME_CTRLR_STATE_READY,
897 
898 	/**
899 	 * Controller initialization has an error.
900 	 */
901 	NVME_CTRLR_STATE_ERROR,
902 
903 	/**
904 	 * Admin qpair was disconnected, controller needs to be re-initialized
905 	 */
906 	NVME_CTRLR_STATE_DISCONNECTED,
907 };
908 
909 #define NVME_TIMEOUT_INFINITE		0
910 #define NVME_TIMEOUT_KEEP_EXISTING	UINT64_MAX
911 
912 struct spdk_nvme_ctrlr_aer_completion {
913 	struct spdk_nvme_cpl	cpl;
914 	STAILQ_ENTRY(spdk_nvme_ctrlr_aer_completion) link;
915 };
916 
917 /*
918  * Used to track properties for all processes accessing the controller.
919  */
920 struct spdk_nvme_ctrlr_process {
921 	/** Whether it is the primary process  */
922 	bool						is_primary;
923 
924 	/** Process ID */
925 	pid_t						pid;
926 
927 	/** Active admin requests to be completed */
928 	STAILQ_HEAD(, nvme_request)			active_reqs;
929 
930 	TAILQ_ENTRY(spdk_nvme_ctrlr_process)		tailq;
931 
932 	/** Per process PCI device handle */
933 	struct spdk_pci_device				*devhandle;
934 
935 	/** Reference to track the number of attachments to this controller. */
936 	int						ref;
937 
938 	/** Allocated IO qpairs */
939 	TAILQ_HEAD(, spdk_nvme_qpair)			allocated_io_qpairs;
940 
941 	spdk_nvme_aer_cb				aer_cb_fn;
942 	void						*aer_cb_arg;
943 
944 	/**
945 	 * A function pointer to timeout callback function
946 	 */
947 	spdk_nvme_timeout_cb		timeout_cb_fn;
948 	void				*timeout_cb_arg;
949 	/** separate timeout values for io vs. admin reqs */
950 	uint64_t			timeout_io_ticks;
951 	uint64_t			timeout_admin_ticks;
952 
953 	/** List to publish AENs to all procs in multiprocess setup */
954 	STAILQ_HEAD(, spdk_nvme_ctrlr_aer_completion)      async_events;
955 };
956 
957 struct nvme_register_completion {
958 	struct spdk_nvme_cpl			cpl;
959 	uint64_t				value;
960 	spdk_nvme_reg_cb			cb_fn;
961 	void					*cb_ctx;
962 	STAILQ_ENTRY(nvme_register_completion)	stailq;
963 	pid_t					pid;
964 };
965 
966 struct spdk_nvme_ctrlr {
967 	/* Hot data (accessed in I/O path) starts here. */
968 
969 	/* Tree of namespaces */
970 	RB_HEAD(nvme_ns_tree, spdk_nvme_ns)	ns;
971 
972 	/* The number of active namespaces */
973 	uint32_t			active_ns_count;
974 
975 	bool				is_removed;
976 
977 	bool				is_resetting;
978 
979 	bool				is_failed;
980 
981 	bool				is_destructed;
982 
983 	bool				timeout_enabled;
984 
985 	/* The application is preparing to reset the controller.  Transports
986 	 * can use this to skip unnecessary parts of the qpair deletion process
987 	 * for example, like the DELETE_SQ/CQ commands.
988 	 */
989 	bool				prepare_for_reset;
990 
991 	bool				is_disconnecting;
992 
993 	bool				needs_io_msg_update;
994 
995 	uint16_t			max_sges;
996 
997 	uint16_t			cntlid;
998 
999 	/** Controller support flags */
1000 	uint64_t			flags;
1001 
1002 	/** NVMEoF in-capsule data size in bytes */
1003 	uint32_t			ioccsz_bytes;
1004 
1005 	/** NVMEoF in-capsule data offset in 16 byte units */
1006 	uint16_t			icdoff;
1007 
1008 	/* Cold data (not accessed in normal I/O path) is after this point. */
1009 
1010 	struct spdk_nvme_transport_id	trid;
1011 
1012 	struct {
1013 		/** Is numa.id valid? Ensures numa.id == 0 is interpreted correctly. */
1014 		uint32_t		id_valid : 1;
1015 		int32_t			id : 31;
1016 	} numa;
1017 
1018 	union spdk_nvme_cap_register	cap;
1019 	union spdk_nvme_vs_register	vs;
1020 
1021 	int				state;
1022 	uint64_t			state_timeout_tsc;
1023 
1024 	uint64_t			next_keep_alive_tick;
1025 	uint64_t			keep_alive_interval_ticks;
1026 
1027 	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
1028 
1029 	/** All the log pages supported */
1030 	bool				log_page_supported[256];
1031 
1032 	/** All the features supported */
1033 	bool				feature_supported[256];
1034 
1035 	/** maximum i/o size in bytes */
1036 	uint32_t			max_xfer_size;
1037 
1038 	/** minimum page size supported by this controller in bytes */
1039 	uint32_t			min_page_size;
1040 
1041 	/** selected memory page size for this controller in bytes */
1042 	uint32_t			page_size;
1043 
1044 	uint32_t			num_aers;
1045 	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];
1046 
1047 	/** guards access to the controller itself, including admin queues */
1048 	pthread_mutex_t			ctrlr_lock;
1049 
1050 	struct spdk_nvme_qpair		*adminq;
1051 
1052 	/** shadow doorbell buffer */
1053 	uint32_t			*shadow_doorbell;
1054 	/** eventidx buffer */
1055 	uint32_t			*eventidx;
1056 
1057 	/**
1058 	 * Identify Controller data.
1059 	 */
1060 	struct spdk_nvme_ctrlr_data	cdata;
1061 
1062 	/**
1063 	 * Zoned Namespace Command Set Specific Identify Controller data.
1064 	 */
1065 	struct spdk_nvme_zns_ctrlr_data	*cdata_zns;
1066 
1067 	struct spdk_bit_array		*free_io_qids;
1068 	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
1069 
1070 	struct spdk_nvme_ctrlr_opts	opts;
1071 
1072 	uint64_t			quirks;
1073 
1074 	/* Extra sleep time during controller initialization */
1075 	uint64_t			sleep_timeout_tsc;
1076 
1077 	/** Track all the processes that manage this controller */
1078 	TAILQ_HEAD(, spdk_nvme_ctrlr_process)	active_procs;
1079 
1080 
1081 	STAILQ_HEAD(, nvme_request)	queued_aborts;
1082 	uint32_t			outstanding_aborts;
1083 
1084 	uint32_t			lock_depth;
1085 
1086 	/* CB to notify the user when the ctrlr is removed/failed. */
1087 	spdk_nvme_remove_cb			remove_cb;
1088 	void					*cb_ctx;
1089 
1090 	struct spdk_nvme_qpair		*external_io_msgs_qpair;
1091 	pthread_mutex_t			external_io_msgs_lock;
1092 	struct spdk_ring		*external_io_msgs;
1093 
1094 	STAILQ_HEAD(, nvme_io_msg_producer) io_producers;
1095 
1096 	struct spdk_nvme_ana_page		*ana_log_page;
1097 	struct spdk_nvme_ana_group_descriptor	*copied_ana_desc;
1098 	uint32_t				ana_log_page_size;
1099 
1100 	/* scratchpad pointer that can be used to send data between two NVME_CTRLR_STATEs */
1101 	void				*tmp_ptr;
1102 
1103 	/* maximum zone append size in bytes */
1104 	uint32_t			max_zone_append_size;
1105 
1106 	/* PMR size in bytes */
1107 	uint64_t			pmr_size;
1108 
1109 	/* Boot Partition Info */
1110 	enum nvme_bp_write_state	bp_ws;
1111 	uint32_t			bpid;
1112 	spdk_nvme_cmd_cb		bp_write_cb_fn;
1113 	void				*bp_write_cb_arg;
1114 
1115 	/* Firmware Download */
1116 	void				*fw_payload;
1117 	unsigned int			fw_size_remaining;
1118 	unsigned int			fw_offset;
1119 	unsigned int			fw_transfer_size;
1120 
1121 	/* Completed register operations */
1122 	STAILQ_HEAD(, nvme_register_completion)	register_operations;
1123 
1124 	union spdk_nvme_cc_register		process_init_cc;
1125 
1126 	/* Authentication transaction ID */
1127 	uint16_t				auth_tid;
1128 	/* Authentication sequence number */
1129 	uint32_t				auth_seqnum;
1130 };
1131 
1132 struct spdk_nvme_detach_ctx {
1133 	TAILQ_HEAD(, nvme_ctrlr_detach_ctx)	head;
1134 };
1135 
1136 struct spdk_nvme_probe_ctx {
1137 	struct spdk_nvme_transport_id		trid;
1138 	const struct spdk_nvme_ctrlr_opts	*opts;
1139 	void					*cb_ctx;
1140 	spdk_nvme_probe_cb			probe_cb;
1141 	spdk_nvme_attach_cb			attach_cb;
1142 	spdk_nvme_attach_fail_cb		attach_fail_cb;
1143 	spdk_nvme_remove_cb			remove_cb;
1144 	TAILQ_HEAD(, spdk_nvme_ctrlr)		init_ctrlrs;
1145 	/* detach contexts allocated for controllers that failed to initialize */
1146 	struct spdk_nvme_detach_ctx		failed_ctxs;
1147 };
1148 
1149 typedef void (*nvme_ctrlr_detach_cb)(struct spdk_nvme_ctrlr *ctrlr);
1150 
1151 enum nvme_ctrlr_detach_state {
1152 	NVME_CTRLR_DETACH_SET_CC,
1153 	NVME_CTRLR_DETACH_CHECK_CSTS,
1154 	NVME_CTRLR_DETACH_GET_CSTS,
1155 	NVME_CTRLR_DETACH_GET_CSTS_DONE,
1156 };
1157 
1158 struct nvme_ctrlr_detach_ctx {
1159 	struct spdk_nvme_ctrlr			*ctrlr;
1160 	nvme_ctrlr_detach_cb			cb_fn;
1161 	uint64_t				shutdown_start_tsc;
1162 	uint32_t				shutdown_timeout_ms;
1163 	bool					shutdown_complete;
1164 	enum nvme_ctrlr_detach_state		state;
1165 	union spdk_nvme_csts_register		csts;
1166 	TAILQ_ENTRY(nvme_ctrlr_detach_ctx)	link;
1167 };
1168 
1169 struct nvme_driver {
1170 	pthread_mutex_t			lock;
1171 
1172 	/** Multi-process shared attached controller list */
1173 	TAILQ_HEAD(, spdk_nvme_ctrlr)	shared_attached_ctrlrs;
1174 
1175 	bool				initialized;
1176 	struct spdk_uuid		default_extended_host_id;
1177 
1178 	/** netlink socket fd for hotplug messages */
1179 	int				hotplug_fd;
1180 };
1181 
1182 #define nvme_ns_cmd_get_ext_io_opt(opts, field, defval) \
1183        ((opts) != NULL && offsetof(struct spdk_nvme_ns_cmd_ext_io_opts, field) + \
1184         sizeof((opts)->field) <= (opts)->size ? (opts)->field : (defval))
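
/*
 * The macro above gates each field access on the caller-provided opts->size,
 * so newer code can safely consult fields that an older, smaller
 * spdk_nvme_ns_cmd_ext_io_opts struct does not contain.  Illustrative use
 * (sketch):
 *
 *	uint32_t flags = nvme_ns_cmd_get_ext_io_opt(opts, io_flags, 0);
 *
 * yields opts->io_flags when the struct is large enough to hold that field and
 * the default 0 otherwise (including when opts == NULL).
 */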
1185 
1186 extern struct nvme_driver *g_spdk_nvme_driver;
1187 
1188 int nvme_driver_init(void);
1189 
1190 #define nvme_delay		usleep
1191 
1192 static inline bool
1193 nvme_qpair_is_admin_queue(struct spdk_nvme_qpair *qpair)
1194 {
1195 	return qpair->id == 0;
1196 }
1197 
1198 static inline bool
1199 nvme_qpair_is_io_queue(struct spdk_nvme_qpair *qpair)
1200 {
1201 	return qpair->id != 0;
1202 }
1203 
1204 static inline int
1205 nvme_robust_mutex_lock(pthread_mutex_t *mtx)
1206 {
1207 	int rc = pthread_mutex_lock(mtx);
1208 
1209 #ifndef __FreeBSD__
1210 	if (rc == EOWNERDEAD) {
1211 		rc = pthread_mutex_consistent(mtx);
1212 	}
1213 #endif
1214 
1215 	return rc;
1216 }
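
/*
 * The controller locks used with this helper are created process-shared and
 * robust (see nvme_robust_mutex_init_shared()).  If a process dies while
 * holding one, the next pthread_mutex_lock() returns EOWNERDEAD and the call
 * above marks the mutex consistent so it can continue to be used.  Sketch:
 *
 *	pthread_mutex_t mtx;
 *
 *	nvme_robust_mutex_init_shared(&mtx);
 *	nvme_robust_mutex_lock(&mtx);
 *	... critical section ...
 *	nvme_robust_mutex_unlock(&mtx);
 */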
1217 
1218 static inline int
1219 nvme_ctrlr_lock(struct spdk_nvme_ctrlr *ctrlr)
1220 {
1221 	int rc;
1222 
1223 	rc = nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1224 	ctrlr->lock_depth++;
1225 	return rc;
1226 }
1227 
1228 static inline int
1229 nvme_robust_mutex_unlock(pthread_mutex_t *mtx)
1230 {
1231 	return pthread_mutex_unlock(mtx);
1232 }
1233 
1234 static inline int
1235 nvme_ctrlr_unlock(struct spdk_nvme_ctrlr *ctrlr)
1236 {
1237 	ctrlr->lock_depth--;
1238 	return nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1239 }
1240 
1241 /* Poll group management functions. */
1242 int nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
1243 int nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1244 void nvme_poll_group_write_disconnect_qpair_fd(struct spdk_nvme_poll_group *group);
1245 
1246 /* Admin functions */
1247 int	nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr,
1248 				uint8_t cns, uint16_t cntid, uint32_t nsid,
1249 				uint8_t csi, void *payload, size_t payload_size,
1250 				spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1251 int	nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
1252 				      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn,
1253 				      void *cb_arg);
1254 int	nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
1255 				      spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1256 int	nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
1257 		union spdk_nvme_feat_async_event_configuration config,
1258 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1259 int	nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
1260 				   spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1261 int	nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1262 				 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1263 int	nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1264 				 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1265 int	nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
1266 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1267 int	nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr,
1268 		uint64_t prp1, uint64_t prp2,
1269 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1270 int	nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
1271 				 void *cb_arg);
1272 int	nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1273 			      struct spdk_nvme_format *format, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1274 int	nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
1275 				 const struct spdk_nvme_fw_commit *fw_commit,
1276 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1277 int	nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
1278 		uint32_t size, uint32_t offset, void *payload,
1279 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1280 int	nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1281 				struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
1282 				spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1283 void	nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl);
1284 int	nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
1285 				 struct nvme_completion_poll_status *status);
1286 int	nvme_wait_for_completion_robust_lock(struct spdk_nvme_qpair *qpair,
1287 		struct nvme_completion_poll_status *status,
1288 		pthread_mutex_t *robust_mutex);
1289 int	nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
1290 		struct nvme_completion_poll_status *status,
1291 		uint64_t timeout_in_usecs);
1292 int	nvme_wait_for_completion_robust_lock_timeout(struct spdk_nvme_qpair *qpair,
1293 		struct nvme_completion_poll_status *status,
1294 		pthread_mutex_t *robust_mutex,
1295 		uint64_t timeout_in_usecs);
1296 int	nvme_wait_for_completion_robust_lock_timeout_poll(struct spdk_nvme_qpair *qpair,
1297 		struct nvme_completion_poll_status *status,
1298 		pthread_mutex_t *robust_mutex);
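
/*
 * Typical synchronous admin command pattern built on the helpers above
 * (sketch, error handling trimmed):
 *
 *	struct nvme_completion_poll_status *status = calloc(1, sizeof(*status));
 *
 *	rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, num_queues,
 *					   nvme_completion_poll_cb, status);
 *	if (rc == 0) {
 *		rc = nvme_wait_for_completion(ctrlr->adminq, status);
 *	}
 *	if (!status->timed_out) {
 *		free(status);
 *	}
 *
 * If the wait timed out, nvme_completion_poll_cb() frees the status (and any
 * dma_data) itself when the late completion eventually arrives.
 */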
1299 
1300 struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr,
1301 		pid_t pid);
1302 struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr);
1303 int	nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle);
1304 void	nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr);
1305 struct spdk_pci_device *nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr);
1306 
1307 int	nvme_ctrlr_probe(const struct spdk_nvme_transport_id *trid,
1308 			 struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle);
1309 
1310 int	nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr);
1311 void	nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr);
1312 void	nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1313 void	nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr,
1314 				  struct nvme_ctrlr_detach_ctx *ctx);
1315 int	nvme_ctrlr_destruct_poll_async(struct spdk_nvme_ctrlr *ctrlr,
1316 				       struct nvme_ctrlr_detach_ctx *ctx);
1317 void	nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove);
1318 int	nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr);
1319 void	nvme_ctrlr_disable(struct spdk_nvme_ctrlr *ctrlr);
1320 int	nvme_ctrlr_disable_poll(struct spdk_nvme_ctrlr *ctrlr);
1321 void	nvme_ctrlr_connected(struct spdk_nvme_probe_ctx *probe_ctx,
1322 			     struct spdk_nvme_ctrlr *ctrlr);
1323 
1324 int	nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
1325 					struct nvme_request *req);
1326 int	nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap);
1327 int	nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs);
1328 int	nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz);
1329 int	nvme_ctrlr_get_pmrcap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_pmrcap_register *pmrcap);
1330 int	nvme_ctrlr_get_bpinfo(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bpinfo_register *bpinfo);
1331 int	nvme_ctrlr_set_bprsel(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bprsel_register *bprsel);
1332 int	nvme_ctrlr_set_bpmbl(struct spdk_nvme_ctrlr *ctrlr, uint64_t bpmbl_value);
1333 bool	nvme_ctrlr_multi_iocs_enabled(struct spdk_nvme_ctrlr *ctrlr);
1334 void nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1335 void nvme_ctrlr_abort_queued_aborts(struct spdk_nvme_ctrlr *ctrlr);
1336 int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
1337 		    struct spdk_nvme_ctrlr *ctrlr,
1338 		    enum spdk_nvme_qprio qprio,
1339 		    uint32_t num_requests, bool async);
1340 void	nvme_qpair_deinit(struct spdk_nvme_qpair *qpair);
1341 void	nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair);
1342 int	nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair,
1343 				  struct nvme_request *req);
1344 void	nvme_qpair_abort_all_queued_reqs(struct spdk_nvme_qpair *qpair);
1345 uint32_t nvme_qpair_abort_queued_reqs_with_cbarg(struct spdk_nvme_qpair *qpair, void *cmd_cb_arg);
1346 void	nvme_qpair_abort_queued_reqs(struct spdk_nvme_qpair *qpair);
1347 void	nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_requests);
1348 int	nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr);
1349 void	nvme_ns_set_identify_data(struct spdk_nvme_ns *ns);
1350 void	nvme_ns_set_id_desc_list_data(struct spdk_nvme_ns *ns);
1351 void	nvme_ns_free_zns_specific_data(struct spdk_nvme_ns *ns);
1352 void	nvme_ns_free_nvm_specific_data(struct spdk_nvme_ns *ns);
1353 void	nvme_ns_free_iocs_specific_data(struct spdk_nvme_ns *ns);
1354 bool	nvme_ns_has_supported_iocs_specific_data(struct spdk_nvme_ns *ns);
1355 int	nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
1356 			  struct spdk_nvme_ctrlr *ctrlr);
1357 void	nvme_ns_destruct(struct spdk_nvme_ns *ns);
1358 int	nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1359 					void *buffer, void *metadata, uint64_t zslba,
1360 					uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1361 					uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag);
1362 int nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1363 				     uint64_t zslba, uint32_t lba_count,
1364 				     spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
1365 				     spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1366 				     spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
1367 				     uint16_t apptag_mask, uint16_t apptag);
1368 
1369 int	nvme_fabric_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
1370 int	nvme_fabric_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
1371 int	nvme_fabric_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
1372 int	nvme_fabric_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
1373 int	nvme_fabric_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1374 		uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1375 int	nvme_fabric_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1376 		uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1377 int	nvme_fabric_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1378 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1379 int	nvme_fabric_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1380 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1381 int	nvme_fabric_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
1382 int	nvme_fabric_ctrlr_discover(struct spdk_nvme_ctrlr *ctrlr,
1383 				   struct spdk_nvme_probe_ctx *probe_ctx);
1384 int	nvme_fabric_qpair_connect(struct spdk_nvme_qpair *qpair, uint32_t num_entries);
1385 int	nvme_fabric_qpair_connect_async(struct spdk_nvme_qpair *qpair, uint32_t num_entries);
1386 int	nvme_fabric_qpair_connect_poll(struct spdk_nvme_qpair *qpair);
1387 bool	nvme_fabric_qpair_auth_required(struct spdk_nvme_qpair *qpair);
1388 int	nvme_fabric_qpair_authenticate_async(struct spdk_nvme_qpair *qpair);
1389 int	nvme_fabric_qpair_authenticate_poll(struct spdk_nvme_qpair *qpair);
1390 
1391 typedef int (*spdk_nvme_parse_ana_log_page_cb)(
1392 	const struct spdk_nvme_ana_group_descriptor *desc, void *cb_arg);
1393 int	nvme_ctrlr_parse_ana_log_page(struct spdk_nvme_ctrlr *ctrlr,
1394 				      spdk_nvme_parse_ana_log_page_cb cb_fn, void *cb_arg);
1395 
1396 static inline void
1397 nvme_request_clear(struct nvme_request *req)
1398 {
1399 	/*
1400 	 * Only memset/zero fields that need it.  All other fields
1401 	 *  will be initialized appropriately either later in this
1402 	 *  function, or before they are needed later in the
1403 	 *  submission patch.  For example, the children
1404 	 *  submission path.  For example, the children
1405 	 *  only used as part of I/O splitting so we avoid
1406 	 *  memsetting them until it is actually needed.
1407 	 *  memsetting them until they are actually needed.
1408 	 *  if the request is split.
1409 	 */
1410 	memset(req, 0, offsetof(struct nvme_request, payload_size));
1411 }
1412 
1413 #define NVME_INIT_REQUEST(req, _cb_fn, _cb_arg, _payload, _payload_size, _md_size)	\
1414 	do {						\
1415 		nvme_request_clear(req);		\
1416 		req->cb_fn = _cb_fn;			\
1417 		req->cb_arg = _cb_arg;			\
1418 		req->payload = _payload;		\
1419 		req->payload_size = _payload_size;	\
1420 		req->md_size = _md_size;		\
1421 		req->pid = g_spdk_nvme_pid;		\
1422 		req->submit_tick = 0;			\
1423 		req->accel_sequence = NULL;		\
1424 	} while (0);
1425 
1426 static inline struct nvme_request *
1427 nvme_allocate_request(struct spdk_nvme_qpair *qpair,
1428 		      const struct nvme_payload *payload, uint32_t payload_size, uint32_t md_size,
1429 		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1430 {
1431 	struct nvme_request *req;
1432 
1433 	req = STAILQ_FIRST(&qpair->free_req);
1434 	if (req == NULL) {
1435 		return req;
1436 	}
1437 
1438 	STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);
1439 	qpair->num_outstanding_reqs++;
1440 
1441 	NVME_INIT_REQUEST(req, cb_fn, cb_arg, *payload, payload_size, md_size);
1442 
1443 	return req;
1444 }
1445 
1446 static inline struct nvme_request *
1447 nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair,
1448 			     void *buffer, uint32_t payload_size,
1449 			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1450 {
1451 	struct nvme_payload payload;
1452 
1453 	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
1454 
1455 	return nvme_allocate_request(qpair, &payload, payload_size, 0, cb_fn, cb_arg);
1456 }
1457 
1458 static inline struct nvme_request *
1459 nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1460 {
1461 	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
1462 }
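
/*
 * Illustrative allocation path (sketch): an I/O with a single contiguous
 * buffer takes a request off the qpair's free list, records the payload and
 * completion callback, and is then handed to the transport:
 *
 *	req = nvme_allocate_request_contig(qpair, buf, payload_size, cb_fn, cb_arg);
 *	if (req == NULL) {
 *		return -ENOMEM;
 *	}
 *	req->cmd.opc = SPDK_NVME_OPC_READ;
 *	... fill in the rest of req->cmd ...
 *	rc = nvme_qpair_submit_request(qpair, req);
 */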
1463 
1464 struct nvme_request *nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
1465 		void *buffer, uint32_t payload_size,
1466 		spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller);
1467 
1468 static inline void
1469 _nvme_free_request(struct nvme_request *req, struct spdk_nvme_qpair *qpair)
1470 {
1471 	assert(req != NULL);
1472 	assert(req->num_children == 0);
1473 	assert(qpair != NULL);
1474 
1475 	/* The reserved_req does not go in the free_req STAILQ - it is
1476 	 * saved only for use with a FABRICS/CONNECT command.
1477 	 */
1478 	if (spdk_likely(qpair->reserved_req != req)) {
1479 		STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
1480 
1481 		assert(qpair->num_outstanding_reqs > 0);
1482 		qpair->num_outstanding_reqs--;
1483 	}
1484 }
1485 
1486 static inline void
1487 nvme_free_request(struct nvme_request *req)
1488 {
1489 	_nvme_free_request(req, req->qpair);
1490 }
1491 
1492 static inline void
1493 nvme_complete_request(spdk_nvme_cmd_cb cb_fn, void *cb_arg, struct spdk_nvme_qpair *qpair,
1494 		      struct nvme_request *req, struct spdk_nvme_cpl *cpl)
1495 {
1496 	struct spdk_nvme_cpl            err_cpl;
1497 	struct nvme_error_cmd           *cmd;
1498 
1499 	if (spdk_unlikely(req->accel_sequence != NULL)) {
1500 		struct spdk_nvme_poll_group *pg = qpair->poll_group->group;
1501 
1502 		/* Transports are required to execute the sequence and clear req->accel_sequence.
1503 		 * If it's left non-NULL, it must mean the request has failed. */
1504 		assert(spdk_nvme_cpl_is_error(cpl));
1505 		pg->accel_fn_table.abort_sequence(req->accel_sequence);
1506 		req->accel_sequence = NULL;
1507 	}
1508 
1509 	/* error injection at completion path,
1510 	 * only inject for successfully completed commands
1511 	 */
1512 	if (spdk_unlikely(!TAILQ_EMPTY(&qpair->err_cmd_head) &&
1513 			  !spdk_nvme_cpl_is_error(cpl))) {
1514 		TAILQ_FOREACH(cmd, &qpair->err_cmd_head, link) {
1515 
1516 			if (cmd->do_not_submit) {
1517 				continue;
1518 			}
1519 
1520 			if ((cmd->opc == req->cmd.opc) && cmd->err_count) {
1521 
1522 				err_cpl = *cpl;
1523 				err_cpl.status.sct = cmd->status.sct;
1524 				err_cpl.status.sc = cmd->status.sc;
1525 
1526 				cpl = &err_cpl;
1527 				cmd->err_count--;
1528 				break;
1529 			}
1530 		}
1531 	}
1532 
1533 	/* For PCIe completions, we want to avoid touching the req itself to avoid
1534 	 * dependencies on loading those cachelines. So call the internal helper
1535 	 * function with the qpair that was passed by the caller, instead
1536 	 * of getting it from the req.
1537 	 */
1538 	_nvme_free_request(req, qpair);
1539 
1540 	if (spdk_likely(cb_fn)) {
1541 		cb_fn(cb_arg, cpl);
1542 	}
1543 }
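
/*
 * The error injection entries consumed above are added by the public
 * spdk_nvme_qpair_add_cmd_error_injection() API, which queues an nvme_error_cmd
 * on qpair->err_cmd_head.  Illustrative effect (sketch): an entry with
 * opc == SPDK_NVME_OPC_READ, err_count == 1, and a non-success SCT/SC causes
 * the next successfully completed read on that qpair to be reported to its
 * callback as failed, after which err_count drops to zero.
 */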
1544 
1545 static inline void
1546 nvme_cleanup_user_req(struct nvme_request *req)
1547 {
1548 	if (req->user_buffer && req->payload_size) {
1549 		spdk_free(req->payload.contig_or_cb_arg);
1550 		req->user_buffer = NULL;
1551 	}
1552 
1553 	req->user_cb_arg = NULL;
1554 	req->user_cb_fn = NULL;
1555 }
1556 
1557 static inline bool
1558 nvme_request_abort_match(struct nvme_request *req, void *cmd_cb_arg)
1559 {
1560 	return req->cb_arg == cmd_cb_arg ||
1561 	       req->user_cb_arg == cmd_cb_arg ||
1562 	       (req->parent != NULL && req->parent->cb_arg == cmd_cb_arg);
1563 }
1564 
1565 static inline void
1566 nvme_qpair_set_state(struct spdk_nvme_qpair *qpair, enum nvme_qpair_state state)
1567 {
1568 	qpair->state = state;
1569 	if (state == NVME_QPAIR_ENABLED) {
1570 		qpair->is_new_qpair = false;
1571 	}
1572 }
1573 
1574 static inline enum nvme_qpair_state
1575 nvme_qpair_get_state(struct spdk_nvme_qpair *qpair) {
1576 	return qpair->state;
1577 }
1578 
1579 static inline void
1580 nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child)
1581 {
1582 	assert(parent != NULL);
1583 	assert(child != NULL);
1584 	assert(child->parent == parent);
1585 	assert(parent->num_children != 0);
1586 
1587 	parent->num_children--;
1588 	child->parent = NULL;
1589 	TAILQ_REMOVE(&parent->children, child, child_tailq);
1590 }
1591 
1592 static inline void
1593 nvme_cb_complete_child(void *child_arg, const struct spdk_nvme_cpl *cpl)
1594 {
1595 	struct nvme_request *child = child_arg;
1596 	struct nvme_request *parent = child->parent;
1597 
1598 	nvme_request_remove_child(parent, child);
1599 
1600 	if (spdk_nvme_cpl_is_error(cpl)) {
1601 		memcpy(&parent->parent_status, cpl, sizeof(*cpl));
1602 	}
1603 
1604 	if (parent->num_children == 0) {
1605 		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
1606 				      parent, &parent->parent_status);
1607 	}
1608 }
1609 
1610 static inline void
1611 nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
1612 {
1613 	assert(parent->num_children != UINT16_MAX);
1614 
1615 	if (parent->num_children == 0) {
1616 		/*
1617 		 * Defer initialization of the children TAILQ since it falls
1618 		 *  on a separate cacheline.  This ensures we do not touch this
1619 		 *  cacheline except on request splitting cases, which are
1620 		 *  relatively rare.
1621 		 */
1622 		TAILQ_INIT(&parent->children);
1623 		parent->parent = NULL;
1624 		memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));
1625 	}
1626 
1627 	parent->num_children++;
1628 	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
1629 	child->parent = parent;
1630 	child->cb_fn = nvme_cb_complete_child;
1631 	child->cb_arg = child;
1632 }
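
/*
 * Splitting sketch: a request that exceeds the per-I/O limits is never
 * submitted itself; each piece is allocated as its own request and attached
 * with nvme_request_add_child():
 *
 *	nvme_request_add_child(parent, child0);
 *	nvme_request_add_child(parent, child1);
 *
 * Each child completes through nvme_cb_complete_child(), which detaches it,
 * records any error in parent->parent_status, and completes the parent with
 * its original cb_fn once num_children reaches zero.
 */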
1633 
1634 static inline void
1635 nvme_request_free_children(struct nvme_request *req)
1636 {
1637 	struct nvme_request *child, *tmp;
1638 
1639 	if (req->num_children == 0) {
1640 		return;
1641 	}
1642 
1643 	/* free all child nvme_request */
1644 	TAILQ_FOREACH_SAFE(child, &req->children, child_tailq, tmp) {
1645 		nvme_request_remove_child(req, child);
1646 		nvme_request_free_children(child);
1647 		nvme_free_request(child);
1648 	}
1649 }
1650 
1651 int	nvme_request_check_timeout(struct nvme_request *req, uint16_t cid,
1652 				   struct spdk_nvme_ctrlr_process *active_proc, uint64_t now_tick);
1653 uint64_t nvme_get_quirks(const struct spdk_pci_id *id);
1654 
1655 int	nvme_robust_mutex_init_shared(pthread_mutex_t *mtx);
1656 int	nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx);
1657 
1658 bool	nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl);
1659 
1660 struct spdk_nvme_ctrlr *nvme_get_ctrlr_by_trid_unsafe(
1661 	const struct spdk_nvme_transport_id *trid, const char *hostnqn);
1662 
1663 const struct spdk_nvme_transport *nvme_get_transport(const char *transport_name);
1664 const struct spdk_nvme_transport *nvme_get_first_transport(void);
1665 const struct spdk_nvme_transport *nvme_get_next_transport(const struct spdk_nvme_transport
1666 		*transport);
1667 void  nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr);
1668 
1669 /* Transport specific functions */
1670 struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
1671 		const struct spdk_nvme_ctrlr_opts *opts,
1672 		void *devhandle);
1673 int nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1674 int nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
1675 int nvme_transport_ctrlr_scan_attached(struct spdk_nvme_probe_ctx *probe_ctx);
1676 int nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr);
1677 int nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr);
1678 int nvme_transport_ctrlr_enable_interrupts(struct spdk_nvme_ctrlr *ctrlr);
1679 int nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
1680 int nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
1681 int nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
1682 int nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
1683 int nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1684 		uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1685 int nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1686 		uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1687 int nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1688 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1689 int nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1690 		spdk_nvme_reg_cb cb_fn, void *cb_arg);
1691 uint32_t nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr);
1692 uint16_t nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);
1693 struct spdk_nvme_qpair *nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1694 		uint16_t qid, const struct spdk_nvme_io_qpair_opts *opts);
1695 int nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr);
1696 void *nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
1697 int nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr);
1698 int nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr);
1699 int nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr);
1700 void *nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
1701 int nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr);
1702 void nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1703 		struct spdk_nvme_qpair *qpair);
1704 int nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1705 				       struct spdk_nvme_qpair *qpair);
1706 void nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1707 		struct spdk_nvme_qpair *qpair);
1708 void nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair);
1709 int nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
1710 		struct spdk_memory_domain **domains, int array_size);
1711 void nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair);
1712 int nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair);
1713 int nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req);
1714 int nvme_transport_qpair_get_fd(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
1715 				struct spdk_event_handler_opts *opts);
1716 int32_t nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair,
1717 		uint32_t max_completions);
1718 void nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair);
1719 int nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
1720 		int (*iter_fn)(struct nvme_request *req, void *arg),
1721 		void *arg);
1722 int nvme_transport_qpair_authenticate(struct spdk_nvme_qpair *qpair);
1723 
1724 struct spdk_nvme_transport_poll_group *nvme_transport_poll_group_create(
1725 	const struct spdk_nvme_transport *transport);
1726 struct spdk_nvme_transport_poll_group *nvme_transport_qpair_get_optimal_poll_group(
1727 	const struct spdk_nvme_transport *transport,
1728 	struct spdk_nvme_qpair *qpair);
1729 int nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
1730 				  struct spdk_nvme_qpair *qpair);
1731 int nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
1732 				     struct spdk_nvme_qpair *qpair);
1733 int nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1734 int nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
1735 int64_t nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
1736 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
1737 void nvme_transport_poll_group_check_disconnected_qpairs(
1738 	struct spdk_nvme_transport_poll_group *tgroup,
1739 	spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
1740 int nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup);
1741 int nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
1742 					struct spdk_nvme_transport_poll_group_stat **stats);
1743 void nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
1744 		struct spdk_nvme_transport_poll_group_stat *stats);
1745 enum spdk_nvme_transport_type nvme_transport_get_trtype(const struct spdk_nvme_transport
1746 		*transport);
1747 /*
1748  * The ref-related functions below must be called with the global
1749  *  driver lock held to handle the multi-process case.
1750  *  Within these functions, the per-ctrlr ctrlr_lock is also
1751  *  acquired to handle the multi-thread case.
1752  */
1753 void	nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr);
1754 void	nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr);
1755 int	nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr);
1756 
1757 int	nvme_ctrlr_reinitialize_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
1758 int	nvme_parse_addr(struct sockaddr_storage *sa, int family,
1759 			const char *addr, const char *service, long int *port);
1760 int	nvme_get_default_hostnqn(char *buf, int len);
1761 
1762 static inline bool
1763 _is_page_aligned(uint64_t address, uint64_t page_size)
1764 {
1765 	return (address & (page_size - 1)) == 0;
1766 }
1767 
1768 #endif /* __NVME_INTERNAL_H__ */
1769