xref: /spdk/lib/nvme/nvme_qpair.c (revision cc6920a4763d4b9a43aa40583c8397d8f14fa100)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "nvme_internal.h"
35 #include "spdk/nvme_ocssd.h"
36 #include "spdk/string.h"
37 
38 #define NVME_CMD_DPTR_STR_SIZE 256
39 
40 static int nvme_qpair_resubmit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req);
41 
42 struct nvme_string {
43 	uint16_t	value;
44 	const char	*str;
45 };
46 
47 static const struct nvme_string admin_opcode[] = {
48 	{ SPDK_NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" },
49 	{ SPDK_NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" },
50 	{ SPDK_NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" },
51 	{ SPDK_NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" },
52 	{ SPDK_NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" },
53 	{ SPDK_NVME_OPC_IDENTIFY, "IDENTIFY" },
54 	{ SPDK_NVME_OPC_ABORT, "ABORT" },
55 	{ SPDK_NVME_OPC_SET_FEATURES, "SET FEATURES" },
56 	{ SPDK_NVME_OPC_GET_FEATURES, "GET FEATURES" },
57 	{ SPDK_NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" },
58 	{ SPDK_NVME_OPC_NS_MANAGEMENT, "NAMESPACE MANAGEMENT" },
59 	{ SPDK_NVME_OPC_FIRMWARE_COMMIT, "FIRMWARE COMMIT" },
60 	{ SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" },
61 	{ SPDK_NVME_OPC_DEVICE_SELF_TEST, "DEVICE SELF-TEST" },
62 	{ SPDK_NVME_OPC_NS_ATTACHMENT, "NAMESPACE ATTACHMENT" },
63 	{ SPDK_NVME_OPC_KEEP_ALIVE, "KEEP ALIVE" },
64 	{ SPDK_NVME_OPC_DIRECTIVE_SEND, "DIRECTIVE SEND" },
65 	{ SPDK_NVME_OPC_DIRECTIVE_RECEIVE, "DIRECTIVE RECEIVE" },
66 	{ SPDK_NVME_OPC_VIRTUALIZATION_MANAGEMENT, "VIRTUALIZATION MANAGEMENT" },
67 	{ SPDK_NVME_OPC_NVME_MI_SEND, "NVME-MI SEND" },
68 	{ SPDK_NVME_OPC_NVME_MI_RECEIVE, "NVME-MI RECEIVE" },
69 	{ SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG, "DOORBELL BUFFER CONFIG" },
70 	{ SPDK_NVME_OPC_FABRIC, "FABRIC" },
71 	{ SPDK_NVME_OPC_FORMAT_NVM, "FORMAT NVM" },
72 	{ SPDK_NVME_OPC_SECURITY_SEND, "SECURITY SEND" },
73 	{ SPDK_NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" },
74 	{ SPDK_NVME_OPC_SANITIZE, "SANITIZE" },
75 	{ SPDK_NVME_OPC_GET_LBA_STATUS, "GET LBA STATUS" },
76 	{ SPDK_OCSSD_OPC_GEOMETRY, "OCSSD / GEOMETRY" },
77 	{ 0xFFFF, "ADMIN COMMAND" }
78 };
79 
80 static const struct nvme_string fabric_opcode[] = {
81 	{ SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET, "PROPERTY SET" },
82 	{ SPDK_NVMF_FABRIC_COMMAND_CONNECT, "CONNECT" },
83 	{ SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET, "PROPERTY GET" },
84 	{ SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND, "AUTHENTICATION SEND" },
85 	{ SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV, "AUTHENTICATION RECV" },
86 	{ 0xFFFF, "RESERVED / VENDOR SPECIFIC" }
87 };
88 
89 static const struct nvme_string feat_opcode[] = {
90 	{ SPDK_NVME_FEAT_ARBITRATION, "ARBITRATION" },
91 	{ SPDK_NVME_FEAT_POWER_MANAGEMENT, "POWER MANAGEMENT" },
92 	{ SPDK_NVME_FEAT_LBA_RANGE_TYPE, "LBA RANGE TYPE" },
93 	{ SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD, "TEMPERATURE THRESHOLD" },
94 	{ SPDK_NVME_FEAT_ERROR_RECOVERY, "ERROR RECOVERY" },
95 	{ SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE, "VOLATILE WRITE CACHE" },
96 	{ SPDK_NVME_FEAT_NUMBER_OF_QUEUES, "NUMBER OF QUEUES" },
97 	{ SPDK_NVME_FEAT_INTERRUPT_COALESCING, "INTERRUPT COALESCING" },
98 	{ SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION, "INTERRUPT VECTOR CONFIGURATION" },
99 	{ SPDK_NVME_FEAT_WRITE_ATOMICITY, "WRITE ATOMICITY" },
100 	{ SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION, "ASYNC EVENT CONFIGURATION" },
101 	{ SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION, "AUTONOMOUS POWER STATE TRANSITION" },
102 	{ SPDK_NVME_FEAT_HOST_MEM_BUFFER, "HOST MEM BUFFER" },
103 	{ SPDK_NVME_FEAT_TIMESTAMP, "TIMESTAMP" },
104 	{ SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, "KEEP ALIVE TIMER" },
105 	{ SPDK_NVME_FEAT_HOST_CONTROLLED_THERMAL_MANAGEMENT, "HOST CONTROLLED THERMAL MANAGEMENT" },
106 	{ SPDK_NVME_FEAT_NON_OPERATIONAL_POWER_STATE_CONFIG, "NON OPERATIONAL POWER STATE CONFIG" },
107 	{ SPDK_NVME_FEAT_SOFTWARE_PROGRESS_MARKER, "SOFTWARE PROGRESS MARKER" },
108 	{ SPDK_NVME_FEAT_HOST_IDENTIFIER, "HOST IDENTIFIER" },
109 	{ SPDK_NVME_FEAT_HOST_RESERVE_MASK, "HOST RESERVE MASK" },
110 	{ SPDK_NVME_FEAT_HOST_RESERVE_PERSIST, "HOST RESERVE PERSIST" },
111 	{ 0xFFFF, "RESERVED" }
112 };
113 
114 static const struct nvme_string io_opcode[] = {
115 	{ SPDK_NVME_OPC_FLUSH, "FLUSH" },
116 	{ SPDK_NVME_OPC_WRITE, "WRITE" },
117 	{ SPDK_NVME_OPC_READ, "READ" },
118 	{ SPDK_NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" },
119 	{ SPDK_NVME_OPC_COMPARE, "COMPARE" },
120 	{ SPDK_NVME_OPC_WRITE_ZEROES, "WRITE ZEROES" },
121 	{ SPDK_NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" },
122 	{ SPDK_NVME_OPC_RESERVATION_REGISTER, "RESERVATION REGISTER" },
123 	{ SPDK_NVME_OPC_RESERVATION_REPORT, "RESERVATION REPORT" },
124 	{ SPDK_NVME_OPC_RESERVATION_ACQUIRE, "RESERVATION ACQUIRE" },
125 	{ SPDK_NVME_OPC_RESERVATION_RELEASE, "RESERVATION RELEASE" },
126 	{ SPDK_OCSSD_OPC_VECTOR_RESET, "OCSSD / VECTOR RESET" },
127 	{ SPDK_OCSSD_OPC_VECTOR_WRITE, "OCSSD / VECTOR WRITE" },
128 	{ SPDK_OCSSD_OPC_VECTOR_READ, "OCSSD / VECTOR READ" },
129 	{ SPDK_OCSSD_OPC_VECTOR_COPY, "OCSSD / VECTOR COPY" },
130 	{ 0xFFFF, "IO COMMAND" }
131 };
132 
133 static const struct nvme_string sgl_type[] = {
134 	{ SPDK_NVME_SGL_TYPE_DATA_BLOCK, "DATA BLOCK" },
135 	{ SPDK_NVME_SGL_TYPE_BIT_BUCKET, "BIT BUCKET" },
136 	{ SPDK_NVME_SGL_TYPE_SEGMENT, "SEGMENT" },
137 	{ SPDK_NVME_SGL_TYPE_LAST_SEGMENT, "LAST SEGMENT" },
138 	{ SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK, "TRANSPORT DATA BLOCK" },
139 	{ SPDK_NVME_SGL_TYPE_VENDOR_SPECIFIC, "VENDOR SPECIFIC" },
140 	{ 0xFFFF, "RESERVED" }
141 };
142 
143 static const struct nvme_string sgl_subtype[] = {
144 	{ SPDK_NVME_SGL_SUBTYPE_ADDRESS, "ADDRESS" },
145 	{ SPDK_NVME_SGL_SUBTYPE_OFFSET, "OFFSET" },
146 	{ SPDK_NVME_SGL_SUBTYPE_TRANSPORT, "TRANSPORT" },
147 	{ SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY, "INVALIDATE KEY" },
148 	{ 0xFFFF, "RESERVED" }
149 };
150 
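/*
 * Look up a human-readable name for value in a sentinel-terminated nvme_string
 * table. If no entry matches, the string attached to the 0xFFFF sentinel entry
 * (a generic fallback such as "RESERVED") is returned.
 */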
151 static const char *
152 nvme_get_string(const struct nvme_string *strings, uint16_t value)
153 {
154 	const struct nvme_string *entry;
155 
156 	entry = strings;
157 
158 	while (entry->value != 0xFFFF) {
159 		if (entry->value == value) {
160 			return entry->str;
161 		}
162 		entry++;
163 	}
164 	return entry->str;
165 }
166 
167 static void
168 nvme_get_sgl_unkeyed(char *buf, size_t size, struct spdk_nvme_cmd *cmd)
169 {
170 	struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1;
171 
172 	snprintf(buf, size, " len:0x%x", sgl->unkeyed.length);
173 }
174 
175 static void
176 nvme_get_sgl_keyed(char *buf, size_t size, struct spdk_nvme_cmd *cmd)
177 {
178 	struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1;
179 
180 	snprintf(buf, size, " len:0x%x key:0x%x", sgl->keyed.length, sgl->keyed.key);
181 }
182 
183 static void
184 nvme_get_sgl(char *buf, size_t size, struct spdk_nvme_cmd *cmd)
185 {
186 	struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1;
187 	int c;
188 
189 	c = snprintf(buf, size, "SGL %s %s 0x%" PRIx64, nvme_get_string(sgl_type, sgl->generic.type),
190 		     nvme_get_string(sgl_subtype, sgl->generic.subtype), sgl->address);
191 	assert(c >= 0 && (size_t)c < size);
192 
193 	if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK) {
194 		nvme_get_sgl_unkeyed(buf + c, size - c, cmd);
195 	}
196 
197 	if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) {
198 		nvme_get_sgl_keyed(buf + c, size - c, cmd);
199 	}
200 }
201 
202 static void
203 nvme_get_prp(char *buf, size_t size, struct spdk_nvme_cmd *cmd)
204 {
205 	snprintf(buf, size, "PRP1 0x%" PRIx64 " PRP2 0x%" PRIx64, cmd->dptr.prp.prp1, cmd->dptr.prp.prp2);
206 }
207 
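/*
 * Format the command's data pointer (PRP or SGL, depending on the PSDT field)
 * into buf. Commands that transfer no data leave buf untouched.
 */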
208 static void
209 nvme_get_dptr(char *buf, size_t size, struct spdk_nvme_cmd *cmd)
210 {
211 	if (spdk_nvme_opc_get_data_transfer(cmd->opc) != SPDK_NVME_DATA_NONE) {
212 		switch (cmd->psdt) {
213 		case SPDK_NVME_PSDT_PRP:
214 			nvme_get_prp(buf, size, cmd);
215 			break;
216 		case SPDK_NVME_PSDT_SGL_MPTR_CONTIG:
217 		case SPDK_NVME_PSDT_SGL_MPTR_SGL:
218 			nvme_get_sgl(buf, size, cmd);
219 			break;
220 		default:
221 			;
222 		}
223 	}
224 }
225 
226 static void
227 nvme_admin_qpair_print_command(uint16_t qid, struct spdk_nvme_cmd *cmd)
228 {
229 	struct spdk_nvmf_capsule_cmd *fcmd = (void *)cmd;
230 	char dptr[NVME_CMD_DPTR_STR_SIZE] = {'\0'};
231 
232 	assert(cmd != NULL);
233 
234 	nvme_get_dptr(dptr, sizeof(dptr), cmd);
235 
236 	switch ((int)cmd->opc) {
237 	case SPDK_NVME_OPC_SET_FEATURES:
238 	case SPDK_NVME_OPC_GET_FEATURES:
239 		SPDK_NOTICELOG("%s %s cid:%d cdw10:%08x %s\n",
240 			       nvme_get_string(admin_opcode, cmd->opc), nvme_get_string(feat_opcode,
241 					       cmd->cdw10_bits.set_features.fid), cmd->cid, cmd->cdw10, dptr);
242 		break;
243 	case SPDK_NVME_OPC_FABRIC:
244 		SPDK_NOTICELOG("%s %s qid:%d cid:%d %s\n",
245 			       nvme_get_string(admin_opcode, cmd->opc), nvme_get_string(fabric_opcode, fcmd->fctype), qid,
246 			       fcmd->cid, dptr);
247 		break;
248 	default:
249 		SPDK_NOTICELOG("%s (%02x) qid:%d cid:%d nsid:%x cdw10:%08x cdw11:%08x %s\n",
250 			       nvme_get_string(admin_opcode, cmd->opc), cmd->opc, qid, cmd->cid, cmd->nsid, cmd->cdw10,
251 			       cmd->cdw11, dptr);
252 	}
253 }
254 
255 static void
256 nvme_io_qpair_print_command(uint16_t qid, struct spdk_nvme_cmd *cmd)
257 {
258 	char dptr[NVME_CMD_DPTR_STR_SIZE] = {'\0'};
259 
260 	assert(cmd != NULL);
261 
262 	nvme_get_dptr(dptr, sizeof(dptr), cmd);
263 
264 	switch ((int)cmd->opc) {
265 	case SPDK_NVME_OPC_WRITE:
266 	case SPDK_NVME_OPC_READ:
267 	case SPDK_NVME_OPC_WRITE_UNCORRECTABLE:
268 	case SPDK_NVME_OPC_COMPARE:
269 		SPDK_NOTICELOG("%s sqid:%d cid:%d nsid:%d "
270 			       "lba:%llu len:%d %s\n",
271 			       nvme_get_string(io_opcode, cmd->opc), qid, cmd->cid, cmd->nsid,
272 			       ((unsigned long long)cmd->cdw11 << 32) + cmd->cdw10,
273 			       (cmd->cdw12 & 0xFFFF) + 1, dptr);
274 		break;
275 	case SPDK_NVME_OPC_FLUSH:
276 	case SPDK_NVME_OPC_DATASET_MANAGEMENT:
277 		SPDK_NOTICELOG("%s sqid:%d cid:%d nsid:%d\n",
278 			       nvme_get_string(io_opcode, cmd->opc), qid, cmd->cid, cmd->nsid);
279 		break;
280 	default:
281 		SPDK_NOTICELOG("%s (%02x) sqid:%d cid:%d nsid:%d\n",
282 			       nvme_get_string(io_opcode, cmd->opc), cmd->opc, qid, cmd->cid, cmd->nsid);
283 		break;
284 	}
285 }
286 
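/*
 * Log a decoded one-line summary of an NVMe command. Admin queue commands
 * (qid 0) and fabrics commands are decoded with the admin/fabrics tables;
 * everything else is decoded as an I/O command.
 */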
287 void
288 spdk_nvme_print_command(uint16_t qid, struct spdk_nvme_cmd *cmd)
289 {
290 	assert(cmd != NULL);
291 
292 	if (qid == 0 || cmd->opc == SPDK_NVME_OPC_FABRIC) {
293 		nvme_admin_qpair_print_command(qid, cmd);
294 	} else {
295 		nvme_io_qpair_print_command(qid, cmd);
296 	}
297 }
298 
299 void
300 spdk_nvme_qpair_print_command(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd)
301 {
302 	assert(qpair != NULL);
303 	assert(cmd != NULL);
304 
305 	spdk_nvme_print_command(qpair->id, cmd);
306 }
307 
308 static const struct nvme_string generic_status[] = {
309 	{ SPDK_NVME_SC_SUCCESS, "SUCCESS" },
310 	{ SPDK_NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
311 	{ SPDK_NVME_SC_INVALID_FIELD, "INVALID FIELD" },
312 	{ SPDK_NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
313 	{ SPDK_NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
314 	{ SPDK_NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
315 	{ SPDK_NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
316 	{ SPDK_NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
317 	{ SPDK_NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
318 	{ SPDK_NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
319 	{ SPDK_NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
320 	{ SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
321 	{ SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
322 	{ SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR, "INVALID SGL SEGMENT DESCRIPTOR" },
323 	{ SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS, "INVALID NUMBER OF SGL DESCRIPTORS" },
324 	{ SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" },
325 	{ SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" },
326 	{ SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" },
327 	{ SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF, "INVALID CONTROLLER MEMORY BUFFER" },
328 	{ SPDK_NVME_SC_INVALID_PRP_OFFSET, "INVALID PRP OFFSET" },
329 	{ SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" },
330 	{ SPDK_NVME_SC_OPERATION_DENIED, "OPERATION DENIED" },
331 	{ SPDK_NVME_SC_INVALID_SGL_OFFSET, "INVALID SGL OFFSET" },
332 	{ SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT, "HOSTID INCONSISTENT FORMAT" },
333 	{ SPDK_NVME_SC_KEEP_ALIVE_EXPIRED, "KEEP ALIVE EXPIRED" },
334 	{ SPDK_NVME_SC_KEEP_ALIVE_INVALID, "KEEP ALIVE INVALID" },
335 	{ SPDK_NVME_SC_ABORTED_PREEMPT, "ABORTED - PREEMPT AND ABORT" },
336 	{ SPDK_NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" },
337 	{ SPDK_NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" },
338 	{ SPDK_NVME_SC_SGL_DATA_BLOCK_GRANULARITY_INVALID, "DATA BLOCK GRANULARITY INVALID" },
339 	{ SPDK_NVME_SC_COMMAND_INVALID_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" },
340 	{ SPDK_NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
341 	{ SPDK_NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
342 	{ SPDK_NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
343 	{ SPDK_NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" },
344 	{ SPDK_NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" },
345 	{ 0xFFFF, "GENERIC" }
346 };
347 
348 static const struct nvme_string command_specific_status[] = {
349 	{ SPDK_NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
350 	{ SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
351 	{ SPDK_NVME_SC_INVALID_QUEUE_SIZE, "INVALID QUEUE SIZE" },
352 	{ SPDK_NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
353 	{ SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
354 	{ SPDK_NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
355 	{ SPDK_NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
356 	{ SPDK_NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
357 	{ SPDK_NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
358 	{ SPDK_NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
359 	{ SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET, "FIRMWARE REQUIRES CONVENTIONAL RESET" },
360 	{ SPDK_NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" },
361 	{ SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE, "FEATURE ID NOT SAVEABLE" },
362 	{ SPDK_NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" },
363 	{ SPDK_NVME_SC_FEATURE_NOT_NAMESPACE_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" },
364 	{ SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET, "FIRMWARE REQUIRES NVM RESET" },
365 	{ SPDK_NVME_SC_FIRMWARE_REQ_RESET, "FIRMWARE REQUIRES RESET" },
366 	{ SPDK_NVME_SC_FIRMWARE_REQ_MAX_TIME_VIOLATION, "FIRMWARE REQUIRES MAX TIME VIOLATION" },
367 	{ SPDK_NVME_SC_FIRMWARE_ACTIVATION_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" },
368 	{ SPDK_NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" },
369 	{ SPDK_NVME_SC_NAMESPACE_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" },
370 	{ SPDK_NVME_SC_NAMESPACE_ID_UNAVAILABLE, "NAMESPACE ID UNAVAILABLE" },
371 	{ SPDK_NVME_SC_NAMESPACE_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" },
372 	{ SPDK_NVME_SC_NAMESPACE_IS_PRIVATE, "NAMESPACE IS PRIVATE" },
373 	{ SPDK_NVME_SC_NAMESPACE_NOT_ATTACHED, "NAMESPACE NOT ATTACHED" },
374 	{ SPDK_NVME_SC_THINPROVISIONING_NOT_SUPPORTED, "THINPROVISIONING NOT SUPPORTED" },
375 	{ SPDK_NVME_SC_CONTROLLER_LIST_INVALID, "CONTROLLER LIST INVALID" },
376 	{ SPDK_NVME_SC_DEVICE_SELF_TEST_IN_PROGRESS, "DEVICE SELF-TEST IN PROGRESS" },
377 	{ SPDK_NVME_SC_BOOT_PARTITION_WRITE_PROHIBITED, "BOOT PARTITION WRITE PROHIBITED" },
378 	{ SPDK_NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER ID" },
379 	{ SPDK_NVME_SC_INVALID_SECONDARY_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" },
380 	{ SPDK_NVME_SC_INVALID_NUM_CTRLR_RESOURCES, "INVALID NUMBER OF CONTROLLER RESOURCES" },
381 	{ SPDK_NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" },
382 	{ SPDK_NVME_SC_STREAM_RESOURCE_ALLOCATION_FAILED, "STREAM RESOURCE ALLOCATION FAILED"},
383 	{ SPDK_NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
384 	{ SPDK_NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
385 	{ SPDK_NVME_SC_ATTEMPTED_WRITE_TO_RO_RANGE, "WRITE TO RO RANGE" },
386 	{ 0xFFFF, "COMMAND SPECIFIC" }
387 };
388 
389 static const struct nvme_string media_error_status[] = {
390 	{ SPDK_NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
391 	{ SPDK_NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
392 	{ SPDK_NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
393 	{ SPDK_NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
394 	{ SPDK_NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
395 	{ SPDK_NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
396 	{ SPDK_NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
397 	{ SPDK_NVME_SC_DEALLOCATED_OR_UNWRITTEN_BLOCK, "DEALLOCATED OR UNWRITTEN BLOCK" },
398 	{ SPDK_OCSSD_SC_OFFLINE_CHUNK, "RESET OFFLINE CHUNK" },
399 	{ SPDK_OCSSD_SC_INVALID_RESET, "INVALID RESET" },
400 	{ SPDK_OCSSD_SC_WRITE_FAIL_WRITE_NEXT_UNIT, "WRITE FAIL WRITE NEXT UNIT" },
401 	{ SPDK_OCSSD_SC_WRITE_FAIL_CHUNK_EARLY_CLOSE, "WRITE FAIL CHUNK EARLY CLOSE" },
402 	{ SPDK_OCSSD_SC_OUT_OF_ORDER_WRITE, "OUT OF ORDER WRITE" },
403 	{ SPDK_OCSSD_SC_READ_HIGH_ECC, "READ HIGH ECC" },
404 	{ 0xFFFF, "MEDIA ERROR" }
405 };
406 
407 static const struct nvme_string path_status[] = {
408 	{ SPDK_NVME_SC_INTERNAL_PATH_ERROR, "INTERNAL PATH ERROR" },
409 	{ SPDK_NVME_SC_CONTROLLER_PATH_ERROR, "CONTROLLER PATH ERROR" },
410 	{ SPDK_NVME_SC_HOST_PATH_ERROR, "HOST PATH ERROR" },
411 	{ SPDK_NVME_SC_ABORTED_BY_HOST, "ABORTED BY HOST" },
412 	{ 0xFFFF, "PATH ERROR" }
413 };
414 
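/*
 * Translate a completion status (status code type + status code) into a
 * human-readable string using the tables above. Vendor-specific and reserved
 * status code types map to fixed strings.
 */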
415 const char *
416 spdk_nvme_cpl_get_status_string(const struct spdk_nvme_status *status)
417 {
418 	const struct nvme_string *entry;
419 
420 	switch (status->sct) {
421 	case SPDK_NVME_SCT_GENERIC:
422 		entry = generic_status;
423 		break;
424 	case SPDK_NVME_SCT_COMMAND_SPECIFIC:
425 		entry = command_specific_status;
426 		break;
427 	case SPDK_NVME_SCT_MEDIA_ERROR:
428 		entry = media_error_status;
429 		break;
430 	case SPDK_NVME_SCT_PATH:
431 		entry = path_status;
432 		break;
433 	case SPDK_NVME_SCT_VENDOR_SPECIFIC:
434 		return "VENDOR SPECIFIC";
435 	default:
436 		return "RESERVED";
437 	}
438 
439 	return nvme_get_string(entry, status->sc);
440 }
441 
442 void
443 spdk_nvme_print_completion(uint16_t qid, struct spdk_nvme_cpl *cpl)
444 {
445 	assert(cpl != NULL);
446 
447 	/* Check that sqid matches qid. Note that sqid is reserved
448 	 * for fabrics so don't print an error when sqid is 0. */
449 	if (cpl->sqid != qid && cpl->sqid != 0) {
450 		SPDK_ERRLOG("sqid %u doesn't match qid\n", cpl->sqid);
451 	}
452 
453 	SPDK_NOTICELOG("%s (%02x/%02x) qid:%d cid:%d cdw0:%x sqhd:%04x p:%x m:%x dnr:%x\n",
454 		       spdk_nvme_cpl_get_status_string(&cpl->status),
455 		       cpl->status.sct, cpl->status.sc, qid, cpl->cid, cpl->cdw0,
456 		       cpl->sqhd, cpl->status.p, cpl->status.m, cpl->status.dnr);
457 }
458 
459 void
460 spdk_nvme_qpair_print_completion(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cpl *cpl)
461 {
462 	spdk_nvme_print_completion(qpair->id, cpl);
463 }
464 
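/*
 * Decide whether a failed command should be retried based on its completion
 * status. Only a few generic and path-related status codes are retryable, and
 * only when the controller did not set the DNR (Do Not Retry) bit.
 */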
465 bool
466 nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl)
467 {
468 	/*
469 	 * TODO: spec is not clear how commands that are aborted due
470 	 *  to TLER will be marked.  So for now, it seems
471 	 *  NAMESPACE_NOT_READY is the only case where we should
472 	 *  look at the DNR bit.
473 	 */
474 	switch ((int)cpl->status.sct) {
475 	case SPDK_NVME_SCT_GENERIC:
476 		switch ((int)cpl->status.sc) {
477 		case SPDK_NVME_SC_NAMESPACE_NOT_READY:
478 		case SPDK_NVME_SC_FORMAT_IN_PROGRESS:
479 			if (cpl->status.dnr) {
480 				return false;
481 			} else {
482 				return true;
483 			}
484 		case SPDK_NVME_SC_INVALID_OPCODE:
485 		case SPDK_NVME_SC_INVALID_FIELD:
486 		case SPDK_NVME_SC_COMMAND_ID_CONFLICT:
487 		case SPDK_NVME_SC_DATA_TRANSFER_ERROR:
488 		case SPDK_NVME_SC_ABORTED_POWER_LOSS:
489 		case SPDK_NVME_SC_INTERNAL_DEVICE_ERROR:
490 		case SPDK_NVME_SC_ABORTED_BY_REQUEST:
491 		case SPDK_NVME_SC_ABORTED_SQ_DELETION:
492 		case SPDK_NVME_SC_ABORTED_FAILED_FUSED:
493 		case SPDK_NVME_SC_ABORTED_MISSING_FUSED:
494 		case SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
495 		case SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR:
496 		case SPDK_NVME_SC_LBA_OUT_OF_RANGE:
497 		case SPDK_NVME_SC_CAPACITY_EXCEEDED:
498 		default:
499 			return false;
500 		}
501 	case SPDK_NVME_SCT_PATH:
502 		/*
503 		 * Per NVMe TP 4028 (Path and Transport Error Enhancements), retries should be
504 		 * based on the setting of the DNR bit for Internal Path Error
505 		 */
506 		switch ((int)cpl->status.sc) {
507 		case SPDK_NVME_SC_INTERNAL_PATH_ERROR:
508 			return !cpl->status.dnr;
509 		default:
510 			return false;
511 		}
512 	case SPDK_NVME_SCT_COMMAND_SPECIFIC:
513 	case SPDK_NVME_SCT_MEDIA_ERROR:
514 	case SPDK_NVME_SCT_VENDOR_SPECIFIC:
515 	default:
516 		return false;
517 	}
518 }
519 
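/*
 * Complete a request locally, without involving the transport, by synthesizing
 * a completion with the given status. Error completions are logged unless the
 * controller was configured with disable_error_logging.
 */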
520 static void
521 nvme_qpair_manual_complete_request(struct spdk_nvme_qpair *qpair,
522 				   struct nvme_request *req, uint32_t sct, uint32_t sc,
523 				   uint32_t dnr, bool print_on_error)
524 {
525 	struct spdk_nvme_cpl	cpl;
526 	bool			error;
527 
528 	memset(&cpl, 0, sizeof(cpl));
529 	cpl.sqid = qpair->id;
530 	cpl.status.sct = sct;
531 	cpl.status.sc = sc;
532 	cpl.status.dnr = dnr;
533 
534 	error = spdk_nvme_cpl_is_error(&cpl);
535 
536 	if (error && print_on_error && !qpair->ctrlr->opts.disable_error_logging) {
537 		SPDK_NOTICELOG("Command completed manually:\n");
538 		spdk_nvme_qpair_print_command(qpair, &req->cmd);
539 		spdk_nvme_qpair_print_completion(qpair, &cpl);
540 	}
541 
542 	nvme_complete_request(req->cb_fn, req->cb_arg, qpair, req, &cpl);
543 	nvme_free_request(req);
544 }
545 
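/*
 * Fail every request currently sitting on the qpair's queued_req list with
 * ABORTED - BY REQUEST status, using the dnr value supplied by the caller.
 */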
546 void
547 nvme_qpair_abort_queued_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
548 {
549 	struct nvme_request		*req;
550 	STAILQ_HEAD(, nvme_request)	tmp;
551 
552 	STAILQ_INIT(&tmp);
553 	STAILQ_SWAP(&tmp, &qpair->queued_req, nvme_request);
554 
555 	while (!STAILQ_EMPTY(&tmp)) {
556 		req = STAILQ_FIRST(&tmp);
557 		STAILQ_REMOVE_HEAD(&tmp, stailq);
558 		if (!qpair->ctrlr->opts.disable_error_logging) {
559 			SPDK_ERRLOG("aborting queued i/o\n");
560 		}
561 		nvme_qpair_manual_complete_request(qpair, req, SPDK_NVME_SCT_GENERIC,
562 						   SPDK_NVME_SC_ABORTED_BY_REQUEST, dnr, true);
563 	}
564 }
565 
566 /* A request's completion callback may submit the next queued request, and that
567  * same callback may abort it immediately. This repetition may cause infinite
568  * recursion. Hence, requests being aborted are moved to a separate list here
569  * and completed later, at resubmission time.
570  */
571 static void
572 _nvme_qpair_complete_abort_queued_reqs(struct spdk_nvme_qpair *qpair)
573 {
574 	struct nvme_request		*req;
575 	STAILQ_HEAD(, nvme_request)	tmp;
576 
577 	if (spdk_likely(STAILQ_EMPTY(&qpair->aborting_queued_req))) {
578 		return;
579 	}
580 
581 	STAILQ_INIT(&tmp);
582 	STAILQ_SWAP(&tmp, &qpair->aborting_queued_req, nvme_request);
583 
584 	while (!STAILQ_EMPTY(&tmp)) {
585 		req = STAILQ_FIRST(&tmp);
586 		STAILQ_REMOVE_HEAD(&tmp, stailq);
587 		nvme_qpair_manual_complete_request(qpair, req, SPDK_NVME_SCT_GENERIC,
588 						   SPDK_NVME_SC_ABORTED_BY_REQUEST, 1, true);
589 	}
590 }
591 
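/*
 * Move every queued request whose callback argument matches cmd_cb_arg onto the
 * aborting_queued_req list and return how many were marked. The actual
 * completions are delivered later by _nvme_qpair_complete_abort_queued_reqs().
 */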
592 uint32_t
593 nvme_qpair_abort_queued_reqs_with_cbarg(struct spdk_nvme_qpair *qpair, void *cmd_cb_arg)
594 {
595 	struct nvme_request	*req, *tmp;
596 	uint32_t		aborting = 0;
597 
598 	STAILQ_FOREACH_SAFE(req, &qpair->queued_req, stailq, tmp) {
599 		if (req->cb_arg == cmd_cb_arg) {
600 			STAILQ_REMOVE(&qpair->queued_req, req, nvme_request, stailq);
601 			STAILQ_INSERT_TAIL(&qpair->aborting_queued_req, req, stailq);
602 			if (!qpair->ctrlr->opts.disable_error_logging) {
603 				SPDK_ERRLOG("aborting queued i/o\n");
604 			}
605 			aborting++;
606 		}
607 	}
608 
609 	return aborting;
610 }
611 
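/*
 * Walk the qpair through the CONNECTED -> ENABLING -> ENABLED transition when
 * appropriate (see the state machine description below) and report whether the
 * qpair is currently enabled.
 */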
612 static inline bool
613 nvme_qpair_check_enabled(struct spdk_nvme_qpair *qpair)
614 {
615 	struct nvme_request *req;
616 
617 	/*
618 	 * Either during initial connect or reset, the qpair should follow the given state machine.
619 	 * QPAIR_DISABLED->QPAIR_CONNECTING->QPAIR_CONNECTED->QPAIR_ENABLING->QPAIR_ENABLED. In the
620 	 * reset case, once the qpair is properly connected, we need to abort any outstanding requests
621 	 * from the old transport connection and encourage the application to retry them. We also need
622 	 * to submit any queued requests that built up while we were in the connected or enabling state.
623 	 */
624 	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTED && !qpair->ctrlr->is_resetting) {
625 		nvme_qpair_set_state(qpair, NVME_QPAIR_ENABLING);
626 		/*
627 		 * PCIe is special. For fabrics transports we can abort requests before disconnecting
628 		 * during reset, but we have historically not disconnected PCIe qpairs during reset,
629 		 * so we have to abort their requests here.
630 		 */
631 		if (qpair->ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE &&
632 		    !qpair->is_new_qpair) {
633 			nvme_qpair_abort_all_queued_reqs(qpair, 0);
634 			nvme_transport_qpair_abort_reqs(qpair, 0);
635 		}
636 
637 		nvme_qpair_set_state(qpair, NVME_QPAIR_ENABLED);
638 		while (!STAILQ_EMPTY(&qpair->queued_req)) {
639 			req = STAILQ_FIRST(&qpair->queued_req);
640 			STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
641 			if (nvme_qpair_resubmit_request(qpair, req)) {
642 				break;
643 			}
644 		}
645 	}
646 
647 	/*
648 	 * When doing a reset, we must disconnect the qpair on the proper core.
649 	 * Note, reset is the only case where we set the failure reason without
650 	 * setting the qpair state since reset is done at the generic layer on the
651 	 * controller thread and we can't disconnect I/O qpairs from the controller
652 	 * thread.
653 	 */
654 	if (qpair->transport_failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE &&
655 	    nvme_qpair_get_state(qpair) == NVME_QPAIR_ENABLED) {
656 		/* Don't disconnect PCIe qpairs. They are a special case for reset. */
657 		if (qpair->ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
658 			nvme_ctrlr_disconnect_qpair(qpair);
659 		}
660 		return false;
661 	}
662 
663 	return nvme_qpair_get_state(qpair) == NVME_QPAIR_ENABLED;
664 }
665 
666 void
667 nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_requests)
668 {
669 	uint32_t i;
670 	int resubmit_rc;
671 	struct nvme_request *req;
672 
673 	assert(num_requests > 0);
674 
675 	for (i = 0; i < num_requests; i++) {
676 		if (qpair->ctrlr->is_resetting) {
677 			break;
678 		}
679 		if ((req = STAILQ_FIRST(&qpair->queued_req)) == NULL) {
680 			break;
681 		}
682 		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
683 		resubmit_rc = nvme_qpair_resubmit_request(qpair, req);
684 		if (spdk_unlikely(resubmit_rc != 0)) {
685 			SPDK_DEBUGLOG(nvme, "Unable to resubmit as many requests as we completed.\n");
686 			break;
687 		}
688 	}
689 
690 	_nvme_qpair_complete_abort_queued_reqs(qpair);
691 }
692 
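/*
 * Drain the controller's list of finished asynchronous register (property)
 * operations under the controller lock and deliver their completion callbacks.
 */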
693 static void
694 nvme_complete_register_operations(struct spdk_nvme_qpair *qpair)
695 {
696 	struct nvme_register_completion *ctx;
697 	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
698 	STAILQ_HEAD(, nvme_register_completion) operations;
699 
700 	STAILQ_INIT(&operations);
701 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
702 	STAILQ_SWAP(&ctrlr->register_operations, &operations, nvme_register_completion);
703 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
704 
705 	while (!STAILQ_EMPTY(&operations)) {
706 		ctx = STAILQ_FIRST(&operations);
707 		STAILQ_REMOVE_HEAD(&operations, stailq);
708 		if (ctx->cb_fn != NULL) {
709 			ctx->cb_fn(ctx->cb_ctx, ctx->value, &ctx->cpl);
710 		}
711 		free(ctx);
712 	}
713 }
714 
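/*
 * Poll the qpair for completed commands and invoke their callbacks;
 * max_completions of 0 means "reap as many completions as are available".
 * Typical usage is an application-driven polling loop. A minimal sketch
 * (submission and the "done" flag are assumed to be handled elsewhere):
 *
 *	int32_t rc;
 *
 *	do {
 *		rc = spdk_nvme_qpair_process_completions(qpair, 0);
 *	} while (rc >= 0 && !done);
 *
 * A negative return value indicates a transport or controller level error,
 * e.g. -ENXIO when the controller has failed or the qpair is not enabled.
 */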
715 int32_t
716 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
717 {
718 	int32_t ret;
719 	struct nvme_request *req, *tmp;
720 
721 	if (spdk_unlikely(qpair->ctrlr->is_failed)) {
722 		if (qpair->ctrlr->is_removed) {
723 			nvme_qpair_set_state(qpair, NVME_QPAIR_DESTROYING);
724 			nvme_qpair_abort_all_queued_reqs(qpair, 1 /* Do not retry */);
725 			nvme_transport_qpair_abort_reqs(qpair, 1);
726 		}
727 		return -ENXIO;
728 	}
729 
730 	if (spdk_unlikely(!nvme_qpair_check_enabled(qpair) &&
731 			  !(nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING))) {
732 		/*
733 		 * qpair is not enabled, likely because a controller reset is
734 		 *  in progress.
735 		 */
736 		return -ENXIO;
737 	}
738 
739 	/* Error injection: complete queued error requests whose timeout has expired. */
740 	if (spdk_unlikely(!STAILQ_EMPTY(&qpair->err_req_head))) {
741 		STAILQ_FOREACH_SAFE(req, &qpair->err_req_head, stailq, tmp) {
742 			if (spdk_get_ticks() - req->submit_tick > req->timeout_tsc) {
743 				STAILQ_REMOVE(&qpair->err_req_head, req, nvme_request, stailq);
744 				nvme_qpair_manual_complete_request(qpair, req,
745 								   req->cpl.status.sct,
746 								   req->cpl.status.sc, 0, true);
747 			}
748 		}
749 	}
750 
751 	qpair->in_completion_context = 1;
752 	ret = nvme_transport_qpair_process_completions(qpair, max_completions);
753 	if (ret < 0) {
754 		SPDK_ERRLOG("CQ transport error %d (%s) on qpair id %hu\n", ret, spdk_strerror(-ret), qpair->id);
755 		if (nvme_qpair_is_admin_queue(qpair)) {
756 			nvme_ctrlr_fail(qpair->ctrlr, false);
757 		}
758 	}
759 	qpair->in_completion_context = 0;
760 	if (qpair->delete_after_completion_context) {
761 		/*
762 		 * A request to delete this qpair was made in the context of this completion
763 		 *  routine - so it is safe to delete it now.
764 		 */
765 		spdk_nvme_ctrlr_free_io_qpair(qpair);
766 		return ret;
767 	}
768 
769 	/*
770 	 * At this point, ret must represent the number of completions we reaped.
771 	 * Submit as many queued requests as we completed.
772 	 */
773 	if (ret > 0) {
774 		nvme_qpair_resubmit_requests(qpair, ret);
775 	}
776 
777 	/* Complete any pending register operations */
778 	if (nvme_qpair_is_admin_queue(qpair)) {
779 		nvme_complete_register_operations(qpair);
780 	}
781 
782 	return ret;
783 }
784 
785 spdk_nvme_qp_failure_reason
786 spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
787 {
788 	return qpair->transport_failure_reason;
789 }
790 
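/*
 * Initialize the transport-independent part of a qpair: state fields, request
 * lists, and a pool of num_requests nvme_request objects carved out of a single
 * 64-byte-aligned, shareable allocation.
 */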
791 int
792 nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
793 		struct spdk_nvme_ctrlr *ctrlr,
794 		enum spdk_nvme_qprio qprio,
795 		uint32_t num_requests, bool async)
796 {
797 	size_t req_size_padded;
798 	uint32_t i;
799 
800 	qpair->id = id;
801 	qpair->qprio = qprio;
802 
803 	qpair->in_completion_context = 0;
804 	qpair->delete_after_completion_context = 0;
805 	qpair->no_deletion_notification_needed = 0;
806 
807 	qpair->ctrlr = ctrlr;
808 	qpair->trtype = ctrlr->trid.trtype;
809 	qpair->is_new_qpair = true;
810 	qpair->async = async;
811 	qpair->poll_status = NULL;
812 
813 	STAILQ_INIT(&qpair->free_req);
814 	STAILQ_INIT(&qpair->queued_req);
815 	STAILQ_INIT(&qpair->aborting_queued_req);
816 	TAILQ_INIT(&qpair->err_cmd_head);
817 	STAILQ_INIT(&qpair->err_req_head);
818 
819 	req_size_padded = (sizeof(struct nvme_request) + 63) & ~(size_t)63;
820 
821 	qpair->req_buf = spdk_zmalloc(req_size_padded * num_requests, 64, NULL,
822 				      SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
823 	if (qpair->req_buf == NULL) {
824 		SPDK_ERRLOG("no memory to allocate qpair(cntlid:0x%x sqid:%d) req_buf with %d requests\n",
825 			    ctrlr->cntlid, qpair->id, num_requests);
826 		return -ENOMEM;
827 	}
828 
829 	for (i = 0; i < num_requests; i++) {
830 		struct nvme_request *req = qpair->req_buf + i * req_size_padded;
831 
832 		req->qpair = qpair;
833 		STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
834 	}
835 
836 	return 0;
837 }
838 
839 void
840 nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair)
841 {
842 	struct nvme_request		*req;
843 
844 	while (!STAILQ_EMPTY(&qpair->err_req_head)) {
845 		req = STAILQ_FIRST(&qpair->err_req_head);
846 		STAILQ_REMOVE_HEAD(&qpair->err_req_head, stailq);
847 		nvme_qpair_manual_complete_request(qpair, req,
848 						   req->cpl.status.sct,
849 						   req->cpl.status.sc, 0, true);
850 	}
851 }
852 
853 void
854 nvme_qpair_deinit(struct spdk_nvme_qpair *qpair)
855 {
856 	struct nvme_error_cmd *cmd, *entry;
857 
858 	nvme_qpair_abort_queued_reqs(qpair, 1);
859 	_nvme_qpair_complete_abort_queued_reqs(qpair);
860 	nvme_qpair_complete_error_reqs(qpair);
861 
862 	TAILQ_FOREACH_SAFE(cmd, &qpair->err_cmd_head, link, entry) {
863 		TAILQ_REMOVE(&qpair->err_cmd_head, cmd, link);
864 		spdk_free(cmd);
865 	}
866 
867 	spdk_free(qpair->req_buf);
868 }
869 
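/*
 * Core submission path: rejects requests on disconnected or destroyed qpairs,
 * submits the children of split requests, applies any registered error
 * injection, and finally hands the request to the transport (returning -EAGAIN
 * when it must be queued and retried later).
 */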
870 static inline int
871 _nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
872 {
873 	int			rc = 0;
874 	struct nvme_request	*child_req, *tmp;
875 	struct nvme_error_cmd	*cmd;
876 	struct spdk_nvme_ctrlr	*ctrlr = qpair->ctrlr;
877 	bool			child_req_failed = false;
878 
879 	nvme_qpair_check_enabled(qpair);
880 
881 	if (spdk_unlikely(nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED ||
882 			  nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING ||
883 			  nvme_qpair_get_state(qpair) == NVME_QPAIR_DESTROYING)) {
884 		TAILQ_FOREACH_SAFE(child_req, &req->children, child_tailq, tmp) {
885 			nvme_request_remove_child(req, child_req);
886 			nvme_request_free_children(child_req);
887 			nvme_free_request(child_req);
888 		}
889 		if (req->parent != NULL) {
890 			nvme_request_remove_child(req->parent, req);
891 		}
892 		nvme_free_request(req);
893 		return -ENXIO;
894 	}
895 
896 	if (req->num_children) {
897 		/*
898 		 * This is a split (parent) request. Submit all of the children but not the parent
899 		 * request itself, since the parent is the original unsplit request.
900 		 */
901 		TAILQ_FOREACH_SAFE(child_req, &req->children, child_tailq, tmp) {
902 			if (spdk_likely(!child_req_failed)) {
903 				rc = nvme_qpair_submit_request(qpair, child_req);
904 				if (spdk_unlikely(rc != 0)) {
905 					child_req_failed = true;
906 				}
907 			} else { /* free remaining child_reqs since one child_req failed */
908 				nvme_request_remove_child(req, child_req);
909 				nvme_request_free_children(child_req);
910 				nvme_free_request(child_req);
911 			}
912 		}
913 
914 		if (spdk_unlikely(child_req_failed)) {
915 			/* Some of the child requests have already been submitted,
916 			 * so return success since we must wait for those children to complete,
917 			 * but mark the parent request as failed.
918 			 */
919 			if (req->num_children) {
920 				req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
921 				req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
922 				return 0;
923 			}
924 			goto error;
925 		}
926 
927 		return rc;
928 	}
929 
930 	/* Queue this request if its opcode matches an entry in the err_cmd list. */
931 	if (spdk_unlikely(!TAILQ_EMPTY(&qpair->err_cmd_head))) {
932 		TAILQ_FOREACH(cmd, &qpair->err_cmd_head, link) {
933 			if (!cmd->do_not_submit) {
934 				continue;
935 			}
936 
937 			if ((cmd->opc == req->cmd.opc) && cmd->err_count) {
938 				/* add to error request list and set cpl */
939 				req->timeout_tsc = cmd->timeout_tsc;
940 				req->submit_tick = spdk_get_ticks();
941 				req->cpl.status.sct = cmd->status.sct;
942 				req->cpl.status.sc = cmd->status.sc;
943 				STAILQ_INSERT_TAIL(&qpair->err_req_head, req, stailq);
944 				cmd->err_count--;
945 				return 0;
946 			}
947 		}
948 	}
949 
950 	if (spdk_unlikely(ctrlr->is_failed)) {
951 		rc = -ENXIO;
952 		goto error;
953 	}
954 
955 	/* assign submit_tick before submitting req to specific transport */
956 	if (spdk_unlikely(ctrlr->timeout_enabled)) {
957 		if (req->submit_tick == 0) { /* req submitted for the first time */
958 			req->submit_tick = spdk_get_ticks();
959 			req->timed_out = false;
960 		}
961 	} else {
962 		req->submit_tick = 0;
963 	}
964 
965 	/* Allow two cases:
966 	 * 1. NVMe qpair is enabled.
967 	 * 2. Always allow fabrics commands through - these get
968 	 * the controller out of reset state.
969 	 */
970 	if (spdk_likely(nvme_qpair_get_state(qpair) == NVME_QPAIR_ENABLED) ||
971 	    (req->cmd.opc == SPDK_NVME_OPC_FABRIC &&
972 	     nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING)) {
973 		rc = nvme_transport_qpair_submit_request(qpair, req);
974 	} else {
975 		/* The controller is being reset - queue this request and
976 		 *  submit it later when the reset is completed.
977 		 */
978 		return -EAGAIN;
979 	}
980 
981 	if (spdk_likely(rc == 0)) {
982 		if (SPDK_DEBUGLOG_FLAG_ENABLED("nvme")) {
983 			spdk_nvme_print_command(qpair->id, &req->cmd);
984 		}
985 		req->queued = false;
986 		return 0;
987 	}
988 
989 	if (rc == -EAGAIN) {
990 		return -EAGAIN;
991 	}
992 
993 error:
994 	if (req->parent != NULL) {
995 		nvme_request_remove_child(req->parent, req);
996 	}
997 
998 	/* The request came from the queued_req list, so complete it manually to trigger the caller's callback. */
999 	if (spdk_unlikely(req->queued)) {
1000 		nvme_qpair_manual_complete_request(qpair, req, SPDK_NVME_SCT_GENERIC,
1001 						   SPDK_NVME_SC_INTERNAL_DEVICE_ERROR, true, true);
1002 		return rc;
1003 	}
1004 
1005 	nvme_free_request(req);
1006 
1007 	return rc;
1008 }
1009 
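/*
 * Entry point used by the rest of the driver for submitting a request on a
 * qpair. To preserve ordering, a childless request is queued behind any
 * requests already waiting in queued_req (fabrics commands on a connecting
 * qpair are the exception), and a transport -EAGAIN is likewise absorbed by
 * queueing the request for later resubmission.
 */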
1010 int
1011 nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
1012 {
1013 	int rc;
1014 
1015 	if (spdk_unlikely(!STAILQ_EMPTY(&qpair->queued_req) && req->num_children == 0)) {
1016 		/*
1017 		 * Requests that have no children should be sent to the transport after all
1018 		 * currently queued requests. Requests with children will be split and go back
1019 		 * through this path.  We need to make an exception for the fabrics commands
1020 		 * while the qpair is connecting to be able to send the connect command
1021 		 * asynchronously.
1022 		 */
1023 		if (req->cmd.opc != SPDK_NVME_OPC_FABRIC ||
1024 		    nvme_qpair_get_state(qpair) != NVME_QPAIR_CONNECTING) {
1025 			STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
1026 			req->queued = true;
1027 			return 0;
1028 		}
1029 	}
1030 
1031 	rc = _nvme_qpair_submit_request(qpair, req);
1032 	if (rc == -EAGAIN) {
1033 		STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
1034 		req->queued = true;
1035 		rc = 0;
1036 	}
1037 
1038 	return rc;
1039 }
1040 
1041 static int
1042 nvme_qpair_resubmit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
1043 {
1044 	int rc;
1045 
1046 	/*
1047 	 * We should never have a request with children on the queue.
1048 	 * This is necessary to preserve the 1:1 relationship between
1049 	 * completions and resubmissions.
1050 	 */
1051 	assert(req->num_children == 0);
1052 	assert(req->queued);
1053 	rc = _nvme_qpair_submit_request(qpair, req);
1054 	if (spdk_unlikely(rc == -EAGAIN)) {
1055 		STAILQ_INSERT_HEAD(&qpair->queued_req, req, stailq);
1056 	}
1057 
1058 	return rc;
1059 }
1060 
1061 void
1062 nvme_qpair_abort_all_queued_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
1063 {
1064 	nvme_qpair_complete_error_reqs(qpair);
1065 	nvme_qpair_abort_queued_reqs(qpair, dnr);
1066 	_nvme_qpair_complete_abort_queued_reqs(qpair);
1067 }
1068 
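/*
 * Register (or update) an error injection rule for the given opcode on a qpair,
 * or on the admin queue when qpair is NULL. While err_count is non-zero,
 * matching commands are either held back and completed with the given sct/sc
 * after timeout_in_us (do_not_submit == true) or submitted normally. An
 * illustrative call from a test (the io_qpair variable and the chosen values
 * are made up for this example) might be:
 *
 *	spdk_nvme_qpair_add_cmd_error_injection(ctrlr, io_qpair, SPDK_NVME_OPC_READ,
 *						true, 0, 1, SPDK_NVME_SCT_MEDIA_ERROR,
 *						SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
 *
 * which makes the next READ on io_qpair complete with an unrecovered read error.
 */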
1069 int
1070 spdk_nvme_qpair_add_cmd_error_injection(struct spdk_nvme_ctrlr *ctrlr,
1071 					struct spdk_nvme_qpair *qpair,
1072 					uint8_t opc, bool do_not_submit,
1073 					uint64_t timeout_in_us,
1074 					uint32_t err_count,
1075 					uint8_t sct, uint8_t sc)
1076 {
1077 	struct nvme_error_cmd *entry, *cmd = NULL;
1078 
1079 	if (qpair == NULL) {
1080 		qpair = ctrlr->adminq;
1081 	}
1082 
1083 	TAILQ_FOREACH(entry, &qpair->err_cmd_head, link) {
1084 		if (entry->opc == opc) {
1085 			cmd = entry;
1086 			break;
1087 		}
1088 	}
1089 
1090 	if (cmd == NULL) {
1091 		cmd = spdk_zmalloc(sizeof(*cmd), 64, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1092 		if (!cmd) {
1093 			return -ENOMEM;
1094 		}
1095 		TAILQ_INSERT_TAIL(&qpair->err_cmd_head, cmd, link);
1096 	}
1097 
1098 	cmd->do_not_submit = do_not_submit;
1099 	cmd->err_count = err_count;
1100 	cmd->timeout_tsc = timeout_in_us * spdk_get_ticks_hz() / 1000000ULL;
1101 	cmd->opc = opc;
1102 	cmd->status.sct = sct;
1103 	cmd->status.sc = sc;
1104 
1105 	return 0;
1106 }
1107 
1108 void
1109 spdk_nvme_qpair_remove_cmd_error_injection(struct spdk_nvme_ctrlr *ctrlr,
1110 		struct spdk_nvme_qpair *qpair,
1111 		uint8_t opc)
1112 {
1113 	struct nvme_error_cmd *cmd, *entry;
1114 
1115 	if (qpair == NULL) {
1116 		qpair = ctrlr->adminq;
1117 	}
1118 
1119 	TAILQ_FOREACH_SAFE(cmd, &qpair->err_cmd_head, link, entry) {
1120 		if (cmd->opc == opc) {
1121 			TAILQ_REMOVE(&qpair->err_cmd_head, cmd, link);
1122 			spdk_free(cmd);
1123 			return;
1124 		}
1125 	}
1126 
1127 	return;
1128 }
1129 
1130 uint16_t
1131 spdk_nvme_qpair_get_id(struct spdk_nvme_qpair *qpair)
1132 {
1133 	return qpair->id;
1134 }
1135