/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "nvme_internal.h"
#include "spdk/nvme_ocssd.h"

#define NVME_CMD_DPTR_STR_SIZE 256

static int nvme_qpair_resubmit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req);

struct nvme_string {
	uint16_t	value;
	const char	*str;
};

static const struct nvme_string admin_opcode[] = {
	{ SPDK_NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" },
	{ SPDK_NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" },
	{ SPDK_NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" },
	{ SPDK_NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" },
	{ SPDK_NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" },
	{ SPDK_NVME_OPC_IDENTIFY, "IDENTIFY" },
	{ SPDK_NVME_OPC_ABORT, "ABORT" },
	{ SPDK_NVME_OPC_SET_FEATURES, "SET FEATURES" },
	{ SPDK_NVME_OPC_GET_FEATURES, "GET FEATURES" },
	{ SPDK_NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" },
	{ SPDK_NVME_OPC_NS_MANAGEMENT, "NAMESPACE MANAGEMENT" },
	{ SPDK_NVME_OPC_FIRMWARE_COMMIT, "FIRMWARE COMMIT" },
	{ SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" },
	{ SPDK_NVME_OPC_DEVICE_SELF_TEST, "DEVICE SELF-TEST" },
	{ SPDK_NVME_OPC_NS_ATTACHMENT, "NAMESPACE ATTACHMENT" },
	{ SPDK_NVME_OPC_KEEP_ALIVE, "KEEP ALIVE" },
	{ SPDK_NVME_OPC_DIRECTIVE_SEND, "DIRECTIVE SEND" },
	{ SPDK_NVME_OPC_DIRECTIVE_RECEIVE, "DIRECTIVE RECEIVE" },
	{ SPDK_NVME_OPC_VIRTUALIZATION_MANAGEMENT, "VIRTUALIZATION MANAGEMENT" },
	{ SPDK_NVME_OPC_NVME_MI_SEND, "NVME-MI SEND" },
	{ SPDK_NVME_OPC_NVME_MI_RECEIVE, "NVME-MI RECEIVE" },
	{ SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG, "DOORBELL BUFFER CONFIG" },
	{ SPDK_NVME_OPC_FABRIC, "FABRIC" },
	{ SPDK_NVME_OPC_FORMAT_NVM, "FORMAT NVM" },
	{ SPDK_NVME_OPC_SECURITY_SEND, "SECURITY SEND" },
	{ SPDK_NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" },
	{ SPDK_NVME_OPC_SANITIZE, "SANITIZE" },
	{ SPDK_NVME_OPC_GET_LBA_STATUS, "GET LBA STATUS" },
	{ SPDK_OCSSD_OPC_GEOMETRY, "OCSSD / GEOMETRY" },
	{ 0xFFFF, "ADMIN COMMAND" }
};

static const struct nvme_string fabric_opcode[] = {
	{ SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET, "PROPERTY SET" },
	{ SPDK_NVMF_FABRIC_COMMAND_CONNECT, "CONNECT" },
	{ SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET, "PROPERTY GET" },
	{ SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND, "AUTHENTICATION SEND" },
	{ SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV, "AUTHENTICATION RECV" },
	{ 0xFFFF, "RESERVED / VENDOR SPECIFIC" }
};

static const struct nvme_string feat_opcode[] = {
	{ SPDK_NVME_FEAT_ARBITRATION, "ARBITRATION" },
	{ SPDK_NVME_FEAT_POWER_MANAGEMENT, "POWER MANAGEMENT" },
	{ SPDK_NVME_FEAT_LBA_RANGE_TYPE, "LBA RANGE TYPE" },
	{ SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD, "TEMPERATURE THRESHOLD" },
	{ SPDK_NVME_FEAT_ERROR_RECOVERY, "ERROR_RECOVERY" },
	{ SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE, "VOLATILE WRITE CACHE" },
	{ SPDK_NVME_FEAT_NUMBER_OF_QUEUES, "NUMBER OF QUEUES" },
	{ SPDK_NVME_FEAT_INTERRUPT_COALESCING, "INTERRUPT COALESCING" },
	{ SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION, "INTERRUPT VECTOR CONFIGURATION" },
	{ SPDK_NVME_FEAT_WRITE_ATOMICITY, "WRITE ATOMICITY" },
	{ SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION, "ASYNC EVENT CONFIGURATION" },
	{ SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION, "AUTONOMOUS POWER STATE TRANSITION" },
	{ SPDK_NVME_FEAT_HOST_MEM_BUFFER, "HOST MEM BUFFER" },
	{ SPDK_NVME_FEAT_TIMESTAMP, "TIMESTAMP" },
	{ SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, "KEEP ALIVE TIMER" },
	{ SPDK_NVME_FEAT_HOST_CONTROLLED_THERMAL_MANAGEMENT, "HOST CONTROLLED THERMAL MANAGEMENT" },
	{ SPDK_NVME_FEAT_NON_OPERATIONAL_POWER_STATE_CONFIG, "NON OPERATIONAL POWER STATE CONFIG" },
	{ SPDK_NVME_FEAT_SOFTWARE_PROGRESS_MARKER, "SOFTWARE PROGRESS MARKER" },
	{ SPDK_NVME_FEAT_HOST_IDENTIFIER, "HOST IDENTIFIER" },
	{ SPDK_NVME_FEAT_HOST_RESERVE_MASK, "HOST RESERVE MASK" },
	{ SPDK_NVME_FEAT_HOST_RESERVE_PERSIST, "HOST RESERVE PERSIST" },
	{ 0xFFFF, "RESERVED" }
};

static const struct nvme_string io_opcode[] = {
	{ SPDK_NVME_OPC_FLUSH, "FLUSH" },
	{ SPDK_NVME_OPC_WRITE, "WRITE" },
	{ SPDK_NVME_OPC_READ, "READ" },
	{ SPDK_NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" },
	{ SPDK_NVME_OPC_COMPARE, "COMPARE" },
	{ SPDK_NVME_OPC_WRITE_ZEROES, "WRITE ZEROES" },
	{ SPDK_NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" },
	{ SPDK_NVME_OPC_RESERVATION_REGISTER, "RESERVATION REGISTER" },
	{ SPDK_NVME_OPC_RESERVATION_REPORT, "RESERVATION REPORT" },
	{ SPDK_NVME_OPC_RESERVATION_ACQUIRE, "RESERVATION ACQUIRE" },
	{ SPDK_NVME_OPC_RESERVATION_RELEASE, "RESERVATION RELEASE" },
	{ SPDK_OCSSD_OPC_VECTOR_RESET, "OCSSD / VECTOR RESET" },
	{ SPDK_OCSSD_OPC_VECTOR_WRITE, "OCSSD / VECTOR WRITE" },
	{ SPDK_OCSSD_OPC_VECTOR_READ, "OCSSD / VECTOR READ" },
	{ SPDK_OCSSD_OPC_VECTOR_COPY, "OCSSD / VECTOR COPY" },
	{ 0xFFFF, "IO COMMAND" }
};

static const struct nvme_string sgl_type[] = {
	{ SPDK_NVME_SGL_TYPE_DATA_BLOCK, "DATA BLOCK" },
	{ SPDK_NVME_SGL_TYPE_BIT_BUCKET, "BIT BUCKET" },
	{ SPDK_NVME_SGL_TYPE_SEGMENT, "SEGMENT" },
	{ SPDK_NVME_SGL_TYPE_LAST_SEGMENT, "LAST SEGMENT" },
	{ SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK, "TRANSPORT DATA BLOCK" },
	{ SPDK_NVME_SGL_TYPE_VENDOR_SPECIFIC, "VENDOR SPECIFIC" },
	{ 0xFFFF, "RESERVED" }
};

static const struct nvme_string sgl_subtype[] = {
	{ SPDK_NVME_SGL_SUBTYPE_ADDRESS, "ADDRESS" },
	{ SPDK_NVME_SGL_SUBTYPE_OFFSET, "OFFSET" },
	{ SPDK_NVME_SGL_SUBTYPE_TRANSPORT, "TRANSPORT" },
	{ SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY, "INVALIDATE KEY" },
	{ 0xFFFF, "RESERVED" }
};
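
/*
 * Helper for the lookup tables above: each table is terminated by a sentinel
 * entry whose value is 0xFFFF and whose string doubles as the fallback
 * ("ADMIN COMMAND", "IO COMMAND", "RESERVED", ...) when no entry matches.
 * For example, nvme_get_string(io_opcode, SPDK_NVME_OPC_READ) returns "READ",
 * while an unknown I/O opcode falls through to "IO COMMAND".
 */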

static const char *
nvme_get_string(const struct nvme_string *strings, uint16_t value)
{
	const struct nvme_string *entry;

	entry = strings;

	while (entry->value != 0xFFFF) {
		if (entry->value == value) {
			return entry->str;
		}
		entry++;
	}
	return entry->str;
}

static void
nvme_get_sgl_unkeyed(char *buf, size_t size, struct spdk_nvme_cmd *cmd)
{
	struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1;

	snprintf(buf, size, " len:0x%x", sgl->unkeyed.length);
}

static void
nvme_get_sgl_keyed(char *buf, size_t size, struct spdk_nvme_cmd *cmd)
{
	struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1;

	snprintf(buf, size, " len:0x%x key:0x%x", sgl->keyed.length, sgl->keyed.key);
}

static void
nvme_get_sgl(char *buf, size_t size, struct spdk_nvme_cmd *cmd)
{
	struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1;
	int c;

	c = snprintf(buf, size, "SGL %s %s 0x%" PRIx64, nvme_get_string(sgl_type, sgl->generic.type),
		     nvme_get_string(sgl_subtype, sgl->generic.subtype), sgl->address);
	assert(c >= 0 && (size_t)c < size);

	/* Plain data blocks carry only a length; keyed data blocks also carry a key. */
	if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK) {
		nvme_get_sgl_unkeyed(buf + c, size - c, cmd);
	}

	if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) {
		nvme_get_sgl_keyed(buf + c, size - c, cmd);
	}
}

static void
nvme_get_prp(char *buf, size_t size, struct spdk_nvme_cmd *cmd)
{
	snprintf(buf, size, "PRP1 0x%" PRIx64 " PRP2 0x%" PRIx64, cmd->dptr.prp.prp1, cmd->dptr.prp.prp2);
}

static void
nvme_get_dptr(char *buf, size_t size, struct spdk_nvme_cmd *cmd)
{
	if (spdk_nvme_opc_get_data_transfer(cmd->opc) != SPDK_NVME_DATA_NONE) {
		switch (cmd->psdt) {
		case SPDK_NVME_PSDT_PRP:
			nvme_get_prp(buf, size, cmd);
			break;
		case SPDK_NVME_PSDT_SGL_MPTR_CONTIG:
		case SPDK_NVME_PSDT_SGL_MPTR_SGL:
			nvme_get_sgl(buf, size, cmd);
			break;
		default:
			;
		}
	}
}

static void
nvme_admin_qpair_print_command(uint16_t qid, struct spdk_nvme_cmd *cmd)
{
	struct spdk_nvmf_capsule_cmd *fcmd = (void *)cmd;
	char dptr[NVME_CMD_DPTR_STR_SIZE] = {'\0'};

	assert(cmd != NULL);

	nvme_get_dptr(dptr, sizeof(dptr), cmd);

	switch ((int)cmd->opc) {
	case SPDK_NVME_OPC_SET_FEATURES:
	case SPDK_NVME_OPC_GET_FEATURES:
		SPDK_NOTICELOG("%s %s cid:%d cdw10:%08x %s\n",
			       nvme_get_string(admin_opcode, cmd->opc),
			       nvme_get_string(feat_opcode, cmd->cdw10_bits.set_features.fid),
			       cmd->cid, cmd->cdw10, dptr);
		break;
	case SPDK_NVME_OPC_FABRIC:
		SPDK_NOTICELOG("%s %s qid:%d cid:%d %s\n",
			       nvme_get_string(admin_opcode, cmd->opc),
			       nvme_get_string(fabric_opcode, fcmd->fctype), qid,
			       fcmd->cid, dptr);
		break;
	default:
		SPDK_NOTICELOG("%s (%02x) qid:%d cid:%d nsid:%x cdw10:%08x cdw11:%08x %s\n",
			       nvme_get_string(admin_opcode, cmd->opc), cmd->opc, qid, cmd->cid,
			       cmd->nsid, cmd->cdw10, cmd->cdw11, dptr);
	}
}

static void
nvme_io_qpair_print_command(uint16_t qid, struct spdk_nvme_cmd *cmd)
{
	char dptr[NVME_CMD_DPTR_STR_SIZE] = {'\0'};

	assert(cmd != NULL);

	nvme_get_dptr(dptr, sizeof(dptr), cmd);

	switch ((int)cmd->opc) {
	case SPDK_NVME_OPC_WRITE:
	case SPDK_NVME_OPC_READ:
	case SPDK_NVME_OPC_WRITE_UNCORRECTABLE:
	case SPDK_NVME_OPC_COMPARE:
		SPDK_NOTICELOG("%s sqid:%d cid:%d nsid:%d "
			       "lba:%llu len:%d %s\n",
			       nvme_get_string(io_opcode, cmd->opc), qid, cmd->cid, cmd->nsid,
			       ((unsigned long long)cmd->cdw11 << 32) + cmd->cdw10,
			       (cmd->cdw12 & 0xFFFF) + 1, dptr);
		break;
	case SPDK_NVME_OPC_FLUSH:
	case SPDK_NVME_OPC_DATASET_MANAGEMENT:
		SPDK_NOTICELOG("%s sqid:%d cid:%d nsid:%d\n",
			       nvme_get_string(io_opcode, cmd->opc), qid, cmd->cid, cmd->nsid);
		break;
	default:
		SPDK_NOTICELOG("%s (%02x) sqid:%d cid:%d nsid:%d\n",
			       nvme_get_string(io_opcode, cmd->opc), cmd->opc, qid, cmd->cid, cmd->nsid);
		break;
	}
}

void
spdk_nvme_print_command(uint16_t qid, struct spdk_nvme_cmd *cmd)
{
	assert(cmd != NULL);

	if (qid == 0 || cmd->opc == SPDK_NVME_OPC_FABRIC) {
		nvme_admin_qpair_print_command(qid, cmd);
	} else {
		nvme_io_qpair_print_command(qid, cmd);
	}
}

void
spdk_nvme_qpair_print_command(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd)
{
	assert(qpair != NULL);
	assert(cmd != NULL);

	spdk_nvme_print_command(qpair->id, cmd);
}
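
/*
 * Usage sketch (illustrative only, not part of this file): an application
 * that wants to dump a command it is about to submit can call the public
 * helper directly, e.g. for an admin command (qid 0):
 *
 *	struct spdk_nvme_cmd cmd = {};
 *
 *	cmd.opc = SPDK_NVME_OPC_IDENTIFY;
 *	spdk_nvme_print_command(0, &cmd);
 *
 * spdk_nvme_qpair_print_command() is the same helper keyed off an existing
 * qpair, so the caller does not have to track the queue id itself.
 */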

static const struct nvme_string generic_status[] = {
	{ SPDK_NVME_SC_SUCCESS, "SUCCESS" },
	{ SPDK_NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
	{ SPDK_NVME_SC_INVALID_FIELD, "INVALID FIELD" },
	{ SPDK_NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
	{ SPDK_NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
	{ SPDK_NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
	{ SPDK_NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
	{ SPDK_NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
	{ SPDK_NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
	{ SPDK_NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
	{ SPDK_NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
	{ SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
	{ SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
	{ SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR, "INVALID SGL SEGMENT DESCRIPTOR" },
	{ SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS, "INVALID NUMBER OF SGL DESCRIPTORS" },
	{ SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" },
	{ SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" },
	{ SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" },
	{ SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF, "INVALID CONTROLLER MEMORY BUFFER" },
	{ SPDK_NVME_SC_INVALID_PRP_OFFSET, "INVALID PRP OFFSET" },
	{ SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" },
	{ SPDK_NVME_SC_OPERATION_DENIED, "OPERATION DENIED" },
	{ SPDK_NVME_SC_INVALID_SGL_OFFSET, "INVALID SGL OFFSET" },
	{ SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT, "HOSTID INCONSISTENT FORMAT" },
	{ SPDK_NVME_SC_KEEP_ALIVE_EXPIRED, "KEEP ALIVE EXPIRED" },
	{ SPDK_NVME_SC_KEEP_ALIVE_INVALID, "KEEP ALIVE INVALID" },
	{ SPDK_NVME_SC_ABORTED_PREEMPT, "ABORTED - PREEMPT AND ABORT" },
	{ SPDK_NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" },
	{ SPDK_NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" },
	{ SPDK_NVME_SC_SGL_DATA_BLOCK_GRANULARITY_INVALID, "DATA BLOCK GRANULARITY INVALID" },
	{ SPDK_NVME_SC_COMMAND_INVALID_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" },
	{ SPDK_NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
	{ SPDK_NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
	{ SPDK_NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
	{ SPDK_NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" },
	{ SPDK_NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" },
	{ 0xFFFF, "GENERIC" }
};

static const struct nvme_string command_specific_status[] = {
	{ SPDK_NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
	{ SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
	{ SPDK_NVME_SC_INVALID_QUEUE_SIZE, "INVALID QUEUE SIZE" },
	{ SPDK_NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
	{ SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
	{ SPDK_NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
	{ SPDK_NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
	{ SPDK_NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
	{ SPDK_NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
	{ SPDK_NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
	{ SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET, "FIRMWARE REQUIRES CONVENTIONAL RESET" },
	{ SPDK_NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" },
	{ SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE, "FEATURE ID NOT SAVEABLE" },
	{ SPDK_NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" },
	{ SPDK_NVME_SC_FEATURE_NOT_NAMESPACE_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" },
	{ SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET, "FIRMWARE REQUIRES NVM RESET" },
	{ SPDK_NVME_SC_FIRMWARE_REQ_RESET, "FIRMWARE REQUIRES RESET" },
	{ SPDK_NVME_SC_FIRMWARE_REQ_MAX_TIME_VIOLATION, "FIRMWARE REQUIRES MAX TIME VIOLATION" },
	{ SPDK_NVME_SC_FIRMWARE_ACTIVATION_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" },
	{ SPDK_NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" },
	{ SPDK_NVME_SC_NAMESPACE_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" },
	{ SPDK_NVME_SC_NAMESPACE_ID_UNAVAILABLE, "NAMESPACE ID UNAVAILABLE" },
	{ SPDK_NVME_SC_NAMESPACE_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" },
	{ SPDK_NVME_SC_NAMESPACE_IS_PRIVATE, "NAMESPACE IS PRIVATE" },
	{ SPDK_NVME_SC_NAMESPACE_NOT_ATTACHED, "NAMESPACE NOT ATTACHED" },
	{ SPDK_NVME_SC_THINPROVISIONING_NOT_SUPPORTED, "THINPROVISIONING NOT SUPPORTED" },
	{ SPDK_NVME_SC_CONTROLLER_LIST_INVALID, "CONTROLLER LIST INVALID" },
	{ SPDK_NVME_SC_DEVICE_SELF_TEST_IN_PROGRESS, "DEVICE SELF-TEST IN PROGRESS" },
	{ SPDK_NVME_SC_BOOT_PARTITION_WRITE_PROHIBITED, "BOOT PARTITION WRITE PROHIBITED" },
	{ SPDK_NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER ID" },
	{ SPDK_NVME_SC_INVALID_SECONDARY_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" },
	{ SPDK_NVME_SC_INVALID_NUM_CTRLR_RESOURCES, "INVALID NUMBER OF CONTROLLER RESOURCES" },
	{ SPDK_NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" },
	{ SPDK_NVME_SC_STREAM_RESOURCE_ALLOCATION_FAILED, "STREAM RESOURCE ALLOCATION FAILED" },
	{ SPDK_NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
	{ SPDK_NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
	{ SPDK_NVME_SC_ATTEMPTED_WRITE_TO_RO_RANGE, "WRITE TO RO RANGE" },
	{ 0xFFFF, "COMMAND SPECIFIC" }
};

static const struct nvme_string media_error_status[] = {
	{ SPDK_NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
	{ SPDK_NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
	{ SPDK_NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
	{ SPDK_NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
	{ SPDK_NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
	{ SPDK_NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
	{ SPDK_NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
	{ SPDK_NVME_SC_DEALLOCATED_OR_UNWRITTEN_BLOCK, "DEALLOCATED OR UNWRITTEN BLOCK" },
	{ SPDK_OCSSD_SC_OFFLINE_CHUNK, "RESET OFFLINE CHUNK" },
	{ SPDK_OCSSD_SC_INVALID_RESET, "INVALID RESET" },
	{ SPDK_OCSSD_SC_WRITE_FAIL_WRITE_NEXT_UNIT, "WRITE FAIL WRITE NEXT UNIT" },
	{ SPDK_OCSSD_SC_WRITE_FAIL_CHUNK_EARLY_CLOSE, "WRITE FAIL CHUNK EARLY CLOSE" },
	{ SPDK_OCSSD_SC_OUT_OF_ORDER_WRITE, "OUT OF ORDER WRITE" },
	{ SPDK_OCSSD_SC_READ_HIGH_ECC, "READ HIGH ECC" },
	{ 0xFFFF, "MEDIA ERROR" }
};

static const struct nvme_string path_status[] = {
	{ SPDK_NVME_SC_INTERNAL_PATH_ERROR, "INTERNAL PATH ERROR" },
	{ SPDK_NVME_SC_CONTROLLER_PATH_ERROR, "CONTROLLER PATH ERROR" },
	{ SPDK_NVME_SC_HOST_PATH_ERROR, "HOST PATH ERROR" },
	{ SPDK_NVME_SC_ABORTED_BY_HOST, "ABORTED BY HOST" },
	{ 0xFFFF, "PATH ERROR" }
};

const char *
spdk_nvme_cpl_get_status_string(const struct spdk_nvme_status *status)
{
	const struct nvme_string *entry;

	switch (status->sct) {
	case SPDK_NVME_SCT_GENERIC:
		entry = generic_status;
		break;
	case SPDK_NVME_SCT_COMMAND_SPECIFIC:
		entry = command_specific_status;
		break;
	case SPDK_NVME_SCT_MEDIA_ERROR:
		entry = media_error_status;
		break;
	case SPDK_NVME_SCT_PATH:
		entry = path_status;
		break;
	case SPDK_NVME_SCT_VENDOR_SPECIFIC:
		return "VENDOR SPECIFIC";
	default:
		return "RESERVED";
	}

	return nvme_get_string(entry, status->sc);
}

void
spdk_nvme_print_completion(uint16_t qid, struct spdk_nvme_cpl *cpl)
{
	assert(cpl != NULL);

	/* Check that sqid matches qid. Note that sqid is reserved
	 * for fabrics so don't print an error when sqid is 0. */
	if (cpl->sqid != qid && cpl->sqid != 0) {
		SPDK_ERRLOG("sqid %u doesn't match qid\n", cpl->sqid);
	}

	SPDK_NOTICELOG("%s (%02x/%02x) qid:%d cid:%d cdw0:%x sqhd:%04x p:%x m:%x dnr:%x\n",
		       spdk_nvme_cpl_get_status_string(&cpl->status),
		       cpl->status.sct, cpl->status.sc, qid, cpl->cid, cpl->cdw0,
		       cpl->sqhd, cpl->status.p, cpl->status.m, cpl->status.dnr);
}

void
spdk_nvme_qpair_print_completion(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cpl *cpl)
{
	spdk_nvme_print_completion(qpair->id, cpl);
}
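
/*
 * Illustrative sketch (an assumption about the caller's context, not part of
 * this file): an I/O completion callback can use these helpers to log
 * failures. The callback signature and the const cast reflect typical SPDK
 * application code, not anything defined here.
 *
 *	static void
 *	my_io_complete(void *cb_arg, const struct spdk_nvme_cpl *cpl)
 *	{
 *		struct spdk_nvme_qpair *qpair = cb_arg;
 *
 *		if (spdk_nvme_cpl_is_error(cpl)) {
 *			spdk_nvme_qpair_print_completion(qpair,
 *							 (struct spdk_nvme_cpl *)cpl);
 *		}
 *	}
 */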

bool
nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl)
{
	/*
	 * TODO: spec is not clear how commands that are aborted due
	 *  to TLER will be marked.  So for now, it seems
	 *  NAMESPACE_NOT_READY is the only case where we should
	 *  look at the DNR bit.
	 */
	switch ((int)cpl->status.sct) {
	case SPDK_NVME_SCT_GENERIC:
		switch ((int)cpl->status.sc) {
		case SPDK_NVME_SC_NAMESPACE_NOT_READY:
		case SPDK_NVME_SC_FORMAT_IN_PROGRESS:
			if (cpl->status.dnr) {
				return false;
			} else {
				return true;
			}
		case SPDK_NVME_SC_INVALID_OPCODE:
		case SPDK_NVME_SC_INVALID_FIELD:
		case SPDK_NVME_SC_COMMAND_ID_CONFLICT:
		case SPDK_NVME_SC_DATA_TRANSFER_ERROR:
		case SPDK_NVME_SC_ABORTED_POWER_LOSS:
		case SPDK_NVME_SC_INTERNAL_DEVICE_ERROR:
		case SPDK_NVME_SC_ABORTED_BY_REQUEST:
		case SPDK_NVME_SC_ABORTED_SQ_DELETION:
		case SPDK_NVME_SC_ABORTED_FAILED_FUSED:
		case SPDK_NVME_SC_ABORTED_MISSING_FUSED:
		case SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
		case SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR:
		case SPDK_NVME_SC_LBA_OUT_OF_RANGE:
		case SPDK_NVME_SC_CAPACITY_EXCEEDED:
		default:
			return false;
		}
	case SPDK_NVME_SCT_PATH:
		/*
		 * Per NVMe TP 4028 (Path and Transport Error Enhancements), retries should be
		 * based on the setting of the DNR bit for Internal Path Error
		 */
		switch ((int)cpl->status.sc) {
		case SPDK_NVME_SC_INTERNAL_PATH_ERROR:
			return !cpl->status.dnr;
		default:
			return false;
		}
	case SPDK_NVME_SCT_COMMAND_SPECIFIC:
	case SPDK_NVME_SCT_MEDIA_ERROR:
	case SPDK_NVME_SCT_VENDOR_SPECIFIC:
	default:
		return false;
	}
}

static void
nvme_qpair_manual_complete_request(struct spdk_nvme_qpair *qpair,
				   struct nvme_request *req, uint32_t sct, uint32_t sc,
				   uint32_t dnr, bool print_on_error)
{
	struct spdk_nvme_cpl cpl;
	bool error;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sqid = qpair->id;
	cpl.status.sct = sct;
	cpl.status.sc = sc;
	cpl.status.dnr = dnr;

	error = spdk_nvme_cpl_is_error(&cpl);

	if (error && print_on_error && !qpair->ctrlr->opts.disable_error_logging) {
		SPDK_NOTICELOG("Command completed manually:\n");
		spdk_nvme_qpair_print_command(qpair, &req->cmd);
		spdk_nvme_qpair_print_completion(qpair, &cpl);
	}

	nvme_complete_request(req->cb_fn, req->cb_arg, qpair, req, &cpl);
	nvme_free_request(req);
}

static void
_nvme_qpair_abort_queued_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{
	struct nvme_request *req;
	STAILQ_HEAD(, nvme_request) tmp;

	STAILQ_INIT(&tmp);
	STAILQ_SWAP(&tmp, &qpair->queued_req, nvme_request);

	while (!STAILQ_EMPTY(&tmp)) {
		req = STAILQ_FIRST(&tmp);
		STAILQ_REMOVE_HEAD(&tmp, stailq);
		if (!qpair->ctrlr->opts.disable_error_logging) {
			SPDK_ERRLOG("aborting queued i/o\n");
		}
		nvme_qpair_manual_complete_request(qpair, req, SPDK_NVME_SCT_GENERIC,
						   SPDK_NVME_SC_ABORTED_BY_REQUEST, dnr, true);
	}
}

/* The callback to a request may submit the next request which is queued and
 * then the same callback may abort it immediately. This repetition may cause
 * infinite recursive calls. Hence move aborting requests to another list here
 * and abort them later at resubmission.
 */
static void
_nvme_qpair_complete_abort_queued_reqs(struct spdk_nvme_qpair *qpair)
{
	struct nvme_request *req;

	while (!STAILQ_EMPTY(&qpair->aborting_queued_req)) {
		req = STAILQ_FIRST(&qpair->aborting_queued_req);
		STAILQ_REMOVE_HEAD(&qpair->aborting_queued_req, stailq);
		nvme_qpair_manual_complete_request(qpair, req, SPDK_NVME_SCT_GENERIC,
						   SPDK_NVME_SC_ABORTED_BY_REQUEST, 1, true);
	}
}

uint32_t
nvme_qpair_abort_queued_reqs(struct spdk_nvme_qpair *qpair, void *cmd_cb_arg)
{
	struct nvme_request *req, *tmp;
	uint32_t aborting = 0;

	STAILQ_FOREACH_SAFE(req, &qpair->queued_req, stailq, tmp) {
		if (req->cb_arg == cmd_cb_arg) {
			STAILQ_REMOVE(&qpair->queued_req, req, nvme_request, stailq);
			STAILQ_INSERT_TAIL(&qpair->aborting_queued_req, req, stailq);
			if (!qpair->ctrlr->opts.disable_error_logging) {
				SPDK_ERRLOG("aborting queued i/o\n");
			}
			aborting++;
		}
	}

	return aborting;
}

static inline bool
nvme_qpair_check_enabled(struct spdk_nvme_qpair *qpair)
{
	struct nvme_request *req;

	/*
	 * Either during initial connect or reset, the qpair should follow the given state machine:
	 * QPAIR_DISABLED->QPAIR_CONNECTING->QPAIR_CONNECTED->QPAIR_ENABLING->QPAIR_ENABLED. In the
	 * reset case, once the qpair is properly connected, we need to abort any outstanding requests
	 * from the old transport connection and encourage the application to retry them. We also need
	 * to submit any queued requests that built up while we were in the connected or enabling state.
	 */
	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTED && !qpair->ctrlr->is_resetting) {
		nvme_qpair_set_state(qpair, NVME_QPAIR_ENABLING);
		/*
		 * PCIe is special: for fabrics transports, we can abort requests before disconnecting
		 * during reset, but we have historically not disconnected PCIe qpairs during reset,
		 * so we have to abort requests here.
		 */
		if (qpair->ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
			nvme_qpair_abort_reqs(qpair, 0);
		}
		nvme_qpair_set_state(qpair, NVME_QPAIR_ENABLED);
		while (!STAILQ_EMPTY(&qpair->queued_req)) {
			req = STAILQ_FIRST(&qpair->queued_req);
			STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
			if (nvme_qpair_resubmit_request(qpair, req)) {
				break;
			}
		}
	}

	/*
	 * When doing a reset, we must disconnect the qpair on the proper core.
	 * Note, reset is the only case where we set the failure reason without
	 * setting the qpair state since reset is done at the generic layer on the
	 * controller thread and we can't disconnect I/O qpairs from the controller
	 * thread.
	 */
	if (qpair->transport_failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE &&
	    nvme_qpair_get_state(qpair) == NVME_QPAIR_ENABLED) {
		/* Don't disconnect PCIe qpairs. They are a special case for reset. */
		if (qpair->ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
			nvme_ctrlr_disconnect_qpair(qpair);
		}
		return false;
	}

	return nvme_qpair_get_state(qpair) == NVME_QPAIR_ENABLED;
}

void
nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_requests)
{
	uint32_t i;
	int resubmit_rc;
	struct nvme_request *req;

	for (i = 0; i < num_requests; i++) {
		if (qpair->ctrlr->is_resetting) {
			break;
		}
		if ((req = STAILQ_FIRST(&qpair->queued_req)) == NULL) {
			break;
		}
		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
		resubmit_rc = nvme_qpair_resubmit_request(qpair, req);
		if (spdk_unlikely(resubmit_rc != 0)) {
			SPDK_DEBUGLOG(nvme, "Unable to resubmit as many requests as we completed.\n");
			break;
		}
	}

	_nvme_qpair_complete_abort_queued_reqs(qpair);
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	int32_t ret;
	struct nvme_request *req, *tmp;

	if (spdk_unlikely(qpair->ctrlr->is_failed)) {
		if (qpair->ctrlr->is_removed) {
			nvme_qpair_set_state(qpair, NVME_QPAIR_DESTROYING);
			nvme_qpair_abort_reqs(qpair, 1 /* Do not retry */);
		}
		return -ENXIO;
	}

	if (spdk_unlikely(!nvme_qpair_check_enabled(qpair) &&
			  !(nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING))) {
		/*
		 * qpair is not enabled, likely because a controller reset is
		 * in progress.
		 */
		return -ENXIO;
	}

	/* error injection for those queued error requests */
	if (spdk_unlikely(!STAILQ_EMPTY(&qpair->err_req_head))) {
		STAILQ_FOREACH_SAFE(req, &qpair->err_req_head, stailq, tmp) {
			if (spdk_get_ticks() - req->submit_tick > req->timeout_tsc) {
				STAILQ_REMOVE(&qpair->err_req_head, req, nvme_request, stailq);
				nvme_qpair_manual_complete_request(qpair, req,
								   req->cpl.status.sct,
								   req->cpl.status.sc, 0, true);
			}
		}
	}

	qpair->in_completion_context = 1;
	ret = nvme_transport_qpair_process_completions(qpair, max_completions);
	if (ret < 0) {
		SPDK_ERRLOG("CQ transport error %d on qpair id %hu\n", ret, qpair->id);
		if (nvme_qpair_is_admin_queue(qpair)) {
			nvme_ctrlr_fail(qpair->ctrlr, false);
		}
	}
	qpair->in_completion_context = 0;
	if (qpair->delete_after_completion_context) {
		/*
		 * A request to delete this qpair was made in the context of this completion
		 * routine - so it is safe to delete it now.
		 */
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		return ret;
	}

	/*
	 * At this point, ret must represent the number of completions we reaped.
	 * Submit as many queued requests as we completed.
	 */
	nvme_qpair_resubmit_requests(qpair, ret);

	return ret;
}
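
/*
 * Illustrative sketch (an assumption about the caller, not defined here):
 * I/O qpairs are not interrupt driven, so an application thread typically
 * polls each of its qpairs from its main loop. Passing 0 for max_completions
 * asks the transport to drain whatever completions are ready.
 *
 *	for (;;) {
 *		int32_t rc = spdk_nvme_qpair_process_completions(qpair, 0);
 *
 *		if (rc < 0) {
 *			break;	// qpair failed or a controller reset is in progress
 *		}
 *		// ... submit more I/O, do other work ...
 *	}
 */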

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->transport_failure_reason;
}

int
nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
		struct spdk_nvme_ctrlr *ctrlr,
		enum spdk_nvme_qprio qprio,
		uint32_t num_requests)
{
	size_t req_size_padded;
	uint32_t i;

	qpair->id = id;
	qpair->qprio = qprio;

	qpair->in_completion_context = 0;
	qpair->delete_after_completion_context = 0;
	qpair->no_deletion_notification_needed = 0;

	qpair->ctrlr = ctrlr;
	qpair->trtype = ctrlr->trid.trtype;

	STAILQ_INIT(&qpair->free_req);
	STAILQ_INIT(&qpair->queued_req);
	STAILQ_INIT(&qpair->aborting_queued_req);
	TAILQ_INIT(&qpair->err_cmd_head);
	STAILQ_INIT(&qpair->err_req_head);

	req_size_padded = (sizeof(struct nvme_request) + 63) & ~(size_t)63;

	qpair->req_buf = spdk_zmalloc(req_size_padded * num_requests, 64, NULL,
				      SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
	if (qpair->req_buf == NULL) {
		SPDK_ERRLOG("no memory to allocate qpair(cntlid:0x%x sqid:%d) req_buf with %d request\n",
			    ctrlr->cntlid, qpair->id, num_requests);
		return -ENOMEM;
	}

	for (i = 0; i < num_requests; i++) {
		struct nvme_request *req = qpair->req_buf + i * req_size_padded;

		req->qpair = qpair;
		STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
	}

	return 0;
}

void
nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair)
{
	struct nvme_request *req;

	while (!STAILQ_EMPTY(&qpair->err_req_head)) {
		req = STAILQ_FIRST(&qpair->err_req_head);
		STAILQ_REMOVE_HEAD(&qpair->err_req_head, stailq);
		nvme_qpair_manual_complete_request(qpair, req,
						   req->cpl.status.sct,
						   req->cpl.status.sc, 0, true);
	}
}

void
nvme_qpair_deinit(struct spdk_nvme_qpair *qpair)
{
	struct nvme_error_cmd *cmd, *entry;

	_nvme_qpair_abort_queued_reqs(qpair, 1);
	_nvme_qpair_complete_abort_queued_reqs(qpair);
	nvme_qpair_complete_error_reqs(qpair);

	TAILQ_FOREACH_SAFE(cmd, &qpair->err_cmd_head, link, entry) {
		TAILQ_REMOVE(&qpair->err_cmd_head, cmd, link);
		spdk_free(cmd);
	}

	spdk_free(qpair->req_buf);
}
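
/*
 * Core submission path shared by nvme_qpair_submit_request() and the
 * resubmit path below. In order, it: frees the request (and any children)
 * if the qpair is being torn down; fans out a split request by submitting
 * each child individually; diverts requests that match an injected error
 * onto err_req_head; and finally hands the request to the transport,
 * returning -EAGAIN when the qpair is not yet enabled so the caller can
 * queue it for later.
 */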

static inline int
_nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	int rc = 0;
	struct nvme_request *child_req, *tmp;
	struct nvme_error_cmd *cmd;
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
	bool child_req_failed = false;

	nvme_qpair_check_enabled(qpair);

	if (spdk_unlikely(nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED ||
			  nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING ||
			  nvme_qpair_get_state(qpair) == NVME_QPAIR_DESTROYING)) {
		TAILQ_FOREACH_SAFE(child_req, &req->children, child_tailq, tmp) {
			nvme_request_remove_child(req, child_req);
			nvme_request_free_children(child_req);
			nvme_free_request(child_req);
		}
		if (req->parent != NULL) {
			nvme_request_remove_child(req->parent, req);
		}
		nvme_free_request(req);
		return -ENXIO;
	}

	if (req->num_children) {
		/*
		 * This is a split (parent) request. Submit all of the children but not the parent
		 * request itself, since the parent is the original unsplit request.
		 */
		TAILQ_FOREACH_SAFE(child_req, &req->children, child_tailq, tmp) {
			if (spdk_likely(!child_req_failed)) {
				rc = nvme_qpair_submit_request(qpair, child_req);
				if (spdk_unlikely(rc != 0)) {
					child_req_failed = true;
				}
			} else { /* free remaining child_reqs since one child_req failed */
				nvme_request_remove_child(req, child_req);
				nvme_request_free_children(child_req);
				nvme_free_request(child_req);
			}
		}

		if (spdk_unlikely(child_req_failed)) {
			/* Some of the child requests were already submitted before one failed.
			 * Return success, since we must wait for those children to complete,
			 * but mark the parent request as failed.
			 */
			if (req->num_children) {
				req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
				req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
				return 0;
			}
			goto error;
		}

		return rc;
	}

	/* Queue requests whose opcode matches an entry in the err_cmd list. */
	if (spdk_unlikely(!TAILQ_EMPTY(&qpair->err_cmd_head))) {
		TAILQ_FOREACH(cmd, &qpair->err_cmd_head, link) {
			if (!cmd->do_not_submit) {
				continue;
			}

			if ((cmd->opc == req->cmd.opc) && cmd->err_count) {
				/* add to error request list and set cpl */
				req->timeout_tsc = cmd->timeout_tsc;
				req->submit_tick = spdk_get_ticks();
				req->cpl.status.sct = cmd->status.sct;
				req->cpl.status.sc = cmd->status.sc;
				STAILQ_INSERT_TAIL(&qpair->err_req_head, req, stailq);
				cmd->err_count--;
				return 0;
			}
		}
	}

	if (spdk_unlikely(ctrlr->is_failed)) {
		rc = -ENXIO;
		goto error;
	}

	/* assign submit_tick before submitting req to specific transport */
	if (spdk_unlikely(ctrlr->timeout_enabled)) {
		if (req->submit_tick == 0) { /* req submitted for the first time */
			req->submit_tick = spdk_get_ticks();
			req->timed_out = false;
		}
	} else {
		req->submit_tick = 0;
	}

	/* Allow two cases:
	 * 1. NVMe qpair is enabled.
	 * 2. Always allow fabrics commands through - these get
	 *    the controller out of reset state.
	 */
	if (spdk_likely(nvme_qpair_get_state(qpair) == NVME_QPAIR_ENABLED) ||
	    (req->cmd.opc == SPDK_NVME_OPC_FABRIC &&
	     nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING)) {
		rc = nvme_transport_qpair_submit_request(qpair, req);
	} else {
		/* The controller is being reset - queue this request and
		 * submit it later when the reset is completed.
		 */
		return -EAGAIN;
	}

	if (spdk_likely(rc == 0)) {
		req->queued = false;
		return 0;
	}

	if (rc == -EAGAIN) {
		return -EAGAIN;
	}

error:
	if (req->parent != NULL) {
		nvme_request_remove_child(req->parent, req);
	}

	/* The request came from the queued_req list, so complete it manually to
	 * trigger the caller's callback. */
	if (spdk_unlikely(req->queued)) {
		nvme_qpair_manual_complete_request(qpair, req, SPDK_NVME_SCT_GENERIC,
						   SPDK_NVME_SC_INTERNAL_DEVICE_ERROR, true, true);
		return rc;
	}

	nvme_free_request(req);

	return rc;
}

int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	int rc;

	if (spdk_unlikely(!STAILQ_EMPTY(&qpair->queued_req) && req->num_children == 0)) {
		/*
		 * Requests that have no children should be sent to the transport after all
		 * currently queued requests. Requests with children will be split and go back
		 * through this path.
		 */
		STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
		req->queued = true;
		return 0;
	}

	rc = _nvme_qpair_submit_request(qpair, req);
	if (rc == -EAGAIN) {
		STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
		req->queued = true;
		rc = 0;
	}

	return rc;
}

static int
nvme_qpair_resubmit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	int rc;

	/*
	 * We should never have a request with children on the queue.
	 * This is necessary to preserve the 1:1 relationship between
	 * completions and resubmissions.
	 */
	assert(req->num_children == 0);
	assert(req->queued);
	rc = _nvme_qpair_submit_request(qpair, req);
	if (spdk_unlikely(rc == -EAGAIN)) {
		STAILQ_INSERT_HEAD(&qpair->queued_req, req, stailq);
	}

	return rc;
}

void
nvme_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{
	nvme_qpair_complete_error_reqs(qpair);
	_nvme_qpair_abort_queued_reqs(qpair, dnr);
	_nvme_qpair_complete_abort_queued_reqs(qpair);
	nvme_transport_qpair_abort_reqs(qpair, dnr);
}

int
spdk_nvme_qpair_add_cmd_error_injection(struct spdk_nvme_ctrlr *ctrlr,
					struct spdk_nvme_qpair *qpair,
					uint8_t opc, bool do_not_submit,
					uint64_t timeout_in_us,
					uint32_t err_count,
					uint8_t sct, uint8_t sc)
{
	struct nvme_error_cmd *entry, *cmd = NULL;

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
	}

	TAILQ_FOREACH(entry, &qpair->err_cmd_head, link) {
		if (entry->opc == opc) {
			cmd = entry;
			break;
		}
	}

	if (cmd == NULL) {
		cmd = spdk_zmalloc(sizeof(*cmd), 64, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
		if (!cmd) {
			return -ENOMEM;
		}
		TAILQ_INSERT_TAIL(&qpair->err_cmd_head, cmd, link);
	}

	cmd->do_not_submit = do_not_submit;
	cmd->err_count = err_count;
	cmd->timeout_tsc = timeout_in_us * spdk_get_ticks_hz() / 1000000ULL;
	cmd->opc = opc;
	cmd->status.sct = sct;
	cmd->status.sc = sc;

	return 0;
}

void
spdk_nvme_qpair_remove_cmd_error_injection(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair,
		uint8_t opc)
{
	struct nvme_error_cmd *cmd, *entry;

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
	}

	TAILQ_FOREACH_SAFE(cmd, &qpair->err_cmd_head, link, entry) {
		if (cmd->opc == opc) {
			TAILQ_REMOVE(&qpair->err_cmd_head, cmd, link);
			spdk_free(cmd);
			return;
		}
	}

	return;
}
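
/*
 * Usage sketch for the error injection helpers above (illustrative only;
 * ctrlr and io_qpair are placeholders for the caller's handles, not values
 * defined in this file). Force the next 5 READ commands on an I/O qpair to
 * complete with a media error instead of reaching the controller, then
 * remove the rule:
 *
 *	spdk_nvme_qpair_add_cmd_error_injection(ctrlr, io_qpair,
 *						SPDK_NVME_OPC_READ,
 *						true,	// do_not_submit
 *						0,	// timeout_in_us: complete on next poll
 *						5,	// err_count
 *						SPDK_NVME_SCT_MEDIA_ERROR,
 *						SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
 *	...
 *	spdk_nvme_qpair_remove_cmd_error_injection(ctrlr, io_qpair, SPDK_NVME_OPC_READ);
 */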