1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "nvme_internal.h" 8 #include "spdk/nvme_ocssd.h" 9 #include "spdk/string.h" 10 11 #define NVME_CMD_DPTR_STR_SIZE 256 12 13 static int nvme_qpair_resubmit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req); 14 15 struct nvme_string { 16 uint16_t value; 17 const char *str; 18 }; 19 20 static const struct nvme_string admin_opcode[] = { 21 { SPDK_NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" }, 22 { SPDK_NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" }, 23 { SPDK_NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" }, 24 { SPDK_NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" }, 25 { SPDK_NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" }, 26 { SPDK_NVME_OPC_IDENTIFY, "IDENTIFY" }, 27 { SPDK_NVME_OPC_ABORT, "ABORT" }, 28 { SPDK_NVME_OPC_SET_FEATURES, "SET FEATURES" }, 29 { SPDK_NVME_OPC_GET_FEATURES, "GET FEATURES" }, 30 { SPDK_NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" }, 31 { SPDK_NVME_OPC_NS_MANAGEMENT, "NAMESPACE MANAGEMENT" }, 32 { SPDK_NVME_OPC_FIRMWARE_COMMIT, "FIRMWARE COMMIT" }, 33 { SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" }, 34 { SPDK_NVME_OPC_DEVICE_SELF_TEST, "DEVICE SELF-TEST" }, 35 { SPDK_NVME_OPC_NS_ATTACHMENT, "NAMESPACE ATTACHMENT" }, 36 { SPDK_NVME_OPC_KEEP_ALIVE, "KEEP ALIVE" }, 37 { SPDK_NVME_OPC_DIRECTIVE_SEND, "DIRECTIVE SEND" }, 38 { SPDK_NVME_OPC_DIRECTIVE_RECEIVE, "DIRECTIVE RECEIVE" }, 39 { SPDK_NVME_OPC_VIRTUALIZATION_MANAGEMENT, "VIRTUALIZATION MANAGEMENT" }, 40 { SPDK_NVME_OPC_NVME_MI_SEND, "NVME-MI SEND" }, 41 { SPDK_NVME_OPC_NVME_MI_RECEIVE, "NVME-MI RECEIVE" }, 42 { SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG, "DOORBELL BUFFER CONFIG" }, 43 { SPDK_NVME_OPC_FABRIC, "FABRIC" }, 44 { SPDK_NVME_OPC_FORMAT_NVM, "FORMAT NVM" }, 45 { SPDK_NVME_OPC_SECURITY_SEND, "SECURITY SEND" }, 46 { SPDK_NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" }, 47 { SPDK_NVME_OPC_SANITIZE, "SANITIZE" }, 48 { SPDK_NVME_OPC_GET_LBA_STATUS, "GET LBA STATUS" }, 49 { SPDK_OCSSD_OPC_GEOMETRY, "OCSSD / GEOMETRY" }, 50 { 0xFFFF, "ADMIN COMMAND" } 51 }; 52 53 static const struct nvme_string fabric_opcode[] = { 54 { SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET, "PROPERTY SET" }, 55 { SPDK_NVMF_FABRIC_COMMAND_CONNECT, "CONNECT" }, 56 { SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET, "PROPERTY GET" }, 57 { SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND, "AUTHENTICATION SEND" }, 58 { SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV, "AUTHENTICATION RECV" }, 59 { 0xFFFF, "RESERVED / VENDOR SPECIFIC" } 60 }; 61 62 static const struct nvme_string feat_opcode[] = { 63 { SPDK_NVME_FEAT_ARBITRATION, "ARBITRATION" }, 64 { SPDK_NVME_FEAT_POWER_MANAGEMENT, "POWER MANAGEMENT" }, 65 { SPDK_NVME_FEAT_LBA_RANGE_TYPE, "LBA RANGE TYPE" }, 66 { SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD, "TEMPERATURE THRESHOLD" }, 67 { SPDK_NVME_FEAT_ERROR_RECOVERY, "ERROR_RECOVERY" }, 68 { SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE, "VOLATILE WRITE CACHE" }, 69 { SPDK_NVME_FEAT_NUMBER_OF_QUEUES, "NUMBER OF QUEUES" }, 70 { SPDK_NVME_FEAT_INTERRUPT_COALESCING, "INTERRUPT COALESCING" }, 71 { SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION, "INTERRUPT VECTOR CONFIGURATION" }, 72 { SPDK_NVME_FEAT_WRITE_ATOMICITY, "WRITE ATOMICITY" }, 73 { SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION, "ASYNC EVENT CONFIGURATION" }, 74 { SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION, "AUTONOMOUS POWER STATE TRANSITION" }, 75 { SPDK_NVME_FEAT_HOST_MEM_BUFFER, "HOST MEM BUFFER" }, 76 { SPDK_NVME_FEAT_TIMESTAMP, "TIMESTAMP" }, 77 { SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, "KEEP ALIVE TIMER" }, 78 { SPDK_NVME_FEAT_HOST_CONTROLLED_THERMAL_MANAGEMENT, "HOST CONTROLLED THERMAL MANAGEMENT" }, 79 { SPDK_NVME_FEAT_NON_OPERATIONAL_POWER_STATE_CONFIG, "NON OPERATIONAL POWER STATE CONFIG" }, 80 { SPDK_NVME_FEAT_SOFTWARE_PROGRESS_MARKER, "SOFTWARE PROGRESS MARKER" }, 81 { SPDK_NVME_FEAT_HOST_IDENTIFIER, "HOST IDENTIFIER" }, 82 { SPDK_NVME_FEAT_HOST_RESERVE_MASK, "HOST RESERVE MASK" }, 83 { SPDK_NVME_FEAT_HOST_RESERVE_PERSIST, "HOST RESERVE PERSIST" }, 84 { 0xFFFF, "RESERVED" } 85 }; 86 87 static const struct nvme_string io_opcode[] = { 88 { SPDK_NVME_OPC_FLUSH, "FLUSH" }, 89 { SPDK_NVME_OPC_WRITE, "WRITE" }, 90 { SPDK_NVME_OPC_READ, "READ" }, 91 { SPDK_NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" }, 92 { SPDK_NVME_OPC_COMPARE, "COMPARE" }, 93 { SPDK_NVME_OPC_WRITE_ZEROES, "WRITE ZEROES" }, 94 { SPDK_NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" }, 95 { SPDK_NVME_OPC_RESERVATION_REGISTER, "RESERVATION REGISTER" }, 96 { SPDK_NVME_OPC_RESERVATION_REPORT, "RESERVATION REPORT" }, 97 { SPDK_NVME_OPC_RESERVATION_ACQUIRE, "RESERVATION ACQUIRE" }, 98 { SPDK_NVME_OPC_RESERVATION_RELEASE, "RESERVATION RELEASE" }, 99 { SPDK_OCSSD_OPC_VECTOR_RESET, "OCSSD / VECTOR RESET" }, 100 { SPDK_OCSSD_OPC_VECTOR_WRITE, "OCSSD / VECTOR WRITE" }, 101 { SPDK_OCSSD_OPC_VECTOR_READ, "OCSSD / VECTOR READ" }, 102 { SPDK_OCSSD_OPC_VECTOR_COPY, "OCSSD / VECTOR COPY" }, 103 { 0xFFFF, "IO COMMAND" } 104 }; 105 106 static const struct nvme_string sgl_type[] = { 107 { SPDK_NVME_SGL_TYPE_DATA_BLOCK, "DATA BLOCK" }, 108 { SPDK_NVME_SGL_TYPE_BIT_BUCKET, "BIT BUCKET" }, 109 { SPDK_NVME_SGL_TYPE_SEGMENT, "SEGMENT" }, 110 { SPDK_NVME_SGL_TYPE_LAST_SEGMENT, "LAST SEGMENT" }, 111 { SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK, "KEYED DATA BLOCK" }, 112 { SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK, "TRANSPORT DATA BLOCK" }, 113 { SPDK_NVME_SGL_TYPE_VENDOR_SPECIFIC, "VENDOR SPECIFIC" }, 114 { 0xFFFF, "RESERVED" } 115 }; 116 117 static const struct nvme_string sgl_subtype[] = { 118 { SPDK_NVME_SGL_SUBTYPE_ADDRESS, "ADDRESS" }, 119 { SPDK_NVME_SGL_SUBTYPE_OFFSET, "OFFSET" }, 120 { SPDK_NVME_SGL_SUBTYPE_TRANSPORT, "TRANSPORT" }, 121 { SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY, "INVALIDATE KEY" }, 122 { 0xFFFF, "RESERVED" } 123 }; 124 125 static const char * 126 nvme_get_string(const struct nvme_string *strings, uint16_t value) 127 { 128 const struct nvme_string *entry; 129 130 entry = strings; 131 132 while (entry->value != 0xFFFF) { 133 if (entry->value == value) { 134 return entry->str; 135 } 136 entry++; 137 } 138 return entry->str; 139 } 140 141 static void 142 nvme_get_sgl_unkeyed(char *buf, size_t size, struct spdk_nvme_cmd *cmd) 143 { 144 struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1; 145 146 snprintf(buf, size, " len:0x%x", sgl->unkeyed.length); 147 } 148 149 static void 150 nvme_get_sgl_keyed(char *buf, size_t size, struct spdk_nvme_cmd *cmd) 151 { 152 struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1; 153 154 snprintf(buf, size, " len:0x%x key:0x%x", sgl->keyed.length, sgl->keyed.key); 155 } 156 157 static void 158 nvme_get_sgl(char *buf, size_t size, struct spdk_nvme_cmd *cmd) 159 { 160 struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1; 161 int c; 162 163 c = snprintf(buf, size, "SGL %s %s 0x%" PRIx64, nvme_get_string(sgl_type, sgl->generic.type), 164 nvme_get_string(sgl_subtype, sgl->generic.subtype), sgl->address); 165 assert(c >= 0 && (size_t)c < size); 166 167 if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK) { 168 nvme_get_sgl_unkeyed(buf + c, size - c, cmd); 169 } 170 171 if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) { 172 nvme_get_sgl_keyed(buf + c, size - c, cmd); 173 } 174 } 175 176 static void 177 nvme_get_prp(char *buf, size_t size, struct spdk_nvme_cmd *cmd) 178 { 179 snprintf(buf, size, "PRP1 0x%" PRIx64 " PRP2 0x%" PRIx64, cmd->dptr.prp.prp1, cmd->dptr.prp.prp2); 180 } 181 182 static void 183 nvme_get_dptr(char *buf, size_t size, struct spdk_nvme_cmd *cmd) 184 { 185 if (spdk_nvme_opc_get_data_transfer(cmd->opc) != SPDK_NVME_DATA_NONE) { 186 switch (cmd->psdt) { 187 case SPDK_NVME_PSDT_PRP: 188 nvme_get_prp(buf, size, cmd); 189 break; 190 case SPDK_NVME_PSDT_SGL_MPTR_CONTIG: 191 case SPDK_NVME_PSDT_SGL_MPTR_SGL: 192 nvme_get_sgl(buf, size, cmd); 193 break; 194 default: 195 ; 196 } 197 } 198 } 199 200 static void 201 nvme_admin_qpair_print_command(uint16_t qid, struct spdk_nvme_cmd *cmd) 202 { 203 struct spdk_nvmf_capsule_cmd *fcmd = (void *)cmd; 204 char dptr[NVME_CMD_DPTR_STR_SIZE] = {'\0'}; 205 206 assert(cmd != NULL); 207 208 nvme_get_dptr(dptr, sizeof(dptr), cmd); 209 210 switch ((int)cmd->opc) { 211 case SPDK_NVME_OPC_SET_FEATURES: 212 case SPDK_NVME_OPC_GET_FEATURES: 213 SPDK_NOTICELOG("%s %s cid:%d cdw10:%08x %s\n", 214 nvme_get_string(admin_opcode, cmd->opc), nvme_get_string(feat_opcode, 215 cmd->cdw10_bits.set_features.fid), cmd->cid, cmd->cdw10, dptr); 216 break; 217 case SPDK_NVME_OPC_FABRIC: 218 SPDK_NOTICELOG("%s %s qid:%d cid:%d %s\n", 219 nvme_get_string(admin_opcode, cmd->opc), nvme_get_string(fabric_opcode, fcmd->fctype), qid, 220 fcmd->cid, dptr); 221 break; 222 default: 223 SPDK_NOTICELOG("%s (%02x) qid:%d cid:%d nsid:%x cdw10:%08x cdw11:%08x %s\n", 224 nvme_get_string(admin_opcode, cmd->opc), cmd->opc, qid, cmd->cid, cmd->nsid, cmd->cdw10, 225 cmd->cdw11, dptr); 226 } 227 } 228 229 static void 230 nvme_io_qpair_print_command(uint16_t qid, struct spdk_nvme_cmd *cmd) 231 { 232 char dptr[NVME_CMD_DPTR_STR_SIZE] = {'\0'}; 233 234 assert(cmd != NULL); 235 236 nvme_get_dptr(dptr, sizeof(dptr), cmd); 237 238 switch ((int)cmd->opc) { 239 case SPDK_NVME_OPC_WRITE: 240 case SPDK_NVME_OPC_READ: 241 case SPDK_NVME_OPC_WRITE_UNCORRECTABLE: 242 case SPDK_NVME_OPC_COMPARE: 243 SPDK_NOTICELOG("%s sqid:%d cid:%d nsid:%d " 244 "lba:%llu len:%d %s\n", 245 nvme_get_string(io_opcode, cmd->opc), qid, cmd->cid, cmd->nsid, 246 ((unsigned long long)cmd->cdw11 << 32) + cmd->cdw10, 247 (cmd->cdw12 & 0xFFFF) + 1, dptr); 248 break; 249 case SPDK_NVME_OPC_FLUSH: 250 case SPDK_NVME_OPC_DATASET_MANAGEMENT: 251 SPDK_NOTICELOG("%s sqid:%d cid:%d nsid:%d\n", 252 nvme_get_string(io_opcode, cmd->opc), qid, cmd->cid, cmd->nsid); 253 break; 254 default: 255 SPDK_NOTICELOG("%s (%02x) sqid:%d cid:%d nsid:%d\n", 256 nvme_get_string(io_opcode, cmd->opc), cmd->opc, qid, cmd->cid, cmd->nsid); 257 break; 258 } 259 } 260 261 void 262 spdk_nvme_print_command(uint16_t qid, struct spdk_nvme_cmd *cmd) 263 { 264 assert(cmd != NULL); 265 266 if (qid == 0 || cmd->opc == SPDK_NVME_OPC_FABRIC) { 267 nvme_admin_qpair_print_command(qid, cmd); 268 } else { 269 nvme_io_qpair_print_command(qid, cmd); 270 } 271 } 272 273 void 274 spdk_nvme_qpair_print_command(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd) 275 { 276 assert(qpair != NULL); 277 assert(cmd != NULL); 278 279 spdk_nvme_print_command(qpair->id, cmd); 280 } 281 282 static const struct nvme_string generic_status[] = { 283 { SPDK_NVME_SC_SUCCESS, "SUCCESS" }, 284 { SPDK_NVME_SC_INVALID_OPCODE, "INVALID OPCODE" }, 285 { SPDK_NVME_SC_INVALID_FIELD, "INVALID FIELD" }, 286 { SPDK_NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" }, 287 { SPDK_NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" }, 288 { SPDK_NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" }, 289 { SPDK_NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" }, 290 { SPDK_NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" }, 291 { SPDK_NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" }, 292 { SPDK_NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" }, 293 { SPDK_NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" }, 294 { SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" }, 295 { SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" }, 296 { SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR, "INVALID SGL SEGMENT DESCRIPTOR" }, 297 { SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS, "INVALID NUMBER OF SGL DESCRIPTORS" }, 298 { SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" }, 299 { SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" }, 300 { SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" }, 301 { SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF, "INVALID CONTROLLER MEMORY BUFFER" }, 302 { SPDK_NVME_SC_INVALID_PRP_OFFSET, "INVALID PRP OFFSET" }, 303 { SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" }, 304 { SPDK_NVME_SC_OPERATION_DENIED, "OPERATION DENIED" }, 305 { SPDK_NVME_SC_INVALID_SGL_OFFSET, "INVALID SGL OFFSET" }, 306 { SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT, "HOSTID INCONSISTENT FORMAT" }, 307 { SPDK_NVME_SC_KEEP_ALIVE_EXPIRED, "KEEP ALIVE EXPIRED" }, 308 { SPDK_NVME_SC_KEEP_ALIVE_INVALID, "KEEP ALIVE INVALID" }, 309 { SPDK_NVME_SC_ABORTED_PREEMPT, "ABORTED - PREEMPT AND ABORT" }, 310 { SPDK_NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" }, 311 { SPDK_NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" }, 312 { SPDK_NVME_SC_SGL_DATA_BLOCK_GRANULARITY_INVALID, "DATA BLOCK GRANULARITY INVALID" }, 313 { SPDK_NVME_SC_COMMAND_INVALID_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" }, 314 { SPDK_NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" }, 315 { SPDK_NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" }, 316 { SPDK_NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" }, 317 { SPDK_NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" }, 318 { SPDK_NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" }, 319 { 0xFFFF, "GENERIC" } 320 }; 321 322 static const struct nvme_string command_specific_status[] = { 323 { SPDK_NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" }, 324 { SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" }, 325 { SPDK_NVME_SC_INVALID_QUEUE_SIZE, "INVALID QUEUE SIZE" }, 326 { SPDK_NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" }, 327 { SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" }, 328 { SPDK_NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" }, 329 { SPDK_NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" }, 330 { SPDK_NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" }, 331 { SPDK_NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" }, 332 { SPDK_NVME_SC_INVALID_FORMAT, "INVALID FORMAT" }, 333 { SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET, "FIRMWARE REQUIRES CONVENTIONAL RESET" }, 334 { SPDK_NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" }, 335 { SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE, "FEATURE ID NOT SAVEABLE" }, 336 { SPDK_NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" }, 337 { SPDK_NVME_SC_FEATURE_NOT_NAMESPACE_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" }, 338 { SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET, "FIRMWARE REQUIRES NVM RESET" }, 339 { SPDK_NVME_SC_FIRMWARE_REQ_RESET, "FIRMWARE REQUIRES RESET" }, 340 { SPDK_NVME_SC_FIRMWARE_REQ_MAX_TIME_VIOLATION, "FIRMWARE REQUIRES MAX TIME VIOLATION" }, 341 { SPDK_NVME_SC_FIRMWARE_ACTIVATION_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" }, 342 { SPDK_NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" }, 343 { SPDK_NVME_SC_NAMESPACE_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" }, 344 { SPDK_NVME_SC_NAMESPACE_ID_UNAVAILABLE, "NAMESPACE ID UNAVAILABLE" }, 345 { SPDK_NVME_SC_NAMESPACE_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" }, 346 { SPDK_NVME_SC_NAMESPACE_IS_PRIVATE, "NAMESPACE IS PRIVATE" }, 347 { SPDK_NVME_SC_NAMESPACE_NOT_ATTACHED, "NAMESPACE NOT ATTACHED" }, 348 { SPDK_NVME_SC_THINPROVISIONING_NOT_SUPPORTED, "THINPROVISIONING NOT SUPPORTED" }, 349 { SPDK_NVME_SC_CONTROLLER_LIST_INVALID, "CONTROLLER LIST INVALID" }, 350 { SPDK_NVME_SC_DEVICE_SELF_TEST_IN_PROGRESS, "DEVICE SELF-TEST IN PROGRESS" }, 351 { SPDK_NVME_SC_BOOT_PARTITION_WRITE_PROHIBITED, "BOOT PARTITION WRITE PROHIBITED" }, 352 { SPDK_NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER ID" }, 353 { SPDK_NVME_SC_INVALID_SECONDARY_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" }, 354 { SPDK_NVME_SC_INVALID_NUM_CTRLR_RESOURCES, "INVALID NUMBER OF CONTROLLER RESOURCES" }, 355 { SPDK_NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" }, 356 { SPDK_NVME_SC_STREAM_RESOURCE_ALLOCATION_FAILED, "STREAM RESOURCE ALLOCATION FAILED"}, 357 { SPDK_NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" }, 358 { SPDK_NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" }, 359 { SPDK_NVME_SC_ATTEMPTED_WRITE_TO_RO_RANGE, "WRITE TO RO RANGE" }, 360 { 0xFFFF, "COMMAND SPECIFIC" } 361 }; 362 363 static const struct nvme_string media_error_status[] = { 364 { SPDK_NVME_SC_WRITE_FAULTS, "WRITE FAULTS" }, 365 { SPDK_NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" }, 366 { SPDK_NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" }, 367 { SPDK_NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" }, 368 { SPDK_NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" }, 369 { SPDK_NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" }, 370 { SPDK_NVME_SC_ACCESS_DENIED, "ACCESS DENIED" }, 371 { SPDK_NVME_SC_DEALLOCATED_OR_UNWRITTEN_BLOCK, "DEALLOCATED OR UNWRITTEN BLOCK" }, 372 { SPDK_OCSSD_SC_OFFLINE_CHUNK, "RESET OFFLINE CHUNK" }, 373 { SPDK_OCSSD_SC_INVALID_RESET, "INVALID RESET" }, 374 { SPDK_OCSSD_SC_WRITE_FAIL_WRITE_NEXT_UNIT, "WRITE FAIL WRITE NEXT UNIT" }, 375 { SPDK_OCSSD_SC_WRITE_FAIL_CHUNK_EARLY_CLOSE, "WRITE FAIL CHUNK EARLY CLOSE" }, 376 { SPDK_OCSSD_SC_OUT_OF_ORDER_WRITE, "OUT OF ORDER WRITE" }, 377 { SPDK_OCSSD_SC_READ_HIGH_ECC, "READ HIGH ECC" }, 378 { 0xFFFF, "MEDIA ERROR" } 379 }; 380 381 static const struct nvme_string path_status[] = { 382 { SPDK_NVME_SC_INTERNAL_PATH_ERROR, "INTERNAL PATH ERROR" }, 383 { SPDK_NVME_SC_CONTROLLER_PATH_ERROR, "CONTROLLER PATH ERROR" }, 384 { SPDK_NVME_SC_HOST_PATH_ERROR, "HOST PATH ERROR" }, 385 { SPDK_NVME_SC_ABORTED_BY_HOST, "ABORTED BY HOST" }, 386 { 0xFFFF, "PATH ERROR" } 387 }; 388 389 const char * 390 spdk_nvme_cpl_get_status_string(const struct spdk_nvme_status *status) 391 { 392 const struct nvme_string *entry; 393 394 switch (status->sct) { 395 case SPDK_NVME_SCT_GENERIC: 396 entry = generic_status; 397 break; 398 case SPDK_NVME_SCT_COMMAND_SPECIFIC: 399 entry = command_specific_status; 400 break; 401 case SPDK_NVME_SCT_MEDIA_ERROR: 402 entry = media_error_status; 403 break; 404 case SPDK_NVME_SCT_PATH: 405 entry = path_status; 406 break; 407 case SPDK_NVME_SCT_VENDOR_SPECIFIC: 408 return "VENDOR SPECIFIC"; 409 default: 410 return "RESERVED"; 411 } 412 413 return nvme_get_string(entry, status->sc); 414 } 415 416 void 417 spdk_nvme_print_completion(uint16_t qid, struct spdk_nvme_cpl *cpl) 418 { 419 assert(cpl != NULL); 420 421 /* Check that sqid matches qid. Note that sqid is reserved 422 * for fabrics so don't print an error when sqid is 0. */ 423 if (cpl->sqid != qid && cpl->sqid != 0) { 424 SPDK_ERRLOG("sqid %u doesn't match qid\n", cpl->sqid); 425 } 426 427 SPDK_NOTICELOG("%s (%02x/%02x) qid:%d cid:%d cdw0:%x sqhd:%04x p:%x m:%x dnr:%x\n", 428 spdk_nvme_cpl_get_status_string(&cpl->status), 429 cpl->status.sct, cpl->status.sc, qid, cpl->cid, cpl->cdw0, 430 cpl->sqhd, cpl->status.p, cpl->status.m, cpl->status.dnr); 431 } 432 433 void 434 spdk_nvme_qpair_print_completion(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cpl *cpl) 435 { 436 spdk_nvme_print_completion(qpair->id, cpl); 437 } 438 439 bool 440 nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl) 441 { 442 /* 443 * TODO: spec is not clear how commands that are aborted due 444 * to TLER will be marked. So for now, it seems 445 * NAMESPACE_NOT_READY is the only case where we should 446 * look at the DNR bit. 447 */ 448 switch ((int)cpl->status.sct) { 449 case SPDK_NVME_SCT_GENERIC: 450 switch ((int)cpl->status.sc) { 451 case SPDK_NVME_SC_NAMESPACE_NOT_READY: 452 case SPDK_NVME_SC_FORMAT_IN_PROGRESS: 453 if (cpl->status.dnr) { 454 return false; 455 } else { 456 return true; 457 } 458 case SPDK_NVME_SC_INVALID_OPCODE: 459 case SPDK_NVME_SC_INVALID_FIELD: 460 case SPDK_NVME_SC_COMMAND_ID_CONFLICT: 461 case SPDK_NVME_SC_DATA_TRANSFER_ERROR: 462 case SPDK_NVME_SC_ABORTED_POWER_LOSS: 463 case SPDK_NVME_SC_INTERNAL_DEVICE_ERROR: 464 case SPDK_NVME_SC_ABORTED_BY_REQUEST: 465 case SPDK_NVME_SC_ABORTED_SQ_DELETION: 466 case SPDK_NVME_SC_ABORTED_FAILED_FUSED: 467 case SPDK_NVME_SC_ABORTED_MISSING_FUSED: 468 case SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT: 469 case SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR: 470 case SPDK_NVME_SC_LBA_OUT_OF_RANGE: 471 case SPDK_NVME_SC_CAPACITY_EXCEEDED: 472 default: 473 return false; 474 } 475 case SPDK_NVME_SCT_PATH: 476 /* 477 * Per NVMe TP 4028 (Path and Transport Error Enhancements), retries should be 478 * based on the setting of the DNR bit for Internal Path Error 479 */ 480 switch ((int)cpl->status.sc) { 481 case SPDK_NVME_SC_INTERNAL_PATH_ERROR: 482 return !cpl->status.dnr; 483 default: 484 return false; 485 } 486 case SPDK_NVME_SCT_COMMAND_SPECIFIC: 487 case SPDK_NVME_SCT_MEDIA_ERROR: 488 case SPDK_NVME_SCT_VENDOR_SPECIFIC: 489 default: 490 return false; 491 } 492 } 493 494 static void 495 nvme_qpair_manual_complete_request(struct spdk_nvme_qpair *qpair, 496 struct nvme_request *req, uint32_t sct, uint32_t sc, 497 uint32_t dnr, bool print_on_error) 498 { 499 struct spdk_nvme_cpl cpl; 500 bool error; 501 502 memset(&cpl, 0, sizeof(cpl)); 503 cpl.sqid = qpair->id; 504 cpl.status.sct = sct; 505 cpl.status.sc = sc; 506 cpl.status.dnr = dnr; 507 508 error = spdk_nvme_cpl_is_error(&cpl); 509 510 if (error && print_on_error && !qpair->ctrlr->opts.disable_error_logging) { 511 SPDK_NOTICELOG("Command completed manually:\n"); 512 spdk_nvme_qpair_print_command(qpair, &req->cmd); 513 spdk_nvme_qpair_print_completion(qpair, &cpl); 514 } 515 516 nvme_complete_request(req->cb_fn, req->cb_arg, qpair, req, &cpl); 517 nvme_free_request(req); 518 } 519 520 void 521 nvme_qpair_abort_queued_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr) 522 { 523 struct nvme_request *req; 524 STAILQ_HEAD(, nvme_request) tmp; 525 526 STAILQ_INIT(&tmp); 527 STAILQ_SWAP(&tmp, &qpair->queued_req, nvme_request); 528 529 while (!STAILQ_EMPTY(&tmp)) { 530 req = STAILQ_FIRST(&tmp); 531 STAILQ_REMOVE_HEAD(&tmp, stailq); 532 if (!qpair->ctrlr->opts.disable_error_logging) { 533 SPDK_ERRLOG("aborting queued i/o\n"); 534 } 535 nvme_qpair_manual_complete_request(qpair, req, SPDK_NVME_SCT_GENERIC, 536 SPDK_NVME_SC_ABORTED_SQ_DELETION, dnr, true); 537 } 538 } 539 540 /* The callback to a request may submit the next request which is queued and 541 * then the same callback may abort it immediately. This repetition may cause 542 * infinite recursive calls. Hence move aborting requests to another list here 543 * and abort them later at resubmission. 544 */ 545 static void 546 _nvme_qpair_complete_abort_queued_reqs(struct spdk_nvme_qpair *qpair) 547 { 548 struct nvme_request *req; 549 STAILQ_HEAD(, nvme_request) tmp; 550 551 if (spdk_likely(STAILQ_EMPTY(&qpair->aborting_queued_req))) { 552 return; 553 } 554 555 STAILQ_INIT(&tmp); 556 STAILQ_SWAP(&tmp, &qpair->aborting_queued_req, nvme_request); 557 558 while (!STAILQ_EMPTY(&tmp)) { 559 req = STAILQ_FIRST(&tmp); 560 STAILQ_REMOVE_HEAD(&tmp, stailq); 561 nvme_qpair_manual_complete_request(qpair, req, SPDK_NVME_SCT_GENERIC, 562 SPDK_NVME_SC_ABORTED_BY_REQUEST, 1, true); 563 } 564 } 565 566 uint32_t 567 nvme_qpair_abort_queued_reqs_with_cbarg(struct spdk_nvme_qpair *qpair, void *cmd_cb_arg) 568 { 569 struct nvme_request *req, *tmp; 570 uint32_t aborting = 0; 571 572 STAILQ_FOREACH_SAFE(req, &qpair->queued_req, stailq, tmp) { 573 if ((req->cb_arg != cmd_cb_arg) && 574 (req->parent == NULL || req->parent->cb_arg != cmd_cb_arg)) { 575 continue; 576 } 577 578 STAILQ_REMOVE(&qpair->queued_req, req, nvme_request, stailq); 579 STAILQ_INSERT_TAIL(&qpair->aborting_queued_req, req, stailq); 580 if (!qpair->ctrlr->opts.disable_error_logging) { 581 SPDK_ERRLOG("aborting queued i/o\n"); 582 } 583 aborting++; 584 } 585 586 return aborting; 587 } 588 589 static inline bool 590 nvme_qpair_check_enabled(struct spdk_nvme_qpair *qpair) 591 { 592 struct nvme_request *req; 593 594 /* 595 * Either during initial connect or reset, the qpair should follow the given state machine. 596 * QPAIR_DISABLED->QPAIR_CONNECTING->QPAIR_CONNECTED->QPAIR_ENABLING->QPAIR_ENABLED. In the 597 * reset case, once the qpair is properly connected, we need to abort any outstanding requests 598 * from the old transport connection and encourage the application to retry them. We also need 599 * to submit any queued requests that built up while we were in the connected or enabling state. 600 */ 601 if (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTED && !qpair->ctrlr->is_resetting) { 602 nvme_qpair_set_state(qpair, NVME_QPAIR_ENABLING); 603 /* 604 * PCIe is special, for fabrics transports, we can abort requests before disconnect during reset 605 * but we have historically not disconnected pcie qpairs during reset so we have to abort requests 606 * here. 607 */ 608 if (qpair->ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE && 609 !qpair->is_new_qpair) { 610 nvme_qpair_abort_all_queued_reqs(qpair, 0); 611 nvme_transport_qpair_abort_reqs(qpair, 0); 612 } 613 614 nvme_qpair_set_state(qpair, NVME_QPAIR_ENABLED); 615 while (!STAILQ_EMPTY(&qpair->queued_req)) { 616 req = STAILQ_FIRST(&qpair->queued_req); 617 STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq); 618 if (nvme_qpair_resubmit_request(qpair, req)) { 619 break; 620 } 621 } 622 } 623 624 /* 625 * When doing a reset, we must disconnect the qpair on the proper core. 626 * Note, reset is the only case where we set the failure reason without 627 * setting the qpair state since reset is done at the generic layer on the 628 * controller thread and we can't disconnect I/O qpairs from the controller 629 * thread. 630 */ 631 if (qpair->transport_failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE && 632 nvme_qpair_get_state(qpair) == NVME_QPAIR_ENABLED) { 633 /* Don't disconnect PCIe qpairs. They are a special case for reset. */ 634 if (qpair->ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) { 635 nvme_ctrlr_disconnect_qpair(qpair); 636 } 637 return false; 638 } 639 640 return nvme_qpair_get_state(qpair) == NVME_QPAIR_ENABLED; 641 } 642 643 void 644 nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_requests) 645 { 646 uint32_t i; 647 int resubmit_rc; 648 struct nvme_request *req; 649 650 assert(num_requests > 0); 651 652 for (i = 0; i < num_requests; i++) { 653 if (qpair->ctrlr->is_resetting) { 654 break; 655 } 656 if ((req = STAILQ_FIRST(&qpair->queued_req)) == NULL) { 657 break; 658 } 659 STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq); 660 resubmit_rc = nvme_qpair_resubmit_request(qpair, req); 661 if (spdk_unlikely(resubmit_rc != 0)) { 662 SPDK_DEBUGLOG(nvme, "Unable to resubmit as many requests as we completed.\n"); 663 break; 664 } 665 } 666 667 _nvme_qpair_complete_abort_queued_reqs(qpair); 668 } 669 670 static void 671 nvme_complete_register_operations(struct spdk_nvme_qpair *qpair) 672 { 673 struct nvme_register_completion *ctx; 674 struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr; 675 STAILQ_HEAD(, nvme_register_completion) operations; 676 677 STAILQ_INIT(&operations); 678 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 679 STAILQ_SWAP(&ctrlr->register_operations, &operations, nvme_register_completion); 680 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 681 682 while (!STAILQ_EMPTY(&operations)) { 683 ctx = STAILQ_FIRST(&operations); 684 STAILQ_REMOVE_HEAD(&operations, stailq); 685 if (ctx->cb_fn != NULL) { 686 ctx->cb_fn(ctx->cb_ctx, ctx->value, &ctx->cpl); 687 } 688 free(ctx); 689 } 690 } 691 692 int32_t 693 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions) 694 { 695 int32_t ret; 696 struct nvme_request *req, *tmp; 697 698 /* Complete any pending register operations */ 699 if (nvme_qpair_is_admin_queue(qpair)) { 700 nvme_complete_register_operations(qpair); 701 } 702 703 if (spdk_unlikely(qpair->ctrlr->is_failed && 704 nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTING)) { 705 if (qpair->ctrlr->is_removed) { 706 nvme_qpair_set_state(qpair, NVME_QPAIR_DESTROYING); 707 nvme_qpair_abort_all_queued_reqs(qpair, 0); 708 nvme_transport_qpair_abort_reqs(qpair, 0); 709 } 710 return -ENXIO; 711 } 712 713 if (spdk_unlikely(!nvme_qpair_check_enabled(qpair) && 714 !(nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING || 715 nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING))) { 716 /* 717 * qpair is not enabled, likely because a controller reset is 718 * in progress. 719 */ 720 return -ENXIO; 721 } 722 723 /* error injection for those queued error requests */ 724 if (spdk_unlikely(!STAILQ_EMPTY(&qpair->err_req_head))) { 725 STAILQ_FOREACH_SAFE(req, &qpair->err_req_head, stailq, tmp) { 726 if (spdk_get_ticks() - req->submit_tick > req->timeout_tsc) { 727 STAILQ_REMOVE(&qpair->err_req_head, req, nvme_request, stailq); 728 nvme_qpair_manual_complete_request(qpair, req, 729 req->cpl.status.sct, 730 req->cpl.status.sc, 0, true); 731 } 732 } 733 } 734 735 qpair->in_completion_context = 1; 736 ret = nvme_transport_qpair_process_completions(qpair, max_completions); 737 if (ret < 0) { 738 if (ret == -ENXIO && nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING) { 739 ret = 0; 740 } else { 741 SPDK_ERRLOG("CQ transport error %d (%s) on qpair id %hu\n", 742 ret, spdk_strerror(-ret), qpair->id); 743 if (nvme_qpair_is_admin_queue(qpair)) { 744 nvme_ctrlr_fail(qpair->ctrlr, false); 745 } 746 } 747 } 748 qpair->in_completion_context = 0; 749 if (qpair->delete_after_completion_context) { 750 /* 751 * A request to delete this qpair was made in the context of this completion 752 * routine - so it is safe to delete it now. 753 */ 754 spdk_nvme_ctrlr_free_io_qpair(qpair); 755 return ret; 756 } 757 758 /* 759 * At this point, ret must represent the number of completions we reaped. 760 * submit as many queued requests as we completed. 761 */ 762 if (ret > 0) { 763 nvme_qpair_resubmit_requests(qpair, ret); 764 } 765 766 return ret; 767 } 768 769 spdk_nvme_qp_failure_reason 770 spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair) 771 { 772 return qpair->transport_failure_reason; 773 } 774 775 int 776 nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id, 777 struct spdk_nvme_ctrlr *ctrlr, 778 enum spdk_nvme_qprio qprio, 779 uint32_t num_requests, bool async) 780 { 781 size_t req_size_padded; 782 uint32_t i; 783 784 qpair->id = id; 785 qpair->qprio = qprio; 786 787 qpair->in_completion_context = 0; 788 qpair->delete_after_completion_context = 0; 789 qpair->no_deletion_notification_needed = 0; 790 791 qpair->ctrlr = ctrlr; 792 qpair->trtype = ctrlr->trid.trtype; 793 qpair->is_new_qpair = true; 794 qpair->async = async; 795 qpair->poll_status = NULL; 796 797 STAILQ_INIT(&qpair->free_req); 798 STAILQ_INIT(&qpair->queued_req); 799 STAILQ_INIT(&qpair->aborting_queued_req); 800 TAILQ_INIT(&qpair->err_cmd_head); 801 STAILQ_INIT(&qpair->err_req_head); 802 803 req_size_padded = (sizeof(struct nvme_request) + 63) & ~(size_t)63; 804 805 /* Add one for the reserved_req */ 806 num_requests++; 807 808 qpair->req_buf = spdk_zmalloc(req_size_padded * num_requests, 64, NULL, 809 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE); 810 if (qpair->req_buf == NULL) { 811 SPDK_ERRLOG("no memory to allocate qpair(cntlid:0x%x sqid:%d) req_buf with %d request\n", 812 ctrlr->cntlid, qpair->id, num_requests); 813 return -ENOMEM; 814 } 815 816 for (i = 0; i < num_requests; i++) { 817 struct nvme_request *req = qpair->req_buf + i * req_size_padded; 818 819 req->qpair = qpair; 820 if (i == 0) { 821 qpair->reserved_req = req; 822 } else { 823 STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq); 824 } 825 } 826 827 return 0; 828 } 829 830 void 831 nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair) 832 { 833 struct nvme_request *req; 834 835 while (!STAILQ_EMPTY(&qpair->err_req_head)) { 836 req = STAILQ_FIRST(&qpair->err_req_head); 837 STAILQ_REMOVE_HEAD(&qpair->err_req_head, stailq); 838 nvme_qpair_manual_complete_request(qpair, req, 839 req->cpl.status.sct, 840 req->cpl.status.sc, 0, true); 841 } 842 } 843 844 void 845 nvme_qpair_deinit(struct spdk_nvme_qpair *qpair) 846 { 847 struct nvme_error_cmd *cmd, *entry; 848 849 nvme_qpair_abort_queued_reqs(qpair, 0); 850 _nvme_qpair_complete_abort_queued_reqs(qpair); 851 nvme_qpair_complete_error_reqs(qpair); 852 853 TAILQ_FOREACH_SAFE(cmd, &qpair->err_cmd_head, link, entry) { 854 TAILQ_REMOVE(&qpair->err_cmd_head, cmd, link); 855 spdk_free(cmd); 856 } 857 858 spdk_free(qpair->req_buf); 859 } 860 861 static inline int 862 _nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req) 863 { 864 int rc = 0; 865 struct nvme_request *child_req, *tmp; 866 struct nvme_error_cmd *cmd; 867 struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr; 868 bool child_req_failed = false; 869 870 nvme_qpair_check_enabled(qpair); 871 872 if (spdk_unlikely(nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED || 873 nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING || 874 nvme_qpair_get_state(qpair) == NVME_QPAIR_DESTROYING)) { 875 TAILQ_FOREACH_SAFE(child_req, &req->children, child_tailq, tmp) { 876 nvme_request_remove_child(req, child_req); 877 nvme_request_free_children(child_req); 878 nvme_free_request(child_req); 879 } 880 if (req->parent != NULL) { 881 nvme_request_remove_child(req->parent, req); 882 } 883 nvme_free_request(req); 884 return -ENXIO; 885 } 886 887 if (req->num_children) { 888 /* 889 * This is a split (parent) request. Submit all of the children but not the parent 890 * request itself, since the parent is the original unsplit request. 891 */ 892 TAILQ_FOREACH_SAFE(child_req, &req->children, child_tailq, tmp) { 893 if (spdk_likely(!child_req_failed)) { 894 rc = nvme_qpair_submit_request(qpair, child_req); 895 if (spdk_unlikely(rc != 0)) { 896 child_req_failed = true; 897 } 898 } else { /* free remaining child_reqs since one child_req fails */ 899 nvme_request_remove_child(req, child_req); 900 nvme_request_free_children(child_req); 901 nvme_free_request(child_req); 902 } 903 } 904 905 if (spdk_unlikely(child_req_failed)) { 906 /* part of children requests have been submitted, 907 * return success since we must wait for those children to complete, 908 * but set the parent request to failure. 909 */ 910 if (req->num_children) { 911 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 912 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 913 return 0; 914 } 915 goto error; 916 } 917 918 return rc; 919 } 920 921 /* queue those requests which matches with opcode in err_cmd list */ 922 if (spdk_unlikely(!TAILQ_EMPTY(&qpair->err_cmd_head))) { 923 TAILQ_FOREACH(cmd, &qpair->err_cmd_head, link) { 924 if (!cmd->do_not_submit) { 925 continue; 926 } 927 928 if ((cmd->opc == req->cmd.opc) && cmd->err_count) { 929 /* add to error request list and set cpl */ 930 req->timeout_tsc = cmd->timeout_tsc; 931 req->submit_tick = spdk_get_ticks(); 932 req->cpl.status.sct = cmd->status.sct; 933 req->cpl.status.sc = cmd->status.sc; 934 STAILQ_INSERT_TAIL(&qpair->err_req_head, req, stailq); 935 cmd->err_count--; 936 return 0; 937 } 938 } 939 } 940 941 if (spdk_unlikely(ctrlr->is_failed)) { 942 rc = -ENXIO; 943 goto error; 944 } 945 946 /* assign submit_tick before submitting req to specific transport */ 947 if (spdk_unlikely(ctrlr->timeout_enabled)) { 948 if (req->submit_tick == 0) { /* req submitted for the first time */ 949 req->submit_tick = spdk_get_ticks(); 950 req->timed_out = false; 951 } 952 } else { 953 req->submit_tick = 0; 954 } 955 956 /* Allow two cases: 957 * 1. NVMe qpair is enabled. 958 * 2. Always allow fabrics commands through - these get 959 * the controller out of reset state. 960 */ 961 if (spdk_likely(nvme_qpair_get_state(qpair) == NVME_QPAIR_ENABLED) || 962 (req->cmd.opc == SPDK_NVME_OPC_FABRIC && 963 nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING)) { 964 rc = nvme_transport_qpair_submit_request(qpair, req); 965 } else { 966 /* The controller is being reset - queue this request and 967 * submit it later when the reset is completed. 968 */ 969 return -EAGAIN; 970 } 971 972 if (spdk_likely(rc == 0)) { 973 if (SPDK_DEBUGLOG_FLAG_ENABLED("nvme")) { 974 spdk_nvme_print_command(qpair->id, &req->cmd); 975 } 976 req->queued = false; 977 return 0; 978 } 979 980 if (rc == -EAGAIN) { 981 return -EAGAIN; 982 } 983 984 error: 985 if (req->parent != NULL) { 986 nvme_request_remove_child(req->parent, req); 987 } 988 989 /* The request is from queued_req list we should trigger the callback from caller */ 990 if (spdk_unlikely(req->queued)) { 991 nvme_qpair_manual_complete_request(qpair, req, SPDK_NVME_SCT_GENERIC, 992 SPDK_NVME_SC_INTERNAL_DEVICE_ERROR, true, true); 993 return rc; 994 } 995 996 nvme_free_request(req); 997 998 return rc; 999 } 1000 1001 int 1002 nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req) 1003 { 1004 int rc; 1005 1006 if (spdk_unlikely(!STAILQ_EMPTY(&qpair->queued_req) && req->num_children == 0)) { 1007 /* 1008 * Requests that have no children should be sent to the transport after all 1009 * currently queued requests. Requests with children will be split and go back 1010 * through this path. We need to make an exception for the fabrics commands 1011 * while the qpair is connecting to be able to send the connect command 1012 * asynchronously. 1013 */ 1014 if (req->cmd.opc != SPDK_NVME_OPC_FABRIC || 1015 nvme_qpair_get_state(qpair) != NVME_QPAIR_CONNECTING) { 1016 STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq); 1017 req->queued = true; 1018 return 0; 1019 } 1020 } 1021 1022 rc = _nvme_qpair_submit_request(qpair, req); 1023 if (rc == -EAGAIN) { 1024 STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq); 1025 req->queued = true; 1026 rc = 0; 1027 } 1028 1029 return rc; 1030 } 1031 1032 static int 1033 nvme_qpair_resubmit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req) 1034 { 1035 int rc; 1036 1037 /* 1038 * We should never have a request with children on the queue. 1039 * This is necessary to preserve the 1:1 relationship between 1040 * completions and resubmissions. 1041 */ 1042 assert(req->num_children == 0); 1043 assert(req->queued); 1044 rc = _nvme_qpair_submit_request(qpair, req); 1045 if (spdk_unlikely(rc == -EAGAIN)) { 1046 STAILQ_INSERT_HEAD(&qpair->queued_req, req, stailq); 1047 } 1048 1049 return rc; 1050 } 1051 1052 void 1053 nvme_qpair_abort_all_queued_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr) 1054 { 1055 nvme_qpair_complete_error_reqs(qpair); 1056 nvme_qpair_abort_queued_reqs(qpair, dnr); 1057 _nvme_qpair_complete_abort_queued_reqs(qpair); 1058 } 1059 1060 int 1061 spdk_nvme_qpair_add_cmd_error_injection(struct spdk_nvme_ctrlr *ctrlr, 1062 struct spdk_nvme_qpair *qpair, 1063 uint8_t opc, bool do_not_submit, 1064 uint64_t timeout_in_us, 1065 uint32_t err_count, 1066 uint8_t sct, uint8_t sc) 1067 { 1068 struct nvme_error_cmd *entry, *cmd = NULL; 1069 int rc = 0; 1070 1071 if (qpair == NULL) { 1072 qpair = ctrlr->adminq; 1073 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 1074 } 1075 1076 TAILQ_FOREACH(entry, &qpair->err_cmd_head, link) { 1077 if (entry->opc == opc) { 1078 cmd = entry; 1079 break; 1080 } 1081 } 1082 1083 if (cmd == NULL) { 1084 cmd = spdk_zmalloc(sizeof(*cmd), 64, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 1085 if (!cmd) { 1086 rc = -ENOMEM; 1087 goto out; 1088 } 1089 TAILQ_INSERT_TAIL(&qpair->err_cmd_head, cmd, link); 1090 } 1091 1092 cmd->do_not_submit = do_not_submit; 1093 cmd->err_count = err_count; 1094 cmd->timeout_tsc = timeout_in_us * spdk_get_ticks_hz() / 1000000ULL; 1095 cmd->opc = opc; 1096 cmd->status.sct = sct; 1097 cmd->status.sc = sc; 1098 out: 1099 if (nvme_qpair_is_admin_queue(qpair)) { 1100 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 1101 } 1102 1103 return rc; 1104 } 1105 1106 void 1107 spdk_nvme_qpair_remove_cmd_error_injection(struct spdk_nvme_ctrlr *ctrlr, 1108 struct spdk_nvme_qpair *qpair, 1109 uint8_t opc) 1110 { 1111 struct nvme_error_cmd *cmd, *entry; 1112 1113 if (qpair == NULL) { 1114 qpair = ctrlr->adminq; 1115 nvme_robust_mutex_lock(&ctrlr->ctrlr_lock); 1116 } 1117 1118 TAILQ_FOREACH_SAFE(cmd, &qpair->err_cmd_head, link, entry) { 1119 if (cmd->opc == opc) { 1120 TAILQ_REMOVE(&qpair->err_cmd_head, cmd, link); 1121 spdk_free(cmd); 1122 break; 1123 } 1124 } 1125 1126 if (nvme_qpair_is_admin_queue(qpair)) { 1127 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock); 1128 } 1129 } 1130 1131 uint16_t 1132 spdk_nvme_qpair_get_id(struct spdk_nvme_qpair *qpair) 1133 { 1134 return qpair->id; 1135 } 1136