/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2015 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"

#include "common/lib/test_env.c"

pid_t g_spdk_nvme_pid;

bool trace_flag = false;
#define SPDK_LOG_NVME trace_flag

#include "nvme/nvme_qpair.c"

SPDK_LOG_REGISTER_COMPONENT(nvme)

struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

DEFINE_STUB_V(nvme_transport_qpair_abort_reqs, (struct spdk_nvme_qpair *qpair));
DEFINE_STUB(nvme_transport_qpair_submit_request, int,
	    (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
DEFINE_STUB(spdk_nvme_ctrlr_free_io_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
DEFINE_STUB_V(nvme_transport_ctrlr_disconnect_qpair, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair));
DEFINE_STUB_V(nvme_ctrlr_disconnect_qpair, (struct spdk_nvme_qpair *qpair));

DEFINE_STUB_V(nvme_ctrlr_complete_queued_async_events, (struct spdk_nvme_ctrlr *ctrlr));
DEFINE_STUB_V(nvme_ctrlr_abort_queued_aborts, (struct spdk_nvme_ctrlr *ctrlr));

void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
	if (hot_remove) {
		ctrlr->is_removed = true;
	}
	ctrlr->is_failed = true;
}

static bool g_called_transport_process_completions = false;
static int32_t g_transport_process_completions_rc = 0;
int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	g_called_transport_process_completions = true;
	return g_transport_process_completions_rc;
}

static void
prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
			    struct spdk_nvme_ctrlr *ctrlr)
{
	memset(ctrlr, 0, sizeof(*ctrlr));
	ctrlr->free_io_qids = NULL;
	TAILQ_INIT(&ctrlr->active_io_qpairs);
	TAILQ_INIT(&ctrlr->active_procs);
	MOCK_CLEAR(spdk_zmalloc);
	nvme_qpair_init(qpair, 1, ctrlr, 0, 32, false);
}

static void
cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
{
	free(qpair->req_buf);
}

static void
expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
}

static void
expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
}

static void
test3(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request *req;
	struct spdk_nvme_ctrlr ctrlr = {};

	qpair.state = NVME_QPAIR_ENABLED;
	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(&qpair, expected_success_callback, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);

	nvme_free_request(req);

	cleanup_submit_request_test(&qpair);
}

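/*
 * A request submitted to a qpair whose controller has failed must be
 * rejected. The request uses expected_failure_callback, so if it is
 * completed at all it must be completed with an error status.
 */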
static void
test_ctrlr_failed(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request *req;
	struct spdk_nvme_ctrlr ctrlr = {};
	char payload[4096];

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_contig(&qpair, payload, sizeof(payload), expected_failure_callback,
					   NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	/* Fail the controller, and mark it as resetting so that the qpair
	 * won't get re-enabled.
	 */
	ctrlr.is_failed = true;
	ctrlr.is_resetting = true;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);

	cleanup_submit_request_test(&qpair);
}

static void
struct_packing(void)
{
	/* ctrlr is the first field in nvme_qpair after the fields
	 * that are used in the I/O path. Make sure the I/O path fields
	 * all fit into two cache lines.
	 */
	CU_ASSERT(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128);
}

static int g_num_cb_failed = 0;
static int g_num_cb_passed = 0;

static void
dummy_cb_fn(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	if (cpl->status.sc == SPDK_NVME_SC_SUCCESS) {
		g_num_cb_passed++;
	} else {
		g_num_cb_failed++;
	}
}

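/*
 * Walk spdk_nvme_qpair_process_completions() through the qpair state
 * machine: failed and removed controllers, a disconnected qpair, an ongoing
 * reset, the ENABLING -> ENABLED transition, and transport errors on admin
 * vs. I/O qpairs. The transport is mocked by
 * nvme_transport_qpair_process_completions() above, so each branch is
 * driven purely by ctrlr/qpair state flags.
 */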
static void
test_nvme_qpair_process_completions(void)
{
	struct spdk_nvme_qpair admin_qp = {0};
	struct spdk_nvme_qpair qpair = {0};
	struct spdk_nvme_ctrlr ctrlr = {{0}};
	struct nvme_request dummy_1 = {{0}};
	struct nvme_request dummy_2 = {{0}};
	int rc;

	dummy_1.cb_fn = dummy_cb_fn;
	dummy_2.cb_fn = dummy_cb_fn;
	dummy_1.qpair = &qpair;
	dummy_2.qpair = &qpair;

	TAILQ_INIT(&ctrlr.active_io_qpairs);
	TAILQ_INIT(&ctrlr.active_procs);
	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
	nvme_qpair_init(&qpair, 1, &ctrlr, 0, 32, false);
	nvme_qpair_init(&admin_qp, 0, &ctrlr, 0, 32, false);

	ctrlr.adminq = &admin_qp;

	STAILQ_INIT(&qpair.queued_req);
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);
	qpair.num_outstanding_reqs = 2;

	/* If the controller is failed, return -ENXIO */
	ctrlr.is_failed = true;
	ctrlr.is_removed = false;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);
	CU_ASSERT(qpair.num_outstanding_reqs == 2);

	/* Same if the qpair is failed at the transport layer. */
	ctrlr.is_failed = false;
	ctrlr.is_removed = false;
	qpair.state = NVME_QPAIR_DISCONNECTED;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);
	CU_ASSERT(qpair.num_outstanding_reqs == 2);

	/* If the controller is removed, make sure we abort the requests. */
	ctrlr.is_failed = true;
	ctrlr.is_removed = true;
	qpair.state = NVME_QPAIR_CONNECTED;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 2);
	CU_ASSERT(qpair.num_outstanding_reqs == 0);

	/* If we are resetting, make sure that we don't call into the transport. */
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
	dummy_1.queued = true;
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);
	dummy_2.queued = true;
	g_num_cb_failed = 0;
	ctrlr.is_failed = false;
	ctrlr.is_removed = false;
	ctrlr.is_resetting = true;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == false);
	/* We also need to make sure we didn't abort the requests. */
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* The case where we aren't resetting, but are enabling the qpair, is the same as above. */
	ctrlr.is_resetting = false;
	qpair.state = NVME_QPAIR_ENABLING;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == false);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* For other qpair states, we want to enable the qpair. */
	qpair.state = NVME_QPAIR_CONNECTED;
	rc = spdk_nvme_qpair_process_completions(&qpair, 1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_called_transport_process_completions == true);
	/* These should have been submitted to the lower layer. */
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);
	CU_ASSERT(nvme_qpair_get_state(&qpair) == NVME_QPAIR_ENABLED);

	g_called_transport_process_completions = false;
	g_transport_process_completions_rc = -ENXIO;

	/* Fail the controller if we get an error from the transport on the admin qpair. */
	admin_qp.state = NVME_QPAIR_ENABLED;
	rc = spdk_nvme_qpair_process_completions(&admin_qp, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == true);
	CU_ASSERT(ctrlr.is_failed == true);

	/* Don't fail the controller for regular qpairs. */
	ctrlr.is_failed = false;
	g_called_transport_process_completions = false;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == true);
	CU_ASSERT(ctrlr.is_failed == false);

	/* Make sure we don't modify the return value from the transport. */
	ctrlr.is_failed = false;
	g_called_transport_process_completions = false;
	g_transport_process_completions_rc = 23;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == 23);
	CU_ASSERT(g_called_transport_process_completions == true);
	CU_ASSERT(ctrlr.is_failed == false);

	free(qpair.req_buf);
	free(admin_qp.req_buf);
}

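/*
 * nvme_completion_is_retry() decides whether a failed command may be
 * resubmitted. Per the assertions below, generic NAMESPACE_NOT_READY and
 * FORMAT_IN_PROGRESS plus path-related INTERNAL_PATH_ERROR are retriable
 * as long as DNR (do not retry) is clear; every other status tested here
 * is terminal.
 */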
static void
test_nvme_completion_is_retry(void)
{
	struct spdk_nvme_cpl cpl = {};

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_FORMAT_IN_PROGRESS;
	cpl.status.dnr = 1;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_COMMAND_ID_CONFLICT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	/* sic: spelling matches the constant in spdk/nvme_spec.h */
	cpl.status.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_PRP_OFFSET;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_CAPACITY_EXCEEDED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_RESERVATION_CONFLICT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = 0x70;	/* unknown status code */
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_PATH;
	cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_PATH;
	cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	cpl.status.dnr = 1;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = 0x4;	/* reserved status code type */
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
}

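/*
 * Completion status strings are only checked in debug builds; note the
 * matching #ifdef DEBUG around CU_ADD_TEST(suite, test_get_status_string)
 * in main().
 */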
#ifdef DEBUG
static void
test_get_status_string(void)
{
	const char *status_string;
	struct spdk_nvme_status status;

	status.sct = SPDK_NVME_SCT_GENERIC;
	status.sc = SPDK_NVME_SC_SUCCESS;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "SUCCESS") == 0);

	status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	status.sc = SPDK_NVME_SC_COMPLETION_QUEUE_INVALID;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "INVALID COMPLETION QUEUE") == 0);

	status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
	status.sc = SPDK_NVME_SC_UNRECOVERED_READ_ERROR;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "UNRECOVERED READ ERROR") == 0);

	status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	status.sc = 0;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "VENDOR SPECIFIC") == 0);

	status.sct = 0x4;
	status.sc = 0;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "RESERVED") == 0);
}
#endif

static void
test_nvme_qpair_add_cmd_error_injection(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	pthread_mutexattr_t attr;
	int rc;

	prepare_submit_request_test(&qpair, &ctrlr);
	ctrlr.adminq = &qpair;

	SPDK_CU_ASSERT_FATAL(pthread_mutexattr_init(&attr) == 0);
	SPDK_CU_ASSERT_FATAL(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) == 0);
	SPDK_CU_ASSERT_FATAL(pthread_mutex_init(&ctrlr.ctrlr_lock, &attr) == 0);
	pthread_mutexattr_destroy(&attr);

	/* Admin error injection at the submission path */
	MOCK_CLEAR(spdk_zmalloc);
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, NULL,
			SPDK_NVME_OPC_GET_FEATURES, true, 5000, 1,
			SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_INVALID_FIELD);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, NULL, SPDK_NVME_OPC_GET_FEATURES);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	/* I/O error injection at the completion path */
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_READ, false, 0, 1,
			SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Inject the same opcode again and verify that no second entry is allocated. */
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_READ, false, 0, 1,
			SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&qpair.err_cmd_head));
	CU_ASSERT(TAILQ_NEXT(TAILQ_FIRST(&qpair.err_cmd_head), link) == NULL);

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_READ);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_COMPARE, true, 0, 5,
			SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_COMPARE_FAILURE);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_COMPARE);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	pthread_mutex_destroy(&ctrlr.ctrlr_lock);
	cleanup_submit_request_test(&qpair);
}

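/*
 * Parent/child request trees model split I/O: a large request is carved into
 * child nvme_requests that each point back at their parent. The helper below
 * builds a two-level tree (see the diagram inside) so the submit-failure
 * paths can be exercised against compound requests.
 */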
static struct nvme_request *
allocate_request_tree(struct spdk_nvme_qpair *qpair)
{
	struct nvme_request *req, *req1, *req2, *req3, *req2_1, *req2_2, *req2_3;

	/*
	 * Build a request chain like the following:
	 *            req
	 *             |
	 *      ---------------
	 *      |      |      |
	 *     req1   req2   req3
	 *             |
	 *      ---------------
	 *      |      |      |
	 *   req2_1 req2_2 req2_3
	 */
	req = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	TAILQ_INIT(&req->children);

	req1 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req1 != NULL);
	req->num_children++;
	TAILQ_INSERT_TAIL(&req->children, req1, child_tailq);
	req1->parent = req;

	req2 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req2 != NULL);
	TAILQ_INIT(&req2->children);
	req->num_children++;
	TAILQ_INSERT_TAIL(&req->children, req2, child_tailq);
	req2->parent = req;

	req3 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req3 != NULL);
	req->num_children++;
	TAILQ_INSERT_TAIL(&req->children, req3, child_tailq);
	req3->parent = req;

	req2_1 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req2_1 != NULL);
	req2->num_children++;
	TAILQ_INSERT_TAIL(&req2->children, req2_1, child_tailq);
	req2_1->parent = req2;

	req2_2 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req2_2 != NULL);
	req2->num_children++;
	TAILQ_INSERT_TAIL(&req2->children, req2_2, child_tailq);
	req2_2->parent = req2;

	req2_3 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req2_3 != NULL);
	req2->num_children++;
	TAILQ_INSERT_TAIL(&req2->children, req2_3, child_tailq);
	req2_3->parent = req2;

	return req;
}

static void
test_nvme_qpair_submit_request(void)
{
	int rc;
	struct spdk_nvme_qpair qpair = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_request *req;

	prepare_submit_request_test(&qpair, &ctrlr);

	req = allocate_request_tree(&qpair);
	ctrlr.is_failed = true;
	rc = nvme_qpair_submit_request(&qpair, req);
	SPDK_CU_ASSERT_FATAL(rc == -ENXIO);

	req = allocate_request_tree(&qpair);
	ctrlr.is_failed = false;
	qpair.state = NVME_QPAIR_DISCONNECTING;
	rc = nvme_qpair_submit_request(&qpair, req);
	SPDK_CU_ASSERT_FATAL(rc == -ENXIO);

	cleanup_submit_request_test(&qpair);
}

static void
test_nvme_qpair_resubmit_request_with_transport_failed(void)
{
	int rc;
	struct spdk_nvme_qpair qpair = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_request *req;

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(&qpair, dummy_cb_fn, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	TAILQ_INIT(&req->children);

	STAILQ_INSERT_TAIL(&qpair.queued_req, req, stailq);
	req->queued = true;

	g_transport_process_completions_rc = 1;
	qpair.state = NVME_QPAIR_ENABLED;
	g_num_cb_failed = 0;
	MOCK_SET(nvme_transport_qpair_submit_request, -EINVAL);
	rc = spdk_nvme_qpair_process_completions(&qpair, g_transport_process_completions_rc);
	MOCK_CLEAR(nvme_transport_qpair_submit_request);
	CU_ASSERT(rc == g_transport_process_completions_rc);
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_failed == 1);

	cleanup_submit_request_test(&qpair);
}

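/*
 * nvme_qpair_manual_complete_request() fabricates a completion without
 * touching the transport. The callback below verifies the synthesized cpl:
 * sqid comes from the qpair's id, and sct/sc/dnr are the values passed by
 * the test.
 */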
static void
ut_spdk_nvme_cmd_cb(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(cb_arg == (void *)0xDEADBEEF);
	CU_ASSERT(cpl->sqid == 1);
	CU_ASSERT(cpl->status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(cpl->status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(cpl->status.dnr == 1);
}

static void
test_nvme_qpair_manual_complete_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct spdk_nvme_ctrlr ctrlr = {};

	qpair.ctrlr = &ctrlr;
	qpair.id = 1;
	req.cb_fn = ut_spdk_nvme_cmd_cb;
	req.cb_arg = (void *)0xDEADBEEF;
	req.qpair = &qpair;
	req.num_children = 0;
	qpair.ctrlr->opts.disable_error_logging = false;
	STAILQ_INIT(&qpair.free_req);
	SPDK_CU_ASSERT_FATAL(STAILQ_EMPTY(&qpair.free_req));
	qpair.num_outstanding_reqs = 1;

	nvme_qpair_manual_complete_request(&qpair, &req, SPDK_NVME_SCT_GENERIC,
					   SPDK_NVME_SC_SUCCESS, 1, true);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.free_req));
	CU_ASSERT(qpair.num_outstanding_reqs == 0);
}

static void
ut_spdk_nvme_cmd_cb_empty(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
}

static void
test_nvme_qpair_init_deinit(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request *reqs[3] = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_error_cmd *cmd = NULL;
	struct nvme_request *var_req = NULL;
	int rc, i = 0;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;

	rc = nvme_qpair_init(&qpair, 1, &ctrlr, SPDK_NVME_QPRIO_HIGH, 3, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(qpair.id == 1);
	CU_ASSERT(qpair.qprio == SPDK_NVME_QPRIO_HIGH);
	CU_ASSERT(qpair.in_completion_context == 0);
	CU_ASSERT(qpair.delete_after_completion_context == 0);
	CU_ASSERT(qpair.no_deletion_notification_needed == 0);
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(qpair.trtype == SPDK_NVME_TRANSPORT_PCIE);
	CU_ASSERT(qpair.req_buf != NULL);

	SPDK_CU_ASSERT_FATAL(!STAILQ_EMPTY(&qpair.free_req));
	STAILQ_FOREACH(var_req, &qpair.free_req, stailq) {
		/* Check request address alignment. */
		CU_ASSERT((uint64_t)var_req % 64 == 0);
		CU_ASSERT(var_req->qpair == &qpair);
		reqs[i++] = var_req;
	}
	CU_ASSERT(i == 3);

	/* Allocate an error-injection entry for deinit to clean up. */
	cmd = spdk_zmalloc(sizeof(*cmd), 64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
	SPDK_CU_ASSERT_FATAL(cmd != NULL);
	TAILQ_INSERT_TAIL(&qpair.err_cmd_head, cmd, link);
	for (int i = 0; i < 3; i++) {
		reqs[i]->cb_fn = ut_spdk_nvme_cmd_cb_empty;
		reqs[i]->cb_arg = (void *)0xDEADBEEF;
		reqs[i]->num_children = 0;
	}

	/* Distribute the requests across the queued, aborting, and error queues. */
	STAILQ_REMOVE(&qpair.free_req, reqs[0], nvme_request, stailq);
	STAILQ_INSERT_TAIL(&qpair.queued_req, reqs[0], stailq);
	STAILQ_REMOVE(&qpair.free_req, reqs[1], nvme_request, stailq);
	STAILQ_INSERT_TAIL(&qpair.aborting_queued_req, reqs[1], stailq);
	STAILQ_REMOVE(&qpair.free_req, reqs[2], nvme_request, stailq);
	STAILQ_INSERT_TAIL(&qpair.err_req_head, reqs[2], stailq);
	CU_ASSERT(STAILQ_EMPTY(&qpair.free_req));
	qpair.num_outstanding_reqs = 3;

	nvme_qpair_deinit(&qpair);
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(STAILQ_EMPTY(&qpair.aborting_queued_req));
	CU_ASSERT(STAILQ_EMPTY(&qpair.err_req_head));
	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
	CU_ASSERT(qpair.num_outstanding_reqs == 0);
}

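/*
 * The nvme_get_sgl*() helpers format the SGL1 data pointer of a command
 * into a fixed-size buffer (NVME_CMD_DPTR_STR_SIZE) for debug logging;
 * check keyed, unkeyed, and full descriptor rendering.
 */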
static void
test_nvme_get_sgl_print_info(void)
{
	char buf[NVME_CMD_DPTR_STR_SIZE] = {};
	struct spdk_nvme_cmd cmd = {};

	cmd.dptr.sgl1.keyed.length = 0x1000;
	cmd.dptr.sgl1.keyed.key = 0xababccdd;

	nvme_get_sgl_keyed(buf, NVME_CMD_DPTR_STR_SIZE, &cmd);
	CU_ASSERT(!strncmp(buf, " len:0x1000 key:0xababccdd", NVME_CMD_DPTR_STR_SIZE));

	memset(&cmd.dptr.sgl1, 0, sizeof(cmd.dptr.sgl1));
	cmd.dptr.sgl1.unkeyed.length = 0x1000;

	nvme_get_sgl_unkeyed(buf, NVME_CMD_DPTR_STR_SIZE, &cmd);
	CU_ASSERT(!strncmp(buf, " len:0x1000", NVME_CMD_DPTR_STR_SIZE));

	memset(&cmd.dptr.sgl1, 0, sizeof(cmd.dptr.sgl1));
	cmd.dptr.sgl1.generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	cmd.dptr.sgl1.generic.subtype = 0;
	cmd.dptr.sgl1.address = 0xdeadbeef;
	cmd.dptr.sgl1.unkeyed.length = 0x1000;

	nvme_get_sgl(buf, NVME_CMD_DPTR_STR_SIZE, &cmd);
	CU_ASSERT(!strncmp(buf, "SGL DATA BLOCK ADDRESS 0xdeadbeef len:0x1000",
			   NVME_CMD_DPTR_STR_SIZE));

	memset(&cmd.dptr.sgl1, 0, sizeof(cmd.dptr.sgl1));
	cmd.dptr.sgl1.generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	cmd.dptr.sgl1.generic.subtype = 0;
	cmd.dptr.sgl1.address = 0xdeadbeef;
	cmd.dptr.sgl1.keyed.length = 0x1000;
	cmd.dptr.sgl1.keyed.key = 0xababccdd;

	nvme_get_sgl(buf, NVME_CMD_DPTR_STR_SIZE, &cmd);
	CU_ASSERT(!strncmp(buf, "SGL KEYED DATA BLOCK ADDRESS 0xdeadbeef len:0x1000 key:0xababccdd",
			   NVME_CMD_DPTR_STR_SIZE));
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme_qpair", NULL, NULL);

	CU_ADD_TEST(suite, test3);
	CU_ADD_TEST(suite, test_ctrlr_failed);
	CU_ADD_TEST(suite, struct_packing);
	CU_ADD_TEST(suite, test_nvme_qpair_process_completions);
	CU_ADD_TEST(suite, test_nvme_completion_is_retry);
#ifdef DEBUG
	CU_ADD_TEST(suite, test_get_status_string);
#endif
	CU_ADD_TEST(suite, test_nvme_qpair_add_cmd_error_injection);
	CU_ADD_TEST(suite, test_nvme_qpair_submit_request);
	CU_ADD_TEST(suite, test_nvme_qpair_resubmit_request_with_transport_failed);
	CU_ADD_TEST(suite, test_nvme_qpair_manual_complete_request);
	CU_ADD_TEST(suite, test_nvme_qpair_init_deinit);
	CU_ADD_TEST(suite, test_nvme_get_sgl_print_info);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}