1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * * Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * * Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * * Neither the name of Intel Corporation nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include "spdk/stdinc.h" 36 #include "spdk_cunit.h" 37 #include "spdk/thread.h" 38 #include "spdk/bdev_module.h" 39 #include "spdk/bdev_module.h" 40 41 #include "common/lib/ut_multithread.c" 42 43 #include "bdev/nvme/bdev_nvme.c" 44 45 #include "unit/lib/json_mock.c" 46 47 static void *g_accel_p = (void *)0xdeadbeaf; 48 49 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *, 50 (const struct spdk_nvme_transport_id *trid, void *cb_ctx, 51 spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb, 52 spdk_nvme_remove_cb remove_cb), NULL); 53 54 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid, 55 enum spdk_nvme_transport_type trtype)); 56 57 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype), 58 NULL); 59 60 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL); 61 62 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr, 63 struct spdk_nvme_transport_id *trid), 0); 64 65 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr, 66 spdk_nvme_remove_cb remove_cb, void *remove_ctx)); 67 68 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0); 69 70 DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0); 71 DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf)); 72 73 DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain, int); 74 75 DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int, 76 (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0); 77 78 int spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr, 79 struct spdk_memory_domain **domains, int array_size) 80 { 81 HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain); 82 83 return 0; 84 } 85 86 struct spdk_io_channel * 87 spdk_accel_engine_get_io_channel(void) 88 { 89 return spdk_get_io_channel(g_accel_p); 90 } 91 92 void 93 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr, 94 struct spdk_nvme_io_qpair_opts *opts, size_t opts_size) 95 { 96 /* Avoid warning that opts is used uninitialised */ 97 memset(opts, 0, opts_size); 98 } 99 100 DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *, 101 (struct spdk_nvme_ctrlr *ctrlr), NULL); 102 103 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t, 104 (const struct spdk_nvme_ctrlr *ctrlr), 0); 105 106 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *, 107 (struct spdk_nvme_ctrlr *ctrlr), NULL); 108 109 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr, 110 spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg)); 111 112 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr, 113 uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)); 114 115 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false); 116 117 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr, 118 struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 119 120 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr, 121 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 122 uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 123 124 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr, 125 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 126 uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 127 128 DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name, 129 size_t *size), 0); 130 131 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 132 133 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 134 135 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 136 137 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0); 138 139 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false); 140 141 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 142 143 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value, 144 enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0); 145 146 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0); 147 148 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, 149 char *name, size_t *size), 0); 150 151 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t, 152 (struct spdk_nvme_ns *ns), 0); 153 154 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t, 155 (const struct spdk_nvme_ctrlr *ctrlr), 0); 156 157 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t, 158 (struct spdk_nvme_ns *ns), 0); 159 160 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t, 161 (struct spdk_nvme_ns *ns), 0); 162 163 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t, 164 (struct spdk_nvme_ns *ns), 0); 165 166 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int, 167 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata, 168 uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 169 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0); 170 171 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int, 172 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba, 173 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 174 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn, 175 void *metadata, uint16_t apptag_mask, uint16_t apptag), 0); 176 177 DEFINE_STUB(spdk_nvme_zns_report_zones, int, 178 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 179 void *payload, uint32_t payload_size, uint64_t slba, 180 enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report, 181 spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 182 183 DEFINE_STUB(spdk_nvme_zns_close_zone, int, 184 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 185 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 186 187 DEFINE_STUB(spdk_nvme_zns_finish_zone, int, 188 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 189 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 190 191 DEFINE_STUB(spdk_nvme_zns_open_zone, int, 192 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 193 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 194 195 DEFINE_STUB(spdk_nvme_zns_reset_zone, int, 196 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 197 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 198 199 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL); 200 201 DEFINE_STUB(spdk_nvme_zns_offline_zone, int, 202 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 203 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 204 205 DEFINE_STUB_V(spdk_bdev_module_fini_done, (void)); 206 207 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); 208 209 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL); 210 211 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev)); 212 213 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst, 214 struct iovec *iov, 215 uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0); 216 217 DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr)); 218 219 struct ut_nvme_req { 220 uint16_t opc; 221 spdk_nvme_cmd_cb cb_fn; 222 void *cb_arg; 223 struct spdk_nvme_cpl cpl; 224 TAILQ_ENTRY(ut_nvme_req) tailq; 225 }; 226 227 struct spdk_nvme_ns { 228 struct spdk_nvme_ctrlr *ctrlr; 229 uint32_t id; 230 bool is_active; 231 struct spdk_uuid *uuid; 232 enum spdk_nvme_ana_state ana_state; 233 enum spdk_nvme_csi csi; 234 }; 235 236 struct spdk_nvme_qpair { 237 struct spdk_nvme_ctrlr *ctrlr; 238 uint8_t failure_reason; 239 bool is_connected; 240 bool in_completion_context; 241 bool delete_after_completion_context; 242 TAILQ_HEAD(, ut_nvme_req) outstanding_reqs; 243 uint32_t num_outstanding_reqs; 244 TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq; 245 struct spdk_nvme_poll_group *poll_group; 246 void *poll_group_tailq_head; 247 TAILQ_ENTRY(spdk_nvme_qpair) tailq; 248 }; 249 250 struct spdk_nvme_ctrlr { 251 uint32_t num_ns; 252 struct spdk_nvme_ns *ns; 253 struct spdk_nvme_ns_data *nsdata; 254 struct spdk_nvme_qpair adminq; 255 struct spdk_nvme_ctrlr_data cdata; 256 bool attached; 257 bool is_failed; 258 bool fail_reset; 259 struct spdk_nvme_transport_id trid; 260 TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs; 261 TAILQ_ENTRY(spdk_nvme_ctrlr) tailq; 262 struct spdk_nvme_ctrlr_opts opts; 263 }; 264 265 struct spdk_nvme_poll_group { 266 void *ctx; 267 struct spdk_nvme_accel_fn_table accel_fn_table; 268 TAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs; 269 TAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs; 270 }; 271 272 struct spdk_nvme_probe_ctx { 273 struct spdk_nvme_transport_id trid; 274 void *cb_ctx; 275 spdk_nvme_attach_cb attach_cb; 276 struct spdk_nvme_ctrlr *init_ctrlr; 277 }; 278 279 uint32_t 280 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr) 281 { 282 uint32_t nsid; 283 284 for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) { 285 if (ctrlr->ns[nsid - 1].is_active) { 286 return nsid; 287 } 288 } 289 290 return 0; 291 } 292 293 uint32_t 294 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 295 { 296 for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) { 297 if (ctrlr->ns[nsid - 1].is_active) { 298 return nsid; 299 } 300 } 301 302 return 0; 303 } 304 305 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs); 306 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER( 307 g_ut_attached_ctrlrs); 308 static int g_ut_attach_ctrlr_status; 309 static size_t g_ut_attach_bdev_count; 310 static int g_ut_register_bdev_status; 311 static uint16_t g_ut_cntlid; 312 static struct nvme_path_id g_any_path = {}; 313 314 static void 315 ut_init_trid(struct spdk_nvme_transport_id *trid) 316 { 317 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 318 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 319 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8"); 320 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 321 } 322 323 static void 324 ut_init_trid2(struct spdk_nvme_transport_id *trid) 325 { 326 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 327 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 328 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9"); 329 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 330 } 331 332 static void 333 ut_init_trid3(struct spdk_nvme_transport_id *trid) 334 { 335 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 336 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 337 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10"); 338 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 339 } 340 341 static int 342 cmp_int(int a, int b) 343 { 344 return a - b; 345 } 346 347 int 348 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1, 349 const struct spdk_nvme_transport_id *trid2) 350 { 351 int cmp; 352 353 /* We assume trtype is TCP for now. */ 354 CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP); 355 356 cmp = cmp_int(trid1->trtype, trid2->trtype); 357 if (cmp) { 358 return cmp; 359 } 360 361 cmp = strcasecmp(trid1->traddr, trid2->traddr); 362 if (cmp) { 363 return cmp; 364 } 365 366 cmp = cmp_int(trid1->adrfam, trid2->adrfam); 367 if (cmp) { 368 return cmp; 369 } 370 371 cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid); 372 if (cmp) { 373 return cmp; 374 } 375 376 cmp = strcmp(trid1->subnqn, trid2->subnqn); 377 if (cmp) { 378 return cmp; 379 } 380 381 return 0; 382 } 383 384 static struct spdk_nvme_ctrlr * 385 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns, 386 bool ana_reporting, bool multipath) 387 { 388 struct spdk_nvme_ctrlr *ctrlr; 389 uint32_t i; 390 391 TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) { 392 if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) { 393 /* There is a ctrlr whose trid matches. */ 394 return NULL; 395 } 396 } 397 398 ctrlr = calloc(1, sizeof(*ctrlr)); 399 if (ctrlr == NULL) { 400 return NULL; 401 } 402 403 ctrlr->attached = true; 404 ctrlr->adminq.ctrlr = ctrlr; 405 TAILQ_INIT(&ctrlr->adminq.outstanding_reqs); 406 ctrlr->adminq.is_connected = true; 407 408 if (num_ns != 0) { 409 ctrlr->num_ns = num_ns; 410 ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns)); 411 if (ctrlr->ns == NULL) { 412 free(ctrlr); 413 return NULL; 414 } 415 416 ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data)); 417 if (ctrlr->nsdata == NULL) { 418 free(ctrlr->ns); 419 free(ctrlr); 420 return NULL; 421 } 422 423 for (i = 0; i < num_ns; i++) { 424 ctrlr->ns[i].id = i + 1; 425 ctrlr->ns[i].ctrlr = ctrlr; 426 ctrlr->ns[i].is_active = true; 427 ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 428 ctrlr->nsdata[i].nsze = 1024; 429 ctrlr->nsdata[i].nmic.can_share = multipath; 430 } 431 432 ctrlr->cdata.nn = num_ns; 433 ctrlr->cdata.nanagrpid = num_ns; 434 } 435 436 ctrlr->cdata.cntlid = ++g_ut_cntlid; 437 ctrlr->cdata.cmic.multi_ctrlr = multipath; 438 ctrlr->cdata.cmic.ana_reporting = ana_reporting; 439 ctrlr->trid = *trid; 440 TAILQ_INIT(&ctrlr->active_io_qpairs); 441 442 TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq); 443 444 return ctrlr; 445 } 446 447 static void 448 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr) 449 { 450 CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs)); 451 452 TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq); 453 free(ctrlr->nsdata); 454 free(ctrlr->ns); 455 free(ctrlr); 456 } 457 458 static int 459 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 460 uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg) 461 { 462 struct ut_nvme_req *req; 463 464 req = calloc(1, sizeof(*req)); 465 if (req == NULL) { 466 return -ENOMEM; 467 } 468 469 req->opc = opc; 470 req->cb_fn = cb_fn; 471 req->cb_arg = cb_arg; 472 473 req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 474 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 475 476 TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq); 477 qpair->num_outstanding_reqs++; 478 479 return 0; 480 } 481 482 static struct ut_nvme_req * 483 ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg) 484 { 485 struct ut_nvme_req *req; 486 487 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 488 if (req->cb_arg == cb_arg) { 489 break; 490 } 491 } 492 493 return req; 494 } 495 496 static struct spdk_bdev_io * 497 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev, 498 struct spdk_io_channel *ch) 499 { 500 struct spdk_bdev_io *bdev_io; 501 502 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 503 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 504 bdev_io->type = type; 505 bdev_io->bdev = &nbdev->disk; 506 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 507 508 return bdev_io; 509 } 510 511 static void 512 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io) 513 { 514 bdev_io->u.bdev.iovs = &bdev_io->iov; 515 bdev_io->u.bdev.iovcnt = 1; 516 517 bdev_io->iov.iov_base = (void *)0xFEEDBEEF; 518 bdev_io->iov.iov_len = 4096; 519 } 520 521 static void 522 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx) 523 { 524 if (ctrlr->is_failed) { 525 free(ctrlr); 526 return; 527 } 528 529 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts)); 530 if (probe_ctx->cb_ctx) { 531 ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx; 532 } 533 534 TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq); 535 536 if (probe_ctx->attach_cb) { 537 probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts); 538 } 539 } 540 541 int 542 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx) 543 { 544 struct spdk_nvme_ctrlr *ctrlr, *tmp; 545 546 TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) { 547 if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) { 548 continue; 549 } 550 TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq); 551 nvme_ctrlr_poll_internal(ctrlr, probe_ctx); 552 } 553 554 free(probe_ctx); 555 556 return 0; 557 } 558 559 struct spdk_nvme_probe_ctx * 560 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid, 561 const struct spdk_nvme_ctrlr_opts *opts, 562 spdk_nvme_attach_cb attach_cb) 563 { 564 struct spdk_nvme_probe_ctx *probe_ctx; 565 566 if (trid == NULL) { 567 return NULL; 568 } 569 570 probe_ctx = calloc(1, sizeof(*probe_ctx)); 571 if (probe_ctx == NULL) { 572 return NULL; 573 } 574 575 probe_ctx->trid = *trid; 576 probe_ctx->cb_ctx = (void *)opts; 577 probe_ctx->attach_cb = attach_cb; 578 579 return probe_ctx; 580 } 581 582 int 583 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr) 584 { 585 if (ctrlr->attached) { 586 ut_detach_ctrlr(ctrlr); 587 } 588 589 return 0; 590 } 591 592 int 593 spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx) 594 { 595 SPDK_CU_ASSERT_FATAL(ctx != NULL); 596 *(struct spdk_nvme_ctrlr **)ctx = ctrlr; 597 598 return 0; 599 } 600 601 int 602 spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx) 603 { 604 return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx); 605 } 606 607 void 608 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size) 609 { 610 memset(opts, 0, opts_size); 611 612 snprintf(opts->hostnqn, sizeof(opts->hostnqn), 613 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"); 614 } 615 616 const struct spdk_nvme_ctrlr_data * 617 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr) 618 { 619 return &ctrlr->cdata; 620 } 621 622 uint32_t 623 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr) 624 { 625 return ctrlr->num_ns; 626 } 627 628 struct spdk_nvme_ns * 629 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 630 { 631 if (nsid < 1 || nsid > ctrlr->num_ns) { 632 return NULL; 633 } 634 635 return &ctrlr->ns[nsid - 1]; 636 } 637 638 bool 639 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 640 { 641 if (nsid < 1 || nsid > ctrlr->num_ns) { 642 return false; 643 } 644 645 return ctrlr->ns[nsid - 1].is_active; 646 } 647 648 union spdk_nvme_csts_register 649 spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr) 650 { 651 union spdk_nvme_csts_register csts; 652 653 csts.raw = 0; 654 655 return csts; 656 } 657 658 union spdk_nvme_vs_register 659 spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr) 660 { 661 union spdk_nvme_vs_register vs; 662 663 vs.raw = 0; 664 665 return vs; 666 } 667 668 struct spdk_nvme_qpair * 669 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 670 const struct spdk_nvme_io_qpair_opts *user_opts, 671 size_t opts_size) 672 { 673 struct spdk_nvme_qpair *qpair; 674 675 qpair = calloc(1, sizeof(*qpair)); 676 if (qpair == NULL) { 677 return NULL; 678 } 679 680 qpair->ctrlr = ctrlr; 681 TAILQ_INIT(&qpair->outstanding_reqs); 682 TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq); 683 684 return qpair; 685 } 686 687 static void 688 nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair) 689 { 690 struct spdk_nvme_poll_group *group = qpair->poll_group; 691 692 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 693 694 qpair->poll_group_tailq_head = &group->connected_qpairs; 695 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 696 TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq); 697 } 698 699 static void 700 nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair) 701 { 702 struct spdk_nvme_poll_group *group = qpair->poll_group; 703 704 CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs); 705 706 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 707 TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq); 708 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 709 } 710 711 int 712 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 713 struct spdk_nvme_qpair *qpair) 714 { 715 if (qpair->is_connected) { 716 return -EISCONN; 717 } 718 719 qpair->is_connected = true; 720 qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE; 721 722 if (qpair->poll_group) { 723 nvme_poll_group_connect_qpair(qpair); 724 } 725 726 return 0; 727 } 728 729 void 730 spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair) 731 { 732 if (!qpair->is_connected) { 733 return; 734 } 735 736 qpair->is_connected = false; 737 738 if (qpair->poll_group != NULL) { 739 nvme_poll_group_disconnect_qpair(qpair); 740 } 741 } 742 743 int 744 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair) 745 { 746 SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL); 747 748 if (qpair->in_completion_context) { 749 qpair->delete_after_completion_context = true; 750 return 0; 751 } 752 753 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 754 755 if (qpair->poll_group != NULL) { 756 spdk_nvme_poll_group_remove(qpair->poll_group, qpair); 757 } 758 759 TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq); 760 761 CU_ASSERT(qpair->num_outstanding_reqs == 0); 762 763 free(qpair); 764 765 return 0; 766 } 767 768 int 769 spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr) 770 { 771 if (ctrlr->fail_reset) { 772 ctrlr->is_failed = true; 773 return -EIO; 774 } 775 776 ctrlr->adminq.is_connected = true; 777 return 0; 778 } 779 780 void 781 spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr) 782 { 783 } 784 785 int 786 spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr) 787 { 788 ctrlr->adminq.is_connected = false; 789 ctrlr->is_failed = false; 790 791 return 0; 792 } 793 794 void 795 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr) 796 { 797 ctrlr->is_failed = true; 798 } 799 800 bool 801 spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr) 802 { 803 return ctrlr->is_failed; 804 } 805 806 spdk_nvme_qp_failure_reason 807 spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr) 808 { 809 return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq); 810 } 811 812 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + \ 813 sizeof(uint32_t)) 814 static void 815 ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length) 816 { 817 struct spdk_nvme_ana_page ana_hdr; 818 char _ana_desc[UT_ANA_DESC_SIZE]; 819 struct spdk_nvme_ana_group_descriptor *ana_desc; 820 struct spdk_nvme_ns *ns; 821 uint32_t i; 822 823 memset(&ana_hdr, 0, sizeof(ana_hdr)); 824 ana_hdr.num_ana_group_desc = ctrlr->num_ns; 825 826 SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length); 827 memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr)); 828 829 buf += sizeof(ana_hdr); 830 length -= sizeof(ana_hdr); 831 832 ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc; 833 834 for (i = 0; i < ctrlr->num_ns; i++) { 835 ns = &ctrlr->ns[i]; 836 837 if (!ns->is_active) { 838 continue; 839 } 840 841 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 842 843 ana_desc->ana_group_id = ns->id; 844 ana_desc->num_of_nsid = 1; 845 ana_desc->ana_state = ns->ana_state; 846 ana_desc->nsid[0] = ns->id; 847 848 SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length); 849 memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE); 850 851 buf += UT_ANA_DESC_SIZE; 852 length -= UT_ANA_DESC_SIZE; 853 } 854 } 855 856 int 857 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, 858 uint8_t log_page, uint32_t nsid, 859 void *payload, uint32_t payload_size, 860 uint64_t offset, 861 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 862 { 863 if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) { 864 SPDK_CU_ASSERT_FATAL(offset == 0); 865 ut_create_ana_log_page(ctrlr, payload, payload_size); 866 } 867 868 return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE, 869 cb_fn, cb_arg); 870 } 871 872 int 873 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr, 874 struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, 875 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 876 { 877 return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg); 878 } 879 880 int 881 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 882 void *cmd_cb_arg, 883 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 884 { 885 struct ut_nvme_req *req = NULL, *abort_req; 886 887 if (qpair == NULL) { 888 qpair = &ctrlr->adminq; 889 } 890 891 abort_req = calloc(1, sizeof(*abort_req)); 892 if (abort_req == NULL) { 893 return -ENOMEM; 894 } 895 896 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 897 if (req->cb_arg == cmd_cb_arg) { 898 break; 899 } 900 } 901 902 if (req == NULL) { 903 free(abort_req); 904 return -ENOENT; 905 } 906 907 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 908 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 909 910 abort_req->opc = SPDK_NVME_OPC_ABORT; 911 abort_req->cb_fn = cb_fn; 912 abort_req->cb_arg = cb_arg; 913 914 abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 915 abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 916 abort_req->cpl.cdw0 = 0; 917 918 TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq); 919 ctrlr->adminq.num_outstanding_reqs++; 920 921 return 0; 922 } 923 924 int32_t 925 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr) 926 { 927 return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0); 928 } 929 930 uint32_t 931 spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns) 932 { 933 return ns->id; 934 } 935 936 struct spdk_nvme_ctrlr * 937 spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns) 938 { 939 return ns->ctrlr; 940 } 941 942 static inline struct spdk_nvme_ns_data * 943 _nvme_ns_get_data(struct spdk_nvme_ns *ns) 944 { 945 return &ns->ctrlr->nsdata[ns->id - 1]; 946 } 947 948 const struct spdk_nvme_ns_data * 949 spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns) 950 { 951 return _nvme_ns_get_data(ns); 952 } 953 954 uint64_t 955 spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns) 956 { 957 return _nvme_ns_get_data(ns)->nsze; 958 } 959 960 const struct spdk_uuid * 961 spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns) 962 { 963 return ns->uuid; 964 } 965 966 enum spdk_nvme_csi 967 spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) { 968 return ns->csi; 969 } 970 971 int 972 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 973 void *metadata, uint64_t lba, uint32_t lba_count, 974 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 975 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 976 { 977 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 978 } 979 980 int 981 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 982 void *buffer, void *metadata, uint64_t lba, 983 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 984 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 985 { 986 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 987 } 988 989 int 990 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 991 uint64_t lba, uint32_t lba_count, 992 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 993 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 994 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 995 uint16_t apptag_mask, uint16_t apptag) 996 { 997 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 998 } 999 1000 int 1001 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1002 uint64_t lba, uint32_t lba_count, 1003 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1004 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1005 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 1006 uint16_t apptag_mask, uint16_t apptag) 1007 { 1008 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1009 } 1010 1011 static bool g_ut_readv_ext_called; 1012 int 1013 spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1014 uint64_t lba, uint32_t lba_count, 1015 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1016 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1017 spdk_nvme_req_next_sge_cb next_sge_fn, 1018 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1019 { 1020 g_ut_readv_ext_called = true; 1021 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1022 } 1023 1024 static bool g_ut_writev_ext_called; 1025 int 1026 spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1027 uint64_t lba, uint32_t lba_count, 1028 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1029 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1030 spdk_nvme_req_next_sge_cb next_sge_fn, 1031 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1032 { 1033 g_ut_writev_ext_called = true; 1034 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1035 } 1036 1037 int 1038 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1039 uint64_t lba, uint32_t lba_count, 1040 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1041 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1042 spdk_nvme_req_next_sge_cb next_sge_fn, 1043 void *metadata, uint16_t apptag_mask, uint16_t apptag) 1044 { 1045 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg); 1046 } 1047 1048 int 1049 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1050 uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges, 1051 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1052 { 1053 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg); 1054 } 1055 1056 int 1057 spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1058 uint64_t lba, uint32_t lba_count, 1059 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1060 uint32_t io_flags) 1061 { 1062 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg); 1063 } 1064 1065 struct spdk_nvme_poll_group * 1066 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table) 1067 { 1068 struct spdk_nvme_poll_group *group; 1069 1070 group = calloc(1, sizeof(*group)); 1071 if (group == NULL) { 1072 return NULL; 1073 } 1074 1075 group->ctx = ctx; 1076 if (table != NULL) { 1077 group->accel_fn_table = *table; 1078 } 1079 TAILQ_INIT(&group->connected_qpairs); 1080 TAILQ_INIT(&group->disconnected_qpairs); 1081 1082 return group; 1083 } 1084 1085 int 1086 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group) 1087 { 1088 if (!TAILQ_EMPTY(&group->connected_qpairs) || 1089 !TAILQ_EMPTY(&group->disconnected_qpairs)) { 1090 return -EBUSY; 1091 } 1092 1093 free(group); 1094 1095 return 0; 1096 } 1097 1098 spdk_nvme_qp_failure_reason 1099 spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair) 1100 { 1101 return qpair->failure_reason; 1102 } 1103 1104 int32_t 1105 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, 1106 uint32_t max_completions) 1107 { 1108 struct ut_nvme_req *req, *tmp; 1109 uint32_t num_completions = 0; 1110 1111 if (!qpair->is_connected) { 1112 return -ENXIO; 1113 } 1114 1115 qpair->in_completion_context = true; 1116 1117 TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) { 1118 TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq); 1119 qpair->num_outstanding_reqs--; 1120 1121 req->cb_fn(req->cb_arg, &req->cpl); 1122 1123 free(req); 1124 num_completions++; 1125 } 1126 1127 qpair->in_completion_context = false; 1128 if (qpair->delete_after_completion_context) { 1129 spdk_nvme_ctrlr_free_io_qpair(qpair); 1130 } 1131 1132 return num_completions; 1133 } 1134 1135 int64_t 1136 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group, 1137 uint32_t completions_per_qpair, 1138 spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb) 1139 { 1140 struct spdk_nvme_qpair *qpair, *tmp_qpair; 1141 int64_t local_completions = 0, error_reason = 0, num_completions = 0; 1142 1143 SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0); 1144 1145 if (disconnected_qpair_cb == NULL) { 1146 return -EINVAL; 1147 } 1148 1149 TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) { 1150 disconnected_qpair_cb(qpair, group->ctx); 1151 } 1152 1153 TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) { 1154 if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) { 1155 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 1156 /* Bump the number of completions so this counts as "busy" */ 1157 num_completions++; 1158 continue; 1159 } 1160 1161 local_completions = spdk_nvme_qpair_process_completions(qpair, 1162 completions_per_qpair); 1163 if (local_completions < 0 && error_reason == 0) { 1164 error_reason = local_completions; 1165 } else { 1166 num_completions += local_completions; 1167 assert(num_completions >= 0); 1168 } 1169 } 1170 1171 return error_reason ? error_reason : num_completions; 1172 } 1173 1174 int 1175 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, 1176 struct spdk_nvme_qpair *qpair) 1177 { 1178 CU_ASSERT(!qpair->is_connected); 1179 1180 qpair->poll_group = group; 1181 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 1182 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 1183 1184 return 0; 1185 } 1186 1187 int 1188 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, 1189 struct spdk_nvme_qpair *qpair) 1190 { 1191 CU_ASSERT(!qpair->is_connected); 1192 1193 if (qpair->poll_group == NULL) { 1194 return -ENOENT; 1195 } 1196 1197 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 1198 1199 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 1200 1201 qpair->poll_group = NULL; 1202 qpair->poll_group_tailq_head = NULL; 1203 1204 return 0; 1205 } 1206 1207 int 1208 spdk_bdev_register(struct spdk_bdev *bdev) 1209 { 1210 return g_ut_register_bdev_status; 1211 } 1212 1213 void 1214 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) 1215 { 1216 int rc; 1217 1218 rc = bdev->fn_table->destruct(bdev->ctxt); 1219 if (rc <= 0 && cb_fn != NULL) { 1220 cb_fn(cb_arg, rc); 1221 } 1222 } 1223 1224 int 1225 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) 1226 { 1227 bdev->blockcnt = size; 1228 1229 return 0; 1230 } 1231 1232 struct spdk_io_channel * 1233 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io) 1234 { 1235 return (struct spdk_io_channel *)bdev_io->internal.ch; 1236 } 1237 1238 void 1239 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) 1240 { 1241 bdev_io->internal.status = status; 1242 bdev_io->internal.in_submit_request = false; 1243 } 1244 1245 void 1246 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc) 1247 { 1248 if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) { 1249 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; 1250 } else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) { 1251 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED; 1252 } else { 1253 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR; 1254 } 1255 1256 bdev_io->internal.error.nvme.cdw0 = cdw0; 1257 bdev_io->internal.error.nvme.sct = sct; 1258 bdev_io->internal.error.nvme.sc = sc; 1259 1260 spdk_bdev_io_complete(bdev_io, bdev_io->internal.status); 1261 } 1262 1263 void 1264 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) 1265 { 1266 struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io); 1267 1268 ut_bdev_io_set_buf(bdev_io); 1269 1270 cb(ch, bdev_io, true); 1271 } 1272 1273 static void 1274 test_create_ctrlr(void) 1275 { 1276 struct spdk_nvme_transport_id trid = {}; 1277 struct spdk_nvme_ctrlr ctrlr = {}; 1278 int rc; 1279 1280 ut_init_trid(&trid); 1281 1282 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1283 CU_ASSERT(rc == 0); 1284 1285 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1286 1287 rc = bdev_nvme_delete("nvme0", &g_any_path); 1288 CU_ASSERT(rc == 0); 1289 1290 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1291 1292 poll_threads(); 1293 spdk_delay_us(1000); 1294 poll_threads(); 1295 1296 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1297 } 1298 1299 static void 1300 test_reset_ctrlr(void) 1301 { 1302 struct spdk_nvme_transport_id trid = {}; 1303 struct spdk_nvme_ctrlr ctrlr = {}; 1304 struct nvme_ctrlr *nvme_ctrlr = NULL; 1305 struct nvme_path_id *curr_trid; 1306 struct spdk_io_channel *ch1, *ch2; 1307 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1308 int rc; 1309 1310 ut_init_trid(&trid); 1311 TAILQ_INIT(&ctrlr.active_io_qpairs); 1312 1313 set_thread(0); 1314 1315 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1316 CU_ASSERT(rc == 0); 1317 1318 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1319 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1320 1321 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1322 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1323 1324 ch1 = spdk_get_io_channel(nvme_ctrlr); 1325 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1326 1327 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 1328 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1329 1330 set_thread(1); 1331 1332 ch2 = spdk_get_io_channel(nvme_ctrlr); 1333 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1334 1335 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 1336 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1337 1338 /* Reset starts from thread 1. */ 1339 set_thread(1); 1340 1341 /* Case 1: ctrlr is already being destructed. */ 1342 nvme_ctrlr->destruct = true; 1343 1344 rc = bdev_nvme_reset(nvme_ctrlr); 1345 CU_ASSERT(rc == -ENXIO); 1346 1347 /* Case 2: reset is in progress. */ 1348 nvme_ctrlr->destruct = false; 1349 nvme_ctrlr->resetting = true; 1350 1351 rc = bdev_nvme_reset(nvme_ctrlr); 1352 CU_ASSERT(rc == -EBUSY); 1353 1354 /* Case 3: reset completes successfully. */ 1355 nvme_ctrlr->resetting = false; 1356 curr_trid->is_failed = true; 1357 ctrlr.is_failed = true; 1358 1359 rc = bdev_nvme_reset(nvme_ctrlr); 1360 CU_ASSERT(rc == 0); 1361 CU_ASSERT(nvme_ctrlr->resetting == true); 1362 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1363 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1364 1365 poll_thread_times(0, 3); 1366 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1367 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1368 1369 poll_thread_times(0, 1); 1370 poll_thread_times(1, 1); 1371 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1372 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1373 CU_ASSERT(ctrlr.is_failed == true); 1374 1375 poll_thread_times(1, 1); 1376 poll_thread_times(0, 1); 1377 CU_ASSERT(ctrlr.is_failed == false); 1378 CU_ASSERT(ctrlr.adminq.is_connected == false); 1379 1380 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1381 poll_thread_times(0, 2); 1382 CU_ASSERT(ctrlr.adminq.is_connected == true); 1383 1384 poll_thread_times(0, 1); 1385 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1386 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1387 1388 poll_thread_times(1, 1); 1389 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1390 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1391 CU_ASSERT(nvme_ctrlr->resetting == true); 1392 CU_ASSERT(curr_trid->is_failed == true); 1393 1394 poll_thread_times(0, 2); 1395 CU_ASSERT(nvme_ctrlr->resetting == true); 1396 poll_thread_times(1, 1); 1397 CU_ASSERT(nvme_ctrlr->resetting == true); 1398 poll_thread_times(0, 1); 1399 CU_ASSERT(nvme_ctrlr->resetting == false); 1400 CU_ASSERT(curr_trid->is_failed == false); 1401 1402 spdk_put_io_channel(ch2); 1403 1404 set_thread(0); 1405 1406 spdk_put_io_channel(ch1); 1407 1408 poll_threads(); 1409 1410 rc = bdev_nvme_delete("nvme0", &g_any_path); 1411 CU_ASSERT(rc == 0); 1412 1413 poll_threads(); 1414 spdk_delay_us(1000); 1415 poll_threads(); 1416 1417 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1418 } 1419 1420 static void 1421 test_race_between_reset_and_destruct_ctrlr(void) 1422 { 1423 struct spdk_nvme_transport_id trid = {}; 1424 struct spdk_nvme_ctrlr ctrlr = {}; 1425 struct nvme_ctrlr *nvme_ctrlr; 1426 struct spdk_io_channel *ch1, *ch2; 1427 int rc; 1428 1429 ut_init_trid(&trid); 1430 TAILQ_INIT(&ctrlr.active_io_qpairs); 1431 1432 set_thread(0); 1433 1434 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1435 CU_ASSERT(rc == 0); 1436 1437 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1438 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1439 1440 ch1 = spdk_get_io_channel(nvme_ctrlr); 1441 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1442 1443 set_thread(1); 1444 1445 ch2 = spdk_get_io_channel(nvme_ctrlr); 1446 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1447 1448 /* Reset starts from thread 1. */ 1449 set_thread(1); 1450 1451 rc = bdev_nvme_reset(nvme_ctrlr); 1452 CU_ASSERT(rc == 0); 1453 CU_ASSERT(nvme_ctrlr->resetting == true); 1454 1455 /* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */ 1456 set_thread(0); 1457 1458 rc = bdev_nvme_delete("nvme0", &g_any_path); 1459 CU_ASSERT(rc == 0); 1460 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1461 CU_ASSERT(nvme_ctrlr->destruct == true); 1462 CU_ASSERT(nvme_ctrlr->resetting == true); 1463 1464 poll_threads(); 1465 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1466 poll_threads(); 1467 1468 /* Reset completed but ctrlr is not still destructed yet. */ 1469 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1470 CU_ASSERT(nvme_ctrlr->destruct == true); 1471 CU_ASSERT(nvme_ctrlr->resetting == false); 1472 1473 /* New reset request is rejected. */ 1474 rc = bdev_nvme_reset(nvme_ctrlr); 1475 CU_ASSERT(rc == -ENXIO); 1476 1477 /* Additional polling called spdk_io_device_unregister() to ctrlr, 1478 * However there are two channels and destruct is not completed yet. 1479 */ 1480 poll_threads(); 1481 1482 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1483 1484 set_thread(0); 1485 1486 spdk_put_io_channel(ch1); 1487 1488 set_thread(1); 1489 1490 spdk_put_io_channel(ch2); 1491 1492 poll_threads(); 1493 spdk_delay_us(1000); 1494 poll_threads(); 1495 1496 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1497 } 1498 1499 static void 1500 test_failover_ctrlr(void) 1501 { 1502 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 1503 struct spdk_nvme_ctrlr ctrlr = {}; 1504 struct nvme_ctrlr *nvme_ctrlr = NULL; 1505 struct nvme_path_id *curr_trid, *next_trid; 1506 struct spdk_io_channel *ch1, *ch2; 1507 int rc; 1508 1509 ut_init_trid(&trid1); 1510 ut_init_trid2(&trid2); 1511 TAILQ_INIT(&ctrlr.active_io_qpairs); 1512 1513 set_thread(0); 1514 1515 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1516 CU_ASSERT(rc == 0); 1517 1518 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1519 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1520 1521 ch1 = spdk_get_io_channel(nvme_ctrlr); 1522 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1523 1524 set_thread(1); 1525 1526 ch2 = spdk_get_io_channel(nvme_ctrlr); 1527 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1528 1529 /* First, test one trid case. */ 1530 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1531 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1532 1533 /* Failover starts from thread 1. */ 1534 set_thread(1); 1535 1536 /* Case 1: ctrlr is already being destructed. */ 1537 nvme_ctrlr->destruct = true; 1538 1539 rc = bdev_nvme_failover(nvme_ctrlr, false); 1540 CU_ASSERT(rc == -ENXIO); 1541 CU_ASSERT(curr_trid->is_failed == false); 1542 1543 /* Case 2: reset is in progress. */ 1544 nvme_ctrlr->destruct = false; 1545 nvme_ctrlr->resetting = true; 1546 1547 rc = bdev_nvme_failover(nvme_ctrlr, false); 1548 CU_ASSERT(rc == -EBUSY); 1549 1550 /* Case 3: reset completes successfully. */ 1551 nvme_ctrlr->resetting = false; 1552 1553 rc = bdev_nvme_failover(nvme_ctrlr, false); 1554 CU_ASSERT(rc == 0); 1555 1556 CU_ASSERT(nvme_ctrlr->resetting == true); 1557 CU_ASSERT(curr_trid->is_failed == true); 1558 1559 poll_threads(); 1560 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1561 poll_threads(); 1562 1563 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1564 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1565 1566 CU_ASSERT(nvme_ctrlr->resetting == false); 1567 CU_ASSERT(curr_trid->is_failed == false); 1568 1569 set_thread(0); 1570 1571 /* Second, test two trids case. */ 1572 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1573 CU_ASSERT(rc == 0); 1574 1575 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1576 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1577 CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id); 1578 CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0); 1579 1580 /* Failover starts from thread 1. */ 1581 set_thread(1); 1582 1583 /* Case 4: reset is in progress. */ 1584 nvme_ctrlr->resetting = true; 1585 1586 rc = bdev_nvme_failover(nvme_ctrlr, false); 1587 CU_ASSERT(rc == -EBUSY); 1588 1589 /* Case 5: failover completes successfully. */ 1590 nvme_ctrlr->resetting = false; 1591 1592 rc = bdev_nvme_failover(nvme_ctrlr, false); 1593 CU_ASSERT(rc == 0); 1594 1595 CU_ASSERT(nvme_ctrlr->resetting == true); 1596 1597 next_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1598 SPDK_CU_ASSERT_FATAL(next_trid != NULL); 1599 CU_ASSERT(next_trid != curr_trid); 1600 CU_ASSERT(next_trid == nvme_ctrlr->active_path_id); 1601 CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0); 1602 1603 poll_threads(); 1604 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1605 poll_threads(); 1606 1607 CU_ASSERT(nvme_ctrlr->resetting == false); 1608 1609 spdk_put_io_channel(ch2); 1610 1611 set_thread(0); 1612 1613 spdk_put_io_channel(ch1); 1614 1615 poll_threads(); 1616 1617 rc = bdev_nvme_delete("nvme0", &g_any_path); 1618 CU_ASSERT(rc == 0); 1619 1620 poll_threads(); 1621 spdk_delay_us(1000); 1622 poll_threads(); 1623 1624 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1625 } 1626 1627 /* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following. 1628 * 1629 * A nvme_ctrlr had trid1 and trid2 first. trid1 was active. A connection to trid1 was 1630 * disconnected and reset ctrlr failed repeatedly before starting failover from trid1 1631 * to trid2. While processing the failed reset, trid3 was added. trid1 should 1632 * have been active, i.e., the head of the list until the failover completed. 1633 * However trid3 was inserted to the head of the list by mistake. 1634 * 1635 * I/O qpairs have smaller polling period than admin qpair. When a connection is 1636 * detected, I/O qpair may detect the error earlier than admin qpair. I/O qpair error 1637 * invokes reset ctrlr and admin qpair error invokes failover ctrlr. Hence reset ctrlr 1638 * may be executed repeatedly before failover is executed. Hence this bug is real. 1639 * 1640 * The following test verifies the fix. 1641 */ 1642 static void 1643 test_race_between_failover_and_add_secondary_trid(void) 1644 { 1645 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 1646 struct spdk_nvme_ctrlr ctrlr = {}; 1647 struct nvme_ctrlr *nvme_ctrlr = NULL; 1648 struct nvme_path_id *path_id1, *path_id2, *path_id3; 1649 struct spdk_io_channel *ch1, *ch2; 1650 int rc; 1651 1652 ut_init_trid(&trid1); 1653 ut_init_trid2(&trid2); 1654 ut_init_trid3(&trid3); 1655 TAILQ_INIT(&ctrlr.active_io_qpairs); 1656 1657 set_thread(0); 1658 1659 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1660 CU_ASSERT(rc == 0); 1661 1662 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1663 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1664 1665 ch1 = spdk_get_io_channel(nvme_ctrlr); 1666 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1667 1668 set_thread(1); 1669 1670 ch2 = spdk_get_io_channel(nvme_ctrlr); 1671 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1672 1673 set_thread(0); 1674 1675 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1676 CU_ASSERT(rc == 0); 1677 1678 path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids); 1679 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 1680 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1681 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1682 path_id2 = TAILQ_NEXT(path_id1, link); 1683 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 1684 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1685 1686 ctrlr.fail_reset = true; 1687 1688 rc = bdev_nvme_reset(nvme_ctrlr); 1689 CU_ASSERT(rc == 0); 1690 1691 poll_threads(); 1692 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1693 poll_threads(); 1694 1695 CU_ASSERT(path_id1->is_failed == true); 1696 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1697 1698 rc = bdev_nvme_reset(nvme_ctrlr); 1699 CU_ASSERT(rc == 0); 1700 1701 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 1702 CU_ASSERT(rc == 0); 1703 1704 CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids)); 1705 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1706 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1707 CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link)); 1708 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1709 path_id3 = TAILQ_NEXT(path_id2, link); 1710 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 1711 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0); 1712 1713 poll_threads(); 1714 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1715 poll_threads(); 1716 1717 spdk_put_io_channel(ch1); 1718 1719 set_thread(1); 1720 1721 spdk_put_io_channel(ch2); 1722 1723 poll_threads(); 1724 1725 set_thread(0); 1726 1727 rc = bdev_nvme_delete("nvme0", &g_any_path); 1728 CU_ASSERT(rc == 0); 1729 1730 poll_threads(); 1731 spdk_delay_us(1000); 1732 poll_threads(); 1733 1734 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1735 } 1736 1737 static void 1738 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc) 1739 { 1740 CU_ASSERT(rc == g_ut_attach_ctrlr_status); 1741 CU_ASSERT(bdev_count == g_ut_attach_bdev_count); 1742 } 1743 1744 static void 1745 test_pending_reset(void) 1746 { 1747 struct spdk_nvme_transport_id trid = {}; 1748 struct spdk_nvme_ctrlr *ctrlr; 1749 struct nvme_ctrlr *nvme_ctrlr = NULL; 1750 const int STRING_SIZE = 32; 1751 const char *attached_names[STRING_SIZE]; 1752 struct nvme_bdev *bdev; 1753 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 1754 struct spdk_io_channel *ch1, *ch2; 1755 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 1756 struct nvme_io_path *io_path1, *io_path2; 1757 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1758 int rc; 1759 1760 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1761 ut_init_trid(&trid); 1762 1763 set_thread(0); 1764 1765 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1766 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1767 1768 g_ut_attach_ctrlr_status = 0; 1769 g_ut_attach_bdev_count = 1; 1770 1771 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1772 attach_ctrlr_done, NULL, NULL, NULL, false); 1773 CU_ASSERT(rc == 0); 1774 1775 spdk_delay_us(1000); 1776 poll_threads(); 1777 1778 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1779 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1780 1781 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1782 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1783 1784 ch1 = spdk_get_io_channel(bdev); 1785 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1786 1787 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 1788 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 1789 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 1790 ctrlr_ch1 = io_path1->qpair->ctrlr_ch; 1791 SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL); 1792 1793 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 1794 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1795 1796 set_thread(1); 1797 1798 ch2 = spdk_get_io_channel(bdev); 1799 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1800 1801 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 1802 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 1803 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 1804 ctrlr_ch2 = io_path2->qpair->ctrlr_ch; 1805 SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL); 1806 1807 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 1808 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1809 1810 /* The first reset request is submitted on thread 1, and the second reset request 1811 * is submitted on thread 0 while processing the first request. 1812 */ 1813 bdev_nvme_submit_request(ch2, first_bdev_io); 1814 CU_ASSERT(nvme_ctrlr->resetting == true); 1815 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1816 1817 set_thread(0); 1818 1819 bdev_nvme_submit_request(ch1, second_bdev_io); 1820 CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io); 1821 1822 poll_threads(); 1823 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1824 poll_threads(); 1825 1826 CU_ASSERT(nvme_ctrlr->resetting == false); 1827 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1828 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1829 1830 /* The first reset request is submitted on thread 1, and the second reset request 1831 * is submitted on thread 0 while processing the first request. 1832 * 1833 * The difference from the above scenario is that the controller is removed while 1834 * processing the first request. Hence both reset requests should fail. 1835 */ 1836 set_thread(1); 1837 1838 bdev_nvme_submit_request(ch2, first_bdev_io); 1839 CU_ASSERT(nvme_ctrlr->resetting == true); 1840 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1841 1842 set_thread(0); 1843 1844 bdev_nvme_submit_request(ch1, second_bdev_io); 1845 CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io); 1846 1847 ctrlr->fail_reset = true; 1848 1849 poll_threads(); 1850 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1851 poll_threads(); 1852 1853 CU_ASSERT(nvme_ctrlr->resetting == false); 1854 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1855 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1856 1857 spdk_put_io_channel(ch1); 1858 1859 set_thread(1); 1860 1861 spdk_put_io_channel(ch2); 1862 1863 poll_threads(); 1864 1865 set_thread(0); 1866 1867 rc = bdev_nvme_delete("nvme0", &g_any_path); 1868 CU_ASSERT(rc == 0); 1869 1870 poll_threads(); 1871 spdk_delay_us(1000); 1872 poll_threads(); 1873 1874 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1875 1876 free(first_bdev_io); 1877 free(second_bdev_io); 1878 } 1879 1880 static void 1881 test_attach_ctrlr(void) 1882 { 1883 struct spdk_nvme_transport_id trid = {}; 1884 struct spdk_nvme_ctrlr *ctrlr; 1885 struct nvme_ctrlr *nvme_ctrlr; 1886 const int STRING_SIZE = 32; 1887 const char *attached_names[STRING_SIZE]; 1888 struct nvme_bdev *nbdev; 1889 int rc; 1890 1891 set_thread(0); 1892 1893 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1894 ut_init_trid(&trid); 1895 1896 /* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed 1897 * by probe polling. 1898 */ 1899 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 1900 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1901 1902 ctrlr->is_failed = true; 1903 g_ut_attach_ctrlr_status = -EIO; 1904 g_ut_attach_bdev_count = 0; 1905 1906 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1907 attach_ctrlr_done, NULL, NULL, NULL, false); 1908 CU_ASSERT(rc == 0); 1909 1910 spdk_delay_us(1000); 1911 poll_threads(); 1912 1913 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1914 1915 /* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */ 1916 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 1917 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1918 1919 g_ut_attach_ctrlr_status = 0; 1920 1921 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1922 attach_ctrlr_done, NULL, NULL, NULL, false); 1923 CU_ASSERT(rc == 0); 1924 1925 spdk_delay_us(1000); 1926 poll_threads(); 1927 1928 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1929 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1930 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 1931 1932 rc = bdev_nvme_delete("nvme0", &g_any_path); 1933 CU_ASSERT(rc == 0); 1934 1935 poll_threads(); 1936 spdk_delay_us(1000); 1937 poll_threads(); 1938 1939 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1940 1941 /* If ctrlr has one namespace, one nvme_ctrlr with one namespace and 1942 * one nvme_bdev is created. 1943 */ 1944 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1945 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1946 1947 g_ut_attach_bdev_count = 1; 1948 1949 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1950 attach_ctrlr_done, NULL, NULL, NULL, false); 1951 CU_ASSERT(rc == 0); 1952 1953 spdk_delay_us(1000); 1954 poll_threads(); 1955 1956 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1957 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1958 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 1959 1960 CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0); 1961 attached_names[0] = NULL; 1962 1963 nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1964 SPDK_CU_ASSERT_FATAL(nbdev != NULL); 1965 CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr); 1966 1967 rc = bdev_nvme_delete("nvme0", &g_any_path); 1968 CU_ASSERT(rc == 0); 1969 1970 poll_threads(); 1971 spdk_delay_us(1000); 1972 poll_threads(); 1973 1974 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1975 1976 /* Ctrlr has one namespace but one nvme_ctrlr with no namespace is 1977 * created because creating one nvme_bdev failed. 1978 */ 1979 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1980 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1981 1982 g_ut_register_bdev_status = -EINVAL; 1983 g_ut_attach_bdev_count = 0; 1984 1985 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1986 attach_ctrlr_done, NULL, NULL, NULL, false); 1987 CU_ASSERT(rc == 0); 1988 1989 spdk_delay_us(1000); 1990 poll_threads(); 1991 1992 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1993 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1994 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 1995 1996 CU_ASSERT(attached_names[0] == NULL); 1997 1998 rc = bdev_nvme_delete("nvme0", &g_any_path); 1999 CU_ASSERT(rc == 0); 2000 2001 poll_threads(); 2002 spdk_delay_us(1000); 2003 poll_threads(); 2004 2005 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2006 2007 g_ut_register_bdev_status = 0; 2008 } 2009 2010 static void 2011 test_aer_cb(void) 2012 { 2013 struct spdk_nvme_transport_id trid = {}; 2014 struct spdk_nvme_ctrlr *ctrlr; 2015 struct nvme_ctrlr *nvme_ctrlr; 2016 struct nvme_bdev *bdev; 2017 const int STRING_SIZE = 32; 2018 const char *attached_names[STRING_SIZE]; 2019 union spdk_nvme_async_event_completion event = {}; 2020 struct spdk_nvme_cpl cpl = {}; 2021 int rc; 2022 2023 set_thread(0); 2024 2025 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2026 ut_init_trid(&trid); 2027 2028 /* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th 2029 * namespaces are populated. 2030 */ 2031 ctrlr = ut_attach_ctrlr(&trid, 4, true, false); 2032 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2033 2034 ctrlr->ns[0].is_active = false; 2035 2036 g_ut_attach_ctrlr_status = 0; 2037 g_ut_attach_bdev_count = 3; 2038 2039 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2040 attach_ctrlr_done, NULL, NULL, NULL, false); 2041 CU_ASSERT(rc == 0); 2042 2043 spdk_delay_us(1000); 2044 poll_threads(); 2045 2046 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2047 poll_threads(); 2048 2049 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2050 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2051 2052 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL); 2053 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2054 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2055 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2056 2057 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev; 2058 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2059 CU_ASSERT(bdev->disk.blockcnt == 1024); 2060 2061 /* Dynamically populate 1st namespace and depopulate 3rd namespace, and 2062 * change the size of the 4th namespace. 2063 */ 2064 ctrlr->ns[0].is_active = true; 2065 ctrlr->ns[2].is_active = false; 2066 ctrlr->nsdata[3].nsze = 2048; 2067 2068 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2069 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 2070 cpl.cdw0 = event.raw; 2071 2072 aer_cb(nvme_ctrlr, &cpl); 2073 2074 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2075 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2076 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL); 2077 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2078 CU_ASSERT(bdev->disk.blockcnt == 2048); 2079 2080 /* Change ANA state of active namespaces. */ 2081 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2082 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2083 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2084 2085 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2086 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 2087 cpl.cdw0 = event.raw; 2088 2089 aer_cb(nvme_ctrlr, &cpl); 2090 2091 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2092 poll_threads(); 2093 2094 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2095 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2096 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2097 2098 rc = bdev_nvme_delete("nvme0", &g_any_path); 2099 CU_ASSERT(rc == 0); 2100 2101 poll_threads(); 2102 spdk_delay_us(1000); 2103 poll_threads(); 2104 2105 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2106 } 2107 2108 static void 2109 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2110 enum spdk_bdev_io_type io_type) 2111 { 2112 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2113 struct nvme_io_path *io_path; 2114 struct spdk_nvme_qpair *qpair; 2115 2116 io_path = bdev_nvme_find_io_path(nbdev_ch); 2117 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2118 qpair = io_path->qpair->qpair; 2119 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2120 2121 bdev_io->type = io_type; 2122 bdev_io->internal.in_submit_request = true; 2123 2124 bdev_nvme_submit_request(ch, bdev_io); 2125 2126 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2127 CU_ASSERT(qpair->num_outstanding_reqs == 1); 2128 2129 poll_threads(); 2130 2131 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2132 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2133 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2134 } 2135 2136 static void 2137 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2138 enum spdk_bdev_io_type io_type) 2139 { 2140 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2141 struct nvme_io_path *io_path; 2142 struct spdk_nvme_qpair *qpair; 2143 2144 io_path = bdev_nvme_find_io_path(nbdev_ch); 2145 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2146 qpair = io_path->qpair->qpair; 2147 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2148 2149 bdev_io->type = io_type; 2150 bdev_io->internal.in_submit_request = true; 2151 2152 bdev_nvme_submit_request(ch, bdev_io); 2153 2154 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2155 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2156 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2157 } 2158 2159 static void 2160 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) 2161 { 2162 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2163 struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 2164 struct ut_nvme_req *req; 2165 struct nvme_io_path *io_path; 2166 struct spdk_nvme_qpair *qpair; 2167 2168 io_path = bdev_nvme_find_io_path(nbdev_ch); 2169 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2170 qpair = io_path->qpair->qpair; 2171 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2172 2173 /* Only compare and write now. */ 2174 bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE; 2175 bdev_io->internal.in_submit_request = true; 2176 2177 bdev_nvme_submit_request(ch, bdev_io); 2178 2179 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2180 CU_ASSERT(qpair->num_outstanding_reqs == 2); 2181 CU_ASSERT(bio->first_fused_submitted == true); 2182 2183 /* First outstanding request is compare operation. */ 2184 req = TAILQ_FIRST(&qpair->outstanding_reqs); 2185 SPDK_CU_ASSERT_FATAL(req != NULL); 2186 CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE); 2187 req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE; 2188 2189 poll_threads(); 2190 2191 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2192 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2193 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2194 } 2195 2196 static void 2197 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2198 struct spdk_nvme_ctrlr *ctrlr) 2199 { 2200 bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 2201 bdev_io->internal.in_submit_request = true; 2202 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2203 2204 bdev_nvme_submit_request(ch, bdev_io); 2205 2206 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2207 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2208 2209 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2210 poll_thread_times(1, 1); 2211 2212 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2213 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2214 2215 poll_thread_times(0, 1); 2216 2217 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2218 } 2219 2220 static void 2221 test_submit_nvme_cmd(void) 2222 { 2223 struct spdk_nvme_transport_id trid = {}; 2224 struct spdk_nvme_ctrlr *ctrlr; 2225 struct nvme_ctrlr *nvme_ctrlr; 2226 const int STRING_SIZE = 32; 2227 const char *attached_names[STRING_SIZE]; 2228 struct nvme_bdev *bdev; 2229 struct spdk_bdev_io *bdev_io; 2230 struct spdk_io_channel *ch; 2231 struct spdk_bdev_ext_io_opts ext_io_opts = {}; 2232 int rc; 2233 2234 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2235 ut_init_trid(&trid); 2236 2237 set_thread(1); 2238 2239 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2240 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2241 2242 g_ut_attach_ctrlr_status = 0; 2243 g_ut_attach_bdev_count = 1; 2244 2245 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2246 attach_ctrlr_done, NULL, NULL, NULL, false); 2247 CU_ASSERT(rc == 0); 2248 2249 spdk_delay_us(1000); 2250 poll_threads(); 2251 2252 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2253 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2254 2255 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2256 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2257 2258 set_thread(0); 2259 2260 ch = spdk_get_io_channel(bdev); 2261 SPDK_CU_ASSERT_FATAL(ch != NULL); 2262 2263 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch); 2264 2265 bdev_io->u.bdev.iovs = NULL; 2266 2267 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2268 2269 ut_bdev_io_set_buf(bdev_io); 2270 2271 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2272 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2273 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE); 2274 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP); 2275 2276 ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH); 2277 2278 ut_test_submit_fused_nvme_cmd(ch, bdev_io); 2279 2280 /* Verify that ext NVME API is called if bdev_io ext_opts is set */ 2281 bdev_io->u.bdev.ext_opts = &ext_io_opts; 2282 g_ut_readv_ext_called = false; 2283 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2284 CU_ASSERT(g_ut_readv_ext_called == true); 2285 g_ut_readv_ext_called = false; 2286 2287 g_ut_writev_ext_called = false; 2288 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2289 CU_ASSERT(g_ut_writev_ext_called == true); 2290 g_ut_writev_ext_called = false; 2291 bdev_io->u.bdev.ext_opts = NULL; 2292 2293 ut_test_submit_admin_cmd(ch, bdev_io, ctrlr); 2294 2295 free(bdev_io); 2296 2297 spdk_put_io_channel(ch); 2298 2299 poll_threads(); 2300 2301 set_thread(1); 2302 2303 rc = bdev_nvme_delete("nvme0", &g_any_path); 2304 CU_ASSERT(rc == 0); 2305 2306 poll_threads(); 2307 spdk_delay_us(1000); 2308 poll_threads(); 2309 2310 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2311 } 2312 2313 static void 2314 test_add_remove_trid(void) 2315 { 2316 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 2317 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 2318 struct nvme_ctrlr *nvme_ctrlr = NULL; 2319 const int STRING_SIZE = 32; 2320 const char *attached_names[STRING_SIZE]; 2321 struct nvme_path_id *ctrid; 2322 int rc; 2323 2324 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2325 ut_init_trid(&path1.trid); 2326 ut_init_trid2(&path2.trid); 2327 ut_init_trid3(&path3.trid); 2328 2329 set_thread(0); 2330 2331 g_ut_attach_ctrlr_status = 0; 2332 g_ut_attach_bdev_count = 0; 2333 2334 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2335 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2336 2337 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2338 attach_ctrlr_done, NULL, NULL, NULL, false); 2339 CU_ASSERT(rc == 0); 2340 2341 spdk_delay_us(1000); 2342 poll_threads(); 2343 2344 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2345 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2346 2347 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2348 2349 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2350 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2351 2352 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2353 attach_ctrlr_done, NULL, NULL, NULL, false); 2354 CU_ASSERT(rc == 0); 2355 2356 spdk_delay_us(1000); 2357 poll_threads(); 2358 2359 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2360 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2361 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2362 break; 2363 } 2364 } 2365 CU_ASSERT(ctrid != NULL); 2366 2367 /* trid3 is not in the registered list. */ 2368 rc = bdev_nvme_delete("nvme0", &path3); 2369 CU_ASSERT(rc == -ENXIO); 2370 2371 /* trid2 is not used, and simply removed. */ 2372 rc = bdev_nvme_delete("nvme0", &path2); 2373 CU_ASSERT(rc == 0); 2374 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2375 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2376 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2377 } 2378 2379 ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false); 2380 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 2381 2382 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 2383 attach_ctrlr_done, NULL, NULL, NULL, false); 2384 CU_ASSERT(rc == 0); 2385 2386 spdk_delay_us(1000); 2387 poll_threads(); 2388 2389 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2390 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2391 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) { 2392 break; 2393 } 2394 } 2395 CU_ASSERT(ctrid != NULL); 2396 2397 /* path1 is currently used and path3 is an alternative path. 2398 * If we remove path1, path is changed to path3. 2399 */ 2400 rc = bdev_nvme_delete("nvme0", &path1); 2401 CU_ASSERT(rc == 0); 2402 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2403 CU_ASSERT(nvme_ctrlr->resetting == true); 2404 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2405 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0); 2406 } 2407 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0); 2408 2409 poll_threads(); 2410 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2411 poll_threads(); 2412 2413 CU_ASSERT(nvme_ctrlr->resetting == false); 2414 2415 /* path3 is the current and only path. If we remove path3, the corresponding 2416 * nvme_ctrlr is removed. 2417 */ 2418 rc = bdev_nvme_delete("nvme0", &path3); 2419 CU_ASSERT(rc == 0); 2420 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2421 2422 poll_threads(); 2423 spdk_delay_us(1000); 2424 poll_threads(); 2425 2426 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2427 2428 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2429 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2430 2431 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2432 attach_ctrlr_done, NULL, NULL, NULL, false); 2433 CU_ASSERT(rc == 0); 2434 2435 spdk_delay_us(1000); 2436 poll_threads(); 2437 2438 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2439 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2440 2441 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2442 2443 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2444 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2445 2446 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2447 attach_ctrlr_done, NULL, NULL, NULL, false); 2448 CU_ASSERT(rc == 0); 2449 2450 spdk_delay_us(1000); 2451 poll_threads(); 2452 2453 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2454 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2455 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2456 break; 2457 } 2458 } 2459 CU_ASSERT(ctrid != NULL); 2460 2461 /* If trid is not specified, nvme_ctrlr itself is removed. */ 2462 rc = bdev_nvme_delete("nvme0", &g_any_path); 2463 CU_ASSERT(rc == 0); 2464 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2465 2466 poll_threads(); 2467 spdk_delay_us(1000); 2468 poll_threads(); 2469 2470 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2471 } 2472 2473 static void 2474 test_abort(void) 2475 { 2476 struct spdk_nvme_transport_id trid = {}; 2477 struct nvme_ctrlr_opts opts = {}; 2478 struct spdk_nvme_ctrlr *ctrlr; 2479 struct nvme_ctrlr *nvme_ctrlr; 2480 const int STRING_SIZE = 32; 2481 const char *attached_names[STRING_SIZE]; 2482 struct nvme_bdev *bdev; 2483 struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io; 2484 struct spdk_io_channel *ch1, *ch2; 2485 struct nvme_bdev_channel *nbdev_ch1; 2486 struct nvme_io_path *io_path1; 2487 struct nvme_qpair *nvme_qpair1; 2488 int rc; 2489 2490 /* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on 2491 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests 2492 * are submitted on thread 1. Both should succeed. 2493 */ 2494 2495 ut_init_trid(&trid); 2496 2497 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2498 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2499 2500 g_ut_attach_ctrlr_status = 0; 2501 g_ut_attach_bdev_count = 1; 2502 2503 set_thread(1); 2504 2505 opts.ctrlr_loss_timeout_sec = -1; 2506 opts.reconnect_delay_sec = 1; 2507 2508 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2509 attach_ctrlr_done, NULL, NULL, &opts, false); 2510 CU_ASSERT(rc == 0); 2511 2512 spdk_delay_us(1000); 2513 poll_threads(); 2514 2515 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2516 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2517 2518 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2519 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2520 2521 write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 2522 ut_bdev_io_set_buf(write_io); 2523 2524 fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL); 2525 ut_bdev_io_set_buf(fuse_io); 2526 2527 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 2528 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2529 2530 abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL); 2531 2532 set_thread(0); 2533 2534 ch1 = spdk_get_io_channel(bdev); 2535 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2536 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 2537 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 2538 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 2539 nvme_qpair1 = io_path1->qpair; 2540 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 2541 2542 set_thread(1); 2543 2544 ch2 = spdk_get_io_channel(bdev); 2545 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2546 2547 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2548 fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2549 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2550 2551 /* Aborting the already completed request should fail. */ 2552 write_io->internal.in_submit_request = true; 2553 bdev_nvme_submit_request(ch1, write_io); 2554 poll_threads(); 2555 2556 CU_ASSERT(write_io->internal.in_submit_request == false); 2557 2558 abort_io->u.abort.bio_to_abort = write_io; 2559 abort_io->internal.in_submit_request = true; 2560 2561 bdev_nvme_submit_request(ch1, abort_io); 2562 2563 poll_threads(); 2564 2565 CU_ASSERT(abort_io->internal.in_submit_request == false); 2566 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2567 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2568 2569 admin_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2570 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2571 2572 admin_io->internal.in_submit_request = true; 2573 bdev_nvme_submit_request(ch1, admin_io); 2574 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2575 poll_threads(); 2576 2577 CU_ASSERT(admin_io->internal.in_submit_request == false); 2578 2579 abort_io->u.abort.bio_to_abort = admin_io; 2580 abort_io->internal.in_submit_request = true; 2581 2582 bdev_nvme_submit_request(ch2, abort_io); 2583 2584 poll_threads(); 2585 2586 CU_ASSERT(abort_io->internal.in_submit_request == false); 2587 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2588 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2589 2590 /* Aborting the write request should succeed. */ 2591 write_io->internal.in_submit_request = true; 2592 bdev_nvme_submit_request(ch1, write_io); 2593 2594 CU_ASSERT(write_io->internal.in_submit_request == true); 2595 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 2596 2597 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2598 abort_io->u.abort.bio_to_abort = write_io; 2599 abort_io->internal.in_submit_request = true; 2600 2601 bdev_nvme_submit_request(ch1, abort_io); 2602 2603 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2604 poll_threads(); 2605 2606 CU_ASSERT(abort_io->internal.in_submit_request == false); 2607 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2608 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2609 CU_ASSERT(write_io->internal.in_submit_request == false); 2610 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2611 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2612 2613 /* Aborting the fuse request should succeed. */ 2614 fuse_io->internal.in_submit_request = true; 2615 bdev_nvme_submit_request(ch1, fuse_io); 2616 2617 CU_ASSERT(fuse_io->internal.in_submit_request == true); 2618 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2); 2619 2620 abort_io->u.abort.bio_to_abort = fuse_io; 2621 abort_io->internal.in_submit_request = true; 2622 2623 bdev_nvme_submit_request(ch1, abort_io); 2624 2625 spdk_delay_us(10000); 2626 poll_threads(); 2627 2628 CU_ASSERT(abort_io->internal.in_submit_request == false); 2629 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2630 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2631 CU_ASSERT(fuse_io->internal.in_submit_request == false); 2632 CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2633 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2634 2635 /* Aborting the admin request should succeed. */ 2636 admin_io->internal.in_submit_request = true; 2637 bdev_nvme_submit_request(ch1, admin_io); 2638 2639 CU_ASSERT(admin_io->internal.in_submit_request == true); 2640 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2641 2642 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2643 abort_io->u.abort.bio_to_abort = admin_io; 2644 abort_io->internal.in_submit_request = true; 2645 2646 bdev_nvme_submit_request(ch2, abort_io); 2647 2648 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2649 poll_threads(); 2650 2651 CU_ASSERT(abort_io->internal.in_submit_request == false); 2652 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2653 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2654 CU_ASSERT(admin_io->internal.in_submit_request == false); 2655 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2656 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2657 2658 set_thread(0); 2659 2660 /* If qpair is disconnected, it is freed and then reconnected via resetting 2661 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 2662 * while resetting the nvme_ctrlr. 2663 */ 2664 nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 2665 2666 poll_thread_times(0, 3); 2667 2668 CU_ASSERT(nvme_qpair1->qpair == NULL); 2669 CU_ASSERT(nvme_ctrlr->resetting == true); 2670 2671 write_io->internal.in_submit_request = true; 2672 2673 bdev_nvme_submit_request(ch1, write_io); 2674 2675 CU_ASSERT(write_io->internal.in_submit_request == true); 2676 CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list)); 2677 2678 /* Aborting the queued write request should succeed immediately. */ 2679 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2680 abort_io->u.abort.bio_to_abort = write_io; 2681 abort_io->internal.in_submit_request = true; 2682 2683 bdev_nvme_submit_request(ch1, abort_io); 2684 2685 CU_ASSERT(abort_io->internal.in_submit_request == false); 2686 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2687 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2688 CU_ASSERT(write_io->internal.in_submit_request == false); 2689 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2690 2691 poll_threads(); 2692 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2693 poll_threads(); 2694 2695 spdk_put_io_channel(ch1); 2696 2697 set_thread(1); 2698 2699 spdk_put_io_channel(ch2); 2700 2701 poll_threads(); 2702 2703 free(write_io); 2704 free(fuse_io); 2705 free(admin_io); 2706 free(abort_io); 2707 2708 set_thread(1); 2709 2710 rc = bdev_nvme_delete("nvme0", &g_any_path); 2711 CU_ASSERT(rc == 0); 2712 2713 poll_threads(); 2714 spdk_delay_us(1000); 2715 poll_threads(); 2716 2717 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2718 } 2719 2720 static void 2721 test_get_io_qpair(void) 2722 { 2723 struct spdk_nvme_transport_id trid = {}; 2724 struct spdk_nvme_ctrlr ctrlr = {}; 2725 struct nvme_ctrlr *nvme_ctrlr = NULL; 2726 struct spdk_io_channel *ch; 2727 struct nvme_ctrlr_channel *ctrlr_ch; 2728 struct spdk_nvme_qpair *qpair; 2729 int rc; 2730 2731 ut_init_trid(&trid); 2732 TAILQ_INIT(&ctrlr.active_io_qpairs); 2733 2734 set_thread(0); 2735 2736 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 2737 CU_ASSERT(rc == 0); 2738 2739 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2740 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2741 2742 ch = spdk_get_io_channel(nvme_ctrlr); 2743 SPDK_CU_ASSERT_FATAL(ch != NULL); 2744 ctrlr_ch = spdk_io_channel_get_ctx(ch); 2745 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 2746 2747 qpair = bdev_nvme_get_io_qpair(ch); 2748 CU_ASSERT(qpair == ctrlr_ch->qpair->qpair); 2749 2750 spdk_put_io_channel(ch); 2751 2752 rc = bdev_nvme_delete("nvme0", &g_any_path); 2753 CU_ASSERT(rc == 0); 2754 2755 poll_threads(); 2756 spdk_delay_us(1000); 2757 poll_threads(); 2758 2759 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2760 } 2761 2762 /* Test a scenario that the bdev subsystem starts shutdown when there still exists 2763 * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a 2764 * test case to avoid regression for this scenario. spdk_bdev_unregister() calls 2765 * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly. 2766 */ 2767 static void 2768 test_bdev_unregister(void) 2769 { 2770 struct spdk_nvme_transport_id trid = {}; 2771 struct spdk_nvme_ctrlr *ctrlr; 2772 struct nvme_ctrlr *nvme_ctrlr; 2773 struct nvme_ns *nvme_ns1, *nvme_ns2; 2774 const int STRING_SIZE = 32; 2775 const char *attached_names[STRING_SIZE]; 2776 struct nvme_bdev *bdev1, *bdev2; 2777 int rc; 2778 2779 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2780 ut_init_trid(&trid); 2781 2782 ctrlr = ut_attach_ctrlr(&trid, 2, false, false); 2783 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2784 2785 g_ut_attach_ctrlr_status = 0; 2786 g_ut_attach_bdev_count = 2; 2787 2788 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2789 attach_ctrlr_done, NULL, NULL, NULL, false); 2790 CU_ASSERT(rc == 0); 2791 2792 spdk_delay_us(1000); 2793 poll_threads(); 2794 2795 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2796 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2797 2798 nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1); 2799 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 2800 2801 bdev1 = nvme_ns1->bdev; 2802 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 2803 2804 nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2); 2805 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 2806 2807 bdev2 = nvme_ns2->bdev; 2808 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 2809 2810 bdev_nvme_destruct(&bdev1->disk); 2811 bdev_nvme_destruct(&bdev2->disk); 2812 2813 poll_threads(); 2814 2815 CU_ASSERT(nvme_ns1->bdev == NULL); 2816 CU_ASSERT(nvme_ns2->bdev == NULL); 2817 2818 nvme_ctrlr->destruct = true; 2819 _nvme_ctrlr_destruct(nvme_ctrlr); 2820 2821 poll_threads(); 2822 spdk_delay_us(1000); 2823 poll_threads(); 2824 2825 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2826 } 2827 2828 static void 2829 test_compare_ns(void) 2830 { 2831 struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {}; 2832 struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, }; 2833 struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, }; 2834 struct spdk_uuid uuid1 = { .u.raw = { 0xAA } }; 2835 struct spdk_uuid uuid2 = { .u.raw = { 0xAB } }; 2836 2837 /* No IDs are defined. */ 2838 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2839 2840 /* Only EUI64 are defined and not matched. */ 2841 nsdata1.eui64 = 0xABCDEF0123456789; 2842 nsdata2.eui64 = 0xBBCDEF0123456789; 2843 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2844 2845 /* Only EUI64 are defined and matched. */ 2846 nsdata2.eui64 = 0xABCDEF0123456789; 2847 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2848 2849 /* Only NGUID are defined and not matched. */ 2850 nsdata1.eui64 = 0x0; 2851 nsdata2.eui64 = 0x0; 2852 nsdata1.nguid[0] = 0x12; 2853 nsdata2.nguid[0] = 0x10; 2854 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2855 2856 /* Only NGUID are defined and matched. */ 2857 nsdata2.nguid[0] = 0x12; 2858 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2859 2860 /* Only UUID are defined and not matched. */ 2861 nsdata1.nguid[0] = 0x0; 2862 nsdata2.nguid[0] = 0x0; 2863 ns1.uuid = &uuid1; 2864 ns2.uuid = &uuid2; 2865 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2866 2867 /* Only one UUID is defined. */ 2868 ns1.uuid = NULL; 2869 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2870 2871 /* Only UUID are defined and matched. */ 2872 ns1.uuid = &uuid2; 2873 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2874 2875 /* All EUI64, NGUID, and UUID are defined and matched. */ 2876 nsdata1.eui64 = 0x123456789ABCDEF; 2877 nsdata2.eui64 = 0x123456789ABCDEF; 2878 nsdata1.nguid[15] = 0x34; 2879 nsdata2.nguid[15] = 0x34; 2880 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2881 2882 /* CSI are not matched. */ 2883 ns1.csi = SPDK_NVME_CSI_ZNS; 2884 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2885 } 2886 2887 static void 2888 test_init_ana_log_page(void) 2889 { 2890 struct spdk_nvme_transport_id trid = {}; 2891 struct spdk_nvme_ctrlr *ctrlr; 2892 struct nvme_ctrlr *nvme_ctrlr; 2893 const int STRING_SIZE = 32; 2894 const char *attached_names[STRING_SIZE]; 2895 int rc; 2896 2897 set_thread(0); 2898 2899 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2900 ut_init_trid(&trid); 2901 2902 ctrlr = ut_attach_ctrlr(&trid, 5, true, false); 2903 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2904 2905 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 2906 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2907 ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2908 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 2909 ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2910 2911 g_ut_attach_ctrlr_status = 0; 2912 g_ut_attach_bdev_count = 5; 2913 2914 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2915 attach_ctrlr_done, NULL, NULL, NULL, false); 2916 CU_ASSERT(rc == 0); 2917 2918 spdk_delay_us(1000); 2919 poll_threads(); 2920 2921 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2922 poll_threads(); 2923 2924 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2925 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2926 2927 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2928 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2929 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2930 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2931 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL); 2932 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 2933 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2934 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2935 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE); 2936 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2937 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL); 2938 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL); 2939 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL); 2940 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL); 2941 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL); 2942 2943 rc = bdev_nvme_delete("nvme0", &g_any_path); 2944 CU_ASSERT(rc == 0); 2945 2946 poll_threads(); 2947 spdk_delay_us(1000); 2948 poll_threads(); 2949 2950 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2951 } 2952 2953 static void 2954 init_accel(void) 2955 { 2956 spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb, 2957 sizeof(int), "accel_p"); 2958 } 2959 2960 static void 2961 fini_accel(void) 2962 { 2963 spdk_io_device_unregister(g_accel_p, NULL); 2964 } 2965 2966 static void 2967 test_get_memory_domains(void) 2968 { 2969 struct nvme_ctrlr ctrlr = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef }; 2970 struct nvme_ns ns = { .ctrlr = &ctrlr }; 2971 struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) }; 2972 struct spdk_memory_domain *domains[2] = {}; 2973 int rc = 0; 2974 2975 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns, tailq); 2976 2977 /* nvme controller doesn't have memory domainы */ 2978 MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 0); 2979 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 2980 CU_ASSERT(rc == 0) 2981 2982 /* nvme controller has a memory domain */ 2983 MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 1); 2984 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 2985 CU_ASSERT(rc == 1); 2986 MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domain); 2987 } 2988 2989 static void 2990 test_reconnect_qpair(void) 2991 { 2992 struct spdk_nvme_transport_id trid = {}; 2993 struct spdk_nvme_ctrlr *ctrlr; 2994 struct nvme_ctrlr *nvme_ctrlr; 2995 const int STRING_SIZE = 32; 2996 const char *attached_names[STRING_SIZE]; 2997 struct nvme_bdev *bdev; 2998 struct spdk_io_channel *ch1, *ch2; 2999 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 3000 struct nvme_io_path *io_path1, *io_path2; 3001 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 3002 int rc; 3003 3004 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3005 ut_init_trid(&trid); 3006 3007 set_thread(0); 3008 3009 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 3010 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3011 3012 g_ut_attach_ctrlr_status = 0; 3013 g_ut_attach_bdev_count = 1; 3014 3015 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3016 attach_ctrlr_done, NULL, NULL, NULL, false); 3017 CU_ASSERT(rc == 0); 3018 3019 spdk_delay_us(1000); 3020 poll_threads(); 3021 3022 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3023 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3024 3025 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 3026 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3027 3028 ch1 = spdk_get_io_channel(bdev); 3029 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 3030 3031 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 3032 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 3033 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3034 nvme_qpair1 = io_path1->qpair; 3035 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 3036 3037 set_thread(1); 3038 3039 ch2 = spdk_get_io_channel(bdev); 3040 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 3041 3042 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 3043 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 3044 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3045 nvme_qpair2 = io_path2->qpair; 3046 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 3047 3048 /* If a qpair is disconnected, it is freed and then reconnected via 3049 * resetting the corresponding nvme_ctrlr. 3050 */ 3051 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3052 ctrlr->is_failed = true; 3053 3054 poll_thread_times(1, 3); 3055 CU_ASSERT(nvme_qpair1->qpair != NULL); 3056 CU_ASSERT(nvme_qpair2->qpair == NULL); 3057 CU_ASSERT(nvme_ctrlr->resetting == true); 3058 3059 poll_thread_times(0, 3); 3060 CU_ASSERT(nvme_qpair1->qpair == NULL); 3061 CU_ASSERT(nvme_qpair2->qpair == NULL); 3062 CU_ASSERT(ctrlr->is_failed == true); 3063 3064 poll_thread_times(1, 2); 3065 poll_thread_times(0, 1); 3066 CU_ASSERT(ctrlr->is_failed == false); 3067 CU_ASSERT(ctrlr->adminq.is_connected == false); 3068 3069 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3070 poll_thread_times(0, 2); 3071 CU_ASSERT(ctrlr->adminq.is_connected == true); 3072 3073 poll_thread_times(0, 1); 3074 poll_thread_times(1, 1); 3075 CU_ASSERT(nvme_qpair1->qpair != NULL); 3076 CU_ASSERT(nvme_qpair2->qpair != NULL); 3077 CU_ASSERT(nvme_ctrlr->resetting == true); 3078 3079 poll_thread_times(0, 2); 3080 poll_thread_times(1, 1); 3081 poll_thread_times(0, 1); 3082 CU_ASSERT(nvme_ctrlr->resetting == false); 3083 3084 poll_threads(); 3085 3086 /* If a qpair is disconnected and resetting the corresponding nvme_ctrlr 3087 * fails, the qpair is just freed. 3088 */ 3089 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3090 ctrlr->is_failed = true; 3091 ctrlr->fail_reset = true; 3092 3093 poll_thread_times(1, 3); 3094 CU_ASSERT(nvme_qpair1->qpair != NULL); 3095 CU_ASSERT(nvme_qpair2->qpair == NULL); 3096 CU_ASSERT(nvme_ctrlr->resetting == true); 3097 3098 poll_thread_times(0, 3); 3099 poll_thread_times(1, 1); 3100 CU_ASSERT(nvme_qpair1->qpair == NULL); 3101 CU_ASSERT(nvme_qpair2->qpair == NULL); 3102 CU_ASSERT(ctrlr->is_failed == true); 3103 3104 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3105 poll_thread_times(0, 3); 3106 poll_thread_times(1, 1); 3107 poll_thread_times(0, 1); 3108 CU_ASSERT(ctrlr->is_failed == true); 3109 CU_ASSERT(nvme_ctrlr->resetting == false); 3110 CU_ASSERT(nvme_qpair1->qpair == NULL); 3111 CU_ASSERT(nvme_qpair2->qpair == NULL); 3112 3113 poll_threads(); 3114 3115 spdk_put_io_channel(ch2); 3116 3117 set_thread(0); 3118 3119 spdk_put_io_channel(ch1); 3120 3121 poll_threads(); 3122 3123 rc = bdev_nvme_delete("nvme0", &g_any_path); 3124 CU_ASSERT(rc == 0); 3125 3126 poll_threads(); 3127 spdk_delay_us(1000); 3128 poll_threads(); 3129 3130 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3131 } 3132 3133 static void 3134 test_create_bdev_ctrlr(void) 3135 { 3136 struct nvme_path_id path1 = {}, path2 = {}; 3137 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3138 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3139 const int STRING_SIZE = 32; 3140 const char *attached_names[STRING_SIZE]; 3141 int rc; 3142 3143 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3144 ut_init_trid(&path1.trid); 3145 ut_init_trid2(&path2.trid); 3146 3147 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3148 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3149 3150 g_ut_attach_ctrlr_status = 0; 3151 g_ut_attach_bdev_count = 0; 3152 3153 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3154 attach_ctrlr_done, NULL, NULL, NULL, true); 3155 CU_ASSERT(rc == 0); 3156 3157 spdk_delay_us(1000); 3158 poll_threads(); 3159 3160 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3161 poll_threads(); 3162 3163 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3164 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3165 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3166 3167 /* cntlid is duplicated, and adding the second ctrlr should fail. */ 3168 g_ut_attach_ctrlr_status = -EINVAL; 3169 3170 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3171 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3172 3173 ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid; 3174 3175 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3176 attach_ctrlr_done, NULL, NULL, NULL, true); 3177 CU_ASSERT(rc == 0); 3178 3179 spdk_delay_us(1000); 3180 poll_threads(); 3181 3182 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3183 poll_threads(); 3184 3185 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 3186 3187 /* cntlid is not duplicated, and adding the third ctrlr should succeed. */ 3188 g_ut_attach_ctrlr_status = 0; 3189 3190 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3191 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3192 3193 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3194 attach_ctrlr_done, NULL, NULL, NULL, true); 3195 CU_ASSERT(rc == 0); 3196 3197 spdk_delay_us(1000); 3198 poll_threads(); 3199 3200 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3201 poll_threads(); 3202 3203 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3204 3205 /* Delete two ctrlrs at once. */ 3206 rc = bdev_nvme_delete("nvme0", &g_any_path); 3207 CU_ASSERT(rc == 0); 3208 3209 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3210 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3211 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3212 3213 poll_threads(); 3214 spdk_delay_us(1000); 3215 poll_threads(); 3216 3217 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3218 3219 /* Add two ctrlrs and delete one by one. */ 3220 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3221 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3222 3223 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3224 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3225 3226 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3227 attach_ctrlr_done, NULL, NULL, NULL, true); 3228 CU_ASSERT(rc == 0); 3229 3230 spdk_delay_us(1000); 3231 poll_threads(); 3232 3233 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3234 poll_threads(); 3235 3236 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3237 attach_ctrlr_done, NULL, NULL, NULL, true); 3238 CU_ASSERT(rc == 0); 3239 3240 spdk_delay_us(1000); 3241 poll_threads(); 3242 3243 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3244 poll_threads(); 3245 3246 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3247 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3248 3249 rc = bdev_nvme_delete("nvme0", &path1); 3250 CU_ASSERT(rc == 0); 3251 3252 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3253 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3254 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3255 3256 poll_threads(); 3257 spdk_delay_us(1000); 3258 poll_threads(); 3259 3260 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3261 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3262 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3263 3264 rc = bdev_nvme_delete("nvme0", &path2); 3265 CU_ASSERT(rc == 0); 3266 3267 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3268 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3269 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3270 3271 poll_threads(); 3272 spdk_delay_us(1000); 3273 poll_threads(); 3274 3275 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3276 } 3277 3278 static struct nvme_ns * 3279 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr) 3280 { 3281 struct nvme_ns *nvme_ns; 3282 3283 TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) { 3284 if (nvme_ns->ctrlr == nvme_ctrlr) { 3285 return nvme_ns; 3286 } 3287 } 3288 3289 return NULL; 3290 } 3291 3292 static void 3293 test_add_multi_ns_to_bdev(void) 3294 { 3295 struct nvme_path_id path1 = {}, path2 = {}; 3296 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3297 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 3298 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3299 struct nvme_ns *nvme_ns1, *nvme_ns2; 3300 struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4; 3301 const int STRING_SIZE = 32; 3302 const char *attached_names[STRING_SIZE]; 3303 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3304 struct spdk_uuid uuid2 = { .u.raw = { 0x2 } }; 3305 struct spdk_uuid uuid3 = { .u.raw = { 0x3 } }; 3306 struct spdk_uuid uuid4 = { .u.raw = { 0x4 } }; 3307 struct spdk_uuid uuid44 = { .u.raw = { 0x44 } }; 3308 int rc; 3309 3310 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3311 ut_init_trid(&path1.trid); 3312 ut_init_trid2(&path2.trid); 3313 3314 /* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */ 3315 3316 /* Attach 1st ctrlr, whose max number of namespaces is 5, and 1st, 3rd, and 4th 3317 * namespaces are populated. 3318 */ 3319 ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true); 3320 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3321 3322 ctrlr1->ns[1].is_active = false; 3323 ctrlr1->ns[4].is_active = false; 3324 ctrlr1->ns[0].uuid = &uuid1; 3325 ctrlr1->ns[2].uuid = &uuid3; 3326 ctrlr1->ns[3].uuid = &uuid4; 3327 3328 g_ut_attach_ctrlr_status = 0; 3329 g_ut_attach_bdev_count = 3; 3330 3331 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3332 attach_ctrlr_done, NULL, NULL, NULL, true); 3333 CU_ASSERT(rc == 0); 3334 3335 spdk_delay_us(1000); 3336 poll_threads(); 3337 3338 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3339 poll_threads(); 3340 3341 /* Attach 2nd ctrlr, whose max number of namespaces is 5, and 1st, 2nd, and 4th 3342 * namespaces are populated. The uuid of 4th namespace is different, and hence 3343 * adding 4th namespace to a bdev should fail. 3344 */ 3345 ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true); 3346 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3347 3348 ctrlr2->ns[2].is_active = false; 3349 ctrlr2->ns[4].is_active = false; 3350 ctrlr2->ns[0].uuid = &uuid1; 3351 ctrlr2->ns[1].uuid = &uuid2; 3352 ctrlr2->ns[3].uuid = &uuid44; 3353 3354 g_ut_attach_ctrlr_status = 0; 3355 g_ut_attach_bdev_count = 2; 3356 3357 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3358 attach_ctrlr_done, NULL, NULL, NULL, true); 3359 CU_ASSERT(rc == 0); 3360 3361 spdk_delay_us(1000); 3362 poll_threads(); 3363 3364 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3365 poll_threads(); 3366 3367 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3368 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3369 3370 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3371 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3372 3373 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL); 3374 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL); 3375 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL); 3376 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL); 3377 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL); 3378 3379 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3380 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3381 3382 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL); 3383 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL); 3384 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL); 3385 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL); 3386 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL); 3387 3388 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3389 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3390 bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2); 3391 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 3392 bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3); 3393 SPDK_CU_ASSERT_FATAL(bdev3 != NULL); 3394 bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4); 3395 SPDK_CU_ASSERT_FATAL(bdev4 != NULL); 3396 CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL); 3397 3398 CU_ASSERT(bdev1->ref == 2); 3399 CU_ASSERT(bdev2->ref == 1); 3400 CU_ASSERT(bdev3->ref == 1); 3401 CU_ASSERT(bdev4->ref == 1); 3402 3403 /* Test if nvme_bdevs can be deleted by deleting ctrlr one by one. */ 3404 rc = bdev_nvme_delete("nvme0", &path1); 3405 CU_ASSERT(rc == 0); 3406 3407 poll_threads(); 3408 spdk_delay_us(1000); 3409 poll_threads(); 3410 3411 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3412 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3413 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2); 3414 3415 rc = bdev_nvme_delete("nvme0", &path2); 3416 CU_ASSERT(rc == 0); 3417 3418 poll_threads(); 3419 spdk_delay_us(1000); 3420 poll_threads(); 3421 3422 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3423 3424 /* Test if a nvme_bdev which has a shared namespace between two ctrlrs 3425 * can be deleted when the bdev subsystem shutdown. 3426 */ 3427 g_ut_attach_bdev_count = 1; 3428 3429 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3430 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3431 3432 ctrlr1->ns[0].uuid = &uuid1; 3433 3434 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3435 attach_ctrlr_done, NULL, NULL, NULL, true); 3436 CU_ASSERT(rc == 0); 3437 3438 spdk_delay_us(1000); 3439 poll_threads(); 3440 3441 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3442 poll_threads(); 3443 3444 ut_init_trid2(&path2.trid); 3445 3446 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3447 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3448 3449 ctrlr2->ns[0].uuid = &uuid1; 3450 3451 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3452 attach_ctrlr_done, NULL, NULL, NULL, true); 3453 CU_ASSERT(rc == 0); 3454 3455 spdk_delay_us(1000); 3456 poll_threads(); 3457 3458 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3459 poll_threads(); 3460 3461 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3462 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3463 3464 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3465 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3466 3467 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3468 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3469 3470 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3471 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3472 3473 /* Check if a nvme_bdev has two nvme_ns. */ 3474 nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1); 3475 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3476 CU_ASSERT(nvme_ns1->bdev == bdev1); 3477 3478 nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2); 3479 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3480 CU_ASSERT(nvme_ns2->bdev == bdev1); 3481 3482 /* Delete nvme_bdev first when the bdev subsystem shutdown. */ 3483 bdev_nvme_destruct(&bdev1->disk); 3484 3485 poll_threads(); 3486 3487 CU_ASSERT(nvme_ns1->bdev == NULL); 3488 CU_ASSERT(nvme_ns2->bdev == NULL); 3489 3490 nvme_ctrlr1->destruct = true; 3491 _nvme_ctrlr_destruct(nvme_ctrlr1); 3492 3493 poll_threads(); 3494 spdk_delay_us(1000); 3495 poll_threads(); 3496 3497 nvme_ctrlr2->destruct = true; 3498 _nvme_ctrlr_destruct(nvme_ctrlr2); 3499 3500 poll_threads(); 3501 spdk_delay_us(1000); 3502 poll_threads(); 3503 3504 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3505 } 3506 3507 static void 3508 test_add_multi_io_paths_to_nbdev_ch(void) 3509 { 3510 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 3511 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 3512 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3513 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3; 3514 struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3; 3515 const int STRING_SIZE = 32; 3516 const char *attached_names[STRING_SIZE]; 3517 struct nvme_bdev *bdev; 3518 struct spdk_io_channel *ch; 3519 struct nvme_bdev_channel *nbdev_ch; 3520 struct nvme_io_path *io_path1, *io_path2, *io_path3; 3521 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3522 int rc; 3523 3524 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3525 ut_init_trid(&path1.trid); 3526 ut_init_trid2(&path2.trid); 3527 ut_init_trid3(&path3.trid); 3528 g_ut_attach_ctrlr_status = 0; 3529 g_ut_attach_bdev_count = 1; 3530 3531 set_thread(1); 3532 3533 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3534 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3535 3536 ctrlr1->ns[0].uuid = &uuid1; 3537 3538 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3539 attach_ctrlr_done, NULL, NULL, NULL, true); 3540 CU_ASSERT(rc == 0); 3541 3542 spdk_delay_us(1000); 3543 poll_threads(); 3544 3545 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3546 poll_threads(); 3547 3548 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3549 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3550 3551 ctrlr2->ns[0].uuid = &uuid1; 3552 3553 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3554 attach_ctrlr_done, NULL, NULL, NULL, true); 3555 CU_ASSERT(rc == 0); 3556 3557 spdk_delay_us(1000); 3558 poll_threads(); 3559 3560 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3561 poll_threads(); 3562 3563 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3564 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3565 3566 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3567 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3568 3569 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3570 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3571 3572 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3573 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3574 3575 nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1); 3576 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3577 3578 nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2); 3579 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3580 3581 set_thread(0); 3582 3583 ch = spdk_get_io_channel(bdev); 3584 SPDK_CU_ASSERT_FATAL(ch != NULL); 3585 nbdev_ch = spdk_io_channel_get_ctx(ch); 3586 3587 io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1); 3588 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3589 3590 io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2); 3591 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3592 3593 set_thread(1); 3594 3595 /* Check if I/O path is dynamically added to nvme_bdev_channel. */ 3596 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 3597 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 3598 3599 ctrlr3->ns[0].uuid = &uuid1; 3600 3601 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 3602 attach_ctrlr_done, NULL, NULL, NULL, true); 3603 CU_ASSERT(rc == 0); 3604 3605 spdk_delay_us(1000); 3606 poll_threads(); 3607 3608 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3609 poll_threads(); 3610 3611 nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid); 3612 SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL); 3613 3614 nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3); 3615 SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL); 3616 3617 io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3); 3618 SPDK_CU_ASSERT_FATAL(io_path3 != NULL); 3619 3620 /* Check if I/O path is dynamically deleted from nvme_bdev_channel. */ 3621 rc = bdev_nvme_delete("nvme0", &path2); 3622 CU_ASSERT(rc == 0); 3623 3624 poll_threads(); 3625 spdk_delay_us(1000); 3626 poll_threads(); 3627 3628 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1); 3629 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 3630 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3); 3631 3632 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1); 3633 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL); 3634 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3); 3635 3636 set_thread(0); 3637 3638 spdk_put_io_channel(ch); 3639 3640 poll_threads(); 3641 3642 set_thread(1); 3643 3644 rc = bdev_nvme_delete("nvme0", &g_any_path); 3645 CU_ASSERT(rc == 0); 3646 3647 poll_threads(); 3648 spdk_delay_us(1000); 3649 poll_threads(); 3650 3651 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3652 } 3653 3654 static void 3655 test_admin_path(void) 3656 { 3657 struct nvme_path_id path1 = {}, path2 = {}; 3658 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3659 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3660 const int STRING_SIZE = 32; 3661 const char *attached_names[STRING_SIZE]; 3662 struct nvme_bdev *bdev; 3663 struct spdk_io_channel *ch; 3664 struct spdk_bdev_io *bdev_io; 3665 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3666 int rc; 3667 3668 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3669 ut_init_trid(&path1.trid); 3670 ut_init_trid2(&path2.trid); 3671 g_ut_attach_ctrlr_status = 0; 3672 g_ut_attach_bdev_count = 1; 3673 3674 set_thread(0); 3675 3676 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3677 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3678 3679 ctrlr1->ns[0].uuid = &uuid1; 3680 3681 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3682 attach_ctrlr_done, NULL, NULL, NULL, true); 3683 CU_ASSERT(rc == 0); 3684 3685 spdk_delay_us(1000); 3686 poll_threads(); 3687 3688 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3689 poll_threads(); 3690 3691 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3692 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3693 3694 ctrlr2->ns[0].uuid = &uuid1; 3695 3696 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3697 attach_ctrlr_done, NULL, NULL, NULL, true); 3698 CU_ASSERT(rc == 0); 3699 3700 spdk_delay_us(1000); 3701 poll_threads(); 3702 3703 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3704 poll_threads(); 3705 3706 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3707 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3708 3709 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3710 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3711 3712 ch = spdk_get_io_channel(bdev); 3713 SPDK_CU_ASSERT_FATAL(ch != NULL); 3714 3715 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch); 3716 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 3717 3718 /* ctrlr1 is failed but ctrlr2 is not failed. admin command is 3719 * submitted to ctrlr2. 3720 */ 3721 ctrlr1->is_failed = true; 3722 bdev_io->internal.in_submit_request = true; 3723 3724 bdev_nvme_submit_request(ch, bdev_io); 3725 3726 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3727 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1); 3728 CU_ASSERT(bdev_io->internal.in_submit_request == true); 3729 3730 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3731 poll_threads(); 3732 3733 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3734 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3735 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 3736 3737 /* both ctrlr1 and ctrlr2 are failed. admin command is failed to submit. */ 3738 ctrlr2->is_failed = true; 3739 bdev_io->internal.in_submit_request = true; 3740 3741 bdev_nvme_submit_request(ch, bdev_io); 3742 3743 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3744 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3745 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3746 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 3747 3748 free(bdev_io); 3749 3750 spdk_put_io_channel(ch); 3751 3752 poll_threads(); 3753 3754 rc = bdev_nvme_delete("nvme0", &g_any_path); 3755 CU_ASSERT(rc == 0); 3756 3757 poll_threads(); 3758 spdk_delay_us(1000); 3759 poll_threads(); 3760 3761 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3762 } 3763 3764 static struct nvme_io_path * 3765 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch, 3766 struct nvme_ctrlr *nvme_ctrlr) 3767 { 3768 struct nvme_io_path *io_path; 3769 3770 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) { 3771 if (io_path->qpair->ctrlr == nvme_ctrlr) { 3772 return io_path; 3773 } 3774 } 3775 3776 return NULL; 3777 } 3778 3779 static void 3780 test_reset_bdev_ctrlr(void) 3781 { 3782 struct nvme_path_id path1 = {}, path2 = {}; 3783 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3784 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3785 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 3786 struct nvme_path_id *curr_path1, *curr_path2; 3787 const int STRING_SIZE = 32; 3788 const char *attached_names[STRING_SIZE]; 3789 struct nvme_bdev *bdev; 3790 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 3791 struct nvme_bdev_io *first_bio; 3792 struct spdk_io_channel *ch1, *ch2; 3793 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 3794 struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22; 3795 int rc; 3796 3797 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3798 ut_init_trid(&path1.trid); 3799 ut_init_trid2(&path2.trid); 3800 g_ut_attach_ctrlr_status = 0; 3801 g_ut_attach_bdev_count = 1; 3802 3803 set_thread(0); 3804 3805 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3806 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3807 3808 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3809 attach_ctrlr_done, NULL, NULL, NULL, true); 3810 CU_ASSERT(rc == 0); 3811 3812 spdk_delay_us(1000); 3813 poll_threads(); 3814 3815 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3816 poll_threads(); 3817 3818 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3819 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3820 3821 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3822 attach_ctrlr_done, NULL, NULL, NULL, true); 3823 CU_ASSERT(rc == 0); 3824 3825 spdk_delay_us(1000); 3826 poll_threads(); 3827 3828 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3829 poll_threads(); 3830 3831 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3832 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3833 3834 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3835 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3836 3837 curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 3838 SPDK_CU_ASSERT_FATAL(curr_path1 != NULL); 3839 3840 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3841 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3842 3843 curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 3844 SPDK_CU_ASSERT_FATAL(curr_path2 != NULL); 3845 3846 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3847 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3848 3849 set_thread(0); 3850 3851 ch1 = spdk_get_io_channel(bdev); 3852 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 3853 3854 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 3855 io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1); 3856 SPDK_CU_ASSERT_FATAL(io_path11 != NULL); 3857 io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2); 3858 SPDK_CU_ASSERT_FATAL(io_path12 != NULL); 3859 3860 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 3861 first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx; 3862 3863 set_thread(1); 3864 3865 ch2 = spdk_get_io_channel(bdev); 3866 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 3867 3868 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 3869 io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1); 3870 SPDK_CU_ASSERT_FATAL(io_path21 != NULL); 3871 io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2); 3872 SPDK_CU_ASSERT_FATAL(io_path22 != NULL); 3873 3874 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 3875 3876 /* The first reset request from bdev_io is submitted on thread 0. 3877 * Check if ctrlr1 is reset and then ctrlr2 is reset. 3878 * 3879 * A few extra polls are necessary after resetting ctrlr1 to check 3880 * pending reset requests for ctrlr1. 3881 */ 3882 ctrlr1->is_failed = true; 3883 curr_path1->is_failed = true; 3884 ctrlr2->is_failed = true; 3885 curr_path2->is_failed = true; 3886 3887 set_thread(0); 3888 3889 bdev_nvme_submit_request(ch1, first_bdev_io); 3890 CU_ASSERT(first_bio->io_path == io_path11); 3891 CU_ASSERT(nvme_ctrlr1->resetting == true); 3892 CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio); 3893 3894 poll_thread_times(0, 3); 3895 CU_ASSERT(io_path11->qpair->qpair == NULL); 3896 CU_ASSERT(io_path21->qpair->qpair != NULL); 3897 3898 poll_thread_times(1, 2); 3899 CU_ASSERT(io_path11->qpair->qpair == NULL); 3900 CU_ASSERT(io_path21->qpair->qpair == NULL); 3901 CU_ASSERT(ctrlr1->is_failed == true); 3902 3903 poll_thread_times(0, 1); 3904 CU_ASSERT(nvme_ctrlr1->resetting == true); 3905 CU_ASSERT(ctrlr1->is_failed == false); 3906 CU_ASSERT(ctrlr1->adminq.is_connected == false); 3907 CU_ASSERT(curr_path1->is_failed == true); 3908 3909 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3910 poll_thread_times(0, 2); 3911 CU_ASSERT(ctrlr1->adminq.is_connected == true); 3912 3913 poll_thread_times(0, 1); 3914 CU_ASSERT(io_path11->qpair->qpair != NULL); 3915 CU_ASSERT(io_path21->qpair->qpair == NULL); 3916 3917 poll_thread_times(1, 1); 3918 CU_ASSERT(io_path11->qpair->qpair != NULL); 3919 CU_ASSERT(io_path21->qpair->qpair != NULL); 3920 3921 poll_thread_times(0, 2); 3922 CU_ASSERT(nvme_ctrlr1->resetting == true); 3923 poll_thread_times(1, 1); 3924 CU_ASSERT(nvme_ctrlr1->resetting == true); 3925 poll_thread_times(0, 2); 3926 CU_ASSERT(nvme_ctrlr1->resetting == false); 3927 CU_ASSERT(curr_path1->is_failed == false); 3928 CU_ASSERT(first_bio->io_path == io_path12); 3929 CU_ASSERT(nvme_ctrlr2->resetting == true); 3930 3931 poll_thread_times(0, 3); 3932 CU_ASSERT(io_path12->qpair->qpair == NULL); 3933 CU_ASSERT(io_path22->qpair->qpair != NULL); 3934 3935 poll_thread_times(1, 2); 3936 CU_ASSERT(io_path12->qpair->qpair == NULL); 3937 CU_ASSERT(io_path22->qpair->qpair == NULL); 3938 CU_ASSERT(ctrlr2->is_failed == true); 3939 3940 poll_thread_times(0, 2); 3941 CU_ASSERT(nvme_ctrlr2->resetting == true); 3942 CU_ASSERT(ctrlr2->is_failed == false); 3943 CU_ASSERT(ctrlr2->adminq.is_connected == false); 3944 CU_ASSERT(curr_path2->is_failed == true); 3945 3946 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3947 poll_thread_times(0, 2); 3948 CU_ASSERT(ctrlr2->adminq.is_connected == true); 3949 3950 poll_thread_times(0, 1); 3951 CU_ASSERT(io_path12->qpair->qpair != NULL); 3952 CU_ASSERT(io_path22->qpair->qpair == NULL); 3953 3954 poll_thread_times(1, 2); 3955 CU_ASSERT(io_path12->qpair->qpair != NULL); 3956 CU_ASSERT(io_path22->qpair->qpair != NULL); 3957 3958 poll_thread_times(0, 2); 3959 CU_ASSERT(nvme_ctrlr2->resetting == true); 3960 poll_thread_times(1, 1); 3961 CU_ASSERT(nvme_ctrlr2->resetting == true); 3962 poll_thread_times(0, 2); 3963 CU_ASSERT(first_bio->io_path == NULL); 3964 CU_ASSERT(nvme_ctrlr2->resetting == false); 3965 CU_ASSERT(curr_path2->is_failed == false); 3966 3967 poll_threads(); 3968 3969 /* There is a race between two reset requests from bdev_io. 3970 * 3971 * The first reset request is submitted on thread 0, and the second reset 3972 * request is submitted on thread 1 while the first is resetting ctrlr1. 3973 * The second is pending on ctrlr1. After the first completes resetting ctrlr1, 3974 * both reset requests go to ctrlr2. The first comes earlier than the second. 3975 * The second is pending on ctrlr2 again. After the first completes resetting 3976 * ctrl2, both complete successfully. 3977 */ 3978 ctrlr1->is_failed = true; 3979 curr_path1->is_failed = true; 3980 ctrlr2->is_failed = true; 3981 curr_path2->is_failed = true; 3982 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 3983 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 3984 3985 set_thread(0); 3986 3987 bdev_nvme_submit_request(ch1, first_bdev_io); 3988 3989 set_thread(1); 3990 3991 bdev_nvme_submit_request(ch2, second_bdev_io); 3992 3993 CU_ASSERT(nvme_ctrlr1->resetting == true); 3994 CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio); 3995 CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io); 3996 3997 poll_threads(); 3998 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3999 poll_threads(); 4000 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4001 poll_threads(); 4002 4003 CU_ASSERT(ctrlr1->is_failed == false); 4004 CU_ASSERT(curr_path1->is_failed == false); 4005 CU_ASSERT(ctrlr2->is_failed == false); 4006 CU_ASSERT(curr_path2->is_failed == false); 4007 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4008 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4009 4010 set_thread(0); 4011 4012 spdk_put_io_channel(ch1); 4013 4014 set_thread(1); 4015 4016 spdk_put_io_channel(ch2); 4017 4018 poll_threads(); 4019 4020 set_thread(0); 4021 4022 rc = bdev_nvme_delete("nvme0", &g_any_path); 4023 CU_ASSERT(rc == 0); 4024 4025 poll_threads(); 4026 spdk_delay_us(1000); 4027 poll_threads(); 4028 4029 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4030 4031 free(first_bdev_io); 4032 free(second_bdev_io); 4033 } 4034 4035 static void 4036 test_find_io_path(void) 4037 { 4038 struct nvme_bdev_channel nbdev_ch = { 4039 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 4040 }; 4041 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}; 4042 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 4043 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 4044 struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}; 4045 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, }; 4046 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, }; 4047 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 4048 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 4049 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 4050 4051 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 4052 4053 /* Test if io_path whose ANA state is not accessible is excluded. */ 4054 4055 nvme_qpair1.qpair = &qpair1; 4056 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4057 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4058 4059 nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 4060 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4061 4062 nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 4063 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4064 4065 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4066 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4067 4068 nbdev_ch.current_io_path = NULL; 4069 4070 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4071 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4072 4073 nbdev_ch.current_io_path = NULL; 4074 4075 /* Test if io_path whose qpair is resetting is excluded. */ 4076 4077 nvme_qpair1.qpair = NULL; 4078 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4079 4080 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 4081 4082 /* Test if ANA optimized state or the first found ANA non-optimized state 4083 * is prioritized. 4084 */ 4085 4086 nvme_qpair1.qpair = &qpair1; 4087 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4088 nvme_qpair2.qpair = &qpair2; 4089 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4090 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 4091 4092 nbdev_ch.current_io_path = NULL; 4093 4094 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4095 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4096 4097 nbdev_ch.current_io_path = NULL; 4098 } 4099 4100 static void 4101 test_retry_io_if_ana_state_is_updating(void) 4102 { 4103 struct nvme_path_id path = {}; 4104 struct nvme_ctrlr_opts opts = {}; 4105 struct spdk_nvme_ctrlr *ctrlr; 4106 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4107 struct nvme_ctrlr *nvme_ctrlr; 4108 const int STRING_SIZE = 32; 4109 const char *attached_names[STRING_SIZE]; 4110 struct nvme_bdev *bdev; 4111 struct nvme_ns *nvme_ns; 4112 struct spdk_bdev_io *bdev_io1; 4113 struct spdk_io_channel *ch; 4114 struct nvme_bdev_channel *nbdev_ch; 4115 struct nvme_io_path *io_path; 4116 struct nvme_qpair *nvme_qpair; 4117 int rc; 4118 4119 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4120 ut_init_trid(&path.trid); 4121 4122 set_thread(0); 4123 4124 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4125 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4126 4127 g_ut_attach_ctrlr_status = 0; 4128 g_ut_attach_bdev_count = 1; 4129 4130 opts.ctrlr_loss_timeout_sec = -1; 4131 opts.reconnect_delay_sec = 1; 4132 4133 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4134 attach_ctrlr_done, NULL, NULL, &opts, false); 4135 CU_ASSERT(rc == 0); 4136 4137 spdk_delay_us(1000); 4138 poll_threads(); 4139 4140 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4141 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4142 4143 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4144 CU_ASSERT(nvme_ctrlr != NULL); 4145 4146 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4147 CU_ASSERT(bdev != NULL); 4148 4149 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4150 CU_ASSERT(nvme_ns != NULL); 4151 4152 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4153 ut_bdev_io_set_buf(bdev_io1); 4154 4155 ch = spdk_get_io_channel(bdev); 4156 SPDK_CU_ASSERT_FATAL(ch != NULL); 4157 4158 nbdev_ch = spdk_io_channel_get_ctx(ch); 4159 4160 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4161 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4162 4163 nvme_qpair = io_path->qpair; 4164 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4165 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4166 4167 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 4168 4169 /* If qpair is connected, I/O should succeed. */ 4170 bdev_io1->internal.in_submit_request = true; 4171 4172 bdev_nvme_submit_request(ch, bdev_io1); 4173 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4174 4175 poll_threads(); 4176 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4177 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 4178 4179 /* If ANA state of namespace is inaccessible, I/O should be queued. */ 4180 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4181 nbdev_ch->current_io_path = NULL; 4182 4183 bdev_io1->internal.in_submit_request = true; 4184 4185 bdev_nvme_submit_request(ch, bdev_io1); 4186 4187 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4188 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4189 CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4190 4191 /* ANA state became accessible while I/O was queued. */ 4192 nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4193 4194 spdk_delay_us(1000000); 4195 4196 poll_thread_times(0, 1); 4197 4198 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4199 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4200 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 4201 4202 poll_threads(); 4203 4204 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4205 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4206 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4207 4208 free(bdev_io1); 4209 4210 spdk_put_io_channel(ch); 4211 4212 poll_threads(); 4213 4214 rc = bdev_nvme_delete("nvme0", &g_any_path); 4215 CU_ASSERT(rc == 0); 4216 4217 poll_threads(); 4218 spdk_delay_us(1000); 4219 poll_threads(); 4220 4221 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4222 } 4223 4224 static void 4225 test_retry_io_for_io_path_error(void) 4226 { 4227 struct nvme_path_id path1 = {}, path2 = {}; 4228 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4229 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4230 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4231 const int STRING_SIZE = 32; 4232 const char *attached_names[STRING_SIZE]; 4233 struct nvme_bdev *bdev; 4234 struct nvme_ns *nvme_ns1, *nvme_ns2; 4235 struct spdk_bdev_io *bdev_io; 4236 struct nvme_bdev_io *bio; 4237 struct spdk_io_channel *ch; 4238 struct nvme_bdev_channel *nbdev_ch; 4239 struct nvme_io_path *io_path1, *io_path2; 4240 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 4241 struct ut_nvme_req *req; 4242 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 4243 int rc; 4244 4245 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4246 ut_init_trid(&path1.trid); 4247 ut_init_trid2(&path2.trid); 4248 4249 g_opts.bdev_retry_count = 1; 4250 4251 set_thread(0); 4252 4253 g_ut_attach_ctrlr_status = 0; 4254 g_ut_attach_bdev_count = 1; 4255 4256 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4257 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4258 4259 ctrlr1->ns[0].uuid = &uuid1; 4260 4261 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4262 attach_ctrlr_done, NULL, NULL, NULL, true); 4263 CU_ASSERT(rc == 0); 4264 4265 spdk_delay_us(1000); 4266 poll_threads(); 4267 4268 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4269 poll_threads(); 4270 4271 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4272 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4273 4274 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 4275 CU_ASSERT(nvme_ctrlr1 != NULL); 4276 4277 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4278 CU_ASSERT(bdev != NULL); 4279 4280 nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1); 4281 CU_ASSERT(nvme_ns1 != NULL); 4282 CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1)); 4283 4284 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4285 ut_bdev_io_set_buf(bdev_io); 4286 4287 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4288 4289 ch = spdk_get_io_channel(bdev); 4290 SPDK_CU_ASSERT_FATAL(ch != NULL); 4291 4292 nbdev_ch = spdk_io_channel_get_ctx(ch); 4293 4294 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 4295 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 4296 4297 nvme_qpair1 = io_path1->qpair; 4298 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 4299 SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL); 4300 4301 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4302 4303 /* I/O got a temporary I/O path error, but it should not retry if DNR is set. */ 4304 bdev_io->internal.in_submit_request = true; 4305 4306 bdev_nvme_submit_request(ch, bdev_io); 4307 4308 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4309 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4310 4311 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4312 SPDK_CU_ASSERT_FATAL(req != NULL); 4313 4314 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4315 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4316 req->cpl.status.dnr = 1; 4317 4318 poll_thread_times(0, 1); 4319 4320 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4321 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4322 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4323 4324 /* I/O got a temporary I/O path error, but it should succeed after retry. */ 4325 bdev_io->internal.in_submit_request = true; 4326 4327 bdev_nvme_submit_request(ch, bdev_io); 4328 4329 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4330 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4331 4332 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4333 SPDK_CU_ASSERT_FATAL(req != NULL); 4334 4335 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4336 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4337 4338 poll_thread_times(0, 1); 4339 4340 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4341 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4342 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4343 4344 poll_threads(); 4345 4346 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4347 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4348 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4349 4350 /* Add io_path2 dynamically, and create a multipath configuration. */ 4351 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4352 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4353 4354 ctrlr2->ns[0].uuid = &uuid1; 4355 4356 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4357 attach_ctrlr_done, NULL, NULL, NULL, true); 4358 CU_ASSERT(rc == 0); 4359 4360 spdk_delay_us(1000); 4361 poll_threads(); 4362 4363 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4364 poll_threads(); 4365 4366 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 4367 CU_ASSERT(nvme_ctrlr2 != NULL); 4368 4369 nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2); 4370 CU_ASSERT(nvme_ns2 != NULL); 4371 CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2)); 4372 4373 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 4374 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 4375 4376 nvme_qpair2 = io_path2->qpair; 4377 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 4378 SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL); 4379 4380 /* I/O is submitted to io_path1, but qpair of io_path1 was disconnected 4381 * and deleted. Hence the I/O was aborted. But io_path2 is available. 4382 * So after a retry, I/O is submitted to io_path2 and should succeed. 4383 */ 4384 bdev_io->internal.in_submit_request = true; 4385 4386 bdev_nvme_submit_request(ch, bdev_io); 4387 4388 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4389 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4390 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4391 4392 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4393 SPDK_CU_ASSERT_FATAL(req != NULL); 4394 4395 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION; 4396 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4397 4398 poll_thread_times(0, 1); 4399 4400 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4401 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4402 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4403 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4404 4405 spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair); 4406 nvme_qpair1->qpair = NULL; 4407 4408 poll_threads(); 4409 4410 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4411 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4412 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4413 4414 free(bdev_io); 4415 4416 spdk_put_io_channel(ch); 4417 4418 poll_threads(); 4419 4420 rc = bdev_nvme_delete("nvme0", &g_any_path); 4421 CU_ASSERT(rc == 0); 4422 4423 poll_threads(); 4424 spdk_delay_us(1000); 4425 poll_threads(); 4426 4427 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4428 4429 g_opts.bdev_retry_count = 0; 4430 } 4431 4432 static void 4433 test_retry_io_count(void) 4434 { 4435 struct nvme_path_id path = {}; 4436 struct spdk_nvme_ctrlr *ctrlr; 4437 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4438 struct nvme_ctrlr *nvme_ctrlr; 4439 const int STRING_SIZE = 32; 4440 const char *attached_names[STRING_SIZE]; 4441 struct nvme_bdev *bdev; 4442 struct nvme_ns *nvme_ns; 4443 struct spdk_bdev_io *bdev_io; 4444 struct nvme_bdev_io *bio; 4445 struct spdk_io_channel *ch; 4446 struct nvme_bdev_channel *nbdev_ch; 4447 struct nvme_io_path *io_path; 4448 struct nvme_qpair *nvme_qpair; 4449 struct ut_nvme_req *req; 4450 int rc; 4451 4452 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4453 ut_init_trid(&path.trid); 4454 4455 set_thread(0); 4456 4457 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4458 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4459 4460 g_ut_attach_ctrlr_status = 0; 4461 g_ut_attach_bdev_count = 1; 4462 4463 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4464 attach_ctrlr_done, NULL, NULL, NULL, false); 4465 CU_ASSERT(rc == 0); 4466 4467 spdk_delay_us(1000); 4468 poll_threads(); 4469 4470 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4471 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4472 4473 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4474 CU_ASSERT(nvme_ctrlr != NULL); 4475 4476 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4477 CU_ASSERT(bdev != NULL); 4478 4479 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4480 CU_ASSERT(nvme_ns != NULL); 4481 4482 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4483 ut_bdev_io_set_buf(bdev_io); 4484 4485 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4486 4487 ch = spdk_get_io_channel(bdev); 4488 SPDK_CU_ASSERT_FATAL(ch != NULL); 4489 4490 nbdev_ch = spdk_io_channel_get_ctx(ch); 4491 4492 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4493 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4494 4495 nvme_qpair = io_path->qpair; 4496 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4497 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4498 4499 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4500 4501 /* If I/O is aborted by request, it should not be retried. */ 4502 g_opts.bdev_retry_count = 1; 4503 4504 bdev_io->internal.in_submit_request = true; 4505 4506 bdev_nvme_submit_request(ch, bdev_io); 4507 4508 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4509 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4510 4511 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4512 SPDK_CU_ASSERT_FATAL(req != NULL); 4513 4514 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 4515 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4516 4517 poll_thread_times(0, 1); 4518 4519 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4520 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4521 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 4522 4523 /* If bio->retry_count is not less than g_opts.bdev_retry_count, 4524 * the failed I/O should not be retried. 4525 */ 4526 g_opts.bdev_retry_count = 4; 4527 4528 bdev_io->internal.in_submit_request = true; 4529 4530 bdev_nvme_submit_request(ch, bdev_io); 4531 4532 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4533 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4534 4535 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4536 SPDK_CU_ASSERT_FATAL(req != NULL); 4537 4538 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4539 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4540 bio->retry_count = 4; 4541 4542 poll_thread_times(0, 1); 4543 4544 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4545 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4546 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4547 4548 /* If g_opts.bdev_retry_count is -1, the failed I/O always should be retried. */ 4549 g_opts.bdev_retry_count = -1; 4550 4551 bdev_io->internal.in_submit_request = true; 4552 4553 bdev_nvme_submit_request(ch, bdev_io); 4554 4555 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4556 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4557 4558 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4559 SPDK_CU_ASSERT_FATAL(req != NULL); 4560 4561 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4562 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4563 bio->retry_count = 4; 4564 4565 poll_thread_times(0, 1); 4566 4567 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4568 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4569 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4570 4571 poll_threads(); 4572 4573 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4574 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4575 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4576 4577 /* If bio->retry_count is less than g_opts.bdev_retry_count, 4578 * the failed I/O should be retried. 4579 */ 4580 g_opts.bdev_retry_count = 4; 4581 4582 bdev_io->internal.in_submit_request = true; 4583 4584 bdev_nvme_submit_request(ch, bdev_io); 4585 4586 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4587 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4588 4589 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4590 SPDK_CU_ASSERT_FATAL(req != NULL); 4591 4592 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4593 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4594 bio->retry_count = 3; 4595 4596 poll_thread_times(0, 1); 4597 4598 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4599 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4600 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4601 4602 poll_threads(); 4603 4604 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4605 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4606 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4607 4608 free(bdev_io); 4609 4610 spdk_put_io_channel(ch); 4611 4612 poll_threads(); 4613 4614 rc = bdev_nvme_delete("nvme0", &g_any_path); 4615 CU_ASSERT(rc == 0); 4616 4617 poll_threads(); 4618 spdk_delay_us(1000); 4619 poll_threads(); 4620 4621 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4622 4623 g_opts.bdev_retry_count = 0; 4624 } 4625 4626 static void 4627 test_concurrent_read_ana_log_page(void) 4628 { 4629 struct spdk_nvme_transport_id trid = {}; 4630 struct spdk_nvme_ctrlr *ctrlr; 4631 struct nvme_ctrlr *nvme_ctrlr; 4632 const int STRING_SIZE = 32; 4633 const char *attached_names[STRING_SIZE]; 4634 int rc; 4635 4636 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4637 ut_init_trid(&trid); 4638 4639 set_thread(0); 4640 4641 ctrlr = ut_attach_ctrlr(&trid, 1, true, false); 4642 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4643 4644 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4645 4646 g_ut_attach_ctrlr_status = 0; 4647 g_ut_attach_bdev_count = 1; 4648 4649 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 4650 attach_ctrlr_done, NULL, NULL, NULL, false); 4651 CU_ASSERT(rc == 0); 4652 4653 spdk_delay_us(1000); 4654 poll_threads(); 4655 4656 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4657 poll_threads(); 4658 4659 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 4660 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 4661 4662 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4663 4664 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 4665 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4666 4667 /* Following read request should be rejected. */ 4668 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4669 4670 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4671 4672 set_thread(1); 4673 4674 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4675 4676 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4677 4678 /* Reset request while reading ANA log page should not be rejected. */ 4679 rc = bdev_nvme_reset(nvme_ctrlr); 4680 CU_ASSERT(rc == 0); 4681 4682 poll_threads(); 4683 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4684 poll_threads(); 4685 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4686 poll_threads(); 4687 4688 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4689 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 4690 4691 /* Read ANA log page while resetting ctrlr should be rejected. */ 4692 rc = bdev_nvme_reset(nvme_ctrlr); 4693 CU_ASSERT(rc == 0); 4694 4695 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4696 4697 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4698 4699 poll_threads(); 4700 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4701 poll_threads(); 4702 4703 set_thread(0); 4704 4705 rc = bdev_nvme_delete("nvme0", &g_any_path); 4706 CU_ASSERT(rc == 0); 4707 4708 poll_threads(); 4709 spdk_delay_us(1000); 4710 poll_threads(); 4711 4712 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 4713 } 4714 4715 static void 4716 test_retry_io_for_ana_error(void) 4717 { 4718 struct nvme_path_id path = {}; 4719 struct spdk_nvme_ctrlr *ctrlr; 4720 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4721 struct nvme_ctrlr *nvme_ctrlr; 4722 const int STRING_SIZE = 32; 4723 const char *attached_names[STRING_SIZE]; 4724 struct nvme_bdev *bdev; 4725 struct nvme_ns *nvme_ns; 4726 struct spdk_bdev_io *bdev_io; 4727 struct nvme_bdev_io *bio; 4728 struct spdk_io_channel *ch; 4729 struct nvme_bdev_channel *nbdev_ch; 4730 struct nvme_io_path *io_path; 4731 struct nvme_qpair *nvme_qpair; 4732 struct ut_nvme_req *req; 4733 uint64_t now; 4734 int rc; 4735 4736 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4737 ut_init_trid(&path.trid); 4738 4739 g_opts.bdev_retry_count = 1; 4740 4741 set_thread(0); 4742 4743 ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false); 4744 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4745 4746 g_ut_attach_ctrlr_status = 0; 4747 g_ut_attach_bdev_count = 1; 4748 4749 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4750 attach_ctrlr_done, NULL, NULL, NULL, false); 4751 CU_ASSERT(rc == 0); 4752 4753 spdk_delay_us(1000); 4754 poll_threads(); 4755 4756 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4757 poll_threads(); 4758 4759 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4760 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4761 4762 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4763 CU_ASSERT(nvme_ctrlr != NULL); 4764 4765 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4766 CU_ASSERT(bdev != NULL); 4767 4768 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4769 CU_ASSERT(nvme_ns != NULL); 4770 4771 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4772 ut_bdev_io_set_buf(bdev_io); 4773 4774 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4775 4776 ch = spdk_get_io_channel(bdev); 4777 SPDK_CU_ASSERT_FATAL(ch != NULL); 4778 4779 nbdev_ch = spdk_io_channel_get_ctx(ch); 4780 4781 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4782 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4783 4784 nvme_qpair = io_path->qpair; 4785 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4786 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4787 4788 now = spdk_get_ticks(); 4789 4790 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4791 4792 /* If I/O got ANA error, it should be queued, the corresponding namespace 4793 * should be freezed and its ANA state should be updated. 4794 */ 4795 bdev_io->internal.in_submit_request = true; 4796 4797 bdev_nvme_submit_request(ch, bdev_io); 4798 4799 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4800 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4801 4802 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4803 SPDK_CU_ASSERT_FATAL(req != NULL); 4804 4805 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4806 req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE; 4807 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4808 4809 poll_thread_times(0, 1); 4810 4811 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4812 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4813 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4814 /* I/O should be retried immediately. */ 4815 CU_ASSERT(bio->retry_ticks == now); 4816 CU_ASSERT(nvme_ns->ana_state_updating == true); 4817 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 4818 4819 poll_threads(); 4820 4821 /* Namespace is inaccessible, and hence I/O should be queued again. */ 4822 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4823 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4824 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4825 /* I/O should be retried after a second if no I/O path was found but 4826 * any I/O path may become available. 4827 */ 4828 CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz()); 4829 4830 /* Namespace should be unfreezed after completing to update its ANA state. */ 4831 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4832 poll_threads(); 4833 4834 CU_ASSERT(nvme_ns->ana_state_updating == false); 4835 CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 4836 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4837 4838 /* Retry the queued I/O should succeed. */ 4839 spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us); 4840 poll_threads(); 4841 4842 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4843 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4844 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4845 4846 free(bdev_io); 4847 4848 spdk_put_io_channel(ch); 4849 4850 poll_threads(); 4851 4852 rc = bdev_nvme_delete("nvme0", &g_any_path); 4853 CU_ASSERT(rc == 0); 4854 4855 poll_threads(); 4856 spdk_delay_us(1000); 4857 poll_threads(); 4858 4859 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4860 4861 g_opts.bdev_retry_count = 0; 4862 } 4863 4864 static void 4865 test_retry_admin_passthru_for_path_error(void) 4866 { 4867 struct nvme_path_id path1 = {}, path2 = {}; 4868 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4869 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4870 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4871 const int STRING_SIZE = 32; 4872 const char *attached_names[STRING_SIZE]; 4873 struct nvme_bdev *bdev; 4874 struct spdk_bdev_io *admin_io; 4875 struct spdk_io_channel *ch; 4876 struct ut_nvme_req *req; 4877 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 4878 int rc; 4879 4880 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4881 ut_init_trid(&path1.trid); 4882 ut_init_trid2(&path2.trid); 4883 4884 g_opts.bdev_retry_count = 1; 4885 4886 set_thread(0); 4887 4888 g_ut_attach_ctrlr_status = 0; 4889 g_ut_attach_bdev_count = 1; 4890 4891 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4892 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4893 4894 ctrlr1->ns[0].uuid = &uuid1; 4895 4896 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4897 attach_ctrlr_done, NULL, NULL, NULL, true); 4898 CU_ASSERT(rc == 0); 4899 4900 spdk_delay_us(1000); 4901 poll_threads(); 4902 4903 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4904 poll_threads(); 4905 4906 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4907 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4908 4909 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 4910 CU_ASSERT(nvme_ctrlr1 != NULL); 4911 4912 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4913 CU_ASSERT(bdev != NULL); 4914 4915 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 4916 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 4917 4918 ch = spdk_get_io_channel(bdev); 4919 SPDK_CU_ASSERT_FATAL(ch != NULL); 4920 4921 admin_io->internal.ch = (struct spdk_bdev_channel *)ch; 4922 4923 /* Admin passthrough got a path error, but it should not retry if DNR is set. */ 4924 admin_io->internal.in_submit_request = true; 4925 4926 bdev_nvme_submit_request(ch, admin_io); 4927 4928 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1); 4929 CU_ASSERT(admin_io->internal.in_submit_request == true); 4930 4931 req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx); 4932 SPDK_CU_ASSERT_FATAL(req != NULL); 4933 4934 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4935 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4936 req->cpl.status.dnr = 1; 4937 4938 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4939 poll_thread_times(0, 2); 4940 4941 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 4942 CU_ASSERT(admin_io->internal.in_submit_request == false); 4943 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4944 4945 /* Admin passthrough got a path error, but it should succeed after retry. */ 4946 admin_io->internal.in_submit_request = true; 4947 4948 bdev_nvme_submit_request(ch, admin_io); 4949 4950 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1); 4951 CU_ASSERT(admin_io->internal.in_submit_request == true); 4952 4953 req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx); 4954 SPDK_CU_ASSERT_FATAL(req != NULL); 4955 4956 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4957 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4958 4959 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4960 poll_thread_times(0, 2); 4961 4962 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1); 4963 CU_ASSERT(admin_io->internal.in_submit_request == true); 4964 4965 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4966 poll_threads(); 4967 4968 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 4969 CU_ASSERT(admin_io->internal.in_submit_request == false); 4970 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4971 4972 /* Add ctrlr2 dynamically, and create a multipath configuration. */ 4973 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4974 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4975 4976 ctrlr2->ns[0].uuid = &uuid1; 4977 4978 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4979 attach_ctrlr_done, NULL, NULL, NULL, true); 4980 CU_ASSERT(rc == 0); 4981 4982 spdk_delay_us(1000); 4983 poll_threads(); 4984 4985 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4986 poll_threads(); 4987 4988 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 4989 CU_ASSERT(nvme_ctrlr2 != NULL); 4990 4991 /* Admin passthrough was submitted to ctrlr1, but ctrlr1 was failed. 4992 * Hence the admin passthrough was aborted. But ctrlr2 is avaialble. 4993 * So after a retry, the admin passthrough is submitted to ctrlr2 and 4994 * should succeed. 4995 */ 4996 admin_io->internal.in_submit_request = true; 4997 4998 bdev_nvme_submit_request(ch, admin_io); 4999 5000 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1); 5001 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 5002 CU_ASSERT(admin_io->internal.in_submit_request == true); 5003 5004 req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx); 5005 SPDK_CU_ASSERT_FATAL(req != NULL); 5006 5007 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION; 5008 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 5009 ctrlr1->is_failed = true; 5010 5011 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5012 poll_thread_times(0, 2); 5013 5014 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 5015 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1); 5016 CU_ASSERT(admin_io->internal.in_submit_request == true); 5017 5018 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5019 poll_threads(); 5020 5021 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 5022 CU_ASSERT(admin_io->internal.in_submit_request == false); 5023 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5024 5025 free(admin_io); 5026 5027 spdk_put_io_channel(ch); 5028 5029 poll_threads(); 5030 5031 rc = bdev_nvme_delete("nvme0", &g_any_path); 5032 CU_ASSERT(rc == 0); 5033 5034 poll_threads(); 5035 spdk_delay_us(1000); 5036 poll_threads(); 5037 5038 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5039 5040 g_opts.bdev_retry_count = 0; 5041 } 5042 5043 static void 5044 test_retry_admin_passthru_by_count(void) 5045 { 5046 struct nvme_path_id path = {}; 5047 struct spdk_nvme_ctrlr *ctrlr; 5048 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5049 struct nvme_ctrlr *nvme_ctrlr; 5050 const int STRING_SIZE = 32; 5051 const char *attached_names[STRING_SIZE]; 5052 struct nvme_bdev *bdev; 5053 struct spdk_bdev_io *admin_io; 5054 struct nvme_bdev_io *admin_bio; 5055 struct spdk_io_channel *ch; 5056 struct ut_nvme_req *req; 5057 int rc; 5058 5059 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5060 ut_init_trid(&path.trid); 5061 5062 set_thread(0); 5063 5064 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5065 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5066 5067 g_ut_attach_ctrlr_status = 0; 5068 g_ut_attach_bdev_count = 1; 5069 5070 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5071 attach_ctrlr_done, NULL, NULL, NULL, false); 5072 CU_ASSERT(rc == 0); 5073 5074 spdk_delay_us(1000); 5075 poll_threads(); 5076 5077 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5078 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5079 5080 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5081 CU_ASSERT(nvme_ctrlr != NULL); 5082 5083 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5084 CU_ASSERT(bdev != NULL); 5085 5086 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 5087 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 5088 5089 admin_bio = (struct nvme_bdev_io *)admin_io->driver_ctx; 5090 5091 ch = spdk_get_io_channel(bdev); 5092 SPDK_CU_ASSERT_FATAL(ch != NULL); 5093 5094 admin_io->internal.ch = (struct spdk_bdev_channel *)ch; 5095 5096 /* If admin passthrough is aborted by request, it should not be retried. */ 5097 g_opts.bdev_retry_count = 1; 5098 5099 admin_io->internal.in_submit_request = true; 5100 5101 bdev_nvme_submit_request(ch, admin_io); 5102 5103 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 5104 CU_ASSERT(admin_io->internal.in_submit_request == true); 5105 5106 req = ut_get_outstanding_nvme_request(&ctrlr->adminq, admin_bio); 5107 SPDK_CU_ASSERT_FATAL(req != NULL); 5108 5109 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 5110 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 5111 5112 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5113 poll_thread_times(0, 2); 5114 5115 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 5116 CU_ASSERT(admin_io->internal.in_submit_request == false); 5117 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 5118 5119 /* If bio->retry_count is not less than g_opts.bdev_retry_count, 5120 * the failed admin passthrough should not be retried. 5121 */ 5122 g_opts.bdev_retry_count = 4; 5123 5124 admin_io->internal.in_submit_request = true; 5125 5126 bdev_nvme_submit_request(ch, admin_io); 5127 5128 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 5129 CU_ASSERT(admin_io->internal.in_submit_request == true); 5130 5131 req = ut_get_outstanding_nvme_request(&ctrlr->adminq, admin_bio); 5132 SPDK_CU_ASSERT_FATAL(req != NULL); 5133 5134 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 5135 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 5136 admin_bio->retry_count = 4; 5137 5138 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5139 poll_thread_times(0, 2); 5140 5141 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 5142 CU_ASSERT(admin_io->internal.in_submit_request == false); 5143 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 5144 5145 free(admin_io); 5146 5147 spdk_put_io_channel(ch); 5148 5149 poll_threads(); 5150 5151 rc = bdev_nvme_delete("nvme0", &g_any_path); 5152 CU_ASSERT(rc == 0); 5153 5154 poll_threads(); 5155 spdk_delay_us(1000); 5156 poll_threads(); 5157 5158 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5159 5160 g_opts.bdev_retry_count = 0; 5161 } 5162 5163 static void 5164 test_check_multipath_params(void) 5165 { 5166 /* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and 5167 * 3rd parameter is fast_io_fail_timeout_sec. 5168 */ 5169 CU_ASSERT(bdev_nvme_check_multipath_params(-2, 1, 0) == false); 5170 CU_ASSERT(bdev_nvme_check_multipath_params(-1, 0, 0) == false); 5171 CU_ASSERT(bdev_nvme_check_multipath_params(1, 0, 0) == false); 5172 CU_ASSERT(bdev_nvme_check_multipath_params(1, 2, 0) == false); 5173 CU_ASSERT(bdev_nvme_check_multipath_params(0, 1, 0) == false); 5174 CU_ASSERT(bdev_nvme_check_multipath_params(-1, 1, 0) == true); 5175 CU_ASSERT(bdev_nvme_check_multipath_params(2, 2, 0) == true); 5176 CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 0) == true); 5177 CU_ASSERT(bdev_nvme_check_multipath_params(INT32_MAX, INT32_MAX, 0) == true); 5178 CU_ASSERT(bdev_nvme_check_multipath_params(-1, UINT32_MAX, 0) == true); 5179 CU_ASSERT(bdev_nvme_check_multipath_params(0, 0, 1) == false); 5180 CU_ASSERT(bdev_nvme_check_multipath_params(-1, 2, 1) == false); 5181 CU_ASSERT(bdev_nvme_check_multipath_params(3, 2, 4) == false); 5182 CU_ASSERT(bdev_nvme_check_multipath_params(3, 2, 1) == false); 5183 CU_ASSERT(bdev_nvme_check_multipath_params(-1, 1, 1) == true); 5184 CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 2) == true); 5185 CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 1) == true); 5186 CU_ASSERT(bdev_nvme_check_multipath_params(INT32_MAX, INT32_MAX, INT32_MAX) == true); 5187 CU_ASSERT(bdev_nvme_check_multipath_params(-1, UINT32_MAX, UINT32_MAX) == true); 5188 } 5189 5190 static void 5191 test_retry_io_if_ctrlr_is_resetting(void) 5192 { 5193 struct nvme_path_id path = {}; 5194 struct nvme_ctrlr_opts opts = {}; 5195 struct spdk_nvme_ctrlr *ctrlr; 5196 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5197 struct nvme_ctrlr *nvme_ctrlr; 5198 const int STRING_SIZE = 32; 5199 const char *attached_names[STRING_SIZE]; 5200 struct nvme_bdev *bdev; 5201 struct nvme_ns *nvme_ns; 5202 struct spdk_bdev_io *bdev_io1, *bdev_io2; 5203 struct spdk_io_channel *ch; 5204 struct nvme_bdev_channel *nbdev_ch; 5205 struct nvme_io_path *io_path; 5206 struct nvme_qpair *nvme_qpair; 5207 int rc; 5208 5209 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5210 ut_init_trid(&path.trid); 5211 5212 set_thread(0); 5213 5214 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5215 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5216 5217 g_ut_attach_ctrlr_status = 0; 5218 g_ut_attach_bdev_count = 1; 5219 5220 opts.ctrlr_loss_timeout_sec = -1; 5221 opts.reconnect_delay_sec = 1; 5222 5223 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5224 attach_ctrlr_done, NULL, NULL, &opts, false); 5225 CU_ASSERT(rc == 0); 5226 5227 spdk_delay_us(1000); 5228 poll_threads(); 5229 5230 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5231 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5232 5233 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5234 CU_ASSERT(nvme_ctrlr != NULL); 5235 5236 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5237 CU_ASSERT(bdev != NULL); 5238 5239 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5240 CU_ASSERT(nvme_ns != NULL); 5241 5242 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5243 ut_bdev_io_set_buf(bdev_io1); 5244 5245 bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5246 ut_bdev_io_set_buf(bdev_io2); 5247 5248 ch = spdk_get_io_channel(bdev); 5249 SPDK_CU_ASSERT_FATAL(ch != NULL); 5250 5251 nbdev_ch = spdk_io_channel_get_ctx(ch); 5252 5253 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5254 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5255 5256 nvme_qpair = io_path->qpair; 5257 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5258 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5259 5260 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 5261 bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch; 5262 5263 /* If qpair is connected, I/O should succeed. */ 5264 bdev_io1->internal.in_submit_request = true; 5265 5266 bdev_nvme_submit_request(ch, bdev_io1); 5267 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5268 5269 poll_threads(); 5270 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5271 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 5272 5273 /* If qpair is disconnected, it is freed and then reconnected via resetting 5274 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 5275 * while resetting the nvme_ctrlr. 5276 */ 5277 nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 5278 ctrlr->is_failed = true; 5279 5280 poll_thread_times(0, 5); 5281 5282 CU_ASSERT(nvme_qpair->qpair == NULL); 5283 CU_ASSERT(nvme_ctrlr->resetting == true); 5284 CU_ASSERT(ctrlr->is_failed == false); 5285 5286 bdev_io1->internal.in_submit_request = true; 5287 5288 bdev_nvme_submit_request(ch, bdev_io1); 5289 5290 spdk_delay_us(1); 5291 5292 bdev_io2->internal.in_submit_request = true; 5293 5294 bdev_nvme_submit_request(ch, bdev_io2); 5295 5296 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5297 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5298 CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5299 CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link)); 5300 5301 poll_threads(); 5302 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5303 poll_threads(); 5304 5305 CU_ASSERT(nvme_qpair->qpair != NULL); 5306 CU_ASSERT(nvme_ctrlr->resetting == false); 5307 5308 spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us); 5309 5310 poll_thread_times(0, 1); 5311 5312 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5313 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5314 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5315 CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5316 5317 poll_threads(); 5318 5319 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5320 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5321 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5322 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5323 CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5324 5325 spdk_delay_us(1); 5326 5327 poll_thread_times(0, 1); 5328 5329 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5330 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5331 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5332 5333 poll_threads(); 5334 5335 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5336 CU_ASSERT(bdev_io2->internal.in_submit_request == false); 5337 CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5338 5339 free(bdev_io1); 5340 free(bdev_io2); 5341 5342 spdk_put_io_channel(ch); 5343 5344 poll_threads(); 5345 5346 rc = bdev_nvme_delete("nvme0", &g_any_path); 5347 CU_ASSERT(rc == 0); 5348 5349 poll_threads(); 5350 spdk_delay_us(1000); 5351 poll_threads(); 5352 5353 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5354 } 5355 5356 static void 5357 test_retry_admin_passthru_if_ctrlr_is_resetting(void) 5358 { 5359 struct nvme_path_id path = {}; 5360 struct nvme_ctrlr_opts opts = {}; 5361 struct spdk_nvme_ctrlr *ctrlr; 5362 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5363 struct nvme_ctrlr *nvme_ctrlr; 5364 const int STRING_SIZE = 32; 5365 const char *attached_names[STRING_SIZE]; 5366 struct nvme_bdev *bdev; 5367 struct spdk_bdev_io *admin_io; 5368 struct spdk_io_channel *ch; 5369 struct nvme_bdev_channel *nbdev_ch; 5370 int rc; 5371 5372 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5373 ut_init_trid(&path.trid); 5374 5375 g_opts.bdev_retry_count = 1; 5376 5377 set_thread(0); 5378 5379 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5380 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5381 5382 g_ut_attach_ctrlr_status = 0; 5383 g_ut_attach_bdev_count = 1; 5384 5385 opts.ctrlr_loss_timeout_sec = -1; 5386 opts.reconnect_delay_sec = 1; 5387 5388 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5389 attach_ctrlr_done, NULL, NULL, &opts, false); 5390 CU_ASSERT(rc == 0); 5391 5392 spdk_delay_us(1000); 5393 poll_threads(); 5394 5395 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5396 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5397 5398 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5399 CU_ASSERT(nvme_ctrlr != NULL); 5400 5401 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5402 CU_ASSERT(bdev != NULL); 5403 5404 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 5405 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 5406 5407 ch = spdk_get_io_channel(bdev); 5408 SPDK_CU_ASSERT_FATAL(ch != NULL); 5409 5410 nbdev_ch = spdk_io_channel_get_ctx(ch); 5411 5412 admin_io->internal.ch = (struct spdk_bdev_channel *)ch; 5413 5414 /* If ctrlr is available, admin passthrough should succeed. */ 5415 admin_io->internal.in_submit_request = true; 5416 5417 bdev_nvme_submit_request(ch, admin_io); 5418 5419 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 5420 CU_ASSERT(admin_io->internal.in_submit_request == true); 5421 5422 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5423 poll_threads(); 5424 5425 CU_ASSERT(admin_io->internal.in_submit_request == false); 5426 CU_ASSERT(admin_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 5427 5428 /* If ctrlr is resetting, admin passthrough request should be queued 5429 * if it is submitted while resetting ctrlr. 5430 */ 5431 bdev_nvme_reset(nvme_ctrlr); 5432 5433 poll_thread_times(0, 1); 5434 5435 admin_io->internal.in_submit_request = true; 5436 5437 bdev_nvme_submit_request(ch, admin_io); 5438 5439 CU_ASSERT(admin_io->internal.in_submit_request == true); 5440 CU_ASSERT(admin_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5441 5442 poll_threads(); 5443 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5444 poll_threads(); 5445 5446 CU_ASSERT(nvme_ctrlr->resetting == false); 5447 5448 spdk_delay_us(1000000 - g_opts.nvme_adminq_poll_period_us); 5449 poll_thread_times(0, 1); 5450 5451 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 5452 CU_ASSERT(admin_io->internal.in_submit_request == true); 5453 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5454 5455 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5456 poll_threads(); 5457 5458 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 5459 CU_ASSERT(admin_io->internal.in_submit_request == false); 5460 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5461 5462 free(admin_io); 5463 5464 spdk_put_io_channel(ch); 5465 5466 poll_threads(); 5467 5468 rc = bdev_nvme_delete("nvme0", &g_any_path); 5469 CU_ASSERT(rc == 0); 5470 5471 poll_threads(); 5472 spdk_delay_us(1000); 5473 poll_threads(); 5474 5475 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5476 5477 g_opts.bdev_retry_count = 0; 5478 } 5479 5480 static void 5481 test_reconnect_ctrlr(void) 5482 { 5483 struct spdk_nvme_transport_id trid = {}; 5484 struct spdk_nvme_ctrlr ctrlr = {}; 5485 struct nvme_ctrlr *nvme_ctrlr; 5486 struct spdk_io_channel *ch1, *ch2; 5487 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 5488 int rc; 5489 5490 ut_init_trid(&trid); 5491 TAILQ_INIT(&ctrlr.active_io_qpairs); 5492 5493 set_thread(0); 5494 5495 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 5496 CU_ASSERT(rc == 0); 5497 5498 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5499 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5500 5501 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2; 5502 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5503 5504 ch1 = spdk_get_io_channel(nvme_ctrlr); 5505 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 5506 5507 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 5508 CU_ASSERT(ctrlr_ch1->qpair != NULL); 5509 5510 set_thread(1); 5511 5512 ch2 = spdk_get_io_channel(nvme_ctrlr); 5513 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 5514 5515 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 5516 5517 /* Reset starts from thread 1. */ 5518 set_thread(1); 5519 5520 /* The reset should fail and a reconnect timer should be registered. */ 5521 ctrlr.fail_reset = true; 5522 ctrlr.is_failed = true; 5523 5524 rc = bdev_nvme_reset(nvme_ctrlr); 5525 CU_ASSERT(rc == 0); 5526 CU_ASSERT(nvme_ctrlr->resetting == true); 5527 CU_ASSERT(ctrlr.is_failed == true); 5528 5529 poll_threads(); 5530 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5531 poll_threads(); 5532 5533 CU_ASSERT(nvme_ctrlr->resetting == false); 5534 CU_ASSERT(ctrlr.is_failed == false); 5535 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5536 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5537 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5538 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5539 5540 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5541 poll_threads(); 5542 5543 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5544 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5545 5546 /* Then a reconnect retry should suceeed. */ 5547 ctrlr.fail_reset = false; 5548 5549 spdk_delay_us(SPDK_SEC_TO_USEC); 5550 poll_thread_times(0, 1); 5551 5552 CU_ASSERT(nvme_ctrlr->resetting == true); 5553 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5554 5555 poll_threads(); 5556 5557 CU_ASSERT(nvme_ctrlr->resetting == false); 5558 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 5559 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 5560 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5561 5562 /* The reset should fail and a reconnect timer should be registered. */ 5563 ctrlr.fail_reset = true; 5564 ctrlr.is_failed = true; 5565 5566 rc = bdev_nvme_reset(nvme_ctrlr); 5567 CU_ASSERT(rc == 0); 5568 CU_ASSERT(nvme_ctrlr->resetting == true); 5569 CU_ASSERT(ctrlr.is_failed == true); 5570 5571 poll_threads(); 5572 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5573 poll_threads(); 5574 5575 CU_ASSERT(nvme_ctrlr->resetting == false); 5576 CU_ASSERT(ctrlr.is_failed == false); 5577 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5578 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5579 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5580 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5581 5582 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5583 poll_threads(); 5584 5585 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5586 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5587 5588 /* Then a reconnect retry should still fail. */ 5589 spdk_delay_us(SPDK_SEC_TO_USEC); 5590 poll_thread_times(0, 1); 5591 5592 CU_ASSERT(nvme_ctrlr->resetting == true); 5593 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5594 5595 poll_threads(); 5596 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5597 poll_threads(); 5598 5599 CU_ASSERT(nvme_ctrlr->resetting == false); 5600 CU_ASSERT(ctrlr.is_failed == false); 5601 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5602 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5603 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5604 5605 /* Then a reconnect retry should still fail and the ctrlr should be deleted. */ 5606 spdk_delay_us(SPDK_SEC_TO_USEC); 5607 poll_threads(); 5608 5609 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5610 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5611 CU_ASSERT(nvme_ctrlr->destruct == true); 5612 5613 spdk_put_io_channel(ch2); 5614 5615 set_thread(0); 5616 5617 spdk_put_io_channel(ch1); 5618 5619 poll_threads(); 5620 spdk_delay_us(1000); 5621 poll_threads(); 5622 5623 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5624 } 5625 5626 static struct nvme_path_id * 5627 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr, 5628 const struct spdk_nvme_transport_id *trid) 5629 { 5630 struct nvme_path_id *p; 5631 5632 TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) { 5633 if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) { 5634 break; 5635 } 5636 } 5637 5638 return p; 5639 } 5640 5641 static void 5642 test_retry_failover_ctrlr(void) 5643 { 5644 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 5645 struct spdk_nvme_ctrlr ctrlr = {}; 5646 struct nvme_ctrlr *nvme_ctrlr = NULL; 5647 struct nvme_path_id *path_id1, *path_id2, *path_id3; 5648 struct spdk_io_channel *ch; 5649 struct nvme_ctrlr_channel *ctrlr_ch; 5650 int rc; 5651 5652 ut_init_trid(&trid1); 5653 ut_init_trid2(&trid2); 5654 ut_init_trid3(&trid3); 5655 TAILQ_INIT(&ctrlr.active_io_qpairs); 5656 5657 set_thread(0); 5658 5659 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 5660 CU_ASSERT(rc == 0); 5661 5662 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5663 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5664 5665 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1; 5666 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5667 5668 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 5669 CU_ASSERT(rc == 0); 5670 5671 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 5672 CU_ASSERT(rc == 0); 5673 5674 ch = spdk_get_io_channel(nvme_ctrlr); 5675 SPDK_CU_ASSERT_FATAL(ch != NULL); 5676 5677 ctrlr_ch = spdk_io_channel_get_ctx(ch); 5678 5679 path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1); 5680 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 5681 CU_ASSERT(path_id1->is_failed == false); 5682 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5683 5684 /* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */ 5685 ctrlr.fail_reset = true; 5686 ctrlr.is_failed = true; 5687 5688 rc = bdev_nvme_reset(nvme_ctrlr); 5689 CU_ASSERT(rc == 0); 5690 5691 poll_threads(); 5692 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5693 poll_threads(); 5694 5695 CU_ASSERT(nvme_ctrlr->resetting == false); 5696 CU_ASSERT(ctrlr.is_failed == false); 5697 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5698 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5699 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5700 5701 CU_ASSERT(path_id1->is_failed == true); 5702 5703 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5704 poll_threads(); 5705 5706 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5707 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5708 5709 path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2); 5710 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 5711 CU_ASSERT(path_id2->is_failed == false); 5712 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5713 5714 /* If we remove trid2 while reconnect is scheduled, trid2 is removed and path_id is 5715 * switched to trid3 but reset is not started. 5716 */ 5717 rc = bdev_nvme_failover(nvme_ctrlr, true); 5718 CU_ASSERT(rc == 0); 5719 5720 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) == NULL); 5721 5722 path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3); 5723 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 5724 CU_ASSERT(path_id3->is_failed == false); 5725 CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id); 5726 5727 CU_ASSERT(nvme_ctrlr->resetting == false); 5728 5729 /* If reconnect succeeds, trid3 should be the active path_id */ 5730 ctrlr.fail_reset = false; 5731 5732 spdk_delay_us(SPDK_SEC_TO_USEC); 5733 poll_thread_times(0, 1); 5734 5735 CU_ASSERT(nvme_ctrlr->resetting == true); 5736 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5737 5738 poll_threads(); 5739 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5740 poll_threads(); 5741 5742 CU_ASSERT(path_id3->is_failed == false); 5743 CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id); 5744 CU_ASSERT(nvme_ctrlr->resetting == false); 5745 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 5746 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5747 5748 spdk_put_io_channel(ch); 5749 5750 poll_threads(); 5751 5752 rc = bdev_nvme_delete("nvme0", &g_any_path); 5753 CU_ASSERT(rc == 0); 5754 5755 poll_threads(); 5756 spdk_delay_us(1000); 5757 poll_threads(); 5758 5759 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5760 } 5761 5762 static void 5763 test_fail_path(void) 5764 { 5765 struct nvme_path_id path = {}; 5766 struct nvme_ctrlr_opts opts = {}; 5767 struct spdk_nvme_ctrlr *ctrlr; 5768 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5769 struct nvme_ctrlr *nvme_ctrlr; 5770 const int STRING_SIZE = 32; 5771 const char *attached_names[STRING_SIZE]; 5772 struct nvme_bdev *bdev; 5773 struct nvme_ns *nvme_ns; 5774 struct spdk_bdev_io *bdev_io; 5775 struct spdk_io_channel *ch; 5776 struct nvme_bdev_channel *nbdev_ch; 5777 struct nvme_io_path *io_path; 5778 struct nvme_ctrlr_channel *ctrlr_ch; 5779 int rc; 5780 5781 /* The test scenario is the following. 5782 * - We set ctrlr_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec. 5783 * - Rresetting a ctrlr fails and reconnecting the ctrlr is repeated. 5784 * - While reconnecting the ctrlr, an I/O is submitted and queued. 5785 * - The I/O waits until the ctrlr is recovered but ctrlr_fail_timeout_sec 5786 * comes first. The queued I/O is failed. 5787 * - After ctrlr_fail_timeout_sec, any I/O is failed immediately. 5788 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted. 5789 */ 5790 5791 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5792 ut_init_trid(&path.trid); 5793 5794 set_thread(0); 5795 5796 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5797 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5798 5799 g_ut_attach_ctrlr_status = 0; 5800 g_ut_attach_bdev_count = 1; 5801 5802 opts.ctrlr_loss_timeout_sec = 4; 5803 opts.reconnect_delay_sec = 1; 5804 opts.fast_io_fail_timeout_sec = 2; 5805 5806 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5807 attach_ctrlr_done, NULL, NULL, &opts, false); 5808 CU_ASSERT(rc == 0); 5809 5810 spdk_delay_us(1000); 5811 poll_threads(); 5812 5813 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5814 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5815 5816 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5817 CU_ASSERT(nvme_ctrlr != NULL); 5818 5819 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5820 CU_ASSERT(bdev != NULL); 5821 5822 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5823 CU_ASSERT(nvme_ns != NULL); 5824 5825 ch = spdk_get_io_channel(bdev); 5826 SPDK_CU_ASSERT_FATAL(ch != NULL); 5827 5828 nbdev_ch = spdk_io_channel_get_ctx(ch); 5829 5830 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5831 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5832 5833 ctrlr_ch = io_path->qpair->ctrlr_ch; 5834 SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL); 5835 SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL); 5836 5837 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 5838 ut_bdev_io_set_buf(bdev_io); 5839 5840 5841 /* Resetting a ctrlr should fail and a reconnect timer should be registered. */ 5842 ctrlr->fail_reset = true; 5843 ctrlr->is_failed = true; 5844 5845 rc = bdev_nvme_reset(nvme_ctrlr); 5846 CU_ASSERT(rc == 0); 5847 CU_ASSERT(nvme_ctrlr->resetting == true); 5848 CU_ASSERT(ctrlr->is_failed == true); 5849 5850 poll_threads(); 5851 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5852 poll_threads(); 5853 5854 CU_ASSERT(nvme_ctrlr->resetting == false); 5855 CU_ASSERT(ctrlr->is_failed == false); 5856 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5857 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5858 CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0); 5859 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5860 5861 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5862 poll_threads(); 5863 5864 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5865 5866 /* I/O should be queued. */ 5867 bdev_io->internal.in_submit_request = true; 5868 5869 bdev_nvme_submit_request(ch, bdev_io); 5870 5871 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5872 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5873 5874 /* After a second, the I/O should be still queued and the ctrlr should be 5875 * still recovering. 5876 */ 5877 spdk_delay_us(SPDK_SEC_TO_USEC); 5878 poll_threads(); 5879 5880 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5881 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5882 5883 CU_ASSERT(nvme_ctrlr->resetting == false); 5884 CU_ASSERT(ctrlr->is_failed == false); 5885 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5886 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5887 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5888 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5889 5890 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5891 poll_threads(); 5892 5893 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5894 5895 /* After two seconds, ctrlr_fail_timeout_sec should expire. */ 5896 spdk_delay_us(SPDK_SEC_TO_USEC); 5897 poll_threads(); 5898 5899 CU_ASSERT(nvme_ctrlr->resetting == false); 5900 CU_ASSERT(ctrlr->is_failed == false); 5901 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5902 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5903 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5904 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true); 5905 5906 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5907 poll_threads(); 5908 5909 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5910 5911 /* Then within a second, pending I/O should be failed. */ 5912 spdk_delay_us(SPDK_SEC_TO_USEC); 5913 poll_threads(); 5914 5915 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5916 poll_threads(); 5917 5918 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5919 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5920 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5921 5922 /* Another I/O submission should be failed immediately. */ 5923 bdev_io->internal.in_submit_request = true; 5924 5925 bdev_nvme_submit_request(ch, bdev_io); 5926 5927 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5928 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5929 5930 /* After four seconds, path_loss_timeout_sec should expire and ctrlr should 5931 * be deleted. 5932 */ 5933 spdk_delay_us(SPDK_SEC_TO_USEC); 5934 poll_threads(); 5935 5936 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5937 poll_threads(); 5938 5939 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5940 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5941 CU_ASSERT(nvme_ctrlr->destruct == true); 5942 5943 spdk_put_io_channel(ch); 5944 5945 poll_threads(); 5946 spdk_delay_us(1000); 5947 poll_threads(); 5948 5949 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5950 5951 free(bdev_io); 5952 } 5953 5954 static void 5955 test_nvme_ns_cmp(void) 5956 { 5957 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 5958 5959 nvme_ns1.id = 0; 5960 nvme_ns2.id = UINT32_MAX; 5961 5962 CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0); 5963 CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0); 5964 } 5965 5966 static void 5967 test_ana_transition(void) 5968 { 5969 struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, }; 5970 struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, }; 5971 struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, }; 5972 struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, }; 5973 5974 /* case 1: ANA transition timedout is canceled. */ 5975 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5976 nvme_ns.ana_transition_timedout = true; 5977 5978 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5979 5980 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5981 5982 CU_ASSERT(nvme_ns.ana_transition_timedout == false); 5983 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5984 5985 /* case 2: ANATT timer is kept. */ 5986 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5987 nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout, 5988 &nvme_ns, 5989 ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5990 5991 desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5992 5993 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5994 5995 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5996 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 5997 5998 /* case 3: ANATT timer is stopped. */ 5999 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6000 6001 _nvme_ns_set_ana_state(&nvme_ns, &desc); 6002 6003 CU_ASSERT(nvme_ns.anatt_timer == NULL); 6004 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 6005 6006 /* ANATT timer is started. */ 6007 desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 6008 6009 _nvme_ns_set_ana_state(&nvme_ns, &desc); 6010 6011 CU_ASSERT(nvme_ns.anatt_timer != NULL); 6012 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE); 6013 6014 /* ANATT timer is expired. */ 6015 spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 6016 6017 poll_threads(); 6018 6019 CU_ASSERT(nvme_ns.anatt_timer == NULL); 6020 CU_ASSERT(nvme_ns.ana_transition_timedout == true); 6021 } 6022 6023 int 6024 main(int argc, const char **argv) 6025 { 6026 CU_pSuite suite = NULL; 6027 unsigned int num_failures; 6028 6029 CU_set_error_action(CUEA_ABORT); 6030 CU_initialize_registry(); 6031 6032 suite = CU_add_suite("nvme", NULL, NULL); 6033 6034 CU_ADD_TEST(suite, test_create_ctrlr); 6035 CU_ADD_TEST(suite, test_reset_ctrlr); 6036 CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr); 6037 CU_ADD_TEST(suite, test_failover_ctrlr); 6038 CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid); 6039 CU_ADD_TEST(suite, test_pending_reset); 6040 CU_ADD_TEST(suite, test_attach_ctrlr); 6041 CU_ADD_TEST(suite, test_aer_cb); 6042 CU_ADD_TEST(suite, test_submit_nvme_cmd); 6043 CU_ADD_TEST(suite, test_add_remove_trid); 6044 CU_ADD_TEST(suite, test_abort); 6045 CU_ADD_TEST(suite, test_get_io_qpair); 6046 CU_ADD_TEST(suite, test_bdev_unregister); 6047 CU_ADD_TEST(suite, test_compare_ns); 6048 CU_ADD_TEST(suite, test_init_ana_log_page); 6049 CU_ADD_TEST(suite, test_get_memory_domains); 6050 CU_ADD_TEST(suite, test_reconnect_qpair); 6051 CU_ADD_TEST(suite, test_create_bdev_ctrlr); 6052 CU_ADD_TEST(suite, test_add_multi_ns_to_bdev); 6053 CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch); 6054 CU_ADD_TEST(suite, test_admin_path); 6055 CU_ADD_TEST(suite, test_reset_bdev_ctrlr); 6056 CU_ADD_TEST(suite, test_find_io_path); 6057 CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating); 6058 CU_ADD_TEST(suite, test_retry_io_for_io_path_error); 6059 CU_ADD_TEST(suite, test_retry_io_count); 6060 CU_ADD_TEST(suite, test_concurrent_read_ana_log_page); 6061 CU_ADD_TEST(suite, test_retry_io_for_ana_error); 6062 CU_ADD_TEST(suite, test_retry_admin_passthru_for_path_error); 6063 CU_ADD_TEST(suite, test_retry_admin_passthru_by_count); 6064 CU_ADD_TEST(suite, test_check_multipath_params); 6065 CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting); 6066 CU_ADD_TEST(suite, test_retry_admin_passthru_if_ctrlr_is_resetting); 6067 CU_ADD_TEST(suite, test_reconnect_ctrlr); 6068 CU_ADD_TEST(suite, test_retry_failover_ctrlr); 6069 CU_ADD_TEST(suite, test_fail_path); 6070 CU_ADD_TEST(suite, test_nvme_ns_cmp); 6071 CU_ADD_TEST(suite, test_ana_transition); 6072 6073 CU_basic_set_mode(CU_BRM_VERBOSE); 6074 6075 allocate_threads(3); 6076 set_thread(0); 6077 bdev_nvme_library_init(); 6078 init_accel(); 6079 6080 CU_basic_run_tests(); 6081 6082 set_thread(0); 6083 bdev_nvme_library_fini(); 6084 fini_accel(); 6085 free_threads(); 6086 6087 num_failures = CU_get_number_of_failures(); 6088 CU_cleanup_registry(); 6089 6090 return num_failures; 6091 } 6092