1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 #include "spdk_cunit.h" 9 #include "spdk/thread.h" 10 #include "spdk/bdev_module.h" 11 #include "spdk/bdev_module.h" 12 13 #include "common/lib/ut_multithread.c" 14 15 #include "bdev/nvme/bdev_nvme.c" 16 17 #include "unit/lib/json_mock.c" 18 19 static void *g_accel_p = (void *)0xdeadbeaf; 20 21 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *, 22 (const struct spdk_nvme_transport_id *trid, void *cb_ctx, 23 spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb, 24 spdk_nvme_remove_cb remove_cb), NULL); 25 26 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid, 27 enum spdk_nvme_transport_type trtype)); 28 29 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype), 30 NULL); 31 32 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL); 33 34 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr, 35 struct spdk_nvme_transport_id *trid), 0); 36 37 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr, 38 spdk_nvme_remove_cb remove_cb, void *remove_ctx)); 39 40 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0); 41 42 DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0); 43 DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf)); 44 45 DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain, int); 46 47 DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int, 48 (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0); 49 50 int spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr, 51 struct spdk_memory_domain **domains, int array_size) 52 { 53 HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain); 54 55 return 0; 56 } 57 58 struct spdk_io_channel * 59 spdk_accel_engine_get_io_channel(void) 60 { 61 return spdk_get_io_channel(g_accel_p); 62 } 63 64 void 65 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr, 66 struct spdk_nvme_io_qpair_opts *opts, size_t opts_size) 67 { 68 /* Avoid warning that opts is used uninitialised */ 69 memset(opts, 0, opts_size); 70 } 71 72 DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *, 73 (struct spdk_nvme_ctrlr *ctrlr), NULL); 74 75 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t, 76 (const struct spdk_nvme_ctrlr *ctrlr), 0); 77 78 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *, 79 (struct spdk_nvme_ctrlr *ctrlr), NULL); 80 81 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr, 82 spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg)); 83 84 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr, 85 uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)); 86 87 DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true); 88 89 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false); 90 91 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr, 92 struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 93 94 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr, 95 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 96 uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 97 98 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr, 99 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 100 uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 101 102 DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name, 103 size_t *size), 0); 104 105 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 106 107 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 108 109 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 110 111 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0); 112 113 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false); 114 115 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 116 117 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value, 118 enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0); 119 120 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0); 121 122 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, 123 char *name, size_t *size), 0); 124 125 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t, 126 (struct spdk_nvme_ns *ns), 0); 127 128 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t, 129 (const struct spdk_nvme_ctrlr *ctrlr), 0); 130 131 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t, 132 (struct spdk_nvme_ns *ns), 0); 133 134 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t, 135 (struct spdk_nvme_ns *ns), 0); 136 137 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t, 138 (struct spdk_nvme_ns *ns), 0); 139 140 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int, 141 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata, 142 uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 143 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0); 144 145 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int, 146 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba, 147 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 148 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn, 149 void *metadata, uint16_t apptag_mask, uint16_t apptag), 0); 150 151 DEFINE_STUB(spdk_nvme_zns_report_zones, int, 152 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 153 void *payload, uint32_t payload_size, uint64_t slba, 154 enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report, 155 spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 156 157 DEFINE_STUB(spdk_nvme_zns_close_zone, int, 158 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 159 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 160 161 DEFINE_STUB(spdk_nvme_zns_finish_zone, int, 162 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 163 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 164 165 DEFINE_STUB(spdk_nvme_zns_open_zone, int, 166 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 167 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 168 169 DEFINE_STUB(spdk_nvme_zns_reset_zone, int, 170 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 171 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 172 173 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL); 174 175 DEFINE_STUB(spdk_nvme_zns_offline_zone, int, 176 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 177 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 178 179 DEFINE_STUB_V(spdk_bdev_module_fini_done, (void)); 180 181 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); 182 183 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc)); 184 185 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL); 186 187 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev)); 188 189 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst, 190 struct iovec *iov, 191 uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0); 192 193 DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr)); 194 195 struct ut_nvme_req { 196 uint16_t opc; 197 spdk_nvme_cmd_cb cb_fn; 198 void *cb_arg; 199 struct spdk_nvme_cpl cpl; 200 TAILQ_ENTRY(ut_nvme_req) tailq; 201 }; 202 203 struct spdk_nvme_ns { 204 struct spdk_nvme_ctrlr *ctrlr; 205 uint32_t id; 206 bool is_active; 207 struct spdk_uuid *uuid; 208 enum spdk_nvme_ana_state ana_state; 209 enum spdk_nvme_csi csi; 210 }; 211 212 struct spdk_nvme_qpair { 213 struct spdk_nvme_ctrlr *ctrlr; 214 uint8_t failure_reason; 215 bool is_connected; 216 bool in_completion_context; 217 bool delete_after_completion_context; 218 TAILQ_HEAD(, ut_nvme_req) outstanding_reqs; 219 uint32_t num_outstanding_reqs; 220 TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq; 221 struct spdk_nvme_poll_group *poll_group; 222 void *poll_group_tailq_head; 223 TAILQ_ENTRY(spdk_nvme_qpair) tailq; 224 }; 225 226 struct spdk_nvme_ctrlr { 227 uint32_t num_ns; 228 struct spdk_nvme_ns *ns; 229 struct spdk_nvme_ns_data *nsdata; 230 struct spdk_nvme_qpair adminq; 231 struct spdk_nvme_ctrlr_data cdata; 232 bool attached; 233 bool is_failed; 234 bool fail_reset; 235 struct spdk_nvme_transport_id trid; 236 TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs; 237 TAILQ_ENTRY(spdk_nvme_ctrlr) tailq; 238 struct spdk_nvme_ctrlr_opts opts; 239 }; 240 241 struct spdk_nvme_poll_group { 242 void *ctx; 243 struct spdk_nvme_accel_fn_table accel_fn_table; 244 TAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs; 245 TAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs; 246 }; 247 248 struct spdk_nvme_probe_ctx { 249 struct spdk_nvme_transport_id trid; 250 void *cb_ctx; 251 spdk_nvme_attach_cb attach_cb; 252 struct spdk_nvme_ctrlr *init_ctrlr; 253 }; 254 255 uint32_t 256 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr) 257 { 258 uint32_t nsid; 259 260 for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) { 261 if (ctrlr->ns[nsid - 1].is_active) { 262 return nsid; 263 } 264 } 265 266 return 0; 267 } 268 269 uint32_t 270 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 271 { 272 for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) { 273 if (ctrlr->ns[nsid - 1].is_active) { 274 return nsid; 275 } 276 } 277 278 return 0; 279 } 280 281 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs); 282 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER( 283 g_ut_attached_ctrlrs); 284 static int g_ut_attach_ctrlr_status; 285 static size_t g_ut_attach_bdev_count; 286 static int g_ut_register_bdev_status; 287 static struct spdk_bdev *g_ut_registered_bdev; 288 static uint16_t g_ut_cntlid; 289 static struct nvme_path_id g_any_path = {}; 290 291 static void 292 ut_init_trid(struct spdk_nvme_transport_id *trid) 293 { 294 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 295 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 296 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8"); 297 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 298 } 299 300 static void 301 ut_init_trid2(struct spdk_nvme_transport_id *trid) 302 { 303 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 304 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 305 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9"); 306 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 307 } 308 309 static void 310 ut_init_trid3(struct spdk_nvme_transport_id *trid) 311 { 312 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 313 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 314 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10"); 315 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 316 } 317 318 static int 319 cmp_int(int a, int b) 320 { 321 return a - b; 322 } 323 324 int 325 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1, 326 const struct spdk_nvme_transport_id *trid2) 327 { 328 int cmp; 329 330 /* We assume trtype is TCP for now. */ 331 CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP); 332 333 cmp = cmp_int(trid1->trtype, trid2->trtype); 334 if (cmp) { 335 return cmp; 336 } 337 338 cmp = strcasecmp(trid1->traddr, trid2->traddr); 339 if (cmp) { 340 return cmp; 341 } 342 343 cmp = cmp_int(trid1->adrfam, trid2->adrfam); 344 if (cmp) { 345 return cmp; 346 } 347 348 cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid); 349 if (cmp) { 350 return cmp; 351 } 352 353 cmp = strcmp(trid1->subnqn, trid2->subnqn); 354 if (cmp) { 355 return cmp; 356 } 357 358 return 0; 359 } 360 361 static struct spdk_nvme_ctrlr * 362 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns, 363 bool ana_reporting, bool multipath) 364 { 365 struct spdk_nvme_ctrlr *ctrlr; 366 uint32_t i; 367 368 TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) { 369 if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) { 370 /* There is a ctrlr whose trid matches. */ 371 return NULL; 372 } 373 } 374 375 ctrlr = calloc(1, sizeof(*ctrlr)); 376 if (ctrlr == NULL) { 377 return NULL; 378 } 379 380 ctrlr->attached = true; 381 ctrlr->adminq.ctrlr = ctrlr; 382 TAILQ_INIT(&ctrlr->adminq.outstanding_reqs); 383 ctrlr->adminq.is_connected = true; 384 385 if (num_ns != 0) { 386 ctrlr->num_ns = num_ns; 387 ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns)); 388 if (ctrlr->ns == NULL) { 389 free(ctrlr); 390 return NULL; 391 } 392 393 ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data)); 394 if (ctrlr->nsdata == NULL) { 395 free(ctrlr->ns); 396 free(ctrlr); 397 return NULL; 398 } 399 400 for (i = 0; i < num_ns; i++) { 401 ctrlr->ns[i].id = i + 1; 402 ctrlr->ns[i].ctrlr = ctrlr; 403 ctrlr->ns[i].is_active = true; 404 ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 405 ctrlr->nsdata[i].nsze = 1024; 406 ctrlr->nsdata[i].nmic.can_share = multipath; 407 } 408 409 ctrlr->cdata.nn = num_ns; 410 ctrlr->cdata.mnan = num_ns; 411 ctrlr->cdata.nanagrpid = num_ns; 412 } 413 414 ctrlr->cdata.cntlid = ++g_ut_cntlid; 415 ctrlr->cdata.cmic.multi_ctrlr = multipath; 416 ctrlr->cdata.cmic.ana_reporting = ana_reporting; 417 ctrlr->trid = *trid; 418 TAILQ_INIT(&ctrlr->active_io_qpairs); 419 420 TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq); 421 422 return ctrlr; 423 } 424 425 static void 426 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr) 427 { 428 CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs)); 429 430 TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq); 431 free(ctrlr->nsdata); 432 free(ctrlr->ns); 433 free(ctrlr); 434 } 435 436 static int 437 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 438 uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg) 439 { 440 struct ut_nvme_req *req; 441 442 req = calloc(1, sizeof(*req)); 443 if (req == NULL) { 444 return -ENOMEM; 445 } 446 447 req->opc = opc; 448 req->cb_fn = cb_fn; 449 req->cb_arg = cb_arg; 450 451 req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 452 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 453 454 TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq); 455 qpair->num_outstanding_reqs++; 456 457 return 0; 458 } 459 460 static struct ut_nvme_req * 461 ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg) 462 { 463 struct ut_nvme_req *req; 464 465 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 466 if (req->cb_arg == cb_arg) { 467 break; 468 } 469 } 470 471 return req; 472 } 473 474 static struct spdk_bdev_io * 475 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev, 476 struct spdk_io_channel *ch) 477 { 478 struct spdk_bdev_io *bdev_io; 479 480 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 481 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 482 bdev_io->type = type; 483 bdev_io->bdev = &nbdev->disk; 484 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 485 486 return bdev_io; 487 } 488 489 static void 490 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io) 491 { 492 bdev_io->u.bdev.iovs = &bdev_io->iov; 493 bdev_io->u.bdev.iovcnt = 1; 494 495 bdev_io->iov.iov_base = (void *)0xFEEDBEEF; 496 bdev_io->iov.iov_len = 4096; 497 } 498 499 static void 500 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx) 501 { 502 if (ctrlr->is_failed) { 503 free(ctrlr); 504 return; 505 } 506 507 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts)); 508 if (probe_ctx->cb_ctx) { 509 ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx; 510 } 511 512 TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq); 513 514 if (probe_ctx->attach_cb) { 515 probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts); 516 } 517 } 518 519 int 520 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx) 521 { 522 struct spdk_nvme_ctrlr *ctrlr, *tmp; 523 524 TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) { 525 if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) { 526 continue; 527 } 528 TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq); 529 nvme_ctrlr_poll_internal(ctrlr, probe_ctx); 530 } 531 532 free(probe_ctx); 533 534 return 0; 535 } 536 537 struct spdk_nvme_probe_ctx * 538 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid, 539 const struct spdk_nvme_ctrlr_opts *opts, 540 spdk_nvme_attach_cb attach_cb) 541 { 542 struct spdk_nvme_probe_ctx *probe_ctx; 543 544 if (trid == NULL) { 545 return NULL; 546 } 547 548 probe_ctx = calloc(1, sizeof(*probe_ctx)); 549 if (probe_ctx == NULL) { 550 return NULL; 551 } 552 553 probe_ctx->trid = *trid; 554 probe_ctx->cb_ctx = (void *)opts; 555 probe_ctx->attach_cb = attach_cb; 556 557 return probe_ctx; 558 } 559 560 int 561 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr) 562 { 563 if (ctrlr->attached) { 564 ut_detach_ctrlr(ctrlr); 565 } 566 567 return 0; 568 } 569 570 int 571 spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx) 572 { 573 SPDK_CU_ASSERT_FATAL(ctx != NULL); 574 *(struct spdk_nvme_ctrlr **)ctx = ctrlr; 575 576 return 0; 577 } 578 579 int 580 spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx) 581 { 582 return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx); 583 } 584 585 void 586 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size) 587 { 588 memset(opts, 0, opts_size); 589 590 snprintf(opts->hostnqn, sizeof(opts->hostnqn), 591 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"); 592 } 593 594 const struct spdk_nvme_ctrlr_data * 595 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr) 596 { 597 return &ctrlr->cdata; 598 } 599 600 uint32_t 601 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr) 602 { 603 return ctrlr->num_ns; 604 } 605 606 struct spdk_nvme_ns * 607 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 608 { 609 if (nsid < 1 || nsid > ctrlr->num_ns) { 610 return NULL; 611 } 612 613 return &ctrlr->ns[nsid - 1]; 614 } 615 616 bool 617 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 618 { 619 if (nsid < 1 || nsid > ctrlr->num_ns) { 620 return false; 621 } 622 623 return ctrlr->ns[nsid - 1].is_active; 624 } 625 626 union spdk_nvme_csts_register 627 spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr) 628 { 629 union spdk_nvme_csts_register csts; 630 631 csts.raw = 0; 632 633 return csts; 634 } 635 636 union spdk_nvme_vs_register 637 spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr) 638 { 639 union spdk_nvme_vs_register vs; 640 641 vs.raw = 0; 642 643 return vs; 644 } 645 646 struct spdk_nvme_qpair * 647 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 648 const struct spdk_nvme_io_qpair_opts *user_opts, 649 size_t opts_size) 650 { 651 struct spdk_nvme_qpair *qpair; 652 653 qpair = calloc(1, sizeof(*qpair)); 654 if (qpair == NULL) { 655 return NULL; 656 } 657 658 qpair->ctrlr = ctrlr; 659 TAILQ_INIT(&qpair->outstanding_reqs); 660 TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq); 661 662 return qpair; 663 } 664 665 static void 666 nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair) 667 { 668 struct spdk_nvme_poll_group *group = qpair->poll_group; 669 670 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 671 672 qpair->poll_group_tailq_head = &group->connected_qpairs; 673 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 674 TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq); 675 } 676 677 static void 678 nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair) 679 { 680 struct spdk_nvme_poll_group *group = qpair->poll_group; 681 682 CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs); 683 684 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 685 TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq); 686 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 687 } 688 689 int 690 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 691 struct spdk_nvme_qpair *qpair) 692 { 693 if (qpair->is_connected) { 694 return -EISCONN; 695 } 696 697 qpair->is_connected = true; 698 qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE; 699 700 if (qpair->poll_group) { 701 nvme_poll_group_connect_qpair(qpair); 702 } 703 704 return 0; 705 } 706 707 void 708 spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair) 709 { 710 if (!qpair->is_connected) { 711 return; 712 } 713 714 qpair->is_connected = false; 715 716 if (qpair->poll_group != NULL) { 717 nvme_poll_group_disconnect_qpair(qpair); 718 } 719 } 720 721 int 722 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair) 723 { 724 SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL); 725 726 if (qpair->in_completion_context) { 727 qpair->delete_after_completion_context = true; 728 return 0; 729 } 730 731 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 732 733 if (qpair->poll_group != NULL) { 734 spdk_nvme_poll_group_remove(qpair->poll_group, qpair); 735 } 736 737 TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq); 738 739 CU_ASSERT(qpair->num_outstanding_reqs == 0); 740 741 free(qpair); 742 743 return 0; 744 } 745 746 int 747 spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr) 748 { 749 if (ctrlr->fail_reset) { 750 ctrlr->is_failed = true; 751 return -EIO; 752 } 753 754 ctrlr->adminq.is_connected = true; 755 return 0; 756 } 757 758 void 759 spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr) 760 { 761 } 762 763 int 764 spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr) 765 { 766 ctrlr->adminq.is_connected = false; 767 ctrlr->is_failed = false; 768 769 return 0; 770 } 771 772 void 773 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr) 774 { 775 ctrlr->is_failed = true; 776 } 777 778 bool 779 spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr) 780 { 781 return ctrlr->is_failed; 782 } 783 784 spdk_nvme_qp_failure_reason 785 spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr) 786 { 787 return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq); 788 } 789 790 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + \ 791 sizeof(uint32_t)) 792 static void 793 ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length) 794 { 795 struct spdk_nvme_ana_page ana_hdr; 796 char _ana_desc[UT_ANA_DESC_SIZE]; 797 struct spdk_nvme_ana_group_descriptor *ana_desc; 798 struct spdk_nvme_ns *ns; 799 uint32_t i; 800 801 memset(&ana_hdr, 0, sizeof(ana_hdr)); 802 ana_hdr.num_ana_group_desc = ctrlr->num_ns; 803 804 SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length); 805 memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr)); 806 807 buf += sizeof(ana_hdr); 808 length -= sizeof(ana_hdr); 809 810 ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc; 811 812 for (i = 0; i < ctrlr->num_ns; i++) { 813 ns = &ctrlr->ns[i]; 814 815 if (!ns->is_active) { 816 continue; 817 } 818 819 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 820 821 ana_desc->ana_group_id = ns->id; 822 ana_desc->num_of_nsid = 1; 823 ana_desc->ana_state = ns->ana_state; 824 ana_desc->nsid[0] = ns->id; 825 826 SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length); 827 memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE); 828 829 buf += UT_ANA_DESC_SIZE; 830 length -= UT_ANA_DESC_SIZE; 831 } 832 } 833 834 int 835 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, 836 uint8_t log_page, uint32_t nsid, 837 void *payload, uint32_t payload_size, 838 uint64_t offset, 839 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 840 { 841 if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) { 842 SPDK_CU_ASSERT_FATAL(offset == 0); 843 ut_create_ana_log_page(ctrlr, payload, payload_size); 844 } 845 846 return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE, 847 cb_fn, cb_arg); 848 } 849 850 int 851 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr, 852 struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, 853 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 854 { 855 return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg); 856 } 857 858 int 859 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 860 void *cmd_cb_arg, 861 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 862 { 863 struct ut_nvme_req *req = NULL, *abort_req; 864 865 if (qpair == NULL) { 866 qpair = &ctrlr->adminq; 867 } 868 869 abort_req = calloc(1, sizeof(*abort_req)); 870 if (abort_req == NULL) { 871 return -ENOMEM; 872 } 873 874 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 875 if (req->cb_arg == cmd_cb_arg) { 876 break; 877 } 878 } 879 880 if (req == NULL) { 881 free(abort_req); 882 return -ENOENT; 883 } 884 885 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 886 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 887 888 abort_req->opc = SPDK_NVME_OPC_ABORT; 889 abort_req->cb_fn = cb_fn; 890 abort_req->cb_arg = cb_arg; 891 892 abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 893 abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 894 abort_req->cpl.cdw0 = 0; 895 896 TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq); 897 ctrlr->adminq.num_outstanding_reqs++; 898 899 return 0; 900 } 901 902 int32_t 903 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr) 904 { 905 return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0); 906 } 907 908 uint32_t 909 spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns) 910 { 911 return ns->id; 912 } 913 914 struct spdk_nvme_ctrlr * 915 spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns) 916 { 917 return ns->ctrlr; 918 } 919 920 static inline struct spdk_nvme_ns_data * 921 _nvme_ns_get_data(struct spdk_nvme_ns *ns) 922 { 923 return &ns->ctrlr->nsdata[ns->id - 1]; 924 } 925 926 const struct spdk_nvme_ns_data * 927 spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns) 928 { 929 return _nvme_ns_get_data(ns); 930 } 931 932 uint64_t 933 spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns) 934 { 935 return _nvme_ns_get_data(ns)->nsze; 936 } 937 938 const struct spdk_uuid * 939 spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns) 940 { 941 return ns->uuid; 942 } 943 944 enum spdk_nvme_csi 945 spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) { 946 return ns->csi; 947 } 948 949 int 950 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 951 void *metadata, uint64_t lba, uint32_t lba_count, 952 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 953 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 954 { 955 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 956 } 957 958 int 959 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 960 void *buffer, void *metadata, uint64_t lba, 961 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 962 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 963 { 964 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 965 } 966 967 int 968 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 969 uint64_t lba, uint32_t lba_count, 970 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 971 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 972 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 973 uint16_t apptag_mask, uint16_t apptag) 974 { 975 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 976 } 977 978 int 979 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 980 uint64_t lba, uint32_t lba_count, 981 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 982 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 983 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 984 uint16_t apptag_mask, uint16_t apptag) 985 { 986 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 987 } 988 989 static bool g_ut_readv_ext_called; 990 int 991 spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 992 uint64_t lba, uint32_t lba_count, 993 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 994 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 995 spdk_nvme_req_next_sge_cb next_sge_fn, 996 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 997 { 998 g_ut_readv_ext_called = true; 999 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1000 } 1001 1002 static bool g_ut_writev_ext_called; 1003 int 1004 spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1005 uint64_t lba, uint32_t lba_count, 1006 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1007 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1008 spdk_nvme_req_next_sge_cb next_sge_fn, 1009 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1010 { 1011 g_ut_writev_ext_called = true; 1012 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1013 } 1014 1015 int 1016 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1017 uint64_t lba, uint32_t lba_count, 1018 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1019 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1020 spdk_nvme_req_next_sge_cb next_sge_fn, 1021 void *metadata, uint16_t apptag_mask, uint16_t apptag) 1022 { 1023 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg); 1024 } 1025 1026 int 1027 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1028 uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges, 1029 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1030 { 1031 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg); 1032 } 1033 1034 int 1035 spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1036 uint64_t lba, uint32_t lba_count, 1037 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1038 uint32_t io_flags) 1039 { 1040 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg); 1041 } 1042 1043 struct spdk_nvme_poll_group * 1044 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table) 1045 { 1046 struct spdk_nvme_poll_group *group; 1047 1048 group = calloc(1, sizeof(*group)); 1049 if (group == NULL) { 1050 return NULL; 1051 } 1052 1053 group->ctx = ctx; 1054 if (table != NULL) { 1055 group->accel_fn_table = *table; 1056 } 1057 TAILQ_INIT(&group->connected_qpairs); 1058 TAILQ_INIT(&group->disconnected_qpairs); 1059 1060 return group; 1061 } 1062 1063 int 1064 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group) 1065 { 1066 if (!TAILQ_EMPTY(&group->connected_qpairs) || 1067 !TAILQ_EMPTY(&group->disconnected_qpairs)) { 1068 return -EBUSY; 1069 } 1070 1071 free(group); 1072 1073 return 0; 1074 } 1075 1076 spdk_nvme_qp_failure_reason 1077 spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair) 1078 { 1079 return qpair->failure_reason; 1080 } 1081 1082 int32_t 1083 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, 1084 uint32_t max_completions) 1085 { 1086 struct ut_nvme_req *req, *tmp; 1087 uint32_t num_completions = 0; 1088 1089 if (!qpair->is_connected) { 1090 return -ENXIO; 1091 } 1092 1093 qpair->in_completion_context = true; 1094 1095 TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) { 1096 TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq); 1097 qpair->num_outstanding_reqs--; 1098 1099 req->cb_fn(req->cb_arg, &req->cpl); 1100 1101 free(req); 1102 num_completions++; 1103 } 1104 1105 qpair->in_completion_context = false; 1106 if (qpair->delete_after_completion_context) { 1107 spdk_nvme_ctrlr_free_io_qpair(qpair); 1108 } 1109 1110 return num_completions; 1111 } 1112 1113 int64_t 1114 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group, 1115 uint32_t completions_per_qpair, 1116 spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb) 1117 { 1118 struct spdk_nvme_qpair *qpair, *tmp_qpair; 1119 int64_t local_completions = 0, error_reason = 0, num_completions = 0; 1120 1121 SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0); 1122 1123 if (disconnected_qpair_cb == NULL) { 1124 return -EINVAL; 1125 } 1126 1127 TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) { 1128 disconnected_qpair_cb(qpair, group->ctx); 1129 } 1130 1131 TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) { 1132 if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) { 1133 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 1134 /* Bump the number of completions so this counts as "busy" */ 1135 num_completions++; 1136 continue; 1137 } 1138 1139 local_completions = spdk_nvme_qpair_process_completions(qpair, 1140 completions_per_qpair); 1141 if (local_completions < 0 && error_reason == 0) { 1142 error_reason = local_completions; 1143 } else { 1144 num_completions += local_completions; 1145 assert(num_completions >= 0); 1146 } 1147 } 1148 1149 return error_reason ? error_reason : num_completions; 1150 } 1151 1152 int 1153 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, 1154 struct spdk_nvme_qpair *qpair) 1155 { 1156 CU_ASSERT(!qpair->is_connected); 1157 1158 qpair->poll_group = group; 1159 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 1160 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 1161 1162 return 0; 1163 } 1164 1165 int 1166 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, 1167 struct spdk_nvme_qpair *qpair) 1168 { 1169 CU_ASSERT(!qpair->is_connected); 1170 1171 if (qpair->poll_group == NULL) { 1172 return -ENOENT; 1173 } 1174 1175 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 1176 1177 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 1178 1179 qpair->poll_group = NULL; 1180 qpair->poll_group_tailq_head = NULL; 1181 1182 return 0; 1183 } 1184 1185 int 1186 spdk_bdev_register(struct spdk_bdev *bdev) 1187 { 1188 g_ut_registered_bdev = bdev; 1189 1190 return g_ut_register_bdev_status; 1191 } 1192 1193 void 1194 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) 1195 { 1196 int rc; 1197 1198 rc = bdev->fn_table->destruct(bdev->ctxt); 1199 1200 if (bdev == g_ut_registered_bdev) { 1201 g_ut_registered_bdev = NULL; 1202 } 1203 1204 if (rc <= 0 && cb_fn != NULL) { 1205 cb_fn(cb_arg, rc); 1206 } 1207 } 1208 1209 int 1210 spdk_bdev_open_ext(const char *bdev_name, bool write, 1211 spdk_bdev_event_cb_t event_cb, void *event_ctx, 1212 struct spdk_bdev_desc **desc) 1213 { 1214 if (g_ut_registered_bdev == NULL || 1215 strcmp(g_ut_registered_bdev->name, bdev_name) != 0) { 1216 return -ENODEV; 1217 } 1218 1219 *desc = (struct spdk_bdev_desc *)g_ut_registered_bdev; 1220 1221 return 0; 1222 } 1223 1224 struct spdk_bdev * 1225 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc) 1226 { 1227 return (struct spdk_bdev *)desc; 1228 } 1229 1230 int 1231 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) 1232 { 1233 bdev->blockcnt = size; 1234 1235 return 0; 1236 } 1237 1238 struct spdk_io_channel * 1239 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io) 1240 { 1241 return (struct spdk_io_channel *)bdev_io->internal.ch; 1242 } 1243 1244 void 1245 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) 1246 { 1247 bdev_io->internal.status = status; 1248 bdev_io->internal.in_submit_request = false; 1249 } 1250 1251 void 1252 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc) 1253 { 1254 if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) { 1255 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; 1256 } else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) { 1257 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED; 1258 } else { 1259 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR; 1260 } 1261 1262 bdev_io->internal.error.nvme.cdw0 = cdw0; 1263 bdev_io->internal.error.nvme.sct = sct; 1264 bdev_io->internal.error.nvme.sc = sc; 1265 1266 spdk_bdev_io_complete(bdev_io, bdev_io->internal.status); 1267 } 1268 1269 void 1270 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) 1271 { 1272 struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io); 1273 1274 ut_bdev_io_set_buf(bdev_io); 1275 1276 cb(ch, bdev_io, true); 1277 } 1278 1279 static void 1280 test_create_ctrlr(void) 1281 { 1282 struct spdk_nvme_transport_id trid = {}; 1283 struct spdk_nvme_ctrlr ctrlr = {}; 1284 int rc; 1285 1286 ut_init_trid(&trid); 1287 1288 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1289 CU_ASSERT(rc == 0); 1290 1291 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1292 1293 rc = bdev_nvme_delete("nvme0", &g_any_path); 1294 CU_ASSERT(rc == 0); 1295 1296 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1297 1298 poll_threads(); 1299 spdk_delay_us(1000); 1300 poll_threads(); 1301 1302 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1303 } 1304 1305 static void 1306 test_reset_ctrlr(void) 1307 { 1308 struct spdk_nvme_transport_id trid = {}; 1309 struct spdk_nvme_ctrlr ctrlr = {}; 1310 struct nvme_ctrlr *nvme_ctrlr = NULL; 1311 struct nvme_path_id *curr_trid; 1312 struct spdk_io_channel *ch1, *ch2; 1313 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1314 int rc; 1315 1316 ut_init_trid(&trid); 1317 TAILQ_INIT(&ctrlr.active_io_qpairs); 1318 1319 set_thread(0); 1320 1321 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1322 CU_ASSERT(rc == 0); 1323 1324 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1325 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1326 1327 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1328 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1329 1330 ch1 = spdk_get_io_channel(nvme_ctrlr); 1331 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1332 1333 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 1334 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1335 1336 set_thread(1); 1337 1338 ch2 = spdk_get_io_channel(nvme_ctrlr); 1339 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1340 1341 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 1342 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1343 1344 /* Reset starts from thread 1. */ 1345 set_thread(1); 1346 1347 /* Case 1: ctrlr is already being destructed. */ 1348 nvme_ctrlr->destruct = true; 1349 1350 rc = bdev_nvme_reset(nvme_ctrlr); 1351 CU_ASSERT(rc == -ENXIO); 1352 1353 /* Case 2: reset is in progress. */ 1354 nvme_ctrlr->destruct = false; 1355 nvme_ctrlr->resetting = true; 1356 1357 rc = bdev_nvme_reset(nvme_ctrlr); 1358 CU_ASSERT(rc == -EBUSY); 1359 1360 /* Case 3: reset completes successfully. */ 1361 nvme_ctrlr->resetting = false; 1362 curr_trid->is_failed = true; 1363 ctrlr.is_failed = true; 1364 1365 rc = bdev_nvme_reset(nvme_ctrlr); 1366 CU_ASSERT(rc == 0); 1367 CU_ASSERT(nvme_ctrlr->resetting == true); 1368 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1369 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1370 1371 poll_thread_times(0, 3); 1372 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1373 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1374 1375 poll_thread_times(0, 1); 1376 poll_thread_times(1, 1); 1377 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1378 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1379 CU_ASSERT(ctrlr.is_failed == true); 1380 1381 poll_thread_times(1, 1); 1382 poll_thread_times(0, 1); 1383 CU_ASSERT(ctrlr.is_failed == false); 1384 CU_ASSERT(ctrlr.adminq.is_connected == false); 1385 1386 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1387 poll_thread_times(0, 2); 1388 CU_ASSERT(ctrlr.adminq.is_connected == true); 1389 1390 poll_thread_times(0, 1); 1391 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1392 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1393 1394 poll_thread_times(1, 1); 1395 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1396 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1397 CU_ASSERT(nvme_ctrlr->resetting == true); 1398 CU_ASSERT(curr_trid->is_failed == true); 1399 1400 poll_thread_times(0, 2); 1401 CU_ASSERT(nvme_ctrlr->resetting == true); 1402 poll_thread_times(1, 1); 1403 CU_ASSERT(nvme_ctrlr->resetting == true); 1404 poll_thread_times(0, 1); 1405 CU_ASSERT(nvme_ctrlr->resetting == false); 1406 CU_ASSERT(curr_trid->is_failed == false); 1407 1408 spdk_put_io_channel(ch2); 1409 1410 set_thread(0); 1411 1412 spdk_put_io_channel(ch1); 1413 1414 poll_threads(); 1415 1416 rc = bdev_nvme_delete("nvme0", &g_any_path); 1417 CU_ASSERT(rc == 0); 1418 1419 poll_threads(); 1420 spdk_delay_us(1000); 1421 poll_threads(); 1422 1423 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1424 } 1425 1426 static void 1427 test_race_between_reset_and_destruct_ctrlr(void) 1428 { 1429 struct spdk_nvme_transport_id trid = {}; 1430 struct spdk_nvme_ctrlr ctrlr = {}; 1431 struct nvme_ctrlr *nvme_ctrlr; 1432 struct spdk_io_channel *ch1, *ch2; 1433 int rc; 1434 1435 ut_init_trid(&trid); 1436 TAILQ_INIT(&ctrlr.active_io_qpairs); 1437 1438 set_thread(0); 1439 1440 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1441 CU_ASSERT(rc == 0); 1442 1443 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1444 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1445 1446 ch1 = spdk_get_io_channel(nvme_ctrlr); 1447 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1448 1449 set_thread(1); 1450 1451 ch2 = spdk_get_io_channel(nvme_ctrlr); 1452 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1453 1454 /* Reset starts from thread 1. */ 1455 set_thread(1); 1456 1457 rc = bdev_nvme_reset(nvme_ctrlr); 1458 CU_ASSERT(rc == 0); 1459 CU_ASSERT(nvme_ctrlr->resetting == true); 1460 1461 /* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */ 1462 set_thread(0); 1463 1464 rc = bdev_nvme_delete("nvme0", &g_any_path); 1465 CU_ASSERT(rc == 0); 1466 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1467 CU_ASSERT(nvme_ctrlr->destruct == true); 1468 CU_ASSERT(nvme_ctrlr->resetting == true); 1469 1470 poll_threads(); 1471 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1472 poll_threads(); 1473 1474 /* Reset completed but ctrlr is not still destructed yet. */ 1475 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1476 CU_ASSERT(nvme_ctrlr->destruct == true); 1477 CU_ASSERT(nvme_ctrlr->resetting == false); 1478 1479 /* New reset request is rejected. */ 1480 rc = bdev_nvme_reset(nvme_ctrlr); 1481 CU_ASSERT(rc == -ENXIO); 1482 1483 /* Additional polling called spdk_io_device_unregister() to ctrlr, 1484 * However there are two channels and destruct is not completed yet. 1485 */ 1486 poll_threads(); 1487 1488 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1489 1490 set_thread(0); 1491 1492 spdk_put_io_channel(ch1); 1493 1494 set_thread(1); 1495 1496 spdk_put_io_channel(ch2); 1497 1498 poll_threads(); 1499 spdk_delay_us(1000); 1500 poll_threads(); 1501 1502 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1503 } 1504 1505 static void 1506 test_failover_ctrlr(void) 1507 { 1508 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 1509 struct spdk_nvme_ctrlr ctrlr = {}; 1510 struct nvme_ctrlr *nvme_ctrlr = NULL; 1511 struct nvme_path_id *curr_trid, *next_trid; 1512 struct spdk_io_channel *ch1, *ch2; 1513 int rc; 1514 1515 ut_init_trid(&trid1); 1516 ut_init_trid2(&trid2); 1517 TAILQ_INIT(&ctrlr.active_io_qpairs); 1518 1519 set_thread(0); 1520 1521 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1522 CU_ASSERT(rc == 0); 1523 1524 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1525 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1526 1527 ch1 = spdk_get_io_channel(nvme_ctrlr); 1528 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1529 1530 set_thread(1); 1531 1532 ch2 = spdk_get_io_channel(nvme_ctrlr); 1533 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1534 1535 /* First, test one trid case. */ 1536 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1537 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1538 1539 /* Failover starts from thread 1. */ 1540 set_thread(1); 1541 1542 /* Case 1: ctrlr is already being destructed. */ 1543 nvme_ctrlr->destruct = true; 1544 1545 rc = bdev_nvme_failover(nvme_ctrlr, false); 1546 CU_ASSERT(rc == -ENXIO); 1547 CU_ASSERT(curr_trid->is_failed == false); 1548 1549 /* Case 2: reset is in progress. */ 1550 nvme_ctrlr->destruct = false; 1551 nvme_ctrlr->resetting = true; 1552 1553 rc = bdev_nvme_failover(nvme_ctrlr, false); 1554 CU_ASSERT(rc == -EBUSY); 1555 1556 /* Case 3: reset completes successfully. */ 1557 nvme_ctrlr->resetting = false; 1558 1559 rc = bdev_nvme_failover(nvme_ctrlr, false); 1560 CU_ASSERT(rc == 0); 1561 1562 CU_ASSERT(nvme_ctrlr->resetting == true); 1563 CU_ASSERT(curr_trid->is_failed == true); 1564 1565 poll_threads(); 1566 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1567 poll_threads(); 1568 1569 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1570 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1571 1572 CU_ASSERT(nvme_ctrlr->resetting == false); 1573 CU_ASSERT(curr_trid->is_failed == false); 1574 1575 set_thread(0); 1576 1577 /* Second, test two trids case. */ 1578 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1579 CU_ASSERT(rc == 0); 1580 1581 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1582 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1583 CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id); 1584 CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0); 1585 1586 /* Failover starts from thread 1. */ 1587 set_thread(1); 1588 1589 /* Case 4: reset is in progress. */ 1590 nvme_ctrlr->resetting = true; 1591 1592 rc = bdev_nvme_failover(nvme_ctrlr, false); 1593 CU_ASSERT(rc == -EBUSY); 1594 1595 /* Case 5: failover completes successfully. */ 1596 nvme_ctrlr->resetting = false; 1597 1598 rc = bdev_nvme_failover(nvme_ctrlr, false); 1599 CU_ASSERT(rc == 0); 1600 1601 CU_ASSERT(nvme_ctrlr->resetting == true); 1602 1603 next_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1604 SPDK_CU_ASSERT_FATAL(next_trid != NULL); 1605 CU_ASSERT(next_trid != curr_trid); 1606 CU_ASSERT(next_trid == nvme_ctrlr->active_path_id); 1607 CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0); 1608 1609 poll_threads(); 1610 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1611 poll_threads(); 1612 1613 CU_ASSERT(nvme_ctrlr->resetting == false); 1614 1615 spdk_put_io_channel(ch2); 1616 1617 set_thread(0); 1618 1619 spdk_put_io_channel(ch1); 1620 1621 poll_threads(); 1622 1623 rc = bdev_nvme_delete("nvme0", &g_any_path); 1624 CU_ASSERT(rc == 0); 1625 1626 poll_threads(); 1627 spdk_delay_us(1000); 1628 poll_threads(); 1629 1630 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1631 } 1632 1633 /* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following. 1634 * 1635 * A nvme_ctrlr had trid1 and trid2 first. trid1 was active. A connection to trid1 was 1636 * disconnected and reset ctrlr failed repeatedly before starting failover from trid1 1637 * to trid2. While processing the failed reset, trid3 was added. trid1 should 1638 * have been active, i.e., the head of the list until the failover completed. 1639 * However trid3 was inserted to the head of the list by mistake. 1640 * 1641 * I/O qpairs have smaller polling period than admin qpair. When a connection is 1642 * detected, I/O qpair may detect the error earlier than admin qpair. I/O qpair error 1643 * invokes reset ctrlr and admin qpair error invokes failover ctrlr. Hence reset ctrlr 1644 * may be executed repeatedly before failover is executed. Hence this bug is real. 1645 * 1646 * The following test verifies the fix. 1647 */ 1648 static void 1649 test_race_between_failover_and_add_secondary_trid(void) 1650 { 1651 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 1652 struct spdk_nvme_ctrlr ctrlr = {}; 1653 struct nvme_ctrlr *nvme_ctrlr = NULL; 1654 struct nvme_path_id *path_id1, *path_id2, *path_id3; 1655 struct spdk_io_channel *ch1, *ch2; 1656 int rc; 1657 1658 ut_init_trid(&trid1); 1659 ut_init_trid2(&trid2); 1660 ut_init_trid3(&trid3); 1661 TAILQ_INIT(&ctrlr.active_io_qpairs); 1662 1663 set_thread(0); 1664 1665 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1666 CU_ASSERT(rc == 0); 1667 1668 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1669 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1670 1671 ch1 = spdk_get_io_channel(nvme_ctrlr); 1672 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1673 1674 set_thread(1); 1675 1676 ch2 = spdk_get_io_channel(nvme_ctrlr); 1677 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1678 1679 set_thread(0); 1680 1681 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1682 CU_ASSERT(rc == 0); 1683 1684 path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids); 1685 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 1686 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1687 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1688 path_id2 = TAILQ_NEXT(path_id1, link); 1689 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 1690 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1691 1692 ctrlr.fail_reset = true; 1693 1694 rc = bdev_nvme_reset(nvme_ctrlr); 1695 CU_ASSERT(rc == 0); 1696 1697 poll_threads(); 1698 1699 CU_ASSERT(path_id1->is_failed == true); 1700 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1701 1702 rc = bdev_nvme_reset(nvme_ctrlr); 1703 CU_ASSERT(rc == 0); 1704 1705 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 1706 CU_ASSERT(rc == 0); 1707 1708 CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids)); 1709 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1710 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1711 CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link)); 1712 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1713 path_id3 = TAILQ_NEXT(path_id2, link); 1714 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 1715 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0); 1716 1717 poll_threads(); 1718 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1719 poll_threads(); 1720 1721 spdk_put_io_channel(ch1); 1722 1723 set_thread(1); 1724 1725 spdk_put_io_channel(ch2); 1726 1727 poll_threads(); 1728 1729 set_thread(0); 1730 1731 rc = bdev_nvme_delete("nvme0", &g_any_path); 1732 CU_ASSERT(rc == 0); 1733 1734 poll_threads(); 1735 spdk_delay_us(1000); 1736 poll_threads(); 1737 1738 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1739 } 1740 1741 static void 1742 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc) 1743 { 1744 CU_ASSERT(rc == g_ut_attach_ctrlr_status); 1745 CU_ASSERT(bdev_count == g_ut_attach_bdev_count); 1746 } 1747 1748 static void 1749 test_pending_reset(void) 1750 { 1751 struct spdk_nvme_transport_id trid = {}; 1752 struct spdk_nvme_ctrlr *ctrlr; 1753 struct nvme_ctrlr *nvme_ctrlr = NULL; 1754 const int STRING_SIZE = 32; 1755 const char *attached_names[STRING_SIZE]; 1756 struct nvme_bdev *bdev; 1757 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 1758 struct spdk_io_channel *ch1, *ch2; 1759 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 1760 struct nvme_io_path *io_path1, *io_path2; 1761 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1762 int rc; 1763 1764 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1765 ut_init_trid(&trid); 1766 1767 set_thread(0); 1768 1769 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1770 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1771 1772 g_ut_attach_ctrlr_status = 0; 1773 g_ut_attach_bdev_count = 1; 1774 1775 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1776 attach_ctrlr_done, NULL, NULL, NULL, false); 1777 CU_ASSERT(rc == 0); 1778 1779 spdk_delay_us(1000); 1780 poll_threads(); 1781 1782 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1783 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1784 1785 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1786 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1787 1788 ch1 = spdk_get_io_channel(bdev); 1789 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1790 1791 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 1792 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 1793 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 1794 ctrlr_ch1 = io_path1->qpair->ctrlr_ch; 1795 SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL); 1796 1797 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 1798 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1799 1800 set_thread(1); 1801 1802 ch2 = spdk_get_io_channel(bdev); 1803 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1804 1805 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 1806 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 1807 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 1808 ctrlr_ch2 = io_path2->qpair->ctrlr_ch; 1809 SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL); 1810 1811 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 1812 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1813 1814 /* The first reset request is submitted on thread 1, and the second reset request 1815 * is submitted on thread 0 while processing the first request. 1816 */ 1817 bdev_nvme_submit_request(ch2, first_bdev_io); 1818 CU_ASSERT(nvme_ctrlr->resetting == true); 1819 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1820 1821 set_thread(0); 1822 1823 bdev_nvme_submit_request(ch1, second_bdev_io); 1824 CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io); 1825 1826 poll_threads(); 1827 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1828 poll_threads(); 1829 1830 CU_ASSERT(nvme_ctrlr->resetting == false); 1831 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1832 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1833 1834 /* The first reset request is submitted on thread 1, and the second reset request 1835 * is submitted on thread 0 while processing the first request. 1836 * 1837 * The difference from the above scenario is that the controller is removed while 1838 * processing the first request. Hence both reset requests should fail. 1839 */ 1840 set_thread(1); 1841 1842 bdev_nvme_submit_request(ch2, first_bdev_io); 1843 CU_ASSERT(nvme_ctrlr->resetting == true); 1844 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1845 1846 set_thread(0); 1847 1848 bdev_nvme_submit_request(ch1, second_bdev_io); 1849 CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io); 1850 1851 ctrlr->fail_reset = true; 1852 1853 poll_threads(); 1854 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1855 poll_threads(); 1856 1857 CU_ASSERT(nvme_ctrlr->resetting == false); 1858 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1859 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1860 1861 spdk_put_io_channel(ch1); 1862 1863 set_thread(1); 1864 1865 spdk_put_io_channel(ch2); 1866 1867 poll_threads(); 1868 1869 set_thread(0); 1870 1871 rc = bdev_nvme_delete("nvme0", &g_any_path); 1872 CU_ASSERT(rc == 0); 1873 1874 poll_threads(); 1875 spdk_delay_us(1000); 1876 poll_threads(); 1877 1878 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1879 1880 free(first_bdev_io); 1881 free(second_bdev_io); 1882 } 1883 1884 static void 1885 test_attach_ctrlr(void) 1886 { 1887 struct spdk_nvme_transport_id trid = {}; 1888 struct spdk_nvme_ctrlr *ctrlr; 1889 struct nvme_ctrlr *nvme_ctrlr; 1890 const int STRING_SIZE = 32; 1891 const char *attached_names[STRING_SIZE]; 1892 struct nvme_bdev *nbdev; 1893 int rc; 1894 1895 set_thread(0); 1896 1897 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1898 ut_init_trid(&trid); 1899 1900 /* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed 1901 * by probe polling. 1902 */ 1903 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 1904 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1905 1906 ctrlr->is_failed = true; 1907 g_ut_attach_ctrlr_status = -EIO; 1908 g_ut_attach_bdev_count = 0; 1909 1910 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1911 attach_ctrlr_done, NULL, NULL, NULL, false); 1912 CU_ASSERT(rc == 0); 1913 1914 spdk_delay_us(1000); 1915 poll_threads(); 1916 1917 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1918 1919 /* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */ 1920 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 1921 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1922 1923 g_ut_attach_ctrlr_status = 0; 1924 1925 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1926 attach_ctrlr_done, NULL, NULL, NULL, false); 1927 CU_ASSERT(rc == 0); 1928 1929 spdk_delay_us(1000); 1930 poll_threads(); 1931 1932 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1933 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1934 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 1935 1936 rc = bdev_nvme_delete("nvme0", &g_any_path); 1937 CU_ASSERT(rc == 0); 1938 1939 poll_threads(); 1940 spdk_delay_us(1000); 1941 poll_threads(); 1942 1943 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1944 1945 /* If ctrlr has one namespace, one nvme_ctrlr with one namespace and 1946 * one nvme_bdev is created. 1947 */ 1948 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1949 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1950 1951 g_ut_attach_bdev_count = 1; 1952 1953 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1954 attach_ctrlr_done, NULL, NULL, NULL, false); 1955 CU_ASSERT(rc == 0); 1956 1957 spdk_delay_us(1000); 1958 poll_threads(); 1959 1960 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1961 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1962 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 1963 1964 CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0); 1965 attached_names[0] = NULL; 1966 1967 nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1968 SPDK_CU_ASSERT_FATAL(nbdev != NULL); 1969 CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr); 1970 1971 rc = bdev_nvme_delete("nvme0", &g_any_path); 1972 CU_ASSERT(rc == 0); 1973 1974 poll_threads(); 1975 spdk_delay_us(1000); 1976 poll_threads(); 1977 1978 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1979 1980 /* Ctrlr has one namespace but one nvme_ctrlr with no namespace is 1981 * created because creating one nvme_bdev failed. 1982 */ 1983 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1984 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1985 1986 g_ut_register_bdev_status = -EINVAL; 1987 g_ut_attach_bdev_count = 0; 1988 1989 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1990 attach_ctrlr_done, NULL, NULL, NULL, false); 1991 CU_ASSERT(rc == 0); 1992 1993 spdk_delay_us(1000); 1994 poll_threads(); 1995 1996 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1997 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1998 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 1999 2000 CU_ASSERT(attached_names[0] == NULL); 2001 2002 rc = bdev_nvme_delete("nvme0", &g_any_path); 2003 CU_ASSERT(rc == 0); 2004 2005 poll_threads(); 2006 spdk_delay_us(1000); 2007 poll_threads(); 2008 2009 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2010 2011 g_ut_register_bdev_status = 0; 2012 } 2013 2014 static void 2015 test_aer_cb(void) 2016 { 2017 struct spdk_nvme_transport_id trid = {}; 2018 struct spdk_nvme_ctrlr *ctrlr; 2019 struct nvme_ctrlr *nvme_ctrlr; 2020 struct nvme_bdev *bdev; 2021 const int STRING_SIZE = 32; 2022 const char *attached_names[STRING_SIZE]; 2023 union spdk_nvme_async_event_completion event = {}; 2024 struct spdk_nvme_cpl cpl = {}; 2025 int rc; 2026 2027 set_thread(0); 2028 2029 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2030 ut_init_trid(&trid); 2031 2032 /* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th 2033 * namespaces are populated. 2034 */ 2035 ctrlr = ut_attach_ctrlr(&trid, 4, true, false); 2036 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2037 2038 ctrlr->ns[0].is_active = false; 2039 2040 g_ut_attach_ctrlr_status = 0; 2041 g_ut_attach_bdev_count = 3; 2042 2043 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2044 attach_ctrlr_done, NULL, NULL, NULL, false); 2045 CU_ASSERT(rc == 0); 2046 2047 spdk_delay_us(1000); 2048 poll_threads(); 2049 2050 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2051 poll_threads(); 2052 2053 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2054 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2055 2056 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL); 2057 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2058 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2059 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2060 2061 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev; 2062 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2063 CU_ASSERT(bdev->disk.blockcnt == 1024); 2064 2065 /* Dynamically populate 1st namespace and depopulate 3rd namespace, and 2066 * change the size of the 4th namespace. 2067 */ 2068 ctrlr->ns[0].is_active = true; 2069 ctrlr->ns[2].is_active = false; 2070 ctrlr->nsdata[3].nsze = 2048; 2071 2072 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2073 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 2074 cpl.cdw0 = event.raw; 2075 2076 aer_cb(nvme_ctrlr, &cpl); 2077 2078 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2079 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2080 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL); 2081 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2082 CU_ASSERT(bdev->disk.blockcnt == 2048); 2083 2084 /* Change ANA state of active namespaces. */ 2085 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2086 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2087 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2088 2089 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2090 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 2091 cpl.cdw0 = event.raw; 2092 2093 aer_cb(nvme_ctrlr, &cpl); 2094 2095 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2096 poll_threads(); 2097 2098 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2099 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2100 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2101 2102 rc = bdev_nvme_delete("nvme0", &g_any_path); 2103 CU_ASSERT(rc == 0); 2104 2105 poll_threads(); 2106 spdk_delay_us(1000); 2107 poll_threads(); 2108 2109 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2110 } 2111 2112 static void 2113 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2114 enum spdk_bdev_io_type io_type) 2115 { 2116 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2117 struct nvme_io_path *io_path; 2118 struct spdk_nvme_qpair *qpair; 2119 2120 io_path = bdev_nvme_find_io_path(nbdev_ch); 2121 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2122 qpair = io_path->qpair->qpair; 2123 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2124 2125 bdev_io->type = io_type; 2126 bdev_io->internal.in_submit_request = true; 2127 2128 bdev_nvme_submit_request(ch, bdev_io); 2129 2130 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2131 CU_ASSERT(qpair->num_outstanding_reqs == 1); 2132 2133 poll_threads(); 2134 2135 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2136 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2137 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2138 } 2139 2140 static void 2141 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2142 enum spdk_bdev_io_type io_type) 2143 { 2144 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2145 struct nvme_io_path *io_path; 2146 struct spdk_nvme_qpair *qpair; 2147 2148 io_path = bdev_nvme_find_io_path(nbdev_ch); 2149 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2150 qpair = io_path->qpair->qpair; 2151 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2152 2153 bdev_io->type = io_type; 2154 bdev_io->internal.in_submit_request = true; 2155 2156 bdev_nvme_submit_request(ch, bdev_io); 2157 2158 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2159 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2160 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2161 } 2162 2163 static void 2164 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) 2165 { 2166 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2167 struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 2168 struct ut_nvme_req *req; 2169 struct nvme_io_path *io_path; 2170 struct spdk_nvme_qpair *qpair; 2171 2172 io_path = bdev_nvme_find_io_path(nbdev_ch); 2173 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2174 qpair = io_path->qpair->qpair; 2175 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2176 2177 /* Only compare and write now. */ 2178 bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE; 2179 bdev_io->internal.in_submit_request = true; 2180 2181 bdev_nvme_submit_request(ch, bdev_io); 2182 2183 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2184 CU_ASSERT(qpair->num_outstanding_reqs == 2); 2185 CU_ASSERT(bio->first_fused_submitted == true); 2186 2187 /* First outstanding request is compare operation. */ 2188 req = TAILQ_FIRST(&qpair->outstanding_reqs); 2189 SPDK_CU_ASSERT_FATAL(req != NULL); 2190 CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE); 2191 req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE; 2192 2193 poll_threads(); 2194 2195 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2196 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2197 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2198 } 2199 2200 static void 2201 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2202 struct spdk_nvme_ctrlr *ctrlr) 2203 { 2204 bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 2205 bdev_io->internal.in_submit_request = true; 2206 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2207 2208 bdev_nvme_submit_request(ch, bdev_io); 2209 2210 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2211 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2212 2213 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2214 poll_thread_times(1, 1); 2215 2216 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2217 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2218 2219 poll_thread_times(0, 1); 2220 2221 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2222 } 2223 2224 static void 2225 test_submit_nvme_cmd(void) 2226 { 2227 struct spdk_nvme_transport_id trid = {}; 2228 struct spdk_nvme_ctrlr *ctrlr; 2229 struct nvme_ctrlr *nvme_ctrlr; 2230 const int STRING_SIZE = 32; 2231 const char *attached_names[STRING_SIZE]; 2232 struct nvme_bdev *bdev; 2233 struct spdk_bdev_io *bdev_io; 2234 struct spdk_io_channel *ch; 2235 struct spdk_bdev_ext_io_opts ext_io_opts = {}; 2236 int rc; 2237 2238 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2239 ut_init_trid(&trid); 2240 2241 set_thread(1); 2242 2243 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2244 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2245 2246 g_ut_attach_ctrlr_status = 0; 2247 g_ut_attach_bdev_count = 1; 2248 2249 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2250 attach_ctrlr_done, NULL, NULL, NULL, false); 2251 CU_ASSERT(rc == 0); 2252 2253 spdk_delay_us(1000); 2254 poll_threads(); 2255 2256 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2257 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2258 2259 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2260 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2261 2262 set_thread(0); 2263 2264 ch = spdk_get_io_channel(bdev); 2265 SPDK_CU_ASSERT_FATAL(ch != NULL); 2266 2267 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch); 2268 2269 bdev_io->u.bdev.iovs = NULL; 2270 2271 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2272 2273 ut_bdev_io_set_buf(bdev_io); 2274 2275 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2276 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2277 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE); 2278 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP); 2279 2280 ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH); 2281 2282 ut_test_submit_fused_nvme_cmd(ch, bdev_io); 2283 2284 /* Verify that ext NVME API is called if bdev_io ext_opts is set */ 2285 bdev_io->u.bdev.ext_opts = &ext_io_opts; 2286 g_ut_readv_ext_called = false; 2287 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2288 CU_ASSERT(g_ut_readv_ext_called == true); 2289 g_ut_readv_ext_called = false; 2290 2291 g_ut_writev_ext_called = false; 2292 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2293 CU_ASSERT(g_ut_writev_ext_called == true); 2294 g_ut_writev_ext_called = false; 2295 bdev_io->u.bdev.ext_opts = NULL; 2296 2297 ut_test_submit_admin_cmd(ch, bdev_io, ctrlr); 2298 2299 free(bdev_io); 2300 2301 spdk_put_io_channel(ch); 2302 2303 poll_threads(); 2304 2305 set_thread(1); 2306 2307 rc = bdev_nvme_delete("nvme0", &g_any_path); 2308 CU_ASSERT(rc == 0); 2309 2310 poll_threads(); 2311 spdk_delay_us(1000); 2312 poll_threads(); 2313 2314 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2315 } 2316 2317 static void 2318 test_add_remove_trid(void) 2319 { 2320 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 2321 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 2322 struct nvme_ctrlr *nvme_ctrlr = NULL; 2323 const int STRING_SIZE = 32; 2324 const char *attached_names[STRING_SIZE]; 2325 struct nvme_path_id *ctrid; 2326 int rc; 2327 2328 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2329 ut_init_trid(&path1.trid); 2330 ut_init_trid2(&path2.trid); 2331 ut_init_trid3(&path3.trid); 2332 2333 set_thread(0); 2334 2335 g_ut_attach_ctrlr_status = 0; 2336 g_ut_attach_bdev_count = 0; 2337 2338 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2339 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2340 2341 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2342 attach_ctrlr_done, NULL, NULL, NULL, false); 2343 CU_ASSERT(rc == 0); 2344 2345 spdk_delay_us(1000); 2346 poll_threads(); 2347 2348 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2349 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2350 2351 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2352 2353 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2354 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2355 2356 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2357 attach_ctrlr_done, NULL, NULL, NULL, false); 2358 CU_ASSERT(rc == 0); 2359 2360 spdk_delay_us(1000); 2361 poll_threads(); 2362 2363 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2364 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2365 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2366 break; 2367 } 2368 } 2369 CU_ASSERT(ctrid != NULL); 2370 2371 /* trid3 is not in the registered list. */ 2372 rc = bdev_nvme_delete("nvme0", &path3); 2373 CU_ASSERT(rc == -ENXIO); 2374 2375 /* trid2 is not used, and simply removed. */ 2376 rc = bdev_nvme_delete("nvme0", &path2); 2377 CU_ASSERT(rc == 0); 2378 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2379 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2380 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2381 } 2382 2383 ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false); 2384 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 2385 2386 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 2387 attach_ctrlr_done, NULL, NULL, NULL, false); 2388 CU_ASSERT(rc == 0); 2389 2390 spdk_delay_us(1000); 2391 poll_threads(); 2392 2393 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2394 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2395 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) { 2396 break; 2397 } 2398 } 2399 CU_ASSERT(ctrid != NULL); 2400 2401 /* path1 is currently used and path3 is an alternative path. 2402 * If we remove path1, path is changed to path3. 2403 */ 2404 rc = bdev_nvme_delete("nvme0", &path1); 2405 CU_ASSERT(rc == 0); 2406 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2407 CU_ASSERT(nvme_ctrlr->resetting == true); 2408 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2409 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0); 2410 } 2411 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0); 2412 2413 poll_threads(); 2414 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2415 poll_threads(); 2416 2417 CU_ASSERT(nvme_ctrlr->resetting == false); 2418 2419 /* path3 is the current and only path. If we remove path3, the corresponding 2420 * nvme_ctrlr is removed. 2421 */ 2422 rc = bdev_nvme_delete("nvme0", &path3); 2423 CU_ASSERT(rc == 0); 2424 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2425 2426 poll_threads(); 2427 spdk_delay_us(1000); 2428 poll_threads(); 2429 2430 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2431 2432 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2433 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2434 2435 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2436 attach_ctrlr_done, NULL, NULL, NULL, false); 2437 CU_ASSERT(rc == 0); 2438 2439 spdk_delay_us(1000); 2440 poll_threads(); 2441 2442 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2443 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2444 2445 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2446 2447 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2448 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2449 2450 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2451 attach_ctrlr_done, NULL, NULL, NULL, false); 2452 CU_ASSERT(rc == 0); 2453 2454 spdk_delay_us(1000); 2455 poll_threads(); 2456 2457 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2458 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2459 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2460 break; 2461 } 2462 } 2463 CU_ASSERT(ctrid != NULL); 2464 2465 /* If trid is not specified, nvme_ctrlr itself is removed. */ 2466 rc = bdev_nvme_delete("nvme0", &g_any_path); 2467 CU_ASSERT(rc == 0); 2468 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2469 2470 poll_threads(); 2471 spdk_delay_us(1000); 2472 poll_threads(); 2473 2474 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2475 } 2476 2477 static void 2478 test_abort(void) 2479 { 2480 struct spdk_nvme_transport_id trid = {}; 2481 struct nvme_ctrlr_opts opts = {}; 2482 struct spdk_nvme_ctrlr *ctrlr; 2483 struct nvme_ctrlr *nvme_ctrlr; 2484 const int STRING_SIZE = 32; 2485 const char *attached_names[STRING_SIZE]; 2486 struct nvme_bdev *bdev; 2487 struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io; 2488 struct spdk_io_channel *ch1, *ch2; 2489 struct nvme_bdev_channel *nbdev_ch1; 2490 struct nvme_io_path *io_path1; 2491 struct nvme_qpair *nvme_qpair1; 2492 int rc; 2493 2494 /* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on 2495 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests 2496 * are submitted on thread 1. Both should succeed. 2497 */ 2498 2499 ut_init_trid(&trid); 2500 2501 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2502 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2503 2504 g_ut_attach_ctrlr_status = 0; 2505 g_ut_attach_bdev_count = 1; 2506 2507 set_thread(1); 2508 2509 opts.ctrlr_loss_timeout_sec = -1; 2510 opts.reconnect_delay_sec = 1; 2511 2512 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2513 attach_ctrlr_done, NULL, NULL, &opts, false); 2514 CU_ASSERT(rc == 0); 2515 2516 spdk_delay_us(1000); 2517 poll_threads(); 2518 2519 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2520 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2521 2522 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2523 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2524 2525 write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 2526 ut_bdev_io_set_buf(write_io); 2527 2528 fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL); 2529 ut_bdev_io_set_buf(fuse_io); 2530 2531 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 2532 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2533 2534 abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL); 2535 2536 set_thread(0); 2537 2538 ch1 = spdk_get_io_channel(bdev); 2539 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2540 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 2541 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 2542 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 2543 nvme_qpair1 = io_path1->qpair; 2544 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 2545 2546 set_thread(1); 2547 2548 ch2 = spdk_get_io_channel(bdev); 2549 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2550 2551 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2552 fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2553 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2554 2555 /* Aborting the already completed request should fail. */ 2556 write_io->internal.in_submit_request = true; 2557 bdev_nvme_submit_request(ch1, write_io); 2558 poll_threads(); 2559 2560 CU_ASSERT(write_io->internal.in_submit_request == false); 2561 2562 abort_io->u.abort.bio_to_abort = write_io; 2563 abort_io->internal.in_submit_request = true; 2564 2565 bdev_nvme_submit_request(ch1, abort_io); 2566 2567 poll_threads(); 2568 2569 CU_ASSERT(abort_io->internal.in_submit_request == false); 2570 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2571 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2572 2573 admin_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2574 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2575 2576 admin_io->internal.in_submit_request = true; 2577 bdev_nvme_submit_request(ch1, admin_io); 2578 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2579 poll_threads(); 2580 2581 CU_ASSERT(admin_io->internal.in_submit_request == false); 2582 2583 abort_io->u.abort.bio_to_abort = admin_io; 2584 abort_io->internal.in_submit_request = true; 2585 2586 bdev_nvme_submit_request(ch2, abort_io); 2587 2588 poll_threads(); 2589 2590 CU_ASSERT(abort_io->internal.in_submit_request == false); 2591 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2592 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2593 2594 /* Aborting the write request should succeed. */ 2595 write_io->internal.in_submit_request = true; 2596 bdev_nvme_submit_request(ch1, write_io); 2597 2598 CU_ASSERT(write_io->internal.in_submit_request == true); 2599 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 2600 2601 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2602 abort_io->u.abort.bio_to_abort = write_io; 2603 abort_io->internal.in_submit_request = true; 2604 2605 bdev_nvme_submit_request(ch1, abort_io); 2606 2607 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2608 poll_threads(); 2609 2610 CU_ASSERT(abort_io->internal.in_submit_request == false); 2611 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2612 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2613 CU_ASSERT(write_io->internal.in_submit_request == false); 2614 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2615 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2616 2617 /* Aborting the fuse request should succeed. */ 2618 fuse_io->internal.in_submit_request = true; 2619 bdev_nvme_submit_request(ch1, fuse_io); 2620 2621 CU_ASSERT(fuse_io->internal.in_submit_request == true); 2622 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2); 2623 2624 abort_io->u.abort.bio_to_abort = fuse_io; 2625 abort_io->internal.in_submit_request = true; 2626 2627 bdev_nvme_submit_request(ch1, abort_io); 2628 2629 spdk_delay_us(10000); 2630 poll_threads(); 2631 2632 CU_ASSERT(abort_io->internal.in_submit_request == false); 2633 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2634 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2635 CU_ASSERT(fuse_io->internal.in_submit_request == false); 2636 CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2637 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2638 2639 /* Aborting the admin request should succeed. */ 2640 admin_io->internal.in_submit_request = true; 2641 bdev_nvme_submit_request(ch1, admin_io); 2642 2643 CU_ASSERT(admin_io->internal.in_submit_request == true); 2644 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2645 2646 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2647 abort_io->u.abort.bio_to_abort = admin_io; 2648 abort_io->internal.in_submit_request = true; 2649 2650 bdev_nvme_submit_request(ch2, abort_io); 2651 2652 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2653 poll_threads(); 2654 2655 CU_ASSERT(abort_io->internal.in_submit_request == false); 2656 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2657 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2658 CU_ASSERT(admin_io->internal.in_submit_request == false); 2659 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2660 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2661 2662 set_thread(0); 2663 2664 /* If qpair is disconnected, it is freed and then reconnected via resetting 2665 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 2666 * while resetting the nvme_ctrlr. 2667 */ 2668 nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 2669 2670 poll_thread_times(0, 3); 2671 2672 CU_ASSERT(nvme_qpair1->qpair == NULL); 2673 CU_ASSERT(nvme_ctrlr->resetting == true); 2674 2675 write_io->internal.in_submit_request = true; 2676 2677 bdev_nvme_submit_request(ch1, write_io); 2678 2679 CU_ASSERT(write_io->internal.in_submit_request == true); 2680 CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list)); 2681 2682 /* Aborting the queued write request should succeed immediately. */ 2683 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2684 abort_io->u.abort.bio_to_abort = write_io; 2685 abort_io->internal.in_submit_request = true; 2686 2687 bdev_nvme_submit_request(ch1, abort_io); 2688 2689 CU_ASSERT(abort_io->internal.in_submit_request == false); 2690 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2691 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2692 CU_ASSERT(write_io->internal.in_submit_request == false); 2693 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2694 2695 poll_threads(); 2696 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2697 poll_threads(); 2698 2699 spdk_put_io_channel(ch1); 2700 2701 set_thread(1); 2702 2703 spdk_put_io_channel(ch2); 2704 2705 poll_threads(); 2706 2707 free(write_io); 2708 free(fuse_io); 2709 free(admin_io); 2710 free(abort_io); 2711 2712 set_thread(1); 2713 2714 rc = bdev_nvme_delete("nvme0", &g_any_path); 2715 CU_ASSERT(rc == 0); 2716 2717 poll_threads(); 2718 spdk_delay_us(1000); 2719 poll_threads(); 2720 2721 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2722 } 2723 2724 static void 2725 test_get_io_qpair(void) 2726 { 2727 struct spdk_nvme_transport_id trid = {}; 2728 struct spdk_nvme_ctrlr ctrlr = {}; 2729 struct nvme_ctrlr *nvme_ctrlr = NULL; 2730 struct spdk_io_channel *ch; 2731 struct nvme_ctrlr_channel *ctrlr_ch; 2732 struct spdk_nvme_qpair *qpair; 2733 int rc; 2734 2735 ut_init_trid(&trid); 2736 TAILQ_INIT(&ctrlr.active_io_qpairs); 2737 2738 set_thread(0); 2739 2740 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 2741 CU_ASSERT(rc == 0); 2742 2743 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2744 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2745 2746 ch = spdk_get_io_channel(nvme_ctrlr); 2747 SPDK_CU_ASSERT_FATAL(ch != NULL); 2748 ctrlr_ch = spdk_io_channel_get_ctx(ch); 2749 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 2750 2751 qpair = bdev_nvme_get_io_qpair(ch); 2752 CU_ASSERT(qpair == ctrlr_ch->qpair->qpair); 2753 2754 spdk_put_io_channel(ch); 2755 2756 rc = bdev_nvme_delete("nvme0", &g_any_path); 2757 CU_ASSERT(rc == 0); 2758 2759 poll_threads(); 2760 spdk_delay_us(1000); 2761 poll_threads(); 2762 2763 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2764 } 2765 2766 /* Test a scenario that the bdev subsystem starts shutdown when there still exists 2767 * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a 2768 * test case to avoid regression for this scenario. spdk_bdev_unregister() calls 2769 * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly. 2770 */ 2771 static void 2772 test_bdev_unregister(void) 2773 { 2774 struct spdk_nvme_transport_id trid = {}; 2775 struct spdk_nvme_ctrlr *ctrlr; 2776 struct nvme_ctrlr *nvme_ctrlr; 2777 struct nvme_ns *nvme_ns1, *nvme_ns2; 2778 const int STRING_SIZE = 32; 2779 const char *attached_names[STRING_SIZE]; 2780 struct nvme_bdev *bdev1, *bdev2; 2781 int rc; 2782 2783 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2784 ut_init_trid(&trid); 2785 2786 ctrlr = ut_attach_ctrlr(&trid, 2, false, false); 2787 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2788 2789 g_ut_attach_ctrlr_status = 0; 2790 g_ut_attach_bdev_count = 2; 2791 2792 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2793 attach_ctrlr_done, NULL, NULL, NULL, false); 2794 CU_ASSERT(rc == 0); 2795 2796 spdk_delay_us(1000); 2797 poll_threads(); 2798 2799 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2800 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2801 2802 nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1); 2803 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 2804 2805 bdev1 = nvme_ns1->bdev; 2806 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 2807 2808 nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2); 2809 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 2810 2811 bdev2 = nvme_ns2->bdev; 2812 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 2813 2814 bdev_nvme_destruct(&bdev1->disk); 2815 bdev_nvme_destruct(&bdev2->disk); 2816 2817 poll_threads(); 2818 2819 CU_ASSERT(nvme_ns1->bdev == NULL); 2820 CU_ASSERT(nvme_ns2->bdev == NULL); 2821 2822 nvme_ctrlr->destruct = true; 2823 _nvme_ctrlr_destruct(nvme_ctrlr); 2824 2825 poll_threads(); 2826 spdk_delay_us(1000); 2827 poll_threads(); 2828 2829 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2830 } 2831 2832 static void 2833 test_compare_ns(void) 2834 { 2835 struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {}; 2836 struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, }; 2837 struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, }; 2838 struct spdk_uuid uuid1 = { .u.raw = { 0xAA } }; 2839 struct spdk_uuid uuid2 = { .u.raw = { 0xAB } }; 2840 2841 /* No IDs are defined. */ 2842 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2843 2844 /* Only EUI64 are defined and not matched. */ 2845 nsdata1.eui64 = 0xABCDEF0123456789; 2846 nsdata2.eui64 = 0xBBCDEF0123456789; 2847 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2848 2849 /* Only EUI64 are defined and matched. */ 2850 nsdata2.eui64 = 0xABCDEF0123456789; 2851 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2852 2853 /* Only NGUID are defined and not matched. */ 2854 nsdata1.eui64 = 0x0; 2855 nsdata2.eui64 = 0x0; 2856 nsdata1.nguid[0] = 0x12; 2857 nsdata2.nguid[0] = 0x10; 2858 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2859 2860 /* Only NGUID are defined and matched. */ 2861 nsdata2.nguid[0] = 0x12; 2862 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2863 2864 /* Only UUID are defined and not matched. */ 2865 nsdata1.nguid[0] = 0x0; 2866 nsdata2.nguid[0] = 0x0; 2867 ns1.uuid = &uuid1; 2868 ns2.uuid = &uuid2; 2869 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2870 2871 /* Only one UUID is defined. */ 2872 ns1.uuid = NULL; 2873 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2874 2875 /* Only UUID are defined and matched. */ 2876 ns1.uuid = &uuid2; 2877 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2878 2879 /* All EUI64, NGUID, and UUID are defined and matched. */ 2880 nsdata1.eui64 = 0x123456789ABCDEF; 2881 nsdata2.eui64 = 0x123456789ABCDEF; 2882 nsdata1.nguid[15] = 0x34; 2883 nsdata2.nguid[15] = 0x34; 2884 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2885 2886 /* CSI are not matched. */ 2887 ns1.csi = SPDK_NVME_CSI_ZNS; 2888 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2889 } 2890 2891 static void 2892 test_init_ana_log_page(void) 2893 { 2894 struct spdk_nvme_transport_id trid = {}; 2895 struct spdk_nvme_ctrlr *ctrlr; 2896 struct nvme_ctrlr *nvme_ctrlr; 2897 const int STRING_SIZE = 32; 2898 const char *attached_names[STRING_SIZE]; 2899 int rc; 2900 2901 set_thread(0); 2902 2903 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2904 ut_init_trid(&trid); 2905 2906 ctrlr = ut_attach_ctrlr(&trid, 5, true, false); 2907 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2908 2909 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 2910 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2911 ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2912 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 2913 ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2914 2915 g_ut_attach_ctrlr_status = 0; 2916 g_ut_attach_bdev_count = 5; 2917 2918 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2919 attach_ctrlr_done, NULL, NULL, NULL, false); 2920 CU_ASSERT(rc == 0); 2921 2922 spdk_delay_us(1000); 2923 poll_threads(); 2924 2925 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2926 poll_threads(); 2927 2928 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2929 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2930 2931 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2932 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2933 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2934 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2935 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL); 2936 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 2937 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2938 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2939 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE); 2940 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2941 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL); 2942 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL); 2943 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL); 2944 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL); 2945 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL); 2946 2947 rc = bdev_nvme_delete("nvme0", &g_any_path); 2948 CU_ASSERT(rc == 0); 2949 2950 poll_threads(); 2951 spdk_delay_us(1000); 2952 poll_threads(); 2953 2954 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2955 } 2956 2957 static void 2958 init_accel(void) 2959 { 2960 spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb, 2961 sizeof(int), "accel_p"); 2962 } 2963 2964 static void 2965 fini_accel(void) 2966 { 2967 spdk_io_device_unregister(g_accel_p, NULL); 2968 } 2969 2970 static void 2971 test_get_memory_domains(void) 2972 { 2973 struct nvme_ctrlr ctrlr = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef }; 2974 struct nvme_ns ns = { .ctrlr = &ctrlr }; 2975 struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) }; 2976 struct spdk_memory_domain *domains[2] = {}; 2977 int rc = 0; 2978 2979 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns, tailq); 2980 2981 /* nvme controller doesn't have memory domainы */ 2982 MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 0); 2983 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 2984 CU_ASSERT(rc == 0) 2985 2986 /* nvme controller has a memory domain */ 2987 MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 1); 2988 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 2989 CU_ASSERT(rc == 1); 2990 MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domain); 2991 } 2992 2993 static void 2994 test_reconnect_qpair(void) 2995 { 2996 struct spdk_nvme_transport_id trid = {}; 2997 struct spdk_nvme_ctrlr *ctrlr; 2998 struct nvme_ctrlr *nvme_ctrlr; 2999 const int STRING_SIZE = 32; 3000 const char *attached_names[STRING_SIZE]; 3001 struct nvme_bdev *bdev; 3002 struct spdk_io_channel *ch1, *ch2; 3003 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 3004 struct nvme_io_path *io_path1, *io_path2; 3005 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 3006 int rc; 3007 3008 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3009 ut_init_trid(&trid); 3010 3011 set_thread(0); 3012 3013 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 3014 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3015 3016 g_ut_attach_ctrlr_status = 0; 3017 g_ut_attach_bdev_count = 1; 3018 3019 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3020 attach_ctrlr_done, NULL, NULL, NULL, false); 3021 CU_ASSERT(rc == 0); 3022 3023 spdk_delay_us(1000); 3024 poll_threads(); 3025 3026 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3027 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3028 3029 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 3030 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3031 3032 ch1 = spdk_get_io_channel(bdev); 3033 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 3034 3035 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 3036 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 3037 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3038 nvme_qpair1 = io_path1->qpair; 3039 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 3040 3041 set_thread(1); 3042 3043 ch2 = spdk_get_io_channel(bdev); 3044 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 3045 3046 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 3047 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 3048 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3049 nvme_qpair2 = io_path2->qpair; 3050 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 3051 3052 /* If a qpair is disconnected, it is freed and then reconnected via 3053 * resetting the corresponding nvme_ctrlr. 3054 */ 3055 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3056 ctrlr->is_failed = true; 3057 3058 poll_thread_times(1, 3); 3059 CU_ASSERT(nvme_qpair1->qpair != NULL); 3060 CU_ASSERT(nvme_qpair2->qpair == NULL); 3061 CU_ASSERT(nvme_ctrlr->resetting == true); 3062 3063 poll_thread_times(0, 3); 3064 CU_ASSERT(nvme_qpair1->qpair == NULL); 3065 CU_ASSERT(nvme_qpair2->qpair == NULL); 3066 CU_ASSERT(ctrlr->is_failed == true); 3067 3068 poll_thread_times(1, 2); 3069 poll_thread_times(0, 1); 3070 CU_ASSERT(ctrlr->is_failed == false); 3071 CU_ASSERT(ctrlr->adminq.is_connected == false); 3072 3073 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3074 poll_thread_times(0, 2); 3075 CU_ASSERT(ctrlr->adminq.is_connected == true); 3076 3077 poll_thread_times(0, 1); 3078 poll_thread_times(1, 1); 3079 CU_ASSERT(nvme_qpair1->qpair != NULL); 3080 CU_ASSERT(nvme_qpair2->qpair != NULL); 3081 CU_ASSERT(nvme_ctrlr->resetting == true); 3082 3083 poll_thread_times(0, 2); 3084 poll_thread_times(1, 1); 3085 poll_thread_times(0, 1); 3086 CU_ASSERT(nvme_ctrlr->resetting == false); 3087 3088 poll_threads(); 3089 3090 /* If a qpair is disconnected and resetting the corresponding nvme_ctrlr 3091 * fails, the qpair is just freed. 3092 */ 3093 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3094 ctrlr->is_failed = true; 3095 ctrlr->fail_reset = true; 3096 3097 poll_thread_times(1, 3); 3098 CU_ASSERT(nvme_qpair1->qpair != NULL); 3099 CU_ASSERT(nvme_qpair2->qpair == NULL); 3100 CU_ASSERT(nvme_ctrlr->resetting == true); 3101 3102 poll_thread_times(0, 3); 3103 poll_thread_times(1, 1); 3104 CU_ASSERT(nvme_qpair1->qpair == NULL); 3105 CU_ASSERT(nvme_qpair2->qpair == NULL); 3106 CU_ASSERT(ctrlr->is_failed == true); 3107 3108 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3109 poll_thread_times(0, 3); 3110 poll_thread_times(1, 1); 3111 poll_thread_times(0, 1); 3112 CU_ASSERT(ctrlr->is_failed == true); 3113 CU_ASSERT(nvme_ctrlr->resetting == false); 3114 CU_ASSERT(nvme_qpair1->qpair == NULL); 3115 CU_ASSERT(nvme_qpair2->qpair == NULL); 3116 3117 poll_threads(); 3118 3119 spdk_put_io_channel(ch2); 3120 3121 set_thread(0); 3122 3123 spdk_put_io_channel(ch1); 3124 3125 poll_threads(); 3126 3127 rc = bdev_nvme_delete("nvme0", &g_any_path); 3128 CU_ASSERT(rc == 0); 3129 3130 poll_threads(); 3131 spdk_delay_us(1000); 3132 poll_threads(); 3133 3134 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3135 } 3136 3137 static void 3138 test_create_bdev_ctrlr(void) 3139 { 3140 struct nvme_path_id path1 = {}, path2 = {}; 3141 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3142 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3143 const int STRING_SIZE = 32; 3144 const char *attached_names[STRING_SIZE]; 3145 int rc; 3146 3147 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3148 ut_init_trid(&path1.trid); 3149 ut_init_trid2(&path2.trid); 3150 3151 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3152 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3153 3154 g_ut_attach_ctrlr_status = 0; 3155 g_ut_attach_bdev_count = 0; 3156 3157 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3158 attach_ctrlr_done, NULL, NULL, NULL, true); 3159 CU_ASSERT(rc == 0); 3160 3161 spdk_delay_us(1000); 3162 poll_threads(); 3163 3164 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3165 poll_threads(); 3166 3167 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3168 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3169 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3170 3171 /* cntlid is duplicated, and adding the second ctrlr should fail. */ 3172 g_ut_attach_ctrlr_status = -EINVAL; 3173 3174 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3175 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3176 3177 ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid; 3178 3179 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3180 attach_ctrlr_done, NULL, NULL, NULL, true); 3181 CU_ASSERT(rc == 0); 3182 3183 spdk_delay_us(1000); 3184 poll_threads(); 3185 3186 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3187 poll_threads(); 3188 3189 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 3190 3191 /* cntlid is not duplicated, and adding the third ctrlr should succeed. */ 3192 g_ut_attach_ctrlr_status = 0; 3193 3194 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3195 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3196 3197 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3198 attach_ctrlr_done, NULL, NULL, NULL, true); 3199 CU_ASSERT(rc == 0); 3200 3201 spdk_delay_us(1000); 3202 poll_threads(); 3203 3204 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3205 poll_threads(); 3206 3207 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3208 3209 /* Delete two ctrlrs at once. */ 3210 rc = bdev_nvme_delete("nvme0", &g_any_path); 3211 CU_ASSERT(rc == 0); 3212 3213 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3214 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3215 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3216 3217 poll_threads(); 3218 spdk_delay_us(1000); 3219 poll_threads(); 3220 3221 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3222 3223 /* Add two ctrlrs and delete one by one. */ 3224 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3225 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3226 3227 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3228 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3229 3230 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3231 attach_ctrlr_done, NULL, NULL, NULL, true); 3232 CU_ASSERT(rc == 0); 3233 3234 spdk_delay_us(1000); 3235 poll_threads(); 3236 3237 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3238 poll_threads(); 3239 3240 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3241 attach_ctrlr_done, NULL, NULL, NULL, true); 3242 CU_ASSERT(rc == 0); 3243 3244 spdk_delay_us(1000); 3245 poll_threads(); 3246 3247 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3248 poll_threads(); 3249 3250 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3251 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3252 3253 rc = bdev_nvme_delete("nvme0", &path1); 3254 CU_ASSERT(rc == 0); 3255 3256 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3257 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3258 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3259 3260 poll_threads(); 3261 spdk_delay_us(1000); 3262 poll_threads(); 3263 3264 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3265 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3266 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3267 3268 rc = bdev_nvme_delete("nvme0", &path2); 3269 CU_ASSERT(rc == 0); 3270 3271 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3272 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3273 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3274 3275 poll_threads(); 3276 spdk_delay_us(1000); 3277 poll_threads(); 3278 3279 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3280 } 3281 3282 static struct nvme_ns * 3283 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr) 3284 { 3285 struct nvme_ns *nvme_ns; 3286 3287 TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) { 3288 if (nvme_ns->ctrlr == nvme_ctrlr) { 3289 return nvme_ns; 3290 } 3291 } 3292 3293 return NULL; 3294 } 3295 3296 static void 3297 test_add_multi_ns_to_bdev(void) 3298 { 3299 struct nvme_path_id path1 = {}, path2 = {}; 3300 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3301 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 3302 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3303 struct nvme_ns *nvme_ns1, *nvme_ns2; 3304 struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4; 3305 const int STRING_SIZE = 32; 3306 const char *attached_names[STRING_SIZE]; 3307 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3308 struct spdk_uuid uuid2 = { .u.raw = { 0x2 } }; 3309 struct spdk_uuid uuid3 = { .u.raw = { 0x3 } }; 3310 struct spdk_uuid uuid4 = { .u.raw = { 0x4 } }; 3311 struct spdk_uuid uuid44 = { .u.raw = { 0x44 } }; 3312 int rc; 3313 3314 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3315 ut_init_trid(&path1.trid); 3316 ut_init_trid2(&path2.trid); 3317 3318 /* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */ 3319 3320 /* Attach 1st ctrlr, whose max number of namespaces is 5, and 1st, 3rd, and 4th 3321 * namespaces are populated. 3322 */ 3323 ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true); 3324 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3325 3326 ctrlr1->ns[1].is_active = false; 3327 ctrlr1->ns[4].is_active = false; 3328 ctrlr1->ns[0].uuid = &uuid1; 3329 ctrlr1->ns[2].uuid = &uuid3; 3330 ctrlr1->ns[3].uuid = &uuid4; 3331 3332 g_ut_attach_ctrlr_status = 0; 3333 g_ut_attach_bdev_count = 3; 3334 3335 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3336 attach_ctrlr_done, NULL, NULL, NULL, true); 3337 CU_ASSERT(rc == 0); 3338 3339 spdk_delay_us(1000); 3340 poll_threads(); 3341 3342 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3343 poll_threads(); 3344 3345 /* Attach 2nd ctrlr, whose max number of namespaces is 5, and 1st, 2nd, and 4th 3346 * namespaces are populated. The uuid of 4th namespace is different, and hence 3347 * adding 4th namespace to a bdev should fail. 3348 */ 3349 ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true); 3350 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3351 3352 ctrlr2->ns[2].is_active = false; 3353 ctrlr2->ns[4].is_active = false; 3354 ctrlr2->ns[0].uuid = &uuid1; 3355 ctrlr2->ns[1].uuid = &uuid2; 3356 ctrlr2->ns[3].uuid = &uuid44; 3357 3358 g_ut_attach_ctrlr_status = 0; 3359 g_ut_attach_bdev_count = 2; 3360 3361 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3362 attach_ctrlr_done, NULL, NULL, NULL, true); 3363 CU_ASSERT(rc == 0); 3364 3365 spdk_delay_us(1000); 3366 poll_threads(); 3367 3368 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3369 poll_threads(); 3370 3371 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3372 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3373 3374 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3375 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3376 3377 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL); 3378 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL); 3379 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL); 3380 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL); 3381 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL); 3382 3383 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3384 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3385 3386 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL); 3387 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL); 3388 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL); 3389 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL); 3390 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL); 3391 3392 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3393 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3394 bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2); 3395 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 3396 bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3); 3397 SPDK_CU_ASSERT_FATAL(bdev3 != NULL); 3398 bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4); 3399 SPDK_CU_ASSERT_FATAL(bdev4 != NULL); 3400 CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL); 3401 3402 CU_ASSERT(bdev1->ref == 2); 3403 CU_ASSERT(bdev2->ref == 1); 3404 CU_ASSERT(bdev3->ref == 1); 3405 CU_ASSERT(bdev4->ref == 1); 3406 3407 /* Test if nvme_bdevs can be deleted by deleting ctrlr one by one. */ 3408 rc = bdev_nvme_delete("nvme0", &path1); 3409 CU_ASSERT(rc == 0); 3410 3411 poll_threads(); 3412 spdk_delay_us(1000); 3413 poll_threads(); 3414 3415 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3416 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3417 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2); 3418 3419 rc = bdev_nvme_delete("nvme0", &path2); 3420 CU_ASSERT(rc == 0); 3421 3422 poll_threads(); 3423 spdk_delay_us(1000); 3424 poll_threads(); 3425 3426 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3427 3428 /* Test if a nvme_bdev which has a shared namespace between two ctrlrs 3429 * can be deleted when the bdev subsystem shutdown. 3430 */ 3431 g_ut_attach_bdev_count = 1; 3432 3433 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3434 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3435 3436 ctrlr1->ns[0].uuid = &uuid1; 3437 3438 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3439 attach_ctrlr_done, NULL, NULL, NULL, true); 3440 CU_ASSERT(rc == 0); 3441 3442 spdk_delay_us(1000); 3443 poll_threads(); 3444 3445 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3446 poll_threads(); 3447 3448 ut_init_trid2(&path2.trid); 3449 3450 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3451 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3452 3453 ctrlr2->ns[0].uuid = &uuid1; 3454 3455 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3456 attach_ctrlr_done, NULL, NULL, NULL, true); 3457 CU_ASSERT(rc == 0); 3458 3459 spdk_delay_us(1000); 3460 poll_threads(); 3461 3462 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3463 poll_threads(); 3464 3465 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3466 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3467 3468 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3469 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3470 3471 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3472 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3473 3474 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3475 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3476 3477 /* Check if a nvme_bdev has two nvme_ns. */ 3478 nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1); 3479 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3480 CU_ASSERT(nvme_ns1->bdev == bdev1); 3481 3482 nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2); 3483 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3484 CU_ASSERT(nvme_ns2->bdev == bdev1); 3485 3486 /* Delete nvme_bdev first when the bdev subsystem shutdown. */ 3487 bdev_nvme_destruct(&bdev1->disk); 3488 3489 poll_threads(); 3490 3491 CU_ASSERT(nvme_ns1->bdev == NULL); 3492 CU_ASSERT(nvme_ns2->bdev == NULL); 3493 3494 nvme_ctrlr1->destruct = true; 3495 _nvme_ctrlr_destruct(nvme_ctrlr1); 3496 3497 poll_threads(); 3498 spdk_delay_us(1000); 3499 poll_threads(); 3500 3501 nvme_ctrlr2->destruct = true; 3502 _nvme_ctrlr_destruct(nvme_ctrlr2); 3503 3504 poll_threads(); 3505 spdk_delay_us(1000); 3506 poll_threads(); 3507 3508 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3509 } 3510 3511 static void 3512 test_add_multi_io_paths_to_nbdev_ch(void) 3513 { 3514 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 3515 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 3516 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3517 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3; 3518 struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3; 3519 const int STRING_SIZE = 32; 3520 const char *attached_names[STRING_SIZE]; 3521 struct nvme_bdev *bdev; 3522 struct spdk_io_channel *ch; 3523 struct nvme_bdev_channel *nbdev_ch; 3524 struct nvme_io_path *io_path1, *io_path2, *io_path3; 3525 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3526 int rc; 3527 3528 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3529 ut_init_trid(&path1.trid); 3530 ut_init_trid2(&path2.trid); 3531 ut_init_trid3(&path3.trid); 3532 g_ut_attach_ctrlr_status = 0; 3533 g_ut_attach_bdev_count = 1; 3534 3535 set_thread(1); 3536 3537 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3538 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3539 3540 ctrlr1->ns[0].uuid = &uuid1; 3541 3542 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3543 attach_ctrlr_done, NULL, NULL, NULL, true); 3544 CU_ASSERT(rc == 0); 3545 3546 spdk_delay_us(1000); 3547 poll_threads(); 3548 3549 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3550 poll_threads(); 3551 3552 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3553 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3554 3555 ctrlr2->ns[0].uuid = &uuid1; 3556 3557 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3558 attach_ctrlr_done, NULL, NULL, NULL, true); 3559 CU_ASSERT(rc == 0); 3560 3561 spdk_delay_us(1000); 3562 poll_threads(); 3563 3564 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3565 poll_threads(); 3566 3567 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3568 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3569 3570 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3571 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3572 3573 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3574 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3575 3576 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3577 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3578 3579 nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1); 3580 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3581 3582 nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2); 3583 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3584 3585 set_thread(0); 3586 3587 ch = spdk_get_io_channel(bdev); 3588 SPDK_CU_ASSERT_FATAL(ch != NULL); 3589 nbdev_ch = spdk_io_channel_get_ctx(ch); 3590 3591 io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1); 3592 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3593 3594 io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2); 3595 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3596 3597 set_thread(1); 3598 3599 /* Check if I/O path is dynamically added to nvme_bdev_channel. */ 3600 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 3601 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 3602 3603 ctrlr3->ns[0].uuid = &uuid1; 3604 3605 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 3606 attach_ctrlr_done, NULL, NULL, NULL, true); 3607 CU_ASSERT(rc == 0); 3608 3609 spdk_delay_us(1000); 3610 poll_threads(); 3611 3612 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3613 poll_threads(); 3614 3615 nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid); 3616 SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL); 3617 3618 nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3); 3619 SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL); 3620 3621 io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3); 3622 SPDK_CU_ASSERT_FATAL(io_path3 != NULL); 3623 3624 /* Check if I/O path is dynamically deleted from nvme_bdev_channel. */ 3625 rc = bdev_nvme_delete("nvme0", &path2); 3626 CU_ASSERT(rc == 0); 3627 3628 poll_threads(); 3629 spdk_delay_us(1000); 3630 poll_threads(); 3631 3632 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1); 3633 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 3634 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3); 3635 3636 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1); 3637 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL); 3638 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3); 3639 3640 set_thread(0); 3641 3642 spdk_put_io_channel(ch); 3643 3644 poll_threads(); 3645 3646 set_thread(1); 3647 3648 rc = bdev_nvme_delete("nvme0", &g_any_path); 3649 CU_ASSERT(rc == 0); 3650 3651 poll_threads(); 3652 spdk_delay_us(1000); 3653 poll_threads(); 3654 3655 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3656 } 3657 3658 static void 3659 test_admin_path(void) 3660 { 3661 struct nvme_path_id path1 = {}, path2 = {}; 3662 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3663 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3664 const int STRING_SIZE = 32; 3665 const char *attached_names[STRING_SIZE]; 3666 struct nvme_bdev *bdev; 3667 struct spdk_io_channel *ch; 3668 struct spdk_bdev_io *bdev_io; 3669 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3670 int rc; 3671 3672 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3673 ut_init_trid(&path1.trid); 3674 ut_init_trid2(&path2.trid); 3675 g_ut_attach_ctrlr_status = 0; 3676 g_ut_attach_bdev_count = 1; 3677 3678 set_thread(0); 3679 3680 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3681 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3682 3683 ctrlr1->ns[0].uuid = &uuid1; 3684 3685 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3686 attach_ctrlr_done, NULL, NULL, NULL, true); 3687 CU_ASSERT(rc == 0); 3688 3689 spdk_delay_us(1000); 3690 poll_threads(); 3691 3692 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3693 poll_threads(); 3694 3695 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3696 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3697 3698 ctrlr2->ns[0].uuid = &uuid1; 3699 3700 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3701 attach_ctrlr_done, NULL, NULL, NULL, true); 3702 CU_ASSERT(rc == 0); 3703 3704 spdk_delay_us(1000); 3705 poll_threads(); 3706 3707 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3708 poll_threads(); 3709 3710 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3711 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3712 3713 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3714 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3715 3716 ch = spdk_get_io_channel(bdev); 3717 SPDK_CU_ASSERT_FATAL(ch != NULL); 3718 3719 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch); 3720 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 3721 3722 /* ctrlr1 is failed but ctrlr2 is not failed. admin command is 3723 * submitted to ctrlr2. 3724 */ 3725 ctrlr1->is_failed = true; 3726 bdev_io->internal.in_submit_request = true; 3727 3728 bdev_nvme_submit_request(ch, bdev_io); 3729 3730 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3731 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1); 3732 CU_ASSERT(bdev_io->internal.in_submit_request == true); 3733 3734 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3735 poll_threads(); 3736 3737 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3738 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3739 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 3740 3741 /* both ctrlr1 and ctrlr2 are failed. admin command is failed to submit. */ 3742 ctrlr2->is_failed = true; 3743 bdev_io->internal.in_submit_request = true; 3744 3745 bdev_nvme_submit_request(ch, bdev_io); 3746 3747 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3748 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3749 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3750 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 3751 3752 free(bdev_io); 3753 3754 spdk_put_io_channel(ch); 3755 3756 poll_threads(); 3757 3758 rc = bdev_nvme_delete("nvme0", &g_any_path); 3759 CU_ASSERT(rc == 0); 3760 3761 poll_threads(); 3762 spdk_delay_us(1000); 3763 poll_threads(); 3764 3765 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3766 } 3767 3768 static struct nvme_io_path * 3769 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch, 3770 struct nvme_ctrlr *nvme_ctrlr) 3771 { 3772 struct nvme_io_path *io_path; 3773 3774 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) { 3775 if (io_path->qpair->ctrlr == nvme_ctrlr) { 3776 return io_path; 3777 } 3778 } 3779 3780 return NULL; 3781 } 3782 3783 static void 3784 test_reset_bdev_ctrlr(void) 3785 { 3786 struct nvme_path_id path1 = {}, path2 = {}; 3787 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3788 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3789 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 3790 struct nvme_path_id *curr_path1, *curr_path2; 3791 const int STRING_SIZE = 32; 3792 const char *attached_names[STRING_SIZE]; 3793 struct nvme_bdev *bdev; 3794 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 3795 struct nvme_bdev_io *first_bio; 3796 struct spdk_io_channel *ch1, *ch2; 3797 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 3798 struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22; 3799 int rc; 3800 3801 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3802 ut_init_trid(&path1.trid); 3803 ut_init_trid2(&path2.trid); 3804 g_ut_attach_ctrlr_status = 0; 3805 g_ut_attach_bdev_count = 1; 3806 3807 set_thread(0); 3808 3809 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3810 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3811 3812 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3813 attach_ctrlr_done, NULL, NULL, NULL, true); 3814 CU_ASSERT(rc == 0); 3815 3816 spdk_delay_us(1000); 3817 poll_threads(); 3818 3819 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3820 poll_threads(); 3821 3822 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3823 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3824 3825 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3826 attach_ctrlr_done, NULL, NULL, NULL, true); 3827 CU_ASSERT(rc == 0); 3828 3829 spdk_delay_us(1000); 3830 poll_threads(); 3831 3832 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3833 poll_threads(); 3834 3835 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3836 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3837 3838 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3839 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3840 3841 curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 3842 SPDK_CU_ASSERT_FATAL(curr_path1 != NULL); 3843 3844 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3845 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3846 3847 curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 3848 SPDK_CU_ASSERT_FATAL(curr_path2 != NULL); 3849 3850 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3851 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3852 3853 set_thread(0); 3854 3855 ch1 = spdk_get_io_channel(bdev); 3856 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 3857 3858 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 3859 io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1); 3860 SPDK_CU_ASSERT_FATAL(io_path11 != NULL); 3861 io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2); 3862 SPDK_CU_ASSERT_FATAL(io_path12 != NULL); 3863 3864 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 3865 first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx; 3866 3867 set_thread(1); 3868 3869 ch2 = spdk_get_io_channel(bdev); 3870 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 3871 3872 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 3873 io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1); 3874 SPDK_CU_ASSERT_FATAL(io_path21 != NULL); 3875 io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2); 3876 SPDK_CU_ASSERT_FATAL(io_path22 != NULL); 3877 3878 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 3879 3880 /* The first reset request from bdev_io is submitted on thread 0. 3881 * Check if ctrlr1 is reset and then ctrlr2 is reset. 3882 * 3883 * A few extra polls are necessary after resetting ctrlr1 to check 3884 * pending reset requests for ctrlr1. 3885 */ 3886 ctrlr1->is_failed = true; 3887 curr_path1->is_failed = true; 3888 ctrlr2->is_failed = true; 3889 curr_path2->is_failed = true; 3890 3891 set_thread(0); 3892 3893 bdev_nvme_submit_request(ch1, first_bdev_io); 3894 CU_ASSERT(first_bio->io_path == io_path11); 3895 CU_ASSERT(nvme_ctrlr1->resetting == true); 3896 CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio); 3897 3898 poll_thread_times(0, 3); 3899 CU_ASSERT(io_path11->qpair->qpair == NULL); 3900 CU_ASSERT(io_path21->qpair->qpair != NULL); 3901 3902 poll_thread_times(1, 2); 3903 CU_ASSERT(io_path11->qpair->qpair == NULL); 3904 CU_ASSERT(io_path21->qpair->qpair == NULL); 3905 CU_ASSERT(ctrlr1->is_failed == true); 3906 3907 poll_thread_times(0, 1); 3908 CU_ASSERT(nvme_ctrlr1->resetting == true); 3909 CU_ASSERT(ctrlr1->is_failed == false); 3910 CU_ASSERT(ctrlr1->adminq.is_connected == false); 3911 CU_ASSERT(curr_path1->is_failed == true); 3912 3913 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3914 poll_thread_times(0, 2); 3915 CU_ASSERT(ctrlr1->adminq.is_connected == true); 3916 3917 poll_thread_times(0, 1); 3918 CU_ASSERT(io_path11->qpair->qpair != NULL); 3919 CU_ASSERT(io_path21->qpair->qpair == NULL); 3920 3921 poll_thread_times(1, 1); 3922 CU_ASSERT(io_path11->qpair->qpair != NULL); 3923 CU_ASSERT(io_path21->qpair->qpair != NULL); 3924 3925 poll_thread_times(0, 2); 3926 CU_ASSERT(nvme_ctrlr1->resetting == true); 3927 poll_thread_times(1, 1); 3928 CU_ASSERT(nvme_ctrlr1->resetting == true); 3929 poll_thread_times(0, 2); 3930 CU_ASSERT(nvme_ctrlr1->resetting == false); 3931 CU_ASSERT(curr_path1->is_failed == false); 3932 CU_ASSERT(first_bio->io_path == io_path12); 3933 CU_ASSERT(nvme_ctrlr2->resetting == true); 3934 3935 poll_thread_times(0, 3); 3936 CU_ASSERT(io_path12->qpair->qpair == NULL); 3937 CU_ASSERT(io_path22->qpair->qpair != NULL); 3938 3939 poll_thread_times(1, 2); 3940 CU_ASSERT(io_path12->qpair->qpair == NULL); 3941 CU_ASSERT(io_path22->qpair->qpair == NULL); 3942 CU_ASSERT(ctrlr2->is_failed == true); 3943 3944 poll_thread_times(0, 1); 3945 CU_ASSERT(nvme_ctrlr2->resetting == true); 3946 CU_ASSERT(ctrlr2->is_failed == false); 3947 CU_ASSERT(ctrlr2->adminq.is_connected == false); 3948 CU_ASSERT(curr_path2->is_failed == true); 3949 3950 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3951 poll_thread_times(0, 2); 3952 CU_ASSERT(ctrlr2->adminq.is_connected == true); 3953 3954 poll_thread_times(0, 1); 3955 CU_ASSERT(io_path12->qpair->qpair != NULL); 3956 CU_ASSERT(io_path22->qpair->qpair == NULL); 3957 3958 poll_thread_times(1, 2); 3959 CU_ASSERT(io_path12->qpair->qpair != NULL); 3960 CU_ASSERT(io_path22->qpair->qpair != NULL); 3961 3962 poll_thread_times(0, 2); 3963 CU_ASSERT(nvme_ctrlr2->resetting == true); 3964 poll_thread_times(1, 1); 3965 CU_ASSERT(nvme_ctrlr2->resetting == true); 3966 poll_thread_times(0, 2); 3967 CU_ASSERT(first_bio->io_path == NULL); 3968 CU_ASSERT(nvme_ctrlr2->resetting == false); 3969 CU_ASSERT(curr_path2->is_failed == false); 3970 3971 poll_threads(); 3972 3973 /* There is a race between two reset requests from bdev_io. 3974 * 3975 * The first reset request is submitted on thread 0, and the second reset 3976 * request is submitted on thread 1 while the first is resetting ctrlr1. 3977 * The second is pending on ctrlr1. After the first completes resetting ctrlr1, 3978 * both reset requests go to ctrlr2. The first comes earlier than the second. 3979 * The second is pending on ctrlr2 again. After the first completes resetting 3980 * ctrl2, both complete successfully. 3981 */ 3982 ctrlr1->is_failed = true; 3983 curr_path1->is_failed = true; 3984 ctrlr2->is_failed = true; 3985 curr_path2->is_failed = true; 3986 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 3987 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 3988 3989 set_thread(0); 3990 3991 bdev_nvme_submit_request(ch1, first_bdev_io); 3992 3993 set_thread(1); 3994 3995 bdev_nvme_submit_request(ch2, second_bdev_io); 3996 3997 CU_ASSERT(nvme_ctrlr1->resetting == true); 3998 CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio); 3999 CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io); 4000 4001 poll_threads(); 4002 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4003 poll_threads(); 4004 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4005 poll_threads(); 4006 4007 CU_ASSERT(ctrlr1->is_failed == false); 4008 CU_ASSERT(curr_path1->is_failed == false); 4009 CU_ASSERT(ctrlr2->is_failed == false); 4010 CU_ASSERT(curr_path2->is_failed == false); 4011 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4012 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4013 4014 set_thread(0); 4015 4016 spdk_put_io_channel(ch1); 4017 4018 set_thread(1); 4019 4020 spdk_put_io_channel(ch2); 4021 4022 poll_threads(); 4023 4024 set_thread(0); 4025 4026 rc = bdev_nvme_delete("nvme0", &g_any_path); 4027 CU_ASSERT(rc == 0); 4028 4029 poll_threads(); 4030 spdk_delay_us(1000); 4031 poll_threads(); 4032 4033 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4034 4035 free(first_bdev_io); 4036 free(second_bdev_io); 4037 } 4038 4039 static void 4040 test_find_io_path(void) 4041 { 4042 struct nvme_bdev_channel nbdev_ch = { 4043 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 4044 }; 4045 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}; 4046 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 4047 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 4048 struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}; 4049 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, }; 4050 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, }; 4051 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 4052 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 4053 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 4054 4055 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 4056 4057 /* Test if io_path whose ANA state is not accessible is excluded. */ 4058 4059 nvme_qpair1.qpair = &qpair1; 4060 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4061 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4062 4063 nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 4064 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4065 4066 nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 4067 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4068 4069 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4070 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4071 4072 nbdev_ch.current_io_path = NULL; 4073 4074 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4075 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4076 4077 nbdev_ch.current_io_path = NULL; 4078 4079 /* Test if io_path whose qpair is resetting is excluded. */ 4080 4081 nvme_qpair1.qpair = NULL; 4082 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4083 4084 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 4085 4086 /* Test if ANA optimized state or the first found ANA non-optimized state 4087 * is prioritized. 4088 */ 4089 4090 nvme_qpair1.qpair = &qpair1; 4091 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4092 nvme_qpair2.qpair = &qpair2; 4093 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4094 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 4095 4096 nbdev_ch.current_io_path = NULL; 4097 4098 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4099 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4100 4101 nbdev_ch.current_io_path = NULL; 4102 } 4103 4104 static void 4105 test_retry_io_if_ana_state_is_updating(void) 4106 { 4107 struct nvme_path_id path = {}; 4108 struct nvme_ctrlr_opts opts = {}; 4109 struct spdk_nvme_ctrlr *ctrlr; 4110 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4111 struct nvme_ctrlr *nvme_ctrlr; 4112 const int STRING_SIZE = 32; 4113 const char *attached_names[STRING_SIZE]; 4114 struct nvme_bdev *bdev; 4115 struct nvme_ns *nvme_ns; 4116 struct spdk_bdev_io *bdev_io1; 4117 struct spdk_io_channel *ch; 4118 struct nvme_bdev_channel *nbdev_ch; 4119 struct nvme_io_path *io_path; 4120 struct nvme_qpair *nvme_qpair; 4121 int rc; 4122 4123 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4124 ut_init_trid(&path.trid); 4125 4126 set_thread(0); 4127 4128 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4129 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4130 4131 g_ut_attach_ctrlr_status = 0; 4132 g_ut_attach_bdev_count = 1; 4133 4134 opts.ctrlr_loss_timeout_sec = -1; 4135 opts.reconnect_delay_sec = 1; 4136 4137 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4138 attach_ctrlr_done, NULL, NULL, &opts, false); 4139 CU_ASSERT(rc == 0); 4140 4141 spdk_delay_us(1000); 4142 poll_threads(); 4143 4144 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4145 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4146 4147 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4148 CU_ASSERT(nvme_ctrlr != NULL); 4149 4150 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4151 CU_ASSERT(bdev != NULL); 4152 4153 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4154 CU_ASSERT(nvme_ns != NULL); 4155 4156 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4157 ut_bdev_io_set_buf(bdev_io1); 4158 4159 ch = spdk_get_io_channel(bdev); 4160 SPDK_CU_ASSERT_FATAL(ch != NULL); 4161 4162 nbdev_ch = spdk_io_channel_get_ctx(ch); 4163 4164 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4165 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4166 4167 nvme_qpair = io_path->qpair; 4168 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4169 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4170 4171 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 4172 4173 /* If qpair is connected, I/O should succeed. */ 4174 bdev_io1->internal.in_submit_request = true; 4175 4176 bdev_nvme_submit_request(ch, bdev_io1); 4177 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4178 4179 poll_threads(); 4180 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4181 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 4182 4183 /* If ANA state of namespace is inaccessible, I/O should be queued. */ 4184 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4185 nbdev_ch->current_io_path = NULL; 4186 4187 bdev_io1->internal.in_submit_request = true; 4188 4189 bdev_nvme_submit_request(ch, bdev_io1); 4190 4191 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4192 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4193 CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4194 4195 /* ANA state became accessible while I/O was queued. */ 4196 nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4197 4198 spdk_delay_us(1000000); 4199 4200 poll_thread_times(0, 1); 4201 4202 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4203 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4204 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 4205 4206 poll_threads(); 4207 4208 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4209 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4210 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4211 4212 free(bdev_io1); 4213 4214 spdk_put_io_channel(ch); 4215 4216 poll_threads(); 4217 4218 rc = bdev_nvme_delete("nvme0", &g_any_path); 4219 CU_ASSERT(rc == 0); 4220 4221 poll_threads(); 4222 spdk_delay_us(1000); 4223 poll_threads(); 4224 4225 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4226 } 4227 4228 static void 4229 test_retry_io_for_io_path_error(void) 4230 { 4231 struct nvme_path_id path1 = {}, path2 = {}; 4232 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4233 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4234 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4235 const int STRING_SIZE = 32; 4236 const char *attached_names[STRING_SIZE]; 4237 struct nvme_bdev *bdev; 4238 struct nvme_ns *nvme_ns1, *nvme_ns2; 4239 struct spdk_bdev_io *bdev_io; 4240 struct nvme_bdev_io *bio; 4241 struct spdk_io_channel *ch; 4242 struct nvme_bdev_channel *nbdev_ch; 4243 struct nvme_io_path *io_path1, *io_path2; 4244 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 4245 struct ut_nvme_req *req; 4246 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 4247 int rc; 4248 4249 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4250 ut_init_trid(&path1.trid); 4251 ut_init_trid2(&path2.trid); 4252 4253 g_opts.bdev_retry_count = 1; 4254 4255 set_thread(0); 4256 4257 g_ut_attach_ctrlr_status = 0; 4258 g_ut_attach_bdev_count = 1; 4259 4260 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4261 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4262 4263 ctrlr1->ns[0].uuid = &uuid1; 4264 4265 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4266 attach_ctrlr_done, NULL, NULL, NULL, true); 4267 CU_ASSERT(rc == 0); 4268 4269 spdk_delay_us(1000); 4270 poll_threads(); 4271 4272 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4273 poll_threads(); 4274 4275 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4276 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4277 4278 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 4279 CU_ASSERT(nvme_ctrlr1 != NULL); 4280 4281 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4282 CU_ASSERT(bdev != NULL); 4283 4284 nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1); 4285 CU_ASSERT(nvme_ns1 != NULL); 4286 CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1)); 4287 4288 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4289 ut_bdev_io_set_buf(bdev_io); 4290 4291 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4292 4293 ch = spdk_get_io_channel(bdev); 4294 SPDK_CU_ASSERT_FATAL(ch != NULL); 4295 4296 nbdev_ch = spdk_io_channel_get_ctx(ch); 4297 4298 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 4299 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 4300 4301 nvme_qpair1 = io_path1->qpair; 4302 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 4303 SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL); 4304 4305 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4306 4307 /* I/O got a temporary I/O path error, but it should not retry if DNR is set. */ 4308 bdev_io->internal.in_submit_request = true; 4309 4310 bdev_nvme_submit_request(ch, bdev_io); 4311 4312 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4313 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4314 4315 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4316 SPDK_CU_ASSERT_FATAL(req != NULL); 4317 4318 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4319 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4320 req->cpl.status.dnr = 1; 4321 4322 poll_thread_times(0, 1); 4323 4324 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4325 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4326 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4327 4328 /* I/O got a temporary I/O path error, but it should succeed after retry. */ 4329 bdev_io->internal.in_submit_request = true; 4330 4331 bdev_nvme_submit_request(ch, bdev_io); 4332 4333 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4334 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4335 4336 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4337 SPDK_CU_ASSERT_FATAL(req != NULL); 4338 4339 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4340 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4341 4342 poll_thread_times(0, 1); 4343 4344 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4345 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4346 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4347 4348 poll_threads(); 4349 4350 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4351 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4352 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4353 4354 /* Add io_path2 dynamically, and create a multipath configuration. */ 4355 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4356 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4357 4358 ctrlr2->ns[0].uuid = &uuid1; 4359 4360 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4361 attach_ctrlr_done, NULL, NULL, NULL, true); 4362 CU_ASSERT(rc == 0); 4363 4364 spdk_delay_us(1000); 4365 poll_threads(); 4366 4367 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4368 poll_threads(); 4369 4370 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 4371 CU_ASSERT(nvme_ctrlr2 != NULL); 4372 4373 nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2); 4374 CU_ASSERT(nvme_ns2 != NULL); 4375 CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2)); 4376 4377 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 4378 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 4379 4380 nvme_qpair2 = io_path2->qpair; 4381 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 4382 SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL); 4383 4384 /* I/O is submitted to io_path1, but qpair of io_path1 was disconnected 4385 * and deleted. Hence the I/O was aborted. But io_path2 is available. 4386 * So after a retry, I/O is submitted to io_path2 and should succeed. 4387 */ 4388 bdev_io->internal.in_submit_request = true; 4389 4390 bdev_nvme_submit_request(ch, bdev_io); 4391 4392 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4393 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4394 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4395 4396 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4397 SPDK_CU_ASSERT_FATAL(req != NULL); 4398 4399 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION; 4400 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4401 4402 poll_thread_times(0, 1); 4403 4404 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4405 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4406 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4407 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4408 4409 spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair); 4410 nvme_qpair1->qpair = NULL; 4411 4412 poll_threads(); 4413 4414 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4415 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4416 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4417 4418 free(bdev_io); 4419 4420 spdk_put_io_channel(ch); 4421 4422 poll_threads(); 4423 4424 rc = bdev_nvme_delete("nvme0", &g_any_path); 4425 CU_ASSERT(rc == 0); 4426 4427 poll_threads(); 4428 spdk_delay_us(1000); 4429 poll_threads(); 4430 4431 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4432 4433 g_opts.bdev_retry_count = 0; 4434 } 4435 4436 static void 4437 test_retry_io_count(void) 4438 { 4439 struct nvme_path_id path = {}; 4440 struct spdk_nvme_ctrlr *ctrlr; 4441 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4442 struct nvme_ctrlr *nvme_ctrlr; 4443 const int STRING_SIZE = 32; 4444 const char *attached_names[STRING_SIZE]; 4445 struct nvme_bdev *bdev; 4446 struct nvme_ns *nvme_ns; 4447 struct spdk_bdev_io *bdev_io; 4448 struct nvme_bdev_io *bio; 4449 struct spdk_io_channel *ch; 4450 struct nvme_bdev_channel *nbdev_ch; 4451 struct nvme_io_path *io_path; 4452 struct nvme_qpair *nvme_qpair; 4453 struct ut_nvme_req *req; 4454 int rc; 4455 4456 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4457 ut_init_trid(&path.trid); 4458 4459 set_thread(0); 4460 4461 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4462 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4463 4464 g_ut_attach_ctrlr_status = 0; 4465 g_ut_attach_bdev_count = 1; 4466 4467 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4468 attach_ctrlr_done, NULL, NULL, NULL, false); 4469 CU_ASSERT(rc == 0); 4470 4471 spdk_delay_us(1000); 4472 poll_threads(); 4473 4474 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4475 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4476 4477 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4478 CU_ASSERT(nvme_ctrlr != NULL); 4479 4480 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4481 CU_ASSERT(bdev != NULL); 4482 4483 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4484 CU_ASSERT(nvme_ns != NULL); 4485 4486 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4487 ut_bdev_io_set_buf(bdev_io); 4488 4489 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4490 4491 ch = spdk_get_io_channel(bdev); 4492 SPDK_CU_ASSERT_FATAL(ch != NULL); 4493 4494 nbdev_ch = spdk_io_channel_get_ctx(ch); 4495 4496 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4497 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4498 4499 nvme_qpair = io_path->qpair; 4500 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4501 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4502 4503 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4504 4505 /* If I/O is aborted by request, it should not be retried. */ 4506 g_opts.bdev_retry_count = 1; 4507 4508 bdev_io->internal.in_submit_request = true; 4509 4510 bdev_nvme_submit_request(ch, bdev_io); 4511 4512 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4513 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4514 4515 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4516 SPDK_CU_ASSERT_FATAL(req != NULL); 4517 4518 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 4519 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4520 4521 poll_thread_times(0, 1); 4522 4523 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4524 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4525 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 4526 4527 /* If bio->retry_count is not less than g_opts.bdev_retry_count, 4528 * the failed I/O should not be retried. 4529 */ 4530 g_opts.bdev_retry_count = 4; 4531 4532 bdev_io->internal.in_submit_request = true; 4533 4534 bdev_nvme_submit_request(ch, bdev_io); 4535 4536 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4537 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4538 4539 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4540 SPDK_CU_ASSERT_FATAL(req != NULL); 4541 4542 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4543 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4544 bio->retry_count = 4; 4545 4546 poll_thread_times(0, 1); 4547 4548 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4549 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4550 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4551 4552 /* If g_opts.bdev_retry_count is -1, the failed I/O always should be retried. */ 4553 g_opts.bdev_retry_count = -1; 4554 4555 bdev_io->internal.in_submit_request = true; 4556 4557 bdev_nvme_submit_request(ch, bdev_io); 4558 4559 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4560 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4561 4562 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4563 SPDK_CU_ASSERT_FATAL(req != NULL); 4564 4565 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4566 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4567 bio->retry_count = 4; 4568 4569 poll_thread_times(0, 1); 4570 4571 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4572 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4573 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4574 4575 poll_threads(); 4576 4577 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4578 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4579 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4580 4581 /* If bio->retry_count is less than g_opts.bdev_retry_count, 4582 * the failed I/O should be retried. 4583 */ 4584 g_opts.bdev_retry_count = 4; 4585 4586 bdev_io->internal.in_submit_request = true; 4587 4588 bdev_nvme_submit_request(ch, bdev_io); 4589 4590 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4591 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4592 4593 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4594 SPDK_CU_ASSERT_FATAL(req != NULL); 4595 4596 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4597 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4598 bio->retry_count = 3; 4599 4600 poll_thread_times(0, 1); 4601 4602 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4603 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4604 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4605 4606 poll_threads(); 4607 4608 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4609 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4610 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4611 4612 free(bdev_io); 4613 4614 spdk_put_io_channel(ch); 4615 4616 poll_threads(); 4617 4618 rc = bdev_nvme_delete("nvme0", &g_any_path); 4619 CU_ASSERT(rc == 0); 4620 4621 poll_threads(); 4622 spdk_delay_us(1000); 4623 poll_threads(); 4624 4625 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4626 4627 g_opts.bdev_retry_count = 0; 4628 } 4629 4630 static void 4631 test_concurrent_read_ana_log_page(void) 4632 { 4633 struct spdk_nvme_transport_id trid = {}; 4634 struct spdk_nvme_ctrlr *ctrlr; 4635 struct nvme_ctrlr *nvme_ctrlr; 4636 const int STRING_SIZE = 32; 4637 const char *attached_names[STRING_SIZE]; 4638 int rc; 4639 4640 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4641 ut_init_trid(&trid); 4642 4643 set_thread(0); 4644 4645 ctrlr = ut_attach_ctrlr(&trid, 1, true, false); 4646 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4647 4648 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4649 4650 g_ut_attach_ctrlr_status = 0; 4651 g_ut_attach_bdev_count = 1; 4652 4653 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 4654 attach_ctrlr_done, NULL, NULL, NULL, false); 4655 CU_ASSERT(rc == 0); 4656 4657 spdk_delay_us(1000); 4658 poll_threads(); 4659 4660 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4661 poll_threads(); 4662 4663 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 4664 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 4665 4666 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4667 4668 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 4669 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4670 4671 /* Following read request should be rejected. */ 4672 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4673 4674 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4675 4676 set_thread(1); 4677 4678 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4679 4680 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4681 4682 /* Reset request while reading ANA log page should not be rejected. */ 4683 rc = bdev_nvme_reset(nvme_ctrlr); 4684 CU_ASSERT(rc == 0); 4685 4686 poll_threads(); 4687 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4688 poll_threads(); 4689 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4690 poll_threads(); 4691 4692 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4693 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 4694 4695 /* Read ANA log page while resetting ctrlr should be rejected. */ 4696 rc = bdev_nvme_reset(nvme_ctrlr); 4697 CU_ASSERT(rc == 0); 4698 4699 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4700 4701 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4702 4703 poll_threads(); 4704 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4705 poll_threads(); 4706 4707 set_thread(0); 4708 4709 rc = bdev_nvme_delete("nvme0", &g_any_path); 4710 CU_ASSERT(rc == 0); 4711 4712 poll_threads(); 4713 spdk_delay_us(1000); 4714 poll_threads(); 4715 4716 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 4717 } 4718 4719 static void 4720 test_retry_io_for_ana_error(void) 4721 { 4722 struct nvme_path_id path = {}; 4723 struct spdk_nvme_ctrlr *ctrlr; 4724 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4725 struct nvme_ctrlr *nvme_ctrlr; 4726 const int STRING_SIZE = 32; 4727 const char *attached_names[STRING_SIZE]; 4728 struct nvme_bdev *bdev; 4729 struct nvme_ns *nvme_ns; 4730 struct spdk_bdev_io *bdev_io; 4731 struct nvme_bdev_io *bio; 4732 struct spdk_io_channel *ch; 4733 struct nvme_bdev_channel *nbdev_ch; 4734 struct nvme_io_path *io_path; 4735 struct nvme_qpair *nvme_qpair; 4736 struct ut_nvme_req *req; 4737 uint64_t now; 4738 int rc; 4739 4740 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4741 ut_init_trid(&path.trid); 4742 4743 g_opts.bdev_retry_count = 1; 4744 4745 set_thread(0); 4746 4747 ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false); 4748 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4749 4750 g_ut_attach_ctrlr_status = 0; 4751 g_ut_attach_bdev_count = 1; 4752 4753 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4754 attach_ctrlr_done, NULL, NULL, NULL, false); 4755 CU_ASSERT(rc == 0); 4756 4757 spdk_delay_us(1000); 4758 poll_threads(); 4759 4760 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4761 poll_threads(); 4762 4763 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4764 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4765 4766 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4767 CU_ASSERT(nvme_ctrlr != NULL); 4768 4769 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4770 CU_ASSERT(bdev != NULL); 4771 4772 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4773 CU_ASSERT(nvme_ns != NULL); 4774 4775 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4776 ut_bdev_io_set_buf(bdev_io); 4777 4778 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4779 4780 ch = spdk_get_io_channel(bdev); 4781 SPDK_CU_ASSERT_FATAL(ch != NULL); 4782 4783 nbdev_ch = spdk_io_channel_get_ctx(ch); 4784 4785 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4786 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4787 4788 nvme_qpair = io_path->qpair; 4789 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4790 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4791 4792 now = spdk_get_ticks(); 4793 4794 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4795 4796 /* If I/O got ANA error, it should be queued, the corresponding namespace 4797 * should be freezed and its ANA state should be updated. 4798 */ 4799 bdev_io->internal.in_submit_request = true; 4800 4801 bdev_nvme_submit_request(ch, bdev_io); 4802 4803 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4804 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4805 4806 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4807 SPDK_CU_ASSERT_FATAL(req != NULL); 4808 4809 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4810 req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE; 4811 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4812 4813 poll_thread_times(0, 1); 4814 4815 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4816 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4817 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4818 /* I/O should be retried immediately. */ 4819 CU_ASSERT(bio->retry_ticks == now); 4820 CU_ASSERT(nvme_ns->ana_state_updating == true); 4821 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 4822 4823 poll_threads(); 4824 4825 /* Namespace is inaccessible, and hence I/O should be queued again. */ 4826 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4827 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4828 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4829 /* I/O should be retried after a second if no I/O path was found but 4830 * any I/O path may become available. 4831 */ 4832 CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz()); 4833 4834 /* Namespace should be unfreezed after completing to update its ANA state. */ 4835 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4836 poll_threads(); 4837 4838 CU_ASSERT(nvme_ns->ana_state_updating == false); 4839 CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 4840 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4841 4842 /* Retry the queued I/O should succeed. */ 4843 spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us); 4844 poll_threads(); 4845 4846 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4847 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4848 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4849 4850 free(bdev_io); 4851 4852 spdk_put_io_channel(ch); 4853 4854 poll_threads(); 4855 4856 rc = bdev_nvme_delete("nvme0", &g_any_path); 4857 CU_ASSERT(rc == 0); 4858 4859 poll_threads(); 4860 spdk_delay_us(1000); 4861 poll_threads(); 4862 4863 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4864 4865 g_opts.bdev_retry_count = 0; 4866 } 4867 4868 static void 4869 test_retry_admin_passthru_for_path_error(void) 4870 { 4871 struct nvme_path_id path1 = {}, path2 = {}; 4872 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4873 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4874 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4875 const int STRING_SIZE = 32; 4876 const char *attached_names[STRING_SIZE]; 4877 struct nvme_bdev *bdev; 4878 struct spdk_bdev_io *admin_io; 4879 struct spdk_io_channel *ch; 4880 struct ut_nvme_req *req; 4881 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 4882 int rc; 4883 4884 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4885 ut_init_trid(&path1.trid); 4886 ut_init_trid2(&path2.trid); 4887 4888 g_opts.bdev_retry_count = 1; 4889 4890 set_thread(0); 4891 4892 g_ut_attach_ctrlr_status = 0; 4893 g_ut_attach_bdev_count = 1; 4894 4895 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4896 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4897 4898 ctrlr1->ns[0].uuid = &uuid1; 4899 4900 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4901 attach_ctrlr_done, NULL, NULL, NULL, true); 4902 CU_ASSERT(rc == 0); 4903 4904 spdk_delay_us(1000); 4905 poll_threads(); 4906 4907 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4908 poll_threads(); 4909 4910 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4911 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4912 4913 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 4914 CU_ASSERT(nvme_ctrlr1 != NULL); 4915 4916 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4917 CU_ASSERT(bdev != NULL); 4918 4919 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 4920 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 4921 4922 ch = spdk_get_io_channel(bdev); 4923 SPDK_CU_ASSERT_FATAL(ch != NULL); 4924 4925 admin_io->internal.ch = (struct spdk_bdev_channel *)ch; 4926 4927 /* Admin passthrough got a path error, but it should not retry if DNR is set. */ 4928 admin_io->internal.in_submit_request = true; 4929 4930 bdev_nvme_submit_request(ch, admin_io); 4931 4932 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1); 4933 CU_ASSERT(admin_io->internal.in_submit_request == true); 4934 4935 req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx); 4936 SPDK_CU_ASSERT_FATAL(req != NULL); 4937 4938 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4939 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4940 req->cpl.status.dnr = 1; 4941 4942 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4943 poll_thread_times(0, 2); 4944 4945 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 4946 CU_ASSERT(admin_io->internal.in_submit_request == false); 4947 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4948 4949 /* Admin passthrough got a path error, but it should succeed after retry. */ 4950 admin_io->internal.in_submit_request = true; 4951 4952 bdev_nvme_submit_request(ch, admin_io); 4953 4954 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1); 4955 CU_ASSERT(admin_io->internal.in_submit_request == true); 4956 4957 req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx); 4958 SPDK_CU_ASSERT_FATAL(req != NULL); 4959 4960 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4961 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4962 4963 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4964 poll_thread_times(0, 2); 4965 4966 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1); 4967 CU_ASSERT(admin_io->internal.in_submit_request == true); 4968 4969 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4970 poll_threads(); 4971 4972 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 4973 CU_ASSERT(admin_io->internal.in_submit_request == false); 4974 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4975 4976 /* Add ctrlr2 dynamically, and create a multipath configuration. */ 4977 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4978 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4979 4980 ctrlr2->ns[0].uuid = &uuid1; 4981 4982 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4983 attach_ctrlr_done, NULL, NULL, NULL, true); 4984 CU_ASSERT(rc == 0); 4985 4986 spdk_delay_us(1000); 4987 poll_threads(); 4988 4989 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4990 poll_threads(); 4991 4992 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 4993 CU_ASSERT(nvme_ctrlr2 != NULL); 4994 4995 /* Admin passthrough was submitted to ctrlr1, but ctrlr1 was failed. 4996 * Hence the admin passthrough was aborted. But ctrlr2 is avaialble. 4997 * So after a retry, the admin passthrough is submitted to ctrlr2 and 4998 * should succeed. 4999 */ 5000 admin_io->internal.in_submit_request = true; 5001 5002 bdev_nvme_submit_request(ch, admin_io); 5003 5004 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1); 5005 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 5006 CU_ASSERT(admin_io->internal.in_submit_request == true); 5007 5008 req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx); 5009 SPDK_CU_ASSERT_FATAL(req != NULL); 5010 5011 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION; 5012 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 5013 ctrlr1->is_failed = true; 5014 5015 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5016 poll_thread_times(0, 2); 5017 5018 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 5019 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1); 5020 CU_ASSERT(admin_io->internal.in_submit_request == true); 5021 5022 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5023 poll_threads(); 5024 5025 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 5026 CU_ASSERT(admin_io->internal.in_submit_request == false); 5027 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5028 5029 free(admin_io); 5030 5031 spdk_put_io_channel(ch); 5032 5033 poll_threads(); 5034 5035 rc = bdev_nvme_delete("nvme0", &g_any_path); 5036 CU_ASSERT(rc == 0); 5037 5038 poll_threads(); 5039 spdk_delay_us(1000); 5040 poll_threads(); 5041 5042 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5043 5044 g_opts.bdev_retry_count = 0; 5045 } 5046 5047 static void 5048 test_retry_admin_passthru_by_count(void) 5049 { 5050 struct nvme_path_id path = {}; 5051 struct spdk_nvme_ctrlr *ctrlr; 5052 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5053 struct nvme_ctrlr *nvme_ctrlr; 5054 const int STRING_SIZE = 32; 5055 const char *attached_names[STRING_SIZE]; 5056 struct nvme_bdev *bdev; 5057 struct spdk_bdev_io *admin_io; 5058 struct nvme_bdev_io *admin_bio; 5059 struct spdk_io_channel *ch; 5060 struct ut_nvme_req *req; 5061 int rc; 5062 5063 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5064 ut_init_trid(&path.trid); 5065 5066 set_thread(0); 5067 5068 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5069 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5070 5071 g_ut_attach_ctrlr_status = 0; 5072 g_ut_attach_bdev_count = 1; 5073 5074 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5075 attach_ctrlr_done, NULL, NULL, NULL, false); 5076 CU_ASSERT(rc == 0); 5077 5078 spdk_delay_us(1000); 5079 poll_threads(); 5080 5081 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5082 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5083 5084 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5085 CU_ASSERT(nvme_ctrlr != NULL); 5086 5087 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5088 CU_ASSERT(bdev != NULL); 5089 5090 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 5091 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 5092 5093 admin_bio = (struct nvme_bdev_io *)admin_io->driver_ctx; 5094 5095 ch = spdk_get_io_channel(bdev); 5096 SPDK_CU_ASSERT_FATAL(ch != NULL); 5097 5098 admin_io->internal.ch = (struct spdk_bdev_channel *)ch; 5099 5100 /* If admin passthrough is aborted by request, it should not be retried. */ 5101 g_opts.bdev_retry_count = 1; 5102 5103 admin_io->internal.in_submit_request = true; 5104 5105 bdev_nvme_submit_request(ch, admin_io); 5106 5107 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 5108 CU_ASSERT(admin_io->internal.in_submit_request == true); 5109 5110 req = ut_get_outstanding_nvme_request(&ctrlr->adminq, admin_bio); 5111 SPDK_CU_ASSERT_FATAL(req != NULL); 5112 5113 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 5114 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 5115 5116 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5117 poll_thread_times(0, 2); 5118 5119 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 5120 CU_ASSERT(admin_io->internal.in_submit_request == false); 5121 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 5122 5123 /* If bio->retry_count is not less than g_opts.bdev_retry_count, 5124 * the failed admin passthrough should not be retried. 5125 */ 5126 g_opts.bdev_retry_count = 4; 5127 5128 admin_io->internal.in_submit_request = true; 5129 5130 bdev_nvme_submit_request(ch, admin_io); 5131 5132 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 5133 CU_ASSERT(admin_io->internal.in_submit_request == true); 5134 5135 req = ut_get_outstanding_nvme_request(&ctrlr->adminq, admin_bio); 5136 SPDK_CU_ASSERT_FATAL(req != NULL); 5137 5138 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 5139 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 5140 admin_bio->retry_count = 4; 5141 5142 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5143 poll_thread_times(0, 2); 5144 5145 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 5146 CU_ASSERT(admin_io->internal.in_submit_request == false); 5147 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 5148 5149 free(admin_io); 5150 5151 spdk_put_io_channel(ch); 5152 5153 poll_threads(); 5154 5155 rc = bdev_nvme_delete("nvme0", &g_any_path); 5156 CU_ASSERT(rc == 0); 5157 5158 poll_threads(); 5159 spdk_delay_us(1000); 5160 poll_threads(); 5161 5162 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5163 5164 g_opts.bdev_retry_count = 0; 5165 } 5166 5167 static void 5168 test_check_multipath_params(void) 5169 { 5170 /* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and 5171 * 3rd parameter is fast_io_fail_timeout_sec. 5172 */ 5173 CU_ASSERT(bdev_nvme_check_multipath_params(-2, 1, 0) == false); 5174 CU_ASSERT(bdev_nvme_check_multipath_params(-1, 0, 0) == false); 5175 CU_ASSERT(bdev_nvme_check_multipath_params(1, 0, 0) == false); 5176 CU_ASSERT(bdev_nvme_check_multipath_params(1, 2, 0) == false); 5177 CU_ASSERT(bdev_nvme_check_multipath_params(0, 1, 0) == false); 5178 CU_ASSERT(bdev_nvme_check_multipath_params(-1, 1, 0) == true); 5179 CU_ASSERT(bdev_nvme_check_multipath_params(2, 2, 0) == true); 5180 CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 0) == true); 5181 CU_ASSERT(bdev_nvme_check_multipath_params(INT32_MAX, INT32_MAX, 0) == true); 5182 CU_ASSERT(bdev_nvme_check_multipath_params(-1, UINT32_MAX, 0) == true); 5183 CU_ASSERT(bdev_nvme_check_multipath_params(0, 0, 1) == false); 5184 CU_ASSERT(bdev_nvme_check_multipath_params(-1, 2, 1) == false); 5185 CU_ASSERT(bdev_nvme_check_multipath_params(3, 2, 4) == false); 5186 CU_ASSERT(bdev_nvme_check_multipath_params(3, 2, 1) == false); 5187 CU_ASSERT(bdev_nvme_check_multipath_params(-1, 1, 1) == true); 5188 CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 2) == true); 5189 CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 1) == true); 5190 CU_ASSERT(bdev_nvme_check_multipath_params(INT32_MAX, INT32_MAX, INT32_MAX) == true); 5191 CU_ASSERT(bdev_nvme_check_multipath_params(-1, UINT32_MAX, UINT32_MAX) == true); 5192 } 5193 5194 static void 5195 test_retry_io_if_ctrlr_is_resetting(void) 5196 { 5197 struct nvme_path_id path = {}; 5198 struct nvme_ctrlr_opts opts = {}; 5199 struct spdk_nvme_ctrlr *ctrlr; 5200 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5201 struct nvme_ctrlr *nvme_ctrlr; 5202 const int STRING_SIZE = 32; 5203 const char *attached_names[STRING_SIZE]; 5204 struct nvme_bdev *bdev; 5205 struct nvme_ns *nvme_ns; 5206 struct spdk_bdev_io *bdev_io1, *bdev_io2; 5207 struct spdk_io_channel *ch; 5208 struct nvme_bdev_channel *nbdev_ch; 5209 struct nvme_io_path *io_path; 5210 struct nvme_qpair *nvme_qpair; 5211 int rc; 5212 5213 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5214 ut_init_trid(&path.trid); 5215 5216 set_thread(0); 5217 5218 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5219 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5220 5221 g_ut_attach_ctrlr_status = 0; 5222 g_ut_attach_bdev_count = 1; 5223 5224 opts.ctrlr_loss_timeout_sec = -1; 5225 opts.reconnect_delay_sec = 1; 5226 5227 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5228 attach_ctrlr_done, NULL, NULL, &opts, false); 5229 CU_ASSERT(rc == 0); 5230 5231 spdk_delay_us(1000); 5232 poll_threads(); 5233 5234 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5235 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5236 5237 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5238 CU_ASSERT(nvme_ctrlr != NULL); 5239 5240 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5241 CU_ASSERT(bdev != NULL); 5242 5243 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5244 CU_ASSERT(nvme_ns != NULL); 5245 5246 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5247 ut_bdev_io_set_buf(bdev_io1); 5248 5249 bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5250 ut_bdev_io_set_buf(bdev_io2); 5251 5252 ch = spdk_get_io_channel(bdev); 5253 SPDK_CU_ASSERT_FATAL(ch != NULL); 5254 5255 nbdev_ch = spdk_io_channel_get_ctx(ch); 5256 5257 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5258 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5259 5260 nvme_qpair = io_path->qpair; 5261 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5262 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5263 5264 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 5265 bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch; 5266 5267 /* If qpair is connected, I/O should succeed. */ 5268 bdev_io1->internal.in_submit_request = true; 5269 5270 bdev_nvme_submit_request(ch, bdev_io1); 5271 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5272 5273 poll_threads(); 5274 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5275 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 5276 5277 /* If qpair is disconnected, it is freed and then reconnected via resetting 5278 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 5279 * while resetting the nvme_ctrlr. 5280 */ 5281 nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 5282 ctrlr->is_failed = true; 5283 5284 poll_thread_times(0, 5); 5285 5286 CU_ASSERT(nvme_qpair->qpair == NULL); 5287 CU_ASSERT(nvme_ctrlr->resetting == true); 5288 CU_ASSERT(ctrlr->is_failed == false); 5289 5290 bdev_io1->internal.in_submit_request = true; 5291 5292 bdev_nvme_submit_request(ch, bdev_io1); 5293 5294 spdk_delay_us(1); 5295 5296 bdev_io2->internal.in_submit_request = true; 5297 5298 bdev_nvme_submit_request(ch, bdev_io2); 5299 5300 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5301 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5302 CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5303 CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link)); 5304 5305 poll_threads(); 5306 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5307 poll_threads(); 5308 5309 CU_ASSERT(nvme_qpair->qpair != NULL); 5310 CU_ASSERT(nvme_ctrlr->resetting == false); 5311 5312 spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us); 5313 5314 poll_thread_times(0, 1); 5315 5316 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5317 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5318 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5319 CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5320 5321 poll_threads(); 5322 5323 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5324 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5325 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5326 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5327 CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5328 5329 spdk_delay_us(1); 5330 5331 poll_thread_times(0, 1); 5332 5333 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5334 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5335 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5336 5337 poll_threads(); 5338 5339 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5340 CU_ASSERT(bdev_io2->internal.in_submit_request == false); 5341 CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5342 5343 free(bdev_io1); 5344 free(bdev_io2); 5345 5346 spdk_put_io_channel(ch); 5347 5348 poll_threads(); 5349 5350 rc = bdev_nvme_delete("nvme0", &g_any_path); 5351 CU_ASSERT(rc == 0); 5352 5353 poll_threads(); 5354 spdk_delay_us(1000); 5355 poll_threads(); 5356 5357 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5358 } 5359 5360 static void 5361 test_retry_admin_passthru_if_ctrlr_is_resetting(void) 5362 { 5363 struct nvme_path_id path = {}; 5364 struct nvme_ctrlr_opts opts = {}; 5365 struct spdk_nvme_ctrlr *ctrlr; 5366 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5367 struct nvme_ctrlr *nvme_ctrlr; 5368 const int STRING_SIZE = 32; 5369 const char *attached_names[STRING_SIZE]; 5370 struct nvme_bdev *bdev; 5371 struct spdk_bdev_io *admin_io; 5372 struct spdk_io_channel *ch; 5373 struct nvme_bdev_channel *nbdev_ch; 5374 int rc; 5375 5376 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5377 ut_init_trid(&path.trid); 5378 5379 g_opts.bdev_retry_count = 1; 5380 5381 set_thread(0); 5382 5383 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5384 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5385 5386 g_ut_attach_ctrlr_status = 0; 5387 g_ut_attach_bdev_count = 1; 5388 5389 opts.ctrlr_loss_timeout_sec = -1; 5390 opts.reconnect_delay_sec = 1; 5391 5392 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5393 attach_ctrlr_done, NULL, NULL, &opts, false); 5394 CU_ASSERT(rc == 0); 5395 5396 spdk_delay_us(1000); 5397 poll_threads(); 5398 5399 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5400 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5401 5402 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5403 CU_ASSERT(nvme_ctrlr != NULL); 5404 5405 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5406 CU_ASSERT(bdev != NULL); 5407 5408 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 5409 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 5410 5411 ch = spdk_get_io_channel(bdev); 5412 SPDK_CU_ASSERT_FATAL(ch != NULL); 5413 5414 nbdev_ch = spdk_io_channel_get_ctx(ch); 5415 5416 admin_io->internal.ch = (struct spdk_bdev_channel *)ch; 5417 5418 /* If ctrlr is available, admin passthrough should succeed. */ 5419 admin_io->internal.in_submit_request = true; 5420 5421 bdev_nvme_submit_request(ch, admin_io); 5422 5423 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 5424 CU_ASSERT(admin_io->internal.in_submit_request == true); 5425 5426 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5427 poll_threads(); 5428 5429 CU_ASSERT(admin_io->internal.in_submit_request == false); 5430 CU_ASSERT(admin_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 5431 5432 /* If ctrlr is resetting, admin passthrough request should be queued 5433 * if it is submitted while resetting ctrlr. 5434 */ 5435 bdev_nvme_reset(nvme_ctrlr); 5436 5437 poll_thread_times(0, 1); 5438 5439 admin_io->internal.in_submit_request = true; 5440 5441 bdev_nvme_submit_request(ch, admin_io); 5442 5443 CU_ASSERT(admin_io->internal.in_submit_request == true); 5444 CU_ASSERT(admin_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5445 5446 poll_threads(); 5447 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5448 poll_threads(); 5449 5450 CU_ASSERT(nvme_ctrlr->resetting == false); 5451 5452 spdk_delay_us(1000000 - g_opts.nvme_adminq_poll_period_us); 5453 poll_thread_times(0, 1); 5454 5455 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 5456 CU_ASSERT(admin_io->internal.in_submit_request == true); 5457 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5458 5459 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5460 poll_threads(); 5461 5462 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 5463 CU_ASSERT(admin_io->internal.in_submit_request == false); 5464 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5465 5466 free(admin_io); 5467 5468 spdk_put_io_channel(ch); 5469 5470 poll_threads(); 5471 5472 rc = bdev_nvme_delete("nvme0", &g_any_path); 5473 CU_ASSERT(rc == 0); 5474 5475 poll_threads(); 5476 spdk_delay_us(1000); 5477 poll_threads(); 5478 5479 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5480 5481 g_opts.bdev_retry_count = 0; 5482 } 5483 5484 static void 5485 test_reconnect_ctrlr(void) 5486 { 5487 struct spdk_nvme_transport_id trid = {}; 5488 struct spdk_nvme_ctrlr ctrlr = {}; 5489 struct nvme_ctrlr *nvme_ctrlr; 5490 struct spdk_io_channel *ch1, *ch2; 5491 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 5492 int rc; 5493 5494 ut_init_trid(&trid); 5495 TAILQ_INIT(&ctrlr.active_io_qpairs); 5496 5497 set_thread(0); 5498 5499 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 5500 CU_ASSERT(rc == 0); 5501 5502 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5503 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5504 5505 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2; 5506 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5507 5508 ch1 = spdk_get_io_channel(nvme_ctrlr); 5509 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 5510 5511 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 5512 CU_ASSERT(ctrlr_ch1->qpair != NULL); 5513 5514 set_thread(1); 5515 5516 ch2 = spdk_get_io_channel(nvme_ctrlr); 5517 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 5518 5519 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 5520 5521 /* Reset starts from thread 1. */ 5522 set_thread(1); 5523 5524 /* The reset should fail and a reconnect timer should be registered. */ 5525 ctrlr.fail_reset = true; 5526 ctrlr.is_failed = true; 5527 5528 rc = bdev_nvme_reset(nvme_ctrlr); 5529 CU_ASSERT(rc == 0); 5530 CU_ASSERT(nvme_ctrlr->resetting == true); 5531 CU_ASSERT(ctrlr.is_failed == true); 5532 5533 poll_threads(); 5534 5535 CU_ASSERT(nvme_ctrlr->resetting == false); 5536 CU_ASSERT(ctrlr.is_failed == false); 5537 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5538 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5539 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5540 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5541 5542 /* Then a reconnect retry should suceeed. */ 5543 ctrlr.fail_reset = false; 5544 5545 spdk_delay_us(SPDK_SEC_TO_USEC); 5546 poll_thread_times(0, 1); 5547 5548 CU_ASSERT(nvme_ctrlr->resetting == true); 5549 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5550 5551 poll_threads(); 5552 5553 CU_ASSERT(nvme_ctrlr->resetting == false); 5554 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 5555 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 5556 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5557 5558 /* The reset should fail and a reconnect timer should be registered. */ 5559 ctrlr.fail_reset = true; 5560 ctrlr.is_failed = true; 5561 5562 rc = bdev_nvme_reset(nvme_ctrlr); 5563 CU_ASSERT(rc == 0); 5564 CU_ASSERT(nvme_ctrlr->resetting == true); 5565 CU_ASSERT(ctrlr.is_failed == true); 5566 5567 poll_threads(); 5568 5569 CU_ASSERT(nvme_ctrlr->resetting == false); 5570 CU_ASSERT(ctrlr.is_failed == false); 5571 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5572 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5573 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5574 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5575 5576 /* Then a reconnect retry should still fail. */ 5577 spdk_delay_us(SPDK_SEC_TO_USEC); 5578 poll_thread_times(0, 1); 5579 5580 CU_ASSERT(nvme_ctrlr->resetting == true); 5581 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5582 5583 poll_threads(); 5584 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5585 poll_threads(); 5586 5587 CU_ASSERT(nvme_ctrlr->resetting == false); 5588 CU_ASSERT(ctrlr.is_failed == false); 5589 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5590 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5591 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5592 5593 /* Then a reconnect retry should still fail and the ctrlr should be deleted. */ 5594 spdk_delay_us(SPDK_SEC_TO_USEC); 5595 poll_threads(); 5596 5597 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5598 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5599 CU_ASSERT(nvme_ctrlr->destruct == true); 5600 5601 spdk_put_io_channel(ch2); 5602 5603 set_thread(0); 5604 5605 spdk_put_io_channel(ch1); 5606 5607 poll_threads(); 5608 spdk_delay_us(1000); 5609 poll_threads(); 5610 5611 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5612 } 5613 5614 static struct nvme_path_id * 5615 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr, 5616 const struct spdk_nvme_transport_id *trid) 5617 { 5618 struct nvme_path_id *p; 5619 5620 TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) { 5621 if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) { 5622 break; 5623 } 5624 } 5625 5626 return p; 5627 } 5628 5629 static void 5630 test_retry_failover_ctrlr(void) 5631 { 5632 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 5633 struct spdk_nvme_ctrlr ctrlr = {}; 5634 struct nvme_ctrlr *nvme_ctrlr = NULL; 5635 struct nvme_path_id *path_id1, *path_id2, *path_id3; 5636 struct spdk_io_channel *ch; 5637 struct nvme_ctrlr_channel *ctrlr_ch; 5638 int rc; 5639 5640 ut_init_trid(&trid1); 5641 ut_init_trid2(&trid2); 5642 ut_init_trid3(&trid3); 5643 TAILQ_INIT(&ctrlr.active_io_qpairs); 5644 5645 set_thread(0); 5646 5647 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 5648 CU_ASSERT(rc == 0); 5649 5650 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5651 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5652 5653 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1; 5654 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5655 5656 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 5657 CU_ASSERT(rc == 0); 5658 5659 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 5660 CU_ASSERT(rc == 0); 5661 5662 ch = spdk_get_io_channel(nvme_ctrlr); 5663 SPDK_CU_ASSERT_FATAL(ch != NULL); 5664 5665 ctrlr_ch = spdk_io_channel_get_ctx(ch); 5666 5667 path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1); 5668 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 5669 CU_ASSERT(path_id1->is_failed == false); 5670 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5671 5672 /* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */ 5673 ctrlr.fail_reset = true; 5674 ctrlr.is_failed = true; 5675 5676 rc = bdev_nvme_reset(nvme_ctrlr); 5677 CU_ASSERT(rc == 0); 5678 5679 poll_threads(); 5680 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5681 poll_threads(); 5682 5683 CU_ASSERT(nvme_ctrlr->resetting == false); 5684 CU_ASSERT(ctrlr.is_failed == false); 5685 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5686 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5687 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5688 CU_ASSERT(path_id1->is_failed == true); 5689 5690 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5691 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5692 5693 path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2); 5694 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 5695 CU_ASSERT(path_id2->is_failed == false); 5696 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5697 5698 /* If we remove trid2 while reconnect is scheduled, trid2 is removed and path_id is 5699 * switched to trid3 but reset is not started. 5700 */ 5701 rc = bdev_nvme_failover(nvme_ctrlr, true); 5702 CU_ASSERT(rc == 0); 5703 5704 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) == NULL); 5705 5706 path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3); 5707 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 5708 CU_ASSERT(path_id3->is_failed == false); 5709 CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id); 5710 5711 CU_ASSERT(nvme_ctrlr->resetting == false); 5712 5713 /* If reconnect succeeds, trid3 should be the active path_id */ 5714 ctrlr.fail_reset = false; 5715 5716 spdk_delay_us(SPDK_SEC_TO_USEC); 5717 poll_thread_times(0, 1); 5718 5719 CU_ASSERT(nvme_ctrlr->resetting == true); 5720 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5721 5722 poll_threads(); 5723 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5724 poll_threads(); 5725 5726 CU_ASSERT(path_id3->is_failed == false); 5727 CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id); 5728 CU_ASSERT(nvme_ctrlr->resetting == false); 5729 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 5730 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5731 5732 spdk_put_io_channel(ch); 5733 5734 poll_threads(); 5735 5736 rc = bdev_nvme_delete("nvme0", &g_any_path); 5737 CU_ASSERT(rc == 0); 5738 5739 poll_threads(); 5740 spdk_delay_us(1000); 5741 poll_threads(); 5742 5743 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5744 } 5745 5746 static void 5747 test_fail_path(void) 5748 { 5749 struct nvme_path_id path = {}; 5750 struct nvme_ctrlr_opts opts = {}; 5751 struct spdk_nvme_ctrlr *ctrlr; 5752 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5753 struct nvme_ctrlr *nvme_ctrlr; 5754 const int STRING_SIZE = 32; 5755 const char *attached_names[STRING_SIZE]; 5756 struct nvme_bdev *bdev; 5757 struct nvme_ns *nvme_ns; 5758 struct spdk_bdev_io *bdev_io; 5759 struct spdk_io_channel *ch; 5760 struct nvme_bdev_channel *nbdev_ch; 5761 struct nvme_io_path *io_path; 5762 struct nvme_ctrlr_channel *ctrlr_ch; 5763 int rc; 5764 5765 /* The test scenario is the following. 5766 * - We set ctrlr_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec. 5767 * - Rresetting a ctrlr fails and reconnecting the ctrlr is repeated. 5768 * - While reconnecting the ctrlr, an I/O is submitted and queued. 5769 * - The I/O waits until the ctrlr is recovered but ctrlr_fail_timeout_sec 5770 * comes first. The queued I/O is failed. 5771 * - After ctrlr_fail_timeout_sec, any I/O is failed immediately. 5772 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted. 5773 */ 5774 5775 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5776 ut_init_trid(&path.trid); 5777 5778 set_thread(0); 5779 5780 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5781 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5782 5783 g_ut_attach_ctrlr_status = 0; 5784 g_ut_attach_bdev_count = 1; 5785 5786 opts.ctrlr_loss_timeout_sec = 4; 5787 opts.reconnect_delay_sec = 1; 5788 opts.fast_io_fail_timeout_sec = 2; 5789 5790 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5791 attach_ctrlr_done, NULL, NULL, &opts, false); 5792 CU_ASSERT(rc == 0); 5793 5794 spdk_delay_us(1000); 5795 poll_threads(); 5796 5797 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5798 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5799 5800 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5801 CU_ASSERT(nvme_ctrlr != NULL); 5802 5803 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5804 CU_ASSERT(bdev != NULL); 5805 5806 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5807 CU_ASSERT(nvme_ns != NULL); 5808 5809 ch = spdk_get_io_channel(bdev); 5810 SPDK_CU_ASSERT_FATAL(ch != NULL); 5811 5812 nbdev_ch = spdk_io_channel_get_ctx(ch); 5813 5814 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5815 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5816 5817 ctrlr_ch = io_path->qpair->ctrlr_ch; 5818 SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL); 5819 SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL); 5820 5821 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 5822 ut_bdev_io_set_buf(bdev_io); 5823 5824 5825 /* Resetting a ctrlr should fail and a reconnect timer should be registered. */ 5826 ctrlr->fail_reset = true; 5827 ctrlr->is_failed = true; 5828 5829 rc = bdev_nvme_reset(nvme_ctrlr); 5830 CU_ASSERT(rc == 0); 5831 CU_ASSERT(nvme_ctrlr->resetting == true); 5832 CU_ASSERT(ctrlr->is_failed == true); 5833 5834 poll_threads(); 5835 5836 CU_ASSERT(nvme_ctrlr->resetting == false); 5837 CU_ASSERT(ctrlr->is_failed == false); 5838 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5839 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5840 CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0); 5841 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5842 5843 /* I/O should be queued. */ 5844 bdev_io->internal.in_submit_request = true; 5845 5846 bdev_nvme_submit_request(ch, bdev_io); 5847 5848 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5849 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5850 5851 /* After a second, the I/O should be still queued and the ctrlr should be 5852 * still recovering. 5853 */ 5854 spdk_delay_us(SPDK_SEC_TO_USEC); 5855 poll_threads(); 5856 5857 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5858 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5859 5860 CU_ASSERT(nvme_ctrlr->resetting == false); 5861 CU_ASSERT(ctrlr->is_failed == false); 5862 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5863 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5864 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5865 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5866 5867 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5868 5869 /* After two seconds, ctrlr_fail_timeout_sec should expire. */ 5870 spdk_delay_us(SPDK_SEC_TO_USEC); 5871 poll_threads(); 5872 5873 CU_ASSERT(nvme_ctrlr->resetting == false); 5874 CU_ASSERT(ctrlr->is_failed == false); 5875 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5876 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5877 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5878 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true); 5879 5880 /* Then within a second, pending I/O should be failed. */ 5881 spdk_delay_us(SPDK_SEC_TO_USEC); 5882 poll_threads(); 5883 5884 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5885 poll_threads(); 5886 5887 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5888 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5889 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5890 5891 /* Another I/O submission should be failed immediately. */ 5892 bdev_io->internal.in_submit_request = true; 5893 5894 bdev_nvme_submit_request(ch, bdev_io); 5895 5896 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5897 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5898 5899 /* After four seconds, path_loss_timeout_sec should expire and ctrlr should 5900 * be deleted. 5901 */ 5902 spdk_delay_us(SPDK_SEC_TO_USEC); 5903 poll_threads(); 5904 5905 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5906 poll_threads(); 5907 5908 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5909 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5910 CU_ASSERT(nvme_ctrlr->destruct == true); 5911 5912 spdk_put_io_channel(ch); 5913 5914 poll_threads(); 5915 spdk_delay_us(1000); 5916 poll_threads(); 5917 5918 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5919 5920 free(bdev_io); 5921 } 5922 5923 static void 5924 test_nvme_ns_cmp(void) 5925 { 5926 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 5927 5928 nvme_ns1.id = 0; 5929 nvme_ns2.id = UINT32_MAX; 5930 5931 CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0); 5932 CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0); 5933 } 5934 5935 static void 5936 test_ana_transition(void) 5937 { 5938 struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, }; 5939 struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, }; 5940 struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, }; 5941 struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, }; 5942 5943 /* case 1: ANA transition timedout is canceled. */ 5944 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5945 nvme_ns.ana_transition_timedout = true; 5946 5947 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5948 5949 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5950 5951 CU_ASSERT(nvme_ns.ana_transition_timedout == false); 5952 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5953 5954 /* case 2: ANATT timer is kept. */ 5955 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5956 nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout, 5957 &nvme_ns, 5958 ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5959 5960 desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5961 5962 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5963 5964 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5965 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 5966 5967 /* case 3: ANATT timer is stopped. */ 5968 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5969 5970 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5971 5972 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5973 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5974 5975 /* ANATT timer is started. */ 5976 desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5977 5978 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5979 5980 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5981 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE); 5982 5983 /* ANATT timer is expired. */ 5984 spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5985 5986 poll_threads(); 5987 5988 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5989 CU_ASSERT(nvme_ns.ana_transition_timedout == true); 5990 } 5991 5992 static void 5993 _set_preferred_path_cb(void *cb_arg, int rc) 5994 { 5995 bool *done = cb_arg; 5996 5997 *done = true; 5998 } 5999 6000 static void 6001 test_set_preferred_path(void) 6002 { 6003 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 6004 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 6005 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6006 const int STRING_SIZE = 32; 6007 const char *attached_names[STRING_SIZE]; 6008 struct nvme_bdev *bdev; 6009 struct spdk_io_channel *ch; 6010 struct nvme_bdev_channel *nbdev_ch; 6011 struct nvme_io_path *io_path; 6012 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6013 const struct spdk_nvme_ctrlr_data *cdata; 6014 bool done; 6015 int rc; 6016 6017 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6018 ut_init_trid(&path1.trid); 6019 ut_init_trid2(&path2.trid); 6020 ut_init_trid3(&path3.trid); 6021 g_ut_attach_ctrlr_status = 0; 6022 g_ut_attach_bdev_count = 1; 6023 6024 set_thread(0); 6025 6026 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6027 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6028 6029 ctrlr1->ns[0].uuid = &uuid1; 6030 6031 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6032 attach_ctrlr_done, NULL, NULL, NULL, true); 6033 CU_ASSERT(rc == 0); 6034 6035 spdk_delay_us(1000); 6036 poll_threads(); 6037 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6038 poll_threads(); 6039 6040 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6041 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6042 6043 ctrlr2->ns[0].uuid = &uuid1; 6044 6045 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6046 attach_ctrlr_done, NULL, NULL, NULL, true); 6047 CU_ASSERT(rc == 0); 6048 6049 spdk_delay_us(1000); 6050 poll_threads(); 6051 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6052 poll_threads(); 6053 6054 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 6055 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 6056 6057 ctrlr3->ns[0].uuid = &uuid1; 6058 6059 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 6060 attach_ctrlr_done, NULL, NULL, NULL, true); 6061 CU_ASSERT(rc == 0); 6062 6063 spdk_delay_us(1000); 6064 poll_threads(); 6065 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6066 poll_threads(); 6067 6068 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6069 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6070 6071 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6072 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6073 6074 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 6075 6076 ch = spdk_get_io_channel(bdev); 6077 SPDK_CU_ASSERT_FATAL(ch != NULL); 6078 nbdev_ch = spdk_io_channel_get_ctx(ch); 6079 6080 io_path = bdev_nvme_find_io_path(nbdev_ch); 6081 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6082 6083 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6084 6085 /* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path() 6086 * should return io_path to ctrlr2. 6087 */ 6088 6089 cdata = spdk_nvme_ctrlr_get_data(ctrlr2); 6090 done = false; 6091 6092 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6093 6094 poll_threads(); 6095 CU_ASSERT(done == true); 6096 6097 io_path = bdev_nvme_find_io_path(nbdev_ch); 6098 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6099 6100 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6101 6102 /* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is 6103 * aquired, find_io_path() should return io_path to ctrlr3. 6104 */ 6105 6106 spdk_put_io_channel(ch); 6107 6108 poll_threads(); 6109 6110 cdata = spdk_nvme_ctrlr_get_data(ctrlr3); 6111 done = false; 6112 6113 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6114 6115 poll_threads(); 6116 CU_ASSERT(done == true); 6117 6118 ch = spdk_get_io_channel(bdev); 6119 SPDK_CU_ASSERT_FATAL(ch != NULL); 6120 nbdev_ch = spdk_io_channel_get_ctx(ch); 6121 6122 io_path = bdev_nvme_find_io_path(nbdev_ch); 6123 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6124 6125 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3); 6126 6127 spdk_put_io_channel(ch); 6128 6129 poll_threads(); 6130 6131 rc = bdev_nvme_delete("nvme0", &g_any_path); 6132 CU_ASSERT(rc == 0); 6133 6134 poll_threads(); 6135 spdk_delay_us(1000); 6136 poll_threads(); 6137 6138 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6139 } 6140 6141 static void 6142 test_find_next_io_path(void) 6143 { 6144 struct nvme_bdev_channel nbdev_ch = { 6145 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 6146 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6147 }; 6148 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 6149 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 6150 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 6151 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 6152 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 6153 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 6154 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 6155 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 6156 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 6157 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 6158 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 6159 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {}; 6160 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 6161 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 6162 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 6163 6164 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6165 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6166 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 6167 6168 /* nbdev_ch->current_io_path is filled always when bdev_nvme_find_next_io_path() is called. */ 6169 6170 nbdev_ch.current_io_path = &io_path2; 6171 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6172 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6173 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6174 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6175 6176 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6177 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6178 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6179 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6180 6181 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6182 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6183 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6184 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6185 6186 nbdev_ch.current_io_path = &io_path3; 6187 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6188 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6189 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6190 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6191 } 6192 6193 static void 6194 test_disable_auto_failback(void) 6195 { 6196 struct nvme_path_id path1 = {}, path2 = {}; 6197 struct nvme_ctrlr_opts opts = {}; 6198 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6199 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6200 struct nvme_ctrlr *nvme_ctrlr1; 6201 const int STRING_SIZE = 32; 6202 const char *attached_names[STRING_SIZE]; 6203 struct nvme_bdev *bdev; 6204 struct spdk_io_channel *ch; 6205 struct nvme_bdev_channel *nbdev_ch; 6206 struct nvme_io_path *io_path; 6207 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6208 const struct spdk_nvme_ctrlr_data *cdata; 6209 bool done; 6210 int rc; 6211 6212 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6213 ut_init_trid(&path1.trid); 6214 ut_init_trid2(&path2.trid); 6215 g_ut_attach_ctrlr_status = 0; 6216 g_ut_attach_bdev_count = 1; 6217 6218 g_opts.disable_auto_failback = true; 6219 6220 opts.ctrlr_loss_timeout_sec = -1; 6221 opts.reconnect_delay_sec = 1; 6222 6223 set_thread(0); 6224 6225 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6226 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6227 6228 ctrlr1->ns[0].uuid = &uuid1; 6229 6230 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6231 attach_ctrlr_done, NULL, NULL, &opts, true); 6232 CU_ASSERT(rc == 0); 6233 6234 spdk_delay_us(1000); 6235 poll_threads(); 6236 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6237 poll_threads(); 6238 6239 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6240 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6241 6242 ctrlr2->ns[0].uuid = &uuid1; 6243 6244 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6245 attach_ctrlr_done, NULL, NULL, &opts, true); 6246 CU_ASSERT(rc == 0); 6247 6248 spdk_delay_us(1000); 6249 poll_threads(); 6250 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6251 poll_threads(); 6252 6253 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6254 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6255 6256 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6257 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6258 6259 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 6260 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6261 6262 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 6263 6264 ch = spdk_get_io_channel(bdev); 6265 SPDK_CU_ASSERT_FATAL(ch != NULL); 6266 nbdev_ch = spdk_io_channel_get_ctx(ch); 6267 6268 io_path = bdev_nvme_find_io_path(nbdev_ch); 6269 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6270 6271 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6272 6273 /* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */ 6274 ctrlr1->fail_reset = true; 6275 ctrlr1->is_failed = true; 6276 6277 bdev_nvme_reset(nvme_ctrlr1); 6278 6279 poll_threads(); 6280 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6281 poll_threads(); 6282 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6283 poll_threads(); 6284 6285 CU_ASSERT(ctrlr1->adminq.is_connected == false); 6286 6287 io_path = bdev_nvme_find_io_path(nbdev_ch); 6288 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6289 6290 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6291 6292 /* After a second, ctrlr1 is recovered. However, automatic failback is disabled. 6293 * Hence, io_path to ctrlr2 should still be used. 6294 */ 6295 ctrlr1->fail_reset = false; 6296 6297 spdk_delay_us(SPDK_SEC_TO_USEC); 6298 poll_threads(); 6299 6300 CU_ASSERT(ctrlr1->adminq.is_connected == true); 6301 6302 io_path = bdev_nvme_find_io_path(nbdev_ch); 6303 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6304 6305 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6306 6307 /* Set io_path to ctrlr1 to preferred explicitly. Then io_path to ctrlr1 should 6308 * be used again. 6309 */ 6310 6311 cdata = spdk_nvme_ctrlr_get_data(ctrlr1); 6312 done = false; 6313 6314 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6315 6316 poll_threads(); 6317 CU_ASSERT(done == true); 6318 6319 io_path = bdev_nvme_find_io_path(nbdev_ch); 6320 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6321 6322 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6323 6324 spdk_put_io_channel(ch); 6325 6326 poll_threads(); 6327 6328 rc = bdev_nvme_delete("nvme0", &g_any_path); 6329 CU_ASSERT(rc == 0); 6330 6331 poll_threads(); 6332 spdk_delay_us(1000); 6333 poll_threads(); 6334 6335 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6336 6337 g_opts.disable_auto_failback = false; 6338 } 6339 6340 int 6341 main(int argc, const char **argv) 6342 { 6343 CU_pSuite suite = NULL; 6344 unsigned int num_failures; 6345 6346 CU_set_error_action(CUEA_ABORT); 6347 CU_initialize_registry(); 6348 6349 suite = CU_add_suite("nvme", NULL, NULL); 6350 6351 CU_ADD_TEST(suite, test_create_ctrlr); 6352 CU_ADD_TEST(suite, test_reset_ctrlr); 6353 CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr); 6354 CU_ADD_TEST(suite, test_failover_ctrlr); 6355 CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid); 6356 CU_ADD_TEST(suite, test_pending_reset); 6357 CU_ADD_TEST(suite, test_attach_ctrlr); 6358 CU_ADD_TEST(suite, test_aer_cb); 6359 CU_ADD_TEST(suite, test_submit_nvme_cmd); 6360 CU_ADD_TEST(suite, test_add_remove_trid); 6361 CU_ADD_TEST(suite, test_abort); 6362 CU_ADD_TEST(suite, test_get_io_qpair); 6363 CU_ADD_TEST(suite, test_bdev_unregister); 6364 CU_ADD_TEST(suite, test_compare_ns); 6365 CU_ADD_TEST(suite, test_init_ana_log_page); 6366 CU_ADD_TEST(suite, test_get_memory_domains); 6367 CU_ADD_TEST(suite, test_reconnect_qpair); 6368 CU_ADD_TEST(suite, test_create_bdev_ctrlr); 6369 CU_ADD_TEST(suite, test_add_multi_ns_to_bdev); 6370 CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch); 6371 CU_ADD_TEST(suite, test_admin_path); 6372 CU_ADD_TEST(suite, test_reset_bdev_ctrlr); 6373 CU_ADD_TEST(suite, test_find_io_path); 6374 CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating); 6375 CU_ADD_TEST(suite, test_retry_io_for_io_path_error); 6376 CU_ADD_TEST(suite, test_retry_io_count); 6377 CU_ADD_TEST(suite, test_concurrent_read_ana_log_page); 6378 CU_ADD_TEST(suite, test_retry_io_for_ana_error); 6379 CU_ADD_TEST(suite, test_retry_admin_passthru_for_path_error); 6380 CU_ADD_TEST(suite, test_retry_admin_passthru_by_count); 6381 CU_ADD_TEST(suite, test_check_multipath_params); 6382 CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting); 6383 CU_ADD_TEST(suite, test_retry_admin_passthru_if_ctrlr_is_resetting); 6384 CU_ADD_TEST(suite, test_reconnect_ctrlr); 6385 CU_ADD_TEST(suite, test_retry_failover_ctrlr); 6386 CU_ADD_TEST(suite, test_fail_path); 6387 CU_ADD_TEST(suite, test_nvme_ns_cmp); 6388 CU_ADD_TEST(suite, test_ana_transition); 6389 CU_ADD_TEST(suite, test_set_preferred_path); 6390 CU_ADD_TEST(suite, test_find_next_io_path); 6391 CU_ADD_TEST(suite, test_disable_auto_failback); 6392 6393 CU_basic_set_mode(CU_BRM_VERBOSE); 6394 6395 allocate_threads(3); 6396 set_thread(0); 6397 bdev_nvme_library_init(); 6398 init_accel(); 6399 6400 CU_basic_run_tests(); 6401 6402 set_thread(0); 6403 bdev_nvme_library_fini(); 6404 fini_accel(); 6405 free_threads(); 6406 6407 num_failures = CU_get_number_of_failures(); 6408 CU_cleanup_registry(); 6409 6410 return num_failures; 6411 } 6412